input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
from django.shortcuts import get_object_or_404, render
from django.template import Template, Context
from django.http import HttpResponse
from .forms import *
from datetime import datetime
from .models import Memory
from .models import UserProfile
from django.contrib.auth.models import User
from friendship.models import Friend, Follow
from django.contrib.auth.models import User
from django.contrib.auth import authenticate
from django.contrib.auth import login as a_login
from django.contrib.auth import logout as a_logout
from django.contrib.auth.hashers import check_password
from django.shortcuts import render_to_response
from django.http import HttpResponseRedirect
from django.template import RequestContext
import googlemaps
def profiletest(request, user_id):
    """Debug view: show basic info about user *user_id* plus the
    requesting user's bio as a raw HTML response.

    Args:
        request: current HttpRequest (requester must have a UserProfile).
        user_id: primary key of the User to look up.

    Returns:
        HttpResponse containing a small hand-built HTML snippet.

    Raises:
        Http404: if the user or the requester's UserProfile is missing.
    """
    # Use get_object_or_404 instead of User.objects.get so a missing user
    # becomes a 404 rather than an unhandled DoesNotExist (HTTP 500),
    # matching the error handling used everywhere else in this file.
    u = get_object_or_404(User, pk=user_id)
    output = ("<h1>You're looking at user %s.</h1>" % request.user.username)
    profile = get_object_or_404(UserProfile, username=request.user.username).bio
    # NOTE(review): user-controlled strings are interpolated into HTML
    # without escaping -- potential XSS; consider django.utils.html.escape.
    output = output + "<br>" + u.username + " " + u.last_name + " " + profile
    return HttpResponse(output)
def userlist(request):
    """Return a plain-text response listing the first users ordered by pk.

    Each entry is "username email"; entries are comma-separated.
    """
    first_users = User.objects.order_by('pk')[:11]
    parts = []
    for account in first_users:
        parts.append(account.username + ' ' + account.email)
    return HttpResponse(', '.join(parts))
def signup(request):
    """Handle account registration.

    GET renders the signup form; POST validates it, creates the User plus
    an empty UserProfile, then authenticates and logs the new user in.

    Returns:
        Redirect to /timeline/ on success (or when already logged in),
        otherwise the rendered 'signup.html'.
    """
    if request.user.is_authenticated():
        return HttpResponseRedirect('/timeline/')
    if request.method == 'POST':
        form = RegistrationForm(request.POST)
        username = request.POST['username']
        password = request.POST['password']
        # NOTE(review): authenticate() is called without a password --
        # presumably meant as "does this username already exist?", but with
        # the default backend this always returns None, so the else branch
        # below may never trigger. Confirm intent.
        user = authenticate(username=username)
        if user is None:
            email = request.POST['email']
            fname = request.POST['fname']
            lname = request.POST['lname']
            if form.is_valid():
                user = User.objects.create_user(
                    username=username,
                    password=password,
                    email=email,
                    first_name=fname,
                    last_name=lname
                )
                user.save()
                # Bare profile row keyed by username; bio/image are filled
                # in later from the settings page.
                profile = UserProfile(username=username, date_created=datetime.now())
                profile.save()
                # Re-fetch and authenticate so the session gets a properly
                # backed user object before login.
                user = get_object_or_404(User, email=email)
                user = authenticate(username=user.username, password=password)
                if user is not None:
                    if user.is_active:
                        a_login(request, user)
                        return HttpResponseRedirect('/timeline/')
        else:
            return render(request, 'signup.html', {})
    else:
        form = RegistrationForm()
        # NOTE(review): this RequestContext is built but never passed to
        # the render() call below -- the form appears unused. Confirm.
        variables = RequestContext(request, {
            'form': form
        })
    return render(request, 'signup.html', {})
def post(request, memory_id):
    """Render the detail page for a single memory.

    Collects the memory, its author and author profile, the tagged users
    (tags whose text matches an existing username), and every other memory
    geocoded to the same lat/lng pair so they can be shown as related.

    Args:
        request: current HttpRequest; must be authenticated.
        memory_id: primary key of the Memory to display.

    Returns:
        Rendered 'post.html', or a redirect to /login/ when anonymous.
    """
    if not request.user.is_authenticated():
        return HttpResponseRedirect('/login/')
    memory = get_object_or_404(Memory, pk=memory_id)
    author = get_object_or_404(User, username=memory.author)
    authorProfile = get_object_or_404(UserProfile, username=memory.author)
    st = False #test for whether to show tags
    if request.user.id==author.id:
        st = True
    profile = get_object_or_404(UserProfile, username=request.user.username)
    tags = memory.tags.all()
    images = []
    users = []
    tag = []
    for t in tags:
        t = str(t)
        try:
            # Tags matching an existing username count as "tagged people";
            # any other tag raises Http404 here and is skipped by the
            # blanket except below.
            user = get_object_or_404(User, username=t)
            userProfile = get_object_or_404(UserProfile, username=t)
            users.append(user)
            images.append(userProfile.image)
            # Rebuilt every iteration because zip() is a one-shot iterator.
            tag = zip(users, images)
        except:
            pass
    location = memory.location
    # NOTE(review): geocoding on every page view is slow, and this assumes
    # the result always has a 'bounds' box -- some places only return
    # 'location'/'viewport'; confirm against the Geocoding API docs.
    gmaps = googlemaps.Client(key="<KEY>")
    geocode_result = gmaps.geocode(location)
    lat = geocode_result[0]['geometry']['bounds']['northeast']['lat']
    lng = geocode_result[0]['geometry']['bounds']['northeast']['lng']
    # Memories stored at exactly the same coordinates count as "related".
    memories = Memory.objects.filter(lat=lat).filter(lng=lng)
    authorProfileImages=[]
    authorProfiles=[]
    authors=[]
    for m in memories:
        aa = get_object_or_404(User, username=m.author)
        a = get_object_or_404(UserProfile, username=m.author)
        authorProfileImages.append(a.image)
        authorProfiles.append(a)
        authors.append(aa)
    link=zip(memories, authorProfileImages, authorProfiles, authors)
    all_friends = Friend.objects.friends(request.user)
    return render(request, 'post.html', {'memory': memory, 'author': author, 'authorProfile': authorProfile, 'image' : memory.image.name[10:], 'memories': memories, 'all_friends': all_friends, 'link': link, 'profile': profile, "tag":tag, "st":st})
def addTags(request, memory_id):
    """Attach the comma-separated tags from the ?q= query parameter to a
    memory, then re-render that memory's detail page.

    Args:
        request: current HttpRequest; tags are read from request.GET['q'].
        memory_id: primary key of the Memory to tag.

    Returns:
        The response produced by post() for the same memory.
    """
    # Fetch the memory once, instead of re-querying it for every tag as
    # the original did inside the loop.
    memory = get_object_or_404(Memory, pk=memory_id)
    raw = str(request.GET[u'q'])
    for name in raw.split(","):
        # Normalize each tag: trim whitespace, Title Case.
        memory.tags.add(str(name.strip()).title())
    return post(request, memory_id)
def newpost(request):
    """Show the (legacy) new-post form, or bounce anonymous users to login."""
    if not request.user.is_authenticated():
        return HttpResponseRedirect('/login/')
    context = {"username": request.user.username}
    return render(request, 'newpost.html', context)
def newpost_new(request):
    """Show the new-post form with the viewer's profile, or redirect to login."""
    if not request.user.is_authenticated():
        return HttpResponseRedirect('/login/')
    viewer_name = request.user.username
    viewer_profile = get_object_or_404(UserProfile, username=viewer_name)
    context = {"username": viewer_name, "profile": viewer_profile}
    return render(request, 'newpost_new.html', context)
def newpostsubmit(request):
    """Create a new Memory from the submitted new-post form.

    Geocodes the location, stores the memory with its image and the
    author's denormalized name/avatar, attaches the comma-separated tags,
    and finally renders the new memory's detail page via post().
    """
    if not request.user.is_authenticated():
        return HttpResponseRedirect('/login/')
    if 'title' in request.POST:
        location=request.POST['location']
        gmaps = googlemaps.Client(key="<KEY>")
        geocode_result = gmaps.geocode(location)
        # assumes the geocode result always carries a 'bounds' box --
        # some addresses only return 'location'/'viewport'; TODO confirm.
        lat = geocode_result[0]['geometry']['bounds']['northeast']['lat']
        lng = geocode_result[0]['geometry']['bounds']['northeast']['lng']
        profile = get_object_or_404(UserProfile, username=request.user.username)
        m = Memory(name=request.POST['title'], author=request.user.username, first_name=request.user.first_name, last_name=request.user.last_name, location=request.POST['location'], lat=lat, lng=lng, date_created=datetime.now(), description=request.POST['note_text'], image=request.FILES['media'], author_image=profile.image)
        m.save()
        memory = get_object_or_404(Memory, pk=m.id)
        t = request.POST['tags']
        t = str(t)
        t = t.split(",")
        for mt in t:
            # Normalize each tag: trim whitespace, Title Case.
            mt = mt.strip()
            mt = str(mt).title()
            memory.tags.add(mt)
        author = get_object_or_404(User, username=memory.author)
        return post(request, m.id)
        # Unreachable after the return above; left over from an earlier
        # version that returned a plain-text status message.
        message = 'Successfully added a new memory'
    else:
        message = 'You submitted an empty form.'
    return HttpResponse(message)
def search(request):
    """Read the ?q= query parameter and delegate to the location view."""
    query = request.GET[u'q']
    return location(request, query)
def location(request, location):
    """List every memory geocoded to the given location string.

    The ``location`` argument arrives with '+' standing in for spaces
    (see search()); it is geocoded and memories are matched on the exact
    northeast-bounds lat/lng pair.
    """
    # '+' is how the search URL encodes spaces in the place name.
    location = location.replace("+"," ")
    profile = get_object_or_404(UserProfile, username=request.user.username)
    gmaps = googlemaps.Client(key="<KEY>")
    geocode_result = gmaps.geocode(location)
    # assumes 'bounds' is always present in the geocode result -- some
    # places only return 'location'/'viewport'; TODO confirm.
    lat = geocode_result[0]['geometry']['bounds']['northeast']['lat']
    lng = geocode_result[0]['geometry']['bounds']['northeast']['lng']
    authorProfileImages=[]
    authorProfiles=[]
    authors=[]
    memories = Memory.objects.filter(lat=lat).filter(lng=lng)
    for m in memories:
        aa=get_object_or_404(User, username=m.author)
        a = get_object_or_404(UserProfile, username=m.author)
        authorProfileImages.append(a.image)
        authorProfiles.append(a)
        authors.append(aa)
    link=zip(memories, authorProfileImages, authorProfiles, authors)
    return render(request, "location.html", {"memories": memories, "location": location, "lat": lat, "lng": lng, "profile": profile, "link": link})
def settingssubmit(request):
    """Handle the account-settings form: update profile and account
    fields, then show the refreshed profile page.

    Returns:
        The profilemod() response on success, a plain-text error when the
        location field is missing, a redirect to /login/ for anonymous
        users, or a redirect to /account/ for non-POST requests.
    """
    if not request.user.is_authenticated():
        return HttpResponseRedirect('/login/')
    # Fetch the profile only after the auth check: the original fetched it
    # first, so anonymous users got an Http404 instead of the login redirect.
    profile = get_object_or_404(UserProfile, username=request.user.username)
    number = request.user.id
    if request.method == 'POST':
        if 'livesin' in request.POST:
            try:
                profile.image = request.FILES['media']
            except KeyError:
                # No new avatar uploaded; keep the existing image.
                pass
            profile.livesin = request.POST['livesin']
            request.user.username = request.POST['username']
            request.user.email = request.POST['email']
            request.user.first_name = request.POST['fname']
            request.user.last_name = request.POST['lname']
            profile.bio = request.POST['bio']
            profile.save()
            request.user.save()
            return profilemod(request, number)
        # BUG FIX: the original returned the tuple
        # ``HttpResponse(message), HttpResponseRedirect('/account/')`` --
        # Django cannot serve a tuple; return a single response.
        return HttpResponse('Please enter the location')
    return HttpResponseRedirect('/account/')
def passwordreset(request):
    """Let a logged-in user change their password.

    On a valid POST the password is changed and the user is logged out and
    sent back to the login page; on a mismatch the form is redisplayed.
    """
    if not request.user.is_authenticated():
        return HttpResponseRedirect('/login/')
    # The is_authenticated() re-check is redundant after the guard above.
    if request.user.is_authenticated() and request.method == 'POST':
        oldpass = request.POST['oldPassword']
        if check_password(oldpass, request.user.password):
            # presumably 'oldPasswordCheck' is a re-typed copy of the old
            # password used as confirmation -- verify against the template.
            oldpasscheck = request.POST['oldPasswordCheck']
            newpass = request.POST['newPassword']
            if oldpass == oldpasscheck and oldpass != newpass:
                request.user.set_password(newpass)
                request.user.save()
                # Changing the password invalidates the session, so force a
                # fresh login.
                a_logout(request)
                return HttpResponseRedirect('/login/')
            else:
                return HttpResponseRedirect('/password-reset/')
        else:
            return HttpResponseRedirect('/password-reset/')
    return render(request, 'password-reset.html', {})
def login(request):
    """Log a user in by email + password.

    Returns:
        Redirect to /timeline/ on success (or when already logged in);
        otherwise the rendered 'login.html'.

    Raises:
        Http404: when no account matches the submitted email.
    """
    if request.user.is_authenticated():
        return HttpResponseRedirect('/timeline/')
    if request.method == 'POST':
        email = request.POST['email']
        password = request.POST['password']
        # Accounts are looked up by email, then authenticated by username.
        account = get_object_or_404(User, email=email)
        user = authenticate(username=account.username, password=password)
        if user is not None and user.is_active:
            a_login(request, user)
            return HttpResponseRedirect('/timeline/')
        # BUG FIX: a failed login previously fell through to the
        # /timeline/ redirect (and an inactive user triggered an odd
        # recursive ``login(request)`` call); re-render the form instead.
        return render(request, 'login.html', {})
    return render(request, 'login.html', {})
def logout(request):
    """End the current session and send the visitor to the login page."""
    a_logout(request)
    return HttpResponseRedirect('/login/')
def friends(request, authorProfile_id):
    """Render the friends page for the user with id ``authorProfile_id``.

    Builds the friend list with profile images plus flags describing the
    relationship between the viewer and the viewed user: is-friend,
    is-self, and pending friendship requests sent/received.
    """
    if not request.user.is_authenticated():
        return HttpResponseRedirect('/login/')
    user = get_object_or_404(User, id=authorProfile_id)
    author = get_object_or_404(UserProfile, username=user.username)
    username = request.user.username
    first_name = user.first_name
    last_name = user.last_name
    all_friends = Friend.objects.friends(user)
    countfriends = len(all_friends)
    friendsProfileImages=[]
    for friend in all_friends:
        friendProfile = get_object_or_404(UserProfile, username=friend.username)
        friendsProfileImages.append(friendProfile.image)
    link=zip(all_friends, friendsProfileImages)
    profile = get_object_or_404(UserProfile, username=request.user.username)
    actualUser = request.user
    isFriend = Friend.objects.are_friends(actualUser, user)
    isSelf = actualUser==user
    requests = Friend.objects.unread_requests(user=request.user)
    friendshiprequests = []
    requestreceive = False
    for r in requests:
        # NOTE(review): this extracts a user id out of the request's str()
        # representation (second whitespace token, first char dropped) --
        # fragile; it depends on django-friendship's __str__ format. Confirm.
        r = str(r)
        idn = r.split()[1][1:]
        if authorProfile_id == idn:
            requestreceive = True
        requestfrom = get_object_or_404(User, id=idn)
        friendshiprequests.append(requestfrom)
    countrequest = len(friendshiprequests)
    c = 0
    if countrequest>=2:
        c = 1
    sent = Friend.objects.sent_requests(user=request.user)
    requestsent = False
    for s in sent:
        # Same fragile str()-parsing for sent requests (fifth token).
        s = str(s)
        ids = s.split()[4][1:]
        if authorProfile_id == ids:
            requestsent = True
    return render(request, 'friends.html', {"user": user, "profile": profile,"author": author, "all_friends": all_friends, "actualUser": actualUser, "link": link, "isFriend": isFriend, "isSelf": isSelf, "countfriends": countfriends, "friendshiprequests": friendshiprequests, "requestreceive": requestreceive, "c": c, "requestsent": requestsent, "countrequest": countrequest, "first_name": first_name, "last_name": last_name})
def following(request, authorProfile_id):
    """Render the list of accounts that the given user follows."""
    if not request.user.is_authenticated():
        return HttpResponseRedirect('/login/')
    target = get_object_or_404(User, id=authorProfile_id)
    target_profile = get_object_or_404(UserProfile, username=target.username)
    all_following = Follow.objects.following(target)
    # Pair each followed account with its profile image for the template.
    followed_images = [
        get_object_or_404(UserProfile, username=account.username).image
        for account in all_following
    ]
    viewer_profile = get_object_or_404(UserProfile, username=request.user.username)
    context = {
        "user": target,
        "profile": viewer_profile,
        "author": target_profile,
        "all_following": all_following,
        "link": zip(all_following, followed_images),
        "first_name": target.first_name,
        "last_name": target.last_name,
        "countfollowing": len(all_following),
    }
    return render(request, 'following.html', context)
def follower(request, authorProfile_id):
    """Render the list of accounts following the given user."""
    if not request.user.is_authenticated():
        return HttpResponseRedirect('/login/')
    target = get_object_or_404(User, id=authorProfile_id)
    target_profile = get_object_or_404(UserProfile, username=target.username)
    all_followers = Follow.objects.followers(target)
    # Pair each follower with its profile image for the template.
    follower_images = [
        get_object_or_404(UserProfile, username=account.username).image
        for account in all_followers
    ]
    viewer_profile = get_object_or_404(UserProfile, username=request.user.username)
    context = {
        "user": target,
        "profile": viewer_profile,
        "author": target_profile,
        "all_followers": all_followers,
        "link": zip(all_followers, follower_images),
        "first_name": target.first_name,
        "last_name": target.last_name,
        "countfollowers": len(all_followers),
    }
    return render(request, 'follower.html', context)
def timeline(request):
    """Show every memory in the system on the shared timeline page."""
    if not request.user.is_authenticated():
        return HttpResponseRedirect('/login/')
    viewer = request.user
    viewer_profile = get_object_or_404(UserProfile, username=viewer.username)
    memories = Memory.objects.all()
    author_images = []
    author_profiles = []
    author_accounts = []
    for entry in memories:
        account = get_object_or_404(User, username=entry.author)
        account_profile = get_object_or_404(UserProfile, username=entry.author)
        author_images.append(account_profile.image)
        author_profiles.append(account_profile)
        author_accounts.append(account)
    # Rows zipped together so the template can iterate them in lockstep.
    context = {
        "memories": memories,
        "user": viewer,
        "link": zip(memories, author_images, author_profiles, author_accounts),
        "profile": viewer_profile,
    }
    return render(request, 'timeline.html', context)
def profilemod(request, authorProfile_id):
    """Render the profile page for the user with id ``authorProfile_id``.

    Like friends(), but also includes the user's own memories; builds the
    friend list with images and the viewer-relationship flags (is-friend,
    is-self, pending requests sent/received).
    """
    if not request.user.is_authenticated():
        return HttpResponseRedirect('/login/')
    user = get_object_or_404(User, id=authorProfile_id)
    author = get_object_or_404(UserProfile, username=user.username)
    username = request.user.username
    first_name = user.first_name
    last_name = user.last_name
    all_friends = Friend.objects.friends(user)
    countfriends = len(all_friends)
    friendsProfileImages=[]
    for friend in all_friends:
        friendProfile = get_object_or_404(UserProfile, username=friend.username)
        friendsProfileImages.append(friendProfile.image)
    link=zip(all_friends, friendsProfileImages)
    memories = Memory.objects.filter(author=author.username)
    #user = request.user
    profile = get_object_or_404(UserProfile, username=request.user.username)
    actualUser = request.user
    isFriend = Friend.objects.are_friends(actualUser, user)
    isSelf = actualUser==user
    requests = Friend.objects.unread_requests(user=request.user)
    friendshiprequests = []
    requestreceive = False
    for r in requests:
        # NOTE(review): extracts a user id from the request's str()
        # representation (second token, first char dropped) -- fragile;
        # depends on django-friendship's __str__ format. Confirm.
        r = str(r)
        idn = r.split()[1][1:]
        if authorProfile_id == idn:
            requestreceive = True
        requestfrom = get_object_or_404(User, id=idn)
        friendshiprequests.append(requestfrom)
    countrequest = len(friendshiprequests)
    c = 0
    if countrequest>=2:
        c = 1
    sent = Friend.objects.sent_requests(user=request.user)
    requestsent = False
    for s in sent:
        # Same fragile str()-parsing for sent requests (fifth token).
        s = str(s)
        ids = s.split()[4][1:]
        if authorProfile_id == ids:
            requestsent = True
    return render(request, 'profile-mod.html', {"user": user, "memories": memories, "profile": profile,"author": author, "all_friends": all_friends, "actualUser": actualUser, "link": link, "isFriend": isFriend, "isSelf": isSelf, "countfriends": countfriends, "friendshiprequests": friendshiprequests, "requestreceive": requestreceive, "c": c, "requestsent": requestsent, "countrequest": countrequest})
def getUsers(request):
    """Return a list of "First Last" display names, one per User."""
    return [u.first_name + ' ' + u.last_name for u in User.objects.all()]
def getMemories(request):
    """Return a list of "First Last" author-name strings, one per Memory.

    BUG FIX: the original appended to (and returned) the undefined name
    ``memorylist`` while initializing ``memory_list``, raising NameError
    on every call.
    """
    memory_list = []
    for m in Memory.objects.all():
        memory_list.append(m.first_name + ' ' + m.last_name)
    return memory_list
def myprofile(request):
    """Render the logged-in user's own profile page with their memories.

    Returns:
        Rendered 'settings/myprofile.html', or a /login/ redirect for
        anonymous visitors.
    """
    if not request.user.is_authenticated():
        return HttpResponseRedirect('/login/')
    profile = get_object_or_404(UserProfile, username=request.user.username)
    # Removed the unused ``users = User.objects.all()`` local from the
    # original -- it was never referenced.
    memories = Memory.objects.filter(author=request.user.username)
    return render(request, 'settings/myprofile.html', {"memories": memories, "profile": profile, "user": request.user})
def account(request):
if not request.user.is_authenticated():
return HttpResponseRedirect('/login/')
user = request.user
| |
so.num_words()
return (tot_num_adjs + tot_num_pron) / tot_num_words
class Noun_Verb_Ratio(object):
    """Class to calculate the ratio of the number of nouns to the number of verbs
    Ref: https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5337522/
    """

    def __init__(self, sentence_objs):
        """Store the array of sentence objects to be analyzed."""
        self.sentence_objs = sentence_objs

    def handle(self):
        """Return the noun-to-verb ratio across all sentences.

        Returns:
            Total nouns divided by total verbs, or NOT_AVAILABLE when no
            verbs were observed (avoids division by zero).
        """
        noun_total = 0
        verb_total = 0
        for sentence in self.sentence_objs:
            counter = sentence.pos_tag_counter
            noun_total += counter.get_pos_tag_count(NOUN)
            verb_total += counter.get_pos_tag_count(VERB)
        if verb_total == 0:
            return NOT_AVAILABLE
        return noun_total / verb_total
class Noun_Ratio(object):
    """Class to calculate the ratio of the number of nouns to the total number of nouns and verbs
    Ref: https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5337522/
    """

    def __init__(self, sentence_objs):
        """The init method to initialize with an array of sentence objects
        """
        self.sentence_objs = sentence_objs

    def handle(self):
        """Method to calculate the noun ratio

        Args:
            None
        Returns:
            The ratio of nouns to nouns+verbs, or NOT_AVAILABLE when no
            nouns or verbs were observed.
        """
        tot_num_nouns, tot_num_verbs = 0, 0
        for so in self.sentence_objs:
            tot_num_nouns += so.pos_tag_counter.get_pos_tag_count(NOUN)
            tot_num_verbs += so.pos_tag_counter.get_pos_tag_count(VERB)
        if (tot_num_nouns + tot_num_verbs) != 0:
            return tot_num_nouns / (tot_num_nouns + tot_num_verbs)
        # BUG FIX: the original fell off the end and implicitly returned
        # None here; return NOT_AVAILABLE for consistency with the sibling
        # ratio classes (Noun_Verb_Ratio, Pronoun_Noun_Ratio, ...).
        return NOT_AVAILABLE
class Pronoun_Noun_Ratio(object):
    """Class to calculate the ratio of the number of pronouns to the total number of nouns
    Ref: https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5337522/
    """

    def __init__(self, sentence_objs):
        """Store the array of sentence objects to be analyzed."""
        self.sentence_objs = sentence_objs

    def handle(self):
        """Return the pronoun-to-noun ratio across all sentences.

        Returns:
            Total pronouns divided by total nouns, or NOT_AVAILABLE when
            no nouns were observed (avoids division by zero).
        """
        pronoun_total = 0
        noun_total = 0
        for sentence in self.sentence_objs:
            counter = sentence.pos_tag_counter
            pronoun_total += counter.get_pos_tag_count(PRONOUN)
            noun_total += counter.get_pos_tag_count(NOUN)
        if noun_total == 0:
            return NOT_AVAILABLE
        return pronoun_total / noun_total
class Total_Dependency_Distance(object):
    """Class to calculate the sum of dependency distances
    Ref: https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5337522/
    """

    def __init__(self, sentence_objs):
        """Store the array of sentence objects to be analyzed."""
        self.sentence_objs = sentence_objs

    def handle(self):
        """Return the total dependency distance summed over all sentences.

        For every word in every parsed sentence, the absolute distance
        between the word's position (``id``) and its syntactic head's
        position (``head``) is accumulated.
        """
        total = 0
        for sentence in self.sentence_objs:
            parsed = sentence.stanza_doc.to_dict()[0]
            for dep in parsed:
                total += abs(int(dep['id']) - dep['head'])
        return total
class Average_Dependency_Distance(object):
    """Class to calculate the sum of dependency distances
    Ref: https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5337522/
    """

    def __init__(self, sentence_objs):
        """Store the array of sentence objects to be analyzed."""
        self.sentence_objs = sentence_objs

    def handle(self):
        """Return the mean per-sentence dependency distance.

        Each sentence contributes the sum of |id - head| over its words;
        the result is the mean over sentences, or NOT_AVAILABLE when
        there are no sentences.
        """
        per_sentence = []
        for sentence in self.sentence_objs:
            parsed = sentence.stanza_doc.to_dict()[0]
            distance = sum(abs(int(dep['id']) - dep['head']) for dep in parsed)
            per_sentence.append(distance)
        if not per_sentence:
            return NOT_AVAILABLE
        return np.mean(per_sentence)
class Total_Dependencies(object):
    """Class to calculate the number of unique syntactic dependencies
    Ref: https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5337522/
    """

    def __init__(self, sentence_objs):
        """Store the array of sentence objects to be analyzed."""
        self.sentence_objs = sentence_objs

    def handle(self):
        """Return the number of distinct dependency relations seen
        across all sentences."""
        unique_relations = set()
        for sentence in self.sentence_objs:
            parsed = sentence.stanza_doc.to_dict()[0]
            unique_relations.update(dep['deprel'] for dep in parsed)
        return len(unique_relations)
class Average_Dependencies(object):
    """Class to calculate the average number of unique syntactic dependencies
    Ref: https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5337522/
    """

    def __init__(self, sentence_objs):
        """Store the array of sentence objects to be analyzed."""
        self.sentence_objs = sentence_objs

    def handle(self):
        """Return the mean per-sentence count of distinct dependency
        relations, or NOT_AVAILABLE when there are no sentences."""
        per_sentence = []
        for sentence in self.sentence_objs:
            parsed = sentence.stanza_doc.to_dict()[0]
            distinct = {dep['deprel'] for dep in parsed}
            per_sentence.append(len(distinct))
        if not per_sentence:
            return NOT_AVAILABLE
        return np.mean(per_sentence)
class Closed_Class_Word_Rate(object):
    """Class to calculate the proportion of closed class words
    Ref: https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5337522/
    """

    def __init__(self, sentence_objs):
        """The init method to initialize with an array of sentence objects
        """
        self.sentence_objs = sentence_objs

    def handle(self):
        """Method to calculate the proportion of closed class words

        Args:
            None
        Returns:
            The ratio of the total number of determiners, prepositions,
            pronouns and conjunctions to the total number of words, or
            NOT_AVAILABLE when no words were observed.
        """
        tot_num_det, tot_num_prep, tot_num_pron, tot_num_cconj, tot_num_words = (
            0,
            0,
            0,
            0,
            0,
        )
        for so in self.sentence_objs:
            tot_num_det += so.pos_tag_counter.get_pos_tag_count(DETERMINER)
            tot_num_prep += so.pos_tag_counter.get_pos_tag_count(ADPOSITION)
            tot_num_pron += so.pos_tag_counter.get_pos_tag_count(PRONOUN)
            tot_num_cconj += so.pos_tag_counter.get_pos_tag_count(CONJUNCTION)
            tot_num_words += so.num_words()
        # BUG FIX: guard against ZeroDivisionError on empty input, for
        # consistency with the sibling classes that return NOT_AVAILABLE.
        if tot_num_words == 0:
            return NOT_AVAILABLE
        return (
            tot_num_det + tot_num_prep + tot_num_pron + tot_num_cconj
        ) / tot_num_words
class Open_Class_Word_Rate(object):
    """Class to calculate the proportion of open class word_count
    Ref: https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5337522/
    """

    def __init__(self, sentence_objs):
        """The init method to initialize with an array of sentence objects
        """
        self.sentence_objs = sentence_objs

    def handle(self):
        """Method to calculate the proportion of open class words

        Args:
            None
        Returns:
            The ratio of the total number of nouns, verbs, adjectives and
            adverbs to the total number of words, or NOT_AVAILABLE when no
            words were observed.
        """
        tot_num_nouns, tot_num_verbs, tot_num_adjs, tot_num_advs, tot_num_words = (
            0,
            0,
            0,
            0,
            0,
        )
        for so in self.sentence_objs:
            tot_num_nouns += so.pos_tag_counter.get_pos_tag_count(NOUN)
            tot_num_verbs += so.pos_tag_counter.get_pos_tag_count(VERB)
            tot_num_adjs += so.pos_tag_counter.get_pos_tag_count(ADJECTIVE)
            tot_num_advs += so.pos_tag_counter.get_pos_tag_count(ADVERB)
            tot_num_words += so.num_words()
        # BUG FIX: guard against ZeroDivisionError on empty input, for
        # consistency with the sibling classes that return NOT_AVAILABLE.
        if tot_num_words == 0:
            return NOT_AVAILABLE
        return (
            tot_num_nouns + tot_num_verbs + tot_num_adjs + tot_num_advs
        ) / tot_num_words
class Content_Density(object):
    """Class to calculate the content density of words
    Ref: https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5337522/
    """

    def __init__(self, sentence_objs):
        """Store the array of sentence objects to be analyzed."""
        self.sentence_objs = sentence_objs

    def handle(self):
        """Return the content density of the text.

        Returns:
            The ratio of open-class words (nouns, verbs, adjectives,
            adverbs) to closed-class words (determiners, adpositions,
            pronouns, conjunctions), or NOT_AVAILABLE when no closed-class
            words were observed.
        """
        open_class = 0
        closed_class = 0
        # Single pass: the original looped over the sentences twice to
        # accumulate the same counters.
        for sentence in self.sentence_objs:
            counter = sentence.pos_tag_counter
            open_class += counter.get_pos_tag_count(NOUN)
            open_class += counter.get_pos_tag_count(VERB)
            open_class += counter.get_pos_tag_count(ADJECTIVE)
            open_class += counter.get_pos_tag_count(ADVERB)
            closed_class += counter.get_pos_tag_count(DETERMINER)
            closed_class += counter.get_pos_tag_count(ADPOSITION)
            closed_class += counter.get_pos_tag_count(PRONOUN)
            closed_class += counter.get_pos_tag_count(CONJUNCTION)
        if closed_class == 0:
            return NOT_AVAILABLE
        return open_class / closed_class
class Idea_Density(object):
    """Class to calculate the idea density of words
    Ref: https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5337522/
    """

    def __init__(self, sentence_objs):
        """The init method to initialize with an array of sentence objects
        """
        self.sentence_objs = sentence_objs

    def handle(self):
        """Method to calculate the idea density of words

        Args:
            None
        Returns:
            The ratio of the total number of verbs, adjectives, adverbs,
            prepositions and conjunctions to the number of words, or
            NOT_AVAILABLE when no words were observed.
        """
        (
            tot_num_verbs,
            tot_num_adjs,
            tot_num_advs,
            tot_num_preps,
            tot_num_cconjs,
            tot_num_words,
        ) = (
            0,
            0,
            0,
            0,
            0,
            0,
        )
        for so in self.sentence_objs:
            tot_num_verbs += so.pos_tag_counter.get_pos_tag_count(VERB)
            tot_num_adjs += so.pos_tag_counter.get_pos_tag_count(ADJECTIVE)
            tot_num_advs += so.pos_tag_counter.get_pos_tag_count(ADVERB)
            tot_num_preps += so.pos_tag_counter.get_pos_tag_count(ADPOSITION)
            tot_num_cconjs += so.pos_tag_counter.get_pos_tag_count(CONJUNCTION)
            tot_num_words += so.num_words()
        # BUG FIX: guard against ZeroDivisionError on empty input, for
        # consistency with the sibling classes that return NOT_AVAILABLE.
        if tot_num_words == 0:
            return NOT_AVAILABLE
        return (
            tot_num_verbs + tot_num_adjs + tot_num_advs + tot_num_preps + tot_num_cconjs
        ) / tot_num_words
class Honore_Statistic(object):
    """Class to calculate the honore's statistic
    Ref: https://www.aclweb.org/anthology/W16-1902.pdf
    """

    def __init__(self, sentence_objs):
        """Store the array of sentence objects to be analyzed."""
        self.sentence_objs = sentence_objs

    def handle(self):
        """Return Honore's statistic R = 100*log(N) / (1 - V1/V).

        N is the number of word tokens, V the vocabulary size and V1 the
        number of words spoken exactly once. NOT_AVAILABLE is returned
        when the statistic is undefined (no words, or every word unique).
        """
        all_words = []
        for sentence in self.sentence_objs:
            all_words.extend(w.text for w in sentence.stanza_doc.sentences[0].words)
        counts = Counter(all_words)
        vocab_size = len(counts)
        hapax_count = sum(1 for c in counts.values() if c == 1)
        num_words = len(all_words)
        # Undefined when there is no text or the denominator would be zero.
        if (hapax_count == vocab_size) or (vocab_size == 0) or (num_words == 0):
            return NOT_AVAILABLE
        return (100 * math.log(num_words)) / (
            1 - (hapax_count) / (vocab_size)
        )
class Brunet_Index(object):
    """Class to calculate the brunet's statistic
    Ref: https://www.aclweb.org/anthology/W16-1902.pdf
    """

    def __init__(self, sentence_objs):
        """Store the array of sentence objects to be analyzed."""
        self.sentence_objs = sentence_objs

    def handle(self):
        """Return Brunet's index W = N ** (V ** -0.165).

        N is the number of word tokens and V the vocabulary size.
        """
        all_words = []
        for sentence in self.sentence_objs:
            for word in sentence.stanza_doc.sentences[0].words:
                all_words.append(word.text)
        vocab_size = len(set(all_words))
        token_count = len(all_words)
        return math.pow(token_count, math.pow(vocab_size, -0.165))
class Type_Token_Ratio(object):
"""Class to calculate the type-token ratio
Ref: https://www.tandfonline.com/doi/abs/10.1080/02687038.2017.1303441
"""
def __init__(self, sentence_objs):
"""The init method to initialize with an array of sentence objects
"""
self.sentence_objs = sentence_objs
def handle(self):
"""Method to calculcate the type-token statistic
Args:
None
Returns:
The ratio of the number of word types to the number of words
"""
| |
# Copyright Contributors to the OpenCue Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module provides the ability to find and run plugins.
Each plugin module can or should contain the following:
PLUGIN_NAME : The displayed name of the plugin.
PLUGIN_CATEGORY : The submenu that the plugin should be placed in.
PLUGIN_DESCRIPTION : The description shown next to the plugin name.
PLUGIN_REQUIRES : (optional) The application name that the plugin requires
: to load. ex. "CueCommander"
PLUGIN_PROVIDES : The name of the class that the plugin provides.
When a plugin is instantiated, a reference to the MainWindow is provided to the
constructor.
When a plugin wishes to remove its instance, it should signal with:
self.emit(QtCore.SIGNAL("closed(PyQt_PyObject)"), self)
You should not have any circular non-weak references to yourself. Use weakref.proxy
You may implement __del__ with a print to see if your object is properly removed
The class the plugin provides can have the following functions:
pluginSaveState() : This should return any settings that the plugin would like
: to save for the next time it is loaded as a string.
pluginRestoreState(settings) : This will receive any settings that it previously
: returned from pluginSaveState()
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import json
from builtins import str
from builtins import map
from builtins import object
import os
import sys
import traceback
import pickle
from PySide2 import QtCore
from PySide2 import QtGui
from PySide2 import QtWidgets
import cuegui.Constants
import cuegui.Logger
import cuegui.Utils
# Module-level logger for this file.
logger = cuegui.Logger.getLogger(__file__)

# Keys into the per-plugin metadata dictionaries kept in Plugins.__plugins.
CLASS = "CLASS"
DESCRIPTION = "DESCRIPTION"
CATEGORY = "CATEGORY"

# presumably indexes into a (key, getter, setter) settings tuple used by
# plugin setup code -- not referenced in this chunk; TODO confirm.
SETTINGS_KEY = 0
SETTINGS_GET = 1
SETTINGS_SET = 2

# json.decoder.JSONDecodeError does not exist in older json modules,
# which raise plain ValueError instead.
try:
    JSON_EXCEPTION_CLASS = json.decoder.JSONDecodeError
except AttributeError:
    JSON_EXCEPTION_CLASS = ValueError
class Plugins(object):
    """Finds, loads, launches and persists the state of CueGUI plugins."""
    # Keyed to name. each is a dictionary with CLASS, DESCRIPTION and optionally CATEGORY
    __plugins = {}
    # presumably the plugin directories already scanned, to avoid loading
    # the same path twice -- used outside this chunk; TODO confirm.
    _loadedPaths = []
    def __init__(self, mainWindow, name):
        """Plugins class initialization.
        @param mainWindow: Application main window reference
        @type mainWindow: QMainWindow
        @param name: Name of current window
        @type name: string"""
        # (plugin_name, plugin_instance) tuples for every running plugin.
        self.__running = []
        self.name = name
        self.mainWindow = mainWindow
        self.__menu_separator = " \t-> "
        # Load plugin paths from the config file
        __pluginPaths = QtGui.qApp.settings.value("Plugin_Paths", [])
        for path in cuegui.Constants.DEFAULT_PLUGIN_PATHS + __pluginPaths:
            self.loadPluginPath(str(path))
        # Load plugins explicitly listed in the config file: first the
        # "General" group shared by all windows, then this window's group.
        self.loadConfigFilePlugins("General")
        self.loadConfigFilePlugins(self.name)
    def loadConfigFilePlugins(self, configGroup):
        """Loads plugins explicitly listed in the config file for the window.
        The path is optional if the module is already in the path. The module is
        optional if you just want to add to the path.
        [General]
        Plugins=/example/path/module, package.module2
        The imported module must have an init function and a QMainWindow will be
        passed to it.

        @param configGroup: settings group to read the "Plugins" list from
                            (e.g. "General" or the window name)
        @type  configGroup: string
        """
        __plugins = QtGui.qApp.settings.value("%s/Plugins" % configGroup, [])
        # First pass: extend sys.path with every directory mentioned so the
        # imports in the second pass can resolve.
        for plugin in __plugins:
            path = os.path.dirname(str(plugin))
            if path:
                logger.info("adding path " + path)
                sys.path.append(path)
        # Second pass: import each module and hand it the main window.
        for plugin in __plugins:
            module = os.path.basename(str(plugin))
            if module:
                logger.info("loading module " + module)
                s_class = module.split(".")[-1]
                try:
                    m = __import__(module, globals(), locals(), [s_class])
                    m.init(self.mainWindow)
                    logger.info("plugin loaded %s" % module)
                except Exception as e:
                    # A broken plugin must not take the application down;
                    # log and continue with the rest.
                    logger.warning("Failed to load plugin: %s" % s_class)
                    list(map(logger.warning, cuegui.Utils.exceptionOutput(e)))
def __closePlugin(self, object):
"""When a running plugin is closed, this is called and the running
plugin is deleted. If it is a dock widget then it is removed from the
main window.
@type object: Object
@param object: The object created by loadin"""
for item in self.__running:
if item[1] == object:
if isinstance(object, QtWidgets.QDockWidget):
self.mainWindow.removeDockWidget(object)
self.__running.remove(item)
return
def runningList(self):
    """Return every currently running plugin.

    @return: [(plugin_name, plugin_instance), ...] for each open plugin
    @rtype: list"""
    # The internal list is returned directly (not a copy).
    return self.__running
def saveState(self):
    """Persist the set of open plugins to the application settings.

    Any plugin that provides pluginSaveState() has its state serialized
    to JSON and stored alongside its name as "name::json"."""
    opened = []
    for name, instance in self.__running:
        try:
            if hasattr(instance, "pluginSaveState"):
                opened.append("%s::%s" % (name, json.dumps(instance.pluginSaveState())))
        except Exception as e:
            logger.warning("Error saving plugin state for: %s\n%s" % (name, e))
    QtGui.qApp.settings.setValue("%s/Plugins_Opened" % self.name, opened)
def restoreState(self):
    """Loads any user defined plugin directories.
    Restores all open plugins.
    Calls .restoreSettings (if available) on all plugins."""
    # Loads any user defined plugin directories
    for path in QtGui.qApp.settings.value("Plugins/Paths", []):
        self.loadPluginPath(str(path))
    # Runs any plugins that were saved to the settings
    for plugin in (QtGui.qApp.settings.value("%s/Plugins_Opened" % self.name) or []):
        if '::' in plugin:
            # maxsplit=1: the serialized JSON state may itself contain
            # "::"; splitting on every occurrence raised ValueError here.
            plugin_name, plugin_state = str(plugin).split("::", 1)
            self.launchPlugin(plugin_name, plugin_state)
def launchPlugin(self, plugin_name, plugin_state):
    """Launches the desired plugin
    @param plugin_name: The name of the plugin as provided by PLUGIN_NAME
    @type plugin_name: string
    @param plugin_state: The state of the plugin's tab (JSON string, or
        legacy pickled data from older CueGUI versions; may be empty)
    @type plugin_state: string"""
    # Look up the plugin class; it may have been removed since the
    # state was saved, in which case log and give up.
    try:
        plugin_class = self.__plugins[plugin_name][CLASS]
    except KeyError:
        logger.warning("Unable to launch previously open plugin, it no longer exists: %s" % plugin_name)
        return
    try:
        plugin_instance = plugin_class(self.mainWindow)
        self.__running.append((plugin_name, plugin_instance))
        # Queued connection: let the close event finish before
        # __closePlugin mutates the running list.
        plugin_instance.closed.connect(self.__closePlugin, QtCore.Qt.QueuedConnection)
    except Exception:
        logger.warning("Failed to load plugin module: %s\n%s" % (plugin_name,
            ''.join(traceback.format_exception(*sys.exc_info())) ))
        return
    if hasattr(plugin_instance, "pluginRestoreState"):
        try:
            # Inner try: decoding the saved state. Outer try: the
            # plugin's own pluginRestoreState call.
            try:
                if plugin_state:
                    # Earlier versions of CueGUI saved data via pickle; fall back to that if
                    # valid JSON is not found.
                    try:
                        state = json.loads(plugin_state)
                    except JSON_EXCEPTION_CLASS:
                        # Python 2 doesn't support the same bytes() options, but that's ok
                        # because the pickled data is already in the format we need.
                        try:
                            state = pickle.loads(bytes(plugin_state, encoding='latin1'))
                        except TypeError:
                            state = pickle.loads(plugin_state)
                else:
                    state = None
            except Exception as e:
                logger.warning("Failed to load state information stored as %s for %s, error was: %s" % (plugin_state, plugin_name, e))
                state = None
            # Undecodable state degrades to None rather than aborting.
            plugin_instance.pluginRestoreState(state)
        except Exception as e:
            logger.warning("Error restoring plugin state for: %s" % plugin_name)
            list(map(logger.warning, cuegui.Utils.exceptionOutput(e)))
def loadPluginPath(self, plugin_dir):
    """Load every plugin module found in the given directory.

    A directory is only scanned once per session; repeat calls are
    ignored. sys.path is temporarily extended so the plugin modules can
    be imported, then restored.

    @param plugin_dir: Path to a plugin directory
    @type plugin_dir: string"""
    if plugin_dir in self._loadedPaths:
        return
    self._loadedPaths.append(plugin_dir)
    # Guard clause instead of nesting the whole body in the if.
    if not os.path.isdir(plugin_dir):
        logger.warning("Unable to read the plugin path: %s" % plugin_dir)
        return
    orig_sys_path = sys.path[:]
    sys.path.append(plugin_dir)
    try:
        for entry in os.listdir(plugin_dir):
            name, ext = os.path.splitext(entry)
            # Skip package/support files that are not plugins.
            if ext == ".py" and name not in ("__init__", "Manifest", "README"):
                self.loadPlugin(name)
    finally:
        # Bug fix: always restore the import path, even if loading raised.
        sys.path = orig_sys_path
def loadPlugin(self, name):
    """Import a single plugin module (which must be on the python path)
    and register it in the internal plugin table.

    @param name: Name of the python module that contains a plugin
    @type name: string"""
    try:
        logger.info("Importing: %s" % name)
        module = __import__(name, globals(), locals())
        logger.info("Has: %s" % dir(module))
        logger.info("Name: %s" % module.PLUGIN_NAME)
        logger.info("Provides: %s" % module.PLUGIN_PROVIDES)
        # If a plugin requires a different app, do not use it
        # TODO: accept a list also, log it
        if hasattr(module, "PLUGIN_REQUIRES") and \
                self.mainWindow.app_name != module.PLUGIN_REQUIRES:
            return
        entry = {
            CLASS: getattr(module, module.PLUGIN_PROVIDES),
            DESCRIPTION: str(module.PLUGIN_DESCRIPTION),
        }
        if hasattr(module, "PLUGIN_CATEGORY"):
            entry[CATEGORY] = str(module.PLUGIN_CATEGORY)
        self.__plugins[module.PLUGIN_NAME] = entry
    except Exception as e:
        logger.warning("Failed to load plugin %s\n%s" % (name,
            ''.join(traceback.format_exception(*sys.exc_info())) ))
def setupPluginMenu(self, menu):
    """Adds a plugin menu option to the supplied menubar
    @param menu: The menu to add the loaded plugins to
    @type menu: QMenu"""
    # Every action funnels through one handler that maps the action
    # text back to a plugin name.
    menu.triggered.connect(self._handlePluginMenu)
    # Create the submenus (ordered)
    submenus = {}
    menu_locations = {"root": []}
    for category in set([plugin[CATEGORY] for plugin in list(self.__plugins.values())
                         if CATEGORY in plugin]):
        submenus[category] = QtWidgets.QMenu(category, menu)
        menu.addMenu(submenus[category])
        menu_locations[category] = []
    # Store the plugin name in the proper menu_locations category
    for plugin in self.__plugins:
        # Plugins without a category land in the top-level "root" menu.
        category = self.__plugins[plugin].get(CATEGORY, "root")
        menu_locations[category].append(plugin)
    # Create the QAction and add it to the correct menu (sorted)
    for category in menu_locations:
        for plugin in sorted(menu_locations[category]):
            action = QtWidgets.QAction("{}".format(plugin), menu)
            if category in submenus:
                submenus[category].addAction(action)
            else:
                menu.addAction(action)
    return menu
def _handlePluginMenu(self, action):
    """Launch the plugin whose menu entry was activated.

    @param action: The action that was selected from the menu
    @type action: QAction"""
    label = str(action.text())
    # Everything before the separator is the plugin name.
    plugin_name = label.split("%s" % self.__menu_separator)[0]
    self.launchPlugin(plugin_name, "")
class Plugin(object):
def __init__(self):
    # Registered setting entries; each is indexable by the module-level
    # SETTINGS_KEY / SETTINGS_SET constants (see pluginRestoreState).
    # Subclasses populate this so their state can be saved/restored.
    self.__settings = []
def pluginRestoreState(self, saved):
    """Called on plugin start with any previously saved state.
    @param saved: Last saved state of the plugin instance, mapping a
        setting key to its stored value (as produced by pluginSaveState)
    @type saved: dict"""
    # Only restore when there are registered settings and the saved
    # payload is actually a dict (older/corrupt state is ignored).
    if self.__settings and saved and isinstance(saved, dict):
        for setting in self.__settings:
            item = setting[SETTINGS_KEY]
            if item in saved:
                # Push the stored value back through the setting's setter.
                setting[SETTINGS_SET](saved[item])
def pluginSaveState(self):
"""Called on application exit and returns plugin state information.
@return: Any object to store as the current state of the plugin instance
@rtype: any"""
save = {}
if self.__settings:
| |
height of the video stream in pixels.
codedWidth (int): The coded width of the video stream in pixels.
colorPrimaries (str): The color primaries of the video stream.
colorRange (str): The color range of the video stream.
colorSpace (str): The color space of the video stream (ex: bt2020).
colorTrc (str): The color trc of the video stream.
DOVIBLCompatID (int): Dolby Vision base layer compatibility ID.
DOVIBLPresent (bool): True if Dolby Vision base layer is present.
DOVIELPresent (bool): True if Dolby Vision enhancement layer is present.
DOVILevel (int): Dolby Vision level.
DOVIPresent (bool): True if Dolby Vision is present.
DOVIProfile (int): Dolby Vision profile.
DOVIRPUPresent (bool): True if Dolby Vision reference processing unit is present.
DOVIVersion (float): The Dolby Vision version.
duration (int): The duration of video stream in milliseconds.
frameRate (float): The frame rate of the video stream (ex: 23.976).
frameRateMode (str): The frame rate mode of the video stream.
hasScallingMatrix (bool): True if video stream has a scaling matrix.
height (int): The hight of the video stream in pixels (ex: 1080).
level (int): The codec encoding level of the video stream (ex: 41).
profile (str): The profile of the video stream (ex: asp).
pixelAspectRatio (str): The pixel aspect ratio of the video stream.
pixelFormat (str): The pixel format of the video stream.
refFrames (int): The number of reference frames of the video stream.
scanType (str): The scan type of the video stream (ex: progressive).
streamIdentifier(int): The stream identifier of the video stream.
width (int): The width of the video stream in pixels (ex: 1920).
"""
TAG = 'Stream'
STREAMTYPE = 1
def _loadData(self, data):
    """ Load attribute values from Plex XML response. """
    super(VideoStream, self)._loadData(data)
    attrib = data.attrib
    # (attribute name, caster) pairs; a caster of None stores the raw
    # attribute string unchanged.
    spec = [
        ('anamorphic', None), ('bitDepth', int), ('cabac', int),
        ('chromaLocation', None), ('chromaSubsampling', None),
        ('codecID', None), ('codedHeight', int), ('codedWidth', int),
        ('colorPrimaries', None), ('colorRange', None),
        ('colorSpace', None), ('colorTrc', None),
        ('DOVIBLCompatID', int), ('DOVIBLPresent', bool),
        ('DOVIELPresent', bool), ('DOVILevel', int),
        ('DOVIPresent', bool), ('DOVIProfile', int),
        ('DOVIRPUPresent', bool), ('DOVIVersion', float),
        ('duration', int), ('frameRate', float), ('frameRateMode', None),
        ('hasScallingMatrix', bool), ('height', int), ('level', int),
        ('profile', None), ('pixelAspectRatio', None),
        ('pixelFormat', None), ('refFrames', int), ('scanType', None),
        ('streamIdentifier', int), ('width', int),
    ]
    for name, caster in spec:
        value = attrib.get(name)
        setattr(self, name, value if caster is None else cast(caster, value))
@utils.registerPlexObject
class AudioStream(MediaPartStream):
    """ Represents an audio stream within a :class:`~plexapi.media.MediaPart`.

        Attributes:
            TAG (str): 'Stream'
            STREAMTYPE (int): 2
            audioChannelLayout (str): The audio channel layout of the audio stream (ex: 5.1(side)).
            bitDepth (int): The bit depth of the audio stream (ex: 16).
            bitrateMode (str): The bitrate mode of the audio stream (ex: cbr).
            channels (int): The number of audio channels of the audio stream (ex: 6).
            duration (int): The duration of audio stream in milliseconds.
            profile (str): The profile of the audio stream.
            samplingRate (int): The sampling rate of the audio stream (ex: xxx)
            streamIdentifier (int): The stream identifier of the audio stream.

            <Track_only_attributes>: The following attributes are only available for tracks.

                * albumGain (float): The gain for the album.
                * albumPeak (float): The peak for the album.
                * albumRange (float): The range for the album.
                * endRamp (str): The end ramp for the track.
                * gain (float): The gain for the track.
                * loudness (float): The loudness for the track.
                * lra (float): The lra for the track.
                * peak (float): The peak for the track.
                * startRamp (str): The start ramp for the track.
    """
    TAG = 'Stream'
    STREAMTYPE = 2

    def _loadData(self, data):
        """ Load attribute values from Plex XML response. """
        super(AudioStream, self)._loadData(data)
        get = data.attrib.get
        self.audioChannelLayout = get('audioChannelLayout')
        self.bitDepth = cast(int, get('bitDepth'))
        self.bitrateMode = get('bitrateMode')
        self.channels = cast(int, get('channels'))
        self.duration = cast(int, get('duration'))
        self.profile = get('profile')
        self.samplingRate = cast(int, get('samplingRate'))
        self.streamIdentifier = cast(int, get('streamIdentifier'))
        if self._isChildOf(etag='Track'):
            # Loudness/gain analysis values only exist on music tracks.
            self.albumGain = cast(float, get('albumGain'))
            self.albumPeak = cast(float, get('albumPeak'))
            self.albumRange = cast(float, get('albumRange'))
            self.endRamp = get('endRamp')
            self.gain = cast(float, get('gain'))
            self.loudness = cast(float, get('loudness'))
            self.lra = cast(float, get('lra'))
            self.peak = cast(float, get('peak'))
            self.startRamp = get('startRamp')
@utils.registerPlexObject
class SubtitleStream(MediaPartStream):
    """ Represents a subtitle stream within a :class:`~plexapi.media.MediaPart`.

        Attributes:
            TAG (str): 'Stream'
            STREAMTYPE (int): 3
            container (str): The container of the subtitle stream.
            forced (bool): True if this is a forced subtitle.
            format (str): The format of the subtitle stream (ex: srt).
            headerCompression (str): The header compression of the subtitle stream.
            transient (str): Unknown.
    """
    TAG = 'Stream'
    STREAMTYPE = 3

    def _loadData(self, data):
        """ Load attribute values from Plex XML response. """
        super(SubtitleStream, self)._loadData(data)
        self.container = data.attrib.get('container')
        # Default '0' so a missing attribute reads as not-forced.
        self.forced = cast(bool, data.attrib.get('forced', '0'))
        self.format = data.attrib.get('format')
        self.headerCompression = data.attrib.get('headerCompression')
        self.transient = data.attrib.get('transient')
class LyricStream(MediaPartStream):
    """ Represents a lyric stream within a :class:`~plexapi.media.MediaPart`.

        Attributes:
            TAG (str): 'Stream'
            STREAMTYPE (int): 4
            format (str): The format of the lyric stream (ex: lrc).
            minLines (int): The minimum number of lines in the (timed) lyric stream.
            provider (str): The provider of the lyric stream (ex: com.plexapp.agents.lyricfind).
            timed (bool): True if the lyrics are timed to the track.
    """
    TAG = 'Stream'
    STREAMTYPE = 4

    def _loadData(self, data):
        """ Load attribute values from Plex XML response. """
        super(LyricStream, self)._loadData(data)
        attrs = data.attrib
        self.format = attrs.get('format')
        self.minLines = cast(int, attrs.get('minLines'))
        self.provider = attrs.get('provider')
        # Default '0' so a missing attribute reads as not-timed.
        self.timed = cast(bool, attrs.get('timed', '0'))
@utils.registerPlexObject
class Session(PlexObject):
    """ Represents a current session.

        Attributes:
            TAG (str): 'Session'
            id (str): The unique identifier for the session.
            bandwidth (int): The Plex streaming brain reserved bandwidth for the session.
            location (str): The location of the session (lan, wan, or cellular)
    """
    TAG = 'Session'

    def _loadData(self, data):
        """ Load attribute values from Plex XML response. """
        # Keep the raw element, matching the other classes in this file.
        self._data = data
        self.id = data.attrib.get('id')
        # Use the module-level cast() for consistency with the sibling
        # stream/session classes (previously utils.cast).
        self.bandwidth = cast(int, data.attrib.get('bandwidth'))
        self.location = data.attrib.get('location')
@utils.registerPlexObject
class TranscodeSession(PlexObject):
""" Represents a current transcode session.
Attributes:
TAG (str): 'TranscodeSession'
audioChannels (int): The number of audio channels of the transcoded media.
audioCodec (str): The audio codec of the transcoded media.
audioDecision (str): The transcode decision for the audio stream.
complete (bool): True if the transcode is complete.
container (str): The container of the transcoded media.
context (str): The context for the transcode sesson.
duration (int): The duration of the transcoded media in milliseconds.
height (int): The height of the transcoded media in pixels.
key (str): API URL (ex: /transcode/sessions/<id>).
maxOffsetAvailable (float): Unknown.
minOffsetAvailable (float): Unknown.
progress (float): The progress percentage of the transcode.
protocol (str): The protocol of the transcode.
remaining (int): Unknown.
size (int): The size of the transcoded media in bytes.
sourceAudioCodec (str): The audio codec of the source media.
sourceVideoCodec (str): The video codec of the source media.
speed (float): The speed of the transcode.
subtitleDecision (str): The transcode decision for the subtitle stream
throttled (bool): True if the transcode is throttled.
timestamp (int): The epoch timestamp when the transcode started.
transcodeHwDecoding (str): The hardware transcoding decoder engine.
transcodeHwDecodingTitle (str): The title of the hardware transcoding decoder engine.
transcodeHwEncoding (str): The hardware transcoding encoder engine.
transcodeHwEncodingTitle (str): The title of the hardware transcoding encoder engine.
transcodeHwFullPipeline (str): True if hardware decoding and encoding is being used for the transcode.
transcodeHwRequested (str): True if hardware transcoding was requested for the transcode.
videoCodec (str): The video codec of the transcoded media.
videoDecision (str): The transcode decision for the video stream.
width (str): The width of the transcoded media in pixels.
"""
TAG = 'TranscodeSession'
def _loadData(self, data):
""" Load attribute values from Plex XML response. """
self._data = data
self.audioChannels = cast(int, data.attrib.get('audioChannels'))
self.audioCodec = data.attrib.get('audioCodec')
self.audioDecision = data.attrib.get('audioDecision')
self.complete = cast(bool, data.attrib.get('complete', '0'))
self.container = data.attrib.get('container')
self.context = data.attrib.get('context')
self.duration = cast(int, data.attrib.get('duration'))
self.height = cast(int, data.attrib.get('height'))
self.key = data.attrib.get('key')
self.maxOffsetAvailable = cast(float, data.attrib.get('maxOffsetAvailable'))
self.minOffsetAvailable = cast(float, data.attrib.get('minOffsetAvailable'))
self.progress = cast(float, data.attrib.get('progress'))
self.protocol = data.attrib.get('protocol')
self.remaining = cast(int, data.attrib.get('remaining'))
self.size = cast(int, data.attrib.get('size'))
self.sourceAudioCodec = data.attrib.get('sourceAudioCodec')
self.sourceVideoCodec = data.attrib.get('sourceVideoCodec')
self.speed = cast(float, data.attrib.get('speed'))
self.subtitleDecision = data.attrib.get('subtitleDecision')
self.throttled = cast(bool, data.attrib.get('throttled', '0'))
self.timestamp = cast(float, data.attrib.get('timeStamp'))
self.transcodeHwDecoding = data.attrib.get('transcodeHwDecoding')
self.transcodeHwDecodingTitle = data.attrib.get('transcodeHwDecodingTitle')
self.transcodeHwEncoding = data.attrib.get('transcodeHwEncoding')
self.transcodeHwEncodingTitle = data.attrib.get('transcodeHwEncodingTitle')
self.transcodeHwFullPipeline = | |
"""
# set function name (cannot break function here)
_ = str(__NAME__) + '.Keyword.validate()'
# deal with no test value (use value set at module level)
if test_value is None:
value = self.value
else:
value = test_value
# deal with no source
if source is None:
source = self.source
# get true value (and test test_value)
vargs = [self.name, self.dtype, value, self.dtypei, self.options,
self.maximum, self.minimum, ]
vkwargs = dict(quiet=quiet, source=source)
true_value, self.source = _validate_value(*vargs, **vkwargs)
# deal with no comment
if self.comment is None:
self.comment = ''
# need a key
if self.key is None:
emsg = 'Keyword "{0}" must have a key'
raise ConfigError(emsg.format(self.name), level='error')
# construct true value as keyword store
true_value = [self.key, true_value, self.comment]
# deal with storing
if test_value is None:
self.true_value = true_value
return True
else:
return true_value
def copy(self, source=None):
    """
    Shallow copy of keyword instance

    :param source: str, the code/recipe in which keyword instance was
                   copied (required for use)
    :type source: str

    :return: Keyword, a shallow copy of the keyword
    :rtype: Keyword

    :raises ConfigError: if source is None
    """
    # set function name (cannot break function here)
    func_name = str(__NAME__) + '.Keyword.copy()'
    # get display text
    textentry = DisplayText()
    # a copy without a source is not allowed
    if source is None:
        raise ConfigError(textentry('00-003-00008', args=[func_name]),
                          level='error')
    # collect the keyword-only constructor arguments
    kwargs = dict(source=source, unit=self.unit, default=self.default,
                  datatype=self.datatype, dataformat=self.dataformat,
                  group=self.group, author=self.author, parent=self.parent)
    # build the new Keyword from this instance's attributes
    return Keyword(self.name, self.key, self.value, self.dtype,
                   self.comment, self.options, self.maximum,
                   self.minimum, **kwargs)
class DisplayText:
    """
    Manually enter wlog TextEntries here -- will be in english only

    This is used for when we cannot have access to the language database
    """

    def __init__(self):
        """
        Constructs the manual language database (into `self.entries`)
        """
        # set function name (cannot break here --> no access to inputs)
        _ = str(__NAME__) + '.DisplayText.__init__()'
        # get the entries from module
        self.entries = drs_lang_db.get_entries()

    def __call__(self, key, args=None):
        """
        When constructed this call method acts like a TextEntry instance,
        returning a string that can be used in WLOG and is formatted by
        arguments `args`

        :param key: str, the key code from the language database
                    (i.e. 00-001-00001)
        :param args: list of objects, if there is formatting in entry this
                     is how arguments are supplied i.e.
                     `'LOG MESSAGE {0}: Message = {1}'.format(*args)`

        :type key: str
        :type args: list[objects]

        :return: returns string
        :rtype: str
        """
        # set function name (cannot break here --> no access to inputs)
        # bug fix: previously mislabelled as '_DisplayText.__init__()'
        _ = str(__NAME__) + '.DisplayText.__call__()'
        # return the entry for key with the arguments used for formatting
        if args is not None:
            return self.entries[key].format(*args)
        # else just return the entry
        else:
            return self.entries[key]
# =============================================================================
# Define functions
# =============================================================================
def generate_consts(modulepath):
    """
    Get all Const and Keyword instances from a module - basically load
    constants from a python file

    :param modulepath: str, the module name and location
    :type modulepath: str

    :return: the keys (Const/Keyword names) and their respective instances
    :rtype: tuple[list[str], list[Const, Keyword]]

    :raises ConfigError: if module name is not valid
    """
    # set function name (cannot break here --> no access to inputs)
    func_name = str(__NAME__) + '.generate_consts()'
    # import module
    mod = import_module(func_name, modulepath)
    # storage for the Const/Keyword attributes found in the module
    new_keys, new_values = [], []
    # loop over the module attributes directly (previously an index loop
    # over range(len(keys)) with an unused `all_list` accumulator)
    for key, value in mod.__dict__.items():
        # skip anything that is not a Const or Keyword instance; getattr
        # with a default also covers attributes without "kind" at all
        if getattr(value, "kind", None) not in ['Const', 'Keyword']:
            continue
        # now append to list
        new_keys.append(key)
        new_values.append(value)
    # return
    return new_keys, new_values
def import_module(func, modulepath, full=False, quiet=False):
    """
    Import a module given a module path

    :param func: str, the function where import_module was called
    :param modulepath: str, the module path (or bare module name when
                       `full` is True)
    :param full: bool, if True, assumes modulepath is the full path
    :param quiet: bool, if True raises a ValueError instead of a ConfigError

    :type func: str
    :type modulepath: str
    :type full: bool
    :type quiet: bool

    :raises: ConfigError - if module path is not valid (and quiet=False)
    :raises: ValueError - if module path is not valid (and quiet=True)

    :return: the imported module instance
    """
    # set function name (cannot break here --> no access to inputs)
    if func is None:
        func_name = str(__NAME__) + '.import_module()'
    else:
        func_name = str(func)
    # get display text
    textentry = DisplayText()
    # deal with getting module
    if full:
        modfile = modulepath
        moddir = ''
    else:
        # get module name and directory
        modfile = os.path.basename(modulepath).replace('.py', '')
        moddir = os.path.dirname(modulepath)
    # import module
    try:
        # force a fresh import if the module was previously loaded
        if modfile in sys.modules:
            del sys.modules[modfile]
        if not full:
            sys.path.insert(0, moddir)
        try:
            return importlib.import_module(modfile)
        finally:
            # bug fix: always restore sys.path -- previously the inserted
            # directory leaked into sys.path whenever the import raised
            if not full and moddir in sys.path:
                sys.path.remove(moddir)
    except Exception as e:
        string_trackback = traceback.format_exc()
        # report error
        eargs = [modfile, moddir, func_name, type(e), e, str(string_trackback)]
        # deal with quiet return vs normal return
        if quiet:
            raise ValueError(textentry('00-000-00003', args=eargs))
        else:
            raise ConfigError(textentry('00-000-00003', args=eargs),
                              level='error')
def get_constants_from_file(filename):
    """
    Read config file and convert to key, value pairs

        comments have a '#' at the start
        format of variables: key = value

    :param filename: string, the filename (+ absolute path) of file to open
    :type filename: str

    :return keys: list of strings, upper case strings for each variable
    :return values: list of strings, value of each key

    :raises ConfigError: if there is a problem reading constants from file
    """
    # set function name (cannot break here --> no access to inputs)
    _ = str(__NAME__) + '.get_constants_from_file()'
    # first try to reformat text file to avoid weird characters
    #   (like mac smart quotes)
    _validate_text_file(filename)
    # read raw config file as strings
    raw = _get_raw_txt(filename, comments='#', delimiter='=')
    # check that we have lines in config file
    if len(raw) == 0:
        return [], []
    # a file with a single "key = value" line comes back 1D rather than 2D
    single = len(raw.shape) == 1
    # check that we have opened config file correctly
    try:
        # check how many rows we have
        lraw = raw.shape[0]
    except TypeError:
        return [], []
    # normalize to a list of (key, value) rows
    if single:
        rows = [raw]
    else:
        rows = [raw[row] for row in range(lraw)]
    # loop around each variable (key and value pairs)
    keys, values = [], []
    for row in rows:
        # remove whitespaces and quotation marks from start/end
        key = row[0].strip().strip("'").strip('"')
        value = row[1].strip().strip("'").strip('"')
        # add key.upper() to keys
        # (bug fix: the single-line branch previously stored the key
        # without upper-casing, breaking the documented contract)
        keys.append(key.upper())
        # add value to values
        values.append(value)
    # return keys and values
    return keys, values
def update_file(filename, dictionary):
    """
    Updates a config/constants file with key/value pairs in the `dictionary`

    If key not found in config/constants file does not add key/value to file

    :param filename: str, the config/constants file (absolute path)
    :param dictionary: dict, the dictionary containing the key/value pairs
                       to update in the config/constants file
    :type filename: str
    :type dictionary: dict

    :return: None
    :raises ConfigError: if we cannot read or write filename
    """
    # set function name (cannot break here --> no access to inputs)
    func_name = str(__NAME__) + '.update_file()'
    # get display text
    textentry = DisplayText()
    # read every line from the file
    try:
        with open(filename, 'r') as fileobj:
            lines = fileobj.readlines()
    except Exception as e:
        eargs = [filename, func_name, type(e), e]
        raise ConfigError(textentry('00-004-00003', args=eargs),
                          level='error')
    # stripped char array used to locate "KEY = " lines
    clines = np.char.array(lines).strip()
    # replace every matching line for each supplied key
    for key, value in dictionary.items():
        # replacement text for this key
        replacement = '{0} = {1}\n'.format(key, value)
        # mask of lines that start with "key = "
        hit = clines.startswith(key + ' = ')
        if np.sum(hit) > 0:
            for lineno in np.where(hit)[0]:
                lines[lineno] = replacement
    # write the updated lines back out
    try:
        with open(filename, 'w') as fileobj:
            fileobj.writelines(lines)
    except Exception as e:
        eargs = [filename, func_name, type(e), e]
        raise ConfigError(textentry('00-004-00004', args=eargs),
                          level='error')
def textwrap(input_string, length):
"""
Wraps the text `input_string` to the length of `length` new lines are
indented with a tab
Modified version of this: https://stackoverflow.com/a/16430754
| |
<gh_stars>10-100
# avocado script augmenting spcc - requires spcc_sn_data.txt file
import numpy as np
import pandas as pd
import astronomical_object
import augment
from scipy.special import erf
import string
import math
from .instruments import band_central_wavelengths
from .utils import settings, logger
class SPCC_SN_data:
"""Metadata for the SPCC dataset"""
def __init__(self):
    # load spcc sn data
    # NOTE(review): path is relative to the current working directory --
    # assumes the script is run from a directory sibling to ../data;
    # confirm before relocating this module.
    spcc_sn_data = pd.read_csv('../data/spcc_sn_data.txt',delimiter=' ', names=['spcc_names', 'spcc_types', 'spcc_mags', 'spcc_photo_z', 'spcc_photo_z_err', 'spcc_spec_z'])
    # per-object metadata columns unpacked into arrays
    self.spcc_names = spcc_sn_data['spcc_names'].values
    self.spcc_types = spcc_sn_data['spcc_types'].values
    self.spcc_mags = spcc_sn_data['spcc_mags'].values
    self.spcc_photo_z = spcc_sn_data['spcc_photo_z'].values
    self.spcc_photo_z_err = spcc_sn_data['spcc_photo_z_err'].values
    self.spcc_spec_z = spcc_sn_data['spcc_spec_z'].values
    # cache for get_binned_data(); built lazily on first call
    self.binned_data = None
def get_binned_data(self):
"""Bin SPCC r-magnitudes by redshift to later check whether augmented
objects are similar and likely to cover the same parameter space
Returns
-------
binned_data : 2d list
SPCC magnitudes binned by redshift
"""
if self.binned_data is None:
spcc_z_bins = [[] for i in range(11)]
# there's probably a better way to do this, but this works and is fast
for i,z in enumerate(self.spcc_spec_z):
if float(z) <= 0.1:
spcc_z_bins[0].append(self.spcc_mags[i])
elif float(z) <= 0.2:
spcc_z_bins[1].append(self.spcc_mags[i])
elif float(z) <= 0.3:
spcc_z_bins[2].append(self.spcc_mags[i])
elif float(z) <= 0.4:
spcc_z_bins[3].append(self.spcc_mags[i])
elif float(z) <= 0.5:
spcc_z_bins[4].append(self.spcc_mags[i])
elif float(z) <= 0.6:
spcc_z_bins[5].append(self.spcc_mags[i])
elif float(z) <= 0.7:
spcc_z_bins[6].append(self.spcc_mags[i])
elif float(z) <= 0.8:
spcc_z_bins[7].append(self.spcc_mags[i])
elif float(z) <= 0.9:
spcc_z_bins[8].append(self.spcc_mags[i])
elif float(z) <= 1.0:
spcc_z_bins[9].append(self.spcc_mags[i])
elif float(z) <= 1.1:
spcc_z_bins[10].append(self.spcc_mags[i])
# remove outlier type ii supernova,which skews distribution of
#augmented objects - as this is a strange light curve with only
#a couple very faint points in r band, seen in plot of r-mag vs z
spcc_z_bins[2].remove(27.119)
self.binned_data = spcc_z_bins
return self.binned_data
def get_augmentation_list(self, training_list, add_faint=False, training_faint_list=''):
    """Read the list of SPCC training objects to augment from a file.

    When augmented objects were already created from one sample (e.g. a
    magnitude-limited one), setting ``add_faint`` returns only the extra
    supernovae present in ``training_faint_list`` but not in
    ``training_list``.

    Parameters
    ----------
    training_list : string
        Path of the file containing the list of SPCC objects to augment
    add_faint : bool
        If true, training_faint_list must be given; only the additional
        supernovae not already in training_list are returned
    training_faint_list : string
        Path of the file listing training objects including the fainter
        sample

    Returns
    -------
    training_objects : numpy.array
        Array of object names for augmenting
    """
    base_objects = np.loadtxt(training_list, dtype=str)
    if not add_faint:
        return base_objects
    # read the list that comprises base_objects plus additional objects
    extended = np.loadtxt(training_faint_list, dtype=str)
    # keep just the additional (fainter) objects
    return np.asarray(np.setdiff1d(extended, base_objects))
def get_photoz_reference(self):
"""Get the reference for photo-z estimation from the SPCC dataset
Returns
-------
photoz_reference : numpy ndarray
Nx3 array with reference photo-zs for each entry with a spec-z.
The columns are spec-z, photo-z, photo-z error
"""
self.photoz_reference = np.vstack([self.spcc_spec_z, self.spcc_photo_z, self.spcc_photo_z_err]).T
return self.photoz_reference
def load_reference_object(self, sn, data_dir='/dir/to/data/SIMGEN_PUBLIC_DES/'):
    """Load reference object to augment from

    Parameters
    ----------
    sn : string
        Original SPCC supernova ID
    data_dir : string, optional
        Directory containing the SPCC light-curve files. Previously a
        hard-coded path; parameterized (with the old value as default)
        so callers can point at their own copy of SIMGEN_PUBLIC_DES.

    Returns
    -------
    reference_object : :class:'AstronomicalObject'
        The object to be used as a reference for augmentation
    """
    sn_index = list(self.spcc_names).index(sn)
    # per-object metadata from the loaded SPCC tables
    obj_mag = self.spcc_mags[sn_index]
    photo_z = self.spcc_photo_z[sn_index]
    photo_z_err = self.spcc_photo_z_err[sn_index]
    spec_z = self.spcc_spec_z[sn_index]
    sn_type = self.spcc_types[sn_index]
    mjd = []
    flt = []
    flux = []
    flux_err = []
    with open(data_dir + sn, 'r') as f:
        obj_name = str(sn)
        for line in f:
            line_list = line.split()
            # observation rows -- presumably "OBS: mjd filter field flux
            # fluxerr ...": indices 1, 2, 4, 5 match that layout (TODO
            # confirm against the SIMGEN file format)
            if 'OBS:' in line and len(line_list) > 2:
                mjd.append(float(line_list[1]))
                flt.append('des' + line_list[2])
                flux.append(float(line_list[4]))
                flux_err.append(float(line_list[5]))
    obs_dict = {'time': mjd, 'band': flt, 'flux': flux, 'flux_error': flux_err}
    observations = pd.DataFrame(obs_dict)
    metadata = {'object_id': obj_name, 'object_r_mag': obj_mag, 'host_photoz': photo_z, 'host_photoz_error': photo_z_err, 'host_specz': spec_z, 'redshift': spec_z, 'class': sn_type}
    reference_object = astronomical_object.AstronomicalObject(metadata, observations)
    return reference_object
def passed_criteria(self, augmented_object):
    """Check that an augmented object passes the acceptance criteria.

    The object must have at least one r-band observation, a positive
    peak r-band flux, and lie within the magnitude and redshift bounds
    of the reference dataset.

    Parameters
    ----------
    augmented_object : :class:'AstronomicalObject'
        Created augmented object.

    Returns
    -------
    bool
        True if the object passes all criteria, False otherwise.
        (Previously the bin loop could fall through and return None;
        that case is now an explicit False.)
    """
    r_fluxes = []
    for i, flt in enumerate(augmented_object.observations['band']):
        if flt == 'desr':
            r_fluxes.append(augmented_object.observations['flux'][i])
    if len(r_fluxes) == 0:
        # No r-band observations at all.
        return False
    max_flux = np.amax(r_fluxes)
    if max_flux < 0:
        # Peak r-band flux must be positive for a meaningful magnitude.
        return False
    r_mag = -2.5*math.log10(max_flux)+27.5  # zero point from sn data
    augmented_object.metadata['object_r_mag'] = r_mag
    augmented_z = augmented_object.metadata['host_specz']
    # Remove augmented objects outside the z-mag space of the data
    # (in reality we will have mag and z info).
    binned_data = self.get_binned_data()
    for i, z_bin in enumerate(binned_data):
        if augmented_z > i/10 and augmented_z <= (i/10+0.1):
            # Object falls in this 0.1-wide redshift bin: accept only if
            # its magnitude lies within the bin's observed range.
            if r_mag > np.amin(z_bin) and r_mag <= np.amax(z_bin):
                return True
            return False
        elif augmented_z > 1.1:
            # Beyond the redshift coverage of the reference data.
            return False
    # No bin matched (e.g. augmented_z == 0 exactly): reject explicitly
    # instead of falling through and returning None.
    return False
def save_augmented_object(self, augmented_object, augment_dir):
    """Write the augmented light curve to a .DAT-style file.

    The output follows the same formatting used in the original SPCC
    release so it carries the info needed for photometric classification.

    Parameters
    ----------
    augmented_object : :class:'AstronomicalObject'
        Created augmented object.
    augment_dir : string
        Directory in which to save the augmented object.
    """
    augmented_mjd = augmented_object.observations['time']
    augmented_flux = augmented_object.observations['flux']
    augmented_flux_err = augmented_object.observations['flux_error']
    augmented_bands = augmented_object.observations['band']
    # Strip the 'des' prefix so bands are written as in the SPCC files.
    renamed_augmented_bands = [filt.split('des')[1] for filt in augmented_bands]
    augmented_z = str(augmented_object.metadata['host_specz'])
    obj_class = augmented_object.metadata['class']
    obj_id = augmented_object.metadata['object_id']
    r_mag = str(augmented_object.metadata['object_r_mag'])
    # Context manager guarantees the file is closed even if a write
    # fails (the original used a bare open()/close() pair).
    with open(augment_dir+'/'+obj_id, 'w') as aug_obj_file:
        aug_obj_file.write('SIM_REDSHIFT: '+augmented_z+'\n')
        aug_obj_file.write('SIM_COMMENT: '+'SN Type = '+obj_class+'\n')
        aug_obj_file.write('R-MAG = '+r_mag+'\n')
        for i, val in enumerate(augmented_mjd):
            aug_obj_file.write('OBS: '+str(val)+' '+str(renamed_augmented_bands[i])+' '+'0'+' '+str(augmented_flux[i])+' '+str(augmented_flux_err[i])+'\n')
class SpccAugmentor(augment.Augmentor):
"""Subclass of the avocado augmentor for the SPCC dataset.
Most methods implemented here are changed slightly to the original
ones in avocado
"""
def __init__(self, **cosmology_kwargs):
    # NOTE(review): cosmology_kwargs are accepted but currently ignored —
    # they are not forwarded to the base augment.Augmentor constructor.
    # Confirm whether the base class should receive them.
    super().__init__()
def _augment_redshift(self, reference_object, augmented_metadata):
"""Choose a new redshift and simulate the photometric redshift for an
augmented object
Parameters
==========
reference_object : :class:`AstronomicalObject`
The object to use as a reference for the augmentation.
augmented_metadata : dict
The augmented metadata to add the new redshift too. This will be
updated in place.
"""
# no galactic objects (z=0) in spcc
# Choose a new redshift based on the reference template redshift.
template_redshift = reference_object.metadata["redshift"]
# First, we limit the redshift range as a multiple of the original
# redshift. We avoid making templates too much brighter because
# the lower redshift templates will be left with noise that is
# unrealistic. We also avoid going to too high of a relative
# redshift because the templates there will be too faint to be
# detected and the augmentor will waste a lot of time without being
# able to actually generate a template.
min_redshift = 0.95 * template_redshift
max_redshift = 5 * template_redshift
# Second, for high-redshift objects, we add a constraint to make
# sure that we aren't evaluating the template at wavelengths where
# the GP extrapolation is unreliable.
max_redshift = np.min([max_redshift, 1.5 * (1 + template_redshift) - 1])
# Choose new redshift from a log-uniform distribution over the
# allowable redshift range.
aug_redshift = np.exp(np.random.uniform(np.log(min_redshift), np.log(max_redshift)))
# Simulate a new photometric redshift
aug_photoz, aug_photoz_error = self._simulate_photoz(aug_redshift)
aug_distmod = self.cosmology.distmod(aug_photoz).value
augmented_metadata["redshift"] = aug_redshift
augmented_metadata["host_specz"] = aug_redshift
augmented_metadata["host_photoz"] = aug_photoz
augmented_metadata["host_photoz_error"] = aug_photoz_error
augmented_metadata["augment_brightness"] = 0.0
def _augment_metadata(self, reference_object):
"""Generate new metadata for the augmented object.
Parameters
==========
reference_object : :class:`AstronomicalObject`
The object to use as a reference for the augmentation.
Returns
=======
augmented_metadata : dict
The augmented metadata
"""
# no need for ddfs in spcc, or mwebv etc since we aren't using this for
#classification, just making new objects, so just make copy to start from
augmented_metadata = reference_object.metadata.copy()
self._augment_redshift(reference_object, augmented_metadata)
return augmented_metadata
def _simulate_photoz(self, redshift):
"""Simulate the photoz determination for a lightcurve using the test
set as a reference.
I apply the observed differences between photo-zs and spec-zs directly
to the new redshifts. This does not capture all of the intricacies of
photo-zs, but it does ensure that we cover all of the available
parameter space with at least some simulations.
Parameters
----------
redshift : float
The new true redshift of the object.
Returns
-------
host_photoz : float
The simulated photoz of the host.
host_photoz_error : float
The simulated photoz error of the host.
# NOTE(review): garbled line removed here; the content below appears to
# belong to a different (auto-generated conformance test) module.
schema="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTSchema-SV-IV-atomic-nonPositiveInteger-maxExclusive-2.xsd",
instance="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTXML-SV-IV-atomic-nonPositiveInteger-maxExclusive-2-5.xml",
class_name="NistschemaSvIvAtomicNonPositiveIntegerMaxExclusive2",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
# Apparently auto-generated conformance test for atomic/nonPositiveInteger
# with the maxExclusive facet; delegates to the suite helper
# assert_bindings (defined elsewhere in this module).
def test_atomic_non_positive_integer_max_exclusive_nistxml_sv_iv_atomic_non_positive_integer_max_exclusive_1_1(mode, save_output, output_format):
    """
    Type atomic/nonPositiveInteger is restricted by facet maxExclusive
    with value -999999999999999998.
    """
    assert_bindings(
        schema="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTSchema-SV-IV-atomic-nonPositiveInteger-maxExclusive-1.xsd",
        instance="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTXML-SV-IV-atomic-nonPositiveInteger-maxExclusive-1-1.xml",
        class_name="NistschemaSvIvAtomicNonPositiveIntegerMaxExclusive1",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
# ---------------------------------------------------------------------------
# Apparently auto-generated conformance tests for atomic/nonPositiveInteger
# with the minInclusive facet. Each test delegates to the suite helper
# assert_bindings (defined elsewhere in this module) with a fixed
# schema/instance pair from the NIST data set.
# ---------------------------------------------------------------------------
def test_atomic_non_positive_integer_min_inclusive_4_nistxml_sv_iv_atomic_non_positive_integer_min_inclusive_5_1(mode, save_output, output_format):
    """
    Type atomic/nonPositiveInteger is restricted by facet minInclusive
    with value 0.
    """
    assert_bindings(
        schema="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTSchema-SV-IV-atomic-nonPositiveInteger-minInclusive-5.xsd",
        instance="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTXML-SV-IV-atomic-nonPositiveInteger-minInclusive-5-1.xml",
        class_name="NistschemaSvIvAtomicNonPositiveIntegerMinInclusive5",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )

def test_atomic_non_positive_integer_min_inclusive_3_nistxml_sv_iv_atomic_non_positive_integer_min_inclusive_4_1(mode, save_output, output_format):
    """
    Type atomic/nonPositiveInteger is restricted by facet minInclusive
    with value -911248228325171715.
    """
    assert_bindings(
        schema="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTSchema-SV-IV-atomic-nonPositiveInteger-minInclusive-4.xsd",
        instance="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTXML-SV-IV-atomic-nonPositiveInteger-minInclusive-4-1.xml",
        class_name="NistschemaSvIvAtomicNonPositiveIntegerMinInclusive4",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )

def test_atomic_non_positive_integer_min_inclusive_3_nistxml_sv_iv_atomic_non_positive_integer_min_inclusive_4_2(mode, save_output, output_format):
    """
    Type atomic/nonPositiveInteger is restricted by facet minInclusive
    with value -911248228325171715.
    """
    assert_bindings(
        schema="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTSchema-SV-IV-atomic-nonPositiveInteger-minInclusive-4.xsd",
        instance="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTXML-SV-IV-atomic-nonPositiveInteger-minInclusive-4-2.xml",
        class_name="NistschemaSvIvAtomicNonPositiveIntegerMinInclusive4",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )

def test_atomic_non_positive_integer_min_inclusive_3_nistxml_sv_iv_atomic_non_positive_integer_min_inclusive_4_3(mode, save_output, output_format):
    """
    Type atomic/nonPositiveInteger is restricted by facet minInclusive
    with value -911248228325171715.
    """
    assert_bindings(
        schema="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTSchema-SV-IV-atomic-nonPositiveInteger-minInclusive-4.xsd",
        instance="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTXML-SV-IV-atomic-nonPositiveInteger-minInclusive-4-3.xml",
        class_name="NistschemaSvIvAtomicNonPositiveIntegerMinInclusive4",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )

def test_atomic_non_positive_integer_min_inclusive_3_nistxml_sv_iv_atomic_non_positive_integer_min_inclusive_4_4(mode, save_output, output_format):
    """
    Type atomic/nonPositiveInteger is restricted by facet minInclusive
    with value -911248228325171715.
    """
    assert_bindings(
        schema="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTSchema-SV-IV-atomic-nonPositiveInteger-minInclusive-4.xsd",
        instance="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTXML-SV-IV-atomic-nonPositiveInteger-minInclusive-4-4.xml",
        class_name="NistschemaSvIvAtomicNonPositiveIntegerMinInclusive4",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )

def test_atomic_non_positive_integer_min_inclusive_3_nistxml_sv_iv_atomic_non_positive_integer_min_inclusive_4_5(mode, save_output, output_format):
    """
    Type atomic/nonPositiveInteger is restricted by facet minInclusive
    with value -911248228325171715.
    """
    assert_bindings(
        schema="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTSchema-SV-IV-atomic-nonPositiveInteger-minInclusive-4.xsd",
        instance="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTXML-SV-IV-atomic-nonPositiveInteger-minInclusive-4-5.xml",
        class_name="NistschemaSvIvAtomicNonPositiveIntegerMinInclusive4",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )

def test_atomic_non_positive_integer_min_inclusive_2_nistxml_sv_iv_atomic_non_positive_integer_min_inclusive_3_1(mode, save_output, output_format):
    """
    Type atomic/nonPositiveInteger is restricted by facet minInclusive
    with value -214379312213180406.
    """
    assert_bindings(
        schema="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTSchema-SV-IV-atomic-nonPositiveInteger-minInclusive-3.xsd",
        instance="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTXML-SV-IV-atomic-nonPositiveInteger-minInclusive-3-1.xml",
        class_name="NistschemaSvIvAtomicNonPositiveIntegerMinInclusive3",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )

def test_atomic_non_positive_integer_min_inclusive_2_nistxml_sv_iv_atomic_non_positive_integer_min_inclusive_3_2(mode, save_output, output_format):
    """
    Type atomic/nonPositiveInteger is restricted by facet minInclusive
    with value -214379312213180406.
    """
    assert_bindings(
        schema="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTSchema-SV-IV-atomic-nonPositiveInteger-minInclusive-3.xsd",
        instance="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTXML-SV-IV-atomic-nonPositiveInteger-minInclusive-3-2.xml",
        class_name="NistschemaSvIvAtomicNonPositiveIntegerMinInclusive3",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )

def test_atomic_non_positive_integer_min_inclusive_2_nistxml_sv_iv_atomic_non_positive_integer_min_inclusive_3_3(mode, save_output, output_format):
    """
    Type atomic/nonPositiveInteger is restricted by facet minInclusive
    with value -214379312213180406.
    """
    assert_bindings(
        schema="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTSchema-SV-IV-atomic-nonPositiveInteger-minInclusive-3.xsd",
        instance="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTXML-SV-IV-atomic-nonPositiveInteger-minInclusive-3-3.xml",
        class_name="NistschemaSvIvAtomicNonPositiveIntegerMinInclusive3",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )

def test_atomic_non_positive_integer_min_inclusive_2_nistxml_sv_iv_atomic_non_positive_integer_min_inclusive_3_4(mode, save_output, output_format):
    """
    Type atomic/nonPositiveInteger is restricted by facet minInclusive
    with value -214379312213180406.
    """
    assert_bindings(
        schema="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTSchema-SV-IV-atomic-nonPositiveInteger-minInclusive-3.xsd",
        instance="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTXML-SV-IV-atomic-nonPositiveInteger-minInclusive-3-4.xml",
        class_name="NistschemaSvIvAtomicNonPositiveIntegerMinInclusive3",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )

def test_atomic_non_positive_integer_min_inclusive_2_nistxml_sv_iv_atomic_non_positive_integer_min_inclusive_3_5(mode, save_output, output_format):
    """
    Type atomic/nonPositiveInteger is restricted by facet minInclusive
    with value -214379312213180406.
    """
    assert_bindings(
        schema="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTSchema-SV-IV-atomic-nonPositiveInteger-minInclusive-3.xsd",
        instance="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTXML-SV-IV-atomic-nonPositiveInteger-minInclusive-3-5.xml",
        class_name="NistschemaSvIvAtomicNonPositiveIntegerMinInclusive3",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )

def test_atomic_non_positive_integer_min_inclusive_1_nistxml_sv_iv_atomic_non_positive_integer_min_inclusive_2_1(mode, save_output, output_format):
    """
    Type atomic/nonPositiveInteger is restricted by facet minInclusive
    with value -927820889571802863.
    """
    assert_bindings(
        schema="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTSchema-SV-IV-atomic-nonPositiveInteger-minInclusive-2.xsd",
        instance="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTXML-SV-IV-atomic-nonPositiveInteger-minInclusive-2-1.xml",
        class_name="NistschemaSvIvAtomicNonPositiveIntegerMinInclusive2",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )

def test_atomic_non_positive_integer_min_inclusive_1_nistxml_sv_iv_atomic_non_positive_integer_min_inclusive_2_2(mode, save_output, output_format):
    """
    Type atomic/nonPositiveInteger is restricted by facet minInclusive
    with value -927820889571802863.
    """
    assert_bindings(
        schema="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTSchema-SV-IV-atomic-nonPositiveInteger-minInclusive-2.xsd",
        instance="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTXML-SV-IV-atomic-nonPositiveInteger-minInclusive-2-2.xml",
        class_name="NistschemaSvIvAtomicNonPositiveIntegerMinInclusive2",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )

def test_atomic_non_positive_integer_min_inclusive_1_nistxml_sv_iv_atomic_non_positive_integer_min_inclusive_2_3(mode, save_output, output_format):
    """
    Type atomic/nonPositiveInteger is restricted by facet minInclusive
    with value -927820889571802863.
    """
    assert_bindings(
        schema="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTSchema-SV-IV-atomic-nonPositiveInteger-minInclusive-2.xsd",
        instance="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTXML-SV-IV-atomic-nonPositiveInteger-minInclusive-2-3.xml",
        class_name="NistschemaSvIvAtomicNonPositiveIntegerMinInclusive2",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )

def test_atomic_non_positive_integer_min_inclusive_1_nistxml_sv_iv_atomic_non_positive_integer_min_inclusive_2_4(mode, save_output, output_format):
    """
    Type atomic/nonPositiveInteger is restricted by facet minInclusive
    with value -927820889571802863.
    """
    assert_bindings(
        schema="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTSchema-SV-IV-atomic-nonPositiveInteger-minInclusive-2.xsd",
        instance="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTXML-SV-IV-atomic-nonPositiveInteger-minInclusive-2-4.xml",
        class_name="NistschemaSvIvAtomicNonPositiveIntegerMinInclusive2",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )

def test_atomic_non_positive_integer_min_inclusive_1_nistxml_sv_iv_atomic_non_positive_integer_min_inclusive_2_5(mode, save_output, output_format):
    """
    Type atomic/nonPositiveInteger is restricted by facet minInclusive
    with value -927820889571802863.
    """
    assert_bindings(
        schema="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTSchema-SV-IV-atomic-nonPositiveInteger-minInclusive-2.xsd",
        instance="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTXML-SV-IV-atomic-nonPositiveInteger-minInclusive-2-5.xml",
        class_name="NistschemaSvIvAtomicNonPositiveIntegerMinInclusive2",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )

def test_atomic_non_positive_integer_min_inclusive_nistxml_sv_iv_atomic_non_positive_integer_min_inclusive_1_1(mode, save_output, output_format):
    """
    Type atomic/nonPositiveInteger is restricted by facet minInclusive
    with value -999999999999999999.
    """
    assert_bindings(
        schema="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTSchema-SV-IV-atomic-nonPositiveInteger-minInclusive-1.xsd",
        instance="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTXML-SV-IV-atomic-nonPositiveInteger-minInclusive-1-1.xml",
        class_name="NistschemaSvIvAtomicNonPositiveIntegerMinInclusive1",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )

def test_atomic_non_positive_integer_min_inclusive_nistxml_sv_iv_atomic_non_positive_integer_min_inclusive_1_2(mode, save_output, output_format):
    """
    Type atomic/nonPositiveInteger is restricted by facet minInclusive
    with value -999999999999999999.
    """
    assert_bindings(
        schema="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTSchema-SV-IV-atomic-nonPositiveInteger-minInclusive-1.xsd",
        instance="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTXML-SV-IV-atomic-nonPositiveInteger-minInclusive-1-2.xml",
        class_name="NistschemaSvIvAtomicNonPositiveIntegerMinInclusive1",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )

def test_atomic_non_positive_integer_min_inclusive_nistxml_sv_iv_atomic_non_positive_integer_min_inclusive_1_3(mode, save_output, output_format):
    """
    Type atomic/nonPositiveInteger is restricted by facet minInclusive
    with value -999999999999999999.
    """
    assert_bindings(
        schema="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTSchema-SV-IV-atomic-nonPositiveInteger-minInclusive-1.xsd",
        instance="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTXML-SV-IV-atomic-nonPositiveInteger-minInclusive-1-3.xml",
        class_name="NistschemaSvIvAtomicNonPositiveIntegerMinInclusive1",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )

def test_atomic_non_positive_integer_min_inclusive_nistxml_sv_iv_atomic_non_positive_integer_min_inclusive_1_4(mode, save_output, output_format):
    """
    Type atomic/nonPositiveInteger is restricted by facet minInclusive
    with value -999999999999999999.
    """
    assert_bindings(
        schema="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTSchema-SV-IV-atomic-nonPositiveInteger-minInclusive-1.xsd",
        instance="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTXML-SV-IV-atomic-nonPositiveInteger-minInclusive-1-4.xml",
        class_name="NistschemaSvIvAtomicNonPositiveIntegerMinInclusive1",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )

def test_atomic_non_positive_integer_min_inclusive_nistxml_sv_iv_atomic_non_positive_integer_min_inclusive_1_5(mode, save_output, output_format):
    """
    Type atomic/nonPositiveInteger is restricted by facet minInclusive
    with value -999999999999999999.
    """
    assert_bindings(
        schema="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTSchema-SV-IV-atomic-nonPositiveInteger-minInclusive-1.xsd",
        instance="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTXML-SV-IV-atomic-nonPositiveInteger-minInclusive-1-5.xml",
        class_name="NistschemaSvIvAtomicNonPositiveIntegerMinInclusive1",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
def test_atomic_non_positive_integer_min_exclusive_4_nistxml_sv_iv_atomic_non_positive_integer_min_exclusive_5_1(mode, save_output, output_format):
"""
Type atomic/nonPositiveInteger is restricted by facet minExclusive
with value -1.
"""
assert_bindings(
schema="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTSchema-SV-IV-atomic-nonPositiveInteger-minExclusive-5.xsd",
instance="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTXML-SV-IV-atomic-nonPositiveInteger-minExclusive-5-1.xml",
class_name="NistschemaSvIvAtomicNonPositiveIntegerMinExclusive5",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_atomic_non_positive_integer_min_exclusive_3_nistxml_sv_iv_atomic_non_positive_integer_min_exclusive_4_1(mode, save_output, output_format):
"""
Type atomic/nonPositiveInteger is restricted by facet minExclusive
with value -594976296252018754.
"""
assert_bindings(
schema="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTSchema-SV-IV-atomic-nonPositiveInteger-minExclusive-4.xsd",
instance="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTXML-SV-IV-atomic-nonPositiveInteger-minExclusive-4-1.xml",
class_name="NistschemaSvIvAtomicNonPositiveIntegerMinExclusive4",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_atomic_non_positive_integer_min_exclusive_3_nistxml_sv_iv_atomic_non_positive_integer_min_exclusive_4_2(mode, save_output, output_format):
"""
Type atomic/nonPositiveInteger is restricted by facet minExclusive
with value -594976296252018754.
"""
assert_bindings(
schema="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTSchema-SV-IV-atomic-nonPositiveInteger-minExclusive-4.xsd",
instance="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTXML-SV-IV-atomic-nonPositiveInteger-minExclusive-4-2.xml",
class_name="NistschemaSvIvAtomicNonPositiveIntegerMinExclusive4",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_atomic_non_positive_integer_min_exclusive_3_nistxml_sv_iv_atomic_non_positive_integer_min_exclusive_4_3(mode, save_output, output_format):
"""
Type atomic/nonPositiveInteger is restricted by facet minExclusive
with value -594976296252018754.
"""
assert_bindings(
schema="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTSchema-SV-IV-atomic-nonPositiveInteger-minExclusive-4.xsd",
instance="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTXML-SV-IV-atomic-nonPositiveInteger-minExclusive-4-3.xml",
class_name="NistschemaSvIvAtomicNonPositiveIntegerMinExclusive4",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_atomic_non_positive_integer_min_exclusive_3_nistxml_sv_iv_atomic_non_positive_integer_min_exclusive_4_4(mode, save_output, output_format):
"""
Type atomic/nonPositiveInteger is restricted by facet minExclusive
with value -594976296252018754.
"""
assert_bindings(
schema="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTSchema-SV-IV-atomic-nonPositiveInteger-minExclusive-4.xsd",
instance="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTXML-SV-IV-atomic-nonPositiveInteger-minExclusive-4-4.xml",
class_name="NistschemaSvIvAtomicNonPositiveIntegerMinExclusive4",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_atomic_non_positive_integer_min_exclusive_3_nistxml_sv_iv_atomic_non_positive_integer_min_exclusive_4_5(mode, save_output, output_format):
"""
Type atomic/nonPositiveInteger is restricted by facet minExclusive
with value -594976296252018754.
"""
assert_bindings(
schema="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTSchema-SV-IV-atomic-nonPositiveInteger-minExclusive-4.xsd",
instance="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTXML-SV-IV-atomic-nonPositiveInteger-minExclusive-4-5.xml",
class_name="NistschemaSvIvAtomicNonPositiveIntegerMinExclusive4",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_atomic_non_positive_integer_min_exclusive_2_nistxml_sv_iv_atomic_non_positive_integer_min_exclusive_3_1(mode, save_output, output_format):
"""
Type atomic/nonPositiveInteger is restricted by facet minExclusive
with value -406392790344449528.
"""
assert_bindings(
schema="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTSchema-SV-IV-atomic-nonPositiveInteger-minExclusive-3.xsd",
instance="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTXML-SV-IV-atomic-nonPositiveInteger-minExclusive-3-1.xml",
class_name="NistschemaSvIvAtomicNonPositiveIntegerMinExclusive3",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_atomic_non_positive_integer_min_exclusive_2_nistxml_sv_iv_atomic_non_positive_integer_min_exclusive_3_2(mode, save_output, output_format):
"""
Type atomic/nonPositiveInteger is restricted by facet minExclusive
with value -406392790344449528.
"""
assert_bindings(
schema="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTSchema-SV-IV-atomic-nonPositiveInteger-minExclusive-3.xsd",
instance="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTXML-SV-IV-atomic-nonPositiveInteger-minExclusive-3-2.xml",
class_name="NistschemaSvIvAtomicNonPositiveIntegerMinExclusive3",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_atomic_non_positive_integer_min_exclusive_2_nistxml_sv_iv_atomic_non_positive_integer_min_exclusive_3_3(mode, save_output, output_format):
"""
Type atomic/nonPositiveInteger is restricted by facet minExclusive
with value -406392790344449528.
"""
assert_bindings(
schema="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTSchema-SV-IV-atomic-nonPositiveInteger-minExclusive-3.xsd",
instance="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTXML-SV-IV-atomic-nonPositiveInteger-minExclusive-3-3.xml",
class_name="NistschemaSvIvAtomicNonPositiveIntegerMinExclusive3",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_atomic_non_positive_integer_min_exclusive_2_nistxml_sv_iv_atomic_non_positive_integer_min_exclusive_3_4(mode, save_output, output_format):
"""
Type atomic/nonPositiveInteger is restricted by facet minExclusive
with value -406392790344449528.
"""
assert_bindings(
schema="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTSchema-SV-IV-atomic-nonPositiveInteger-minExclusive-3.xsd",
instance="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTXML-SV-IV-atomic-nonPositiveInteger-minExclusive-3-4.xml",
class_name="NistschemaSvIvAtomicNonPositiveIntegerMinExclusive3",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_atomic_non_positive_integer_min_exclusive_2_nistxml_sv_iv_atomic_non_positive_integer_min_exclusive_3_5(mode, save_output, output_format):
"""
Type atomic/nonPositiveInteger is restricted by facet minExclusive
with value -406392790344449528.
"""
assert_bindings(
schema="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTSchema-SV-IV-atomic-nonPositiveInteger-minExclusive-3.xsd",
instance="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTXML-SV-IV-atomic-nonPositiveInteger-minExclusive-3-5.xml",
class_name="NistschemaSvIvAtomicNonPositiveIntegerMinExclusive3",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_atomic_non_positive_integer_min_exclusive_1_nistxml_sv_iv_atomic_non_positive_integer_min_exclusive_2_1(mode, save_output, output_format):
"""
Type atomic/nonPositiveInteger is restricted by facet minExclusive
with value -482054947069493477.
"""
assert_bindings(
schema="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTSchema-SV-IV-atomic-nonPositiveInteger-minExclusive-2.xsd",
instance="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTXML-SV-IV-atomic-nonPositiveInteger-minExclusive-2-1.xml",
class_name="NistschemaSvIvAtomicNonPositiveIntegerMinExclusive2",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_atomic_non_positive_integer_min_exclusive_1_nistxml_sv_iv_atomic_non_positive_integer_min_exclusive_2_2(mode, save_output, output_format):
"""
Type atomic/nonPositiveInteger is restricted by facet minExclusive
with value -482054947069493477.
"""
assert_bindings(
schema="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTSchema-SV-IV-atomic-nonPositiveInteger-minExclusive-2.xsd",
instance="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTXML-SV-IV-atomic-nonPositiveInteger-minExclusive-2-2.xml",
class_name="NistschemaSvIvAtomicNonPositiveIntegerMinExclusive2",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_atomic_non_positive_integer_min_exclusive_1_nistxml_sv_iv_atomic_non_positive_integer_min_exclusive_2_3(mode, save_output, output_format):
"""
Type atomic/nonPositiveInteger is restricted by facet minExclusive
with value -482054947069493477.
"""
assert_bindings(
schema="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTSchema-SV-IV-atomic-nonPositiveInteger-minExclusive-2.xsd",
instance="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTXML-SV-IV-atomic-nonPositiveInteger-minExclusive-2-3.xml",
class_name="NistschemaSvIvAtomicNonPositiveIntegerMinExclusive2",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_atomic_non_positive_integer_min_exclusive_1_nistxml_sv_iv_atomic_non_positive_integer_min_exclusive_2_4(mode, save_output, output_format):
"""
Type atomic/nonPositiveInteger is restricted by facet minExclusive
with value -482054947069493477.
"""
assert_bindings(
schema="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTSchema-SV-IV-atomic-nonPositiveInteger-minExclusive-2.xsd",
instance="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTXML-SV-IV-atomic-nonPositiveInteger-minExclusive-2-4.xml",
class_name="NistschemaSvIvAtomicNonPositiveIntegerMinExclusive2",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_atomic_non_positive_integer_min_exclusive_1_nistxml_sv_iv_atomic_non_positive_integer_min_exclusive_2_5(mode, save_output, output_format):
    """
    Verify bindings for type atomic/nonPositiveInteger restricted by the
    minExclusive facet with value -482054947069493477.
    """
    fixed_args = {
        "schema": "nistData/atomic/nonPositiveInteger/Schema+Instance/NISTSchema-SV-IV-atomic-nonPositiveInteger-minExclusive-2.xsd",
        "instance": "nistData/atomic/nonPositiveInteger/Schema+Instance/NISTXML-SV-IV-atomic-nonPositiveInteger-minExclusive-2-5.xml",
        "class_name": "NistschemaSvIvAtomicNonPositiveIntegerMinExclusive2",
        "version": "1.1",
        "structure_style": "filenames",
    }
    assert_bindings(mode=mode,
                    save_output=save_output,
                    output_format=output_format,
                    **fixed_args)
def test_atomic_non_positive_integer_min_exclusive_nistxml_sv_iv_atomic_non_positive_integer_min_exclusive_1_1(mode, save_output, output_format):
    """
    Verify bindings for type atomic/nonPositiveInteger restricted by the
    minExclusive facet with value -999999999999999999.
    """
    fixed_args = {
        "schema": "nistData/atomic/nonPositiveInteger/Schema+Instance/NISTSchema-SV-IV-atomic-nonPositiveInteger-minExclusive-1.xsd",
        "instance": "nistData/atomic/nonPositiveInteger/Schema+Instance/NISTXML-SV-IV-atomic-nonPositiveInteger-minExclusive-1-1.xml",
        "class_name": "NistschemaSvIvAtomicNonPositiveIntegerMinExclusive1",
        "version": "1.1",
        "structure_style": "filenames",
    }
    assert_bindings(mode=mode,
                    save_output=save_output,
                    output_format=output_format,
                    **fixed_args)
def test_atomic_non_positive_integer_min_exclusive_nistxml_sv_iv_atomic_non_positive_integer_min_exclusive_1_2(mode, save_output, output_format):
    """
    Verify bindings for type atomic/nonPositiveInteger restricted by the
    minExclusive facet with value -999999999999999999.
    """
    fixed_args = {
        "schema": "nistData/atomic/nonPositiveInteger/Schema+Instance/NISTSchema-SV-IV-atomic-nonPositiveInteger-minExclusive-1.xsd",
        "instance": "nistData/atomic/nonPositiveInteger/Schema+Instance/NISTXML-SV-IV-atomic-nonPositiveInteger-minExclusive-1-2.xml",
        "class_name": "NistschemaSvIvAtomicNonPositiveIntegerMinExclusive1",
        "version": "1.1",
        "structure_style": "filenames",
    }
    assert_bindings(mode=mode,
                    save_output=save_output,
                    output_format=output_format,
                    **fixed_args)
def test_atomic_non_positive_integer_min_exclusive_nistxml_sv_iv_atomic_non_positive_integer_min_exclusive_1_3(mode, save_output, output_format):
    """
    Verify bindings for type atomic/nonPositiveInteger restricted by the
    minExclusive facet with value -999999999999999999.
    """
    fixed_args = {
        "schema": "nistData/atomic/nonPositiveInteger/Schema+Instance/NISTSchema-SV-IV-atomic-nonPositiveInteger-minExclusive-1.xsd",
        "instance": "nistData/atomic/nonPositiveInteger/Schema+Instance/NISTXML-SV-IV-atomic-nonPositiveInteger-minExclusive-1-3.xml",
        "class_name": "NistschemaSvIvAtomicNonPositiveIntegerMinExclusive1",
        "version": "1.1",
        "structure_style": "filenames",
    }
    assert_bindings(mode=mode,
                    save_output=save_output,
                    output_format=output_format,
                    **fixed_args)
def test_atomic_non_positive_integer_min_exclusive_nistxml_sv_iv_atomic_non_positive_integer_min_exclusive_1_4(mode, save_output, output_format):
    """
    Verify bindings for type atomic/nonPositiveInteger restricted by the
    minExclusive facet with value -999999999999999999.
    """
    fixed_args = {
        "schema": "nistData/atomic/nonPositiveInteger/Schema+Instance/NISTSchema-SV-IV-atomic-nonPositiveInteger-minExclusive-1.xsd",
        "instance": "nistData/atomic/nonPositiveInteger/Schema+Instance/NISTXML-SV-IV-atomic-nonPositiveInteger-minExclusive-1-4.xml",
        "class_name": "NistschemaSvIvAtomicNonPositiveIntegerMinExclusive1",
        "version": "1.1",
        "structure_style": "filenames",
    }
    assert_bindings(mode=mode,
                    save_output=save_output,
                    output_format=output_format,
                    **fixed_args)
def test_atomic_non_positive_integer_min_exclusive_nistxml_sv_iv_atomic_non_positive_integer_min_exclusive_1_5(mode, save_output, output_format):
    """
    Verify bindings for type atomic/nonPositiveInteger restricted by the
    minExclusive facet with value -999999999999999999.
    """
    fixed_args = {
        "schema": "nistData/atomic/nonPositiveInteger/Schema+Instance/NISTSchema-SV-IV-atomic-nonPositiveInteger-minExclusive-1.xsd",
        "instance": "nistData/atomic/nonPositiveInteger/Schema+Instance/NISTXML-SV-IV-atomic-nonPositiveInteger-minExclusive-1-5.xml",
        "class_name": "NistschemaSvIvAtomicNonPositiveIntegerMinExclusive1",
        "version": "1.1",
        "structure_style": "filenames",
    }
    assert_bindings(mode=mode,
                    save_output=save_output,
                    output_format=output_format,
                    **fixed_args)
def test_atomic_integer_white_space_nistxml_sv_iv_atomic_integer_white_space_1_1(mode, save_output, output_format):
    """
    Verify bindings for type atomic/integer restricted by the whiteSpace
    facet with value collapse.
    """
    fixed_args = {
        "schema": "nistData/atomic/integer/Schema+Instance/NISTSchema-SV-IV-atomic-integer-whiteSpace-1.xsd",
        "instance": "nistData/atomic/integer/Schema+Instance/NISTXML-SV-IV-atomic-integer-whiteSpace-1-1.xml",
        "class_name": "NistschemaSvIvAtomicIntegerWhiteSpace1",
        "version": "1.1",
        "structure_style": "filenames",
    }
    assert_bindings(mode=mode,
                    save_output=save_output,
                    output_format=output_format,
                    **fixed_args)
def test_atomic_integer_white_space_nistxml_sv_iv_atomic_integer_white_space_1_2(mode, save_output, output_format):
    """
    Verify bindings for type atomic/integer restricted by the whiteSpace
    facet with value collapse.
    """
    fixed_args = {
        "schema": "nistData/atomic/integer/Schema+Instance/NISTSchema-SV-IV-atomic-integer-whiteSpace-1.xsd",
        "instance": "nistData/atomic/integer/Schema+Instance/NISTXML-SV-IV-atomic-integer-whiteSpace-1-2.xml",
        "class_name": "NistschemaSvIvAtomicIntegerWhiteSpace1",
        "version": "1.1",
        "structure_style": "filenames",
    }
    assert_bindings(mode=mode,
                    save_output=save_output,
                    output_format=output_format,
                    **fixed_args)
def test_atomic_integer_white_space_nistxml_sv_iv_atomic_integer_white_space_1_3(mode, save_output, output_format):
    """
    Verify bindings for type atomic/integer restricted by the whiteSpace
    facet with value collapse.
    """
    fixed_args = {
        "schema": "nistData/atomic/integer/Schema+Instance/NISTSchema-SV-IV-atomic-integer-whiteSpace-1.xsd",
        "instance": "nistData/atomic/integer/Schema+Instance/NISTXML-SV-IV-atomic-integer-whiteSpace-1-3.xml",
        "class_name": "NistschemaSvIvAtomicIntegerWhiteSpace1",
        "version": "1.1",
        "structure_style": "filenames",
    }
    assert_bindings(mode=mode,
                    save_output=save_output,
                    output_format=output_format,
                    **fixed_args)
def test_atomic_integer_white_space_nistxml_sv_iv_atomic_integer_white_space_1_4(mode, save_output, output_format):
    """
    Verify bindings for type atomic/integer restricted by the whiteSpace
    facet with value collapse.
    """
    fixed_args = {
        "schema": "nistData/atomic/integer/Schema+Instance/NISTSchema-SV-IV-atomic-integer-whiteSpace-1.xsd",
        "instance": "nistData/atomic/integer/Schema+Instance/NISTXML-SV-IV-atomic-integer-whiteSpace-1-4.xml",
        "class_name": "NistschemaSvIvAtomicIntegerWhiteSpace1",
        "version": "1.1",
        "structure_style": "filenames",
    }
    assert_bindings(mode=mode,
                    save_output=save_output,
                    output_format=output_format,
                    **fixed_args)
def test_atomic_integer_white_space_nistxml_sv_iv_atomic_integer_white_space_1_5(mode, save_output, output_format):
    """
    Verify bindings for type atomic/integer restricted by the whiteSpace
    facet with value collapse.
    """
    fixed_args = {
        "schema": "nistData/atomic/integer/Schema+Instance/NISTSchema-SV-IV-atomic-integer-whiteSpace-1.xsd",
        "instance": "nistData/atomic/integer/Schema+Instance/NISTXML-SV-IV-atomic-integer-whiteSpace-1-5.xml",
        "class_name": "NistschemaSvIvAtomicIntegerWhiteSpace1",
        "version": "1.1",
        "structure_style": "filenames",
    }
    assert_bindings(mode=mode,
                    save_output=save_output,
                    output_format=output_format,
                    **fixed_args)
def test_atomic_integer_enumeration_4_nistxml_sv_iv_atomic_integer_enumeration_5_1(mode, save_output, output_format):
    """
    Verify bindings for type atomic/integer restricted by the enumeration
    facet.
    """
    fixed_args = {
        "schema": "nistData/atomic/integer/Schema+Instance/NISTSchema-SV-IV-atomic-integer-enumeration-5.xsd",
        "instance": "nistData/atomic/integer/Schema+Instance/NISTXML-SV-IV-atomic-integer-enumeration-5-1.xml",
        "class_name": "NistschemaSvIvAtomicIntegerEnumeration5",
        "version": "1.1",
        "structure_style": "filenames",
    }
    assert_bindings(mode=mode,
                    save_output=save_output,
                    output_format=output_format,
                    **fixed_args)
def test_atomic_integer_enumeration_4_nistxml_sv_iv_atomic_integer_enumeration_5_2(mode, save_output, output_format):
    """
    Verify bindings for type atomic/integer restricted by the enumeration
    facet.
    """
    fixed_args = {
        "schema": "nistData/atomic/integer/Schema+Instance/NISTSchema-SV-IV-atomic-integer-enumeration-5.xsd",
        "instance": "nistData/atomic/integer/Schema+Instance/NISTXML-SV-IV-atomic-integer-enumeration-5-2.xml",
        "class_name": "NistschemaSvIvAtomicIntegerEnumeration5",
        "version": "1.1",
        "structure_style": "filenames",
    }
    assert_bindings(mode=mode,
                    save_output=save_output,
                    output_format=output_format,
                    **fixed_args)
def test_atomic_integer_enumeration_4_nistxml_sv_iv_atomic_integer_enumeration_5_3(mode, save_output, output_format):
    """
    Verify bindings for type atomic/integer restricted by the enumeration
    facet.
    """
    fixed_args = {
        "schema": "nistData/atomic/integer/Schema+Instance/NISTSchema-SV-IV-atomic-integer-enumeration-5.xsd",
        "instance": "nistData/atomic/integer/Schema+Instance/NISTXML-SV-IV-atomic-integer-enumeration-5-3.xml",
        "class_name": "NistschemaSvIvAtomicIntegerEnumeration5",
        "version": "1.1",
        "structure_style": "filenames",
    }
    assert_bindings(mode=mode,
                    save_output=save_output,
                    output_format=output_format,
                    **fixed_args)
def test_atomic_integer_enumeration_4_nistxml_sv_iv_atomic_integer_enumeration_5_4(mode, save_output, output_format):
"""
Type | |
{
'resource_registry': stack_reg,
}
env = {
'resource_registry': env_reg,
'parameter_defaults': {'NetworkConfigWithAnsible': True}
}
utils.check_nic_config_with_ansible(mock_stack, env)
def test_check_heat_network_config_no_ansible(self):
stack_reg = {
'OS::TripleO::Controller::Net::SoftwareConfig': 'val',
'OS::TripleO::Compute::Net::SoftwareConfig': 'val',
}
env_reg = {
'OS::TripleO::Controller::Net::SoftwareConfig': 'val',
'OS::TripleO::Compute::Net::SoftwareConfig': 'val',
}
mock_stack = mock.MagicMock()
mock_stack.environment = mock.MagicMock()
mock_stack.environment.return_value = {
'resource_registry': stack_reg,
}
env = {
'resource_registry': env_reg,
'parameter_defaults': {'NetworkConfigWithAnsible': False}
}
utils.check_nic_config_with_ansible(mock_stack, env)
def test_check_stack_network_matches_env_files(self):
stack_reg = {
'OS::TripleO::Network': 'val',
'OS::TripleO::Network::External': 'val',
'OS::TripleO::Network::ExtraConfig': 'OS::Heat::None',
'OS::TripleO::Network::InternalApi': 'val',
'OS::TripleO::Network::Port::InternalApi': 'val',
'OS::TripleO::Network::Management': 'val',
'OS::TripleO::Network::Storage': 'val',
'OS::TripleO::Network::StorageMgmt': 'val',
'OS::TripleO::Network::Tenant': 'val'
}
env_reg = {
'OS::TripleO::Network': 'newval',
'OS::TripleO::Network::External': 'newval',
'OS::TripleO::Network::ExtraConfig': 'OS::Heat::None',
'OS::TripleO::Network::InternalApi': 'newval',
'OS::TripleO::Network::Management': 'newval',
'OS::TripleO::Network::Storage': 'val',
'OS::TripleO::Network::StorageMgmt': 'val',
'OS::TripleO::Network::Tenant': 'val'
}
mock_stack = mock.MagicMock()
mock_stack.environment = mock.MagicMock()
mock_stack.environment.return_value = {
'resource_registry': stack_reg
}
env = {
'resource_registry': env_reg
}
utils.check_stack_network_matches_env_files(mock_stack, env)
def test_check_stack_network_matches_env_files_fail(self):
stack_reg = {
'OS::TripleO::LoggingConfiguration': 'val',
'OS::TripleO::Network': 'val',
'OS::TripleO::Network::External': 'val',
'OS::TripleO::Network::ExtraConfig': 'OS::Heat::None',
'OS::TripleO::Network::InternalApi': 'val',
'OS::TripleO::Network::Port::InternalApi': 'val',
'OS::TripleO::Network::Management': 'val',
'OS::TripleO::Network::Storage': 'val',
'OS::TripleO::Network::StorageMgmt': 'val',
'OS::TripleO::Network::Tenant': 'val'
}
env_reg = {
'OS::TripleO::LoggingConfiguration': 'newval',
'OS::TripleO::Network': 'newval',
'OS::TripleO::Network::InternalApi': 'newval'
}
mock_stack = mock.MagicMock()
mock_stack.environment = mock.MagicMock()
mock_stack.environment.return_value = {
'resource_registry': stack_reg
}
env = {
'resource_registry': env_reg
}
with self.assertRaises(exceptions.InvalidConfiguration):
utils.check_stack_network_matches_env_files(mock_stack, env)
def test_check_ceph_fsid_matches_env_files(self):
stack_params = {
'CephClusterFSID': 'ceph_fsid_val',
'key1': 'val1',
'key2': 'val2',
}
mock_stack = mock.MagicMock()
mock_stack.environment = mock.MagicMock()
mock_stack.environment.return_value = {
'parameter_defaults': stack_params
}
provided_env = {
'parameter_defaults': {
'CephClusterFSID': mock_stack.environment()
.get('parameter_defaults', {})
.get('CephClusterFSID', False),
'key1': 'val1',
'key2': 'val2',
}
}
utils.check_ceph_fsid_matches_env_files(mock_stack, provided_env)
def test_check_ceph_fsid_matches_env_files_fail(self):
stack_params = {
'CephClusterFSID': 'ceph_fsid_val',
'key1': 'val1',
'key2': 'val2',
}
provided_env = {
'parameter_defaults': {
'CephClusterFSID': 'new_or_wrong_fsid_val',
'key1': 'val1',
'key2': 'val2',
}
}
mock_stack = mock.MagicMock()
mock_stack.environment = mock.MagicMock()
mock_stack.environment.return_value = {
'parameter_defaults': stack_params
}
with self.assertRaises(exceptions.InvalidConfiguration):
utils.check_ceph_fsid_matches_env_files(mock_stack, provided_env)
def test_check_ceph_ansible(self):
res_reg = {
'resource_registry': {
'OS::Tripleo::Services::CephMon': '/path/to/ceph-ansible.yml',
}
}
utils.check_ceph_ansible(res_reg.get('resource_registry', {}),
'UpgradePrepare')
utils.check_ceph_ansible(res_reg.get('resource_registry', {}),
'UpgradeConverge')
def test_check_ceph_ansible_fail(self):
res_reg = {
'resource_registry': {
'OS::Tripleo::Services::CephMon': '/path/to/ceph-ansible.yml',
}
}
with self.assertRaises(exceptions.InvalidConfiguration):
utils.check_ceph_ansible(res_reg.get('resource_registry', {}),
'DeployOvercloud')
def test_check_swift_and_rgw(self):
stack_reg = {
'OS::TripleO::Services::SwiftProxy': 'OS::Heat::None',
}
env_reg = {
'OS::TripleO::Services::CephRgw': 'val',
}
mock_stack = mock.MagicMock()
mock_stack.environment = mock.MagicMock()
mock_stack.environment.return_value = {
'resource_registry': stack_reg,
}
env = {
'resource_registry': env_reg,
}
utils.check_swift_and_rgw(mock_stack, env, 'UpgradePrepare')
def test_check_swift_and_rgw_fail(self):
stack_reg = {
'OS::TripleO::Services::SwiftProxy': 'val',
}
env_reg = {
'OS::TripleO::Services::CephRgw': 'val',
}
mock_stack = mock.MagicMock()
mock_stack.environment = mock.MagicMock()
mock_stack.environment.return_value = {
'resource_registry': stack_reg,
}
env = {
'resource_registry': env_reg,
}
with self.assertRaises(exceptions.InvalidConfiguration):
utils.check_swift_and_rgw(mock_stack, env, 'UpgradePrepare')
@mock.patch('subprocess.check_call')
@mock.patch('os.path.exists')
def test_remove_known_hosts(self, mock_exists, mock_check_call):
mock_exists.return_value = True
utils.remove_known_hosts('192.168.0.1')
known_hosts = os.path.expanduser("~/.ssh/known_hosts")
mock_check_call.assert_called_with(
['ssh-keygen', '-R', '192.168.0.1', '-f', known_hosts])
@mock.patch('subprocess.check_call')
@mock.patch('os.path.exists')
def test_remove_known_hosts_no_file(self, mock_exists, mock_check_call):
mock_exists.return_value = False
utils.remove_known_hosts('192.168.0.1')
mock_check_call.assert_not_called()
def test_empty_file_checksum(self):
# Used a NamedTemporaryFile since it's deleted when the file is closed.
with tempfile.NamedTemporaryFile() as empty_temp_file:
self.assertEqual(
utils.file_checksum(empty_temp_file.name),
(
'cf83e1357eefb8bdf1542850d66d8007'
'd620e4050b5715dc83f4a921d36ce9ce47'
'd0d13c5d85f2b0ff8318d2877eec2f63b'
'931bd47417a81a538327af927da3e'))
def test_non_empty_file_checksum(self):
# Used a NamedTemporaryFile since it's deleted when the file is closed.
with tempfile.NamedTemporaryFile() as temp_file:
temp_file.write(b'foo')
temp_file.flush()
self.assertEqual(
utils.file_checksum(temp_file.name),
(
'f7fbba6e0636f890e56fbbf3283e52'
'4c6fa3204ae298382d624741d0dc663'
'8326e282c41be5e4254d8820772c55'
'18a2c5a8c0c7f7eda19594a7eb539453e1ed7'))
def test_non_empty_file_checksum_SHA256(self):
"""Test 'file_checksum' function with an alternative algorithm.
"""
# Used a NamedTemporaryFile since it's deleted when the file is closed.
with tempfile.NamedTemporaryFile() as temp_file:
temp_file.write(b'foo')
temp_file.flush()
self.assertEqual(
utils.file_checksum(temp_file.name, 'sha256'),
(
'2c26b46b68ffc68ff99b453c1d304134'
'13422d706483bfa0f98a5e886266e7ae'))
def test_non_empty_file_checksum_non_compliant(self):
"""Test 'file_checksum' function with an alternative algorithm
that isn't permitted by the FIPS.
"""
# Used a NamedTemporaryFile since it's deleted when the file is closed.
with tempfile.NamedTemporaryFile() as temp_file:
temp_file.write(b'foo')
temp_file.flush()
self.assertRaises(RuntimeError, utils.file_checksum,
temp_file.name, 'md5')
def test_shouldnt_checksum_open_special_files(self):
self.assertRaises(ValueError, utils.file_checksum, '/dev/random')
self.assertRaises(ValueError, utils.file_checksum, '/dev/zero')
class TestEnsureRunAsNormalUser(TestCase):
    """Tests for the effective-user guard and the deployment user lookup."""

    @mock.patch('os.geteuid')
    def test_ensure_run_as_normal_user(self, geteuid_mock):
        # A non-root euid passes silently.
        geteuid_mock.return_value = 1000
        self.assertIsNone(utils.ensure_run_as_normal_user())

    @mock.patch('os.geteuid')
    def test_ensure_run_as_normal_user_root(self, geteuid_mock):
        # Running as root (euid 0) must be rejected.
        geteuid_mock.return_value = 0
        self.assertRaises(exceptions.RootUserExecution,
                          utils.ensure_run_as_normal_user)

    @mock.patch('getpass.getuser')
    def test_get_deployment_user(self, getuser_mock):
        # get_deployment_user simply reflects the current login name.
        getuser_mock.return_value = 'stack'
        self.assertEqual('stack', utils.get_deployment_user())
class TestCreateTempestDeployerInput(TestCase):
    def test_create_tempest_deployer_input(self):
        """create_tempest_deployer_input writes a usable config file."""
        with tempfile.NamedTemporaryFile() as cfgfile:
            utils.create_tempest_deployer_input(cfgfile.name)
            with open(cfgfile.name, 'rt') as f:
                contents = f.read()
        # Spot-check one expected section rather than the whole file.
        self.assertIn('[volume-feature-enabled]\nbootable = true', contents)
class TestGetStackOutputItem(TestCase):
    """Behaviour of utils.get_stack_output_item."""

    @staticmethod
    def _fake_stack(outputs):
        # Minimal stack double exposing to_dict()['outputs'].
        stack = mock.MagicMock()
        stack.to_dict.return_value = {'outputs': outputs}
        return stack

    def test_get_stack_output_item(self):
        emap = {'KeystonePublic': {'uri': 'http://foo:8000/'}}
        stack = self._fake_stack([{'output_key': 'EndpointMap',
                                   'output_value': emap}])
        self.assertEqual(
            {'KeystonePublic': {'uri': 'http://foo:8000/'}},
            utils.get_stack_output_item(stack, 'EndpointMap'))

    def test_get_stack_output_item_not_found(self):
        # A missing key yields None rather than raising.
        stack = self._fake_stack([{'output_key': 'foo',
                                   'output_value': 'bar'}])
        self.assertEqual(None, utils.get_stack_output_item(stack, 'baz'))

    def test_get_stack_output_item_no_stack(self):
        # A None stack also yields None.
        self.assertEqual(None, utils.get_stack_output_item(None, 'baz'))
class TestGetEndpointMap(TestCase):
    def test_get_endpoint_map(self):
        """get_endpoint_map extracts the EndpointMap stack output."""
        expected = {'KeystonePublic': {'uri': 'http://foo:8000/'}}
        stack = mock.MagicMock()
        stack.to_dict.return_value = {
            'outputs': [{'output_key': 'EndpointMap',
                         'output_value': expected}],
        }
        self.assertEqual(expected, utils.get_endpoint_map(stack))
class TestNodeGetCapabilities(TestCase):
    def test_with_capabilities(self):
        # "k:v,k:v" capability strings are parsed into a dict.
        node = mock.Mock(properties={'capabilities': 'x:y,foo:bar'})
        parsed = utils.node_get_capabilities(node)
        self.assertEqual({'x': 'y', 'foo': 'bar'}, parsed)

    def test_no_capabilities(self):
        # A node with no capabilities key yields an empty dict.
        node = mock.Mock(properties={})
        self.assertEqual({}, utils.node_get_capabilities(node))
class TestNodeAddCapabilities(TestCase):
    def test_add(self):
        """node_add_capabilities patches ironic and updates the local node."""
        bm_client = mock.Mock()
        node = mock.Mock(uuid='uuid1', properties={})
        returned_caps = utils.node_add_capabilities(bm_client, node, x='y')
        expected_patch = [{'op': 'add',
                           'path': '/properties/capabilities',
                           'value': 'x:y'}]
        bm_client.node.update.assert_called_once_with('uuid1', expected_patch)
        self.assertEqual('x:y', node.properties['capabilities'])
        self.assertEqual({'x': 'y'}, returned_caps)
class TestAssignVerifyProfiles(TestCase):
    """Tests for utils.assign_and_verify_profiles.

    Each test sets up fake ironic nodes and deployment flavors, then checks
    the (errors, warnings) counts returned and whether any node updates were
    issued against the baremetal client.
    """

    def setUp(self):
        super(TestAssignVerifyProfiles, self).setUp()
        self.bm_client = mock.Mock(spec=['node'],
                                   node=mock.Mock(spec=['list', 'update']))
        # Shared mutable node list; tests replace its contents in place so
        # the client's list() keeps returning the current fixture.
        self.nodes = []
        self.bm_client.node.list.return_value = self.nodes
        # One node requested per flavor by default.
        self.flavors = {name: (fakes.FakeFlavor(name), 1)
                        for name in ('compute', 'control')}

    def _get_fake_node(self, profile=None, possible_profiles=(),
                       provision_state='available'):
        """Build a mock ironic node.

        :param profile: value of the assigned ``profile`` capability, or None
        :param possible_profiles: profile names advertised via
            ``<name>_profile`` capabilities.  The default is an immutable
            tuple (was a mutable ``[]``, the shared default-argument pitfall).
        :param provision_state: ironic provision state of the node
        """
        caps = {'%s_profile' % p: '1'
                for p in possible_profiles}
        if profile is not None:
            caps['profile'] = profile
        caps = utils.dict_to_capabilities(caps)
        return mock.Mock(uuid=str(uuid4()),
                         properties={'capabilities': caps},
                         provision_state=provision_state,
                         spec=['uuid', 'properties', 'provision_state'])

    def _test(self, expected_errors, expected_warnings,
              assign_profiles=True, dry_run=False):
        """Run assign_and_verify_profiles and check the returned counts."""
        errors, warnings = utils.assign_and_verify_profiles(self.bm_client,
                                                            self.flavors,
                                                            assign_profiles,
                                                            dry_run)
        self.assertEqual(errors, expected_errors)
        self.assertEqual(warnings, expected_warnings)

    def test_no_matching_without_scale(self):
        # Flavors with scale 0 require no nodes at all.
        self.flavors = {name: (object(), 0)
                        for name in self.flavors}
        self.nodes[:] = [self._get_fake_node(profile='fake'),
                         self._get_fake_node(profile='fake')]
        self._test(0, 0)
        self.assertFalse(self.bm_client.node.update.called)

    def test_exact_match(self):
        self.nodes[:] = [self._get_fake_node(profile='compute'),
                         self._get_fake_node(profile='control')]
        self._test(0, 0)
        self.assertFalse(self.bm_client.node.update.called)

    def test_nodes_with_no_profiles_present(self):
        # Extra nodes without a matching profile only produce a warning.
        self.nodes[:] = [self._get_fake_node(profile='compute'),
                         self._get_fake_node(profile=None),
                         self._get_fake_node(profile='foobar'),
                         self._get_fake_node(profile='control')]
        self._test(0, 1)
        self.assertFalse(self.bm_client.node.update.called)

    def test_more_nodes_with_profiles_present(self):
        # Surplus nodes for a flavor only produce a warning.
        self.nodes[:] = [self._get_fake_node(profile='compute'),
                         self._get_fake_node(profile='compute'),
                         self._get_fake_node(profile='compute'),
                         self._get_fake_node(profile='control')]
        self._test(0, 1)
        self.assertFalse(self.bm_client.node.update.called)

    def test_no_nodes(self):
        # One error per each flavor
        self._test(2, 0)
        self.assertFalse(self.bm_client.node.update.called)

    def test_not_enough_nodes(self):
        self.nodes[:] = [self._get_fake_node(profile='compute')]
        self._test(1, 0)
        self.assertFalse(self.bm_client.node.update.called)

    def test_assign_profiles(self):
        self.nodes[:] = [self._get_fake_node(possible_profiles=['compute']),
                         self._get_fake_node(possible_profiles=['control']),
                         self._get_fake_node(possible_profiles=['compute'])]
        # one warning for a redundant node
        self._test(0, 1, assign_profiles=True)
        self.assertEqual(2, self.bm_client.node.update.call_count)
        actual_profiles = [utils.node_get_capabilities(node).get('profile')
                           for node in self.nodes]
        # str as key sorts None before the profile strings.
        actual_profiles.sort(key=str)
        self.assertEqual([None, 'compute', 'control'], actual_profiles)

    def test_assign_profiles_multiple_options(self):
        self.nodes[:] = [self._get_fake_node(possible_profiles=['compute',
                                                                'control']),
                         self._get_fake_node(possible_profiles=['compute',
                                                                'control'])]
        self._test(0, 0, assign_profiles=True)
        self.assertEqual(2, self.bm_client.node.update.call_count)
        actual_profiles = [utils.node_get_capabilities(node).get('profile')
                           for node in self.nodes]
        actual_profiles.sort(key=str)
        self.assertEqual(['compute', 'control'], actual_profiles)

    def test_assign_profiles_not_enough(self):
        self.nodes[:] = [self._get_fake_node(possible_profiles=['compute']),
                         self._get_fake_node(possible_profiles=['compute']),
                         self._get_fake_node(possible_profiles=['compute'])]
        self._test(1, 1, assign_profiles=True)
        # no node update for failed flavor
        self.assertEqual(1, self.bm_client.node.update.call_count)
        actual_profiles = [utils.node_get_capabilities(node).get('profile')
                           for node in self.nodes]
        actual_profiles.sort(key=str)
        self.assertEqual([None, None, 'compute'], actual_profiles)

    def test_assign_profiles_dry_run(self):
        self.nodes[:] = [self._get_fake_node(possible_profiles=['compute']),
                         self._get_fake_node(possible_profiles=['control']),
                         self._get_fake_node(possible_profiles=['compute'])]
        self._test(0, 1, dry_run=True)
        # Dry run must not touch any node.
        self.assertFalse(self.bm_client.node.update.called)
        actual_profiles = [utils.node_get_capabilities(node).get('profile')
                           for node in self.nodes]
        self.assertEqual([None] * 3, actual_profiles)

    def test_scale(self):
        # active nodes with assigned profiles are fine
        self.nodes[:] = [self._get_fake_node(profile='compute',
                                             provision_state='active'),
                         self._get_fake_node(profile='control')]
        self._test(0, 0, assign_profiles=True)
        self.assertFalse(self.bm_client.node.update.called)

    def test_assign_profiles_wrong_state(self):
        # active nodes are not considered for assigning profiles
        self.nodes[:] = [self._get_fake_node(possible_profiles=['compute'],
                                             provision_state='active'),
                         self._get_fake_node(possible_profiles=['control'],
                                             provision_state='cleaning'),
                         self._get_fake_node(profile='compute',
                                             provision_state='error')]
        self._test(2, 1, assign_profiles=True)
        self.assertFalse(self.bm_client.node.update.called)

    def test_no_spurious_warnings(self):
        self.nodes[:] = [self._get_fake_node(profile=None)]
        self.flavors = {'baremetal': (fakes.FakeFlavor('baremetal', None), 1)}
        self._test(0, 0)
class TestPromptUser(TestCase):
    """prompt_user_for_confirmation behaviour for tty and non-tty stdin."""

    def setUp(self):
        super(TestPromptUser, self).setUp()
        self.logger = mock.MagicMock()
        self.logger.info = mock.MagicMock()

    def _prompt(self):
        # All tests use the same prompt text and logger.
        return utils.prompt_user_for_confirmation("[y/N]?", self.logger)

    @mock.patch('sys.stdin')
    def test_user_accepts(self, stdin_mock):
        stdin_mock.isatty.return_value = True
        stdin_mock.readline.return_value = "yes"
        self.assertTrue(self._prompt())

    @mock.patch('sys.stdin')
    def test_user_declines(self, stdin_mock):
        stdin_mock.isatty.return_value = True
        stdin_mock.readline.return_value = "no"
        self.assertFalse(self._prompt())

    @mock.patch('sys.stdin')
    def test_user_no_tty(self, stdin_mock):
        # Without a tty the answer must default to "no".
        stdin_mock.isatty.return_value = False
        stdin_mock.readline.return_value = "yes"
        self.assertFalse(self._prompt())

    @mock.patch('sys.stdin')
    def test_user_aborts_control_c(self, stdin_mock):
        # Ctrl-C while reading counts as a refusal.
        stdin_mock.isatty.return_value = False
        stdin_mock.readline.side_effect = KeyboardInterrupt()
        self.assertFalse(self._prompt())

    @mock.patch('sys.stdin')
    def test_user_aborts_with_control_d(self, stdin_mock):
        # Ctrl-D (EOF) while reading counts as a refusal.
        stdin_mock.isatty.return_value = False
        stdin_mock.readline.side_effect = EOFError()
        self.assertFalse(self._prompt())
class TestReplaceLinks(TestCase):
    """Link rewriting done by utils.replace_links_in_template_contents."""

    def setUp(self):
        super(TestReplaceLinks, self).setUp()
        self.link_replacement = {
            'file:///home/stack/test.sh':
                'user-files/home/stack/test.sh',
            'file:///usr/share/extra-templates/my.yml':
                'user-files/usr/share/extra-templates/my.yml',
        }

    def _replace(self, contents):
        # Helper: rewrite ``contents`` with the fixture replacement map.
        return utils.replace_links_in_template_contents(
            contents, self.link_replacement)

    def test_replace_links(self):
        source = (
            'description: my template\n'
            'heat_template_version: "2014-10-16"\n'
            'parameters:\n'
            '  foo:\n'
            '    default: ["bar"]\n'
            '    type: json\n'
            '  bar:\n'
            '    default: []\n'
            'resources:\n'
            '  test_config:\n'
            '    properties:\n'
            '      config: {get_file: "file:///home/stack/test.sh"}\n'
            '    type: OS::Heat::SoftwareConfig\n'
        )
        expected = (
            'description: my template\n'
            'heat_template_version: "2014-10-16"\n'
            'parameters:\n'
            '  foo:\n'
            '    default: ["bar"]\n'
            '    type: json\n'
            '  bar:\n'
            '    default: []\n'
            'resources:\n'
            '  test_config:\n'
            '    properties:\n'
            '      config: {get_file: user-files/home/stack/test.sh}\n'
            '    type: OS::Heat::SoftwareConfig\n'
        )
        # the yaml->string dumps aren't always character-precise, so
        # we need to parse them into dicts for comparison
        self.assertEqual(yaml.safe_load(expected),
                         yaml.safe_load(self._replace(source)))

    def test_replace_links_not_template(self):
        # valid JSON/YAML, but doesn't have heat_template_version
        source = '{"get_file": "file:///home/stack/test.sh"}'
        self.assertEqual(source, self._replace(source))

    def test_replace_links_not_yaml(self):
        # invalid JSON/YAML -- curly brace left open
        source = '{"invalid JSON"'
        self.assertEqual(source, self._replace(source))
| |
# -*- coding: utf-8 -*-
# This file as well as the whole tsfresh package are licenced under the MIT licence (see the LICENCE.txt)
# <NAME> (<EMAIL>), Blue Yonder Gmbh, 2016
"""
Contains a feature selection method that evaluates the importance of the different extracted features. To do so,
for every feature the influence on the target is evaluated by an univariate tests and the p-Value is calculated.
The methods that calculate the p-values are called feature selectors.
Afterwards the Benjamini Hochberg procedure which is a multiple testing procedure decides which features to keep and
which to cut off (solely based on the p-values).
"""
from multiprocessing import Pool
import warnings
import numpy as np
import pandas as pd
from functools import partial, reduce
from statsmodels.stats.multitest import multipletests
from tsfresh import defaults
from tsfresh.feature_selection.significance_tests import (
target_binary_feature_real_test,
target_real_feature_binary_test,
target_real_feature_real_test,
target_binary_feature_binary_test,
)
from tsfresh.utilities.distribution import initialize_warnings_in_workers
def calculate_relevance_table(
    X,
    y,
    ml_task="auto",
    multiclass=False,
    n_significant=1,
    n_jobs=defaults.N_PROCESSES,
    show_warnings=defaults.SHOW_WARNINGS,
    chunksize=defaults.CHUNKSIZE,
    test_for_binary_target_binary_feature=defaults.TEST_FOR_BINARY_TARGET_BINARY_FEATURE,
    test_for_binary_target_real_feature=defaults.TEST_FOR_BINARY_TARGET_REAL_FEATURE,
    test_for_real_target_binary_feature=defaults.TEST_FOR_REAL_TARGET_BINARY_FEATURE,
    test_for_real_target_real_feature=defaults.TEST_FOR_REAL_TARGET_REAL_FEATURE,
    fdr_level=defaults.FDR_LEVEL,
    hypotheses_independent=defaults.HYPOTHESES_INDEPENDENT,
):
    """
    Calculate the relevance table for the features contained in feature matrix `X` with respect to target vector `y`.
    The relevance table is calculated for the intended machine learning task `ml_task`.

    To accomplish this for each feature from the input pandas.DataFrame an univariate feature significance test
    is conducted. Those tests generate p values that are then evaluated by the Benjamini Hochberg procedure to
    decide which features to keep and which to delete.

    We are testing

    :math:`H_0` = the Feature is not relevant and should not be added

    against

    :math:`H_1` = the Feature is relevant and should be kept

    or in other words

    :math:`H_0` = Target and Feature are independent / the Feature has no influence on the target

    :math:`H_1` = Target and Feature are associated / dependent

    When the target is binary this becomes

    :math:`H_0 = \\left( F_{\\text{target}=1} = F_{\\text{target}=0} \\right)`

    :math:`H_1 = \\left( F_{\\text{target}=1} \\neq F_{\\text{target}=0} \\right)`

    Where :math:`F` is the distribution of the target.

    In the same way we can state the hypothesis when the feature is binary

    :math:`H_0 = \\left( T_{\\text{feature}=1} = T_{\\text{feature}=0} \\right)`

    :math:`H_1 = \\left( T_{\\text{feature}=1} \\neq T_{\\text{feature}=0} \\right)`

    Here :math:`T` is the distribution of the target.

    TODO: And for real valued?

    :param X: Feature matrix in the format mentioned before which will be reduced to only the relevant features.
              It can contain both binary or real-valued features at the same time.
    :type X: pandas.DataFrame

    :param y: Target vector which is needed to test which features are relevant. Can be binary or real-valued.
    :type y: pandas.Series or numpy.ndarray

    :param ml_task: The intended machine learning task. Either `'classification'`, `'regression'` or `'auto'`.
                    Defaults to `'auto'`, meaning the intended task is inferred from `y`.
                    If `y` has a boolean, integer or object dtype, the task is assumend to be classification,
                    else regression.
    :type ml_task: str

    :param multiclass: Whether the problem is multiclass classification. This modifies the way in which features
                       are selected. Multiclass requires the features to be statistically significant for
                       predicting n_significant classes.
    :type multiclass: bool

    :param n_significant: The number of classes for which features should be statistically significant predictors
                          to be regarded as 'relevant'
    :type n_significant: int

    :param test_for_binary_target_binary_feature: Which test to be used for binary target, binary feature
                                                  (currently unused)
    :type test_for_binary_target_binary_feature: str

    :param test_for_binary_target_real_feature: Which test to be used for binary target, real feature
    :type test_for_binary_target_real_feature: str

    :param test_for_real_target_binary_feature: Which test to be used for real target, binary feature (currently unused)
    :type test_for_real_target_binary_feature: str

    :param test_for_real_target_real_feature: Which test to be used for real target, real feature (currently unused)
    :type test_for_real_target_real_feature: str

    :param fdr_level: The FDR level that should be respected, this is the theoretical expected percentage of irrelevant
                      features among all created features.
    :type fdr_level: float

    :param hypotheses_independent: Can the significance of the features be assumed to be independent?
                                   Normally, this should be set to False as the features are never
                                   independent (e.g. mean and median)
    :type hypotheses_independent: bool

    :param n_jobs: Number of processes to use during the p-value calculation
    :type n_jobs: int

    :param show_warnings: Show warnings during the p-value calculation (needed for debugging of calculators).
    :type show_warnings: bool

    :param chunksize: The size of one chunk that is submitted to the worker
        process for the parallelisation. Where one chunk is defined as
        the data for one feature. If you set the chunksize
        to 10, then it means that one task is to filter 10 features.
        If it is set it to None, depending on distributor,
        heuristics are used to find the optimal chunksize. If you get out of
        memory exceptions, you can try it with the dask distributor and a
        smaller chunksize.
    :type chunksize: None or int

    :return: A pandas.DataFrame with each column of the input DataFrame X as index with information on the significance
             of this particular feature. The DataFrame has the columns
             "feature",
             "type" (binary, real or const),
             "p_value" (the significance of this feature as a p-value, lower means more significant)
             "relevant" (True if the Benjamini Hochberg procedure rejected the null hypothesis [the feature is
             not relevant] for this feature).
             If the problem is `multiclass` with n classes, the DataFrame will contain n
             columns named "p_value_CLASSID" instead of the "p_value" column.
             `CLASSID` refers here to the different values set in `y`.
             There will also be n columns named `relevant_CLASSID`, indicating whether
             the feature is relevant for that class.
    :rtype: pandas.DataFrame
    """
    # Make sure X and y both have the exact same indices
    y = y.sort_index()
    X = X.sort_index()
    assert list(y.index) == list(X.index), "The index of X and y need to be the same"

    if ml_task not in ["auto", "classification", "regression"]:
        raise ValueError(
            "ml_task must be one of: 'auto', 'classification', 'regression'"
        )
    elif ml_task == "auto":
        ml_task = infer_ml_task(y)

    if multiclass:
        assert (
            ml_task == "classification"
        ), "ml_task must be classification for multiclass problem"
        assert (
            len(y.unique()) >= n_significant
        ), "n_significant must not exceed the total number of classes"
        if len(y.unique()) <= 2:
            # Two classes is an ordinary binary problem; fall back silently.
            warnings.warn(
                "Two or fewer classes, binary feature selection will be used (multiclass = False)"
            )
            multiclass = False

    # All p-value computation runs under a controlled warning filter so that
    # worker noise can be switched off for production runs.
    with warnings.catch_warnings():
        if not show_warnings:
            warnings.simplefilter("ignore")
        else:
            warnings.simplefilter("default")

        if n_jobs == 0:
            # Serial execution -- no pool is created.
            map_function = map
        else:
            pool = Pool(
                processes=n_jobs,
                initializer=initialize_warnings_in_workers,
                initargs=(show_warnings,),
            )
            map_function = partial(pool.map, chunksize=chunksize)

        relevance_table = pd.DataFrame(index=pd.Series(X.columns, name="feature"))
        relevance_table["feature"] = relevance_table.index
        # Classify each column as "real", "binary" or "constant".
        relevance_table["type"] = pd.Series(
            map_function(
                get_feature_type, [X[feature] for feature in relevance_table.index]
            ),
            index=relevance_table.index,
        )
        table_real = relevance_table[relevance_table.type == "real"].copy()
        table_binary = relevance_table[relevance_table.type == "binary"].copy()

        # Constant features can never be relevant.
        # FIX: np.NaN was removed in NumPy 2.0 -- use the canonical np.nan.
        table_const = relevance_table[relevance_table.type == "constant"].copy()
        table_const["p_value"] = np.nan
        table_const["relevant"] = False

        if not table_const.empty:
            warnings.warn(
                "[test_feature_significance] Constant features: {}".format(
                    ", ".join(map(str, table_const.feature))
                ),
                RuntimeWarning,
            )

        if len(table_const) == len(relevance_table):
            # Nothing left to test -- clean up the pool and return early.
            if n_jobs != 0:
                pool.close()
                pool.terminate()
                pool.join()
            return table_const

        if ml_task == "classification":
            # One one-vs-rest relevance table per class label.
            tables = []
            for label in y.unique():
                _test_real_feature = partial(
                    target_binary_feature_real_test,
                    y=(y == label),
                    test=test_for_binary_target_real_feature,
                )
                _test_binary_feature = partial(
                    target_binary_feature_binary_test, y=(y == label)
                )
                tmp = _calculate_relevance_table_for_implicit_target(
                    table_real,
                    table_binary,
                    X,
                    _test_real_feature,
                    _test_binary_feature,
                    hypotheses_independent,
                    fdr_level,
                    map_function,
                )
                if multiclass:
                    # Suffix per-class columns: p_value_<label>, relevant_<label>.
                    tmp = tmp.reset_index(drop=True)
                    tmp.columns = tmp.columns.map(
                        lambda x: x + "_" + str(label)
                        if x != "feature" and x != "type"
                        else x
                    )
                tables.append(tmp)

            if multiclass:
                # Outer-merge the per-class tables, then count in how many
                # classes each feature was significant.
                relevance_table = reduce(
                    lambda left, right: pd.merge(
                        left, right, on=["feature", "type"], how="outer"
                    ),
                    tables,
                )
                relevance_table["n_significant"] = relevance_table.filter(
                    regex="^relevant_", axis=1
                ).sum(axis=1)
                relevance_table["relevant"] = (
                    relevance_table["n_significant"] >= n_significant
                )
                relevance_table.index = relevance_table["feature"]
            else:
                relevance_table = combine_relevance_tables(tables)
        elif ml_task == "regression":
            _test_real_feature = partial(target_real_feature_real_test, y=y)
            _test_binary_feature = partial(target_real_feature_binary_test, y=y)
            relevance_table = _calculate_relevance_table_for_implicit_target(
                table_real,
                table_binary,
                X,
                _test_real_feature,
                _test_binary_feature,
                hypotheses_independent,
                fdr_level,
                map_function,
            )

        if n_jobs != 0:
            pool.close()
            pool.terminate()
            pool.join()

    # set constant features to be irrelevant for all classes in multiclass case
    if multiclass:
        for column in relevance_table.filter(regex="^relevant_", axis=1).columns:
            table_const[column] = False
        table_const["n_significant"] = 0
        table_const.drop(columns=["p_value"], inplace=True)

    relevance_table = pd.concat([relevance_table, table_const], axis=0)

    if sum(relevance_table["relevant"]) == 0:
        warnings.warn(
            "No feature was found relevant for {} for fdr level = {} (which corresponds to the maximal percentage "
            "of irrelevant features, consider using an higher fdr level or add other features.".format(
                ml_task, fdr_level
            ),
            RuntimeWarning,
        )
    return relevance_table
def _calculate_relevance_table_for_implicit_target(
table_real,
table_binary,
X,
test_real_feature,
test_binary_feature,
hypotheses_independent,
fdr_level,
map_function,
):
table_real["p_value"] = pd.Series(
map_function(test_real_feature, | |
raise ValueError('The default search space was not found!')
return search_space
def default_searcher(cls, search_space, options):
    # Build a searcher of type `cls` over `search_space`; `optimize_direction`
    # and `make_searcher` come from the enclosing scope. Note: the caller's
    # `options` dict is intentionally updated in place, as before.
    assert search_space is not None, '"search_space" should be specified when "searcher" is None or str.'
    assert optimize_direction in {'max', 'min'}
    if options is None:
        options = {}
    options['optimize_direction'] = optimize_direction
    return make_searcher(cls, search_space, **options)
def default_experiment_callbacks():
    # Pick the notebook- or console-flavoured experiment callbacks from the
    # config, instantiating any entry that is given as a dotted-path string.
    if isnotebook():
        configured = cfg.experiment_callbacks_notebook
    else:
        configured = cfg.experiment_callbacks_console
    return [load_module(cb)() if isinstance(cb, str) else cb for cb in configured]
def default_search_callbacks():
    # Same idea as default_experiment_callbacks, but for the hyper-model
    # search callbacks configured in `cfg`.
    if isnotebook():
        configured = cfg.hyper_model_callbacks_notebook
    else:
        configured = cfg.hyper_model_callbacks_console
    return [load_module(cb)() if isinstance(cb, str) else cb for cb in configured]
def append_early_stopping_callbacks(cbs):
    """Return `cbs` with an EarlyStoppingCallback prepended, unless one exists.

    Bug fix: the assert admits tuples, but `[es] + cbs` raises TypeError
    when `cbs` is a tuple -- coerce with `list(cbs)` before concatenating.
    """
    from hypernets.core.callbacks import EarlyStoppingCallback

    assert isinstance(cbs, (tuple, list))
    if any(isinstance(cb, EarlyStoppingCallback) for cb in cbs):
        # Caller already configured early stopping; leave it untouched.
        return cbs
    # Fall back to the scorer's sign when no explicit direction was given.
    op = optimize_direction if optimize_direction is not None \
        else 'max' if scorer._sign > 0 else 'min'
    es = EarlyStoppingCallback(early_stopping_rounds, op,
                               time_limit=early_stopping_time_limit,
                               expected_reward=early_stopping_reward)
    return [es] + list(cbs)
kwargs = kwargs.copy()
kwargs['max_trials'] = max_trials
kwargs['eval_size'] = eval_size
kwargs['cv'] = cv
kwargs['num_folds'] = num_folds
kwargs['verbose'] = verbose
if kwargs.get('covariables') is not None and covariates is None:
covariates = kwargs.pop('covariables')
# 1. Set Log Level
if log_level is None:
log_level = logging.WARN
logging.set_level(log_level)
# 2. Set Random State
if random_state is not None:
set_random_state(seed=random_state, mode=mode)
if mode != consts.Mode_STATS:
try:
from tensorflow import __version__
logger.info(f'The tensorflow version is {str(__version__)}.')
except ImportError:
raise RuntimeError('Please install `tensorflow` package first. command: pip install tensorflow.')
# 3. Check Data ,Task and Mode
assert train_data is not None, 'train data is required.'
assert eval_data is None or type(eval_data) is type(train_data)
assert test_data is None or type(test_data) is type(train_data)
assert task is not None, 'task is required. Task naming paradigm:' \
f'{consts.TASK_LIST_FORECAST + consts.TASK_LIST_CLASSIFICATION + consts.TASK_LIST_REGRESSION}'
if task not in consts.TASK_LIST_FORECAST + consts.TASK_LIST_CLASSIFICATION + consts.TASK_LIST_REGRESSION:
raise ValueError(f'Task naming paradigm:'
f'{consts.TASK_LIST_FORECAST + consts.TASK_LIST_CLASSIFICATION + consts.TASK_LIST_REGRESSION}')
if task in consts.TASK_LIST_FORECAST and timestamp is None:
raise ValueError("Forecast task 'timestamp' cannot be None.")
if task in consts.TASK_LIST_FORECAST and covariates is None:
logger.info('If the data contains covariates, specify the covariable column names.')
if freq is consts.DISCRETE_FORECAST and mode is consts.Mode_STATS:
raise RuntimeError('Note: `stats` mode does not support discrete data forecast.')
# 4. Set GPU Usage Strategy for DL Mode
if mode == consts.Mode_DL:
if dl_gpu_usage_strategy == 0:
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
elif dl_gpu_usage_strategy == 1:
from hyperts.utils import tf_gpu
tf_gpu.set_memory_growth()
elif dl_gpu_usage_strategy == 2:
from hyperts.utils import tf_gpu
tf_gpu.set_memory_limit(limit=dl_memory_limit)
else:
raise ValueError(f'The GPU strategy is not supported. '
f'Default [0:cpu | 1:gpu-memory growth | 2: gpu-memory limit].')
# 5. Load data
if isinstance(train_data, str):
import pandas as pd
tb = get_tool_box(pd.DataFrame)
train_data = tb.load_data(train_data, reset_index=True)
eval_data = tb.load_data(eval_data, reset_index=True) if eval_data is not None else None
X_test = tb.load_data(test_data, reset_index=True) if test_data is not None else None
else:
tb = get_tool_box(train_data, eval_data, test_data)
train_data = tb.reset_index(train_data)
eval_data = tb.reset_index(eval_data) if eval_data is not None else None
X_test = tb.reset_index(test_data) if test_data is not None else None
if task in consts.TASK_LIST_FORECAST:
if timestamp is consts.MISSING_TIMESTAMP:
timestamp = consts.TIMESTAMP
if freq is None or freq is consts.DISCRETE_FORECAST:
generate_freq = 'H'
freq = consts.DISCRETE_FORECAST
else:
generate_freq = freq
pseudo_timestamp = tb.DataFrame({f'{timestamp}':
tb.date_range(start=consts.PSEUDO_DATE_START,
periods=len(train_data),
freq=generate_freq)})
train_data = tb.concat_df([pseudo_timestamp, train_data], axis=1)
kwargs['train_end_date'] = pseudo_timestamp[timestamp].max()
kwargs['generate_freq'] = generate_freq
if (freq is not None and 'N' in freq) or 'N' in tb.infer_ts_freq(train_data, ts_name=timestamp):
timestamp_format = None
train_data[timestamp] = tb.datetime_format(train_data[timestamp], format=timestamp_format)
if eval_data is not None:
eval_data[timestamp] = tb.datetime_format(eval_data[timestamp], format=timestamp_format)
if X_test is not None:
X_test[timestamp] = tb.datetime_format(X_test[timestamp], format=timestamp_format)
# 6. Split X_train, y_train, X_eval, y_eval
X_train, y_train, X_eval, y_eval = None, None, None, None
if task in consts.TASK_LIST_CLASSIFICATION + consts.TASK_LIST_REGRESSION:
if target is None:
target = find_target(train_data)
X_train, y_train = train_data.drop(columns=[target]), train_data.pop(target)
if eval_data is not None:
X_eval, y_eval = eval_data.drop(columns=[target]), eval_data.pop(target)
elif task in consts.TASK_LIST_FORECAST:
excluded_variables = [timestamp] + covariates if covariates is not None else [timestamp]
if target is None:
target = tb.list_diff(train_data.columns.tolist(), excluded_variables)
elif target is not None and isinstance(target, str):
target = [target]
X_train, y_train = train_data[excluded_variables], train_data[target]
if eval_data is not None:
X_eval, y_eval = eval_data[excluded_variables], eval_data[target]
if freq is None:
freq = tb.infer_ts_freq(X_train, ts_name=timestamp)
if freq is None:
raise RuntimeError('Unable to infer correct frequency, please check data or specify frequency.')
elif freq is not None and freq is not consts.DISCRETE_FORECAST:
infer_freq = tb.infer_ts_freq(X_train, ts_name=timestamp)
if freq != infer_freq:
logger.warning(f'The specified frequency is {freq}, but the inferred frequency is {infer_freq}.')
# 7. Covarite Transformer
if covariates is not None:
from hyperts.utils.transformers import CovariateTransformer
cs = CovariateTransformer(covariables=covariates).fit(X_train)
actual_covariates = cs.covariables_
else:
from hyperts.utils.transformers import IdentityTransformer
cs = IdentityTransformer().fit(X_train)
actual_covariates = covariates
# 8. Infer Forecast Window for DL Mode
if mode in [consts.Mode_DL, consts.Mode_NAS] and task in consts.TASK_LIST_FORECAST:
if forecast_train_data_periods is None:
X_train_length = len(X_train)
elif isinstance(forecast_train_data_periods, int) and forecast_train_data_periods < len(X_train):
X_train_length = forecast_train_data_periods
else:
raise ValueError(f'forecast_train_data_periods can not be greater than {len(X_train)}.')
if cv:
X_train_length = int(X_train_length // num_folds)
if eval_data is not None:
max_win_size = int((X_train_length + dl_forecast_horizon - 1) / 2)
elif isinstance(eval_size, int):
if X_train_length > eval_size - dl_forecast_horizon + 1:
max_win_size = int((X_train_length - eval_size - dl_forecast_horizon + 1) / 2)
else:
raise ValueError(f'eval_size has to be less than {X_train_length - dl_forecast_horizon + 1}.')
else:
max_win_size = int((X_train_length * (1 - eval_size) - dl_forecast_horizon + 1) / 2)
if max_win_size < 1:
logger.warning('The trian data is too short to start dl mode, '
'stats mode has been automatically switched.')
mode = consts.Mode_STATS
hist_store_upper_limit = consts.HISTORY_UPPER_LIMIT
else:
if dl_forecast_window is None:
import numpy as np
if max_win_size <= 10:
dl_forecast_window = list(filter(lambda x: x <= max_win_size, [2, 4, 6, 8, 10]))
else:
candidate_windows = [3, 8, 12, 24, 30]*1 + [48, 60]*1 + [72, 96, 168, 183]*1
dl_forecast_window = list(filter(lambda x: x <= max_win_size, candidate_windows))
periods = [tb.fft_infer_period(y_train[col]) for col in target]
period = int(np.argmax(np.bincount(periods)))
if period > 0 and period <= max_win_size:
dl_forecast_window.append(period)
elif isinstance(dl_forecast_window, int):
assert dl_forecast_window < max_win_size, f'The slide window can not be greater than {max_win_size}'
dl_forecast_window = [dl_forecast_window]
elif isinstance(dl_forecast_window, list):
assert max(
dl_forecast_window) < max_win_size, f'The slide window can not be greater than {max_win_size}'
else:
raise ValueError(f'This type of {dl_forecast_window} is not supported.')
logger.info(f'The forecast window length of DL mode list is: {dl_forecast_window}')
hist_store_upper_limit = max(dl_forecast_window) + 1
else:
hist_store_upper_limit = consts.HISTORY_UPPER_LIMIT
# 9. Task Type Infering
if task in [consts.Task_FORECAST] and len(y_train.columns) == 1:
task = consts.Task_UNIVARIATE_FORECAST
elif task in [consts.Task_FORECAST] and len(y_train.columns) > 1:
task = consts.Task_MULTIVARIATE_FORECAST
if task in [consts.Task_CLASSIFICATION]:
if y_train.nunique() == 2:
if len(X_train.columns) == 1:
task = consts.Task_UNIVARIATE_BINARYCLASS
else:
task = consts.Task_MULTIVARIATE_BINARYCLASS
else:
if len(X_train.columns) == 1:
task = consts.Task_UNIVARIATE_MULTICALSS
else:
task = consts.Task_MULTIVARIATE_MULTICALSS
logger.info(f'Inference task type could be [{task}].')
# 10. Configuration
if reward_metric is None:
if task in consts.TASK_LIST_FORECAST:
reward_metric = 'mae'
if task in consts.TASK_LIST_CLASSIFICATION:
reward_metric = 'accuracy'
if task in consts.TASK_LIST_REGRESSION:
reward_metric = 'rmse'
logger.info(f'No reward metric specified, use "{reward_metric}" for {task} task by default.')
if isinstance(reward_metric, str):
logger.info(f'Reward_metric is [{reward_metric}].')
else:
logger.info(f'Reward_metric is [{reward_metric.__name__}].')
# 11. Get scorer
if kwargs.get('scorer') is None:
kwargs['pos_label'] = tb.infer_pos_label(y_train, task, kwargs.get('pos_label'))
scorer = tb.metrics.metric_to_scorer(reward_metric, task=task, pos_label=kwargs.get('pos_label'),
optimize_direction=optimize_direction)
else:
scorer = kwargs.pop('scorer')
if isinstance(scorer, str):
raise ValueError('scorer should be a [make_scorer(metric, greater_is_better)] type.')
# 12. Specify optimization direction
if optimize_direction is None or len(optimize_direction) == 0:
optimize_direction = 'max' if scorer._sign > 0 else 'min'
logger.info(f'Optimize direction is [{optimize_direction}].')
# 13. Get search space
if (searcher is None or isinstance(searcher, str)) and search_space is None:
search_space = default_search_space(task=task, metrics=reward_metric, covariates=actual_covariates)
search_space.update_init_params(freq=freq)
else:
search_space.update_init_params(
task=task,
timestamp=timestamp,
metrics=to_metric_str(reward_metric),
covariables=actual_covariates,
window=dl_forecast_window,
horizon=dl_forecast_horizon,
freq=freq)
# 14. Get searcher
searcher = to_search_object(searcher, search_space)
logger.info(f'Searcher is [{searcher.__class__.__name__}].')
# 15. Define callbacks
if search_callbacks is None:
search_callbacks = default_search_callbacks()
search_callbacks = append_early_stopping_callbacks(search_callbacks)
if callbacks is None:
callbacks = default_experiment_callbacks()
# 16. Define discriminator
if discriminator is None and cfg.experiment_discriminator is not None and len(cfg.experiment_discriminator) > 0:
discriminator = make_discriminator(cfg.experiment_discriminator,
optimize_direction=optimize_direction,
**(cfg.experiment_discriminator_options or {}))
# 17. Define id
if id is None:
hasher = tb.data_hasher()
id = hasher(dict(X_train=X_train, y_train=y_train, X_eval=X_eval, | |
# Source repository: ShubhamThakre/datahub
from datetime import datetime
from functools import lru_cache
from typing import Dict, Iterable, Optional
import dateutil.parser as dp
import requests
from pydantic import validator
from pydantic.fields import Field
from requests.models import HTTPError
from sqllineage.runner import LineageRunner
import datahub.emitter.mce_builder as builder
from datahub.configuration.source_common import DatasetLineageProviderConfigBase
from datahub.ingestion.api.common import PipelineContext
from datahub.ingestion.api.decorators import (
SourceCapability,
SupportStatus,
capability,
config_class,
platform_name,
support_status,
)
from datahub.ingestion.api.source import Source, SourceReport
from datahub.ingestion.api.workunit import MetadataWorkUnit
from datahub.metadata.com.linkedin.pegasus2avro.common import (
AuditStamp,
ChangeAuditStamps,
)
from datahub.metadata.com.linkedin.pegasus2avro.metadata.snapshot import (
ChartSnapshot,
DashboardSnapshot,
)
from datahub.metadata.com.linkedin.pegasus2avro.mxe import MetadataChangeEvent
from datahub.metadata.schema_classes import (
ChartInfoClass,
ChartQueryClass,
ChartQueryTypeClass,
ChartTypeClass,
DashboardInfoClass,
OwnerClass,
OwnershipClass,
OwnershipTypeClass,
)
from datahub.utilities import config_clean
class MetabaseConfig(DatasetLineageProviderConfigBase):
    """Connection and mapping settings for the Metabase source.

    See the Metabase /api/session endpoint for details:
    https://www.metabase.com/docs/latest/api-documentation.html#post-apisession
    """

    connect_uri: str = Field(default="localhost:3000", description="Metabase host URL.")
    # Fix: these fields default to None, so a bare `str` annotation is wrong;
    # they must be Optional[str] for the model to validate consistently.
    username: Optional[str] = Field(default=None, description="Metabase username.")
    password: Optional[str] = Field(default=None, description="Metabase password.")
    database_alias_map: Optional[dict] = Field(
        default=None,
        description="Database name map to use when constructing dataset URN.",
    )
    engine_platform_map: Optional[Dict[str, str]] = Field(
        default=None,
        description="Custom mappings between metabase database engines and DataHub platforms",
    )
    default_schema: str = Field(
        default="public",
        description="Default schema name to use when schema is not provided in an SQL query",
    )

    @validator("connect_uri")
    def remove_trailing_slash(cls, v):
        # Normalise "host:3000/" -> "host:3000" so URL joins don't double up.
        return config_clean.remove_trailing_slashes(v)
@platform_name("Metabase")
@config_class(MetabaseConfig)
@support_status(SupportStatus.CERTIFIED)
@capability(SourceCapability.PLATFORM_INSTANCE, "Enabled by default")
class MetabaseSource(Source):
"""
This plugin extracts Charts, dashboards, and associated metadata. This plugin is in beta and has only been tested
on PostgreSQL and H2 database.
### Dashboard
[/api/dashboard](https://www.metabase.com/docs/latest/api-documentation.html#dashboard) endpoint is used to
retrieve the following dashboard information.
- Title and description
- Last edited by
- Owner
- Link to the dashboard in Metabase
- Associated charts
### Chart
[/api/card](https://www.metabase.com/docs/latest/api-documentation.html#card) endpoint is used to
retrieve the following information.
- Title and description
- Last edited by
- Owner
- Link to the chart in Metabase
- Datasource and lineage
The following properties for a chart are ingested in DataHub.
| Name | Description |
| ------------- | ----------------------------------------------- |
| `Dimensions` | Column names |
| `Filters` | Any filters applied to the chart |
| `Metrics` | All columns that are being used for aggregation |
"""
config: MetabaseConfig
report: SourceReport
platform = "metabase"
def __hash__(self):
    # Identity-based hash: `_get_ownership` below is wrapped in lru_cache,
    # which requires `self` to be hashable; hashing by id() keeps each
    # (mutable) source instance's cache entries distinct.
    return id(self)
def __init__(self, ctx: PipelineContext, config: MetabaseConfig):
    """Authenticate against Metabase and prepare an authenticated session.

    Raises HTTPError if the login request itself fails; a failure of the
    follow-up "current user" probe is only reported, not raised.
    """
    super().__init__(ctx)
    self.config = config
    self.report = SourceReport()
    login_response = requests.post(
        f"{self.config.connect_uri}/api/session",
        None,  # no form-encoded body; credentials are sent as JSON below
        {
            "username": self.config.username,
            # Fix: was a corrupted "<PASSWORD>.password" placeholder, which
            # is a syntax error; the password comes from the config.
            "password": self.config.password,
        },
    )
    login_response.raise_for_status()
    self.access_token = login_response.json().get("id", "")

    self.session = requests.session()
    self.session.headers.update(
        {
            "X-Metabase-Session": f"{self.access_token}",
            "Content-Type": "application/json",
            "Accept": "*/*",
        }
    )

    # Probe the session so a bad login surfaces in the report immediately.
    try:
        test_response = self.session.get(
            f"{self.config.connect_uri}/api/user/current"
        )
        test_response.raise_for_status()
    except HTTPError as e:
        self.report.report_failure(
            key="metabase-session",
            reason=f"Unable to retrieve user {self.config.username} information. %s"
            % str(e),
        )
def close(self) -> None:
    """Log out of Metabase by deleting the API session."""
    logout_response = requests.delete(
        f"{self.config.connect_uri}/api/session",
        headers={"X-Metabase-Session": self.access_token},
    )
    # Metabase answers 200 or 204 on a successful logout.
    if logout_response.status_code not in (200, 204):
        self.report.report_failure(
            key="metabase-session",
            reason=f"Unable to logout for user {self.config.username}",
        )
def emit_dashboard_mces(self) -> Iterable[MetadataWorkUnit]:
    """Yield one MetadataWorkUnit per dashboard listed by /api/dashboard."""
    try:
        listing = self.session.get(f"{self.config.connect_uri}/api/dashboard")
        listing.raise_for_status()
        for dashboard_info in listing.json():
            snapshot = self.construct_dashboard_from_api_data(dashboard_info)
            if snapshot is None:
                # Detail fetch failed; already reported downstream.
                continue
            workunit = MetadataWorkUnit(
                id=snapshot.urn,
                mce=MetadataChangeEvent(proposedSnapshot=snapshot),
            )
            self.report.report_workunit(workunit)
            yield workunit
    except HTTPError as http_error:
        self.report.report_failure(
            key="metabase-dashboard",
            reason=f"Unable to retrieve dashboards. " f"Reason: {str(http_error)}",
        )
@staticmethod
def get_timestamp_millis_from_ts_string(ts_str: str) -> int:
    """Convert a timestamp string to epoch milliseconds.

    Falls back to the current UTC time when the string cannot be parsed.
    """
    try:
        epoch_seconds = dp.parse(ts_str).timestamp()
    except (dp.ParserError, OverflowError):
        epoch_seconds = datetime.utcnow().timestamp()
    return int(epoch_seconds * 1000)
def construct_dashboard_from_api_data(
    self, dashboard_info: dict
) -> Optional[DashboardSnapshot]:
    """Build a DashboardSnapshot from /api/dashboard/{id}.

    Returns None (after reporting) when the detail request fails.
    """
    dashboard_id = dashboard_info.get("id", "")
    dashboard_url = f"{self.config.connect_uri}/api/dashboard/{dashboard_id}"
    try:
        dashboard_response = self.session.get(dashboard_url)
        dashboard_response.raise_for_status()
        dashboard_details = dashboard_response.json()
    except HTTPError as http_error:
        self.report.report_failure(
            key=f"metabase-dashboard-{dashboard_id}",
            reason=f"Unable to retrieve dashboard. " f"Reason: {str(http_error)}",
        )
        return None

    dashboard_urn = builder.make_dashboard_urn(
        self.platform, dashboard_details.get("id", "")
    )
    dashboard_snapshot = DashboardSnapshot(
        urn=dashboard_urn,
        aspects=[],
    )
    # Both created and lastModified stamps come from the last-edit info.
    last_edit_by = dashboard_details.get("last-edit-info") or {}
    modified_actor = builder.make_user_urn(last_edit_by.get("email", "unknown"))
    modified_ts = self.get_timestamp_millis_from_ts_string(
        f"{last_edit_by.get('timestamp')}"
    )
    title = dashboard_details.get("name", "") or ""
    description = dashboard_details.get("description", "") or ""
    last_modified = ChangeAuditStamps(
        created=AuditStamp(time=modified_ts, actor=modified_actor),
        lastModified=AuditStamp(time=modified_ts, actor=modified_actor),
    )

    chart_urns = []
    # Fix: the default must be an empty list, not the string "{}" --
    # iterating a string yields characters and crashes on card_info.get().
    cards_data = dashboard_details.get("ordered_cards", [])
    for card_info in cards_data:
        chart_urn = builder.make_chart_urn(self.platform, card_info.get("id", ""))
        chart_urns.append(chart_urn)

    dashboard_info_class = DashboardInfoClass(
        description=description,
        title=title,
        charts=chart_urns,
        lastModified=last_modified,
        dashboardUrl=f"{self.config.connect_uri}/dashboard/{dashboard_id}",
        customProperties={},
    )
    dashboard_snapshot.aspects.append(dashboard_info_class)

    # Ownership
    ownership = self._get_ownership(dashboard_details.get("creator_id", ""))
    if ownership is not None:
        dashboard_snapshot.aspects.append(ownership)

    return dashboard_snapshot
@lru_cache(maxsize=None)
def _get_ownership(self, creator_id: int) -> Optional[OwnershipClass]:
    """Resolve a Metabase creator id to a DataHub ownership aspect.

    Results are cached per (instance, creator_id); returns None when the
    user lookup fails.
    """
    user_info_url = f"{self.config.connect_uri}/api/user/{creator_id}"
    try:
        user_info_response = self.session.get(user_info_url)
        user_info_response.raise_for_status()
        user_details = user_info_response.json()
    except HTTPError as http_error:
        self.report.report_failure(
            key=f"metabase-user-{creator_id}",
            reason=f"Unable to retrieve User info. " f"Reason: {str(http_error)}",
        )
        return None

    owner_urn = builder.make_user_urn(user_details.get("email", ""))
    if owner_urn is None:
        return None
    return OwnershipClass(
        owners=[
            OwnerClass(
                owner=owner_urn,
                type=OwnershipTypeClass.DATAOWNER,
            )
        ]
    )
def emit_card_mces(self) -> Iterable[MetadataWorkUnit]:
    """Yield one MetadataWorkUnit per card (chart) listed by /api/card."""
    try:
        listing = self.session.get(f"{self.config.connect_uri}/api/card")
        listing.raise_for_status()
        for card_info in listing.json():
            snapshot = self.construct_card_from_api_data(card_info)
            if snapshot is None:
                # Detail fetch failed; already reported downstream.
                continue
            workunit = MetadataWorkUnit(
                id=snapshot.urn,
                mce=MetadataChangeEvent(proposedSnapshot=snapshot),
            )
            self.report.report_workunit(workunit)
            yield workunit
    except HTTPError as http_error:
        self.report.report_failure(
            key="metabase-cards",
            reason=f"Unable to retrieve cards. " f"Reason: {str(http_error)}",
        )
        return None
def construct_card_from_api_data(self, card_data: dict) -> Optional[ChartSnapshot]:
    """Build a ChartSnapshot from /api/card/{id}.

    Returns None (after reporting) when the detail request fails.
    """
    card_id = card_data.get("id", "")
    card_url = f"{self.config.connect_uri}/api/card/{card_id}"
    try:
        card_response = self.session.get(card_url)
        card_response.raise_for_status()
        card_details = card_response.json()
    except HTTPError as http_error:
        self.report.report_failure(
            key=f"metabase-card-{card_id}",
            reason=f"Unable to retrieve Card info. " f"Reason: {str(http_error)}",
        )
        return None

    chart_snapshot = ChartSnapshot(
        urn=builder.make_chart_urn(self.platform, card_id),
        aspects=[],
    )

    # Both audit stamps are derived from the last-edit info.
    last_edit_by = card_details.get("last-edit-info") or {}
    modified_actor = builder.make_user_urn(last_edit_by.get("email", "unknown"))
    modified_ts = self.get_timestamp_millis_from_ts_string(
        f"{last_edit_by.get('timestamp')}"
    )
    last_modified = ChangeAuditStamps(
        created=AuditStamp(time=modified_ts, actor=modified_actor),
        lastModified=AuditStamp(time=modified_ts, actor=modified_actor),
    )

    chart_info = ChartInfoClass(
        type=self._get_chart_type(
            card_details.get("id", ""), card_details.get("display")
        ),
        description=card_details.get("description") or "",
        title=card_details.get("name") or "",
        lastModified=last_modified,
        chartUrl=f"{self.config.connect_uri}/card/{card_id}",
        inputs=self.get_datasource_urn(card_details),
        customProperties=self.construct_card_custom_properties(card_details),
    )
    chart_snapshot.aspects.append(chart_info)

    # Native (hand-written SQL) cards also carry their raw query text.
    if card_details.get("query_type", "") == "native":
        raw_query = (
            card_details.get("dataset_query", {}).get("native", {}).get("query", "")
        )
        chart_snapshot.aspects.append(
            ChartQueryClass(
                rawQuery=raw_query,
                type=ChartQueryTypeClass.SQL,
            )
        )

    # Ownership
    ownership = self._get_ownership(card_details.get("creator_id", ""))
    if ownership is not None:
        chart_snapshot.aspects.append(ownership)

    return chart_snapshot
def _get_chart_type(self, card_id: int, display_type: str) -> Optional[str]:
    """Map a Metabase display type onto a DataHub chart-type constant.

    Reports a warning and returns None when the display type is missing or
    has no supported mapping.
    """
    type_mapping = {
        "table": ChartTypeClass.TABLE,
        "bar": ChartTypeClass.BAR,
        "line": ChartTypeClass.LINE,
        "row": ChartTypeClass.BAR,
        "area": ChartTypeClass.AREA,
        "pie": ChartTypeClass.PIE,
        "funnel": ChartTypeClass.BAR,
        "scatter": ChartTypeClass.SCATTER,
        "scalar": ChartTypeClass.TEXT,
        "smartscalar": ChartTypeClass.TEXT,
        "pivot": ChartTypeClass.TABLE,
        "waterfall": ChartTypeClass.BAR,
        "progress": None,
        "combo": None,
        "gauge": None,
        "map": None,
    }

    if not display_type:
        self.report.report_warning(
            key=f"metabase-card-{card_id}",
            reason=f"Card type {display_type} is missing. Setting to None",
        )
        return None

    # Membership test instead of try/except KeyError — same outcome.
    if display_type in type_mapping:
        return type_mapping[display_type]

    self.report.report_warning(
        key=f"metabase-card-{card_id}",
        reason=f"Chart type {display_type} not supported. Setting to None",
    )
    return None
def construct_card_custom_properties(self, card_details: dict) -> Dict:
    """Build the Metrics/Filters/Dimensions custom-properties dict for a card.

    A result column whose ``field_ref`` contains "aggregation" is treated as
    a metric; every other column is a dimension. Filters come from the card's
    dataset query and are rendered as their string representation.

    Parameters
    ----------
    card_details : dict
        Full card payload from the Metabase API.

    Returns
    -------
    Dict
        {"Metrics": ..., "Filters": ..., "Dimensions": ...} with "" for
        anything absent.
    """
    result_metadata = card_details.get("result_metadata") or []
    metrics, dimensions = [], []
    for meta_data in result_metadata:
        display_name = meta_data.get("display_name", "") or ""
        # Explicit if/else instead of a conditional expression used purely
        # for its append() side effects.
        if "aggregation" in meta_data.get("field_ref", ""):
            metrics.append(display_name)
        else:
            dimensions.append(display_name)

    filters = (card_details.get("dataset_query", {}).get("query", {})).get(
        "filter", []
    )

    return {
        "Metrics": ", ".join(metrics),
        "Filters": f"{filters}" if len(filters) else "",
        "Dimensions": ", ".join(dimensions),
    }
def get_datasource_urn(self, card_details):
    """Resolve the dataset URN(s) feeding a card.

    GUI ("query") cards name a source table id, which is resolved via the
    Metabase table API; native (SQL) cards are parsed with sqllineage to
    discover their source tables.

    Returns
    -------
    list | None
        Dataset URNs for each discovered source table, or None when lineage
        extraction from a native query fails (a failure is reported).
    """
    platform, database_name, platform_instance = self.get_datasource_from_id(
        card_details.get("database_id", "")
    )
    query_type = card_details.get("dataset_query", {}).get("type", {})
    source_paths = set()

    if query_type == "query":
        source_table_id = (
            card_details.get("dataset_query", {})
            .get("query", {})
            .get("source-table")
        )
        if source_table_id is not None:
            schema_name, table_name = self.get_source_table_from_id(source_table_id)
            if table_name:
                source_paths.add(
                    f"{schema_name + '.' if schema_name else ''}{table_name}"
                )
    else:
        # Pre-bind so the failure report below can never hit an unbound
        # name if extracting the query text itself raises.
        raw_query = ""
        try:
            raw_query = (
                card_details.get("dataset_query", {})
                .get("native", {})
                .get("query", "")
            )
            parser = LineageRunner(raw_query)

            for table in parser.source_tables:
                sources = str(table).split(".")
                source_schema, source_table = sources[-2], sources[-1]
                if source_schema == "<default>":
                    source_schema = str(self.config.default_schema)

                source_paths.add(f"{source_schema}.{source_table}")
        except Exception as e:
            self.report.report_failure(
                key="metabase-query",
                reason=f"Unable to retrieve lineage from query. "
                f"Query: {raw_query} "
                f"Reason: {str(e)} ",
            )
            return None

    # Create dataset URNs (the previous dead `dataset_urn = []`
    # pre-assignment was removed).
    dbname = f"{database_name + '.' if database_name else ''}"
    return [
        builder.make_dataset_urn_with_platform_instance(
            platform=platform,
            name=f"{dbname}{tbl}",
            platform_instance=platform_instance,
            env=self.config.env,
        )
        for tbl in source_paths
    ]
@lru_cache(maxsize=None)
def get_source_table_from_id(self, table_id):
    """Look up (schema, name) for a Metabase table id.

    Cached per table id; returns (None, None) after reporting a failure when
    the table API call errors.
    """
    try:
        response = self.session.get(
            f"{self.config.connect_uri}/api/table/{table_id}"
        )
        response.raise_for_status()
    except HTTPError as http_error:
        self.report.report_failure(
            key=f"metabase-table-{table_id}",
            reason=f"Unable to retrieve source table. "
            f"Reason: {str(http_error)}",
        )
        return None, None

    payload = response.json()
    return payload.get("schema", ""), payload.get("name", "")
@lru_cache(maxsize=None)
def get_datasource_from_id(self, datasource_id):
try:
dataset_response = self.session.get(
f"{self.config.connect_uri}/api/database/{datasource_id}"
)
dataset_response.raise_for_status()
dataset_json = dataset_response.json()
except HTTPError as http_error:
self.report.report_failure(
key=f"metabase-datasource-{datasource_id}",
reason=f"Unable to retrieve Datasource. " f"Reason: {str(http_error)}",
)
return None, None
# Map engine names to what datahub expects in
# https://github.com/datahub-project/datahub/blob/master/metadata-service/war/src/main/resources/boot/data_platforms.json
engine = dataset_json.get("engine", "")
platform = engine
engine_mapping = {
"sparksql": "spark",
"mongo": "mongodb",
"presto-jdbc": "presto",
"sqlserver": "mssql",
"bigquery-cloud-sdk": "bigquery",
}
if self.config.engine_platform_map is not None:
| |
<reponame>jaymegordo/SMSEventLog
import re
from collections import defaultdict as dd
from functools import partial
from typing import *
import pandas as pd
from PyQt6.QtCore import QAbstractTableModel, QModelIndex, Qt, pyqtSlot
from PyQt6.QtGui import QColor
from smseventlog import config as cf
from smseventlog import dbtransaction as dbt
from smseventlog import dt
from smseventlog import functions as f
from smseventlog import getlog
from smseventlog.gui import _global as gbl
from smseventlog.utils import dbmodel as dbm
log = getlog(__name__)
# irow, icol = row/column integer locations eg 3, 5
# row, col = row/column index names eg (if no actual index) 3, 'StartDate'
# NOTE calling index.data() defaults to role=DisplayRole, NOT model.data(role=RawDataRole) careful!
# Default cell text alignment per pandas dtype.
# (The previous `global m_align` was a no-op — `global` has no effect at
# module scope — so it was removed.)
m_align = {
    'object': Qt.AlignmentFlag.AlignLeft,
    'float64': Qt.AlignmentFlag.AlignRight,
    'int64': Qt.AlignmentFlag.AlignRight,
    'bool': Qt.AlignmentFlag.AlignCenter,
    'datetime64[ns]': Qt.AlignmentFlag.AlignCenter}
class TableDataModel(QAbstractTableModel):
    # Custom item-data roles understood by this model in addition to the
    # standard Qt roles: raw (unformatted) values, (row, col) index names,
    # dates, raw background colors, and integer / QModelIndex positions.
    # NOTE(review): values 64-69 sit below Qt.ItemDataRole.UserRole (256) —
    # confirm they cannot collide with roles Qt uses internally.
    RawDataRole = 64
    NameIndexRole = 65
    DateRole = 66
    RawBackgroundRole = 67
    iIndexRole = 68
    qtIndexRole = 69
def __init__(self, parent, df=None):
    """Init model from its parent TableView; optionally load an initial df.

    NOTE: ``f.set_self(vars(), exclude='df')`` at the end assigns every local
    defined below onto ``self`` — the local names here ARE the attribute
    names, so do not rename them casually.
    """
    super().__init__(parent)
    # table model must be created from TableWidget()/TableView() parent
    _df = pd.DataFrame()  # currently displayed (possibly filtered) frame
    _df_orig = pd.DataFrame()  # unfiltered copy, kept for totals/reset
    _df_pre_dyn_filter = None  # snapshot taken while a dynamic filter is active
    _resort = lambda: None  # no-op resort placeholder
    _cols = []  # column names, mirrored from the df in set_df()
    view = parent
    table_widget = view.parent  # sketch - could also be dlgs.TableDialog
    formats = parent.formats  # {col_name: format string} for display text
    highlight_funcs = parent.highlight_funcs
    m_display, m_color_bg, m_color_text = {}, {}, {}  # static per-cell caches
    current_row = -1
    self.highlight_rows = True
    selection_color = QColor(cf.config['color']['bg']['yellowrow'])
    display_color = True
    self.set_queue()
    _queue_locked = False
    self.alignments = {}
    self.block_resort_style = False
    color_enabled = False
    f.set_self(vars(), exclude='df')
    if not df is None:
        self.set_df(df=df)
@classmethod
def example(cls, name='EventLog'):
    """Build a demo model for interactive testing (defaults to EventLog)."""
    from smseventlog.gui import tables as tbls

    gbl.get_qt_app()  # ensure a QApplication exists before creating widgets
    table_widget = getattr(tbls, name, tbls.EventLog)()
    df = table_widget.query.get_df(default=True)

    model = table_widget.view.data_model
    model.set_df(df)
    return model
def set_df(self, df, center_cols=None):
    """Set or change pd DataFrame to show
    - Used when reloading full new table

    NOTE: locals defined here become instance attributes through
    ``f.set_self(vars(), exclude='df')`` — the local names ARE the attribute
    names (_df_orig, _df_pre_dyn_filter, parent, query, ...).
    """
    _df_orig = df.copy()  # unfiltered copy for totals/reset
    _df_pre_dyn_filter = None  # Clear dynamic filter
    self._cols = list(df.columns)
    mcols = dd(list)  # dict of col_type: list of ints
    parent = self.parent
    query = self.table_widget.query

    # center_cols is dynamic
    mcols['center'] = self.get_col_idxs(center_cols)

    # convert col headers to int indexes for faster lookups (maybe lol)
    set_mcols = ('disabled', 'fill_enabled', 'datetime', 'time', 'sort_filter', 'no_space', 'highlight_dates')
    for k in set_mcols:
        mcols[k] = self.get_col_idxs(parent.mcols[k])

    self.block_resort_style = True

    # date cols have to exclude datetime + time cols
    date_cols = self.get_col_idxs(df.dtypes[df.dtypes == 'datetime64[ns]'].index)
    mcols['date'] = [i for i in date_cols if not i in mcols['datetime'] + mcols['time']]

    self.mcols = mcols
    self.set_date_formats()
    self.set_static_dfs(df=df, reset=True)

    f.set_self(vars(), exclude='df')
    self.df = df  # triggers the df.setter (model-reset signals etc.)

    # HACK to block recompute sort cols on initial set
    # but DO when sort/filters are changed
    self.block_resort_style = False
def set_date_formats(self):
    """Assign default display format strings for date/datetime/time columns."""
    fmt_map = (
        ('date', '{:%Y-%m-%d}'),
        ('datetime', '{:%Y-%m-%d %H:%M}'),
        ('time', '{:%H:%M}'))

    for col_type, fmt in fmt_map:
        for icol in self.mcols[col_type]:
            self.formats[self.headerData(icol)] = fmt
def update_rows_label(self):
    """Cache visible/total row counts and refresh mainwindow's rows label."""
    self.visible_rows = self._df.shape[0]
    self.total_rows = self._df_orig.shape[0]

    mw = self.view.mainwindow
    if mw is not None:
        mw.update_rows_label()
@property
def df(self):
    # Currently displayed (possibly filtered/sorted) DataFrame.
    return self._df
@df.setter
def df(self, dataFrame):
    """Setter should only be used internal to DataFrameModel. Others should use set_df()

    Wraps the swap in the full layout/model reset signal pairs so attached
    views refresh, then updates row-count labels and sort-column styling.
    The emit order (about-to-change -> swap -> reset/changed) matters.
    """
    self.layoutAboutToBeChanged.emit()
    self.modelAboutToBeReset.emit()
    self._df = dataFrame
    self.update_rows_label()
    self._reset_sort_cols_style()
    self.modelReset.emit()
    self.layoutChanged.emit()
    # only resize when there are rows to measure
    if self._df.shape[0] > 0:
        self.parent.resizeRowsToContents()
def search(self, search_text: str) -> list:
    """Return [(row_name, col_name), ...] of cells whose display text matches.

    Matching is a case-insensitive regex search over the cached display
    strings in self.m_display; results are sorted by row.
    - TODO this searches everything in m_display, need to filter to ONLY active df

    (The previous unused local ``hidden_cols`` — a dead read of
    self.view.mcols['hide'] — was removed.)
    """
    if search_text.strip() == '':
        return []

    expr = re.compile(search_text, re.IGNORECASE)

    # get dict of {col_name: (index_name, ...)} for matching rows
    m_out = {k: tuple(k2 for k2, v in m2.items() if expr.search(str(v))) for k, m2 in self.m_display.items()}

    # convert dict to list of (row_name, col_name), sort by row THEN col
    lst_out = [(v2, k) for k, v in m_out.items() for v2 in v]
    lst_out.sort(key=lambda x: x[0])

    return lst_out
def update_static_df(self, m_new: dict, m_update: dict):
    """Merge per-column row values from m_new into the static cache m_update.

    Used to refresh a single row in place.

    Parameters
    ----------
    m_new : dict
        new values, {col_name: {row_name: val}}\n
    m_update : dict
        cache updated in place, one of (m_display, m_color_bg, m_color_text)
    """
    for col_name, new_vals in m_new.items():
        m_update[col_name].update(new_vals)
def get_static_dfs(self, df) -> tuple:
    """Build the three static cell caches for *df* (full table or one row).

    Returns
    -------
    tuple[m_display, m_color_bg, m_color_text]
        Nested dicts of display strings, background colors and text colors.
    """
    display = f.df_to_strings(df=df, formats=self.formats).to_dict()

    color_for = lambda role: f.df_to_color(
        df=df, highlight_funcs=self.highlight_funcs, role=role).to_dict()

    bg = color_for(Qt.ItemDataRole.BackgroundRole)
    text = color_for(Qt.ItemDataRole.ForegroundRole)

    return (display, bg, text)
def set_static_dfs(self, df, reset=False):
    """Cache stringified values + colors for fast cell display.

    Parameters
    ----------
    df : pd.DataFrame
        full df or single row
    reset : bool
        replace the caches when True, else merge (single-row append)
    """
    # update all int column display format
    # NOTE this updates TableView's formats too (shared obj)
    int_cols = list(df.select_dtypes(int).columns)
    self.formats.update(**f.dtypes_dict('{:,.0f}', int_cols))

    new_dfs = self.get_static_dfs(df=df)

    if reset:
        self.m_display, self.m_color_bg, self.m_color_text = new_dfs
    else:
        # called when adding a single row — merge into the existing caches
        current = (self.m_display, self.m_color_bg, self.m_color_text)
        for m_new, m_orig in zip(new_dfs, current):
            self.update_static_df(m_new=m_new, m_update=m_orig)

    self.set_stylemap(df=df)
def set_stylemap(self, df=None, col: str = None):
    """Merge colors from the query's stylemap into the static color caches.

    Only updates existing entries, so it can be called with the full df or a
    single row.
    """
    if df is None:
        df = self.df

    if df.shape[0] == 0:
        return

    # only avail + FCSummary use this so far
    # stylemap is a tuple of two nested dicts (background, foreground)
    m_stylemap = self.query.get_stylemap(df=df, col=col)
    if m_stylemap is None:
        return

    bg_map, text_map = m_stylemap
    # loop stylemap cols, update full column values
    for col_name, col_vals in bg_map.items():
        self.m_color_bg[col_name].update(col_vals)
        self.m_color_text[col_name].update(text_map[col_name])
@property
def dbtable_default(self):
    # Default database table object, delegated to the owning table_widget.
    return self.table_widget.get_dbtable()
@pyqtSlot()
def beginDynamicFilter(self):
    """Effects of using the "filter" function will not become permanent until endDynamicFilter called"""
    if self._df_pre_dyn_filter is None:
        # snapshot the current frame so cancel/end can restore or commit it
        # (debug prints replaced with the module logger for consistency)
        log.debug('Begin new dynamic filter')
        self._df_pre_dyn_filter = self.df.copy()
    else:
        # Already dynamically filtering, so don't override that
        log.debug('SAME DYNAMIC FILTER MODEL')
@pyqtSlot()
def endDynamicFilter(self):
    """Makes permanent the effects of the dynamic filter"""
    # debug print replaced with the module logger for consistency
    log.debug(' * * * RESETING DYNAMIC')
    self._df_pre_dyn_filter = None
@pyqtSlot()
def cancelDynamicFilter(self):
    """Cancel the dynamic filter and restore the pre-filter DataFrame."""
    # Guard: cancelling when no dynamic filter is active used to raise
    # AttributeError (None.copy()); now it is a safe no-op.
    if self._df_pre_dyn_filter is not None:
        self.df = self._df_pre_dyn_filter.copy()
        self._df_pre_dyn_filter = None
def headerData(self, i, orientation=Qt.Orientation.Horizontal, role=Qt.ItemDataRole.DisplayRole):
    """Return data for QTableView header

    Horizontal display role -> column name ('' past the end); vertical
    display role -> the df index value as int; tooltip role -> per-column
    tooltip text from parent.mcols['tooltip'] when defined; otherwise None.
    """
    cols = self._cols
    if role == Qt.ItemDataRole.DisplayRole:
        if orientation == Qt.Orientation.Horizontal:
            if i < len(cols):
                return cols[i]
            else:
                return ''
        elif orientation == Qt.Orientation.Vertical:
            # return i
            # show the DataFrame index value, not the visual row position
            # NOTE(review): int() assumes a numeric index — confirm
            return int(self.df.index[i])
    elif role == Qt.ItemDataRole.ToolTipRole:
        # show tooltip text
        if self.parent.mcols['tooltip']:
            return self.parent.mcols['tooltip'].get(self.get_col_name(i), None)
    return None
def get_background_colors_from_df(self, df):
    """Return a df of 'background-color: ...' css strings for style.apply.

    (df is passed in by style.apply; the current table's BackgroundRole
    colors are collected cell-by-cell via self.data.)
    """
    to_css = lambda x: f'background-color: {str(x)};'

    # collect each cell's BackgroundRole color as [list of (tuples of QColors)]
    rows = [
        tuple(
            self.data(name_index=(row_name, col_name), role=Qt.ItemDataRole.BackgroundRole)
            for col_name in df.columns)
        for row_name in df.index]

    df_colors = pd.DataFrame(data=rows, columns=df.columns, index=df.index)

    # convert QColor back to hex for the styler
    for irow in df_colors.index:
        for col in df_colors.columns:
            val = df_colors.loc[irow, col]
            css_val = to_css(val.name()) if isinstance(val, QColor) else to_css(val)
            df_colors.loc[irow, col] = css_val

    return df_colors
def data(
self,
index: QModelIndex = None,
role: int = RawDataRole,
i_index: int = None,
name_index: str = None):
"""TableView asks the model for data to display, edit, paint etc
convert index integer values to index names for df._get_value() > fastest lookup"""
df = self.df
irow, icol, row, col = None, None, None, None
if not index is None and index.isValid():
irow, icol = self.getRowCol(index)
elif not i_index is None:
irow, icol = i_index[0], i_index[1]
elif not name_index is None:
row, col = name_index[0], name_index[1]
else:
return None
if col is None:
row, col = df.index[irow], df.columns[icol]
if role == Qt.ItemDataRole.DisplayRole:
try:
return str(self.m_display[col][row])
except KeyError:
return None
elif role in (Qt.ItemDataRole.BackgroundRole, Qt.ItemDataRole.ForegroundRole):
# ask table_widget for cell color given df, irow, icol
if not self.display_color:
return None
# check self.m_color_display first
try:
if role == Qt.ItemDataRole.BackgroundRole:
color = self.m_color_bg[col][row]
elif role == Qt.ItemDataRole.ForegroundRole:
color = self.m_color_text[col][row]
except KeyError:
# if static dfs not set at init properly, just return None so sentry doesn't send 1000 errors
# log.warning(f'Couldn\'t get value for row: {row}, col: {col}, role: {role}')
return None
if not pd.isnull(color):
return color
# TODO somehow merge complex highlight funcs
# func = self.parent.highlight_funcs_complex[col]
# if not func is None:
# try:
# color = func(df=df, row=row, col=col, irow=irow, icol=icol, val=val, role=role, index=index)
| |
# -*- coding: utf-8 -*-
"""
Profile: http://hl7.org/fhir/StructureDefinition/AdverseEvent
Release: R5
Version: 4.5.0
Build ID: 0d95498
Last updated: 2021-04-03T00:34:11.075+00:00
"""
import typing
from pydantic import Field
from pydantic import root_validator
from pydantic.error_wrappers import ErrorWrapper, ValidationError
from pydantic.errors import MissingError, NoneIsNotAllowedError
from . import fhirtypes
from . import domainresource
class AdverseEvent(domainresource.DomainResource):
"""Disclaimer: Any field name ends with ``__ext`` doesn't part of
Resource StructureDefinition, instead used to enable Extensibility feature
for FHIR Primitive Data Types.
Medical care, research study or other healthcare event causing physical
injury.
An event (i.e. any change to current patient status) that may be related to
unintended effects on a patient or research subject. The unintended
effects may require additional monitoring, treatment or hospitalization or
may result in death. The AdverseEvent resource also extends to potential
or avoided events that could have had such effects.
"""
resource_type = Field("AdverseEvent", const=True)
actuality: fhirtypes.Code = Field(
None,
alias="actuality",
title="actual | potential",
description=(
"Whether the event actually happened, or just had the potential to. "
"Note that this is independent of whether anyone was affected or harmed"
" or how severely."
),
# if property is element of this resource.
element_property=True,
element_required=True,
# note: Enum values can be used in validation,
# but use in your own responsibilities, read official FHIR documentation.
enum_values=["actual", "potential"],
)
actuality__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_actuality", title="Extension field for ``actuality``."
)
category: typing.List[fhirtypes.CodeableConceptType] = Field(
None,
alias="category",
title=(
"wrong-patient | procedure-mishap | medication-mishap | device | "
"unsafe-physical-environment | hospital-aquired-infection | wrong-body-"
"site"
),
description="The overall type of event, intended for search and filtering purposes.",
# if property is element of this resource.
element_property=True,
)
code: fhirtypes.CodeableConceptType = Field(
None,
alias="code",
title="Event or incident that occurred or was averted",
description=(
"Specific event that occurred or that was averted, such as patient "
"fall, wrong organ removed, or wrong blood transfused."
),
# if property is element of this resource.
element_property=True,
)
contributingFactor: typing.List[
fhirtypes.AdverseEventContributingFactorType
] = Field(
None,
alias="contributingFactor",
title=(
"Contributing factors suspected to have increased the probability or "
"severity of the adverse event"
),
description=(
"The contributing factors suspected to have increased the probability "
"or severity of the adverse event."
),
# if property is element of this resource.
element_property=True,
)
detected: fhirtypes.DateTime = Field(
None,
alias="detected",
title="When the event was detected",
description=(
"Estimated or actual date the AdverseEvent began, in the opinion of the"
" reporter."
),
# if property is element of this resource.
element_property=True,
)
detected__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_detected", title="Extension field for ``detected``."
)
encounter: fhirtypes.ReferenceType = Field(
None,
alias="encounter",
title="The Encounter associated with the start of the AdverseEvent",
description=None,
# if property is element of this resource.
element_property=True,
# note: Listed Resource Type(s) should be allowed as Reference.
enum_reference_types=["Encounter"],
)
identifier: typing.List[fhirtypes.IdentifierType] = Field(
None,
alias="identifier",
title="Business identifier for the event",
description=(
"Business identifiers assigned to this adverse event by the performer "
"or other systems which remain constant as the resource is updated and "
"propagates from server to server."
),
# if property is element of this resource.
element_property=True,
)
location: fhirtypes.ReferenceType = Field(
None,
alias="location",
title="Location where adverse event occurred",
description="The information about where the adverse event occurred.",
# if property is element of this resource.
element_property=True,
# note: Listed Resource Type(s) should be allowed as Reference.
enum_reference_types=["Location"],
)
mitigatingAction: typing.List[fhirtypes.AdverseEventMitigatingActionType] = Field(
None,
alias="mitigatingAction",
title=(
"Ameliorating actions taken after the adverse event occured in order to"
" reduce the extent of harm"
),
description=(
"The ameliorating action taken after the adverse event occured in order"
" to reduce the extent of harm."
),
# if property is element of this resource.
element_property=True,
)
occurrenceDateTime: fhirtypes.DateTime = Field(
None,
alias="occurrenceDateTime",
title="When the event occurred",
description="The date (and perhaps time) when the adverse event occurred.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e occurrence[x]
one_of_many="occurrence",
one_of_many_required=False,
)
occurrenceDateTime__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None,
alias="_occurrenceDateTime",
title="Extension field for ``occurrenceDateTime``.",
)
occurrencePeriod: fhirtypes.PeriodType = Field(
None,
alias="occurrencePeriod",
title="When the event occurred",
description="The date (and perhaps time) when the adverse event occurred.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e occurrence[x]
one_of_many="occurrence",
one_of_many_required=False,
)
occurrenceTiming: fhirtypes.TimingType = Field(
None,
alias="occurrenceTiming",
title="When the event occurred",
description="The date (and perhaps time) when the adverse event occurred.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e occurrence[x]
one_of_many="occurrence",
one_of_many_required=False,
)
outcome: typing.List[fhirtypes.CodeableConceptType] = Field(
None,
alias="outcome",
title="Type of outcome from the adverse event",
description=(
"Describes the type of outcome from the adverse event, such as "
"resolved, recovering, ongoing, resolved-with-sequelae, or fatal."
),
# if property is element of this resource.
element_property=True,
)
participant: typing.List[fhirtypes.AdverseEventParticipantType] = Field(
None,
alias="participant",
title=(
"Who was involved in the adverse event or the potential adverse event "
"and what they did"
),
description=(
"Indicates who or what participated in the adverse event and how they "
"were involved."
),
# if property is element of this resource.
element_property=True,
)
preventiveAction: typing.List[fhirtypes.AdverseEventPreventiveActionType] = Field(
None,
alias="preventiveAction",
title="Preventive actions that contributed to avoiding the adverse event",
description=None,
# if property is element of this resource.
element_property=True,
)
recordedDate: fhirtypes.DateTime = Field(
None,
alias="recordedDate",
title="When the event was recorded",
description=(
"The date on which the existence of the AdverseEvent was first " "recorded."
),
# if property is element of this resource.
element_property=True,
)
recordedDate__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_recordedDate", title="Extension field for ``recordedDate``."
)
recorder: fhirtypes.ReferenceType = Field(
None,
alias="recorder",
title="Who recorded the adverse event",
description=(
"Information on who recorded the adverse event. May be the patient or "
"a practitioner."
),
# if property is element of this resource.
element_property=True,
# note: Listed Resource Type(s) should be allowed as Reference.
enum_reference_types=[
"Patient",
"Practitioner",
"PractitionerRole",
"RelatedPerson",
],
)
resultingCondition: typing.List[fhirtypes.ReferenceType] = Field(
None,
alias="resultingCondition",
title="Effect on the subject due to this event",
description=(
"Information about the condition that occurred as a result of the "
"adverse event, such as hives due to the exposure to a substance (for "
"example, a drug or a chemical) or a broken leg as a result of the "
"fall."
),
# if property is element of this resource.
element_property=True,
# note: Listed Resource Type(s) should be allowed as Reference.
enum_reference_types=["Condition"],
)
seriousness: fhirtypes.CodeableConceptType = Field(
None,
alias="seriousness",
title="Seriousness or gravity of the event",
description=(
"Assessment whether this event, or averted event, was of clinical "
"importance."
),
# if property is element of this resource.
element_property=True,
)
status: fhirtypes.Code = Field(
None,
alias="status",
title="in-progress | completed | entered-in-error | unknown",
description="The current state of the adverse event or potential adverse event.",
# if property is element of this resource.
element_property=True,
element_required=True,
# note: Enum values can be used in validation,
# but use in your own responsibilities, read official FHIR documentation.
enum_values=["in-progress", "completed", "entered-in-error", "unknown"],
)
status__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_status", title="Extension field for ``status``."
)
study: typing.List[fhirtypes.ReferenceType] = Field(
None,
alias="study",
title="Research study that the subject is enrolled in",
description="The research study that the subject is enrolled in.",
# if property is element of this resource.
element_property=True,
# note: Listed Resource Type(s) should be allowed as Reference.
enum_reference_types=["ResearchStudy"],
)
subject: fhirtypes.ReferenceType = Field(
...,
alias="subject",
title="Subject impacted by event",
description="This subject or group impacted by the event.",
# if property is element of this resource.
element_property=True,
# note: Listed Resource Type(s) should be allowed as Reference.
enum_reference_types=["Patient", "Group", "Practitioner", "RelatedPerson"],
)
supportingInfo: typing.List[fhirtypes.AdverseEventSupportingInfoType] = Field(
None,
alias="supportingInfo",
title="Supporting information relevant to the event",
description=None,
# if property is element of this resource.
element_property=True,
)
suspectEntity: typing.List[fhirtypes.AdverseEventSuspectEntityType] = Field(
None,
alias="suspectEntity",
title="The suspected agent causing the adverse event",
description=(
"Describes the entity that is suspected to have caused the adverse "
"event."
),
# if property is element of this resource.
element_property=True,
)
@classmethod
def elements_sequence(cls):
"""returning all elements names from ``AdverseEvent`` according specification,
with preserving | |
<gh_stars>0
import hashlib
import json
import logging
import os
import time
import functools
from threading import RLock
from node import constants, datastore, routingtable
from node.protocol import proto_store
class DHT(object):
def __init__(self, transport, market_id, settings, db_connection):
    """Init DHT state: routing table, datastore, peer lists and an RLock.

    Parameters
    ----------
    transport : transport object used to create/reach peers
    market_id : id used for log naming and routing-table scoping
    settings : dict-like; must contain 'guid'
    db_connection : connection handed to the sqlite-backed datastore
    """
    self.log = logging.getLogger(
        '[%s] %s' % (market_id, self.__class__.__name__)
    )
    self.settings = settings
    self.known_nodes = []  # (uri, guid, nickname) tuples seen so far
    self.searches = []  # in-flight iterative searches
    self.active_peers = []
    self.transport = transport
    self.market_id = market_id

    # Routing table
    self.routing_table = routingtable.OptimizedTreeRoutingTable(
        self.settings['guid'], market_id)
    self.data_store = datastore.SqliteDataStore(db_connection)

    # re-entrant lock used by the @_synchronized decorator below
    self._lock = RLock()
# pylint: disable=no-self-argument
# pylint: disable=not-callable
def _synchronized(f):
    """Decorator for synchronizing access to DHT attributes."""
    @functools.wraps(f)
    def wrapper(self, *args, **kwargs):
        # serialize every call through the instance's re-entrant lock
        with self._lock:
            return f(self, *args, **kwargs)
    return wrapper
@_synchronized
def get_active_peers(self):
    # Lock-guarded accessor; returns the live list object itself, not a copy.
    return self.active_peers
@_synchronized
def start(self, seed_peer):
    """ This method executes only when the server is starting up for the
    first time and add the seed peer(s) to known node list and
    active peer list. It then executes a findNode against the network
    for itself to refresh buckets.

    :param seed_peer: (CryptoPeerConnection) for seed peer
    """
    seed_uri = 'tcp://%s:%s' % (seed_peer.ip, seed_peer.port)
    self._add_known_node((seed_uri, seed_peer.guid, seed_peer.nickname))

    self.log.debug('Starting Seed Peer: %s', seed_peer.nickname)
    # NOTE(review): add_peer's signature is (hostname, port, pubkey, guid,
    # nickname, ...) — these positional args look shifted; confirm intent.
    self.add_peer(seed_peer.address,
                  seed_peer.pub,
                  seed_peer.guid,
                  seed_peer.nickname)

    # refresh our own buckets by searching for our own GUID
    self.iterative_find(self.settings['guid'], self.known_nodes,
                        'findNode')
def remove_peer(self, guid):
    """Drop a (non-seed) peer from active peers, routing table and mediation map,
    then refresh the GUI peer list.

    NOTE(review): unlike the other mutators this is not wrapped in
    @_synchronized — confirm whether skipping the lock is deliberate.
    """
    if guid[:4] != 'seed':
        for i, x in enumerate(self.active_peers):
            if x.guid == guid:
                self.log.debug('Remove Node: %s', guid)
                # NOTE(review): deleting from active_peers while enumerating
                # it — safe only if at most one entry matches; confirm.
                del self.active_peers[i]
                self.routing_table.remove_contact(guid)
                if guid in self.transport.mediation_mode:
                    del self.transport.mediation_mode[guid]
    # Refresh GUI peer list
    if self.transport.handler:
        self.transport.handler.refresh_peers()
@_synchronized
def add_peer(self, hostname, port, pubkey=None, guid=None, nickname=None, nat_type=None, avatar_url=None):
    """ This takes a tuple (pubkey, hostname, port, guid) and adds it to the active
    peers list if it doesn't already reside there.

    If a peer with the same GUID exists, its connection details are updated;
    if a peer with the same hostname/port exists, its identity is updated;
    otherwise a new crypto peer is created via the transport. Returns the
    matched/updated/new peer, or None when creation fails.

    TODO: Refactor to just pass a peer object. evil tuples.
    """
    # peer_tuple = (hostname, port, pubkey, guid, nickname)
    # found_peer = False

    # activePeers
    for peer in self.active_peers:
        self.log.debug('Peer: %s', peer)

        if peer.guid == guid:
            # Check if hostname/port combo changed
            if hostname != peer.hostname or port != peer.port:
                peer.hostname = hostname
                peer.port = port
                peer.nat_type = nat_type

                # if nat_type == 'Full Cone':
                #     peer.reachable = True

                self.log.debug('Hostname/Port combo changed.')
                # rebuild the connection for the new endpoint
                peer.init_packetsender()
                peer.setup_emitters()
                self.routing_table.add_contact(peer)
                if self.transport.handler:
                    self.transport.handler.refresh_peers()

            # always refresh identity details for the matched peer
            peer.nickname = nickname
            if avatar_url:
                peer.avatar_url = avatar_url
            peer.pub = pubkey

            # DHT contacts
            # self.routingTable.removeContact(guid)
            #self.routingTable.addContact(peer)

            return peer

        elif peer.hostname == hostname and peer.port == port:
            # same endpoint, possibly new identity — adopt the new guid/keys
            peer.guid = guid
            peer.nat_type = nat_type
            peer.pub = pubkey
            peer.nickname = nickname
            if avatar_url:
                peer.avatar_url = avatar_url
            self.routing_table.add_contact(peer)
            if self.transport.handler:
                self.transport.handler.refresh_peers()
            return peer

    # no existing match — ask the transport for a brand new peer connection
    new_peer = self.transport.get_crypto_peer(guid, hostname, port, pubkey, nickname, nat_type, avatar_url)

    if new_peer:
        #if new_peer.guid:
        #self.activePeers[:] = [x for x in self.active_peers if x.guid != guid]
        self.active_peers.append(new_peer)
        self.log.debug('Active peers after adding new one: %s', self.active_peers)
        self.routing_table.add_contact(new_peer)
        if self.transport.handler:
            self.transport.handler.refresh_peers()
        return new_peer
    else:
        self.log.error('Could not create a new peer.')
        return None
@_synchronized
def _add_known_node(self, node):
    """ Accept a peer tuple and add it to known nodes list

    Skips duplicates and tuples whose GUID (index 1) is None.

    :param node: (tuple)
    :return: N/A
    """
    self.log.debug('Adding known node: %s', node)

    is_new = node not in self.known_nodes
    has_guid = node[1] is not None
    if is_new and has_guid:
        self.known_nodes.append(node)
@_synchronized
def on_find_node(self, msg):
    """ When a findNode message is received it will be of several types:
    - findValue: Looking for a specific key-value
    - findNode: Looking for a node with a key
    If you find the key-value pair you send back the value in the foundKey
    field.
    If you find the node then send back the exact node and if you don't
    send back a list of k closest nodes in the foundNodes field.
    :param msg: Incoming message from other node with findNode request
    :return: N/A
    """
    self.log.debug('Received a findNode request: %s', msg)

    guid = msg['senderGUID']
    key = msg['key']
    find_id = msg['findID']
    pubkey = msg['pubkey']
    nickname = msg['senderNick']
    nat_type = msg['nat_type']
    hostname = msg['hostname']
    avatar_url = msg['avatar_url']
    port = msg['port']

    # NOTE(review): asserts are stripped under `python -O` — use explicit
    # validation if these invariants must hold in production.
    assert guid is not None and guid != self.transport.guid
    assert key is not None
    assert find_id is not None
    assert pubkey is not None

    # register (or refresh) the requester as a peer so we can reply
    querying_peer = self.add_peer(
        hostname, port, pubkey, guid, nickname, nat_type, avatar_url
    )

    if querying_peer is not None:
        response_msg = {"type": "findNodeResponse",
                        "senderGUID": self.transport.guid,
                        "hostname": self.transport.hostname,
                        "port": self.transport.port,
                        "pubkey": self.transport.pubkey,
                        "senderNick": self.transport.nickname,
                        "avatar_url": self.transport.avatar_url,
                        "findID": find_id,
                        'v': constants.VERSION}

        if msg['findValue']:
            if key in self.data_store and self.data_store[key] is not None:
                # Found key in local data store
                response_msg["foundKey"] = self.data_store[key]
                self.log.info('Found a key: %s', key)
            else:
                close_nodes = self.close_nodes(key, guid)
                self.log.debug('Found Close Nodes: %s', close_nodes)
                # NOTE(review): close_nodes is computed but an EMPTY list is
                # sent back in foundNodes — confirm this is intentional.
                response_msg['foundNodes'] = []
            querying_peer.send(response_msg)
        else:
            # Return close nodes to the key
            close_nodes = self.close_nodes(key, guid)
            self.log.debug('Found Close Nodes: %s', close_nodes)
            response_msg['foundNodes'] = close_nodes
            querying_peer.send(response_msg)
@_synchronized
def close_nodes(self, key, guid=None):
    """Return deduplicated contact tuples for the K closest known peers to
    *key*, optionally excluding *guid*."""
    nearest = self.routing_table.find_close_nodes(key, constants.K, guid)
    # Normalize falsy avatar URLs to None before serializing the contacts.
    for peer in nearest:
        self.log.debug('Contact: %s', peer)
        peer.avatar_url = peer.avatar_url or None
    serialized = [
        (p.guid, p.hostname, p.port, p.pub, p.nickname, p.nat_type, p.avatar_url)
        for p in nearest
    ]
    return self.dedupe(serialized)
@_synchronized
def on_find_node_response(self, msg):
    """Handle a findNodeResponse: dispatch a found value, a single found
    node, or merge returned close-nodes into the matching search."""
    # Update existing peer's pubkey if active peer
    for idx, peer in enumerate(self.active_peers):
        if peer.guid == msg['senderGUID']:
            peer.nickname = msg['senderNick']
            peer.pub = msg['pubkey']
    # If key was found by this node then
    if 'foundKey' in msg.keys():
        self.log.debug('Found the key-value pair. Executing callback.')
        for idx, search in enumerate(self.searches):
            if search.find_id == msg['findID']:
                search.callback(msg['foundKey'])
                # NOTE(review): self.searches holds search objects, so
                # `idx in self.searches` tests membership of the integer
                # index among those objects and is presumably always
                # False — the finished search is likely never removed.
                if idx in self.searches:
                    del self.searches[idx]
    else:
        # NOTE(review): the key tested is 'foundNode' but the value read is
        # msg['foundNodes'] — confirm which key the sender actually uses.
        if 'foundNode' in msg.keys():
            found_node = msg['foundNodes']
            # Add foundNode to active peers list and routing table
            if found_node[0] != self.transport.guid:
                self.log.debug('Found a tuple %s', found_node)
                if len(found_node) == 3:
                    found_node.append('')
                self.add_peer(found_node[1], found_node[2], found_node[3],
                              found_node[0], found_node[4], avatar_url=found_node[6])
            for idx, search in enumerate(self.searches):
                if search.find_id == msg['findID']:
                    # Execute callback
                    if search.callback is not None:
                        search.callback((found_node[2], found_node[1], found_node[0], found_node[3]))
                    # Clear search
                    del self.searches[idx]
        else:
            found_search = False
            search = None
            find_id = msg['findID']
            # Locate the search this response belongs to.
            for ser in self.searches:
                if ser.find_id == find_id:
                    search = ser
                    found_search = True
            if not found_search:
                self.log.info('No search found')
                return
            else:
                # Get current shortlist length
                shortlist_length = len(search.shortlist)
                nodes_to_extend = []
                # Extends shortlist if necessary
                for node in msg['foundNodes']:
                    # NOTE(review): `and` binds tighter than `or`, so this
                    # reads as (guid/pubkey/hostname differ) OR (port
                    # differs) — probably intended to exclude only nodes
                    # matching our full identity; confirm the grouping.
                    if node[0] != self.transport.guid and node[3] != self.transport.pubkey \
                            and not (node[1] == self.transport.hostname) \
                            or not node[2] == self.transport.port:
                        self.log.debug('Adding a findNode peer')
                        self.add_peer(
                            node[1],
                            node[2],
                            node[3],
                            node[0],
                            node[4],
                            node[5],
                            node[6]
                        )
                        nodes_to_extend.append(node)
                self.extend_shortlist(msg['findID'], nodes_to_extend)
                # Remove active probe to this node for this findID
                search_ip = msg['hostname']
                search_port = msg['port']
                search_guid = msg['senderGUID']
                search_tuple = (search_ip, search_port, search_guid)
                # NOTE(review): deleting from active_probes while
                # enumerating it can skip the element after a match.
                for idx, probe in enumerate(search.active_probes):
                    if probe == search_tuple:
                        del search.active_probes[idx]
                self.log.datadump(
                    'Find Node Response - Active Probes After: %s',
                    search.active_probes
                )
                # Add this to already contacted list
                if search_tuple not in search.already_contacted:
                    search.already_contacted.append(search_tuple)
                self.log.datadump(
                    'Already Contacted: %s',
                    search.already_contacted
                )
                # If we added more to shortlist then keep searching
                if len(search.shortlist) > shortlist_length:
                    self.log.info('Lets keep searching')
                    self._search_iteration(search)
                else:
                    self.log.info('Shortlist is empty')
                    if search.callback is not None:
                        search.callback(search.shortlist)
@_synchronized
def _refresh_node(self):
    """ Periodically called to perform k-bucket refreshes and data
    replication/republishing as necessary """
    self._refresh_routing_table()
    self._republish_data()
    # Notify a connected client (if any) that the periodic republish ran.
    if self.transport.handler:
        self.transport.handler.send_to_client(None, {"type": "republish_notify",
                                                     "msg": "P2P Data Republished"})
@_synchronized
def _refresh_routing_table(self):
    """Kick off one iterative findNode per refresh ID supplied by the
    routing table, draining the list back-to-front."""
    self.log.info('Started Refreshing Routing Table')
    # Get Random ID from every KBucket
    refresh_ids = self.routing_table.get_refresh_list(0, False)
    # Same order as the original recursive drain: pop from the tail.
    while refresh_ids:
        self.iterative_find_node(refresh_ids.pop())
@_synchronized
def _republish_data(self, *args):
    # Entry point for the periodic republish task; scheduler args ignored.
    self._threaded_republish_data()
@_synchronized
def _threaded_republish_data(self, *args):
""" Republishes and expires any stored data (i.e. stored
C{(key, value pairs)} that need to be republished/expired
This method should run in a deferred thread
"""
self.log.debug('Republishing Data')
expired_keys = []
for key in self.data_store.keys():
# Filter internal variables stored in the data store
if key == 'nodeState':
continue
now = int(time.time())
key = key.encode('hex')
original_publisher_id = self.data_store.get_original_publisher_id(key)
age = now - self.data_store.get_original_publish_time(key) + 500000
if original_publisher_id == self.settings['guid']:
# This node is the original publisher; it has to republish
# the data before it expires (24 hours in basic Kademlia)
if age >= constants.DATE_EXPIRE_TIMEOUT:
self.iterative_store(key, self.data_store[key])
else:
# This node needs to replicate the data at set intervals,
# until it expires, without changing the metadata associated with it
# First, check if the data has expired
if age >= constants.DATE_EXPIRE_TIMEOUT:
# This key/value pair has expired and has not been
# republished by the original publishing node,
| |
return
drivers[0].set("io", "native")
def filebased_volume():
    # Build a <disk type='file'> element for an image living on a filesystem.
    disk = etree.Element('disk', attrib={'type': 'file', 'device': 'disk'})
    e(disk, 'driver', None, {'name': 'qemu', 'type': linux.get_img_fmt(volume.installPath), 'cache': volume.cacheMode})
    e(disk, 'source', None, {'file': volume.installPath})
    if volume.shareable:
        e(disk, 'shareable')
    if volume.useVirtioSCSI:
        e(disk, 'target', None, {'dev': 'sd%s' % dev_letter, 'bus': 'scsi'})
        e(disk, 'wwn', volume.wwn)
    elif volume.useVirtio:
        # NOTE(review): this branch derives the letter from
        # DEVICE_LETTERS[volume.deviceId] instead of the precomputed
        # dev_letter used by the other branches — confirm intentional.
        e(disk, 'target', None, {'dev': 'vd%s' % self.DEVICE_LETTERS[volume.deviceId], 'bus': 'virtio'})
    else:
        bus_type = self._get_controller_type()
        dev_format = Vm._get_disk_target_dev_format(bus_type)
        e(disk, 'target', None, {'dev': dev_format % dev_letter, 'bus': bus_type})
    return disk
def scsilun_volume():
    # Passthrough SCSI LUN: raw qemu driver, always attached on the SCSI bus.
    # default value of sgio is 'filtered'
    #NOTE(weiw): scsi lun not support aio or qos
    disk = etree.Element('disk', attrib={'type': 'block', 'device': 'lun', 'sgio': get_sgio_value()})
    e(disk, 'driver', None, {'name': 'qemu', 'type': 'raw'})
    e(disk, 'source', None, {'dev': volume.installPath})
    e(disk, 'target', None, {'dev': 'sd%s' % dev_letter, 'bus': 'scsi'})
    return disk
def iscsibased_volume():
    # type: () -> etree.Element
    # Build the disk XML for an iSCSI-backed volume. installPath has the
    # form iscsi://server:port/target/lun.
    def split_iscsi_path(install_path):
        # BUG FIX: the original used install_path.lstrip('iscsi://'),
        # but str.lstrip() strips a *character set*, not a prefix, so
        # targets starting with any of 'i', 's', 'c', ':' or '/' were
        # silently corrupted. Slice the literal prefix off instead.
        prefix = 'iscsi://'
        if install_path.startswith(prefix):
            install_path = install_path[len(prefix):]
        return install_path.split('/')

    def virtio_iscsi():
        vi = VirtioIscsi()
        portal, vi.target, vi.lun = split_iscsi_path(volume.installPath)
        vi.server_hostname, vi.server_port = portal.split(':')
        vi.device_letter = dev_letter
        vi.volume_uuid = volume.volumeUuid
        vi.chap_username = volume.chapUsername
        # FIX: restore the CHAP secret (was an unresolvable placeholder);
        # it comes from the volume command object like chapUsername.
        vi.chap_password = volume.chapPassword
        return vi.to_xmlobject()

    def blk_iscsi():
        bi = BlkIscsi()
        portal, bi.target, bi.lun = split_iscsi_path(volume.installPath)
        bi.server_hostname, bi.server_port = portal.split(':')
        bi.device_letter = dev_letter
        bi.volume_uuid = volume.volumeUuid
        bi.chap_username = volume.chapUsername
        bi.chap_password = volume.chapPassword
        return bi.to_xmlobject()

    # virtio-scsi vs emulated block device, chosen by the volume flags.
    if volume.useVirtio:
        return virtio_iscsi()
    else:
        return blk_iscsi()
def ceph_volume():
    # type: () -> etree.Element
    # Build the disk XML for a ceph/rbd volume, choosing the guest bus
    # driver from the volume flags.
    def build_virtio_scsi():
        cfg = VirtioSCSICeph()
        cfg.volume = volume
        cfg.dev_letter = dev_letter
        return cfg.to_xmlobject()

    def build_virtio():
        cfg = VirtioCeph()
        cfg.volume = volume
        cfg.dev_letter = dev_letter
        return cfg.to_xmlobject()

    def build_blk():
        cfg = BlkCeph()
        cfg.volume = volume
        cfg.dev_letter = dev_letter
        cfg.bus_type = self._get_controller_type()
        return cfg.to_xmlobject()

    if volume.useVirtioSCSI:
        return build_virtio_scsi()
    if volume.useVirtio:
        return build_virtio()
    return build_blk()
def block_volume():
    # type: () -> etree.Element
    # Disk XML for a host block device; snapshots are taken externally.
    disk = etree.Element('disk', {'type': 'block', 'device': 'disk', 'snapshot': 'external'})
    e(disk, 'driver', None,
      {'name': 'qemu', 'type': 'raw', 'cache': 'none', 'io': 'native'})
    e(disk, 'source', None, {'dev': volume.installPath})
    if volume.useVirtioSCSI:
        e(disk, 'target', None, {'dev': 'sd%s' % dev_letter, 'bus': 'scsi'})
        e(disk, 'wwn', volume.wwn)
    else:
        e(disk, 'target', None, {'dev': 'vd%s' % dev_letter, 'bus': 'virtio'})
    return disk
def spool_volume():
    # type: () -> etree.Element
    # Disk XML for a spool network volume, always on the virtio bus.
    img_format = linux.get_img_fmt(volume.installPath)
    disk = etree.Element('disk', {'type': 'network', 'device': 'disk'})
    e(disk, 'driver', None,
      {'name': 'qemu', 'type': 'raw', 'cache': 'none', 'io': 'native'})
    e(disk, 'source', None,
      {'protocol': 'spool', 'name': make_spool_conf(img_format, dev_letter, volume)})
    e(disk, 'target', None, {'dev': 'vd%s' % dev_letter, 'bus': 'virtio'})
    return disk
# Pick the device letter, build the per-type <disk> element, decorate it,
# then hot-plug it into the running domain with retries.
dev_letter = self._get_device_letter(volume, addons)
if volume.deviceType == 'iscsi':
    disk_element = iscsibased_volume()
elif volume.deviceType == 'file':
    disk_element = filebased_volume()
elif volume.deviceType == 'ceph':
    disk_element = ceph_volume()
elif volume.deviceType == 'scsilun':
    disk_element = scsilun_volume()
elif volume.deviceType == 'block':
    disk_element = block_volume()
elif volume.deviceType == 'spool':
    disk_element = spool_volume()
else:
    raise Exception('unsupported volume deviceType[%s]' % volume.deviceType)
# Decorations shared by all disk types: address, QoS, serial, aio mode.
Vm.set_device_address(disk_element, volume, get_vm_by_uuid(self.uuid))
Vm.set_volume_qos(addons, volume.volumeUuid, disk_element)
Vm.set_volume_serial_id(volume.volumeUuid, disk_element)
volume_native_aio(disk_element)
xml = etree.tostring(disk_element)
logger.debug('attaching volume[%s] to vm[uuid:%s]:\n%s' % (volume.installPath, self.uuid, xml))
try:
    # libvirt has a bug that if attaching volume just after vm created, it likely fails. So we retry three time here
    @linux.retry(times=3, sleep_time=5)
    def attach():
        def wait_for_attach(_):
            me = get_vm_by_uuid(self.uuid)
            disk, _ = me._get_target_disk(volume, is_exception=False)
            if not disk:
                logger.debug('volume[%s] is still in process of attaching, wait it' % volume.installPath)
            # Attach is complete once the disk appears in the domain XML.
            return bool(disk)
        try:
            self.domain.attachDeviceFlags(xml, libvirt.VIR_DOMAIN_AFFECT_LIVE)
            if not linux.wait_callback_success(wait_for_attach, None, 5, 1):
                raise Exception("cannot attach a volume[uuid: %s] to the vm[uuid: %s];"
                                "it's still not attached after 5 seconds" % (volume.volumeUuid, self.uuid))
        except:
            # check one more time
            if not wait_for_attach(None):
                raise
    attach()
except libvirt.libvirtError as ex:
    # Translate the most common libvirt failures into actionable messages.
    err = str(ex)
    if 'Duplicate ID' in err:
        err = ('unable to attach the volume[%s] to vm[uuid: %s], %s. This is a KVM issue, please reboot'
               ' the VM and try again' % (volume.volumeUuid, self.uuid, err))
    elif 'No more available PCI slots' in err:
        err = ('vm[uuid: %s] has no more PCI slots for volume[%s]. This is a Libvirt issue, please reboot'
               ' the VM and try again' % (self.uuid, volume.volumeUuid))
    else:
        err = 'unable to attach the volume[%s] to vm[uuid: %s], %s.' % (volume.volumeUuid, self.uuid, err)
    logger.warn(linux.get_exception_stacktrace())
    raise kvmagent.KvmError(err)
def _get_device_letter(self, volume, addons):
    """Pick the device letter for *volume*.

    usually, device_letter_index equals device_id, but reversed when volume
    use VirtioSCSI because of ZSTAC-9641; so when attaching a SCSI volume
    again after detaching it, the device letter should match its original
    name, otherwise attach fails with a duplicate device name.
    """
    default_letter = Vm.DEVICE_LETTERS[volume.deviceId]
    if not volume.useVirtioSCSI:
        return default_letter

    # Map actual device letter -> deviceId for attached volumes whose
    # letter differs from the canonical one ("reversed" disks).
    occupied_by = {}
    for attached in addons.attachedDataVolumes:
        _, disk_name = self._get_target_disk(attached)
        if disk_name and disk_name[-1] != Vm.DEVICE_LETTERS[attached.deviceId]:
            occupied_by[disk_name[-1]] = attached.deviceId

    if default_letter not in occupied_by:
        return default_letter
    # letter already occupied: reuse the letter that canonically belongs
    # to the occupying volume instead.
    logger.debug("reversed disk name: %s" % occupied_by)
    return Vm.DEVICE_LETTERS[occupied_by[default_letter]]
def detach_data_volume(self, volume):
    """Serialize and perform a hot-unplug of *volume* from this VM."""
    # Give a freshly started VM a grace period before hot-unplug.
    self._wait_vm_run_until_seconds(10)
    # NOTE(review): this waits on the 'attach-volume-...' key but records
    # 'detach-volume-...' below — presumably to let a recent attach settle
    # before detaching; confirm the asymmetry is intentional.
    self.timeout_object.wait_until_object_timeout('attach-volume-%s' % self.uuid)
    self._detach_data_volume(volume)
    self.timeout_object.put('detach-volume-%s' % self.uuid, timeout=10)
def _detach_data_volume(self, volume):
    """Hot-unplug a data volume from the running domain, retrying and
    keeping timeout bookkeeping for detaches that never complete."""
    assert volume.deviceId != 0, 'how can root volume gets detached???'
    target_disk, disk_name = self._get_target_disk(volume, is_exception=False)
    if not target_disk:
        # Disk already gone: if an earlier detach timed out, treat this
        # as that detach having finally completed.
        if self._volume_detach_timed_out(volume):
            logger.debug('volume [installPath: %s] has been detached before' % volume.installPath)
            self._clean_timeout_record(volume)
            return
        raise kvmagent.KvmError('unable to find data volume[%s] on vm[uuid:%s]' % (disk_name, self.uuid))
    xmlstr = target_disk.dump()
    logger.debug('detaching volume from vm[uuid:%s]:\n%s' % (self.uuid, xmlstr))
    try:
        # libvirt has a bug that if detaching volume just after vm created, it likely fails. So we retry three time here
        @linux.retry(times=3, sleep_time=5)
        def detach():
            def wait_for_detach(_):
                me = get_vm_by_uuid(self.uuid)
                disk, _ = me._get_target_disk(volume, is_exception=False)
                if disk:
                    logger.debug('volume[%s] is still in process of detaching, wait for it' % volume.installPath)
                # Detach is complete once the disk left the domain XML.
                return not bool(disk)
            try:
                self.domain.detachDeviceFlags(xmlstr, libvirt.VIR_DOMAIN_AFFECT_LIVE)
                if not linux.wait_callback_success(wait_for_detach, None, 5, 1):
                    raise Exception("unable to detach the volume[uuid:%s] from the vm[uuid:%s];"
                                    "it's still attached after 5 seconds" %
                                    (volume.volumeUuid, self.uuid))
            except:
                # check one more time
                if not wait_for_detach(None):
                    # Remember the timeout so a later retry can treat a
                    # missing disk as an already-finished detach.
                    self._record_volume_detach_timeout(volume)
                    logger.debug("detach timeout, record volume install path: %s" % volume.installPath)
                    raise
        detach()
        if self._volume_detach_timed_out(volume):
            self._clean_timeout_record(volume)
            logger.debug("detach success finally, remove record of volume install path: %s" % volume.installPath)

        def logout_iscsi():
            BlkIscsi.logout_portal(target_disk.source.dev_)

        # Emulated (non-virtio) iSCSI disks hold a host-side session that
        # must be logged out after the unplug.
        if volume.deviceType == 'iscsi':
            if not volume.useVirtio:
                logout_iscsi()
    except libvirt.libvirtError as ex:
        vm = get_vm_by_uuid(self.uuid)
        logger.warn('vm dump: %s' % vm.domain_xml)
        logger.warn(linux.get_exception_stacktrace())
        raise kvmagent.KvmError(
            'unable to detach volume[%s] from vm[uuid:%s], %s' % (volume.installPath, self.uuid, str(ex)))
def _record_volume_detach_timeout(self, volume):
    # Remember that this volume's detach timed out on this VM.
    Vm.timeout_detached_vol.add(volume.installPath + "-" + self.uuid)

def _volume_detach_timed_out(self, volume):
    # Did a previous detach of this volume on this VM time out?
    return volume.installPath + "-" + self.uuid in Vm.timeout_detached_vol

def _clean_timeout_record(self, volume):
    # Forget the recorded detach timeout for this volume/VM pair.
    Vm.timeout_detached_vol.remove(volume.installPath + "-" + self.uuid)
def _get_back_file(self, volume):
    """Return the backing file of *volume*, or None when there is none."""
    backing = linux.qcow2_get_backing_file(volume)
    return backing or None
def _get_backfile_chain(self, current):
    """Walk the backing chain starting at *current* (exclusive), nearest
    backing file first."""
    chain = []
    node = self._get_back_file(current)
    while node:
        chain.append(node)
        node = self._get_back_file(node)
    return chain
@staticmethod
def ensure_no_internal_snapshot(volume):
    """Raise KvmError if any image in *volume*'s backing chain carries an
    internal snapshot (qemu-img reports a 'Snapshot list:' section)."""
    # grep exits 0 (success) on a match, which is when we must refuse.
    if os.path.exists(volume) and shell.run("%s --backing-chain %s | grep 'Snapshot list:'"
                                            % (qemu_img.subcmd('info'), volume)) == 0:
        raise kvmagent.KvmError('found internal snapshot in the backing chain of volume[path:%s].' % volume)
# NOTE: code from Openstack nova
def _wait_for_block_job(self, disk_path, abort_on_error=False,
                        wait_for_job_clean=False):
    """Wait for libvirt block job to complete.
    Libvirt may return either cur==end or an empty dict when
    the job is complete, depending on whether the job has been
    cleaned up by libvirt yet, or not.
    :returns: True if still in progress
        False if completed
    """
    status = self.domain.blockJobInfo(disk_path, 0)
    if status == -1 and abort_on_error:
        raise kvmagent.KvmError('libvirt error while requesting blockjob info.')
    try:
        # NOTE(review): when status == -1 and abort_on_error is False,
        # (-1).get(...) raises AttributeError and we fall through to the
        # except below, reporting "completed" — confirm that's intended.
        cur = status.get('cur', 0)
        end = status.get('end', 0)
    except Exception as e:
        logger.warn(linux.get_exception_stacktrace())
        return False
    if wait_for_job_clean:
        # An empty status dict means libvirt already cleaned up the job.
        job_ended = not status
    else:
        job_ended = cur == end
    return not job_ended
def _get_target_disk_by_path(self, installPath, is_exception=True):
    """Find the <disk> element (and its target dev name) whose source
    matches *installPath*; raise KvmError unless is_exception is False."""
    if installPath.startswith('sharedblock'):
        installPath = shared_block_to_file(installPath)
    for disk in self.domain_xmlobject.devices.get_child_node_as_list('disk'):
        if not xmlobject.has_element(disk, 'source'):
            continue
        # file
        if disk.source.file__ and disk.source.file_ == installPath:
            return disk, disk.target.dev_
        # ceph
        if disk.source.name__ and disk.source.name_ in installPath:
            return disk, disk.target.dev_
        # 'block':
        if disk.source.dev__ and disk.source.dev_ in installPath:
            return disk, disk.target.dev_
    if not is_exception:
        return None, None
    logger.debug('%s is not found on the vm[uuid:%s]' % (installPath, self.uuid))
    raise kvmagent.KvmError('unable to find volume[installPath:%s] on vm[uuid:%s]' % (installPath, self.uuid))
def _get_all_volume_alias_names(self, volumes):
    """Return the libvirt alias name of every volume, sorted by deviceId."""
    volumes.sort(key=lambda d: d.deviceId)
    target_disk_alias_names = []
    for volume in volumes:
        target_disk, _ = self._get_target_disk(volume)
        target_disk_alias_names.append(target_disk.alias.name_)
    # NOTE(review): the loop appends exactly once per volume (or raises in
    # _get_target_disk), so this length check can never fire; if the intent
    # is to reject disks lacking an alias, None values should be filtered
    # before appending instead.
    if len(volumes) != len(target_disk_alias_names):
        raise Exception('not all disk have alias names, skip rollback')
    return target_disk_alias_names
def _get_target_disk(self, volume, is_exception=True):
if volume.installPath.startswith('sharedblock'):
volume.installPath = shared_block_to_file(volume.installPath)
for disk in self.domain_xmlobject.devices.get_child_node_as_list('disk'):
if not xmlobject.has_element(disk, 'source') | |
#
# Copyright (C) 2014 Dell, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import logging
import docker
import errno
import random
import six
import sys
import time
from . import audit
from .errors import AlreadyInitializedError
from .errors import EmbargoContainerConflictError
from .errors import EmbargoError
from .errors import DockerContainerNotFound
from .errors import InsufficientPermissionsError
from .net import NetworkState
from .state import EmbargoState
# TODO: configurable timeout
# Grace period (seconds) handed to docker stop calls before the daemon
# kills the container.
DEFAULT_KILL_TIMEOUT = 3

_logger = logging.getLogger(__name__)
class Embargo(object):
def __init__(self, config, embargo_id=None, state=None,
             network=None, docker_client=None):
    """Wire up configuration, persisted state, network helper, audit log
    and docker client.

    :param config: embargo configuration (containers, network options)
    :param embargo_id: id used when no *state* object is supplied
    :param state: optional pre-built EmbargoState
    :param network: helper that manipulates host-side network devices
    :param docker_client: optional docker.APIClient; one is built from
        the environment when omitted
    """
    self.config = config
    self.state = state or EmbargoState(embargo_id=embargo_id)
    self.network = network
    try:
        # Fail fast (with a logged traceback) if the audit file is unusable.
        self._audit = audit.EventAuditor(self.state.get_audit_file())
    except Exception as ex:
        _logger.exception(ex)
        raise
    default_client = docker.APIClient(
        **docker.utils.kwargs_from_env(assert_hostname=False)
    )
    self.docker_client = docker_client or default_client
def create(self, verbose=False, force=False):
    """Create and start every configured container, persist their ids,
    and return their descriptions.

    :param verbose: print per-container progress to stdout
    :param force: remove a conflicting same-named container and retry
    :raises AlreadyInitializedError: when a state file already exists
    """
    container_state = {}
    num_containers = len(self.config.sorted_containers)
    # we can check if a state file already exists beforehand
    if self.state.exists():
        raise AlreadyInitializedError('a embargo already exists in here - '
                                      'you may want to destroy it first')

    def vprint(msg):
        # Progress output only in verbose mode; flushed so '\r' updates show.
        if verbose:
            sys.stdout.write(msg)
            sys.stdout.flush()

    if self.config.is_udn():
        # Create custom network to allow docker resolve container hostnames
        # via built-in DNS server.
        response = self.docker_client.create_network(
            self.state.embargo_net_name)
        if response['Warning']:
            raise EmbargoError("Error while creating network: '%s'" %
                               (response['Warning']))
    for idx, container in enumerate(self.config.sorted_containers):
        name = container.name
        vprint("\r[%d/%d] Starting '%s' " % (idx+1, num_containers, name))
        # in case a startup delay is configured
        # we have to wait in here
        if container.start_delay > 0:
            vprint('(delaying for %s seconds)' % (container.start_delay))
            time.sleep(container.start_delay)
        container_id = self._start_container(container, force)
        container_state[name] = {'id': container_id}
    # clear progress line
    vprint('\r')
    # try to persist container states
    self.state.initialize(container_state)
    container_descriptions = []
    for container in self.config.sorted_containers:
        description = self._get_container_description(container.name)
        container_descriptions.append(description)
    return container_descriptions
def _get_device_id(self, container_id, container_name):
    """Return the host-side network device of *container_id*; raise
    InsufficientPermissionsError when access is denied."""
    # next we have to determine the veth pair of host/container
    # that we formerly could pass in via 'lxc_conf' which is
    # deprecated since docker > 1.6
    device = None
    try:
        device = self.network.get_container_device(self.docker_client, container_id)
    except OSError as err:
        # Permission problems get a dedicated, actionable error type.
        if err.errno in (errno.EACCES, errno.EPERM):
            msg = "Failed to determine network device of container '%s' [%s]" % (container_name, container_id)
            raise InsufficientPermissionsError(msg)
        raise
    return device
def __get_container_links(self, container):
    """Resolve the configured links of *container* into a
    {full_container_name: alias} mapping for the docker API."""
    resolved = {}
    for link_name, alias in container.links.items():
        target = self.config.containers.get(link_name, None)
        if not target:
            raise EmbargoError("link '%s' of container '%s' does not exist" %
                               (link_name, container.name))
        resolved[target.get_name(self.state.embargo_id)] = alias
    return resolved
def _start_container(self, container, force=False):
    """Create and start one configured container; return its docker id.

    :param force: on a name conflict (HTTP 409), remove the existing
        container with that name and retry the creation once
    :raises EmbargoContainerConflictError: name conflict not resolvable
    """
    container_name = container.get_name(self.state.embargo_id)
    volumes = list(container.volumes.values()) or None
    links = self.__get_container_links(container)
    # the docker api for port bindings is `internal:external`
    port_bindings = dict((v, k) for k, v in container.publish_ports.items())
    if self.config.is_udn():
        network_mode = self.state.embargo_net_name
    else:
        network_mode = None
    host_config = self.docker_client.create_host_config(
        binds=container.volumes,
        dns=container.dns,
        port_bindings=port_bindings,
        network_mode=network_mode,
        ulimits=[{'name': 'core', 'soft': 3145728, 'hard': 4194304}],
        links=links,
        cap_add=container.cap_add)

    def create_container():
        # try to create container
        response = self.docker_client.create_container(
            container.image,
            command=container.command,
            name=container_name,
            ports=container.expose_ports,
            volumes=volumes,
            hostname=container.hostname,
            environment=container.environment,
            host_config=host_config,
            labels={"embargo.id": self.state.embargo_id})
        return response['Id']

    try:
        container_id = create_container()
    except docker.errors.APIError as err:
        if err.response.status_code == 409 and err.is_client_error():
            # if force is set we are retrying after removing the
            # container with that name first
            if force and self.__try_remove_container(container_name):
                container_id = create_container()
            else:
                raise EmbargoContainerConflictError(err)
        else:
            raise
    # start container
    self.docker_client.start(container_id)
    return container_id
def __try_remove_container(self, name):
    """Force-remove the named container; True on success, False on any
    failure (best effort — errors are swallowed)."""
    try:
        self.docker_client.remove_container(name, force=True)
    except Exception:
        # TODO: log error?
        return False
    return True
def _get_container_description(self, name, network_state=True,
                               ip_partitions=None):
    """Build a Container description (status, ip, device, network state,
    partition, holy/neutral flags) for the named state container."""
    self.state.load()
    state_container = self.state.containers[name]
    container_id = state_container['id']
    try:
        container = self._inspect_container(container_id)
    except DockerContainerNotFound:
        return Container(name, container_id, ContainerStatus.MISSING)
    state_dict = container.get('State')
    if state_dict and state_dict.get('Running'):
        container_status = ContainerStatus.UP
    else:
        container_status = ContainerStatus.DOWN
    extras = {}
    network = container.get('NetworkSettings')
    ip = None
    if network:
        ip = network.get('IPAddress')
        networks = network.get('Networks')
        if self.config.is_udn():
            # User-defined network: the address lives under the named net.
            ip = networks.get(
                self.state.embargo_net_name).get('IPAddress')
        elif networks and not ip:
            # Fall back to the single attached network's address.
            if len(networks) == 1:
                ip = six.next(six.itervalues(networks)).get('IPAddress')
    if ip:
        extras['ip_address'] = ip
    if (network_state and name in self.state.containers
            and container_status == ContainerStatus.UP):
        device = self._get_device_id(container_id, name)
        extras['device'] = device
        extras['network_state'] = self.network.network_state(device)
        # include partition ID if we were provided a map of them
        if ip_partitions and ip:
            extras['partition'] = ip_partitions.get(ip)
    else:
        extras['network_state'] = NetworkState.UNKNOWN
        extras['device'] = None
    # lookup 'holy' and 'neutral' containers
    # TODO: this might go into the state as well..?
    cfg_container = self.config.containers.get(name)
    extras['neutral'] = cfg_container.neutral if cfg_container else False
    extras['holy'] = cfg_container.holy if cfg_container else False
    return Container(name, container_id, container_status, **extras)
def destroy(self, force=False):
    """Stop and remove all embargo containers, restore network rules,
    delete persisted state and (for user-defined networks) the network.

    :param force: currently unused; kept for interface symmetry
    """
    containers = self._get_embargo_docker_containers()
    for container in list(containers.values()):
        container_id = container['Id']
        self.docker_client.stop(container_id, timeout=DEFAULT_KILL_TIMEOUT)
        self.docker_client.remove_container(container_id)
    self.network.restore(self.state.embargo_id)
    self.state.destroy()
    if self.config.is_udn():
        # NOTE(review): embargo_net_name is read after state.destroy() —
        # confirm the state object keeps the name in memory at this point.
        try:
            self.docker_client.remove_network(self.state.embargo_net_name)
        except docker.errors.APIError as err:
            # A 404 just means the network is already gone.
            if err.response.status_code != 404:
                raise
# Get the containers that are part of the initial Embargo group
def _get_embargo_docker_containers(self):
    """Return {config_name: docker_container} for containers carrying
    this embargo's `embargo.id` label."""
    self.state.load()
    containers = {}
    filters = {"label": ["embargo.id=" + self.state.embargo_id]}
    prefix = self.state.embargo_id + "_"
    for container in self.docker_client.containers(all=True, filters=filters):
        for name in container['Names']:
            # strip leading '/'
            name = name[1:] if name[0] == '/' else name
            # strip prefix. containers will have these UNLESS `container_name`
            # was specified in the config
            name = name[len(prefix):] if name.startswith(prefix) else name
            if name in self.state.containers:
                containers[name] = container
                break
    return containers
def _get_docker_containers(self):
    """Like _get_embargo_docker_containers, but also resolves state
    entries whose containers were not found via the embargo label."""
    self.state.load()
    containers = self._get_embargo_docker_containers()
    # Search for and add any containers that were added to the state
    for state_container_name in self.state.containers:
        if state_container_name not in containers.keys():
            container_id = self.state.container_id(state_container_name)
            filters = {"id": container_id}
            for container in self.docker_client.containers(all=True, filters=filters):
                containers[state_container_name] = container
    return containers
def _get_all_containers(self):
    """Describe every container known to this embargo, including its
    network-partition assignment."""
    self.state.load()
    ip_partitions = self.network.get_ip_partitions(self.state.embargo_id)
    return [
        self._get_container_description(name, ip_partitions=ip_partitions)
        for name in self._get_docker_containers().keys()
    ]
def status(self):
    # Public entry point: describe all containers of this embargo.
    return self._get_all_containers()
def _get_running_containers(self, container_names=None, select_random=False):
    # Only containers currently UP qualify.
    return self._get_containers_with_state(container_names, select_random, ContainerStatus.UP)

def _get_created_containers(self, container_names=None, select_random=False):
    # Containers that exist at all, whether running or stopped.
    return self._get_containers_with_state(container_names, select_random,
                                           ContainerStatus.UP, ContainerStatus.DOWN)
def _get_containers_with_state(self, container_names, select_random, *container_states):
    """Select containers whose status is one of *container_states*.

    With select_random, return a single random candidate; with
    container_names None, return every candidate; otherwise return the
    named containers, raising if any is missing or in the wrong state.
    """
    candidates = {}
    for described in self._get_all_containers():
        if described.status in container_states:
            candidates[described.name] = described
    if select_random and candidates:
        return [random.choice(list(candidates.values()))]
    if container_names is None:
        return list(candidates.values())
    selected = []
    for wanted in container_names:
        match = candidates.get(wanted)
        if not match:
            raise EmbargoError("Container %s is not found or not any of %s"
                               % (wanted, container_states))
        selected.append(match)
    return selected
def _get_running_container(self, container_name):
    # Single-container convenience wrapper; raises if it is not running.
    return self._get_running_containers((container_name,))[0]
def __with_running_container_device(self, container_names, func, select_random=False):
    """Resolve running containers and invoke *func(device)* on each one's
    network device, writing the outcome to the audit log.

    :return: the names of the containers acted upon
    """
    message = ""
    audit_status = "Success"
    try:
        containers = self._get_running_containers(container_names, select_random)
        container_names = [c.name for c in containers]
        for container in containers:
            device = self._get_device_id(container.container_id, container.name)
            func(device)
        return container_names
    except Exception as ex:
        audit_status = "Failed"
        message = str(ex)
        raise
    finally:
        # func.__name__ (flaky/slow/...) doubles as the audit event name.
        self._audit.log_event(func.__name__, audit_status, message,
                              container_names)
def flaky(self, container_names, select_random=False):
    # Apply the network helper's 'flaky' impairment to each device.
    return self.__with_running_container_device(container_names, self.network.flaky, select_random)

def slow(self, container_names, select_random=False):
    # Apply the network helper's 'slow' impairment to each device.
    return self.__with_running_container_device(container_names, self.network.slow, select_random)

def duplicate(self, container_names, select_random=False):
    # Apply the network helper's 'duplicate' impairment to each device.
    return self.__with_running_container_device(container_names, self.network.duplicate, select_random)

def fast(self, container_names, select_random=False):
    # Apply the network helper's 'fast' mode (presumably clears
    # impairments — see the network implementation) on each device.
    return self.__with_running_container_device(container_names, self.network.fast, select_random)
def restart(self, container_names, select_random=False):
    """Stop then start each selected running container; audited."""
    message = ""
    audit_status = "Success"
    try:
        containers = self._get_running_containers(container_names, select_random)
        container_names = [c.name for c in containers]
        for container in containers:
            self._stop(container)
            self._start(container.name)
        return container_names
    except Exception as ex:
        message = str(ex)
        audit_status = "Failed"
        raise
    finally:
        # The audit entry records the outcome even when we re-raise.
        self._audit.log_event('restart', audit_status, message,
                              container_names)
def kill(self, container_names, signal="SIGKILL", select_random=False):
    """Send *signal* to each selected running container; audited."""
    message = ''
    audit_status = "Success"
    try:
        containers = self._get_running_containers(container_names, select_random)
        container_names = [c.name for c in containers]
        for container in containers:
            self._kill(container, signal)
        return container_names
    except Exception as ex:
        message = str(ex)
        audit_status = "Failed"
        raise
    finally:
        self._audit.log_event('kill', audit_status, message,
                              container_names)
def _kill(self, container, signal):
    # Raw docker kill with the chosen signal.
    self.docker_client.kill(container.container_id, signal)
def stop(self, container_names, select_random=False):
    """Stop each selected container; audited."""
    message = ''
    audit_status = "Success"
    try:
        # it is valid to try to stop an already stopped container
        containers = self._get_created_containers(container_names, select_random)
        container_names = [c.name for c in containers]
        for container in containers:
            self._stop(container)
        return container_names
    except Exception as ex:
        message = str(ex)
        audit_status = "Failed"
        raise
    finally:
        self._audit.log_event('stop', audit_status, message,
                              container_names)
def _stop(self, container):
    # Graceful docker stop with the shared kill-timeout grace period.
    self.docker_client.stop(container.container_id, timeout=DEFAULT_KILL_TIMEOUT)
def start(self, container_names, select_random=False):
message = ''
audit_status = "Success"
try:
# it is valid to try to start an already running container
containers = self._get_created_containers(container_names, select_random)
container_names = [c.name for c in containers]
for container in container_names:
self._start(container)
return container_names
except | |
# %% [markdown]
# # Crowdsourcing Tutorial
# %% [markdown]
# In this tutorial, we'll provide a simple walkthrough of how to use Snorkel in conjunction with crowdsourcing to create a training set for a sentiment analysis task.
# We already have crowdsourced labels for about half of the training dataset.
# The crowdsourced labels are fairly accurate, but do not cover the entire training dataset, nor are they available for the test set or during inference.
# To make up for their lack of training set coverage, we combine crowdsourced labels with heuristic labeling functions to increase the number of training labels we have.
# Like most Snorkel labeling pipelines, we'll use the denoised labels to train a deep learning
# model which can be applied to new, unseen data to automatically make predictions.
# %% [markdown]
# ## Dataset Details
# %% [markdown]
# In this tutorial, we'll use the [Weather Sentiment](https://data.world/crowdflower/weather-sentiment) dataset from Figure Eight.
# Our goal is to train a classifier that can label new tweets as expressing either a positive or negative sentiment.
#
# Crowdworkers were asked to label the sentiment of a particular tweet relating to the weather.
# The catch is that 20 crowdworkers graded each tweet, and in many cases crowdworkers assigned conflicting sentiment labels to the same tweet.
# This is a common issue when dealing with crowdsourced labeling workloads.
#
# Label options were positive, negative, or one of three other options saying they weren't sure if it was positive or negative; we use only the positive/negative labels.
# We've also altered the dataset to reflect a realistic crowdsourcing pipeline where only a subset of our available training set has received crowd labels.
#
# We will treat each crowdworker's labels as coming from a single labeling function (LF).
# This will allow us to learn a weight for how much to trust the labels from each crowdworker.
# We will also write a few heuristic labeling functions to cover the data points without crowd labels.
# Snorkel's ability to build high-quality datasets from multiple noisy labeling signals makes it an ideal framework to approach this problem.
# %% [markdown]
# ## Loading Crowdsourcing Dataset
# %% [markdown]
# We start by loading our data which has 287 data points in total.
# We take 50 for our development set and 50 for our test set.
# The remaining 187 data points form our training set.
# Since the dataset is already small, we skip using a validation set.
# Note that this very small dataset is primarily used for demonstration purposes here.
# In a real setting, we would expect to have access to many more unlabeled tweets, which could help us to train a higher quality model.
# %% {"tags": ["md-exclude"]}
import os
# Make sure we're in the right directory
# (when launched from the repo root, hop into the tutorial's own folder so
# that relative data paths resolve).
if os.path.basename(os.getcwd()) == "snorkel-tutorials":
    os.chdir("crowdsourcing")
# %%
from data import load_data
# crowd_labels: per-worker tweet labels; df_* are the train/dev/test splits.
crowd_labels, df_train, df_dev, df_test = load_data()
# Gold sentiment labels for dev and test (0 = Negative, 1 = Positive).
Y_dev = df_dev.sentiment.values
Y_test = df_test.sentiment.values
# %% [markdown] {"tags": ["md-exclude"]}
# First, let's take a look at our development set to get a sense of what the tweets look like.
# We use the following label convention: 0 = Negative, 1 = Positive.
# %% {"tags": ["md-exclude"]}
import pandas as pd
# Don't truncate text fields in the display
pd.set_option("display.max_colwidth", 0)
# Peek at a few dev-set tweets.
df_dev.head()
# %% [markdown] {"tags": ["md-exclude"]}
# Now let's take a look at the crowd labels.
# We'll convert these into labeling functions.
# %% {"tags": ["md-exclude"]}
crowd_labels.head()
# %% [markdown]
# ## Writing Labeling Functions
# Each crowdworker can be thought of as a single labeling function,
# as each worker labels a subset of data points,
# and may have errors or conflicting labels with other workers / labeling functions.
# So we create one labeling function per worker.
# We'll simply return the label the worker submitted for a given tweet, and abstain
# if they didn't submit a label for it.
# %% [markdown]
# ### Crowdworker labeling functions
# %%
labels_by_annotator = crowd_labels.groupby("worker_id")
# Map each worker id to a {tweet_id: label} dict built from that worker's rows.
worker_dicts = {
    worker_id: labels_by_annotator.get_group(worker_id)["label"].to_dict()
    for worker_id in labels_by_annotator.groups
}
print("Number of workers:", len(worker_dicts))
# %%
from snorkel.labeling import LabelingFunction
# Label value meaning "this LF offers no opinion on the data point".
ABSTAIN = -1

def worker_lf(x, worker_dict):
    """Return this worker's recorded label for x's tweet, or ABSTAIN."""
    try:
        return worker_dict[x.tweet_id]
    except KeyError:
        return ABSTAIN
def make_worker_lf(worker_id):
    """Wrap one crowdworker's label dictionary as a Snorkel LabelingFunction."""
    return LabelingFunction(
        f"worker_{worker_id}",
        f=worker_lf,
        resources={"worker_dict": worker_dicts[worker_id]},
    )

# One labeling function per crowdworker.
worker_lfs = list(map(make_worker_lf, worker_dicts))
# %% [markdown]
# Let's take a quick look at how well they do on the development set.
# %% {"tags": ["md-exclude-output"]}
from snorkel.labeling import PandasLFApplier
# Apply every worker LF to each split; unlabeled rows come back as ABSTAIN.
applier = PandasLFApplier(worker_lfs)
L_train = applier.apply(df_train)
L_dev = applier.apply(df_dev)
# %% [markdown]
# Note that because our dev set is so small and our LFs are relatively sparse, many LFs will appear to have zero coverage.
# Fortunately, our label model learns weights for LFs based on their outputs on the training set, which is generally much larger.
# %%
from snorkel.labeling import LFAnalysis
LFAnalysis(L_dev, worker_lfs).lf_summary(Y_dev).sample(5)
# %% [markdown]
# So the crowd labels in general are quite good! But how much of our dev and training
# sets do they cover?
# %%
print(f"Training set coverage: {100 * LFAnalysis(L_train).label_coverage(): 0.1f}%")
print(f"Dev set coverage: {100 * LFAnalysis(L_dev).label_coverage(): 0.1f}%")
# %% [markdown]
# ### Additional labeling functions
#
# To improve coverage of the training set, we can mix the crowdworker labeling functions with labeling
# functions of other types.
# For example, we can use [TextBlob](https://textblob.readthedocs.io/en/dev/index.html), a tool that provides a pretrained sentiment analyzer. We run TextBlob on our tweets and create some simple LFs that threshold its polarity score, similar to what we did in the spam_tutorial.
# %%
from snorkel.labeling import labeling_function
from snorkel.preprocess import preprocessor
from textblob import TextBlob
@preprocessor(memoize=True)
def textblob_polarity(x):
    """Attach TextBlob's sentiment polarity to the data point as ``x.polarity``."""
    x.polarity = TextBlob(x.tweet_text).polarity
    return x
# Label high polarity tweets as positive.
@labeling_function(pre=[textblob_polarity])
def polarity_positive(x):
    """Vote positive (1) when polarity exceeds 0.3; otherwise abstain (-1)."""
    if x.polarity > 0.3:
        return 1
    return -1
# Label low polarity tweets as negative.
@labeling_function(pre=[textblob_polarity])
def polarity_negative(x):
    """Vote negative (0) when polarity is below -0.25; otherwise abstain (-1)."""
    if x.polarity < -0.25:
        return 0
    return -1
# Similar to polarity_negative, but with higher coverage and lower precision.
@labeling_function(pre=[textblob_polarity])
def polarity_negative_2(x):
    """Vote negative (0) for any polarity at or below 0.3; otherwise abstain."""
    if x.polarity <= 0.3:
        return 0
    return -1
# %% [markdown]
# ### Applying labeling functions to the training set
# %% {"tags": ["md-exclude-output"]}
# Combine the three TextBlob heuristics with the per-worker LFs.
text_lfs = [polarity_positive, polarity_negative, polarity_negative_2]
lfs = text_lfs + worker_lfs
applier = PandasLFApplier(lfs)
L_train = applier.apply(df_train)
L_dev = applier.apply(df_dev)
# %%
LFAnalysis(L_dev, lfs).lf_summary(Y_dev).head()
# %% [markdown]
# Using the text-based LFs, we've expanded coverage on both our training set
# and dev set to 100%.
# We'll now take these noisy and conflicting labels, and use the LabelModel
# to denoise and combine them.
# %%
print(f"Training set coverage: {100 * LFAnalysis(L_train).label_coverage(): 0.1f}%")
print(f"Dev set coverage: {100 * LFAnalysis(L_dev).label_coverage(): 0.1f}%")
# %% [markdown]
# ## Train LabelModel And Generate Probabilistic Labels
# %% {"tags": ["md-exclude-output"]}
from snorkel.labeling import LabelModel
# Train LabelModel.
# cardinality=2: binary sentiment task. The model is fit on the unlabeled
# training matrix L_train only (no gold labels needed).
label_model = LabelModel(cardinality=2, verbose=True)
label_model.fit(L_train, n_epochs=100, seed=123, log_freq=20, l2=0.1, lr=0.01)
# %% [markdown]
# As a spot-check for the quality of our LabelModel, we'll score it on the dev set.
# %%
from snorkel.analysis import metric_score
# Spot-check the LabelModel's hard predictions against the gold dev labels.
preds_dev = label_model.predict(L_dev)
acc = metric_score(Y_dev, preds_dev, probs=None, metric="accuracy")
print(f"LabelModel Accuracy: {acc:.3f}")
# %% [markdown]
# We see that we get very high accuracy on the development set.
# This is due to the abundance of high quality crowdworker labels.
# **Since we don't have these high quality crowdsourcing labels for the
# test set or new incoming data points, we can't use the LabelModel reliably
# at inference time.**
# In order to run inference on new incoming data points, we need to train a
# discriminative model over the tweets themselves.
# Let's generate a set of labels for that training set.
# %%
preds_train = label_model.predict(L_train)
# %% [markdown]
# ## Use Soft Labels to Train End Model
# %% [markdown]
# ### Getting features from BERT
# Since we have very limited training data, we cannot train a complex model like an LSTM with a lot of parameters.
# Instead, we use a pre-trained model, [BERT](https://github.com/google-research/bert), to generate embeddings for each of our tweets, and treat the embedding values as features.
# This may take 5-10 minutes on a CPU, as the BERT model is very large.
# %% {"tags": ["md-exclude-output"]}
import numpy as np
import torch
from pytorch_transformers import BertModel, BertTokenizer
# Pre-trained BERT encoder and its matching tokenizer (large download).
model = BertModel.from_pretrained("bert-base-uncased")
tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
def encode_text(text):
    """Embed *text* by mean-pooling BERT's last hidden states into a 1-D numpy array."""
    token_ids = torch.tensor([tokenizer.encode(text)])
    hidden_states = model(token_ids)[0]
    return hidden_states.mean(1)[0].detach().numpy()
# Featurize each split: one fixed-length BERT embedding per tweet.
X_train = np.array(list(df_train.tweet_text.apply(encode_text).values))
X_test = np.array(list(df_test.tweet_text.apply(encode_text).values))
# %% [markdown]
# ### Model on labels
# Now, we train a simple logistic regression model on the BERT features, using labels
# obtained from our LabelModel.
# %% {"tags": ["md-exclude-output"]}
from sklearn.linear_model import LogisticRegression
# Fit a simple logistic regression on the BERT features using the
# LabelModel-generated labels as targets.
sklearn_model = LogisticRegression(solver="liblinear")
sklearn_model.fit(X_train, preds_train)
# %%
print(f"Accuracy of trained model: {sklearn_model.score(X_test, Y_test)}")
# %% [markdown]
# We now have a trained model that can be applied to future data points without requiring crowdsourced labels, and with accuracy not much lower than | |
import math
import pytest
import numpy as np
from verifai.features import *
### Utilities
def checkFlattening(domain, point, expectedLength=None, coordsAreNumeric=None):
    """Check that flatten/unflatten round-trips *point* and that per-coordinate
    metadata (meaning strings, numericization, numeric-ness) is consistent.

    NOTE: the parameter MUST stay named ``point`` -- the meaning strings are
    evaluated below with ``pointName='point'`` and resolve against this scope.
    """
    flat = domain.flatten(point)
    assert type(flat) is tuple
    assert len(flat) == domain.flattenedDimension
    if expectedLength is not None:
        assert len(flat) == expectedLength
    # Round trip: unflattening must reproduce the original point exactly.
    unflat = domain.unflatten(flat)
    assert point == unflat
    for index, value in enumerate(flat):
        # meaningOfFlatCoordinate returns a Python expression (referencing the
        # name 'point') that extracts this coordinate from the original point.
        meaning = domain.meaningOfFlatCoordinate(index, pointName='point')
        extractedValue = eval(meaning)
        # Flat values may be numericized encodings of non-numeric coordinates.
        assert extractedValue == domain.denumericizeCoordinate(value)
        assert domain.numericizeCoordinate(extractedValue) == value
        if coordsAreNumeric is not None:
            assert domain.coordinateIsNumerical(index) == coordsAreNumeric
### Constant and Categorical
def test_constant_sampling():
    # A Constant domain always samples its fixed value.
    dom = Constant(12.4)
    for _ in range(10):
        assert dom.uniformPoint() == 12.4

def test_constant_enumeration():
    # Enumerating a Constant yields exactly its single value.
    assert set(Constant(-7)) == { -7 }

def test_constant_flatten():
    # Constants contribute no coordinates when flattened.
    dom = Constant(3.14)
    checkFlattening(dom, dom.uniformPoint(), expectedLength=0)
def test_categorical_empty():
    # A Categorical must be given at least one value.
    with pytest.raises(RuntimeError):
        Categorical()

def test_categorical_duplicate():
    # Repeated values are rejected.
    with pytest.raises(RuntimeError):
        Categorical(1, 2, 1)

def test_categorical_sampling_form():
    squares = (1, 4, 9, 16, 25, 36)
    cat = Categorical(*squares)
    for sample in (cat.uniformPoint() for _ in range(10)):
        assert type(sample) is int
        assert sample in squares

def test_categorical_sampling_random():
    cat = Categorical(-1.2, 1.7)
    samples = [cat.uniformPoint() for _ in range(100)]
    # With 100 draws, both values are overwhelmingly likely to appear.
    assert any(s > 0 for s in samples)
    assert any(s < 0 for s in samples)

def test_categorical_enumeration():
    squares = (1, 4, 9, 16, 25, 36)
    assert set(Categorical(*squares)) == set(squares)

def test_categorical_flatten():
    squares = (1, 4, 9, 16, 25, 36)
    cat = Categorical(*squares)
    checkFlattening(cat, cat.uniformPoint(), expectedLength=1,
                    coordsAreNumeric=False)
    assert cat.pandasIndexForFlatCoordinate(0) == ()

def test_categorical_standardize():
    squares = (1, 4, 9, 16, 25, 36)
    cat = Categorical(*squares)
    # Categorical values standardize to a single integer index coordinate.
    assert cat.standardizedDimension == 0
    maxIndex = len(squares) - 1
    assert cat.standardizedIntervals == ((0, maxIndex),)
    for value in cat:
        stand = cat.standardize(value)
        assert type(stand) is tuple
        assert len(stand) == 1
        assert 0 <= stand[0] <= maxIndex
        assert cat.unstandardize(stand) == value
### Box and DiscreteBox
def test_box_zerodim():
    # A Box needs at least one interval.
    with pytest.raises(RuntimeError):
        Box()

def test_box_bad_interval():
    # Intervals must be (low, high) pairs.
    with pytest.raises(RuntimeError):
        Box((1, 2), (1, 4, 9))

def test_box_sampling_form():
    def check(box):
        for _ in range(10):
            pt = box.uniformPoint()
            assert type(pt) is tuple
            assert len(pt) == 2
            assert all(0 <= c <= 2 for c in pt)
    check(Box((0, 2), (2, 0)))        # second interval given reversed
    check(DiscreteBox((0, 2), (0, 2)))

def test_box_sampling_random():
    def check(box):
        samples = [box.uniformPoint()[0] for _ in range(100)]
        # 100 draws from (0, 10) should land on both sides of 5.
        assert any(s > 5 for s in samples)
        assert any(s < 5 for s in samples)
    check(Box((0, 10)))
    check(DiscreteBox((0, 10)))

def test_box_enumeration():
    box = DiscreteBox((0, 3), (-1, 1))
    expected = {(a, b) for a in range(4) for b in range(-1, 2)}
    assert set(box) == expected

def test_box_flatten():
    box = Box((0, 2), (0, 2), (0, 2))
    checkFlattening(box, box.uniformPoint(), expectedLength=3,
                    coordsAreNumeric=True)
    for axis in range(3):
        assert box.pandasIndexForFlatCoordinate(axis) == (axis,)

def test_box_standardize():
    box = Box((0, 1), (0, 5), (-3, 3))
    # Continuous boxes standardize onto the unit cube.
    assert box.standardizedDimension == 3
    assert box.standardizedIntervals == ()
    for _ in range(10):
        pt = box.uniformPoint()
        stand = box.standardize(pt)
        assert type(stand) is tuple
        assert len(stand) == 3
        assert all(0 <= c <= 1 for c in stand)
        assert box.unstandardize(stand) == pt

def test_discrete_box_standardize():
    intervals = ((0, 1), (0, 5), (-3, 3))
    box = DiscreteBox(*intervals)
    # Discrete boxes keep their integer intervals instead of rescaling.
    assert box.standardizedDimension == 0
    assert box.standardizedIntervals == intervals
    for pt in box:
        stand = box.standardize(pt)
        assert type(stand) is tuple
        assert len(stand) == len(intervals)
        for coord, (lo, hi) in zip(stand, intervals):
            assert lo <= coord <= hi
        assert box.unstandardize(stand) == pt
### Arrays
def test_array_zerodim():
    # An Array shape must be non-empty.
    with pytest.raises(RuntimeError):
        Array(Box((0, 1)), ())

def test_array_negative_dimension():
    # Negative extents are rejected.
    with pytest.raises(RuntimeError):
        Array(Box((0, 1)), (1, 2, -5))

def test_array_sampling_form():
    array = Array(Box((0, 2)), (3, 2))
    for _ in range(10):
        pt = array.uniformPoint()
        assert type(pt) is tuple
        assert len(pt) == 3
        for row in pt:
            assert type(row) is tuple
            assert len(row) == 2
        for element in array.elementsOfPoint(pt):
            assert 0 <= element[0] <= 2

def test_array_sampling_random():
    array = Array(Box((-1, 1)), (100,))
    pt = array.uniformPoint()
    # 100 draws from (-1, 1) should straddle zero.
    assert any(element[0] > 0 for element in pt)
    assert any(element[0] < 0 for element in pt)

def test_array_enumeration():
    array = Array(DiscreteBox((0, 1)), (2, 2))
    cells = ((0,), (1,))
    rows = [(a, b) for a in cells for b in cells]
    expected = {(top, bottom) for top in rows for bottom in rows}
    assert set(array) == expected

def test_array_element_iteration():
    elts = [1, 2, 3, 4, 5, 6]
    # Elements fill the array in row-major order for either shape.
    assert Array(Real(), (2, 3)).pointWithElements(elts) == ((1, 2, 3), (4, 5, 6))
    assert Array(Real(), (3, 2)).pointWithElements(elts) == ((1, 2), (3, 4), (5, 6))
def test_array_element_indexing():
    array = Array(Box((0, 3)), (2, 3))
    for _ in range(10):
        pt = array.uniformPoint()
        assert len(pt) == 2
        for row in (pt[0], pt[1]):
            assert len(row) == 3
            for element in row:
                assert len(element) == 1
                assert 0 <= element[0] <= 3

def test_array_flatten():
    array = Array(Box((-1, 1)), (5, 2, 3))
    checkFlattening(array, array.uniformPoint(), expectedLength=30,
                    coordsAreNumeric=True)

def test_array_pandas():
    array = Array(Box((-1, 1)), (5, 2, 3))
    pt = array.uniformPoint()
    for index, value in enumerate(array.flatten(pt)):
        # Each pandas index is a path of subscripts into the nested point.
        extracted = pt
        for subscript in array.pandasIndexForFlatCoordinate(index):
            extracted = extracted[subscript]
        assert extracted == value

def test_array_standardize():
    array = Array(Box((-1, 1)), (5, 2, 3))
    assert array.standardizedDimension == 30
    assert array.standardizedIntervals == ()
    for _ in range(10):
        pt = array.uniformPoint()
        stand = array.standardize(pt)
        assert type(stand) is tuple
        assert len(stand) == 30
        assert all(-1 <= c <= 1 for c in stand)
        assert array.unstandardize(stand) == pt

def test_array_discrete_standardize():
    interval = (-1, 1)
    array = Array(DiscreteBox(interval), (5, 2, 3))
    # Discrete arrays expose one integer interval per element.
    assert array.standardizedDimension == 0
    assert array.standardizedIntervals == (interval,) * 30
    for _ in range(10):
        pt = array.uniformPoint()
        stand = array.standardize(pt)
        assert type(stand) is tuple
        assert len(stand) == 30
        assert all(-1 <= c <= 1 for c in stand)
        assert array.unstandardize(stand) == pt
def test_array_of_arrays():
    inner = Array(Box((-1, 1)), (3,))
    outer = Array(inner, (4,))
    pt = outer.uniformPoint()
    assert type(pt) is tuple
    assert len(pt) == 4
    for inner_pt in pt:
        assert type(inner_pt) is tuple
        assert len(inner_pt) == 3
        for element in inner_pt:
            assert type(element) is tuple
            assert len(element) == 1
            assert -1 <= element[0] <= 1
    checkFlattening(outer, pt, expectedLength=12, coordsAreNumeric=True)

def test_array_empty():
    # A zero-length array has exactly one point: the empty tuple.
    array = Array(Box((0, 1)), (0,))
    for _ in range(10):
        pt = array.uniformPoint()
        assert type(pt) is tuple
        assert pt == ()
    assert tuple(array) == ((),)
    assert array.pointWithElements(()) == ()
    assert tuple(array.elementsOfPoint(())) == ()
    assert array.flattenedDimension == 0
    assert array.flatten(()) == ()
    assert array.unflatten(()) == ()
    assert array.standardizedDimension == 0
    assert array.standardizedIntervals == ()
    assert array.standardize(()) == ()
    assert array.unstandardize(()) == ()

def test_array_empty2():
    # Shape (3, 0): three empty rows, still only one possible point.
    array = Array(Box((0, 1)), (3, 0))
    empty_point = ((), (), ())
    for _ in range(10):
        pt = array.uniformPoint()
        assert type(pt) is tuple
        assert pt == empty_point
    assert tuple(array) == (empty_point,)
    assert array.pointWithElements(()) == empty_point
    assert tuple(array.elementsOfPoint(empty_point)) == ()
    assert array.flattenedDimension == 0
    assert array.flatten(empty_point) == ()
    assert array.unflatten(()) == empty_point
    assert array.standardizedDimension == 0
    assert array.standardizedIntervals == ()
    assert array.standardize(empty_point) == ()
    assert array.unstandardize(()) == empty_point
### Structs
def test_struct_sampling_form():
    struct = Struct({
        'a': Box((-1, 1)),
        'b': DiscreteBox((4, 5))
    })
    for _ in range(10):
        pt = struct.uniformPoint()
        # Points are instances of the struct's generated point type
        # (namedtuple-style, exposing _fields).
        assert type(pt) is struct.makePoint
        assert set(pt._fields) == { 'a', 'b' }
        assert -1 <= pt.a[0] <= 1
        assert 4 <= pt.b[0] <= 5

def test_struct_sampling_random():
    box = DiscreteBox((0, 10))
    struct = Struct({ 'a': box, 'b': box })
    flats = [struct.flatten(struct.uniformPoint()) for _ in range(100)]
    # Fields are sampled independently, so both orderings should occur.
    assert any(a > b for a, b in flats)
    assert any(a < b for a, b in flats)
def test_struct_enumeration():
box = DiscreteBox((0, 2))
struct = Struct({ 'a': box, 'b': box })
assert set(struct) == {
struct.makePoint(**pt) for pt in [
{ 'a': (0,), 'b': (0,) },
{ 'a': (0,), 'b': (1,) },
{ 'a': (0,), 'b': (2,) },
{ 'a': (1,), 'b': (0,) },
{ 'a': (1,), 'b': (1,) },
{ 'a': | |
flow_id to be used across UL and DL.
# Since HSIA flow is the only symmetric flow currently, we need to
# re-use the flow_id across both direction. The 'flow_category'
# takes priority over flow_cookie to find any available HSIA_FLOW
# id for the ONU.
flow_category = HSIA_FLOW
if self.is_no_l2_modification_flow(classifier, action):
flow_category = HSIA_TRANSPARENT.format(classifier[VLAN_VID])
flow_id = self.resource_mgr.get_flow_id(intf_id, onu_id, uni_id,
flow_category=flow_category,
flow_pcp=classifier[VLAN_PCP])
if flow_id is None:
self.log.error("hsia-flow-unavailable")
return
flow = openolt_pb2.Flow(
access_intf_id=intf_id, onu_id=onu_id, uni_id=uni_id,
flow_id=flow_id, flow_type=direction, alloc_id=alloc_id,
network_intf_id=self.data_model.olt_nni_intf_id(),
gemport_id=gemport_id,
classifier=self.mk_classifier(classifier),
action=self.mk_action(action), priority=logical_flow.priority,
port_no=port_no, cookie=logical_flow.cookie)
if self.add_flow_to_device(flow, logical_flow, flow_store_cookie):
flow_info = self._get_flow_info_as_json_blob(flow,
flow_store_cookie,
flow_category)
self.update_flow_info_to_kv_store(flow.access_intf_id,
flow.onu_id, flow.uni_id,
flow.flow_id, flow_info)
    def add_dhcp_trap_uni(self, intf_id, onu_id, uni_id, port_no, classifier,
                          action, logical_flow, alloc_id, gemport_id):
        """Install an upstream trap-to-host flow for DHCP on a subscriber UNI.

        Rewrites *classifier*/*action* in place to match DHCP client traffic
        (UDP 68 -> 67, single-tagged, any VLAN) and punt it to the host.
        """
        self.log.debug('add dhcp upstream trap', classifier=classifier,
                       intf_id=intf_id, onu_id=onu_id, uni_id=uni_id,
                       action=action)
        # Replace whatever action was requested with a trap-to-host action.
        action.clear()
        action[TRAP_TO_HOST] = True
        # DHCP client->server ports; match single-tagged packets regardless
        # of VLAN id (VLAN_VID removed below).
        classifier[UDP_SRC] = 68
        classifier[UDP_DST] = 67
        classifier[PACKET_TAG_TYPE] = SINGLE_TAG
        classifier.pop(VLAN_VID, None)
        flow_store_cookie = self._get_flow_store_cookie(classifier,
                                                        gemport_id)
        if self.resource_mgr.is_flow_cookie_on_kv_store(intf_id, onu_id,
                                                        uni_id,
                                                        flow_store_cookie):
            # Already provisioned earlier; nothing to do.
            self.log.debug('flow-exists--not-re-adding')
        else:
            flow_id = self.resource_mgr.get_flow_id(
                intf_id, onu_id, uni_id,
                flow_store_cookie=flow_store_cookie,
            )
            dhcp_flow = openolt_pb2.Flow(
                onu_id=onu_id, uni_id=uni_id, flow_id=flow_id,
                flow_type=UPSTREAM, access_intf_id=intf_id,
                gemport_id=gemport_id, alloc_id=alloc_id,
                network_intf_id=self.data_model.olt_nni_intf_id(),
                priority=logical_flow.priority,
                classifier=self.mk_classifier(classifier),
                action=self.mk_action(action),
                port_no=port_no,
                cookie=logical_flow.cookie)
            # Persist flow metadata only after the device accepts the flow.
            if self.add_flow_to_device(dhcp_flow, logical_flow, flow_store_cookie):
                flow_info = self._get_flow_info_as_json_blob(dhcp_flow,
                                                             flow_store_cookie,
                                                             DHCP_FLOW)
                self.update_flow_info_to_kv_store(dhcp_flow.access_intf_id,
                                                  dhcp_flow.onu_id,
                                                  dhcp_flow.uni_id,
                                                  dhcp_flow.flow_id,
                                                  flow_info)
    def add_eapol_flow(self, intf_id, onu_id, uni_id, port_no, logical_flow,
                       alloc_id, gemport_id, vlan_id=DEFAULT_MGMT_VLAN, classifier=None, action=None):
        """Install upstream (and, when newly added, downstream) EAPOL trap flows.

        The upstream flow traps single-tagged EAP frames on *vlan_id* to the
        host; the downstream flow uses a per-UNI "special" VLAN (see below).
        """
        uplink_classifier = dict()
        uplink_classifier[ETH_TYPE] = EAP_ETH_TYPE
        uplink_classifier[PACKET_TAG_TYPE] = SINGLE_TAG
        uplink_classifier[VLAN_VID] = vlan_id
        if classifier is not None:
            # Carry the caller's p-bits over to the EAPOL classifier.
            # NOTE(review): assumes VLAN_PCP is always present in *classifier*
            # when it is passed -- would raise KeyError otherwise; confirm
            # with callers.
            uplink_classifier[VLAN_PCP] = classifier[VLAN_PCP]
        uplink_action = dict()
        uplink_action[TRAP_TO_HOST] = True
        flow_store_cookie = self._get_flow_store_cookie(uplink_classifier,
                                                        gemport_id)
        if self.resource_mgr.is_flow_cookie_on_kv_store(intf_id, onu_id,
                                                        uni_id,
                                                        flow_store_cookie):
            self.log.debug('flow-exists--not-re-adding')
        else:
            # Add Upstream EAPOL Flow.
            uplink_flow_id = self.resource_mgr.get_flow_id(
                intf_id, onu_id, uni_id,
                flow_store_cookie=flow_store_cookie
            )
            upstream_flow = openolt_pb2.Flow(
                access_intf_id=intf_id, onu_id=onu_id, uni_id=uni_id,
                flow_id=uplink_flow_id, flow_type=UPSTREAM, alloc_id=alloc_id,
                network_intf_id=self.data_model.olt_nni_intf_id(),
                gemport_id=gemport_id,
                classifier=self.mk_classifier(uplink_classifier),
                action=self.mk_action(uplink_action),
                priority=logical_flow.priority,
                port_no=port_no,
                cookie=logical_flow.cookie)
            # Work on a copy: the OpenFlow match is extended with the
            # (present-bit | vlan_id) OXM field before storing.
            logical_flow = copy.deepcopy(logical_flow)
            logical_flow.match.oxm_fields.extend(fd.mk_oxm_fields([fd.vlan_vid(
                vlan_id | 0x1000)]))
            logical_flow.match.type = OFPMT_OXM
            if self.add_flow_to_device(upstream_flow, logical_flow, flow_store_cookie):
                flow_info = self._get_flow_info_as_json_blob(upstream_flow,
                                                             flow_store_cookie,
                                                             EAPOL_FLOW)
                self.update_flow_info_to_kv_store(upstream_flow.access_intf_id,
                                                  upstream_flow.onu_id,
                                                  upstream_flow.uni_id,
                                                  upstream_flow.flow_id,
                                                  flow_info)
            # Add Downstream EAPOL Flow, Only for first EAP flow (BAL
            # requirement)
            # On one of the platforms (Broadcom BAL), when same DL classifier
            # vlan was used across multiple ONUs, eapol flow re-adds after
            # flow delete (cases of onu reboot/disable) fails.
            # In order to generate unique vlan, a combination of intf_id
            # onu_id and uni_id is used.
            # uni_id defaults to 0, so add 1 to it.
            # NOTE(review): the product is 4090 whenever intf_id or onu_id is
            # 0, so the "unique vlan" claim does not hold for those ids --
            # confirm against the deployment's id ranges.
            special_vlan_downstream_flow = 4090 - intf_id * onu_id * (uni_id + 1)
            # Assert that we do not generate invalid vlans under no condition
            assert special_vlan_downstream_flow >= 2
            downlink_classifier = dict()
            downlink_classifier[PACKET_TAG_TYPE] = SINGLE_TAG
            downlink_classifier[ETH_TYPE] = EAP_ETH_TYPE
            downlink_classifier[VLAN_VID] = special_vlan_downstream_flow
            downlink_action = dict()
            downlink_action[PUSH_VLAN] = True
            downlink_action[VLAN_VID] = vlan_id
            flow_store_cookie = self._get_flow_store_cookie(
                downlink_classifier, gemport_id)
            if self.resource_mgr.is_flow_cookie_on_kv_store(
                    intf_id, onu_id, uni_id, flow_store_cookie):
                self.log.debug('flow-exists--not-re-adding')
            else:
                downlink_flow_id = self.resource_mgr.get_flow_id(
                    intf_id, onu_id, uni_id,
                    flow_store_cookie=flow_store_cookie
                )
                downstream_flow = openolt_pb2.Flow(
                    access_intf_id=intf_id, onu_id=onu_id, uni_id=uni_id,
                    flow_id=downlink_flow_id, flow_type=DOWNSTREAM,
                    alloc_id=alloc_id,
                    network_intf_id=self.data_model.olt_nni_intf_id(),
                    gemport_id=gemport_id,
                    classifier=self.mk_classifier(downlink_classifier),
                    action=self.mk_action(downlink_action),
                    priority=logical_flow.priority,
                    port_no=port_no,
                    cookie=logical_flow.cookie)
                # Build a matching downstream logical flow: in-port is the
                # upstream flow's out-port, matching the special VLAN, and
                # outputting to the subscriber's UNI port.
                downstream_logical_flow = ofp_flow_stats(
                    id=logical_flow.id, cookie=logical_flow.cookie,
                    table_id=logical_flow.table_id,
                    priority=logical_flow.priority, flags=logical_flow.flags)
                downstream_logical_flow.match.oxm_fields.extend(
                    fd.mk_oxm_fields(
                        [fd.in_port(fd.get_out_port(logical_flow)),
                         fd.vlan_vid(special_vlan_downstream_flow | 0x1000)]))
                downstream_logical_flow.match.type = OFPMT_OXM
                downstream_logical_flow.instructions.extend(
                    fd.mk_instructions_from_actions([fd.output(
                        self.platform.mk_uni_port_num(intf_id, onu_id,
                                                      uni_id))]))
                if self.add_flow_to_device(downstream_flow,
                                           downstream_logical_flow, flow_store_cookie):
                    flow_info = self._get_flow_info_as_json_blob(
                        downstream_flow, flow_store_cookie, EAPOL_FLOW)
                    self.update_flow_info_to_kv_store(
                        downstream_flow.access_intf_id, downstream_flow.onu_id,
                        downstream_flow.uni_id, downstream_flow.flow_id,
                        flow_info)
def repush_all_different_flows(self):
# Check if the device is supposed to have flows, if so add them
# Recover static flows after a reboot
logical_flows = self.logical_flows_proxy.get('/').items
devices_flows = self.flows_proxy.get('/').items
logical_flows_ids_provisioned = [f.cookie for f in devices_flows]
for logical_flow in logical_flows:
try:
if logical_flow.id not in logical_flows_ids_provisioned:
self.add_flow(logical_flow)
except Exception as e:
self.log.exception('Problem reading this flow', e=e)
def reset_flows(self):
self.flows_proxy.update('/', Flows(items=[]))
self.log.debug("purged-all-device-flows")
self.logical_flows_proxy.update('/', Flows(items=[]))
self.log.debug("purged-all-logical-flows")
""" Add a downstream DHCP trap flow on the NNI interface
"""
def add_dhcp_trap_nni(self, logical_flow, classifier,
port_no, network_intf_id=0):
self.log.info("trap-dhcp-of-nni-flow")
classifier[PACKET_TAG_TYPE] = DOUBLE_TAG
action = dict()
action[TRAP_TO_HOST] = True
# We manage flow_id resource pool on per PON port basis.
# Since this situation is tricky, as a hack, we pass the NNI port
# index (network_intf_id) as PON port Index for the flow_id resource
# pool. Also, there is no ONU Id available for trapping LLDP packets
# on NNI port, use onu_id as -1 (invalid)
# ****************** CAVEAT *******************
# This logic works if the NNI Port Id falls within the same valid
# range of PON Port Ids. If this doesn't work for some OLT Vendor
# we need to have a re-look at this.
# *********************************************
onu_id = -1
uni_id = -1
flow_store_cookie = self._get_flow_store_cookie(classifier)
if self.resource_mgr.is_flow_cookie_on_kv_store(
network_intf_id, onu_id, uni_id, flow_store_cookie):
self.log.debug('flow-exists--not-re-adding')
else:
flow_id = self.resource_mgr.get_flow_id(
network_intf_id, onu_id, uni_id,
flow_store_cookie=flow_store_cookie)
downstream_flow = openolt_pb2.Flow(
access_intf_id=-1, # access_intf_id not required
onu_id=onu_id, # onu_id not required
uni_id=uni_id, # uni_id not used
flow_id=flow_id,
flow_type=DOWNSTREAM,
network_intf_id=network_intf_id,
gemport_id=-1, # gemport_id not required
classifier=self.mk_classifier(classifier),
action=self.mk_action(action),
priority=logical_flow.priority,
port_no=port_no,
cookie=logical_flow.cookie)
self.log.debug('add dhcp downstream trap', classifier=classifier,
action=action, flow=downstream_flow,
port_no=port_no)
if self.add_flow_to_device(downstream_flow, logical_flow, flow_store_cookie):
flow_info = self._get_flow_info_as_json_blob(downstream_flow,
flow_store_cookie, DHCP_FLOW)
self.update_flow_info_to_kv_store(
network_intf_id, onu_id, uni_id, flow_id, flow_info)
    def add_lldp_flow(self, logical_flow, port_no, network_intf_id=0):
        """Install a downstream trap-to-host flow for LLDP on the NNI port."""
        classifier = dict()
        classifier[ETH_TYPE] = LLDP_ETH_TYPE
        classifier[PACKET_TAG_TYPE] = UNTAGGED
        action = dict()
        action[TRAP_TO_HOST] = True
        # LLDP flow is installed to trap LLDP packets on the NNI port.
        # We manage flow_id resource pool on per PON port basis.
        # Since this situation is tricky, as a hack, we pass the NNI port
        # index (network_intf_id) as PON port Index for the flow_id resource
        # pool. Also, there is no ONU Id available for trapping LLDP packets
        # on NNI port, use onu_id as -1 (invalid)
        # ****************** CAVEAT *******************
        # This logic works if the NNI Port Id falls within the same valid
        # range of PON Port Ids. If this doesn't work for some OLT Vendor
        # we need to have a re-look at this.
        # *********************************************
        onu_id = -1
        uni_id = -1
        flow_store_cookie = self._get_flow_store_cookie(classifier)
        if self.resource_mgr.is_flow_cookie_on_kv_store(
                network_intf_id, onu_id, uni_id, flow_store_cookie):
            self.log.debug('flow-exists--not-re-adding')
        else:
            flow_id = self.resource_mgr.get_flow_id(
                network_intf_id, onu_id, uni_id, flow_store_cookie=flow_store_cookie)
            downstream_flow = openolt_pb2.Flow(
                access_intf_id=-1,  # access_intf_id not required
                onu_id=onu_id,  # onu_id not required
                uni_id=uni_id,  # uni_id not used
                flow_id=flow_id,
                flow_type=DOWNSTREAM,
                network_intf_id=network_intf_id,
                gemport_id=-1,  # gemport_id not required
                classifier=self.mk_classifier(classifier),
                action=self.mk_action(action),
                priority=logical_flow.priority,
                port_no=port_no,
                cookie=logical_flow.cookie)
            self.log.debug('add lldp downstream trap', classifier=classifier,
                           action=action, flow=downstream_flow,
                           port_no=port_no)
            # Persist flow metadata only after the device accepts the flow.
            if self.add_flow_to_device(downstream_flow, logical_flow, flow_store_cookie):
                flow_info = self._get_flow_info_as_json_blob(downstream_flow,
                                                             flow_store_cookie,
                                                             LLDP_FLOW)
                self.update_flow_info_to_kv_store(
                    network_intf_id, onu_id, uni_id, flow_id, flow_info)
@staticmethod
def mk_classifier(classifier_info):
classifier = openolt_pb2.Classifier()
if ETH_TYPE in classifier_info:
classifier.eth_type = classifier_info[ETH_TYPE]
if IP_PROTO in classifier_info:
classifier.ip_proto = classifier_info[IP_PROTO]
if VLAN_VID in classifier_info and \
classifier_info[VLAN_VID] != RESERVED_VLAN:
classifier.o_vid = classifier_info[VLAN_VID]
if METADATA in classifier_info and \
classifier_info[METADATA] != RESERVED_VLAN:
classifier.i_vid = classifier_info[METADATA]
if VLAN_PCP in classifier_info:
classifier.o_pbits = classifier_info[VLAN_PCP]
if UDP_SRC in classifier_info:
classifier.src_port = classifier_info[UDP_SRC]
if UDP_DST in classifier_info:
classifier.dst_port = classifier_info[UDP_DST]
if IPV4_DST in classifier_info:
classifier.dst_ip = classifier_info[IPV4_DST]
if IPV4_SRC in classifier_info:
classifier.src_ip = classifier_info[IPV4_SRC]
if PACKET_TAG_TYPE in classifier_info:
if classifier_info[PACKET_TAG_TYPE] == SINGLE_TAG:
classifier.pkt_tag_type = SINGLE_TAG
elif classifier_info[PACKET_TAG_TYPE] == DOUBLE_TAG:
classifier.pkt_tag_type = DOUBLE_TAG
elif classifier_info[PACKET_TAG_TYPE] == UNTAGGED:
classifier.pkt_tag_type = UNTAGGED
else:
classifier.pkt_tag_type = 'none'
return classifier
    def mk_action(self, action_info):
        """Translate an action dict into an openolt_pb2.Action message.

        Returns None (after logging) when *action_info* contains none of the
        recognized action keys (POP_VLAN / PUSH_VLAN / TRAP_TO_HOST).
        """
        action = openolt_pb2.Action()
        if POP_VLAN in action_info:
            # NOTE(review): assumes VLAN_VID accompanies POP_VLAN -- would
            # raise KeyError otherwise; confirm with callers.
            action.o_vid = action_info[VLAN_VID]
            action.cmd.remove_outer_tag = True
        elif PUSH_VLAN in action_info:
            action.o_vid = action_info[VLAN_VID]
            action.cmd.add_outer_tag = True
            # Remark the outer tag's p-bits only when explicitly requested.
            if VLAN_PCP in action_info:
                action.o_pbits = action_info[VLAN_PCP]
        elif TRAP_TO_HOST in action_info:
            action.cmd.trap_to_host = True
        else:
            self.log.info('Invalid-action-field', action_info=action_info)
            return
        return action
def is_eap_enabled(self, intf_id, onu_id, uni_id):
flows = self.logical_flows_proxy.get('/').items
for flow in flows:
eap_flow = False
eap_intf_id = None
eap_onu_id = None
eap_uni_id = None
for field in fd.get_ofb_fields(flow):
if field.type == fd.ETH_TYPE:
if field.eth_type == EAP_ETH_TYPE:
eap_flow = True
if field.type == fd.IN_PORT:
eap_intf_id = self.platform.intf_id_from_uni_port_num(
field.port)
eap_onu_id = self.platform.onu_id_from_port_num(field.port)
eap_uni_id = self.platform.uni_id_from_port_num(field.port)
if eap_flow:
self.log.debug('eap flow detected', onu_id=onu_id,
uni_id=uni_id, intf_id=intf_id,
eap_intf_id=eap_intf_id, eap_onu_id=eap_onu_id,
eap_uni_id=eap_uni_id)
if eap_flow and intf_id == eap_intf_id \
and onu_id == eap_onu_id and uni_id == eap_uni_id:
return True, flow
return False, None
def get_subscriber_vlan(self, port):
self.log.debug('looking from subscriber flow for port', port=port)
flows = self.logical_flows_proxy.get('/').items
for flow in flows:
in_port = fd.get_in_port(flow)
out_port = fd.get_out_port(flow)
if in_port == port and out_port is not None and \
self.platform.intf_id_to_port_type_name(out_port) \
== Port.ETHERNET_NNI:
fields = fd.get_ofb_fields(flow)
self.log.debug('subscriber flow found', fields=fields)
for field in fields:
if field.type == OFPXMT_OFB_VLAN_VID:
self.log.debug('subscriber vlan found',
vlan_id=field.vlan_vid)
return field.vlan_vid & 0x0fff
self.log.debug('No subscriber flow found', port=port)
return None
def add_flow_to_device(self, flow, logical_flow, flow_store_cookie=None):
self.log.debug('pushing flow to device', flow=flow)
try:
| |
res[3] == {'number': 3, 'letter': 'c'}
assert res[4] == {'number': 3, 'letter': 'c'}
assert res[5] == {'number': 3, 'letter': 'c'}
def test_flat_map_identity(self):
t = XFrame({'id': [1, 2, 3], 'val': ['a', 'b', 'c']})
res = t.flat_map(['number', 'letter'],
lambda row: [[row['id'], row['val']]],
column_types=[int, str])
assert res.column_names() == ['number', 'letter']
assert res.dtype() == [int, str]
assert res[0] == {'number': 1, 'letter': 'a'}
assert res[1] == {'number': 2, 'letter': 'b'}
assert res[2] == {'number': 3, 'letter': 'c'}
def test_flat_map_mapped(self):
t = XFrame({'id': [1, 2, 3], 'val': ['a', 'b', 'c']})
res = t.flat_map(['number', 'letter'],
lambda row: [[row['id'] * 2, row['val'] + 'x']],
column_types=[int, str])
assert res.column_names() == ['number', 'letter']
assert res.dtype() == [int, str]
assert res[0] == {'number': 2, 'letter': 'ax'}
assert res[1] == {'number': 4, 'letter': 'bx'}
assert res[2] == {'number': 6, 'letter': 'cx'}
def test_flat_map_auto(self):
t = XFrame({'id': [1, 2, 3], 'val': ['a', 'b', 'c']})
res = t.flat_map(['number', 'letter'],
lambda row: [[row['id'] * 2, row['val'] + 'x']])
assert res.column_names() == ['number', 'letter']
assert res.dtype() == [int, str]
assert res[0] == {'number': 2, 'letter': 'ax'}
assert res[1] == {'number': 4, 'letter': 'bx'}
assert res[2] == {'number': 6, 'letter': 'cx'}
# TODO: test auto error cases
# noinspection PyClassHasNoInit
class TestXFrameSample:
    """
    Tests XFrame sample
    """

    @pytest.mark.skip(reason='depends on number of partitions')
    def test_sample_02(self):
        """A 20% sample with seed 2 keeps exactly one known row."""
        frame = XFrame({'id': [1, 2, 3, 4, 5], 'val': ['a', 'b', 'c', 'd', 'e']})
        res = frame.sample(0.2, 2)
        assert len(res) == 1
        assert res[0] == {'id': 2, 'val': 'b'}

    @pytest.mark.skip(reason='depends on number of partitions')
    def test_sample_08(self):
        """An 80% sample with seed 3 keeps three known rows."""
        frame = XFrame({'id': [1, 2, 3, 4, 5], 'val': ['a', 'b', 'c', 'd', 'e']})
        res = frame.sample(0.8, 3)
        assert len(res) == 3
        expected = [{'id': 2, 'val': 'b'}, {'id': 4, 'val': 'd'}, {'id': 5, 'val': 'e'}]
        for i, row in enumerate(expected):
            assert res[i] == row
# noinspection PyClassHasNoInit
class TestXFrameRandomSplit:
    """
    Tests XFrame random_split
    """

    @pytest.mark.skip(reason='depends on number of partitions')
    def test_random_split(self):
        """A 50/50 split with seed 1 yields the two known partitions."""
        frame = XFrame({'id': [1, 2, 3, 4, 5], 'val': ['a', 'b', 'c', 'd', 'e']})
        res1, res2 = frame.random_split(0.5, 1)
        assert len(res1) == 3
        first_part = [{'id': 1, 'val': 'a'}, {'id': 4, 'val': 'd'}, {'id': 5, 'val': 'e'}]
        for i, row in enumerate(first_part):
            assert res1[i] == row
        assert len(res2) == 2
        second_part = [{'id': 2, 'val': 'b'}, {'id': 3, 'val': 'c'}]
        for i, row in enumerate(second_part):
            assert res2[i] == row
# noinspection PyClassHasNoInit
class TestXFrameTopk:
    """
    Tests XFrame topk
    """

    def test_topk_int(self):
        """topk on an int column returns the k largest rows, descending."""
        frame = XFrame({'id': [10, 20, 30], 'val': ['a', 'b', 'c']})
        res = frame.topk('id', 2)
        assert len(res) == 2
        # noinspection PyUnresolvedReferences
        assert (XArray([30, 20]) == res['id']).all()
        assert list(res['val']) == ['c', 'b']
        assert res.column_types() == [int, str]
        assert res.column_names() == ['id', 'val']

    def test_topk_int_reverse(self):
        """topk with reverse=True returns the k smallest rows, ascending."""
        frame = XFrame({'id': [30, 20, 10], 'val': ['c', 'b', 'a']})
        res = frame.topk('id', 2, reverse=True)
        assert len(res) == 2
        assert list(res['id']) == [10, 20]
        assert list(res['val']) == ['a', 'b']

    # noinspection PyUnresolvedReferences
    def test_topk_float(self):
        """topk also works on float columns."""
        frame = XFrame({'id': [10.0, 20.0, 30.0], 'val': ['a', 'b', 'c']})
        res = frame.topk('id', 2)
        assert len(res) == 2
        assert (XArray([30.0, 20.0]) == res['id']).all()
        assert list(res['val']) == ['c', 'b']
        assert res.column_types() == [float, str]
        assert res.column_names() == ['id', 'val']

    def test_topk_float_reverse(self):
        """topk with reverse=True on floats returns the smallest values."""
        frame = XFrame({'id': [30.0, 20.0, 10.0], 'val': ['c', 'b', 'a']})
        res = frame.topk('id', 2, reverse=True)
        assert len(res) == 2
        assert list(res['id']) == [10.0, 20.0]
        assert list(res['val']) == ['a', 'b']

    def test_topk_str(self):
        """topk on a str column sorts lexicographically, largest first."""
        frame = XFrame({'id': [30, 20, 10], 'val': ['a', 'b', 'c']})
        res = frame.topk('val', 2)
        assert len(res) == 2
        assert list(res['id']) == [10, 20]
        assert list(res['val']) == ['c', 'b']
        assert res.column_types() == [int, str]
        assert res.column_names() == ['id', 'val']

    def test_topk_str_reverse(self):
        """topk with reverse=True on strings returns the smallest values."""
        frame = XFrame({'id': [10, 20, 30], 'val': ['c', 'b', 'a']})
        res = frame.topk('val', 2, reverse=True)
        assert len(res) == 2
        assert list(res['id']) == [30, 20]
        assert list(res['val']) == ['a', 'b']
# noinspection PyClassHasNoInit
class TestXFrameSaveBinary:
    """
    Tests XFrame save binary format
    """

    def test_save(self):
        """Saving in binary format writes a pickled metadata file."""
        t = XFrame({'id': [30, 20, 10], 'val': ['a', 'b', 'c']})
        path = 'tmp/frame'
        t.save(path, format='binary')
        # Pickle data is binary: the file must be opened in 'rb' mode.
        # (Text mode happened to work on Python 2 but breaks pickle.load
        # on Python 3; 'rb' is correct on both.)
        with open(os.path.join(path, '_metadata'), 'rb') as f:
            metadata = pickle.load(f)
        assert metadata == [['id', 'val'], [int, str]]
        # TODO find some way to check the data

    def test_save_not_exist(self, tmpdir):
        """Saving into a directory that does not exist yet must succeed."""
        path = os.path.join(str(tmpdir), 'frame')
        t = XFrame({'id': [30, 20, 10], 'val': ['a', 'b', 'c']})
        t.save(path, format='binary')
        # TODO find some way to check the data
# noinspection PyClassHasNoInit
class TestXFrameSaveCsv:
    """
    Tests XFrame save csv format
    """

    def test_save(self):
        """Saving as csv writes a header line plus one line per row."""
        frame = XFrame({'id': [30, 20, 10], 'val': ['a', 'b', 'c']})
        path = 'tmp/frame-csv'
        frame.save(path, format='csv')
        with open(path + '.csv') as f:
            lines = [f.readline().rstrip() for _ in range(4)]
        assert lines == ['id,val', '30,a', '20,b', '10,c']

    def test_save_not_exist(self, tmpdir):
        """Saving into a directory that does not exist yet must succeed."""
        path = os.path.join(str(tmpdir), 'frame')
        frame = XFrame({'id': [30, 20, 10], 'val': ['a', 'b', 'c']})
        frame.save(path, format='csv')
        # TODO find some way to check the data
# noinspection PyClassHasNoInit
class TestXFrameSaveParquet:
    """
    Tests XFrame save for parquet files
    """

    def test_save(self):
        """Round-trip: save as parquet, reload, verify schema and rows."""
        frame = XFrame({'id': [1, 2, 3], 'val': ['a', 'b', 'c']})
        path = 'tmp/frame-parquet'
        frame.save(path, format='parquet')
        res = XFrame(path + '.parquet')
        assert res.column_names() == ['id', 'val']
        assert res.column_types() == [int, str]
        expected = [{'id': 1, 'val': 'a'}, {'id': 2, 'val': 'b'}, {'id': 3, 'val': 'c'}]
        for i, row in enumerate(expected):
            assert res[i] == row

    def test_save_as_parquet(self):
        """save_as_parquet round-trips the frame unchanged."""
        frame = XFrame({'id': [1, 2, 3], 'val': ['a', 'b', 'c']})
        path = 'tmp/frame-parquet'
        frame.save_as_parquet(path)
        res = XFrame(path, format='parquet')
        assert res.column_names() == ['id', 'val']
        assert res.column_types() == [int, str]
        expected = [{'id': 1, 'val': 'a'}, {'id': 2, 'val': 'b'}, {'id': 3, 'val': 'c'}]
        for i, row in enumerate(expected):
            assert res[i] == row

    def test_save_rename(self):
        """Column names illegal in parquet (spaces, commas) are sanitized."""
        frame = XFrame({'id col': [1, 2, 3], 'val,col': ['a', 'b', 'c']})
        path = 'tmp/frame-parquet'
        frame.save(path, format='parquet')
        res = XFrame(path + '.parquet')
        assert res.column_names() == ['id_col', 'val_col']
        assert res.column_types() == [int, str]
        expected = [{'id_col': 1, 'val_col': 'a'},
                    {'id_col': 2, 'val_col': 'b'},
                    {'id_col': 3, 'val_col': 'c'}]
        for i, row in enumerate(expected):
            assert res[i] == row

    def test_save_as_parquet_rename(self):
        """Explicit column_names override the stored parquet column names."""
        frame = XFrame({'id': [1, 2, 3], 'val': ['a', 'b', 'c']})
        path = 'tmp/frame-parquet'
        frame.save_as_parquet(path, column_names=['id1', 'val1'])
        res = XFrame(path, format='parquet')
        assert res.column_names() == ['id1', 'val1']
        assert res.column_types() == [int, str]
        expected = [{'id1': 1, 'val1': 'a'},
                    {'id1': 2, 'val1': 'b'},
                    {'id1': 3, 'val1': 'c'}]
        for i, row in enumerate(expected):
            assert res[i] == row

    def test_save_not_exist(self):
        """Saving into a directory that does not exist yet must succeed."""
        frame = XFrame({'id': [30, 20, 10], 'val': ['a', 'b', 'c']})
        path = 'xxx/frame'
        frame.save_as_parquet(path)
        # TODO find some way to check the data
# noinspection PyClassHasNoInit
class TestXFrameSelectColumn:
    """
    Tests XFrame select_column
    """

    def test_select_column_id(self):
        """Selecting a column yields its values in row order."""
        frame = XFrame({'id': [1, 2, 3], 'val': ['a', 'b', 'c']})
        assert list(frame.select_column('id')) == [1, 2, 3]

    def test_select_column_val(self):
        """Selecting the second column also yields its values in order."""
        frame = XFrame({'id': [1, 2, 3], 'val': ['a', 'b', 'c']})
        assert list(frame.select_column('val')) == ['a', 'b', 'c']

    def test_select_column_bad_name(self):
        """An unknown column name raises ValueError with a clear message."""
        frame = XFrame({'id': [1, 2, 3], 'val': ['a', 'b', 'c']})
        with pytest.raises(ValueError) as exception_info:
            frame.select_column('xx')
        assert exception_info.value.args[0] == "Column name does not exist: 'xx'."

    # noinspection PyTypeChecker
    def test_select_column_bad_type(self):
        """A non-string column name raises TypeError."""
        frame = XFrame({'id': [1, 2, 3], 'val': ['a', 'b', 'c']})
        with pytest.raises(TypeError) as exception_info:
            frame.select_column(1)
        assert exception_info.value.args[0] == 'Invalid column_name type must be str.'
# noinspection PyClassHasNoInit
class TestXFrameSelectColumns:
    """
    Tests XFrame select_columns
    """

    def test_select_columns_id_val(self):
        """Selecting two columns keeps only those columns."""
        frame = XFrame({'id': [1, 2, 3], 'val': ['a', 'b', 'c'], 'another': [3.0, 2.0, 1.0]})
        res = frame.select_columns(['id', 'val'])
        assert res[0] == {'id': 1, 'val': 'a'}

    def test_select_columns_id(self):
        """Selecting a single column keeps only that column."""
        frame = XFrame({'id': [1, 2, 3], 'val': ['a', 'b', 'c'], 'another': [3.0, 2.0, 1.0]})
        res = frame.select_columns(['id'])
        assert res[0] == {'id': 1}

    # noinspection PyTypeChecker
    def test_select_columns_not_iterable(self):
        """A non-iterable key list raises TypeError."""
        frame = XFrame({'id': [1, 2, 3], 'val': ['a', 'b', 'c'], 'another': [3.0, 2.0, 1.0]})
        with pytest.raises(TypeError) as exception_info:
            frame.select_columns(1)
        assert exception_info.value.args[0] == 'Keylist must be an iterable.'

    def test_select_columns_bad_type(self):
        """A non-string entry in the key list raises TypeError."""
        frame = XFrame({'id': [1, 2, 3], 'val': ['a', 'b', 'c'], 'another': [3.0, 2.0, 1.0]})
        with pytest.raises(TypeError) as exception_info:
            frame.select_columns(['id', 2])
        assert exception_info.value.args[0] == 'Invalid key type: must be str.'

    def test_select_columns_bad_dup(self):
        """Duplicate keys in the key list raise ValueError."""
        frame = XFrame({'id': [1, 2, 3], 'val': ['a', 'b', 'c'], 'another': [3.0, 2.0, 1.0]})
        with pytest.raises(ValueError) as exception_info:
            frame.select_columns(['id', 'id'])
        assert exception_info.value.args[0] == "There are duplicate keys in key list: 'id'."
# noinspection PyClassHasNoInit
class TestXFrameAddColumn:
"""
Tests XFrame add_column
"""
def test_add_column_named(self):
| |
current implementation of this API uses Spark's Window without
specifying partition specification. This leads to move all data into
single partition in single machine and could cause serious
performance degradation. Avoid this method against very large dataset.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the rolling
calculation.
See Also
--------
Series.rolling : Calling object with Series data.
DataFrame.rolling : Calling object with DataFrames.
Series.mean : Equivalent method for Series.
DataFrame.mean : Equivalent method for DataFrame.
Examples
--------
>>> s = ks.Series([4, 3, 5, 2, 6])
>>> s
0 4
1 3
2 5
3 2
4 6
Name: 0, dtype: int64
>>> s.rolling(2).mean()
0 NaN
1 3.5
2 4.0
3 3.5
4 4.0
Name: 0, dtype: float64
>>> s.rolling(3).mean()
0 NaN
1 NaN
2 4.000000
3 3.333333
4 4.333333
Name: 0, dtype: float64
For DataFrame, each rolling mean is computed column-wise.
>>> df = ks.DataFrame({"A": s.to_numpy(), "B": s.to_numpy() ** 2})
>>> df
A B
0 4 16
1 3 9
2 5 25
3 2 4
4 6 36
>>> df.rolling(2).mean()
A B
0 NaN NaN
1 3.5 12.5
2 4.0 17.0
3 3.5 14.5
4 4.0 20.0
>>> df.rolling(3).mean()
A B
0 NaN NaN
1 NaN NaN
2 4.000000 16.666667
3 3.333333 12.666667
4 4.333333 21.666667
"""
return super(Rolling, self).mean()
    def std(self):
        """
        Calculate rolling standard deviation.

        .. note:: the current implementation of this API uses Spark's Window without
            specifying partition specification. This leads to move all data into
            single partition in single machine and could cause serious
            performance degradation. Avoid this method against very large dataset.

        Returns
        -------
        Series or DataFrame
            Returns the same object type as the caller of the rolling calculation.

        See Also
        --------
        Series.rolling : Calling object with Series data.
        DataFrame.rolling : Calling object with DataFrames.
        Series.std : Equivalent method for Series.
        DataFrame.std : Equivalent method for DataFrame.
        numpy.std : Equivalent method for Numpy array.

        Examples
        --------
        >>> s = ks.Series([5, 5, 6, 7, 5, 5, 5])
        >>> s.rolling(3).std()
        0         NaN
        1         NaN
        2    0.577350
        3    1.000000
        4    1.000000
        5    1.154701
        6    0.000000
        Name: 0, dtype: float64

        For DataFrame, each rolling standard deviation is computed column-wise.

        >>> df = ks.DataFrame({"A": s.to_numpy(), "B": s.to_numpy() ** 2})
        >>> df.rolling(2).std()
                  A          B
        0       NaN        NaN
        1  0.000000   0.000000
        2  0.707107   7.778175
        3  0.707107   9.192388
        4  1.414214  16.970563
        5  0.000000   0.000000
        6  0.000000   0.000000
        """
        # The computation lives in the shared window base class; this override
        # exists only to attach rolling-specific documentation.
        return super(Rolling, self).std()
    def var(self):
        """
        Calculate unbiased rolling variance.

        .. note:: the current implementation of this API uses Spark's Window without
            specifying partition specification. This leads to move all data into
            single partition in single machine and could cause serious
            performance degradation. Avoid this method against very large dataset.

        Returns
        -------
        Series or DataFrame
            Returns the same object type as the caller of the rolling calculation.

        See Also
        --------
        Series.rolling : Calling object with Series data.
        DataFrame.rolling : Calling object with DataFrames.
        Series.var : Equivalent method for Series.
        DataFrame.var : Equivalent method for DataFrame.
        numpy.var : Equivalent method for Numpy array.

        Examples
        --------
        >>> s = ks.Series([5, 5, 6, 7, 5, 5, 5])
        >>> s.rolling(3).var()
        0         NaN
        1         NaN
        2    0.333333
        3    1.000000
        4    1.000000
        5    1.333333
        6    0.000000
        Name: 0, dtype: float64

        For DataFrame, each unbiased rolling variance is computed column-wise.

        >>> df = ks.DataFrame({"A": s.to_numpy(), "B": s.to_numpy() ** 2})
        >>> df.rolling(2).var()
             A      B
        0  NaN    NaN
        1  0.0    0.0
        2  0.5   60.5
        3  0.5   84.5
        4  2.0  288.0
        5  0.0    0.0
        6  0.0    0.0
        """
        # The computation lives in the shared window base class; this override
        # exists only to attach rolling-specific documentation.
        return super(Rolling, self).var()
class RollingGroupby(Rolling):
    def __init__(self, groupby, groupkeys, window, min_periods=None):
        """Rolling window over a grouped Series/DataFrame.

        Parameters
        ----------
        groupby : SeriesGroupBy or DataFrameGroupBy
            The groupby object whose groups partition the window.
        groupkeys : list of Series
            The grouping key series; each group becomes a window partition.
        window : int
            Size of the moving window.
        min_periods : int, optional
            Minimum number of observations in window required to have a value.

        Raises
        ------
        TypeError
            If `groupby` is neither a SeriesGroupBy nor a DataFrameGroupBy.
        """
        # Local imports avoid a circular dependency with the groupby module.
        from databricks.koalas.groupby import SeriesGroupBy
        from databricks.koalas.groupby import DataFrameGroupBy
        if isinstance(groupby, SeriesGroupBy):
            # A grouped Series is promoted to a single-column DataFrame so the
            # rest of the implementation can treat both cases uniformly.
            kdf = groupby._kser.to_frame()
        elif isinstance(groupby, DataFrameGroupBy):
            kdf = groupby._kdf
        else:
            raise TypeError(
                "groupby must be a SeriesGroupBy or DataFrameGroupBy; "
                "however, got: %s" % type(groupby))
        super(RollingGroupby, self).__init__(kdf, window, min_periods)
        self._groupby = groupby
        # NOTE THAT this code intentionally uses `F.col` instead of `scol` in
        # given series. This is because, in case of series, we convert it into
        # DataFrame. So, if the given `groupkeys` is a series, they end up with
        # being a different series.
        # Re-partition the windows built by the parent so each group key value
        # gets its own window partition.
        self._window = self._window.partitionBy(
            *[F.col(name_like_string(ser.name)) for ser in groupkeys])
        self._unbounded_window = self._unbounded_window.partitionBy(
            *[F.col(name_like_string(ser.name)) for ser in groupkeys])
        self._groupkeys = groupkeys
        # Current implementation reuses DataFrameGroupBy implementations for Series as well.
        self.kdf = self.kdf_or_kser
def __getattr__(self, item: str) -> Any:
if hasattr(_MissingPandasLikeRollingGroupby, item):
property_or_func = getattr(_MissingPandasLikeRollingGroupby, item)
if isinstance(property_or_func, property):
return property_or_func.fget(self) # type: ignore
else:
return partial(property_or_func, self)
raise AttributeError(item)
    def _apply_as_series_or_frame(self, func):
        """
        Wraps a function that handles Spark column in order
        to support it in both Koalas Series and DataFrame.
        Note that the given `func` name should be same as the API's method name.

        The result is re-indexed so the group keys become the leading index
        levels, mirroring pandas' grouped-rolling output shape.
        """
        from databricks.koalas import DataFrame
        from databricks.koalas.series import _col
        from databricks.koalas.groupby import SeriesGroupBy
        kdf = self.kdf
        sdf = self.kdf._sdf
        # Here we need to include grouped key as an index, and shift previous index.
        # [index_column0, index_column1] -> [grouped key, index_column0, index_column1]
        new_index_scols = []
        new_index_map = []
        for groupkey in self._groupkeys:
            new_index_scols.append(
                # NOTE THAT this code intentionally uses `F.col` instead of `scol` in
                # given series. This is because, in case of series, we convert it into
                # DataFrame. So, if the given `groupkeys` is a series, they end up with
                # being a different series.
                F.col(
                    name_like_string(groupkey.name)
                ).alias(
                    SPARK_INDEX_NAME_FORMAT(len(new_index_scols))
                ))
            new_index_map.append(
                (SPARK_INDEX_NAME_FORMAT(len(new_index_map)),
                 groupkey._internal.column_labels[0]))
        # Shift the original index levels after the group-key levels.
        for new_index_scol, index_name in zip(kdf._internal.index_scols, kdf._internal.index_names):
            new_index_scols.append(
                new_index_scol.alias(SPARK_INDEX_NAME_FORMAT(len(new_index_scols))))
            new_index_map.append((SPARK_INDEX_NAME_FORMAT(len(new_index_map)), index_name))
        # Apply the window function to every data column.
        applied = []
        for column in kdf.columns:
            applied.append(
                kdf[column]._with_new_scol(
                    func(kdf[column]._scol)
                ).rename(kdf[column].name))
        # Seems like pandas filters out when grouped key is NA.
        # NOTE(review): this OR-chain keeps a row when *any* group key is
        # non-null (and groupkeys[0] is OR-ed with itself).  If the intent is
        # to drop rows where any group key is NA, the combinator should be
        # `&` over groupkeys[1:] — confirm against pandas semantics.
        cond = self._groupkeys[0]._scol.isNotNull()
        for c in self._groupkeys:
            cond = cond | c._scol.isNotNull()
        sdf = sdf.select(new_index_scols + [c._scol for c in applied]).filter(cond)
        internal = kdf._internal.copy(
            sdf=sdf,
            index_map=new_index_map,
            column_labels=[c._internal.column_labels[0] for c in applied],
            column_scols=[scol_for(sdf, c._internal.data_columns[0]) for c in applied])
        ret = DataFrame(internal)
        # A grouped Series comes back out as a Series again.
        if isinstance(self._groupby, SeriesGroupBy):
            return _col(ret)
        else:
            return ret
    def count(self):
        """
        The rolling count of any non-NaN observations inside the window.

        Returns
        -------
        Series or DataFrame
            Returned object type is determined by the caller of the expanding
            calculation.

        See Also
        --------
        Series.rolling : Calling object with Series data.
        DataFrame.rolling : Calling object with DataFrames.
        Series.count : Count of the full Series.
        DataFrame.count : Count of the full DataFrame.

        Examples
        --------
        >>> s = ks.Series([2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5])
        >>> s.groupby(s).rolling(3).count().sort_index()  # doctest: +NORMALIZE_WHITESPACE
        0
        2  0     1.0
           1     2.0
        3  2     1.0
           3     2.0
           4     3.0
        4  5     1.0
           6     2.0
           7     3.0
           8     3.0
        5  9     1.0
           10    2.0
        Name: 0, dtype: float64

        For DataFrame, each rolling count is computed column-wise.

        >>> df = ks.DataFrame({"A": s.to_numpy(), "B": s.to_numpy() ** 2})
        >>> df.groupby(df.A).rolling(2).count().sort_index()  # doctest: +NORMALIZE_WHITESPACE
                 A    B
        A
        2 0    1.0  1.0
          1    2.0  2.0
        3 2    1.0  1.0
          3    2.0  2.0
          4    2.0  2.0
        4 5    1.0  1.0
          6    2.0  2.0
          7    2.0  2.0
          8    2.0  2.0
        5 9    1.0  1.0
          10   2.0  2.0
        """
        # The grouped window partitions were installed in __init__; the parent
        # class supplies the actual count computation.
        return super(RollingGroupby, self).count()
def sum(self):
"""
The rolling summation of any non-NaN observations inside the window.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the rolling
calculation.
See Also
--------
Series.rolling : Calling object with Series data.
DataFrame.rolling : Calling object with DataFrames.
Series.sum : Sum of the full Series.
DataFrame.sum : Sum of the full DataFrame.
Examples
--------
>>> s = ks.Series([2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5])
>>> s.groupby(s).rolling(3).sum().sort_index() # doctest: +NORMALIZE_WHITESPACE
0
2 0 NaN
1 NaN
3 2 NaN
3 NaN
4 9.0
4 5 NaN
6 NaN
7 12.0
8 12.0
5 9 NaN
10 NaN
Name: 0, dtype: float64
For DataFrame, each rolling summation is computed column-wise.
>>> df = ks.DataFrame({"A": s.to_numpy(), "B": s.to_numpy() ** 2})
>>> df.groupby(df.A).rolling(2).sum().sort_index() # doctest: +NORMALIZE_WHITESPACE
A B
A
2 0 NaN NaN
1 4.0 8.0
3 2 NaN NaN
3 6.0 18.0
4 | |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 7 11:33:50 2019
@author: callie_macbookair
NOTE: You MUST have a sleep time in between starting the digitizer and starting fifo. The digitizer needs time to start up and fifo will time out if you
do not wait. I suggest something like 5-7 seconds.
"""
import logging
import os
import time
import subprocess
from makeHistogram import makeHistogram_noTable
import matplotlib.pyplot as plt
import numpy as np
import threading
class radiation_measurement():
#On init start logging and set the buffer time (how often access shared memory)
def __init__(self):
# Turn on logging so can get some debugging action
logging.basicConfig(format='%(asctime)s %(message)s')
self.log = logging.getLogger()
self.log.setLevel(logging.DEBUG)
self.log.debug('Logging has started')
#Time to wait before grabbing data from shared memory again, used in FIFO
self.bufferTime = 0.1
# digitizer_location = '/home/kgoetz/Documents/digiTES_4.5.13/bin'
# save_location = '/home/kgoetz/Documents'
# run_name = 'test_run'
#This method starts the digitizer running independently in its own terminal
#It requires a measurement time, the location the digitizer is writing to, the location where you want to save you data from the run and the run name
def run_digitizer(self,measurement_time,digitizer_location,save_location,run_name):
#Autogenerate a digitizer command file that will be used to pipe commands into digites
f = open(digitizer_location+'/digitizer_command.txt', 'w') #put script in folder with digitizer
f.write('\n')
f.write('\n')
f.write('s\n')
f.write('\n')
f.write('l\n')
for n in range(0,measurement_time-2):
f.write('\n')
f.write('h\n')
f.write('\n')
f.write('s\n')
f.write('\n')
f.write('q\n')
f.close()
#Wait 1 second to let it finish writing file
time.sleep(1)
#Autogenerate script that runs digitizer and moves data safely to named folder when digitizer is finished
d = open('run_digiTES.bash', 'w') #put script in current directory
d.write('#!/bin/bash \n')
d.write('cd '+digitizer_location+'/ \n') #change to digitizer location
d.write('\n')
d.write('while read input\n')
d.write('do echo "$input"\n')
d.write('\t sleep 1\n')
d.write('done < digitizer_command.txt | ./digiTES\n')
d.write('\n')
d.write('cd '+save_location+'/ \n') #change to digitizer location
d.write('mkdir '+run_name+'\n') #make a folder with the run name
d.write('cp '+digitizer_location+'/DataFiles/* '+save_location+'/'+run_name+'/ \n') #move all data from folder digites writes to folder with run name
d.write('\n')
d.close()
#Wait 1 second to let it finish writing file
time.sleep(1)
#Start digitizer process
self.proc = subprocess.call(['gnome-terminal','-x','./run_digiTES.bash'])
self.log.debug('Digitizer Started')
def acquire_data(self,digitizer_location, channels_activated=[True,True,True,True]):
## CARL #################
# This method is untested and I'm not sure if its right but you said you are familiar with it and it should give you an idea of how I was thinking about starting up a new thread
#I think we want to use a channel mask because enabling/disabling channels will be easy
#Wait 7 seconds after start acquiring to let digitizer have some time to start up
time.sleep(7)
#If channel 0 is enabled, start reading in
if channel_mask[0]:
filename = digitizer_location+'/DataFiles/Run0_List_0_0.txt'
#Start new thread with fifo
self.ch0 = threading.Thread(target=self.fifo,args=(filename,0))
self.ch0.start()
#If channel 1 is enabled, start reading in
if channel_mask[1]:
filename = digitizer_location+'/DataFiles/Run0_List_0_1.txt'
#Start new thread with fifo
self.ch1 = threading.Thread(target=self.fifo,args=(filename,1))
self.ch1.start()
#If channel 2 is enabled, start reading in
if channel_mask[2]:
filename = digitizer_location+'/DataFiles/Run0_List_0_2.txt'
#Start new thread with fifo
self.ch2 = threading.Thread(target=self.fifo,args=(filename,2))
self.ch2.start()
#If channel 3 is enabled, start reading in
if channel_mask[3]:
filename = digitizer_location+'/DataFiles/Run0_List_0_3.txt'
#Start new thread with fifo
self.ch3 = threading.Thread(target=self.fifo,args=(filename,3))
self.ch3.start()
# THIS METHOD OPENS A PIPE TO SHARED MEMORY, GRABS DATA COMING IN EVERY X NUMBER OF SECONDS (DEFINED BY BUFFER_TIME) AND OUTPUTS NUMPY ARRAYS FOR TIMESTAMP (PICOSECONDS)
# EVENT ENERGY(ADC COUNTS), PSD VALUE AND ENERGY HISTOGRAM
# IT NEEDS A FILE NAME AND A CHANNEL NUMBER
def fifo(self,filename,channel_number):
#Check that digitizer is running
if not os.path.exists(filename):
self.log.debug('Start acquistion please')
#If its not running wait 5 seconds
time.sleep(5)
else:
self.log.debug('Acquistion running, pipe present: OK')
self.log.debug('Pipe to data for channel '+str(channel_number)+' is open for reading')
#set new pipe variable to true at beginning of run
newPipe=True
#Initialize wait count to 0
wait_count = 0
#Open the pipe to shared memory
with open(filename, 'r') as fifo:
# initialize empty arrays
tr = []
l = []
psd = []
while True:
data = fifo.read().splitlines() #split incoming data into lines based on carriage return
#Set up a time out so the code stops when the digitizer does
if not data and wait_count < 11:
self.log.debug('Waiting for data from digitizer on channel '+str(channel_number))
time.sleep(0.5)
wait_count = wait_count+1
continue
elif not data and wait_count == 11:
self.log.debug('Digitizer has stopped, time out criteria reached, quitting shared memory access on channel '+str(channel_number))
return
#Reset wait count to 0 every time get new data
wait_count = 0
############################ NOTE ############################
# Sometimes fifo accessses shared memory and grabs data when the digitizer is still writing a line, most of the things below are for dealing with that
#Grab the first line to fill first
first = data[0]
first_words = first.split(' ')
while '' in first_words: #get rid of extra spaces
first_words.remove('')
#if this is a new file, no need to worry if things have been cut off, but do fill last
if newPipe is True:
for line in data[-1]:
words = line.split(' ') # split line into space delimited words
while '' in words: #get rid of extra spaces
words.remove('')
# print("Received Data: " + str(words))
#build 1D arrays with list mode data
if len(words) == 3:
tr.append(words[0]) # trigger time in ps
l.append(words[1]) # total integrated charge of event in counts
psd.append(words[2]) # psd value
else:
#Just a bit of error checking
print("Read error on channel "+str(channel_number)+", skipping line")
print("Data line on channel "+str(channel_number)+" is:")
print(words)
#uncomment for debugging
# print("Long gate is: " + str(l))
time.sleep(self.bufferTime) # wait x number of seconds to check for data coming in on pipe
last = data[-1]
#new pipe is now false
newPipe = False
#if its not a new pipe, proceed as normal
else:
for line in data[1:-1]:
words = line.split(' ') # split line into space delimited words
while '' in words:
words.remove('')
#build 1D arrays with list mode data
tr.append(float(words[0])*(10**-12)) # trigger time in ns
l.append(float(words[1])) # total integrated charge of event in counts
psd.append(float(words[2])) # psd value
newline = []
last_words = last.split(' ')
while '' in last_words: #get rid of extra spaces
last_words.remove('')
#if both the last line of the old data file and the first line are fine then append them as expected
if len(last_words) == 3 and len(first_words) ==3:
tr.append(last_words[0]) # trigger time in ns
l.append(last_words[1]) # total integrated charge of event in counts
psd.append(last_words[2]) # psd value
tr.append(first_words[0]) # trigger time in ns
l.append(first_words[1]) # total integrated charge of event in counts
psd.append(first_words[2]) # psd value
#if last words and first words have the length of 2 then they split in the long gate
elif len(last_words) == 2 and len(first_words) ==2:
print('Read split in long gate value in channel '+str(channel_number)+'. Fixing.')
newline = last+first
print("Whole line in channel "+str(channel_number)+" is: " + newline)
new_words = newline.split(' ')
while '' in new_words: #get rid of extra spaces
new_words.remove('')
tr.append(new_words[0]) # trigger time in ps
l.append(new_words[1]) # total integrated charge of event in counts
psd.append(new_words[2]) # psd value
# self.log.debug('Read split in long gate value. Fixing.')
#if the last word is 3 but the first word is 1 then split on the PSD value
elif len(last_words) == 3 and len(first_words) ==1:
print('Read split in PSD value in channel '+str(channel_number)+'. Fixing.')
newline = last+first
print("Whole line in channel "+str(channel_number)+" is: " + newline)
new_words = newline.split(' ')
while '' in new_words: #get rid of extra spaces
new_words.remove('')
tr.append(new_words[0]) # trigger time in ps
l.append(new_words[1]) # total integrated charge of event in counts
psd.append(new_words[2]) # psd value
# self.log.debug('Read split in PSD value. Fixing.')
#if the last word is 1 and the first word is 3 then | |
from cognite.client.data_classes._base import *
class TransformationDestination:
    """TransformationDestination has static methods to define the target resource type of a transformation

    Args:
        type (str): Used as data type identifier on transformation creation/retrieval.
    """

    def __init__(self, type: str = None):
        self.type = type

    def __hash__(self):
        return hash(self.type)

    def __eq__(self, obj):
        # NOTE: equality is delegated to hash equality of `type`; a hash
        # collision between distinct type strings would compare equal.
        if not isinstance(obj, TransformationDestination):
            return False
        return hash(obj) == hash(self)

    @staticmethod
    def assets():
        """To be used when the transformation is meant to produce assets."""
        return TransformationDestination("assets")

    @staticmethod
    def timeseries():
        """To be used when the transformation is meant to produce time series."""
        return TransformationDestination("timeseries")

    @staticmethod
    def asset_hierarchy():
        """To be used when the transformation is meant to produce asset hierarchies."""
        return TransformationDestination("asset_hierarchy")

    @staticmethod
    def events():
        """To be used when the transformation is meant to produce events."""
        return TransformationDestination("events")

    @staticmethod
    def datapoints():
        """To be used when the transformation is meant to produce numeric data points."""
        return TransformationDestination("datapoints")

    @staticmethod
    def string_datapoints():
        """To be used when the transformation is meant to produce string data points."""
        return TransformationDestination("string_datapoints")

    @staticmethod
    def sequences():
        """To be used when the transformation is meant to produce sequences."""
        return TransformationDestination("sequences")

    @staticmethod
    def files():
        """To be used when the transformation is meant to produce files."""
        return TransformationDestination("files")

    @staticmethod
    def labels():
        """To be used when the transformation is meant to produce labels."""
        return TransformationDestination("labels")

    @staticmethod
    def relationships():
        """To be used when the transformation is meant to produce relationships."""
        return TransformationDestination("relationships")

    @staticmethod
    def data_sets():
        """To be used when the transformation is meant to produce data sets."""
        return TransformationDestination("data_sets")

    @staticmethod
    def raw(database: str = "", table: str = ""):
        """To be used when the transformation is meant to produce raw table rows.

        Args:
            database (str): database name of the target raw table.
            table (str): name of the target raw table

        Returns:
            TransformationDestination pointing to the target table
        """
        return RawTable(type="raw", database=database, table=table)
class RawTable(TransformationDestination):
    """Destination pointing at a specific CDF RAW table (type is "raw")."""

    def __init__(self, type: str = None, database: str = None, table: str = None):
        super().__init__(type=type)
        self.database = database
        self.table = table

    def __hash__(self):
        # Hash over the full identity triple so distinct tables differ.
        return hash((self.type, self.database, self.table))

    def __eq__(self, obj):
        # Mirrors the base class: equality delegated to hash equality.
        if not isinstance(obj, RawTable):
            return False
        return hash(self) == hash(obj)
class OidcCredentials:
    """Holder for the OIDC client-credential parameters of a transformation."""

    # Attribute names double as the keys emitted by dump().
    _FIELDS = (
        "client_id",
        "client_secret",
        "scopes",
        "token_uri",
        "audience",
        "cdf_project_name",
    )

    def __init__(
        self,
        client_id: str = None,
        client_secret: str = None,
        scopes: str = None,
        token_uri: str = None,
        audience: str = None,
        cdf_project_name: str = None,
    ):
        self.client_id = client_id
        self.client_secret = client_secret
        self.scopes = scopes
        self.token_uri = token_uri
        self.audience = audience
        self.cdf_project_name = cdf_project_name

    def dump(self, camel_case: bool = False) -> Dict[str, Any]:
        """Dump the instance into a json serializable Python data type.

        Args:
            camel_case (bool): Use camelCase for attribute names. Defaults to False.

        Returns:
            Dict[str, Any]: A dictionary representation of the instance.
        """
        dumped = {name: getattr(self, name) for name in self._FIELDS}
        if camel_case:
            dumped = {utils._auxiliary.to_camel_case(key): value for key, value in dumped.items()}
        return dumped
class TransformationBlockedInfo:
    """Provides the reason and time at which a transformation is blocked.

    Args:
        reason (str): Why the transformation is blocked.
        created_time (Optional[int]): Timestamp (ms since epoch) when the block was created.
        time (Optional[int]): Alias for created_time; used as a fallback when
            created_time is not given. (Previously this argument was accepted
            but silently dropped.)
    """

    def __init__(self, reason: str = None, created_time: Optional[int] = None, time: Optional[int] = None):
        self.reason = reason
        # Bug fix: `time` used to be ignored entirely; treat it as a fallback.
        self.created_time = created_time if created_time is not None else time
class Transformation(CogniteResource):
    """The transformations resource allows transforming data in CDF.

    Args:
        id (int): A server-generated ID for the object.
        external_id (str): The external ID provided by the client. Must be unique for the resource type.
        name (str): The name of the Transformation.
        query (str): SQL query of the transformation.
        destination (TransformationDestination): see TransformationDestination for options.
        conflict_mode (str): What to do in case of id collisions: either "abort", "upsert", "update" or "delete"
        is_public (bool): Indicates if the transformation is visible to all in project or only to the owner.
        ignore_null_fields (bool): Indicates how null values are handled on updates: ignore or set null.
        source_api_key (str): Configures the transformation to authenticate with the given api key on the source.
        destination_api_key (str): Configures the transformation to authenticate with the given api key on the destination.
        source_oidc_credentials (Optional[OidcCredentials]): Configures the transformation to authenticate with the given oidc credentials key on the destination.
        destination_oidc_credentials (Optional[OidcCredentials]): Configures the transformation to authenticate with the given oidc credentials on the destination.
        created_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds.
        last_updated_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds.
        owner (str): Owner of the transformation: requester's identity.
        owner_is_current_user (bool): Indicates if the transformation belongs to the current user.
        has_source_api_key (bool): Indicates if the transformation is configured with a source api key.
        has_destination_api_key (bool): Indicates if the transformation is configured with a destination api key.
        has_source_oidc_credentials (bool): Indicates if the transformation is configured with a source oidc credentials set.
        has_destination_oidc_credentials (bool): Indicates if the transformation is configured with a destination oidc credentials set.
        running_job (TransformationJob): Details for the job of this transformation currently running.
        last_finished_job (TransformationJob): Details for the last finished job of this transformation.
        blocked (TransformationBlockedInfo): Provides reason and time if the transformation is blocked.
        schedule (TransformationSchedule): Details for the schedule if the transformation is scheduled.
        cognite_client (CogniteClient): The client to associate with this object.
    """

    def __init__(
        self,
        id: int = None,
        external_id: str = None,
        name: str = None,
        query: str = None,
        destination: TransformationDestination = None,
        conflict_mode: str = None,
        is_public: bool = True,
        ignore_null_fields: bool = False,
        source_api_key: str = None,
        destination_api_key: str = None,
        source_oidc_credentials: Optional[OidcCredentials] = None,
        destination_oidc_credentials: Optional[OidcCredentials] = None,
        created_time: Optional[int] = None,
        last_updated_time: Optional[int] = None,
        owner: str = None,
        owner_is_current_user: bool = True,
        has_source_api_key: Optional[bool] = None,
        has_destination_api_key: Optional[bool] = None,
        has_source_oidc_credentials: Optional[bool] = None,
        has_destination_oidc_credentials: Optional[bool] = None,
        running_job: "TransformationJob" = None,
        last_finished_job: "TransformationJob" = None,
        blocked: TransformationBlockedInfo = None,
        schedule: "TransformationSchedule" = None,
        cognite_client=None,
    ):
        self.id = id
        self.external_id = external_id
        self.name = name
        self.query = query
        self.destination = destination
        self.conflict_mode = conflict_mode
        self.is_public = is_public
        self.ignore_null_fields = ignore_null_fields
        # The has_* flags are reported by the API on read; when building an
        # instance locally they are derived from whether the secret is present.
        self.source_api_key = source_api_key
        self.has_source_api_key = has_source_api_key or source_api_key is not None
        self.destination_api_key = destination_api_key
        self.has_destination_api_key = has_destination_api_key or destination_api_key is not None
        self.source_oidc_credentials = source_oidc_credentials
        self.has_source_oidc_credentials = has_source_oidc_credentials or source_oidc_credentials is not None
        self.destination_oidc_credentials = destination_oidc_credentials
        self.has_destination_oidc_credentials = (
            has_destination_oidc_credentials or destination_oidc_credentials is not None
        )
        self.created_time = created_time
        self.last_updated_time = last_updated_time
        self.owner = owner
        self.owner_is_current_user = owner_is_current_user
        self.running_job = running_job
        self.last_finished_job = last_finished_job
        self.blocked = blocked
        self.schedule = schedule
        self._cognite_client = cognite_client

    def run(self, wait: bool = True, timeout: Optional[float] = None) -> "TransformationJob":
        """Run this transformation; delegates to the transformations API of the associated client."""
        return self._cognite_client.transformations.run(transformation_id=self.id, wait=wait, timeout=timeout)

    def run_async(self, timeout: Optional[float] = None) -> Awaitable["TransformationJob"]:
        """Run this transformation asynchronously; delegates to the transformations API."""
        return self._cognite_client.transformations.run_async(transformation_id=self.id, timeout=timeout)

    def jobs(self) -> "TransformationJobList":
        """List the jobs of this transformation via the associated client."""
        return self._cognite_client.transformations.jobs.list(transformation_id=self.id)

    @classmethod
    def _load(cls, resource: Union[Dict, str], cognite_client=None):
        # The API returns nested objects as camelCase dicts; convert each one
        # to its resource class after the generic load.
        instance = super(Transformation, cls)._load(resource, cognite_client)
        if isinstance(instance.destination, Dict):
            snake_dict = {utils._auxiliary.to_snake_case(key): value for (key, value) in instance.destination.items()}
            # raw destinations carry extra database/table fields
            if instance.destination.get("type") == "raw":
                instance.destination = RawTable(**snake_dict)
            else:
                instance.destination = TransformationDestination(**snake_dict)
        if isinstance(instance.running_job, Dict):
            snake_dict = {utils._auxiliary.to_snake_case(key): value for (key, value) in instance.running_job.items()}
            instance.running_job = TransformationJob._load(snake_dict, cognite_client=cognite_client)
        if isinstance(instance.last_finished_job, Dict):
            snake_dict = {
                utils._auxiliary.to_snake_case(key): value for (key, value) in instance.last_finished_job.items()
            }
            instance.last_finished_job = TransformationJob._load(snake_dict, cognite_client=cognite_client)
        if isinstance(instance.blocked, Dict):
            snake_dict = {utils._auxiliary.to_snake_case(key): value for (key, value) in instance.blocked.items()}
            instance.blocked = TransformationBlockedInfo(**snake_dict)
        if isinstance(instance.schedule, Dict):
            snake_dict = {utils._auxiliary.to_snake_case(key): value for (key, value) in instance.schedule.items()}
            instance.schedule = TransformationSchedule._load(snake_dict, cognite_client=cognite_client)
        return instance

    def dump(self, camel_case: bool = False) -> Dict[str, Any]:
        """Dump the instance into a json serializable Python data type.

        Args:
            camel_case (bool): Use camelCase for attribute names. Defaults to False.

        Returns:
            Dict[str, Any]: A dictionary representation of the instance.
        """
        ret = CogniteResource.dump(self, camel_case=camel_case)
        # Nested credential objects are not serialized by the generic dump,
        # so replace them with their own dict representation.
        if self.source_oidc_credentials:
            source_key = "sourceOidcCredentials" if camel_case else "source_oidc_credentials"
            ret[source_key] = self.source_oidc_credentials.dump(camel_case=camel_case)
        if self.destination_oidc_credentials:
            destination_key = "destinationOidcCredentials" if camel_case else "destination_oidc_credentials"
            ret[destination_key] = self.destination_oidc_credentials.dump(camel_case=camel_case)
        return ret

    def __hash__(self):
        # identity for set/dict membership is the external id only
        return hash(self.external_id)
class TransformationUpdate(CogniteUpdate):
"""Changes applied to transformation
Args:
id (int): A server-generated ID for the object.
external_id (str): External Id provided by client. Should be unique within the project.
"""
class _PrimitiveTransformationUpdate(CognitePrimitiveUpdate):
def set(self, value: Any) -> "TransformationUpdate":
return self._set(value)
@property
def name(self):
return TransformationUpdate._PrimitiveTransformationUpdate(self, "name")
@property
def destination(self):
return TransformationUpdate._PrimitiveTransformationUpdate(self, "destination")
@property
def conflict_mode(self):
return TransformationUpdate._PrimitiveTransformationUpdate(self, "conflictMode")
@property
def query(self):
return TransformationUpdate._PrimitiveTransformationUpdate(self, "query")
@property
def source_oidc_credentials(self):
return TransformationUpdate._PrimitiveTransformationUpdate(self, "sourceOidcCredentials")
@property
def destination_oidc_credentials(self):
return TransformationUpdate._PrimitiveTransformationUpdate(self, "destinationOidcCredentials")
@property
def source_api_key(self):
return TransformationUpdate._PrimitiveTransformationUpdate(self, "sourceApiKey")
@property
def destination_api_key(self):
| |
h2*k2
chiStemp,rhoftemp,grun,KS=getchiSgrun(ytemp[2],ytemp[0],chiSold,scale,param)
k3 = eval(diffeq + '(rold+h2,ytemp,ricb,rcmb,rhoftemp,grun,KS,scale)') # 2nd slope at midpoint
k3 = np.array(k3)
ytemp = yold + h*k3
chiStemp,rhoftemp,grun,KS=getchiSgrun(ytemp[2],ytemp[0],chiSold,scale,param)
k4 = eval(diffeq + '(rold+h,ytemp,ricb,rcmb,rhoftemp,grun,KS,scale)') # Slope at endpoint
k4 = np.array(k4)
y[j,:] = ( yold + h6*(k1+k4) + h3*(k2+k3) ) # Advance all equations
res = getchiSgrun(y[j,2],y[j,0],chiSold,scale,param)
chiS[j],rhof[j] = res[0:2]
return r,y,rhof,chiS
def rhs_PTrhog_solid_snow(r, y, ricb, scale, param):
    """
    Right-hand sides of the coupled ODEs for the interior model equations
    in the solid (fcc Fe) part of the core.

    Input:  r     = radius, the independent variable
            y     = vector (length 3) of dependent variables (nondimensional P, g, T)
            ricb  = ICB radius
            scale = dict of nondimensionalization scales
            param = model parameter dict (passed through to the EOS)
    Output: dydr  = list of dy(i)/dr values
    """
    # unpack nondimensionalization scales
    a, ga, P, T = scale['a'], scale['ga'], scale['P'], scale['T']
    # redimensionalize temperature and pressure
    T_dim = T * y[2]
    P_dim = P * y[0]
    # equation of state for solid fcc iron (pressure handed over in GPa)
    eos_out = eos.solidFccFe(P_dim / 1E+9, T_dim, param)
    rho = eos_out[1]
    KS = eos_out[4] * 1e+9  # adiabatic bulk modulus back to Pa
    grun = eos_out[6]       # Grueneisen parameter
    return [-(a * ga / P) * rho * y[1],
            (a / ga) * 4 * np.pi * G * rho - 2 * y[1] / r,
            -(a * ga) * grun * rho * y[1] * y[2] / KS]
def getPgcmb_crust(rhom, rc, rhocr, rh, param, scale):
    """
    Determine the pressure and gravitational acceleration at the CMB for a
    given mantle density (rhom), CMB radius (rc), crustal density (rhocr)
    and crust-mantle boundary radius (rh).
    """
    # Mercury parameters
    GM = param['GM']
    M = GM / G
    rm = param['rm']
    # nondimensionalization scales
    a = scale['a']
    P = scale['P']
    # layer masses: crust, mantle, and the core by difference
    crust_mass = 4 * np.pi * rhocr * (rm ** 3 - rh ** 3) / 3
    mantle_mass = 4 * np.pi * rhom * (rh ** 3 - rc ** 3) / 3
    core_mass = M - mantle_mass - crust_mass
    # gravitational acceleration at the CMB
    gcmb = core_mass * G / rc ** 2
    # pressure at the CMB: integrate hydrostatics from the surface down
    # through the crust, then continue through the mantle
    y0 = [0, 1]
    sol_crust = scipy.integrate.solve_ivp(
        lambda t, y: rhs_Pgz(t, y, rhocr, scale), [1, rh / a], y0, method='RK45')
    yh = sol_crust.y[:, -1]
    sol_mantle = scipy.integrate.solve_ivp(
        lambda t, y: rhs_Pgz(t, y, rhom, scale), [rh / a, rc / a], yh, method='RK45')
    Pcmb = P * sol_mantle.y[0, -1]  # redimensionalize
    return Pcmb, gcmb
def getmelt_anzellini(chis, P, param, To):
    """
    Melting temperature of an Fe + light-element mixture as a function of the
    light-element fraction chis and pressure P (Pa), minus the offset To.

    The pure-Fe melting curve follows Eq. 2 of Anzellini et al., Science 2013,
    refit as a power law in pressure.
    """
    el = param['li_el']
    P_GPa = P * 1e-9
    if el == 'S':
        # Anzellini pure-iron liquidus parametrization
        TmFe = 495.4969600595926 * (22.19 + P_GPa) ** 0.42016806722689076
        # piecewise-linear eutectic temperature in the Fe-FeS system
        if P_GPa < 14:
            Te0, b1, Pe0 = 1265.4, -11.15, 3
        elif P_GPa < 21:
            Te0, b1, Pe0 = 1142.7, 29, 14
        else:
            Te0, b1, Pe0 = 1345.72, 12.9975, 21
        Te = Te0 + b1 * (P_GPa - Pe0)
        chiSeut = 0.11 + 0.187 * np.exp(-0.065 * P_GPa)
        # linear interpolation between pure-Fe liquidus and eutectic
        Tm = TmFe - (TmFe - Te) * chis / chiSeut
        return Tm - To
    elif el == 'Si':
        # Anzellini pure-iron liquidus parametrization
        TmFe = 495.4969600595926 * (22.19 + P_GPa) ** 0.42016806722689076
        Tm15 = 1478 * (P_GPa / 10 + 1) ** (1 / 3)
        Tm = (chis / 0.15) * Tm15 + (1 - chis / 0.15) * TmFe
        return Tm - To
    else:
        print('Error: Light element', el, ' not defined!')
        return 0
def getchiSgrun(yT, yP, chiSold, scale, param):
    """
    Compute light-element fraction, density, Grueneisen parameter and
    adiabatic bulk modulus at a point on the adiabat.

    Args:
        yT, yP: nondimensional temperature and pressure.
        chiSold: light-element fraction at the previous radius (starting guess).
        scale: dict of nondimensionalization scales ('P', 'T').
        param: model parameters; param['li_el'] selects the light element ('S' or 'Si').

    Returns:
        (chiS, rho, grun, KS): light-element fraction, density, Grueneisen
        parameter and adiabatic bulk modulus (Pa).

    Raises:
        ValueError: if param['li_el'] is neither 'S' nor 'Si'.
    """
    # scales
    P = scale['P']
    T = scale['T']
    el = param['li_el']
    # redimensionalize P and T
    T1 = T * yT
    P1 = P * yP
    # melting temperature at the previous composition
    Tm = getmelt_anzellini(chiSold, P1, param, 0)
    if T1 > Tm:  # adiabat temperature above the liquidus: composition unchanged
        chiS = chiSold
    else:
        # find the chiS that puts the liquidus at T1, capped at the eutectic
        chiSeut = 0.11 + 0.187 * np.exp(-0.065 * P1 * 1e-9)
        sol = scipy.optimize.root(getmelt_anzellini, chiSold,
                                  tol=1e-6, args=(P1, param, T1))
        chiS = min(sol.x[0], chiSeut)
    # Updated Equation of State from Rivoldini
    if el == 'S':
        out = eos.liquidNonIdalFeS(chiS, P1 / 1E+9, T1, param)
    elif el == 'Si':
        out = eos.liquidNonIdalFeSi(chiS, P1 / 1E+9, T1, param)
    else:
        # Bug fix: previously only printed a warning and then crashed with an
        # UnboundLocalError on `out`; fail explicitly instead.
        raise ValueError("invalid light element: {!r} (expected 'S' or 'Si')".format(el))
    rho = out[1]
    KS = out[4] * 1E+9
    grun = out[6]
    return chiS, rho, grun, KS
def rhs_fluid_snow(r, y, ricb, rcmb, rho, grun, KS, scale):
    """
    Right-hand sides of the coupled ODEs for the interior model equations in
    the fluid core, with a thermally stratified layer below the CMB and the
    adiabatic temperature tracked as a fourth variable.  To be used with
    odeRK4_snow.

    Input:  r    = radius, the independent variable
            y    = vector (length 4) of dependent variables
            ricb = ICB radius
            rcmb = CMB radius
            rho  = density
            grun = Grueneisen parameter
            KS   = adiabatic bulk modulus
    Output: dydr = list of dy(i)/dr values
    """
    # scales
    a = scale['a']
    ga = scale['ga']
    P = scale['P']
    # Top of the well-mixed region; the stratified layer spans rst..rcmb.
    # Default: rst = ricb + (rcmb - ricb)/2
    rst = ricb + (rcmb - ricb) / 2
    dPdr = -(a * ga / P) * rho * y[1]
    dgdr = (a / ga) * 4 * np.pi * G * rho - 2 * y[1] / r
    # shared numerator of both temperature equations
    base = -(a * ga) * grun * rho * y[1] * y[2]
    if r < rst:
        dT = base / KS
    else:
        # temperature gradient suppressed inside the stratified layer
        dT = base * (1 - 0.95 * (r - rst) / (rcmb - rst)) / KS
    # fourth component: adiabatic reference temperature, never suppressed
    return [dPdr, dgdr, dT, base / KS]
def simpsonDat(x, f):
    """
    Composite Simpson's rule for a function sampled at equally spaced points.

    Adapted from the nmm package, here for integrand values f given at
    equally spaced abscissae x (the number of points must be odd).

    Input:  x = equally spaced points
            f = integrand evaluated at those points
    Output: approximate value of the integral of f from x[0] to x[-1]
    """
    step = x[1] - x[0]
    odd_sum = np.sum(f[1::2])            # weight-4 samples (odd interior indices)
    even_sum = np.sum((f[2::2])[0:-1])   # weight-2 samples (even interior indices)
    return (step / 3) * (f[0] + 4 * odd_sum + 2 * even_sum + f[-1])
def CvC(theta_T):
    """
    Debye heat capacity at constant volume, J/(mol K).

    Args:
        theta_T: ratio theta/T of Debye temperature to temperature (dimensionless).
    """
    # Molar gas constant, J/(mol K) — same value that Eth() defines locally.
    # Previously this function relied on a module-level RGas that is not
    # defined anywhere in this block's scope.
    RGas = 8.3144621
    f = 3 * RGas * (4 * debye3(theta_T) - theta_T * 3 / (np.exp(theta_T) - 1))
    return f
def debye3(x, maxdeg=7):  # series truncated at maxdeg terms to save computation time
    """
    Third order Debye function.

    For positive x this is (3/x^3) * integral from 0 to x of t^3/(exp(t)-1) dt.
    Based on the FORTRAN implementation in the MISCFUN package (TOMS
    Algorithm 757, ACM Transactions on Mathematical Software (1996),
    22(3):288-301).  Only accurate for 0 <= x <= 4, which covers the
    typical x values in our models.
    """
    # Chebyshev expansion coefficients of the series
    adeb3 = [2.70773706832744094526,
             0.34006813521109175100,
             -0.1294515018444086863e-1,
             0.79637553801738164e-3,
             -0.5463600095908238e-4,
             0.392430195988049e-5,
             -0.28940328235386e-6,
             0.2173176139625e-7,
             -0.165420999498e-8,
             0.12727961892e-9,
             -0.987963459e-11,
             0.77250740e-12,
             -0.6077972e-13,
             0.480759e-14,
             -0.38204e-15,
             0.3048e-16,
             -0.244e-17,
             0.20e-18,
             -0.2e-19]
    if x < 0:
        print('error in debye3: negative input value')
        return 0
    if x < 3.e-8:
        # series degenerates; use the quadratic Taylor approximation
        print('low input value in debye3: check input value')
        return ((x - 7.5) * x + 20.0) / 20.0
    if x <= 4:
        # routine only accurate within these limits, but should be OK for
        # typical x values in our models
        t = ((x ** 2 / 8) - 0.5) - 0.5
        return cheval(adeb3[0:maxdeg], t) - 0.375 * x
    print('error in debye3: input value should be smaller than 4')
    return 0
def cheval(a, t):
    """
    Evaluate a Chebyshev series at t.

    Uses the standard Clenshaw recurrence for |t| < 0.6 and the
    Reinsch-modified recurrence for |t| >= 0.6, as analysed by Oliver
    (Journal of the IMA, 20, 1977, 379-391).  Adapted from MISCFUN
    (TOMS Algorithm 757, ACM TOMS (1996), 22(3):288-301).

    Parameters:
        a: coefficients of the Chebyshev series (a[0] is weighted by 1/2).
        t: the value at which the series is to be evaluated.
    Returns:
        The value of the Chebyshev series at t.
    """
    last = len(a) - 1
    u1 = 0.0
    if -0.6 < t < 0.6:
        # standard Clenshaw method
        u0 = 0.0
        tt = t + t
        for i in range(last, -1, -1):
            u2 = u1
            u1 = u0
            u0 = tt * u1 + a[i] - u2
        return 0.5 * (u0 - u2)
    # Reinsch modification for |t| >= 0.6 (better error behaviour near ±1)
    d1 = 0.0
    tt = (t + 0.5) + 0.5
    tt = tt + tt
    for i in range(last, -1, -1):
        d2 = d1
        u2 = u1
        d1 = tt * u2 + a[i] - d2
        u1 = d1 - u2
    return 0.5 * (d1 - d2)
def Eth(T, theta):
    """
    Internal thermal energy (omitting the irrelevant term linear in theta);
    T and theta in K.
    """
    RGas = 8.3144621  # molar gas constant, J/(mol K)
    return 3. * RGas * (3 * theta / 8 + T * debye3(theta / T))
def gammaC(eta, gamma0, q0):
    """Grueneisen parameter as a power law of compression, eta = V0/V."""
    compression_factor = eta ** (-q0)
    return gamma0 * compression_factor
def thetaC(eta, theta0, gamma0, q0):
    """Debye temperature as a function of compression, eta = V0/V."""
    gamma_now = gammaC(eta, gamma0, q0)
    return theta0 * np.exp((gamma0 - gamma_now) / q0)
def rhs_Pgz(r, y, rho, scale):
    """
    Right-hand sides of the coupled ODEs for hydrostatic pressure in a
    constant-density layer.

    Input:  r     = radius, the independent variable
            y     = vector (length 2) of dependent variables
            rho   = density (constant)
            scale = dict of nondimensionalization scales
    Output: dydr = list of dy(i)/dr values
    """
    # unpack nondimensionalization scales
    a, ga, P = scale['a'], scale['ga'], scale['P']
    dPdr = -(a * ga / P) * rho * y[1]
    dgdr = (a / ga) * 4 * np.pi * G * rho - 2 * y[1] / r
    return [dPdr, dgdr]
#### The k2 stuff ####
def getk2(rs,rf,rhoml,rhos,rhof,param,scale):
"""
%
% Calculates k2, xi = [(Bs-As) - (Bs'-As')]/ (Bmf -Amf)
% for given density structure of mercury and c22
%
%
"""
# Mercury parameters
M=param['GM']/G
rm = param['rm']
rhomean = 3*M/(4*np.pi*rm**3)
c22=param['c22']
# scales
a=scale['a']
ga=scale['ga']
bigGscale=ga/(rhomean*a)
bigGnd=G/bigGscale
# Define radial grid points
nr=400
nrs=int(round(nr*(rs/rf)))
nrf=nr-nrs
r = np.empty(nr)
rho = np.empty(nr)
| |
import json
import re
import time
import requests
from multiprocessing.dummy import Pool
import lxml.html
# # html = requests.get('https://www.baidu.com').content.decode()
# # print(html)
def query(url):
    """
    Fetch the page source for a URL.

    :param url: the address to request
    :return: the decoded response body. (Bug fix: the response used to be
        discarded even though the docstring promised the page source; callers
        that ignored the previous None return are unaffected.)
    """
    return requests.get(url).content.decode()
def ca_time():
    """
    Measure how long it takes to fetch the page 100 times using a thread pool.

    :return: None (the elapsed time is printed)
    """
    start = time.time()
    # 100 identical requests, fanned out over 5 worker threads
    url_list = ['https://baidu.com' for _ in range(100)]
    pool = Pool(5)
    pool.map(query, url_list)
    end = time.time()
    # Bug fix: the elapsed time was wrapped in a set literal ({end - start}),
    # so the output looked like "{1.23}" instead of the plain number.
    print('多线程时间', end - start)
#
#
# def calc_power(num):
# return num * num
#
#
# pool = Pool(3)
# origin_num = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
# result = pool.map(calc_power, origin_num)
# print(result)
# start = time.time()
# for i in range(100):
# query('https://mi.com')
#
# end = time.time()
# print('单线程时间', {end - start})
def yanghuisanjia(rows=30):
    """
    Print Pascal's triangle (Yang Hui's triangle) row by row.

    :param rows: number of rows to generate (default 30, the previously
        hard-coded count, so existing callers behave identically)
    :return: the triangle as a list of rows (previously nothing was returned,
        which made the result unusable programmatically)
    """
    triangle = []
    for n in range(rows):
        row = [1]
        triangle.append(row)
        if n == 0:
            print(row)
            continue
        prev = triangle[n - 1]
        # each interior entry is the sum of the two entries above it
        for m in range(1, n):
            row.append(prev[m - 1] + prev[m])
        row.append(1)
        print(row)
    return triangle
def get_suning():
    """
    Fetch Suning price data returned as JSONP.

    The first endpoint covers a single product; the second (url2) covers a
    batch of ten products.

    :return: None (only the type of the decoded response is printed)
    """
    url = 'https://ds.suning.com/ds/generalForTile/' \
          '000000011346304317____R1901001_000060021-010-2-0000000000-1--ds0000000003792.jsonp?callback=ds0000000003792'
    # NOTE(review): url2 is built but never requested below — confirm whether
    # the batch endpoint is still needed.
    url2 = 'https://ds.suning.com/ds/generalForTile/' \
           '000000010989586988____R1901001_000060864,' \
           '000000010606649859____R1901001_000060021,' \
           '000000010973073407____R1901001_000060864,' \
           '000000011356354998____R1901001_000066138,' \
           '000000010657713259____R1901001_000060864,' \
           '000000011344612553____R1901001_000060DER,' \
           '000000011382632596____R1901001_000060021,' \
           '000000010657749544____R1901001_000060864,' \
           '000000011239124433____R1901001_00006J675,' \
           '000000010627906708____R1901001_000060864-010-2-0000000000-1--ds0000000006859.jsonp?callback=ds0000000006859'
    url_json = requests.get(url).content.decode()
    print(type(url_json))
from selenium import webdriver
def get_suning_code():
    """
    Use selenium to drive a real browser and fetch the page content.

    NOTE(review): requires a local chromedriver at the hard-coded path and a
    live network connection; nothing is returned — the browser is left open
    while the function sleeps.

    :return: None
    """
    driver = webdriver.Chrome(r'E:\chromedriver_win32\chromedriver.exe')
    driver.get('https://list.suning.com/0-20006-0.html?safp=d488778a.46601.searchMain.2&safc=cate.0.0')
    # js = "var q=document.documentElement.scrollTop=100000"
    # driver.execute_script("window.scrollTo(0,600000000)")
    # Scroll to the bottom in small steps so lazily loaded items are rendered;
    # the page title is tagged with "scroll-done" when the bottom is reached.
    driver.execute_script("""
    (function () {
        var y = document.body.scrollTop;
        var step = 100;
        window.scroll(0, y);
        function f() {
            if (y < document.body.scrollHeight) {
                y += step;
                window.scroll(0, y);
                setTimeout(f, 50);
            }
            else {
                window.scroll(0, y);
                document.title += "scroll-done";
            }
        }
        setTimeout(f, 1000);
    })();
    """)
    time.sleep(1000)
def xpath_test():
    """
    Scrape product information (id, title, selling point, features, review
    count) from a Suning listing page using selenium + lxml XPath.

    NOTE(review): requires a local chromedriver at the hard-coded path and a
    live network connection; results are only printed, not returned.
    """
    url = 'https://list.suning.com/0-20006-0.html?safp=d488778a.46601.searchMain.2&safc=cate.0.0'
    # test = str(selector.xpath('//*[@id="bottom_pager"]/div/span[3]/text()'))
    # text_block =int(re.findall(r"\d+\.?\d*", test)[0])
    # print(text_block)
    # # find the page-number input box and enter a page number, starting from 2
    driver = webdriver.Chrome(r'E:\chromedriver_win32\chromedriver.exe')
    driver.get(url)
    # scroll to the bottom in small steps so lazily loaded items are rendered
    driver.execute_script("""
    (function () {
        var y = document.body.scrollTop;
        var step = 100;
        window.scroll(0, y);
        function f() {
            if (y < document.body.scrollHeight) {
                y += step;
                window.scroll(0, y);
                setTimeout(f, 50);
            }
            else {
                window.scroll(0, y);
                document.title += "scroll-done";
            }
        }
        setTimeout(f, 1000);
    })();
    """)
    time.sleep(8)
    html = driver.page_source
    selector = lxml.html.fromstring(html)
    # product ids
    goods_id = selector.xpath('//*[@id="product-list"]/ul/li/@id')
    goods_id_list = []
    goods_title_list = []
    goods_selling_point_list = []
    goods_feature_list = []
    evaluation_num_list = []
    for id in goods_id:
        # # product id
        # NOTE(review): this rebinds the name goods_id (the list being
        # iterated) to the current id string; safe only because the iterator
        # was created before the loop — consider renaming.
        goods_id = id
        goods_id_list.append(goods_id)
        # product title
        goods_title = selector.xpath('//*[@id="{}"]/div/div/div[2]/div[2]/a/text()'.format(id))[0]
        goods_title_list.append(goods_title)
        # product selling point
        goods_selling_point = selector.xpath('//*[@id="{}"]/div/div/div[2]/div[2]/a/em/text()'.format(id))[0]
        goods_selling_point_list.append(goods_selling_point)
        # product features
        feature = selector.xpath('//*[@id="{}"]/div/div/div[2]/div[3]/em/text()'.format(id))
        # join the list items into a single string
        goods_feature = "+".join(feature)
        goods_feature_list.append(goods_feature)
        # number of reviews (missing node means no reviews yet)
        try:
            evaluation_num = selector.xpath('//*[@id="{}"]/div/div/div[2]/div[4]/div/a/i/text()'.format(id))[0]
        except IndexError:
            evaluation_num = '暂无评价'
        evaluation_num_list.append(evaluation_num)
    # NOTE(review): hard-coded element id — presumably a leftover debugging probe
    threegroup_id = selector.xpath('//*[@id="0070094634-11370507783"]/div/div/div[2]/div[1]/span/@threegroup_id')
    print(threegroup_id)
    print(len(goods_id_list))
    # input_f = driver.find_element_by_id('bottomPage')
    # # find the confirm button and click it
    # submit = driver.find_element_by_xpath('//*[@id="bottom_pager"]/div/a[7]')
    # input_f.clear()
    # input_f.send_keys(10)
    # time.sleep(10)
    # print('点击')
    # submit.click()
    # time.sleep(200)
def test():
    """Fetch a single-product JSONP price response and print the raw JSON
    payload; the [18:-2] slice strips the JSONP callback wrapper."""
    html = requests.get(
        'https://ds.suning.com/ds/generalForTile/000000011177564275____R1901001_000060864-010-2-0000000000-1--ds000000000001111.jsonp?callback=ds00000000011111111').content.decode()
    print(html[18:-2])
json_ = {
"status": 200,
"rs": [{
"cmmdtyCode": "000000010657713259",
"price": "3618.00",
"priceType": "1",
"singlePrice": "",
"vipPrice": "",
"superPrice": "",
"pricingMode": "",
"bizCode": "0070094634",
"vendorName": "华科手机专营店",
"govPrice": "",
"type": "2",
"subCode": "",
"invStatus": "1",
"balanceStartTime": "",
"balanceEndTime": "",
"locatCode": "0001",
"stdLocatCode": "",
"plantCode": "Z048",
"chargePlantCode": "",
"cityFrom": "",
"arrivalDate": "",
"purchaseFlag": "5",
"vendorType": "921C店",
"supplierCode": "0070094634",
"commondityTry": "",
"reservationType": "",
"reservationPrice": "",
"subscribeType": "",
"subscribePrice": "",
"collection": "",
"visited": "",
"sellingPoint": "",
"promoTypes": [],
"promotionList": [{
"type": "11",
"simple": "领券999-5",
"full": "满999用5",
"giftList": []
}, {
"type": "5",
"simple": "赠品",
"full": "购买可送赠品",
"giftList": ["000000011001391114"]
}],
"imageUrl": "",
"patternCss": "",
"text": "",
"energySubsidy": "",
"feature": "0",
"priceDifference": "",
"jdPrice": "",
"jdPriceUpdateTime": "",
"snPrice": "3618.00",
"refPrice": "",
"discount": "",
"originalPrice": "",
"oversea": "0",
"shoppingCart": "1",
"bigPromotion": "0",
"storeStock": "",
"distance": "",
"storeStockName": "",
"prototype": "",
"prototypeStoreName": "",
"prototypeDistance": "",
"explosion": [{
"imageUrl": "http://image.suning.cn/uimg/pcms/label05/123672056913155548788900_05.png",
"patternCss": "2",
"text": "",
"labelPlaceArea": "0100"
}],
"subCodeImageVersion": "",
"directoryIds": "",
"pinPrice": "3608.00",
"promotionLable": "",
"promotionColor": "",
"promotionLable2": "",
"promotionColor2": "",
"purchase": "",
"replacementRisk": "0",
"minimumSale": "",
"suningLogistics": "",
"marketVipPriceType": "",
"pgActionId": "",
"pgNum": "",
"featureService": "",
"freightInsurance": "",
"salesVolume": "",
"goodShop": "https://image.suning.cn/uimg/MLS/label/128435957711911560924640.png",
"publicWelfare": "",
"excellentGoods": "",
"excellentGoodsText": "",
"shoppingAllowance": "",
"book": "",
"book2": "",
"newArrival": "",
"supernewArrival": "",
"freeInterest": "",
"dr": {},
"rq": {},
"categoryName": "",
"goodsIndex": "",
"qualityInspection": "",
"jsdflg": "",
"marketingEventTracking": "116"
}, {
"cmmdtyCode": "000000010973073475",
"price": "3688.00",
"priceType": "4-1",
"singlePrice": "",
"vipPrice": "",
"superPrice": "",
"pricingMode": "",
"bizCode": "0000000000",
"vendorName": "苏宁自营",
"govPrice": "",
"type": "2",
"subCode": "",
"invStatus": "1",
"balanceStartTime": "",
"balanceEndTime": "",
"locatCode": "0001",
"stdLocatCode": "0001",
"plantCode": "D009",
"chargePlantCode": "D009",
"cityFrom": "",
"arrivalDate": "",
"purchaseFlag": "0",
"vendorType": "",
"supplierCode": "0010127391",
"commondityTry": "",
"reservationType": "",
"reservationPrice": "",
"subscribeType": "",
"subscribePrice": "",
"collection": "",
"visited": "",
"sellingPoint": "",
"promoTypes": [],
"promotionList": [],
"imageUrl": "",
"patternCss": "",
"text": "",
"energySubsidy": "0",
"feature": "0",
"priceDifference": "",
"jdPrice": "",
"jdPriceUpdateTime": "",
"snPrice": "3688.00",
"refPrice": "4288.00",
"discount": "8.6",
"originalPrice": "4288.00",
"oversea": "0",
"shoppingCart": "1",
"bigPromotion": "0",
"storeStock": "",
"distance": "",
"storeStockName": "",
"prototype": "",
"prototypeStoreName": "",
"prototypeDistance": "",
"explosion": [{
"imageUrl": "http://image.suning.cn/uimg/pcms/label05/123672056913155548788900_05.png",
"patternCss": "2",
"text": "",
"labelPlaceArea": "0100"
}],
"subCodeImageVersion": "",
"directoryIds": "",
"pinPrice": "",
"promotionLable": "大聚惠",
"promotionColor": "1",
"promotionLable2": "大聚惠",
"promotionColor2": "1",
"purchase": "",
"replacementRisk": "0",
"minimumSale": "",
"suningLogistics": "",
"marketVipPriceType": "",
"pgActionId": "",
"pgNum": "",
"featureService": "",
"freightInsurance": "",
"salesVolume": "",
"goodShop": "",
"publicWelfare": "",
"excellentGoods": "",
"excellentGoodsText": "",
"shoppingAllowance": "",
"book": "",
"book2": "",
"newArrival": "",
"supernewArrival": "",
"freeInterest": "",
"dr": {},
"rq": {},
"categoryName": "",
"goodsIndex": "",
"qualityInspection": "",
"jsdflg": "",
"marketingEventTracking": "101"
}, {
"cmmdtyCode": "000000010679340444",
"price": "599.00",
"priceType": "4-1",
"singlePrice": "",
"vipPrice": "",
"superPrice": "",
"pricingMode": "",
"bizCode": "0000000000",
"vendorName": "苏宁自营",
"govPrice": "",
"type": "2",
"subCode": "",
"invStatus": "1",
"balanceStartTime": "",
"balanceEndTime": "",
"locatCode": "0001",
"stdLocatCode": "0001",
"plantCode": "D009",
"chargePlantCode": "D009",
"cityFrom": "",
"arrivalDate": "",
"purchaseFlag": "0",
"vendorType": "",
"supplierCode": "0010079513",
"commondityTry": "",
"reservationType": "",
"reservationPrice": "",
"subscribeType": "",
"subscribePrice": "",
"collection": "",
"visited": "",
"sellingPoint": "",
"promoTypes": [],
"promotionList": [],
"imageUrl": "",
"patternCss": "",
"text": "",
"energySubsidy": "0",
"feature": "0",
"priceDifference": "",
"jdPrice": "",
"jdPriceUpdateTime": "",
"snPrice": "799.00",
"refPrice": "799.00",
"discount": "7.5",
"originalPrice": "799.00",
"oversea": "0",
"shoppingCart": "1",
"bigPromotion": "0",
"storeStock": "",
"distance": "",
"storeStockName": "",
"prototype": "",
"prototypeStoreName": "",
"prototypeDistance": "",
"explosion": [{
"imageUrl": "http://image.suning.cn/uimg/pcms/label05/123672056913155548788900_05.png",
"patternCss": "2",
"text": "",
"labelPlaceArea": "0100"
}],
"subCodeImageVersion": "",
"directoryIds": "",
"pinPrice": "",
"promotionLable": "大聚惠",
"promotionColor": "1",
"promotionLable2": "大聚惠",
"promotionColor2": "1",
"purchase": "",
"replacementRisk": "0",
"minimumSale": "",
"suningLogistics": "",
"marketVipPriceType": "",
"pgActionId": "",
"pgNum": "",
"featureService": "",
"freightInsurance": "",
"salesVolume": "",
"goodShop": "",
"publicWelfare": "",
"excellentGoods": "",
"excellentGoodsText": "",
"shoppingAllowance": "",
"book": "",
"book2": "",
"newArrival": "",
"supernewArrival": "",
"freeInterest": "",
"dr": {},
"rq": {},
"categoryName": "",
"goodsIndex": "",
"qualityInspection": "",
"jsdflg": "",
"marketingEventTracking": "101"
}, {
"cmmdtyCode": "000000011527836113",
"price": "13499.00",
"priceType": "1",
"singlePrice": "",
"vipPrice": "",
"superPrice": "",
"pricingMode": "",
"bizCode": "0000000000",
"vendorName": "苏宁自营",
"govPrice": "",
"type": "2",
"subCode": "",
"invStatus": "1",
"balanceStartTime": "",
"balanceEndTime": "",
"locatCode": "0001",
"stdLocatCode": "0001",
"plantCode": "D009",
"chargePlantCode": "D009",
"cityFrom": "",
"arrivalDate": "",
"purchaseFlag": "0",
"vendorType": "",
"supplierCode": "0010127391",
"commondityTry": "",
"reservationType": "",
"reservationPrice": "",
"subscribeType": "",
"subscribePrice": "",
"collection": "",
"visited": "",
"sellingPoint": "",
"promoTypes": [],
"promotionList": [],
"imageUrl": "",
"patternCss": "",
"text": "",
"energySubsidy": "0",
"feature": "0",
"priceDifference": "",
"jdPrice": "",
"jdPriceUpdateTime": "",
"snPrice": "13499.00",
"refPrice": "",
"discount": "",
"originalPrice": "",
"oversea": "0",
"shoppingCart": "1",
"bigPromotion": "0",
"storeStock": "",
"distance": "",
"storeStockName": "",
"prototype": "",
"prototypeStoreName": "",
"prototypeDistance": "",
"explosion": [{
"imageUrl": "http://image.suning.cn/uimg/pcms/label05/123672056913155548788900_05.png",
"patternCss": "2",
"text": "",
"labelPlaceArea": "0100"
}],
"subCodeImageVersion": "",
"directoryIds": "",
"pinPrice": "",
"promotionLable": "",
"promotionColor": "",
"promotionLable2": "",
"promotionColor2": "",
"purchase": "",
"replacementRisk": "0",
"minimumSale": "",
"suningLogistics": "",
"marketVipPriceType": "",
"pgActionId": "",
"pgNum": "",
"featureService": "",
"freightInsurance": "",
"salesVolume": "",
"goodShop": "",
"publicWelfare": "",
"excellentGoods": "",
"excellentGoodsText": "",
"shoppingAllowance": "",
"book": "",
"book2": "",
"newArrival": "",
"supernewArrival": "",
"freeInterest": "",
"dr": {},
"rq": {},
"categoryName": "",
"goodsIndex": "",
"qualityInspection": "",
"jsdflg": "",
"marketingEventTracking": ""
}, {
"cmmdtyCode": "000000011204923031",
"price": "3388.00",
"priceType": "4-1",
"singlePrice": "",
"vipPrice": "",
"superPrice": "",
"pricingMode": "",
"bizCode": "0000000000",
"vendorName": "苏宁自营",
"govPrice": "",
"type": "2",
"subCode": "",
"invStatus": "1",
"balanceStartTime": "",
"balanceEndTime": "",
"locatCode": "0001",
"stdLocatCode": "0001",
"plantCode": "D009",
"chargePlantCode": "D009",
"cityFrom": "",
"arrivalDate": "",
"purchaseFlag": "0",
"vendorType": "",
"supplierCode": "0010127391",
"commondityTry": "",
"reservationType": "",
"reservationPrice": "",
"subscribeType": | |
Category
:type diagnostic_category: str
:param slot: Slot Name
:type slot: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of AnalysisDefinition
:rtype:
~azure.mgmt.web.models.AnalysisDefinitionPaged[~azure.mgmt.web.models.AnalysisDefinition]
:raises:
:class:`DefaultErrorResponseException<azure.mgmt.web.models.DefaultErrorResponseException>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list_site_analyses_slot.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'siteName': self._serialize.url("site_name", site_name, 'str'),
'diagnosticCategory': self._serialize.url("diagnostic_category", diagnostic_category, 'str'),
'slot': self._serialize.url("slot", slot, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.DefaultErrorResponseException(self._deserialize, response)
return response
# Deserialize response
deserialized = models.AnalysisDefinitionPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.AnalysisDefinitionPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list_site_analyses_slot.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/slots/{slot}/diagnostics/{diagnosticCategory}/analyses'}
def get_site_analysis_slot(
        self, resource_group_name, site_name, diagnostic_category, analysis_name, slot, custom_headers=None, raw=False, **operation_config):
    """Get Site Analysis.

    Fetch a single diagnostic analysis definition for a deployment slot.

    :param resource_group_name: Name of the resource group to which the
     resource belongs.
    :type resource_group_name: str
    :param site_name: Site Name
    :type site_name: str
    :param diagnostic_category: Diagnostic Category
    :type diagnostic_category: str
    :param analysis_name: Analysis Name
    :type analysis_name: str
    :param slot: Slot - optional
    :type slot: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: DiagnosticAnalysis or ClientRawResponse if raw=true
    :rtype: ~azure.mgmt.web.models.DiagnosticAnalysis or
     ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`DefaultErrorResponseException<azure.mgmt.web.models.DefaultErrorResponseException>`
    """
    # Fill the URL template with the serialized path components.
    path_args = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
        'siteName': self._serialize.url("site_name", site_name, 'str'),
        'diagnosticCategory': self._serialize.url("diagnostic_category", diagnostic_category, 'str'),
        'analysisName': self._serialize.url("analysis_name", analysis_name, 'str'),
        'slot': self._serialize.url("slot", slot, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(self.get_site_analysis_slot.metadata['url'], **path_args)

    # Query string: only the API version is required for this call.
    query = {}
    query['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

    # Assemble request headers (client request id, caller extras, language).
    headers = {}
    headers['Accept'] = 'application/json'
    if self.config.generate_client_request_id:
        headers['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        headers.update(custom_headers)
    if self.config.accept_language is not None:
        headers['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Issue the GET and fail fast on anything but 200.
    request = self._client.get(url, query, headers)
    response = self._client.send(request, stream=False, **operation_config)
    if response.status_code not in [200]:
        raise models.DefaultErrorResponseException(self._deserialize, response)

    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('DiagnosticAnalysis', response)
    if raw:
        return ClientRawResponse(deserialized, response)
    return deserialized
get_site_analysis_slot.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/slots/{slot}/diagnostics/{diagnosticCategory}/analyses/{analysisName}'}
def execute_site_analysis_slot(
        self, resource_group_name, site_name, diagnostic_category, analysis_name, slot, start_time=None, end_time=None, time_grain=None, custom_headers=None, raw=False, **operation_config):
    """Execute Analysis.

    Runs the named diagnostic analysis for a deployment slot, optionally
    restricted to a time window.

    :param resource_group_name: Name of the resource group to which the
     resource belongs.
    :type resource_group_name: str
    :param site_name: Site Name
    :type site_name: str
    :param diagnostic_category: Category Name
    :type diagnostic_category: str
    :param analysis_name: Analysis Resource Name
    :type analysis_name: str
    :param slot: Slot Name
    :type slot: str
    :param start_time: Start Time
    :type start_time: datetime
    :param end_time: End Time
    :type end_time: datetime
    :param time_grain: Time Grain (ISO-8601 duration matching PT[1-9][0-9]+[SMH])
    :type time_grain: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: DiagnosticAnalysis or ClientRawResponse if raw=true
    :rtype: ~azure.mgmt.web.models.DiagnosticAnalysis or
     ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`DefaultErrorResponseException<azure.mgmt.web.models.DefaultErrorResponseException>`
    """
    # Construct URL
    url = self.execute_site_analysis_slot.metadata['url']
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
        'siteName': self._serialize.url("site_name", site_name, 'str'),
        'diagnosticCategory': self._serialize.url("diagnostic_category", diagnostic_category, 'str'),
        'analysisName': self._serialize.url("analysis_name", analysis_name, 'str'),
        'slot': self._serialize.url("slot", slot, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)
    # Construct parameters.  The time window is optional; datetimes are
    # serialized as ISO-8601 and the grain is validated against a pattern.
    query_parameters = {}
    if start_time is not None:
        query_parameters['startTime'] = self._serialize.query("start_time", start_time, 'iso-8601')
    if end_time is not None:
        query_parameters['endTime'] = self._serialize.query("end_time", end_time, 'iso-8601')
    if time_grain is not None:
        query_parameters['timeGrain'] = self._serialize.query("time_grain", time_grain, 'str', pattern=r'PT[1-9][0-9]+[SMH]')
    query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
    # Construct headers
    header_parameters = {}
    header_parameters['Accept'] = 'application/json'
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
    # Construct and send request.  POST (not GET): this triggers an
    # analysis run on the service side.
    request = self._client.post(url, query_parameters, header_parameters)
    response = self._client.send(request, stream=False, **operation_config)
    if response.status_code not in [200]:
        raise models.DefaultErrorResponseException(self._deserialize, response)
    # Deserialize the body only on success; wrap it when raw was requested.
    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('DiagnosticAnalysis', response)
    if raw:
        client_raw_response = ClientRawResponse(deserialized, response)
        return client_raw_response
    return deserialized
execute_site_analysis_slot.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/slots/{slot}/diagnostics/{diagnosticCategory}/analyses/{analysisName}/execute'}
def list_site_detectors_slot(
        self, resource_group_name, site_name, diagnostic_category, slot, custom_headers=None, raw=False, **operation_config):
    """Get Detectors.

    List all diagnostic detectors of a category for a deployment slot.

    :param resource_group_name: Name of the resource group to which the
     resource belongs.
    :type resource_group_name: str
    :param site_name: Site Name
    :type site_name: str
    :param diagnostic_category: Diagnostic Category
    :type diagnostic_category: str
    :param slot: Slot Name
    :type slot: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: An iterator like instance of DetectorDefinition
    :rtype:
     ~azure.mgmt.web.models.DetectorDefinitionPaged[~azure.mgmt.web.models.DetectorDefinition]
    :raises:
     :class:`DefaultErrorResponseException<azure.mgmt.web.models.DefaultErrorResponseException>`
    """
    def internal_paging(next_link=None, raw=False):
        # Fetch one page.  The first call builds the collection URL from
        # the template; later calls follow the service-supplied next_link
        # verbatim (it already embeds the api-version).
        if not next_link:
            # Construct URL
            url = self.list_site_detectors_slot.metadata['url']
            path_format_arguments = {
                'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
                'siteName': self._serialize.url("site_name", site_name, 'str'),
                'diagnosticCategory': self._serialize.url("diagnostic_category", diagnostic_category, 'str'),
                'slot': self._serialize.url("slot", slot, 'str'),
                'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
            }
            url = self._client.format_url(url, **path_format_arguments)
            # Construct parameters
            query_parameters = {}
            query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        else:
            url = next_link
            query_parameters = {}
        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.get(url, query_parameters, header_parameters)
        response = self._client.send(request, stream=False, **operation_config)
        if response.status_code not in [200]:
            raise models.DefaultErrorResponseException(self._deserialize, response)
        return response
    # Deserialize response.  The Paged iterator drives internal_paging
    # lazily, one HTTP request per page.
    deserialized = models.DetectorDefinitionPaged(internal_paging, self._deserialize.dependencies)
    if raw:
        header_dict = {}
        client_raw_response = models.DetectorDefinitionPaged(internal_paging, self._deserialize.dependencies, header_dict)
        return client_raw_response
    return deserialized
list_site_detectors_slot.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/slots/{slot}/diagnostics/{diagnosticCategory}/detectors'}
def get_site_detector_slot(
        self, resource_group_name, site_name, diagnostic_category, detector_name, slot, custom_headers=None, raw=False, **operation_config):
    """Get Detector.

    NOTE(review): despite the singular name, this operation is generated
    as a *paged* list returning DetectorDefinitionPaged, matching the
    declared :rtype: below — confirm against the service specification.

    :param resource_group_name: Name of the resource group to which the
     resource belongs.
    :type resource_group_name: str
    :param site_name: Site Name
    :type site_name: str
    :param diagnostic_category: Diagnostic Category
    :type diagnostic_category: str
    :param detector_name: Detector Name
    :type detector_name: str
    :param slot: Slot Name
    :type slot: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: An iterator like instance of DetectorDefinition
    :rtype:
     ~azure.mgmt.web.models.DetectorDefinitionPaged[~azure.mgmt.web.models.DetectorDefinition]
    :raises:
     :class:`DefaultErrorResponseException<azure.mgmt.web.models.DefaultErrorResponseException>`
    """
    def internal_paging(next_link=None, raw=False):
        # Fetch one page; first call builds the URL, later calls follow
        # the service-supplied next_link verbatim.
        if not next_link:
            # Construct URL
            url = self.get_site_detector_slot.metadata['url']
            path_format_arguments = {
                'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
                'siteName': self._serialize.url("site_name", site_name, 'str'),
                'diagnosticCategory': self._serialize.url("diagnostic_category", diagnostic_category, 'str'),
                'detectorName': self._serialize.url("detector_name", detector_name, 'str'),
                'slot': self._serialize.url("slot", slot, 'str'),
                'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
            }
            url = self._client.format_url(url, **path_format_arguments)
            # Construct parameters
            query_parameters = {}
            query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        else:
            url = next_link
            query_parameters = {}
        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.get(url, query_parameters, header_parameters)
        response = self._client.send(request, stream=False, **operation_config)
        if response.status_code not in [200]:
            raise models.DefaultErrorResponseException(self._deserialize, response)
        return response
    # Deserialize response via the lazy Paged iterator.
    deserialized = models.DetectorDefinitionPaged(internal_paging, self._deserialize.dependencies)
    if raw:
        header_dict = {}
        client_raw_response = models.DetectorDefinitionPaged(internal_paging, self._deserialize.dependencies, header_dict)
        return client_raw_response
    return deserialized
get_site_detector_slot.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/slots/{slot}/diagnostics/{diagnosticCategory}/detectors/{detectorName}'}
def execute_site_detector_slot(
self, resource_group_name, site_name, detector_name, diagnostic_category, slot, start_time=None, end_time=None, time_grain=None, custom_headers=None, raw=False, **operation_config):
"""Execute Detector.
Execute Detector.
:param resource_group_name: Name of the resource group to which the
resource belongs.
:type resource_group_name: str
:param site_name: Site Name
:type site_name: str
:param detector_name: Detector Resource Name
:type detector_name: str
:param diagnostic_category: Category Name
:type diagnostic_category: str
:param slot: Slot Name
:type slot: str
:param start_time: Start Time
:type start_time: datetime
:param end_time: End Time
:type end_time: datetime
:param time_grain: Time Grain
:type time_grain: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: DiagnosticDetectorResponse or ClientRawResponse if raw=true
:rtype: | |
"""Create important distributions classes.
Especially provide the logic for a hypererlang distributions with data
fitting.
"""
import logging
import multiprocessing as mp
from itertools import combinations_with_replacement
from typing import Any, Dict, Generator, Iterable, List, Optional, Union
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy.stats
from pynverse import inversefunc
from hoqunm.utils.utils import LOGGING_DIR, Heap, get_logger
class HypererlangSpecs:
    """Specifications for the hyper-erlang fit.

    :param processors: Number of worker processes to use for multiprocessing.
    :param N: The sum of the lengths of the erlang distributions for state limitation.
    :param convergence_criteria: The convergence criterion applied in each round.
    :param maximal_distributions: The maximal number of candidate distributions
        kept after each round.
    :raises AttributeError: If the two per-round lists differ in length.
    :raises ValueError: If ``N`` is not positive.
    """
    def __init__(self,
                 processors: int = mp.cpu_count() - 1,
                 N: int = 10,
                 convergence_criteria: Optional[List[float]] = None,
                 maximal_distributions: Optional[List[int]] = None):
        self.processors = processors
        self.N = N
        # Defaults: three fitting rounds with increasingly strict criteria
        # while the surviving candidate set shrinks (50 -> 25 -> 1).
        self.convergence_criteria = convergence_criteria if convergence_criteria is not None else [
            1e-4, 1e-6, 1e-8
        ]
        self.maximal_distributions = maximal_distributions if maximal_distributions is not None \
            else [50, 25, 1]
        # Every round needs both a criterion and a candidate limit.
        if not len(self.convergence_criteria) == len(
                self.maximal_distributions):
            raise AttributeError(
                "Length of convergence criteria and maximal distributions do not match."
            )
        if self.N <= 0:
            # BUG FIX: the old message claimed N had to be larger than 10,
            # but the guard only rejects non-positive values.
            raise ValueError(f"N has to be larger than 0. N is {N}")
    @staticmethod
    def load_dict(arguments: Dict[str, Any]) -> "HypererlangSpecs":
        """Create class from dict with arguments and values in it.

        :param arguments: The dict containing the parameter-argument pairs.
        :return: Class instance.
        """
        return HypererlangSpecs(**arguments)
class HyperDistribution:
    """A class representing a hyper (mixture) distribution of a given
    distribution type.  For compatibility, the methods and attributes are
    similar to those of scipy.stats.rv_continuous.

    :param distribution: The component distribution type.
    :param hyper: The hyper (mixture weight) parameters; normalized to sum to 1.
    :param kwargs: The arguments needed for the distribution.
        Each will be in list style having the same shape as hyper.
    """
    def __init__(self, distribution: scipy.stats.rv_continuous,
                 hyper: Union[np.ndarray, List[float]],
                 **kwargs: Union[np.ndarray, List[float]]):
        self.dist = self  # for compatibility with scipy.stats
        self.distribution = distribution
        self.name = "hyper" + self.distribution.name
        self.hyper = np.asarray(hyper).reshape(-1)
        # Normalize the weights into a probability vector.
        self.hyper = self.hyper / self.hyper.sum()
        kwargs = {
            key: np.asarray(arg).reshape(-1)
            for key, arg in kwargs.items()
        }
        # One parameter dict per mixture component.
        self.kwargs = [{key: arg[i]
                        for key, arg in kwargs.items()}
                       for i in range(self.hyper.shape[0])]
        # One weight plus one value per kwarg for every component.
        self.paramno = self.hyper.shape[0] * (1 + len(kwargs))
    def mean(self) -> float:
        """Return the mean of the distribution (weighted component means).

        :return: Mean of the distribution.
        """
        return float(
            np.sum([
                p * self.distribution.mean(**self.kwargs[i])
                for i, p in enumerate(self.hyper)
            ]))
    def var(self) -> float:
        """Return the variance of the distribution.

        BUG FIX: the return annotation was ``np.float``, an alias removed
        in NumPy 1.24 — evaluating it crashed class creation on modern NumPy.
        NOTE(review): the value is the weighted sum of component variances
        only; a full mixture variance would also include the spread of the
        component means — confirm intended semantics.

        :return: Variance of the distribution.
        """
        return float(
            np.sum([
                p * self.distribution.var(**self.kwargs[i])
                for i, p in enumerate(self.hyper)
            ]))
    def pdf(self, x: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
        """The pdf (probability density function) evaluated at x.

        :param x: x values, where the pdf should be evaluated.
        :return: Corresponding value of pdf at x.
        """
        return np.sum([
            p * self.distribution.pdf(x=x, **self.kwargs[i])
            for i, p in enumerate(self.hyper)
        ],
                      axis=0)
    def cdf(self, x: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
        """The cdf (cumulative density function) evaluated at x.

        :param x: x values, where the cdf should be evaluated.
        :return: Corresponding value of cdf at x.
        """
        return np.sum([
            p * self.distribution.cdf(x=x, **self.kwargs[i])
            for i, p in enumerate(self.hyper)
        ],
                      axis=0)
    def ppf(self, x: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
        """The ppf (percent point function - the inverse of the cdf) evaluated at x.
        Since this is not analytically available, compute it with an inversefunc module.

        :param x: x values, where the ppf should be evaluated.
        :return: Corresponding value of ppf at x.
        """
        return inversefunc(self.cdf)(x)
    def rvs(self, size: Union[int, tuple, None] = None) -> Union[np.ndarray, float]:
        """A random value of the random variable.

        :param size: The size of the np.array with random values.
        :return: Random value(s).
        """
        # Draw the mixture component per sample, then sample each
        # component in full and keep only the matching positions.
        index = np.random.choice(a=self.hyper.shape[0],
                                 p=self.hyper,
                                 size=size)
        out = np.zeros(size, dtype="float64")
        if size:
            for i, _ in enumerate(self.hyper):
                out[index == i] = self.distribution.rvs(**self.kwargs[i],
                                                        size=size)[index == i]
        else:
            out = self.distribution.rvs(**self.kwargs[index], size=size)
        return out
    def log_likelihood(self, x: Union[float, np.ndarray]) -> float:
        """Compute the log likelihood of the hyper_distribution w.r.t. the
        observed data x.

        :param x: The observed data.
        :return: The log likelihood.
        """
        return np.sum(np.log(self.pdf(x)))
    def __str__(self) -> str:
        """A very basic representation of the class.

        :return: String of all attributes with respective values.
        """
        return str([(key, val) for key, val in self.__dict__.items()
                    if not callable(getattr(self, key))])
class Hypererlang(HyperDistribution):
"""A class representing a hyper erlang distributions this is in so far
special, that we know an algorithm to fit a hypererlang distributions to
data.
:param hyper: The hyper parameters.
:param kwargs: The arguments needed for the distributions.
Each will be in list style having the same shape as hyper.
"""
name = "hypererlang"
def __init__(self,
             hyper: List[float],
             paramno: Optional[int] = None,
             logger: Optional[logging.Logger] = None,
             **kwargs: Union[np.ndarray, List[float]]):
    """Initialize a hyper-erlang distribution.

    :param hyper: The mixture weights.
    :param paramno: Optional override for the free-parameter count.
    :param logger: Logger to use; a default file logger is created if None.
    :param kwargs: Component parameters ("a" shapes plus either "lambd"
        rates or scipy-style "scale" values), one entry per component.
    """
    # Accept a rate parametrization ("lambd") and convert it to the
    # scipy "scale" parametrization (scale = 1 / lambda).
    if kwargs.get("lambd"):
        lambd = np.asarray(kwargs.pop("lambd"))
        kwargs["scale"] = 1 / lambd
    super().__init__(scipy.stats.erlang, hyper, **kwargs)
    if paramno is not None:
        # Allow overriding the count computed by HyperDistribution.
        self.paramno = paramno
    # Cache rates and erlang shape parameters as flat arrays for the EM fit.
    self.lambd = 1 / np.asarray(kwargs["scale"]).reshape(-1)
    self.a = np.asarray(kwargs["a"]).reshape(-1)
    # Fitting state: updated by fit_lambd_hyper.
    self.convergence_error = np.inf
    self.log_likelihood_fit = -np.inf
    self.logger = logger if logger is not None else get_logger(
        "hypererlang_distribution",
        LOGGING_DIR.joinpath("hypererlang_distribution.log"))
def save_dict(self) -> Dict[str, Any]:
    """Create dictionary with argument value mapping.

    The result can be fed back to :meth:`load_dict` to reconstruct the
    distribution.

    :return: Argument value mapping for class creation.
    """
    arguments = {"hyper": self.hyper.tolist(), "paramno": self.paramno}
    # Re-assemble the per-component kwargs into parallel lists, keyed by
    # the parameter names of the last component (all components share keys).
    arguments.update({
        key: [arg[key] for arg in self.kwargs]
        for key in self.kwargs[-1]
    })
    # Overwrite "a" with a plain list (the kwargs hold numpy scalars).
    arguments["a"] = self.a.tolist()
    return arguments
@staticmethod
def load_dict(arguments: Dict[str, Any]) -> "Hypererlang":
    """Create class instance from given dict (inverse of :meth:`save_dict`).

    :param arguments: Argument value mapping for class instance.
    :return: Class instance.
    """
    return Hypererlang(**arguments)
def ppf(self, x: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
    """The ppf (percent point function - the inverse of the cdf) evaluated at x.
    Since this is not analytically available, compute it with an inversefunc module.
    The support is known to be the non-negative reals, so restrict the
    inversion domain accordingly.

    :param x: x values, where the ppf should be evaluated.
    :return: Corresponding value of ppf at x.
    """
    return inversefunc(self.cdf, domain=0)(x)
def fit_lambd_hyper(self,
                    x: Union[List[float], np.ndarray],
                    convergence_criterion: float = 1e-6) -> "Hypererlang":
    """Fit lambda and hyper parameters for given data (EM iteration) until
    convergence_criterion is met.

    :param x: The data to fit the distributions to.
    :param convergence_criterion: Relative log-likelihood change below which
        the iteration stops.
    :return: An instance of self.
    """
    x = np.asarray(x)
    # log((a_i - 1)!) per component — the erlang density normalizer.
    log_a = np.array([
        np.sum(np.log(np.arange(1, a_, dtype="float64")))
        for i, a_ in enumerate(self.a)
    ])  # shape(m)
    x_ = x.reshape(-1, 1)  # shape(k, 1)
    runs = 0
    self.log_likelihood_fit = self.log_likelihood(x)
    while convergence_criterion <= self.convergence_error:
        # E-step: erlang densities p_ and responsibilities q_ per sample.
        p_ = self.lambd * np.exp((self.a - 1) * np.log(self.lambd * x_) -
                                 log_a - self.lambd * x_)  # shape(k, m)
        q_ = self.hyper * p_  # shape(k, m)
        q_ = q_ / q_.sum(axis=1).reshape(-1, 1)  # shape(k, m)
        # M-step: re-estimate mixture weights and rates.
        self.hyper = (1 / x_.shape[0]) * q_.sum(axis=0)  # shape(m)
        self.lambd = self.a * q_.sum(axis=0) / np.sum(q_ * x_,
                                                      axis=0)  # shape(m)
        log_likelihood_fit = self.log_likelihood(x)
        # Relative log-likelihood improvement drives convergence.
        self.convergence_error = abs(
            (log_likelihood_fit - self.log_likelihood_fit) /
            self.log_likelihood_fit)
        self.log_likelihood_fit = log_likelihood_fit
        runs += 1
    # Propagate the fitted rates back into the scipy "scale" kwargs.
    for i, kwarg_i in enumerate(self.kwargs):
        kwarg_i["scale"] = 1 / self.lambd[i]
    return self
def fit(self,
        x: Union[List[float], np.ndarray],
        specs: Optional[HypererlangSpecs] = None) -> None:
    """Fit a hyper-erlang distribution to the data with the EM algorithm
    following "A novel approach for phase-type fitting", where the total
    length of all erlang distributions equals N.

    The fitting runs in successive rounds (one per entry in
    ``specs.convergence_criteria``), each round keeping fewer candidate
    configurations while tightening the convergence criterion.
    Changes the parameters on self.

    :param x: The data to fit to.
    :param specs: The specifications (defaults to :class:`HypererlangSpecs`).
    """
    if specs is None:
        specs = HypererlangSpecs()
    convergence_criteria = np.asarray(specs.convergence_criteria)
    maximal_distributions = np.asarray(specs.maximal_distributions)
    hypererlangs: Iterable[Hypererlang] = self.iterate_hyp_erl(specs.N)
    heap = Heap()
    for i, convergence_criterion in enumerate(convergence_criteria):
        # The heap keeps only the best maximal_distributions[i] candidates.
        heap.change_length(maximal_distributions[i])
        if specs.processors > 1:
            # Fan the candidate fits out over a process pool.
            pool = mp.Pool(processes=specs.processors)
            for hypererlang_ in hypererlangs:
                # this gives all allowed values for r_m
                pool.apply_async(hypererlang_.fit_lambd_hyper,
                                 args=(x, convergence_criterion),
                                 callback=heap.push,
                                 error_callback=self.error_callback)
            pool.close()
            pool.join()
        else:
            for hypererlang_ in hypererlangs:
                # this gives all allowed values for r_m
                heap.push(
                    hypererlang_.fit_lambd_hyper(x, convergence_criterion))
        # Survivors of this round become the candidates of the next.
        hypererlangs = heap.copy_to_list()
    # heap[0] has the parameters we want, so copy them onto self.
    candidate = heap.nlargest(1)[0]
    for key, val in candidate.__dict__.items():
        if hasattr(candidate, key):
            setattr(self, key, val)
@staticmethod
def iterate_hyp_erl(N: int = 10) -> Generator["Hypererlang", None, None]:
"""Generate all combinations of hypererlang | |
# Author: <NAME>
# VK: vk.com/maksim2009rus
# 1) Generating a Non-Deterministic Finite Automaton by a regular expression.
# 2) Translation of NDFA to DFA.
#
import graphviz as gv
# Standard character classes
D = frozenset("0123456789")
W = D | frozenset("_abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
S = frozenset(" \t\n\v\f\r")
SYMBOLS = frozenset("`~!@#%&=;:'\",<>/")
SPECIALS = frozenset("\|.()[]{}*+?-$^")
ALPHABET = W | S | SYMBOLS
ESCAPES = SPECIALS | frozenset("wrtsdfvbn")
def states_generator(index: int) -> str:
    """ Yield an endless stream of unique state names.

    Starting after ``index``, produces "S<index+1>", "S<index+2>", ...
    """
    counter = index
    while True:
        counter += 1
        yield f"S{counter}"
def f1(string: str, A: str, B: str, matrix: dict) -> None:
    """ Split a regex string on top-level '|' alternations.

    Each alternative branch is forwarded to f2 with the same start state A
    and end state B.  Bracket depth is tracked so '|' inside '(...)' or
    '[...]' groups is ignored; escaped brackets ("\\(" etc.) are skipped.
    """
    # A trailing '|' guarantees the last alternative is flushed too.
    string += '|'
    counter_round_brackets = 0
    counter_square_brackets = 0
    ind, pos = 0, 0
    substrings = list()
    while ind < len(string):
        # Skip escaped specials so they do not affect bracket counting.
        if string[ind:ind + 2] in ("\|", "\(", "\)", "\[", "\]"):
            ind += 1
        elif string[ind] == '(':
            if not counter_square_brackets:
                counter_round_brackets += 1
        elif string[ind] == ')':
            if not counter_square_brackets:
                counter_round_brackets -= 1
        elif string[ind] == '[':
            # Square brackets do not nest; only the first one counts.
            if counter_square_brackets < 1:
                counter_square_brackets += 1
        elif string[ind] == ']':
            counter_square_brackets -= 1
        elif string[ind] == '|':
            # Top-level alternation boundary: emit the branch seen so far.
            if not counter_square_brackets and not counter_round_brackets \
                    and string[pos:ind + 1]:
                substrings.append(string[pos:ind])
                pos = ind + 1
        ind += 1
    for sub in substrings:
        f2(sub, A, B, matrix)
def f2(substr: str, A: str, B: str, matrix: dict) -> None:
    """ Splitting a string into its component parts.

    Peels the leading atom (a literal, an escaped special, or a bracketed
    group — each optionally followed by '+' or '*') off ``substr`` into
    ``substr2``, keeping the remainder in ``substr3``.  The atom goes to
    f3; the remainder is chained through a fresh intermediate state K.
    """
    substr2 = ""
    substr3 = ""
    round_brackets = 0
    square_brackets = 0
    idx = 0
    while idx < len(substr):
        if len(substr) >= 3 and substr[0] == '\\' and \
                substr[1] in SPECIALS and substr[2] in ('*', '+'):  # "\)+"
            substr2 = substr[:3]
            substr3 = substr[3:]
            break
        elif len(substr) >= 2 and \
                ((substr[0] == '\\' and substr[1] in SPECIALS) or  # "\("
                 (substr[0] in ALPHABET and substr[1] in ('*', '+'))):  # "a+"
            substr2 = substr[:2]
            substr3 = substr[2:]
            break
        elif len(substr) >= 2 and substr[0] in ALPHABET and \
                substr[1] not in ('+', '*'):
            substr2 = substr[0]  # 'a'
            substr3 = substr[1:]
            break
        elif len(substr) == 1 and substr not in SPECIALS:  # 'a'
            substr2 = substr
            break
        elif substr[idx] == '(':
            if not square_brackets:
                round_brackets += 1
        elif substr[idx] == ')' and not substr[idx - 1] == '\\':
            if not square_brackets:
                round_brackets -= 1
            # Group closed at top level: cut after ')' or after a trailing
            # quantifier, whichever applies.
            if not round_brackets and not square_brackets:
                if idx < len(substr) - 1:
                    if substr[idx + 1] not in ('*', '+'):  # "(abc)"
                        substr2 = substr[:idx + 1]
                        substr3 = substr[idx + 1:]
                        break
                    else:  # "(abc)+", "(abc)*"
                        substr2 = substr[:idx + 2]
                        substr3 = substr[idx + 2:]
                        break
        elif substr[idx] == '[':
            if not square_brackets:
                square_brackets += 1
        elif substr[idx] == ']' and not substr[idx - 1] == '\\':
            square_brackets -= 1
            if not square_brackets and not round_brackets:
                if idx == len(substr) - 1:  # "[abc]"
                    substr2 = substr[:idx + 1]
                    substr3 = substr[idx + 1:]
                    break
                else:  # "[abc]+", "[abc]*"
                    substr2 = substr[:idx + 2]
                    substr3 = substr[idx + 2:]
                    break
        idx += 1
    if not substr3:
        # No remainder: the whole atom connects A directly to B.
        f3(substr, A, B, matrix)
    else:
        # Chain: A --substr2--> K --substr3--> B via a fresh state K.
        K = next(C)
        f3(substr2, A, K, matrix)
        f2(substr3, K, B, matrix)
def f3(substr: str, A: str, B: str, matrix: dict) -> None:
    """ Formation of the "state matrix".

    Adds the transitions of a single atom between states A and B, using a
    fresh helper state K for '+' / '*' repetition loops and delegating
    bracketed groups back to f1.
    """
    last_two_symbols = ''
    K = next(C)
    if len(substr) >= 2:
        last_two_symbols = ''.join([substr[-2], substr[-1]])
    # if substr in ALPHABET or substr == 'eps' or \
    if substr in ALPHABET or \
            (substr[0] == '\\' and
             substr[1] in SPECIALS and len(substr) == 2):  # 'a', 'eps', '\['
        matrix_add(substr, A, B, matrix)
    elif (substr[0] in ALPHABET and substr[-1] == '+') or \
            (substr[0] == '\\' and substr[-1] == '+'):  # 'a+', '\)+'
        # One-or-more: one mandatory pass A->K, then a self-loop on K.
        pos_plus = substr.find('+')
        matrix_add(substr[:pos_plus], A, K, matrix)
        matrix_add(substr[:pos_plus], K, K, matrix)
        matrix_add('eps', K, B, matrix)
    elif (substr[0] in ALPHABET and substr[-1] == '*') or \
            (substr[0] == '\\' and substr[-1] == '*'):  # 'a*', '\)*'
        # Zero-or-more: reach K for free, loop on K.
        pos_mult = substr.find('*')
        matrix_add('eps', A, K, matrix)
        matrix_add(substr[:pos_mult], K, K, matrix)
        matrix_add('eps', K, B, matrix)
    elif substr[0] == '(' and substr[-1] == ')':  # ()
        f1(substr[1:-1], A, B, matrix)
    elif substr[0] == '(' and last_two_symbols == ")+":  # ()+
        f1(substr[1:-2], A, K, matrix)
        f1(substr[1:-2], K, K, matrix)
        matrix_add('eps', K, B, matrix)
    elif substr[0] == '(' and last_two_symbols == ")*":  # ()*
        matrix_add('eps', A, K, matrix)
        f1(substr[1:-2], K, K, matrix)
        matrix_add('eps', K, B, matrix)
    elif substr[0] == '[' and substr[-1] == ']':  # []
        # Character class: rewrite "[abc]" as the alternation "a|b|c".
        new_substr = add_vertical_lines(substr[1:-1])
        f1(new_substr, A, B, matrix)
    elif substr[0] == '[' and last_two_symbols == "]+":  # []+
        new_substr = add_vertical_lines(substr[1:-2])
        new_substr = '(' + new_substr + ')+'
        f1(new_substr, A, B, matrix)
    elif substr[0] == '[' and last_two_symbols == "]*":  # []*
        new_substr = add_vertical_lines(substr[1:-2])
        new_substr = '(' + new_substr + ')*'
        f1(new_substr, A, B, matrix)
def add_vertical_lines(substr: str) -> str:
    """ Join the characters of a character class with '|' alternation.

    Escape sequences stay intact as two-character tokens; bare special
    characters are escaped before joining.
    """
    parts = []
    pos = 0
    length = len(substr)
    while pos < length:
        current = substr[pos]
        if current == '\\' and substr[pos + 1:pos + 2] in SPECIALS:
            # Keep an existing escape sequence together as one token.
            parts.append(substr[pos:pos + 2])
            pos += 2
        elif current in SPECIALS:
            # Escape bare specials so they stay literal in the alternation.
            parts.append('\\' + current)
            pos += 1
        else:
            parts.append(current)
            pos += 1
    return '|'.join(parts)
def matrix_add(
        substr: str,
        A: str,
        B: (str or list or bool),
        matrix: dict) -> None:
    """
    Add a transition labelled *substr* from state *A* into *matrix*.

    ``B`` may be a list of target states, a single state name (str)
    that is appended without duplicates, or a bool flag that simply
    overwrites. Any other type raises TypeError.
    """
    row = matrix.get(A)
    if not row:
        # No (non-empty) row for this state yet: create a fresh one.
        if isinstance(B, (list, bool)):
            matrix[A] = {substr: B}
        elif isinstance(B, str):
            matrix[A] = {substr: [B]}
        else:
            raise TypeError
        return
    if isinstance(B, bool):
        # Flags replace whatever was stored before.
        row[substr] = B
    elif isinstance(B, str):
        targets = row.get(substr)
        if not targets:
            row[substr] = [B]
        elif B not in targets:
            targets.append(B)
    elif isinstance(B, list):
        if not row.get(substr):
            row[substr] = B
        else:
            # Merge both target lists, de-duplicated and re-sorted.
            row[substr] = get_a_sorted_list_of_vertices(
                set(row[substr] + B)
            )
    else:
        raise TypeError
def get_a_sorted_list_of_vertices(x: dict or list or set) -> list:
    """Return state names ordered as: initial 'A', 'S<n>' by number, final 'Z'.

    For a dict the keys are taken (the initial vertex is always emitted
    first); for a list/set the initial and final vertices are emitted
    only when present.
    """
    INITIAL = 'A'
    FINAL = 'Z'

    def numeric_suffixes(states):
        # Pull the numeric part of every 'S<n>'-style name; one-letter
        # names ('A', 'Z') carry no number and are skipped.
        return (int(s[1:]) for s in states if len(s) >= 2)

    numbers = []
    ordered = []
    has_final = False
    if isinstance(x, dict):
        numbers = sorted(numeric_suffixes(x.keys()))
        ordered.append(INITIAL)  # initial vertex always leads
    elif isinstance(x, (list, set)):
        if INITIAL in x:
            ordered.append(INITIAL)
        has_final = FINAL in x
        numbers = sorted(numeric_suffixes(x))
    ordered.extend('S' + str(num) for num in numbers)
    if has_final:
        ordered.append(FINAL)
    return ordered
def translate(eps_ndfsm: dict, dfsm: dict, expr: str) -> None:
    """
    Translation of a non-deterministic FSM to a deterministic FSM.
    (Determination of the FSM).

    Pipeline: extract the alphabet from *expr*, remove eps-transitions
    from *eps_ndfsm* in place, prune states unreachable from the initial
    state 'A', then run the subset construction into *dfsm*.
    Both dicts are mutated in place; nothing is returned.
    """
    alphabet = get_alphabet(expr)
    vertices = get_a_sorted_list_of_vertices(eps_ndfsm)
    # Steps below mutate eps_ndfsm; their order is significant.
    remove_of_eps_edges(eps_ndfsm, vertices)
    # remove_of_eps_edges(eps_ndfsm, ndfsm, alphabet)
    remove_unreachable_states(eps_ndfsm, 'A')
    create_dfsm(eps_ndfsm, dfsm, alphabet)
def remove_of_eps_edges(
        d: dict,
        vertices: list) -> None:
    """
    Eliminate eps-transitions from the eps-ndFSM *d* in place.

    For every state, each eps-edge to a non-final state is resolved by
    merging the target state's outgoing transitions into the current
    state; once the next eps-target is the final state 'Z', the
    remaining eps list is simply dropped.
    """
    FINAL = 'Z'
    for state in vertices:
        while d[state].get('eps'):
            target = d[state]['eps'][0]
            if target == FINAL:
                # Reaching the final state terminates the eps chain.
                del d[state]['eps']
                continue
            # Absorb the eps-target's transitions into this state.
            d[state] = combining_dict(d[state], d[target])
            d[state]['eps'].remove(target)
            if not d[state].get('eps'):
                del d[state]['eps']
def combining_dict(d1: dict, d2: dict) -> dict:
    """ Combining the values of two dictionaries for a given key.

    Returns a new dict mapping each transition symbol to a sorted,
    de-duplicated list of target states drawn from both inputs.

    NOTE(review): the result depends on the pairwise iteration order
    below — when a key is shared by both dicts, the equal-key branch
    overwrites whatever the unequal-key branches accumulated for it on
    earlier passes. Preserved as-is on purpose.
    """
    new_dict = dict()
    for d1_key in d1.keys():
        for d2_key in d2.keys():
            if d1_key == d2_key:
                # Shared key: replace with the sorted union of both lists.
                content = get_a_sorted_list_of_vertices(
                    set(d1[d1_key]) | set(d2[d2_key])
                )
                new_dict[d1_key] = content
            else:
                # Distinct keys: fold each side's targets into the result,
                # de-duplicating and re-sorting on every pass.
                if new_dict.get(d1_key):
                    new_dict[d1_key].extend(d1[d1_key])
                    new_dict[d1_key] = get_a_sorted_list_of_vertices(
                        set(new_dict[d1_key])
                    )
                else:
                    new_dict[d1_key] = get_a_sorted_list_of_vertices(
                        set(d1[d1_key])
                    )
                if new_dict.get(d2_key):
                    new_dict[d2_key].extend(d2[d2_key])
                    new_dict[d2_key] = get_a_sorted_list_of_vertices(
                        set(new_dict[d2_key])
                    )
                else:
                    new_dict[d2_key] = get_a_sorted_list_of_vertices(
                        set(d2[d2_key])
                    )
    return new_dict
def create_dfsm(d1: dict, d2: dict, alphabet: list) -> None:
"""
Creation of a determinate finite automaton.
Remove transitions on the same symbol.
"""
"""
1) Создаем новые вершины для 'A'.
Цикл по всем символам из алфавита.
Если d_old['A'].get(symbol), то создаем вершину -- объединение входящих
вершин в список.
Пример: A: {'x': ['S1', 'S3', 'Z']}.
Результат: новая вершина A: {'x': "S1_S3_Z"}.
2) Просматриваем созданные вершины из А по каждому символу из алфавита.
Заходим в каждую из них и смотрим, в какие вершины они вели по данному
символу. Создаем вершину -- объединение всех вершин, которые вели по
данному символу в другие вершины.
Пример: A: {'x': ['S1', 'S5']}, S1: {'x': ['S2', 'S3']}, S5: {'x': ['S6']}.
Результат: переход в новую вершину по | |
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import datetime
import time
import json
try:
    # arcpy ships only with an ArcGIS installation; everything geometry-
    # related in this module degrades gracefully without it.
    import arcpy
    arcpyFound = True
except ImportError:
    # Catch only the import failure — the previous bare `except:` also
    # swallowed SystemExit/KeyboardInterrupt.
    arcpyFound = False
from ..packages import six
import copy
import os
import tempfile
import uuid
from .spatial import json_to_featureclass
from .geometry import Point, MultiPoint, Polygon, Polyline, SpatialReference
from .._abstract.abstract import AbstractGeometry
__all__ = ['_unicode_convert', "Feature", "FeatureSet",
"_date_handler", "local_time_to_online",
"online_time_to_string", "timestamp_to_datetime",
"MosaicRuleObject", "create_uid"]
def create_uid():
    """Return a random 32-character hexadecimal UUID string.

    `uuid.UUID.hex` has been available since Python 2.5, so the previous
    six.PY3 branch (calling the Py2-only `get_hex()`) was unnecessary;
    this form behaves identically on both Python 2 and 3.
    """
    return uuid.uuid4().hex
def _unicode_convert(obj):
""" converts unicode to anscii """
if isinstance(obj, dict):
return {_unicode_convert(key): _unicode_convert(value) for key, value in obj.items()}
elif isinstance(obj, list):
return [_unicode_convert(element) for element in obj]
elif isinstance(obj, str):
return obj
elif isinstance(obj, six.text_type):
return obj.encode('utf-8')
elif isinstance(obj, six.integer_types):
return obj
else:
return obj
#----------------------------------------------------------------------
def _date_handler(obj):
if isinstance(obj, datetime.datetime):
return local_time_to_online(obj)
else:
return obj
#----------------------------------------------------------------------
def local_time_to_online(dt=None):
    """
    Convert a local datetime to a UTC timestamp in milliseconds for AGOL.

    Inputs:
        dt - datetime object; defaults to the current local time.
    Output:
        Long value (milliseconds since the Unix epoch, UTC).
    """
    if dt is None:
        dt = datetime.datetime.now()
    # Shift the local-time epoch seconds to UTC using the active offset.
    dst_active = time.daylight and time.localtime().tm_isdst > 0
    offset_seconds = time.altzone if dst_active else time.timezone
    local_millis = time.mktime(dt.timetuple()) * 1000
    return local_millis + offset_seconds * 1000
#----------------------------------------------------------------------
def online_time_to_string(value, timeFormat, utcOffset=0):
    """Converts AGOL timestamp to formatted string.
    Args:
        value (float): A UTC timestamp as reported by AGOL (time in ms since Unix epoch * 1000)
        timeFormat (str): Date/Time format string as parsed by :py:func:`datetime.strftime`.
        utcOffset (int): Hours difference from UTC and desired output. Default is 0 (remain in UTC).
    Returns:
        str: A string representation of the timestamp, or "" if *value*
        cannot be converted.
    Examples:
        >>> rcrest.general.online_time_to_string(1457167261000.0, "%Y-%m-%d %H:%M:%S")
        '2016-03-05 00:41:01'
        >>> rcrest.general.online_time_to_string(731392515000.0, '%m/%d/%Y %H:%M:%S', -8) # PST is UTC-8:00
        '03/05/1993 12:35:15'
    See Also:
        :py:func:`local_time_to_online` for converting a :py:class:`datetime.datetime` object to AGOL timestamp
    """
    try:
        return datetime.datetime.fromtimestamp(
            value / 1000 + utcOffset * 3600).strftime(timeFormat)
    except Exception:
        # Best-effort formatting: bad timestamps/formats yield an empty
        # string, but unlike the old bare `except:` this no longer masks
        # SystemExit/KeyboardInterrupt. (The empty `finally: pass` was dead
        # code and has been removed.)
        return ""
#----------------------------------------------------------------------
def timestamp_to_datetime(timestamp):
    """
    Convert an AGOL millisecond timestamp to a datetime object.

    Inputs:
        timestamp - timestamp value as Long (ms since the Unix epoch)
    output:
        datetime object (local time)
    """
    seconds = timestamp / 1000
    return datetime.datetime.fromtimestamp(seconds)
########################################################################
class Feature(object):
    """A single feature: an attribute dict plus an optional esri-JSON geometry.

    Wraps the feature's dict/JSON representation and lazily caches the
    parsed geometry (via arcpy when available).
    """
    # Lazily populated caches of the serialized JSON, the dict form, the
    # geometry object and spatial-reference metadata.
    _json = None
    _dict = None
    _geom = None
    _geomType = None
    _attributes = None
    _wkid = None
    _wkt = None
    #----------------------------------------------------------------------
    def __init__(self, json_string, wkid=None, spatialReference=None):
        """Constructor.

        json_string - feature as a dict or JSON string, with optional
            'geometry' and 'attributes' keys.
        wkid - well-known spatial-reference id (kept for compatibility).
        spatialReference - dict with optional 'wkid'/'wkt' keys; copied
            onto the geometry when present.
        Raises TypeError for any other input type.
        """
        self._wkid = wkid
        if type(json_string) is dict:
            self._dict = json_string
        elif type(json_string) is str:
            self._dict = json.loads(json_string)
        else:
            raise TypeError("Invalid Input, only dictionary or string allowed")
        if 'geometry' in self._dict:
            if not wkid is None: # kept for compatibility
                self._dict['geometry']['spatialReference'] = {"wkid" : wkid}
            if not spatialReference is None and isinstance(spatialReference, dict):
                if 'wkid' in spatialReference:
                    self._wkid = spatialReference['wkid']
                if 'wkt' in spatialReference:
                    self._wkt = spatialReference['wkt']
                self._dict['geometry'].update({'spatialReference':spatialReference})
            # Prime the geometry cache from the freshly assigned dict.
            self._geom = self.geometry
        self._json = json.dumps(self._dict, default=_date_handler)
    #----------------------------------------------------------------------
    def set_value(self, field_name, value):
        """ sets an attribute value for a given field name

        Returns True on success, False when *field_name* is neither an
        existing attribute nor a geometry pseudo-field. None values for
        attribute fields are silently ignored.
        """
        if field_name in self.fields:
            if not value is None:
                self._dict['attributes'][field_name] = _unicode_convert(value)
            else:
                pass
        elif field_name.upper() in ['SHAPE', 'SHAPE@', "GEOMETRY"]:
            # Geometry pseudo-field: accept esri dicts, AbstractGeometry
            # objects or (when arcpy is available) matching arcpy geometries.
            if isinstance(value, dict):
                if 'geometry' in value:
                    self._dict['geometry'] = value['geometry']
                elif any(k in value.keys() for k in ['x','y','points','paths','rings', 'spatialReference']):
                    self._dict['geometry'] = value
            elif isinstance(value, AbstractGeometry):
                self._dict['geometry'] = value.asDictionary
            elif arcpyFound:
                if isinstance(value, arcpy.Geometry) and \
                   value.type == self.geometryType:
                    self._dict['geometry']=json.loads(value.JSON)
            # Invalidate and rebuild the cached geometry.
            self._geom = None
            self._geom = self.geometry
        else:
            return False
        # Keep the cached JSON string in sync with the dict.
        self._json = json.dumps(self._dict, default=_date_handler)
        return True
    #----------------------------------------------------------------------
    def get_value(self, field_name):
        """ returns a value for a given field name (None if unknown) """
        if field_name in self.fields:
            return self._dict['attributes'][field_name]
        elif field_name.upper() in ['SHAPE', 'SHAPE@', "GEOMETRY"]:
            return self._dict['geometry']
        return None
    #----------------------------------------------------------------------
    @property
    def asDictionary(self):
        """returns the feature as a dictionary

        NOTE(review): feat_dict is assembled below but the method returns
        self._dict unchanged, so the local looks like dead code — confirm
        whether returning feat_dict was intended before changing it.
        """
        feat_dict = {}
        if self._geom is not None:
            if 'feature' in self._dict:
                feat_dict['geometry'] =  self._dict['feature']['geometry']
            elif 'geometry' in self._dict:
                feat_dict['geometry'] =  self._dict['geometry']
        if 'feature' in self._dict:
            feat_dict['attributes'] = self._dict['feature']['attributes']
        else:
            feat_dict['attributes'] = self._dict['attributes']
        return self._dict
    #----------------------------------------------------------------------
    @property
    def asRow(self):
        """ converts a feature to a list for insertion into an insert cursor
            Output:
               [row items], [field names]
               returns a list of fields and the row object

            NOTE(review): `self.fields` returns a dict keys view; on
            Python 3 that view has no .index/.append, so this property
            appears Py2-only — confirm before relying on it.
        """
        fields = self.fields
        row = [""] * len(fields)
        for k,v in self._attributes.items():
            row[fields.index(k)] = v
            del v
            del k
        if self.geometry is not None:
            row.append(self.geometry)
            fields.append("SHAPE@")
        return row, fields
    #----------------------------------------------------------------------
    @property
    def geometry(self):
        """returns the feature geometry (arcpy geometry; None without arcpy)"""
        if arcpyFound:
            if self._geom is None:
                # Parse lazily from whichever dict layout is present.
                if 'feature' in self._dict:
                    self._geom = arcpy.AsShape(self._dict['feature']['geometry'], esri_json=True)
                elif 'geometry' in self._dict:
                    self._geom = arcpy.AsShape(self._dict['geometry'], esri_json=True)
            return self._geom
        return None
    @geometry.setter
    def geometry(self, value):
        """gets/sets a feature's geometry; only accepts a geometry whose
        type matches the feature's current geometryType"""
        if isinstance(value, (Polygon, Point, Polyline, MultiPoint)):
            if value.type == self.geometryType:
                self._geom = value
        elif arcpyFound:
            if isinstance(value, arcpy.Geometry):
                if value.type == self.geometryType:
                    self._dict['geometry']=json.loads(value.JSON)
                    # Invalidate and rebuild the cached geometry object.
                    self._geom = None
                    self._geom = self.geometry
    #----------------------------------------------------------------------
    @property
    def fields(self):
        """ returns a list of feature fields

        NOTE(review): actually returns `dict.keys()` — a view on
        Python 3, not a list; callers such as asRow index into it.
        """
        if 'feature' in self._dict:
            self._attributes = self._dict['feature']['attributes']
        else:
            self._attributes = self._dict['attributes']
        return self._attributes.keys()
    #----------------------------------------------------------------------
    @property
    def geometryType(self):
        """ returns the feature's geometry type ("Table" when geometry-less) """
        if self._geomType is None:
            if self.geometry is not None:
                self._geomType = self.geometry.type
            else:
                self._geomType = "Table"
        return self._geomType
    @staticmethod
    def fc_to_features(dataset):
        """
        converts a dataset to a list of feature objects
        Input:
           dataset - path to table or feature class
        Output:
           list of feature objects (None when arcpy is unavailable)
        """
        if arcpyFound:
            desc = arcpy.Describe(dataset)
            fields = [field.name for field in arcpy.ListFields(dataset) if field.type not in ['Geometry']]
            date_fields = [field.name for field in arcpy.ListFields(dataset) if field.type =='Date']
            non_geom_fields = copy.deepcopy(fields)
            features = []
            if hasattr(desc, "shapeFieldName"):
                fields.append("SHAPE@JSON")
            del desc
            with arcpy.da.SearchCursor(dataset, fields) as rows:
                for row in rows:
                    row = list(row)
                    # Dates are shipped as AGOL millisecond timestamps.
                    for df in date_fields:
                        if row[fields.index(df)] != None:
                            row[fields.index(df)] = int((_date_handler(row[fields.index(df)])))
                    template = {
                        "attributes" : dict(zip(non_geom_fields, row))
                    }
                    if "SHAPE@JSON" in fields:
                        template['geometry'] = \
                            json.loads(row[fields.index("SHAPE@JSON")])
                    features.append(
                        Feature(json_string=_unicode_convert(template))
                    )
                    del row
            return features
        return None
    #----------------------------------------------------------------------
    def __str__(self):
        """Return the feature serialized as a JSON string."""
        return json.dumps(self.asDictionary)
########################################################################
class MosaicRuleObject(object):
"""
The image service uses a mosaic rule to mosaick multiple rasters on the
fly. The mosaic rule parameter is used by many image service operations,
for example, export image and identify operations.
"""
__allowedMosaicMethods = [
"esriMosaicNone",
"esriMosaicCenter",
"esriMosaicNadir",
"esriMosaicViewpoint",
"esriMosaicAttribute",
"esriMosaicLockRaster",
"esriMosaicNorthwest",
"esriMosaicSeamline"
]
__allowedMosaicOps = [
"MT_FIRST",
"MT_LAST",
"MT_MIN",
"MT_MAX",
"MT_MEAN",
"MT_BLEND",
"MT_SUM"
]
_mosaicMethod = None
_where = None
_sortField = None
_sortValue = None
_ascending = None
_lockRasterIds = None
_viewpoint = None
_fids = None
_mosaicOperation = None
_itemRenderingRule = None
#----------------------------------------------------------------------
def __init__(self,
mosaicMethod,
where="",
sortField="",
sortValue="",
ascending=True,
lockRasterIds=[],
viewpoint=None,
fids=[],
mosaicOperation=None,
itemRenderingRule=""):
"""Constructor"""
if mosaicMethod in self.__allowedMosaicMethods:
self._mosaicMethod = mosaicMethod
else:
raise AttributeError("Invalid mosaic method.")
self._where = where
self._sortField = sortField
self._sortValue = sortValue
self._ascending = ascending
self._localRasterIds = lockRasterIds
self._itemRenderingRule = itemRenderingRule
if isinstance(viewpoint, Point):
self._viewpoint = viewpoint
self._fids = fids
if mosaicOperation is not None and \
mosaicOperation in self.__allowedMosaicOps:
self._mosaicOperation = mosaicOperation
#----------------------------------------------------------------------
@property
def where(self):
"""
Use where clause to define a subset of rasters used in the mosaic,
be aware that the rasters may not be visible at all scales
"""
return self._where
#----------------------------------------------------------------------
@where.setter
def where(self, value):
"""
Use where clause to define a subset of rasters used in the mosaic,
be aware that the rasters may not be visible at all scales
"""
if value != self._where:
self._where = value
#----------------------------------------------------------------------
@property
def mosaicMethod(self):
"""
get/set the mosaic method
"""
return self._mosaicMethod
#----------------------------------------------------------------------
@mosaicMethod.setter
def mosaicMethod(self, value):
"""
get/set the mosaic method
"""
if value in self.__allowedMosaicMethods and \
self._mosaicMethod != value:
self._mosaicMethod = value
#----------------------------------------------------------------------
@property
def sortField(self):
""""""
return self._sortField
#----------------------------------------------------------------------
@sortField.setter
def sortField(self, value):
""""""
if self._sortField != value:
self._sortField = value
#----------------------------------------------------------------------
@property
def sortValue(self):
""""""
return self._sortValue
#----------------------------------------------------------------------
@sortValue.setter
def sortValue(self, value):
""""""
if self._sortValue != value:
self._sortValue = value
#----------------------------------------------------------------------
@property
def ascending(self):
""""""
return self._ascending
#----------------------------------------------------------------------
@ascending.setter
def ascending(self, value):
""""""
if isinstance(value, bool):
self._ascending = value
#----------------------------------------------------------------------
@property
def lockRasterIds(self):
""""""
return self._lockRasterIds
#----------------------------------------------------------------------
@lockRasterIds.setter
def lockRasterIds(self, value):
""""""
if isinstance(self._lockRasterIds, list):
self._lockRasterIds = value
#----------------------------------------------------------------------
@property
def viewpoint(self):
""""""
return self._viewpoint
#----------------------------------------------------------------------
@viewpoint.setter
def viewpoint(self, value):
""""""
if isinstance(value, Point):
self._viewpoint = value
#----------------------------------------------------------------------
@property
def fids(self):
""""""
return self._fids
#----------------------------------------------------------------------
@fids.setter
def fids(self, value):
""""""
self._fids = value
#----------------------------------------------------------------------
@property
def mosaicOperation(self):
""""""
return self._mosaicOperation
#----------------------------------------------------------------------
@mosaicOperation.setter
def mosaicOperation(self, value):
""""""
if value in self.__allowedMosaicOps and \
self._mosaicOperation != value:
self._mosaicOperation = value
#----------------------------------------------------------------------
@property
def itemRenderingRule(self):
""""""
return self._itemRenderingRule
#----------------------------------------------------------------------
@itemRenderingRule.setter
def itemRenderingRule(self, value):
""""""
if self._itemRenderingRule != value:
self._itemRenderingRule | |
import numpy as np
import math
from .utils import (
MinCurve,
get_nev,
get_vec,
get_angles,
HLA_to_NEV,
NEV_to_HLA,
get_sigmas
)
from welleng.error import ErrorModel
from welleng.exchange.wbp import TurnPoint
class Survey:
    def __init__(
        self,
        md,
        inc,
        azi,
        n=None,
        e=None,
        tvd=None,
        x=None,
        y=None,
        z=None,
        vec=None,
        radius=None,
        cov_nev=None,
        cov_hla=None,
        error_model=None,
        well_ref_params=None,
        start_xyz=None,
        start_nev=None,
        start_cov_nev=None,
        deg=True,
        unit="meters"
    ):
        """
        Initialize a welleng.Survey object.
        Parameters
        ----------
        md: (,n) list or array of floats
            List or array of well bore measured depths.
        inc: (,n) list or array of floats
            List or array of well bore survey inclinations
        azi: (,n) list or array of floats
            List or array of well bore survey azimuths
        n: (,n) list or array of floats (default: None)
            List or array of well bore northings
        e: (,n) list or array of floats (default: None)
            List or array of well bore eastings
        tvd: (,n) list or array of floats (default: None)
            List or array of local well bore z coordinates, i.e. depth
            and usually relative to surface or mean sea level.
        x: (,n) list or array of floats (default: None)
            List or array of local well bore x coordinates, which is
            usually aligned to the east direction.
        y: (,n) list or array of floats (default: None)
            List or array of local well bore y coordinates, which is
            usually aligned to the north direction.
        z: (,n) list or array of floats (default: None)
            List or array of well bore true vertical depths relative
            to the well surface datum (usually the drill floor
            elevation DFE, so not always identical to tvd).
        vec: (n,3) list or array of (,3) floats (default: None)
            List or array of well bore unit vectors that describe the
            inclination and azimuth of the well relative to (x,y,z)
            coordinates.
        radius: float or (,n) list or array of floats (default: None)
            If a single float is specified, this value will be
            assigned to the entire well bore. If a list or array of
            floats is provided, these are the radii of the well bore.
            If None, a well bore radius of 12" or approximately 0.3 m
            is applied.
        cov_nev: (n,3,3) list or array of floats (default: None)
            List or array of covariance matrices in the (n,e,v)
            coordinate system.
        cov_hla: (n,3,3) list or array of floats (default: None)
            List or array of covariance matrices in the (h,l,a)
            well bore coordinate system (high side, lateral, along
            hole).
        error_model: str (default: None)
            If specified, this model is used to calculate the
            covariance matrices if they are not present. Currently,
            only the "ISCWSA_MWD" model is provided.
        well_ref_params: dict (default: None)
            If an error_model is set, these well reference params
            are provided to the welleng.error.ErrorModel class. The
            defaults are:
                dict(
                    Latitude = -40,         # degrees
                    G = 9.80665,            # m/s2
                    BTotal = 61000,         # nT
                    Dip = -70,              # degrees
                    Declination = 13,       # degrees
                    Convergence = 0,        # degrees
                )
        start_xyz: (,3) list or array of floats (default: None -> [0,0,0])
            The start position of the well bore in (x,y,z) coordinates.
        start_nev: (,3) list or array of floats (default: None -> [0,0,0])
            The start position of the well bore in (n,e,v) coordinates.
        start_cov_nev: (,3,3) list or array of floats (default: None)
            The covariance matrix for the start position of the well
            bore in (n,e,v) coordinates.
        deg: boolean (default: True)
            Indicates whether the provided angles are in degrees
            (True), else radians (False).
        unit: str (default: 'meters')
            Indicates whether the provided lengths and distances are
            in 'meters' or 'feet', which impacts the calculation of
            the dls (dog leg severity).
        Returns
        -------
        A welleng.survey.Survey object.
        """
        self.unit = unit
        self.deg = deg
        # Mutable-default fix: the previous `start_xyz=[0,0,0]` and
        # `start_nev=[0,0,0]` defaults were single shared lists — every
        # Survey built with the defaults aliased the same objects, so
        # mutating one survey's start position silently changed the
        # default for all later instances.
        self.start_xyz = [0, 0, 0] if start_xyz is None else start_xyz
        self.start_nev = [0, 0, 0] if start_nev is None else start_nev
        self.md = np.array(md)
        self.start_cov_nev = start_cov_nev
        self._make_angles(inc, azi, deg)
        self._get_radius(radius)
        self.survey_deg = np.array([self.md, self.inc_deg, self.azi_deg]).T
        self.survey_rad = np.array([self.md, self.inc_rad, self.azi_rad]).T
        self.n = n
        self.e = e
        self.tvd = tvd
        self.x = x
        self.y = y
        self.z = z
        self.vec = vec
        self._min_curve()
        self._get_toolface_and_rates()
        # initialize errors
        # TODO: read this from a yaml file in errors
        error_models = ["ISCWSA_MWD"]
        if error_model is not None:
            assert error_model in error_models, "Unrecognized error model"
        self.error_model = error_model
        self.well_ref_params = well_ref_params
        self.cov_hla = cov_hla
        self.cov_nev = cov_nev
        self._get_errors()
    def _get_radius(self, radius=None):
        """Assign a per-station well bore radius (default 12" = 0.3048 m)."""
        if radius is None:
            self.radius = np.full_like(self.md.astype(float), 0.3048)
        elif np.array([radius]).shape[-1] == 1:
            # Scalar radius: broadcast to every station.
            self.radius = np.full_like(self.md.astype(float), radius)
        else:
            assert len(radius) == len(self.md), "Check radius"
            self.radius = np.array(radius)
    def _min_curve(self):
        """
        Get the (x,y,z), (n,e,v), doglegs, rfs, delta_mds, dlss and
        vectors for the well bore if they were not provided, using the
        minimum curvature method.
        """
        mc = MinCurve(self.md, self.inc_rad, self.azi_rad, self.start_xyz, self.unit)
        self.dogleg = mc.dogleg
        self.rf = mc.rf
        self.delta_md = mc.delta_md
        self.dls = mc.dls
        self.pos = mc.poss
        if self.x is None:
            # self.x, self.y, self.z = (mc.poss + self.start_xyz).T
            self.x, self.y, self.z = (mc.poss).T
        if self.n is None:
            self._get_nev()
        if self.vec is None:
            self.vec = get_vec(self.inc_rad, self.azi_rad, deg=False)
    def _get_nev(self):
        """Derive (n,e,tvd) from the local (x,y,z) coordinates."""
        self.n, self.e, self.tvd = get_nev(
            np.array([
                self.x,
                self.y,
                self.z
            ]).T,
            start_xyz=self.start_xyz,
            start_nev=self.start_nev
        ).T
    def _make_angles(self, inc, azi, deg=True):
        """
        Calculate angles in radians if they were provided in degrees or
        vice versa.
        """
        if deg:
            self.inc_rad = np.radians(inc)
            self.azi_rad = np.radians(azi)
            self.inc_deg = np.array(inc)
            self.azi_deg = np.array(azi)
        else:
            self.inc_rad = np.array(inc)
            self.azi_rad = np.array(azi)
            self.inc_deg = np.degrees(inc)
            self.azi_deg = np.degrees(azi)
    def _get_errors(self):
        """
        Initiate a welleng.error.ErrorModel object and calculate the
        covariance matrices with the specified error model.
        """
        if self.error_model:
            if self.error_model == "ISCWSA_MWD":
                if self.well_ref_params is None:
                    self.err = ErrorModel(
                        survey=self.survey_deg,
                        surface_loc=self.start_xyz,
                    )
                else:
                    self.err = ErrorModel(
                        survey=self.survey_deg,
                        surface_loc=self.start_xyz,
                        well_ref_params=self.well_ref_params,
                    )
                self.cov_hla = self.err.errors.cov_HLAs.T
                self.cov_nev = self.err.errors.cov_NEVs.T
        else:
            # No model: derive whichever covariance frame is missing.
            if self.cov_nev is not None and self.cov_hla is None:
                self.cov_hla = NEV_to_HLA(self.survey_rad, self.cov_nev.T).T
            elif self.cov_nev is None and self.cov_hla is not None:
                self.cov_nev = HLA_to_NEV(self.survey_rad, self.cov_hla.T).T
            else:
                pass
        if (
            self.start_cov_nev is not None
            and self.cov_nev is not None
        ):
            # Fold the start-position uncertainty into every station.
            self.cov_nev += self.start_cov_nev
            self.cov_hla = NEV_to_HLA(self.survey_rad, self.cov_nev.T).T
    def _curvature_to_rate(self, curvature):
        """Convert a curvature (1/length) to degrees per 30 m / 100 ft."""
        with np.errstate(divide='ignore', invalid='ignore'):
            radius = 1 / curvature
            circumference = 2 * np.pi * radius
            if self.unit == 'meters':
                x = 30
            else:
                x = 100
            rate = np.absolute(np.degrees(2 * np.pi / circumference) * x)
        return rate
    def _get_toolface_and_rates(self):
        """
        Reference SPE-84246.
        theta is inc, phi is azi
        """
        # split the survey
        s = SplitSurvey(self)
        if self.unit == 'meters':
            x = 30
        else:
            x = 100
        # this is lazy I know, but I'm using this mostly for flags
        with np.errstate(divide='ignore', invalid='ignore'):
            t1 = np.arctan(
                np.sin(s.inc2) * np.sin(s.delta_azi) /
                (
                    np.sin(s.inc2) * np.cos(s.inc1) * np.cos(s.delta_azi)
                    - np.sin(s.inc1) * np.cos(s.inc2)
                )
            )
            t1 = np.nan_to_num(
                np.where(t1 < 0, t1 + 2 * np.pi, t1),
                nan=np.nan
            )
            t2 = np.arctan(
                np.sin(s.inc1) * np.sin(s.delta_azi) /
                (
                    np.sin(s.inc2) * np.cos(s.inc1)
                    - np.sin(s.inc1) * np.cos(s.inc2) * np.cos(s.delta_azi)
                )
            )
            t2 = np.nan_to_num(
                np.where(t2 < 0, t2 + 2 * np.pi, t2),
                nan=np.nan
            )
            self.curve_radius = (360 / self.dls * x) / (2 * np.pi)
            curvature_dls = 1 / self.curve_radius
            self.toolface = np.concatenate((t1, np.array([t2[-1]])))
            curvature_turn = curvature_dls * (np.sin(self.toolface) / np.sin(self.inc_rad))
            self.turn_rate = self._curvature_to_rate(curvature_turn)
            curvature_build = curvature_dls * np.cos(self.toolface)
            self.build_rate = self._curvature_to_rate(curvature_build)
        # calculate plan normals
        n12 = np.cross(s.vec1, s.vec2)
        with np.errstate(divide='ignore', invalid='ignore'):
            self.normals = n12 / np.linalg.norm(n12, axis=1).reshape(-1,1)
def interpolate_survey(survey, x=0, index=0):
"""
Interpolates a point distance x between two survey stations
using minimum curvature.
Parameters
----------
survey: welleng.Survey
A survey object with at least two survey stations.
x: float
Length along well path from indexed survey station to
perform the interpolate at. Must be less than length
to the next survey station.
index: int
The index of the survey station from which to interpolate
from.
Returns
-------
survey: welleng.Survey
A survey object consisting of the two survey stations
between which the interpolation has been made (index 0 and
2), with the interpolated station between them (index 1)
"""
index = int(index)
assert index < len(survey.md) - 1, "Index is out of range"
# assert x <= survey.delta_md[index + 1], "x is out of range"
# check if | |
# spectramanipulator/treewidget.py
from PyQt5.QtCore import Qt, QItemSelectionModel, QItemSelection, pyqtSignal, QModelIndex
from PyQt5.QtWidgets import QApplication, QMessageBox, QMenu, QAction
from PyQt5.QtGui import QCursor, QColor
import numpy as np
import os
from spectramanipulator.spectrum import Spectrum, SpectrumList
from spectramanipulator.dialogs.int_int_inputdialog import IntIntInputDialog
from spectramanipulator.dialogs.interpolate_dialog import InterpolateDialog
from spectramanipulator.dialogs.rename_dialog import RenameDialog
from spectramanipulator.dialogs.fitwidget import FitWidget
from spectramanipulator.dialogs.stylewidget import StyleWidget
# from dialogs.rangedialog import RangeDialog
from spectramanipulator.dialogs.rangewidget import RangeWidget
from spectramanipulator.dialogs.export_spectra_as import ExportSpectraAsDialog
from spectramanipulator.settings import Settings
from spectramanipulator.logger import Logger
from spectramanipulator.utils.rename import rename
from spectramanipulator.treeview.item import SpectrumItemGroup, SpectrumItem, GenericItem
from spectramanipulator.treeview.model import TreeView, ItemIterator
from spectramanipulator.console import Console
from spectramanipulator.parsers import parse_XML_Spreadsheet
from spectramanipulator.dataloader import parse_text, parse_files
from spectramanipulator.exporter import list_to_string, list_to_files
# from spectramanipulator.plotwidget import PlotWidget
# TODO-->> rewrite
def get_hierarchic_list(items_iterator):
    """Build a hierarchic structure from the iterated items.

    Spectra belonging to a group are collected into lists (one list per
    contiguous group run); spectra not in a group are appended directly
    as spectrum objects. Group items themselves are skipped.
    """
    hierarchy = []
    pending = []
    previous_group = None

    def flush_pending():
        # Close the group currently being collected, if any.
        if pending:
            hierarchy.append(list(pending))
            pending.clear()

    for item in items_iterator:
        if isinstance(item, SpectrumItemGroup):
            continue
        item.name = item.name  # NOTE(review): looks like a no-op; presumably triggers a name setter — confirm
        if item.is_in_group():
            group = item.parent
            item.group_name = group.name
            if previous_group != group:
                # A new group started: emit the previous run first.
                previous_group = group
                flush_pending()
            pending.append(item)
        else:
            flush_pending()
            item.group_name = None
            hierarchy.append(item)
    flush_pending()
    return hierarchy
class TreeWidget(TreeView):
redraw_spectra = pyqtSignal()
state_changed = pyqtSignal()
# all_spectra_list =
    def __init__(self, parent=None):
        """Create the tree widget and wire model signals to plot/state updates."""
        super(TreeWidget, self).__init__(parent)
        # the main window sits two levels up the widget hierarchy
        self.main_widget = self.parent().parent()
        self.header().setStretchLastSection(True)
        # right-click opens the custom context menu
        self.setContextMenuPolicy(Qt.CustomContextMenu)
        self.customContextMenuRequested.connect(self.context_menu)
        self.items_deleted_signal.connect(self.items_deleted)
        self.myModel.item_edited_signal.connect(self.item_edited)
        self.myModel.checked_changed_signal.connect(self.check_changed)
        self.myModel.data_dropped_signal.connect(self.data_dropped)
        self.myModel.all_unchecked_signal.connect(lambda: self.redraw_spectra.emit())
        self.myModel.data_modified_signal.connect(self.data_modified)
        self.myModel.info_modified_signal.connect(self.info_modified)
        self.myModel.items_ungrouped_signal.connect(lambda: self.redraw_spectra.emit())
def items_deleted(self, item_was_checked):
if item_was_checked:
self.redraw_spectra.emit()
def item_edited(self, item_is_checked):
self.state_changed.emit()
if item_is_checked:
self.redraw_spectra.emit()
    def data_modified(self, items):
        """Forward changed items to the main widget so their plots are updated."""
        self.main_widget.update_items_data(items)
    def info_modified(self, items):
        """Refresh the tree's info and repaint the view after item metadata changed."""
        self.setup_info()
        self.update_view()
    def check_changed(self, items, checked):
        """Show or hide *items* in the plot when their check state flips."""
        # self.redraw_spectra.emit()
        self.main_widget.redraw_items(items, not checked)
    def data_dropped(self):
        """Redraw everything after a drag-and-drop changed the tree contents."""
        self.redraw_spectra.emit()
    def save_state(self):
        """Save the tree state via the base class and flag the project as changed."""
        super(TreeWidget, self).save_state()
        self.state_changed.emit()
        # if self.top_level_items_count() == 0:
        #     self.all_spectra_list = []
        #     return
        # self.all_spectra_list = self.get_hierarchic_list(
        #     self.myModel.iterate_items(ItemIterator.NoChildren))
        # Console.push_variables({'item': self.all_spectra_list})
        # Console.push_variables({'item': self.myModel.root})
def export_selected_items_as(self):
if ExportSpectraAsDialog.is_opened:
ExportSpectraAsDialog.get_instance().activateWindow()
ExportSpectraAsDialog.get_instance().setFocus()
return
if len(self.selectedIndexes()) == 0:
return
dialog = ExportSpectraAsDialog()
if not dialog.accepted:
return
path, ext, delimiter, decimal_sep = dialog.result
sp_list = get_hierarchic_list(
self.myModel.iterate_selected_items(skip_groups=True,
skip_childs_in_selected_groups=False))
try:
list_to_files(sp_list, path, ext,
include_group_name=Settings.files_exp_include_group_name,
include_header=Settings.files_exp_include_header,
delimiter=delimiter,
decimal_sep=decimal_sep,
x_data_name=Settings.bottom_axis_label)
except Exception as ex:
QMessageBox.warning(self, 'Error', ex.__str__(), QMessageBox.Ok)
Logger.message(f"Data were saved to {path}")
def copy_selected_items_to_clipboard(self):
    """Serialize the selected spectra as text and put the result on the clipboard."""
    selected = get_hierarchic_list(
        self.myModel.iterate_selected_items(skip_groups=True,
                                            skip_childs_in_selected_groups=False))
    if len(selected) == 0:
        return
    Logger.status_message("Copying selected items to clipboard...")
    try:
        text = list_to_string(selected,
                              include_group_name=Settings.clip_exp_include_group_name,
                              include_header=Settings.clip_exp_include_header,
                              delimiter=Settings.clip_exp_delimiter,
                              decimal_sep=Settings.clip_exp_decimal_sep,
                              x_data_name=Settings.bottom_axis_label)
        clipboard = QApplication.clipboard()
        clipboard.clear(mode=clipboard.Clipboard)
        clipboard.setText(text, mode=clipboard.Clipboard)
    except Exception as ex:
        Logger.message(ex.__str__())
        return
    Logger.status_message("Done")
def paste_from_clipboard(self):
    """Import spectra from the clipboard.

    Prefers Excel's "XML Spreadsheet" MIME payload (unless the settings force
    plain-text import); otherwise falls back to parsing the clipboard text.
    """
    mime = QApplication.clipboard().mimeData()
    excel_format = "XML Spreadsheet"
    if mime is not None and mime.hasFormat(excel_format) and not Settings.excel_imp_as_text:
        self.parse_XML_Spreadsheet(mime.data(excel_format).data())
        return
    clipboard = QApplication.clipboard()
    raw_text = clipboard.text(mode=clipboard.Clipboard)
    self.import_spectra(parse_text(raw_text))
# --------- Operation Functions --------
def normalize(self):
    """Normalize each selected spectrum to its maximum within a user-chosen x range."""
    if len(self.selectedIndexes()) == 0:
        return

    def accepted():
        # Called by the range widget once the user confirms the range.
        x0, x1 = rng_dialog.returned_range
        try:
            changed = []
            for sp in self.myModel.iterate_selected_items(
                    skip_groups=True, skip_childs_in_selected_groups=False):
                sp.normalize_no_update(x0, x1)
                changed.append(sp)
            self.data_modified(changed)
            self.state_changed.emit()
        except ValueError as ex:
            Logger.message(ex.__str__())
            QMessageBox.warning(self, 'Warning', ex.__str__(), QMessageBox.Ok)
            return
        Logger.status_message("Done")

    rng_dialog = RangeWidget(self.main_widget.var_widget, accepted, title="Normalize",
                             label_text="Set x range values. Maximum y value is find in this range and y values "
                                        "are divided by this maximum. For normalizing to specific value, set x0 "
                                        "equal to x1:",
                             parent=self)
def cut(self):
    """Trim each selected spectrum to a user-chosen x range (points outside are removed)."""
    if len(self.selectedIndexes()) == 0:
        return

    def accepted():
        # Called by the range widget once the user confirms the range.
        x0, x1 = rng_dialog.returned_range
        try:
            changed = []
            for sp in self.myModel.iterate_selected_items(
                    skip_groups=True, skip_childs_in_selected_groups=False):
                sp.cut_no_update(x0, x1)
                changed.append(sp)
            self.data_modified(changed)
            self.info_modified(changed)  # x range changed, so info must refresh too
            self.state_changed.emit()
        except ValueError as ex:
            Logger.message(ex.__str__())
            QMessageBox.warning(self, 'Warning', ex.__str__(), QMessageBox.Ok)
            return
        Logger.status_message("Done")

    rng_dialog = RangeWidget(self.main_widget.var_widget, accepted, title="Cut",
                             label_text="Set x range values. Values outside this range will"
                                        " be deleted from the spectrum/spectra:",
                             parent=self)
def extend_by_zeros(self):
    """Pad each selected spectrum with zeros out to a user-chosen x range."""
    if len(self.selectedIndexes()) == 0:
        return

    def accepted():
        # Called by the range widget once the user confirms the range.
        x0, x1 = rng_dialog.returned_range
        try:
            changed = []
            for sp in self.myModel.iterate_selected_items(
                    skip_groups=True, skip_childs_in_selected_groups=False):
                sp.extend_by_value_no_update(x0, x1)
                changed.append(sp)
            self.data_modified(changed)
            self.info_modified(changed)  # x range changed, so info must refresh too
            self.state_changed.emit()
        except ValueError as ex:
            Logger.message(ex.__str__())
            QMessageBox.warning(self, 'Warning', ex.__str__(), QMessageBox.Ok)
            return
        Logger.status_message("Done")

    rng_dialog = RangeWidget(self.main_widget.var_widget, accepted, title="Extend by zeros",
                             label_text="Set x range values. Values outside the range of spectrum "
                                        "will be set up to zero. Spacing is calculated as average "
                                        "of the spectrum spacing (spacing will be the same for "
                                        "evenly spaced spectrum:",
                             parent=self)
def baseline_correct(self):
    """Subtract from each selected spectrum the mean y value of a user-chosen x range."""
    if len(self.selectedIndexes()) == 0:
        return

    def accepted():
        # Called by the range widget once the user confirms the range.
        x0, x1 = rng_dialog.returned_range
        try:
            changed = []
            for sp in self.myModel.iterate_selected_items(
                    skip_groups=True, skip_childs_in_selected_groups=False):
                sp.baseline_correct_no_update(x0, x1)
                changed.append(sp)
            self.data_modified(changed)
            self.state_changed.emit()
        except ValueError as ex:
            Logger.message(ex.__str__())
            QMessageBox.warning(self, 'Warning', ex.__str__(), QMessageBox.Ok)
            return
        Logger.status_message("Done")

    rng_dialog = RangeWidget(self.main_widget.var_widget, accepted, title="Baseline correction",
                             label_text="Set x range values. Y values of this range will "
                                        "be averaged and subtracted from all points:",
                             parent=self)
def interpolate(self):
    """Interpolate the selected spectra with user-chosen spacing and method."""
    if len(self.selectedIndexes()) == 0:
        return
    # Single-instance dialog: re-focus the existing one instead of stacking.
    if InterpolateDialog.is_opened:
        existing = InterpolateDialog.get_instance()
        existing.activateWindow()
        existing.setFocus()
        return
    interp_dialog = InterpolateDialog(self)
    if not interp_dialog.accepted:
        return
    spacing = interp_dialog.spacing
    kind = interp_dialog.selected_kind
    try:
        changed = []
        for sp in self.myModel.iterate_selected_items(
                skip_groups=True, skip_childs_in_selected_groups=False):
            sp.interpolate_no_update(spacing, kind)
            changed.append(sp)
        self.data_modified(changed)
        self.info_modified(changed)  # point count/spacing changed, refresh info
        self.state_changed.emit()
    except ValueError as ex:
        Logger.message(ex.__str__())
        QMessageBox.warning(self, 'Warning', ex.__str__(), QMessageBox.Ok)
        return
    Logger.status_message("Done")
def select_every_nth_item(self):
items_count = len(self.selectedIndexes()) / 2
if items_count == 0:
return
if IntIntInputDialog.is_opened:
IntIntInputDialog.get_instance().activateWindow()
IntIntInputDialog.get_instance().setFocus()
return
n, shift = 2, 0
intintinput_dialog = IntIntInputDialog(n, shift, n_min=1, offset_min=0,
title="Select every n-th item",
label="Set the n value and shift value. Group items will be skipped:")
if not intintinput_dialog.accepted:
return
n, shift = intintinput_dialog.returned_range
try:
shift = shift % n
i = 0
self.selecting = True
flags = QItemSelectionModel.Select
selection = QItemSelection()
for item in self.myModel.iterate_selected_items(skip_groups=True,
skip_childs_in_selected_groups=False,
clear_selection=True):
if i % n == shift:
start_index = self.myModel.createIndex(item.row(), 0, item)
end_index = self.myModel.createIndex(item.row(), 1, item)
selection.select(start_index, end_index)
i += 1
self.selectionModel().select(selection, flags)
# self.selecting = False
except Exception as ex:
Logger.message(ex.__str__())
QMessageBox.warning(self, 'Error', ex.__str__(), QMessageBox.Ok)
finally:
self.selecting = False
def fit_curve(self):
    """Open the fit widget for the first selected node; on accept, import the
    resulting fits and residuals as new spectra."""
    if len(self.selectedIndexes()) / 2 == 0:
        return
    selected_node = self.myModel.node_from_index(self.selectedIndexes()[0])
    # NOTE(review): group nodes are not filtered out here — TODO as in original.

    def accepted():
        self.import_spectra([fit_dialog.fits, fit_dialog.residuals])
        self.state_changed.emit()

    fit_dialog = FitWidget(self.main_widget.var_widget, accepted, selected_node, parent=self)
    Console.push_variables({'fw': fit_dialog})
def set_style(self):
items_count = len(self.selectedIndexes()) / 2
if items_count == 0:
return
selected_node = self.myModel.node_from_index(self.selectedIndexes()[0])
if isinstance(selected_node, SpectrumItemGroup):
selected_node = selected_node[0] # select spectrum
def accepted():
items = []
for item in self.myModel.iterate_selected_items(skip_groups=True,
skip_childs_in_selected_groups=False):
color = None
if not style_widget.cbColor.isChecked() and style_widget.color is not None:
color = style_widget.color.name(QColor.HexRgb) # in rgb format
line_alpha = int(style_widget.sbAlpha.value())
line_width = None if style_widget.cbLineWidth.isChecked() else float(
style_widget.dsbLineWidth.value())
line_type = None if style_widget.cbLineType.isChecked() else \
style_widget.line_types[style_widget.combLineType.currentIndex()]['index']
symbol = style_widget.symbol_types[style_widget.combSymbol.currentIndex()]['sym']
sym_brush_color = None
if not style_widget.cbSymBrushDefault.isChecked() and style_widget.sym_brush_color is not None:
# style_widget.sym_brush_color.setAlpha(style_widget.sbSymBrushAlpha.value())
sym_brush_color = style_widget.sym_brush_color.name(QColor.HexRgb) # in rgb format
sym_fill_color = None
if not style_widget.cbSymFillDefault.isChecked() and style_widget.sym_fill_color is not None:
# style_widget.sym_fill_color.setAlpha(style_widget.sbSymFillAlpha.value())
sym_fill_color = style_widget.sym_fill_color.name(QColor.HexRgb) # in rgb format
sym_brush_alpha = int(style_widget.sbSymBrushAlpha.value())
sym_fill_alpha = int(style_widget.sbSymFillAlpha.value())
symbol_size = float(style_widget.dsbSymSize.value())
plot_legend = style_widget.cbPlotLegend.isChecked()
if not hasattr(item, 'plot_legend'):
setattr(item, 'plot_legend', plot_legend)
item.color = color
item.line_width = line_width
item.line_type = line_type
item.plot_legend = plot_legend
setattr(item, 'symbol', symbol)
setattr(item, 'symbol_brush', sym_brush_color)
setattr(item, 'symbol_fill', sym_fill_color)
setattr(item, 'symbol_size', symbol_size)
setattr(item, 'line_alpha', line_alpha)
setattr(item, 'sym_brush_alpha', sym_brush_alpha)
setattr(item, 'sym_fill_alpha', sym_fill_alpha)
items.append(item)
self.check_changed(items, True)
self.state_changed.emit()
style_widget = StyleWidget(self.main_widget.var_widget, accepted, selected_node, parent=self)
def rename(self):
items_count = len(self.selectedIndexes()) / 2
if items_count == 0:
return
if RenameDialog.is_opened:
RenameDialog.get_instance().activateWindow()
RenameDialog.get_instance().setFocus()
return
expression, offset, c_mult_factor = Settings.last_rename_expression, 0, 1
rename_dialog = RenameDialog(expression, offset,
last_rename_take_name_from_list=Settings.last_rename_take_name_from_list)
if not rename_dialog.accepted:
return
if rename_dialog.is_renaming_by_expression:
expression, offset, c_mult_factor = rename_dialog.result
else:
import csv
splitted_list = csv.reader([rename_dialog.list], doublequote=True, skipinitialspace=True,
delimiter=',').__next__()
if len(splitted_list) == 0:
return
try:
i = 0
for item in self.myModel.iterate_selected_items(skip_groups=True,
skip_childs_in_selected_groups=False):
name = item.name
if rename_dialog.is_renaming_by_expression:
name = rename(expression, name, float(offset) * float(c_mult_factor))
offset += 1
else:
try:
name = splitted_list[i].strip()
i += 1
except IndexError:
pass
item.name = name
self.redraw_spectra.emit()
self.update_view()
self.state_changed.emit()
Settings.last_rename_expression = expression
Settings.last_rename_take_name_from_list = not rename_dialog.is_renaming_by_expression
Settings.save()
except Exception as ex:
Logger.message(ex.__str__())
QMessageBox.warning(self, 'Error', ex.__str__(), QMessageBox.Ok)
def context_menu(self, pos):
"""Creates a context menu in a TreeWidget."""
# pos is local position on QTreeWidget
# cursor.pos() is position on screen
item = self.myModel.node_from_index(self.indexAt(pos))
# print(pos, item.text(0) if item is not None else "None")
menu = QMenu()
sel_it_menu = QMenu("With Selected Items")
check_selected_items = QAction("Check Items (Ctrl + Q)", self)
# check_selected_items.setShortcut(QKeySequence(Qt.Key_Control, Qt.Key_D))
sel_it_menu.addAction(check_selected_items)
check_selected_items.triggered.connect(self.check_selected_items)
uncheck_selected_items = sel_it_menu.addAction("Uncheck Items (Ctrl + W)")
uncheck_selected_items.triggered.connect(self.uncheck_selected_items)
select_every_nth = sel_it_menu.addAction("Select Every n-th Item (Ctrl + D)")
select_every_nth.triggered.connect(self.select_every_nth_item)
rename = sel_it_menu.addAction("Rename Items (Ctrl + R)")
rename.triggered.connect(self.rename)
sel_it_menu.addSeparator()
cut = sel_it_menu.addAction("Cut (Ctrl + T)")
cut.triggered.connect(self.cut)
baseline_correct = sel_it_menu.addAction("Baseline Correct (Ctrl + B)")
baseline_correct.triggered.connect(self.baseline_correct)
| |
self.title_email_5.setGeometry(QtCore.QRect(0, 90, 80, 30))
self.title_email_5.setMinimumSize(QtCore.QSize(0, 30))
self.title_email_5.setMaximumSize(QtCore.QSize(10000, 30))
font = QtGui.QFont()
font.setFamily("Segoe UI Black")
font.setPointSize(14)
font.setBold(False)
font.setItalic(False)
font.setWeight(10)
self.title_email_5.setFont(font)
self.title_email_5.setStyleSheet("font: 87 14pt \"Segoe UI Black\";\n"
"color: rgb(255, 255, 255);")
self.title_email_5.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.title_email_5.setObjectName("title_email_5")
self.text_site_5 = QtWidgets.QLabel(self.registro_5)
self.text_site_5.setGeometry(QtCore.QRect(80, 30, 441, 30))
self.text_site_5.setStyleSheet("font: 87 12pt \"Segoe UI Black\";\n"
"color: rgb(240, 240, 240);")
self.text_site_5.setText("")
self.text_site_5.setAlignment(QtCore.Qt.AlignCenter)
self.text_site_5.setObjectName("text_site_5")
self.text_senha_5 = QtWidgets.QLabel(self.registro_5)
self.text_senha_5.setGeometry(QtCore.QRect(80, 60, 441, 30))
self.text_senha_5.setStyleSheet("font: 87 12pt \"Segoe UI Black\";\n"
"color: rgb(240, 240, 240);")
self.text_senha_5.setText("")
self.text_senha_5.setAlignment(QtCore.Qt.AlignCenter)
self.text_senha_5.setObjectName("text_senha_5")
self.text_email_5 = QtWidgets.QLabel(self.registro_5)
self.text_email_5.setGeometry(QtCore.QRect(80, 90, 441, 30))
self.text_email_5.setStyleSheet("font: 87 12pt \"Segoe UI Black\";\n"
"color: rgb(240, 240, 240);")
self.text_email_5.setText("")
self.text_email_5.setAlignment(QtCore.Qt.AlignCenter)
self.text_email_5.setObjectName("text_email_5")
self.button_copiar_site_5 = QtWidgets.QPushButton(self.registro_5)
self.button_copiar_site_5.setGeometry(QtCore.QRect(580, 60, 32, 32))
self.button_copiar_site_5.setMinimumSize(QtCore.QSize(0, 32))
self.button_copiar_site_5.setMaximumSize(QtCore.QSize(32, 32))
self.button_copiar_site_5.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.button_copiar_site_5.setStyleSheet("border-radius:1px")
self.button_copiar_site_5.setText("")
self.button_copiar_site_5.setObjectName("button_copiar_site_5")
self.button_deletar_registro_5 = QtWidgets.QPushButton(self.registro_5)
self.button_deletar_registro_5.setGeometry(QtCore.QRect(200, 2, 221, 27))
font = QtGui.QFont()
font.setFamily("Segoe UI Black")
font.setPointSize(9)
font.setBold(True)
font.setWeight(75)
self.button_deletar_registro_5.setFont(font)
self.button_deletar_registro_5.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.button_deletar_registro_5.setStyleSheet("QPushButton{\n"
" \n"
" color:rgb(255, 56, 56);\n"
" border-radius:10px;\n"
" background-color: rgb(72, 72, 72);\n"
"}\n"
"\n"
"QPushButton:hover{\n"
" color:rgb(255, 152, 152);\n"
" background-color: rgb(255, 56, 56);\n"
"}")
self.button_deletar_registro_5.setObjectName("button_deletar_registro_5")
self.qual_site_5 = QtWidgets.QLabel(self.registro_5)
self.qual_site_5.setGeometry(QtCore.QRect(522, 29, 32, 32))
self.qual_site_5.setText("")
self.qual_site_5.setObjectName("qual_site_5")
self.verticalLayout_3.addWidget(self.registro_5)
self.registro_6 = QtWidgets.QFrame(self.scrollAreaWidgetContents)
self.registro_6.setMinimumSize(QtCore.QSize(621, 121))
self.registro_6.setMaximumSize(QtCore.QSize(100000, 10000))
self.registro_6.setStyleSheet(" background-color: rgb(31, 31, 31);\n"
"")
self.registro_6.setFrameShape(QtWidgets.QFrame.NoFrame)
self.registro_6.setFrameShadow(QtWidgets.QFrame.Raised)
self.registro_6.setObjectName("registro_6")
self.title_site_6 = QtWidgets.QLabel(self.registro_6)
self.title_site_6.setGeometry(QtCore.QRect(0, 30, 80, 30))
self.title_site_6.setMinimumSize(QtCore.QSize(0, 30))
self.title_site_6.setMaximumSize(QtCore.QSize(10000, 30))
font = QtGui.QFont()
font.setFamily("Segoe UI Black")
font.setPointSize(14)
font.setBold(False)
font.setItalic(False)
font.setWeight(10)
self.title_site_6.setFont(font)
self.title_site_6.setStyleSheet("font: 87 14pt \"Segoe UI Black\";\n"
"color: rgb(255, 255, 255);")
self.title_site_6.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.title_site_6.setObjectName("title_site_6")
self.title_senha_6 = QtWidgets.QLabel(self.registro_6)
self.title_senha_6.setGeometry(QtCore.QRect(0, 60, 80, 30))
self.title_senha_6.setMinimumSize(QtCore.QSize(0, 30))
self.title_senha_6.setMaximumSize(QtCore.QSize(10000, 30))
font = QtGui.QFont()
font.setFamily("Segoe UI Black")
font.setPointSize(14)
font.setBold(False)
font.setItalic(False)
font.setWeight(10)
self.title_senha_6.setFont(font)
self.title_senha_6.setStyleSheet("font: 87 14pt \"Segoe UI Black\";\n"
"color: rgb(255, 255, 255);")
self.title_senha_6.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.title_senha_6.setObjectName("title_senha_6")
self.title_email_6 = QtWidgets.QLabel(self.registro_6)
self.title_email_6.setGeometry(QtCore.QRect(0, 90, 80, 30))
self.title_email_6.setMinimumSize(QtCore.QSize(0, 30))
self.title_email_6.setMaximumSize(QtCore.QSize(10000, 30))
font = QtGui.QFont()
font.setFamily("Segoe UI Black")
font.setPointSize(14)
font.setBold(False)
font.setItalic(False)
font.setWeight(10)
self.title_email_6.setFont(font)
self.title_email_6.setStyleSheet("font: 87 14pt \"Segoe UI Black\";\n"
"color: rgb(255, 255, 255);")
self.title_email_6.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.title_email_6.setObjectName("title_email_6")
self.text_site_6 = QtWidgets.QLabel(self.registro_6)
self.text_site_6.setGeometry(QtCore.QRect(80, 30, 441, 30))
self.text_site_6.setStyleSheet("font: 87 12pt \"Segoe UI Black\";\n"
"color: rgb(240, 240, 240);")
self.text_site_6.setText("")
self.text_site_6.setAlignment(QtCore.Qt.AlignCenter)
self.text_site_6.setObjectName("text_site_6")
self.text_senha_6 = QtWidgets.QLabel(self.registro_6)
self.text_senha_6.setGeometry(QtCore.QRect(80, 60, 441, 30))
self.text_senha_6.setStyleSheet("font: 87 12pt \"Segoe UI Black\";\n"
"color: rgb(240, 240, 240);")
self.text_senha_6.setText("")
self.text_senha_6.setAlignment(QtCore.Qt.AlignCenter)
self.text_senha_6.setObjectName("text_senha_6")
self.text_email_6 = QtWidgets.QLabel(self.registro_6)
self.text_email_6.setGeometry(QtCore.QRect(80, 90, 441, 30))
self.text_email_6.setStyleSheet("font: 87 12pt \"Segoe UI Black\";\n"
"color: rgb(240, 240, 240);")
self.text_email_6.setText("")
self.text_email_6.setAlignment(QtCore.Qt.AlignCenter)
self.text_email_6.setObjectName("text_email_6")
self.button_copiar_site_6 = QtWidgets.QPushButton(self.registro_6)
self.button_copiar_site_6.setGeometry(QtCore.QRect(580, 60, 32, 32))
self.button_copiar_site_6.setMinimumSize(QtCore.QSize(0, 32))
self.button_copiar_site_6.setMaximumSize(QtCore.QSize(32, 32))
self.button_copiar_site_6.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.button_copiar_site_6.setStyleSheet("border-radius:1px")
self.button_copiar_site_6.setText("")
self.button_copiar_site_6.setObjectName("button_copiar_site_6")
self.button_deletar_registro_6 = QtWidgets.QPushButton(self.registro_6)
self.button_deletar_registro_6.setGeometry(QtCore.QRect(200, 2, 221, 27))
font = QtGui.QFont()
font.setFamily("Segoe UI Black")
font.setPointSize(9)
font.setBold(True)
font.setWeight(75)
self.button_deletar_registro_6.setFont(font)
self.button_deletar_registro_6.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.button_deletar_registro_6.setStyleSheet("QPushButton{\n"
" \n"
" color:rgb(255, 56, 56);\n"
" border-radius:10px;\n"
" background-color: rgb(72, 72, 72);\n"
"}\n"
"\n"
"QPushButton:hover{\n"
" color:rgb(255, 152, 152);\n"
" background-color: rgb(255, 56, 56);\n"
"}")
self.button_deletar_registro_6.setObjectName("button_deletar_registro_6")
self.qual_site_6 = QtWidgets.QLabel(self.registro_6)
self.qual_site_6.setGeometry(QtCore.QRect(522, 29, 32, 32))
self.qual_site_6.setText("")
self.qual_site_6.setObjectName("qual_site_6")
self.verticalLayout_3.addWidget(self.registro_6)
self.registro_7 = QtWidgets.QFrame(self.scrollAreaWidgetContents)
self.registro_7.setMinimumSize(QtCore.QSize(621, 121))
self.registro_7.setMaximumSize(QtCore.QSize(100000, 10000))
self.registro_7.setStyleSheet(" background-color: rgb(31, 31, 31);\n"
"")
self.registro_7.setFrameShape(QtWidgets.QFrame.NoFrame)
self.registro_7.setFrameShadow(QtWidgets.QFrame.Raised)
self.registro_7.setObjectName("registro_7")
self.title_site_7 = QtWidgets.QLabel(self.registro_7)
self.title_site_7.setGeometry(QtCore.QRect(0, 30, 80, 30))
self.title_site_7.setMinimumSize(QtCore.QSize(0, 30))
self.title_site_7.setMaximumSize(QtCore.QSize(10000, 30))
font = QtGui.QFont()
font.setFamily("Segoe UI Black")
font.setPointSize(14)
font.setBold(False)
font.setItalic(False)
font.setWeight(10)
self.title_site_7.setFont(font)
self.title_site_7.setStyleSheet("font: 87 14pt \"Segoe UI Black\";\n"
"color: rgb(255, 255, 255);")
self.title_site_7.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.title_site_7.setObjectName("title_site_7")
self.title_senha_7 = QtWidgets.QLabel(self.registro_7)
self.title_senha_7.setGeometry(QtCore.QRect(0, 60, 80, 30))
self.title_senha_7.setMinimumSize(QtCore.QSize(0, 30))
self.title_senha_7.setMaximumSize(QtCore.QSize(10000, 30))
font = QtGui.QFont()
font.setFamily("Segoe UI Black")
font.setPointSize(14)
font.setBold(False)
font.setItalic(False)
font.setWeight(10)
self.title_senha_7.setFont(font)
self.title_senha_7.setStyleSheet("font: 87 14pt \"Segoe UI Black\";\n"
"color: rgb(255, 255, 255);")
self.title_senha_7.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.title_senha_7.setObjectName("title_senha_7")
self.title_email_7 = QtWidgets.QLabel(self.registro_7)
self.title_email_7.setGeometry(QtCore.QRect(0, 90, 80, 30))
self.title_email_7.setMinimumSize(QtCore.QSize(0, 30))
self.title_email_7.setMaximumSize(QtCore.QSize(10000, 30))
font = QtGui.QFont()
font.setFamily("Segoe UI Black")
font.setPointSize(14)
font.setBold(False)
font.setItalic(False)
font.setWeight(10)
self.title_email_7.setFont(font)
self.title_email_7.setStyleSheet("font: 87 14pt \"Segoe UI Black\";\n"
"color: rgb(255, 255, 255);")
self.title_email_7.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.title_email_7.setObjectName("title_email_7")
self.text_site_7 = QtWidgets.QLabel(self.registro_7)
self.text_site_7.setGeometry(QtCore.QRect(80, 30, 441, 30))
self.text_site_7.setStyleSheet("font: 87 12pt \"Segoe UI Black\";\n"
"color: rgb(240, 240, 240);")
self.text_site_7.setText("")
self.text_site_7.setAlignment(QtCore.Qt.AlignCenter)
self.text_site_7.setObjectName("text_site_7")
self.text_senha_7 = QtWidgets.QLabel(self.registro_7)
self.text_senha_7.setGeometry(QtCore.QRect(80, 60, 441, 30))
self.text_senha_7.setStyleSheet("font: 87 12pt \"Segoe UI Black\";\n"
"color: rgb(240, 240, 240);")
self.text_senha_7.setText("")
self.text_senha_7.setAlignment(QtCore.Qt.AlignCenter)
self.text_senha_7.setObjectName("text_senha_7")
self.text_email_7 = QtWidgets.QLabel(self.registro_7)
self.text_email_7.setGeometry(QtCore.QRect(80, 90, 441, 30))
self.text_email_7.setStyleSheet("font: 87 12pt \"Segoe UI Black\";\n"
"color: rgb(240, 240, 240);")
self.text_email_7.setText("")
self.text_email_7.setAlignment(QtCore.Qt.AlignCenter)
self.text_email_7.setObjectName("text_email_7")
self.button_copiar_site_7 = QtWidgets.QPushButton(self.registro_7)
self.button_copiar_site_7.setGeometry(QtCore.QRect(580, 60, 32, 32))
self.button_copiar_site_7.setMinimumSize(QtCore.QSize(0, 32))
self.button_copiar_site_7.setMaximumSize(QtCore.QSize(32, 32))
self.button_copiar_site_7.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.button_copiar_site_7.setStyleSheet("border-radius:1px")
self.button_copiar_site_7.setText("")
self.button_copiar_site_7.setObjectName("button_copiar_site_7")
self.button_deletar_registro_7 = QtWidgets.QPushButton(self.registro_7)
self.button_deletar_registro_7.setGeometry(QtCore.QRect(200, 2, 221, 27))
font = QtGui.QFont()
font.setFamily("Segoe UI Black")
font.setPointSize(9)
font.setBold(True)
font.setWeight(75)
self.button_deletar_registro_7.setFont(font)
self.button_deletar_registro_7.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.button_deletar_registro_7.setStyleSheet("QPushButton{\n"
" \n"
" color:rgb(255, 56, 56);\n"
" border-radius:10px;\n"
" background-color: rgb(72, 72, 72);\n"
"}\n"
"\n"
"QPushButton:hover{\n"
" color:rgb(255, 152, 152);\n"
" background-color: rgb(255, 56, 56);\n"
"}")
self.button_deletar_registro_7.setObjectName("button_deletar_registro_7")
self.qual_site_7 = QtWidgets.QLabel(self.registro_7)
self.qual_site_7.setGeometry(QtCore.QRect(522, 29, 32, 32))
self.qual_site_7.setText("")
self.qual_site_7.setObjectName("qual_site_7")
self.verticalLayout_3.addWidget(self.registro_7)
self.registro_8 = QtWidgets.QFrame(self.scrollAreaWidgetContents)
self.registro_8.setMinimumSize(QtCore.QSize(621, 121))
self.registro_8.setMaximumSize(QtCore.QSize(100000, 10000))
self.registro_8.setStyleSheet(" background-color: rgb(31, 31, 31);\n"
"")
self.registro_8.setFrameShape(QtWidgets.QFrame.NoFrame)
self.registro_8.setFrameShadow(QtWidgets.QFrame.Raised)
self.registro_8.setObjectName("registro_8")
self.title_site_8 = QtWidgets.QLabel(self.registro_8)
self.title_site_8.setGeometry(QtCore.QRect(0, 30, 80, 30))
self.title_site_8.setMinimumSize(QtCore.QSize(0, 30))
self.title_site_8.setMaximumSize(QtCore.QSize(10000, 30))
font = QtGui.QFont()
font.setFamily("Segoe UI Black")
font.setPointSize(14)
font.setBold(False)
font.setItalic(False)
font.setWeight(10)
self.title_site_8.setFont(font)
self.title_site_8.setStyleSheet("font: 87 14pt \"Segoe UI Black\";\n"
"color: rgb(255, 255, 255);")
self.title_site_8.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.title_site_8.setObjectName("title_site_8")
self.title_senha_8 = QtWidgets.QLabel(self.registro_8)
self.title_senha_8.setGeometry(QtCore.QRect(0, 60, 80, 30))
self.title_senha_8.setMinimumSize(QtCore.QSize(0, 30))
self.title_senha_8.setMaximumSize(QtCore.QSize(10000, 30))
font = QtGui.QFont()
font.setFamily("Segoe UI Black")
font.setPointSize(14)
font.setBold(False)
font.setItalic(False)
font.setWeight(10)
self.title_senha_8.setFont(font)
self.title_senha_8.setStyleSheet("font: 87 14pt \"Segoe UI Black\";\n"
"color: rgb(255, 255, 255);")
self.title_senha_8.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.title_senha_8.setObjectName("title_senha_8")
self.title_email_8 = QtWidgets.QLabel(self.registro_8)
self.title_email_8.setGeometry(QtCore.QRect(0, 90, 80, 30))
self.title_email_8.setMinimumSize(QtCore.QSize(0, 30))
self.title_email_8.setMaximumSize(QtCore.QSize(10000, 30))
font = QtGui.QFont()
font.setFamily("Segoe UI Black")
font.setPointSize(14)
font.setBold(False)
font.setItalic(False)
font.setWeight(10)
self.title_email_8.setFont(font)
self.title_email_8.setStyleSheet("font: 87 14pt \"Segoe UI Black\";\n"
"color: rgb(255, 255, 255);")
self.title_email_8.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.title_email_8.setObjectName("title_email_8")
self.text_site_8 = QtWidgets.QLabel(self.registro_8)
self.text_site_8.setGeometry(QtCore.QRect(80, 30, 441, 30))
self.text_site_8.setStyleSheet("font: 87 12pt \"Segoe UI Black\";\n"
"color: rgb(240, 240, 240);")
self.text_site_8.setText("")
self.text_site_8.setAlignment(QtCore.Qt.AlignCenter)
self.text_site_8.setObjectName("text_site_8")
self.text_senha_8 = QtWidgets.QLabel(self.registro_8)
self.text_senha_8.setGeometry(QtCore.QRect(80, 60, 441, 30))
self.text_senha_8.setStyleSheet("font: 87 12pt \"Segoe UI Black\";\n"
"color: rgb(240, 240, 240);")
self.text_senha_8.setText("")
self.text_senha_8.setAlignment(QtCore.Qt.AlignCenter)
self.text_senha_8.setObjectName("text_senha_8")
self.text_email_8 = QtWidgets.QLabel(self.registro_8)
self.text_email_8.setGeometry(QtCore.QRect(80, 90, 441, 30))
self.text_email_8.setStyleSheet("font: 87 12pt \"Segoe UI Black\";\n"
"color: rgb(240, 240, 240);")
self.text_email_8.setText("")
self.text_email_8.setAlignment(QtCore.Qt.AlignCenter)
self.text_email_8.setObjectName("text_email_8")
self.button_copiar_site_8 = QtWidgets.QPushButton(self.registro_8)
self.button_copiar_site_8.setGeometry(QtCore.QRect(580, 60, 32, 32))
self.button_copiar_site_8.setMinimumSize(QtCore.QSize(0, 32))
self.button_copiar_site_8.setMaximumSize(QtCore.QSize(32, 32))
self.button_copiar_site_8.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.button_copiar_site_8.setStyleSheet("border-radius:1px")
self.button_copiar_site_8.setText("")
self.button_copiar_site_8.setObjectName("button_copiar_site_8")
self.button_deletar_registro_8 = QtWidgets.QPushButton(self.registro_8)
self.button_deletar_registro_8.setGeometry(QtCore.QRect(200, 2, 221, 27))
font = QtGui.QFont()
font.setFamily("Segoe UI Black")
font.setPointSize(9)
font.setBold(True)
font.setWeight(75)
self.button_deletar_registro_8.setFont(font)
self.button_deletar_registro_8.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.button_deletar_registro_8.setStyleSheet("QPushButton{\n"
" \n"
" color:rgb(255, 56, 56);\n"
" border-radius:10px;\n"
" background-color: rgb(72, 72, 72);\n"
"}\n"
"\n"
"QPushButton:hover{\n"
" color:rgb(255, 152, 152);\n"
" background-color: rgb(255, 56, 56);\n"
"}")
self.button_deletar_registro_8.setObjectName("button_deletar_registro_8")
self.qual_site_8 = QtWidgets.QLabel(self.registro_8)
self.qual_site_8.setGeometry(QtCore.QRect(522, 29, 32, 32))
self.qual_site_8.setText("")
self.qual_site_8.setObjectName("qual_site_8")
self.verticalLayout_3.addWidget(self.registro_8)
self.registro_9 = QtWidgets.QFrame(self.scrollAreaWidgetContents)
self.registro_9.setMinimumSize(QtCore.QSize(621, 121))
self.registro_9.setMaximumSize(QtCore.QSize(100000, 10000))
self.registro_9.setStyleSheet(" background-color: rgb(31, 31, 31);\n"
"")
self.registro_9.setFrameShape(QtWidgets.QFrame.NoFrame)
self.registro_9.setFrameShadow(QtWidgets.QFrame.Raised)
self.registro_9.setObjectName("registro_9")
self.title_site_9 = QtWidgets.QLabel(self.registro_9)
self.title_site_9.setGeometry(QtCore.QRect(0, 30, 80, 30))
self.title_site_9.setMinimumSize(QtCore.QSize(0, 30))
self.title_site_9.setMaximumSize(QtCore.QSize(10000, 30))
font = QtGui.QFont()
font.setFamily("Segoe UI Black")
font.setPointSize(14)
font.setBold(False)
font.setItalic(False)
font.setWeight(10)
self.title_site_9.setFont(font)
self.title_site_9.setStyleSheet("font: 87 14pt \"Segoe UI Black\";\n"
"color: rgb(255, 255, 255);")
self.title_site_9.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.title_site_9.setObjectName("title_site_9")
self.title_senha_9 = QtWidgets.QLabel(self.registro_9)
self.title_senha_9.setGeometry(QtCore.QRect(0, 60, 80, 30))
self.title_senha_9.setMinimumSize(QtCore.QSize(0, 30))
self.title_senha_9.setMaximumSize(QtCore.QSize(10000, 30))
font = QtGui.QFont()
font.setFamily("Segoe UI Black")
font.setPointSize(14)
font.setBold(False)
font.setItalic(False)
font.setWeight(10)
self.title_senha_9.setFont(font)
self.title_senha_9.setStyleSheet("font: 87 14pt \"Segoe UI Black\";\n"
"color: rgb(255, 255, 255);")
self.title_senha_9.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.title_senha_9.setObjectName("title_senha_9")
self.title_email_9 = QtWidgets.QLabel(self.registro_9)
self.title_email_9.setGeometry(QtCore.QRect(0, 90, 80, 30))
self.title_email_9.setMinimumSize(QtCore.QSize(0, 30))
self.title_email_9.setMaximumSize(QtCore.QSize(10000, 30))
font = QtGui.QFont()
font.setFamily("Segoe UI Black")
font.setPointSize(14)
font.setBold(False)
font.setItalic(False)
font.setWeight(10)
self.title_email_9.setFont(font)
self.title_email_9.setStyleSheet("font: 87 14pt \"Segoe UI Black\";\n"
"color: rgb(255, 255, 255);")
self.title_email_9.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.title_email_9.setObjectName("title_email_9")
self.text_site_9 = QtWidgets.QLabel(self.registro_9)
self.text_site_9.setGeometry(QtCore.QRect(80, 30, 441, 30))
self.text_site_9.setStyleSheet("font: 87 12pt \"Segoe UI Black\";\n"
"color: rgb(240, 240, 240);")
self.text_site_9.setText("")
self.text_site_9.setAlignment(QtCore.Qt.AlignCenter)
self.text_site_9.setObjectName("text_site_9")
self.text_senha_9 = QtWidgets.QLabel(self.registro_9)
self.text_senha_9.setGeometry(QtCore.QRect(80, 60, 441, 30))
self.text_senha_9.setStyleSheet("font: 87 12pt \"Segoe UI Black\";\n"
"color: rgb(240, 240, 240);")
self.text_senha_9.setText("")
self.text_senha_9.setAlignment(QtCore.Qt.AlignCenter)
self.text_senha_9.setObjectName("text_senha_9")
self.text_email_9 = QtWidgets.QLabel(self.registro_9)
self.text_email_9.setGeometry(QtCore.QRect(80, 90, 441, 30))
self.text_email_9.setStyleSheet("font: 87 12pt \"Segoe UI Black\";\n"
"color: rgb(240, 240, 240);")
self.text_email_9.setText("")
self.text_email_9.setAlignment(QtCore.Qt.AlignCenter)
self.text_email_9.setObjectName("text_email_9")
self.button_copiar_site_9 = QtWidgets.QPushButton(self.registro_9)
self.button_copiar_site_9.setGeometry(QtCore.QRect(580, 60, 32, 32))
self.button_copiar_site_9.setMinimumSize(QtCore.QSize(0, 32))
self.button_copiar_site_9.setMaximumSize(QtCore.QSize(32, 32))
self.button_copiar_site_9.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.button_copiar_site_9.setStyleSheet("border-radius:1px")
self.button_copiar_site_9.setText("")
self.button_copiar_site_9.setObjectName("button_copiar_site_9")
self.button_copiar_senha_9 = QtWidgets.QPushButton(self.registro_9)
self.button_copiar_senha_9.setGeometry(QtCore.QRect(580, 29, 32, 32))
self.button_copiar_senha_9.setMinimumSize(QtCore.QSize(0, 32))
self.button_copiar_senha_9.setMaximumSize(QtCore.QSize(32, 32))
self.button_copiar_senha_9.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.button_copiar_senha_9.setStyleSheet("border-radius:1px")
self.button_copiar_senha_9.setText("")
self.button_copiar_senha_9.setObjectName("button_copiar_senha_9")
self.button_deletar_registro_9 = QtWidgets.QPushButton(self.registro_9)
self.button_deletar_registro_9.setGeometry(QtCore.QRect(200, 2, 221, 27))
font = QtGui.QFont()
font.setFamily("Segoe UI Black")
font.setPointSize(9)
font.setBold(True)
font.setWeight(75)
self.button_deletar_registro_9.setFont(font)
self.button_deletar_registro_9.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.button_deletar_registro_9.setStyleSheet("QPushButton{\n"
" \n"
" color:rgb(255, 56, 56);\n"
" border-radius:10px;\n"
" background-color: rgb(72, 72, 72);\n"
"}\n"
"\n"
"QPushButton:hover{\n"
" color:rgb(255, 152, 152);\n"
" background-color: rgb(255, 56, 56);\n"
"}")
self.button_deletar_registro_9.setObjectName("button_deletar_registro_9")
self.qual_site_9 = QtWidgets.QLabel(self.registro_9)
self.qual_site_9.setGeometry(QtCore.QRect(522, 29, 32, 32))
self.qual_site_9.setText("")
self.qual_site_9.setObjectName("qual_site_9")
self.verticalLayout_3.addWidget(self.registro_9)
self.registro_10 = QtWidgets.QFrame(self.scrollAreaWidgetContents)
self.registro_10.setMinimumSize(QtCore.QSize(621, 121))
self.registro_10.setMaximumSize(QtCore.QSize(100000, 10000))
self.registro_10.setStyleSheet(" background-color: rgb(31, 31, 31);\n"
"")
self.registro_10.setFrameShape(QtWidgets.QFrame.NoFrame)
self.registro_10.setFrameShadow(QtWidgets.QFrame.Raised)
self.registro_10.setObjectName("registro_10")
self.title_site_10 = QtWidgets.QLabel(self.registro_10)
self.title_site_10.setGeometry(QtCore.QRect(0, 30, 80, 30))
self.title_site_10.setMinimumSize(QtCore.QSize(0, 30))
self.title_site_10.setMaximumSize(QtCore.QSize(10000, 30))
font = QtGui.QFont()
font.setFamily("Segoe UI Black")
font.setPointSize(14)
font.setBold(False)
font.setItalic(False)
font.setWeight(10)
self.title_site_10.setFont(font)
self.title_site_10.setStyleSheet("font: 87 14pt \"Segoe UI Black\";\n"
"color: rgb(255, 255, 255);")
self.title_site_10.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.title_site_10.setObjectName("title_site_10")
self.title_senha_10 = QtWidgets.QLabel(self.registro_10)
self.title_senha_10.setGeometry(QtCore.QRect(0, 60, 80, 30))
self.title_senha_10.setMinimumSize(QtCore.QSize(0, 30))
self.title_senha_10.setMaximumSize(QtCore.QSize(10000, 30))
font = QtGui.QFont()
font.setFamily("Segoe UI Black")
font.setPointSize(14)
font.setBold(False)
font.setItalic(False)
font.setWeight(10)
self.title_senha_10.setFont(font)
self.title_senha_10.setStyleSheet("font: 87 14pt \"Segoe UI Black\";\n"
"color: rgb(255, 255, 255);")
self.title_senha_10.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.title_senha_10.setObjectName("title_senha_10")
self.title_email_10 = QtWidgets.QLabel(self.registro_10)
self.title_email_10.setGeometry(QtCore.QRect(0, 90, 80, 30))
self.title_email_10.setMinimumSize(QtCore.QSize(0, 30))
self.title_email_10.setMaximumSize(QtCore.QSize(10000, 30))
font = QtGui.QFont()
font.setFamily("Segoe UI Black")
font.setPointSize(14)
font.setBold(False)
font.setItalic(False)
font.setWeight(10)
self.title_email_10.setFont(font)
self.title_email_10.setStyleSheet("font: 87 14pt \"Segoe UI Black\";\n"
"color: rgb(255, 255, 255);")
self.title_email_10.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.title_email_10.setObjectName("title_email_10")
self.text_site_10 = QtWidgets.QLabel(self.registro_10)
self.text_site_10.setGeometry(QtCore.QRect(80, 30, 441, 30))
self.text_site_10.setStyleSheet("font: 87 12pt \"Segoe UI Black\";\n"
"color: rgb(240, 240, 240);")
self.text_site_10.setText("")
self.text_site_10.setAlignment(QtCore.Qt.AlignCenter)
self.text_site_10.setObjectName("text_site_10")
self.text_senha_10 = QtWidgets.QLabel(self.registro_10)
self.text_senha_10.setGeometry(QtCore.QRect(80, 60, 441, 30))
self.text_senha_10.setStyleSheet("font: 87 12pt \"Segoe UI Black\";\n"
"color: rgb(240, 240, 240);")
self.text_senha_10.setText("")
self.text_senha_10.setAlignment(QtCore.Qt.AlignCenter)
self.text_senha_10.setObjectName("text_senha_10")
self.text_email_10 = QtWidgets.QLabel(self.registro_10)
self.text_email_10.setGeometry(QtCore.QRect(80, 90, 441, 30))
self.text_email_10.setStyleSheet("font: 87 12pt \"Segoe UI Black\";\n"
"color: rgb(240, 240, 240);")
self.text_email_10.setText("")
self.text_email_10.setAlignment(QtCore.Qt.AlignCenter)
self.text_email_10.setObjectName("text_email_10")
self.button_copiar_site_10 = QtWidgets.QPushButton(self.registro_10)
self.button_copiar_site_10.setGeometry(QtCore.QRect(580, 60, 32, 32))
self.button_copiar_site_10.setMinimumSize(QtCore.QSize(0, 32))
self.button_copiar_site_10.setMaximumSize(QtCore.QSize(32, 32))
self.button_copiar_site_10.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.button_copiar_site_10.setStyleSheet("border-radius:1px")
self.button_copiar_site_10.setText("")
self.button_copiar_site_10.setObjectName("button_copiar_site_10")
self.button_deletar_registro_10 = QtWidgets.QPushButton(self.registro_10)
| |
trp_player = 0
trp_multiplayer_profile_troop_male = 1
trp_multiplayer_profile_troop_female = 2
trp_temp_troop = 3
trp_find_item_cheat = 4
trp_random_town_sequence = 5
trp_tournament_participants = 6
trp_tutorial_maceman = 7
trp_tutorial_archer = 8
trp_tutorial_swordsman = 9
trp_novice_fighter = 10
trp_regular_fighter = 11
trp_veteran_fighter = 12
trp_champion_fighter = 13
trp_arena_training_fighter_1 = 14
trp_arena_training_fighter_2 = 15
trp_arena_training_fighter_3 = 16
trp_arena_training_fighter_4 = 17
trp_arena_training_fighter_5 = 18
trp_arena_training_fighter_6 = 19
trp_arena_training_fighter_7 = 20
trp_arena_training_fighter_8 = 21
trp_arena_training_fighter_9 = 22
trp_arena_training_fighter_10 = 23
trp_cattle = 24
trp_farmer = 25
trp_townsman = 26
trp_watchman = 27
trp_caravan_guard = 28
trp_mercenary_swordsman = 29
trp_hired_blade = 30
trp_mercenary_crossbowman = 31
trp_mercenary_horseman = 32
trp_mercenary_cavalry = 33
trp_mercenaries_end = 34
trp_swadian_recruit = 35
trp_swadian_militia = 36
trp_swadian_footman = 37
trp_swadian_infantry = 38
trp_swadian_sergeant = 39
trp_swadian_skirmisher = 40
trp_swadian_crossbowman = 41
trp_swadian_sharpshooter = 42
trp_swadian_man_at_arms = 43
trp_swadian_knight = 44
trp_swadian_messenger = 45
trp_swadian_deserter = 46
trp_swadian_prison_guard = 47
trp_swadian_castle_guard = 48
trp_vaegir_recruit = 49
trp_vaegir_footman = 50
trp_vaegir_skirmisher = 51
trp_vaegir_archer = 52
trp_vaegir_marksman = 53
trp_vaegir_veteran = 54
trp_vaegir_infantry = 55
trp_vaegir_guard = 56
trp_vaegir_horseman = 57
trp_vaegir_knight = 58
trp_vaegir_messenger = 59
trp_vaegir_deserter = 60
trp_vaegir_prison_guard = 61
trp_vaegir_castle_guard = 62
trp_khergit_tribesman = 63
trp_khergit_skirmisher = 64
trp_khergit_horseman = 65
trp_khergit_horse_archer = 66
trp_khergit_veteran_horse_archer = 67
trp_khergit_lancer = 68
trp_khergit_messenger = 69
trp_khergit_deserter = 70
trp_khergit_prison_guard = 71
trp_khergit_castle_guard = 72
trp_nord_recruit = 73
trp_nord_footman = 74
trp_nord_trained_footman = 75
trp_nord_warrior = 76
trp_nord_veteran = 77
trp_nord_champion = 78
trp_nord_huntsman = 79
trp_nord_archer = 80
trp_nord_veteran_archer = 81
trp_nord_messenger = 82
trp_nord_deserter = 83
trp_nord_prison_guard = 84
trp_nord_castle_guard = 85
trp_rhodok_tribesman = 86
trp_rhodok_spearman = 87
trp_rhodok_trained_spearman = 88
trp_rhodok_veteran_spearman = 89
trp_rhodok_sergeant = 90
trp_rhodok_crossbowman = 91
trp_rhodok_trained_crossbowman = 92
trp_rhodok_veteran_crossbowman = 93
trp_rhodok_sharpshooter = 94
trp_rhodok_messenger = 95
trp_rhodok_deserter = 96
trp_rhodok_prison_guard = 97
trp_rhodok_castle_guard = 98
trp_sarranid_recruit = 99
trp_sarranid_footman = 100
trp_sarranid_veteran_footman = 101
trp_sarranid_infantry = 102
trp_sarranid_guard = 103
trp_sarranid_skirmisher = 104
trp_sarranid_archer = 105
trp_sarranid_master_archer = 106
trp_sarranid_horseman = 107
trp_sarranid_mamluke = 108
trp_sarranid_messenger = 109
trp_sarranid_deserter = 110
trp_sarranid_prison_guard = 111
trp_sarranid_castle_guard = 112
trp_looter = 113
trp_bandit = 114
trp_brigand = 115
trp_mountain_bandit = 116
trp_forest_bandit = 117
trp_sea_raider = 118
trp_steppe_bandit = 119
trp_taiga_bandit = 120
trp_desert_bandit = 121
trp_black_khergit_horseman = 122
trp_manhunter = 123
trp_slave_driver = 124
trp_slave_hunter = 125
trp_slave_crusher = 126
trp_slaver_chief = 127
trp_follower_woman = 128
trp_hunter_woman = 129
trp_fighter_woman = 130
trp_sword_sister = 131
trp_refugee = 132
trp_peasant_woman = 133
trp_caravan_master = 134
trp_kidnapped_girl = 135
trp_town_walker_1 = 136
trp_town_walker_2 = 137
trp_khergit_townsman = 138
trp_khergit_townswoman = 139
trp_sarranid_townsman = 140
trp_sarranid_townswoman = 141
trp_village_walker_1 = 142
trp_village_walker_2 = 143
trp_spy_walker_1 = 144
trp_spy_walker_2 = 145
trp_tournament_master = 146
trp_trainer = 147
trp_constable_hareck = 148
trp_ramun_the_slave_trader = 149
trp_guide = 150
trp_xerina = 151
trp_dranton = 152
trp_kradus = 153
trp_tutorial_trainer = 154
trp_tutorial_student_1 = 155
trp_tutorial_student_2 = 156
trp_tutorial_student_3 = 157
trp_tutorial_student_4 = 158
trp_galeas = 159
trp_farmer_from_bandit_village = 160
trp_trainer_1 = 161
trp_trainer_2 = 162
trp_trainer_3 = 163
trp_trainer_4 = 164
trp_trainer_5 = 165
trp_ransom_broker_1 = 166
trp_ransom_broker_2 = 167
trp_ransom_broker_3 = 168
trp_ransom_broker_4 = 169
trp_ransom_broker_5 = 170
trp_ransom_broker_6 = 171
trp_ransom_broker_7 = 172
trp_ransom_broker_8 = 173
trp_ransom_broker_9 = 174
trp_ransom_broker_10 = 175
trp_tavern_traveler_1 = 176
trp_tavern_traveler_2 = 177
trp_tavern_traveler_3 = 178
trp_tavern_traveler_4 = 179
trp_tavern_traveler_5 = 180
trp_tavern_traveler_6 = 181
trp_tavern_traveler_7 = 182
trp_tavern_traveler_8 = 183
trp_tavern_traveler_9 = 184
trp_tavern_traveler_10 = 185
trp_tavern_bookseller_1 = 186
trp_tavern_bookseller_2 = 187
trp_tavern_minstrel_1 = 188
trp_tavern_minstrel_2 = 189
trp_tavern_minstrel_3 = 190
trp_tavern_minstrel_4 = 191
trp_tavern_minstrel_5 = 192
trp_kingdom_heroes_including_player_begin = 193
trp_npc1 = 194
trp_npc2 = 195
trp_npc3 = 196
trp_npc4 = 197
trp_npc5 = 198
trp_npc6 = 199
trp_npc7 = 200
trp_npc8 = 201
trp_npc9 = 202
trp_npc10 = 203
trp_npc11 = 204
trp_npc12 = 205
trp_npc13 = 206
trp_npc14 = 207
trp_npc15 = 208
trp_npc16 = 209
trp_kingdom_1_lord = 210
trp_kingdom_2_lord = 211
trp_kingdom_3_lord = 212
trp_kingdom_4_lord = 213
trp_kingdom_5_lord = 214
trp_kingdom_6_lord = 215
trp_knight_1_1 = 216
trp_knight_1_2 = 217
trp_knight_1_3 = 218
trp_knight_1_4 = 219
trp_knight_1_5 = 220
trp_knight_1_6 = 221
trp_knight_1_7 = 222
trp_knight_1_8 = 223
trp_knight_1_9 = 224
trp_knight_1_10 = 225
trp_knight_1_11 = 226
trp_knight_1_12 = 227
trp_knight_1_13 = 228
trp_knight_1_14 = 229
trp_knight_1_15 = 230
trp_knight_1_16 = 231
trp_knight_1_17 = 232
trp_knight_1_18 = 233
trp_knight_1_19 = 234
trp_knight_1_20 = 235
trp_knight_2_1 = 236
trp_knight_2_2 = 237
trp_knight_2_3 = 238
trp_knight_2_4 = 239
trp_knight_2_5 = 240
trp_knight_2_6 = 241
trp_knight_2_7 = 242
trp_knight_2_8 = 243
trp_knight_2_9 = 244
trp_knight_2_10 = 245
trp_knight_2_11 = 246
trp_knight_2_12 = 247
trp_knight_2_13 = 248
trp_knight_2_14 = 249
trp_knight_2_15 = 250
trp_knight_2_16 = 251
trp_knight_2_17 = 252
trp_knight_2_18 = 253
trp_knight_2_19 = 254
trp_knight_2_20 = 255
trp_knight_3_1 = 256
trp_knight_3_2 = 257
trp_knight_3_3 = 258
trp_knight_3_4 = 259
trp_knight_3_5 = 260
trp_knight_3_6 = 261
trp_knight_3_7 = 262
trp_knight_3_8 = 263
trp_knight_3_9 = 264
trp_knight_3_10 = 265
trp_knight_3_11 = 266
trp_knight_3_12 = 267
trp_knight_3_13 = 268
trp_knight_3_14 = 269
trp_knight_3_15 = 270
trp_knight_3_16 = 271
trp_knight_3_17 = 272
trp_knight_3_18 = 273
trp_knight_3_19 = 274
trp_knight_3_20 = 275
trp_knight_4_1 = 276
trp_knight_4_2 = 277
trp_knight_4_3 = 278
trp_knight_4_4 = 279
trp_knight_4_5 = 280
trp_knight_4_6 = 281
trp_knight_4_7 = 282
trp_knight_4_8 = 283
trp_knight_4_9 = 284
trp_knight_4_10 = 285
trp_knight_4_11 = 286
trp_knight_4_12 = 287
trp_knight_4_13 = 288
trp_knight_4_14 = 289
trp_knight_4_15 = 290
trp_knight_4_16 = 291
trp_knight_4_17 = 292
trp_knight_4_18 = 293
trp_knight_4_19 = 294
trp_knight_4_20 = 295
trp_knight_5_1 = 296
trp_knight_5_2 = 297
trp_knight_5_3 = 298
trp_knight_5_4 = 299
trp_knight_5_5 = 300
trp_knight_5_6 = 301
trp_knight_5_7 = 302
trp_knight_5_8 = 303
trp_knight_5_9 = 304
trp_knight_5_10 = 305
trp_knight_5_11 = 306
trp_knight_5_12 = 307
trp_knight_5_13 = 308
trp_knight_5_14 = 309
trp_knight_5_15 = 310
trp_knight_5_16 = 311
trp_knight_5_17 = 312
trp_knight_5_18 = 313
trp_knight_5_19 = 314
trp_knight_5_20 = 315
trp_knight_6_1 = 316
trp_knight_6_2 = 317
trp_knight_6_3 = 318
trp_knight_6_4 = 319
trp_knight_6_5 = 320
trp_knight_6_6 = 321
trp_knight_6_7 = 322
trp_knight_6_8 = 323
trp_knight_6_9 = 324
trp_knight_6_10 = 325
trp_knight_6_11 = 326
trp_knight_6_12 = 327
trp_knight_6_13 = 328
trp_knight_6_14 = 329
trp_knight_6_15 = 330
trp_knight_6_16 = 331
trp_knight_6_17 = 332
trp_knight_6_18 = 333
trp_knight_6_19 = 334
trp_knight_6_20 = 335
trp_kingdom_1_pretender = 336
trp_kingdom_2_pretender = 337
trp_kingdom_3_pretender = 338
trp_kingdom_4_pretender = 339
trp_kingdom_5_pretender = 340
trp_kingdom_6_pretender = 341
trp_knight_1_1_wife = 342
trp_kingdom_1_lady_1 = 343
trp_kingdom_1_lady_2 = 344
trp_knight_1_lady_3 = 345
trp_knight_1_lady_4 = 346
trp_kingdom_l_lady_5 = 347
trp_kingdom_1_lady_6 = 348
trp_kingdom_1_lady_7 = 349
trp_kingdom_1_lady_8 = 350
trp_kingdom_1_lady_9 = 351
trp_kingdom_1_lady_10 = 352
trp_kingdom_1_lady_11 = 353
trp_kingdom_1_lady_12 = 354
trp_kingdom_l_lady_13 = 355
trp_kingdom_1_lady_14 = 356
trp_kingdom_1_lady_15 = 357
trp_kingdom_1_lady_16 = 358
trp_kingdom_1_lady_17 = 359
trp_kingdom_1_lady_18 = 360
trp_kingdom_1_lady_19 = 361
trp_kingdom_1_lady_20 = 362
trp_kingdom_2_lady_1 = 363
trp_kingdom_2_lady_2 = 364
trp_kingdom_2_lady_3 = 365
trp_kingdom_2_lady_4 = 366
trp_kingdom_2_lady_5 = 367
trp_kingdom_2_lady_6 = 368
trp_kingdom_2_lady_7 = 369
trp_kingdom_2_lady_8 = 370
trp_kingdom_2_lady_9 = 371
trp_kingdom_2_lady_10 = 372
trp_kingdom_2_lady_11 = 373
trp_kingdom_2_lady_12 = 374
trp_kingdom_2_lady_13 = 375
trp_kingdom_2_lady_14 = 376
trp_kingdom_2_lady_15 = 377
trp_kingdom_2_lady_16 = 378
trp_kingdom_2_lady_17 = 379
trp_kingdom_2_lady_18 = 380
trp_kingdom_2_lady_19 = 381
trp_kingdom_2_lady_20 = 382
trp_kingdom_3_lady_1 = 383
trp_kingdom_3_lady_2 = 384
trp_kingdom_3_lady_3 = 385
trp_kingdom_3_lady_4 = 386
trp_kingdom_3_lady_5 = 387
trp_kingdom_3_lady_6 = 388
trp_kingdom_3_lady_7 = 389
trp_kingdom_3_lady_8 = 390
trp_kingdom_3_lady_9 = 391
trp_kingdom_3_lady_10 = 392
trp_kingdom_3_lady_11 = 393
trp_kingdom_3_lady_12 = 394
trp_kingdom_3_lady_13 = 395
trp_kingdom_3_lady_14 = 396
trp_kingdom_3_lady_15 = 397
trp_kingdom_3_lady_16 = 398
trp_kingdom_3_lady_17 = 399
trp_kingdom_3_lady_18 = 400
trp_kingdom_3_lady_19 = 401
trp_kingdom_3_lady_20 = 402
trp_kingdom_4_lady_1 = 403
trp_kingdom_4_lady_2 = 404
trp_kingdom_4_lady_3 = 405
trp_kingdom_4_lady_4 = 406
trp_kingdom_4_lady_5 = 407
trp_kingdom_4_lady_6 = 408
trp_kingdom_4_lady_7 = 409
trp_knight_4_2b_daughter_1 = 410
trp_kingdom_4_lady_9 = 411
trp_knight_4_2c_wife_1 = 412
trp_kingdom_4_lady_11 = 413
trp_knight_4_2c_daughter = 414
trp_knight_4_1b_wife = 415
trp_kingdom_4_lady_14 = 416
trp_knight_4_1b_daughter = 417
trp_knight_4_2b_daughter_2 = 418
trp_kingdom_4_lady_17 = 419
trp_knight_4_2c_wife_2 = 420
trp_knight_4_1c_daughter = 421
trp_kingdom_4_lady_20 = 422
trp_kingdom_5_lady_1 = 423
trp_kingdom_5_lady_2 = 424
trp_kingdom_5_lady_3 = 425
trp_kingdom_5_lady_4 = 426
trp_kingdom_5_5_wife = 427
trp_kingdom_5_2b_wife_1 = 428
trp_kingdom_5_1c_daughter_1 = 429
trp_kingdom_5_2c_daughter_1 = 430
trp_kingdom_5_1c_wife_1 = 431
trp_kingdom_5_2c_wife_1 = 432
trp_kingdom_5_1c_daughter_2 = 433
trp_kingdom_5_2c_daughter_2 = 434
trp_kingdom_5_1b_wife = 435
trp_kingdom_5_2b_wife_2 = 436
trp_kingdom_5_1c_daughter_3 = 437
trp_kingdom_5_lady_16 = 438
trp_kingdom_5_1c_wife_2 = 439
trp_kingdom_5_2c_wife_2 = 440
trp_kingdom_5_1c_daughter_4 = 441
trp_kingdom_5_lady_20 = 442
trp_kingdom_6_lady_1 = 443
trp_kingdom_6_lady_2 = 444
trp_kingdom_6_lady_3 = 445
trp_kingdom_6_lady_4 = 446
trp_kingdom_6_lady_5 = 447
trp_kingdom_6_lady_6 = 448
trp_kingdom_6_lady_7 = 449
trp_kingdom_6_lady_8 = 450
trp_kingdom_6_lady_9 = 451
trp_kingdom_6_lady_10 = 452
trp_kingdom_6_lady_11 = 453
trp_kingdom_6_lady_12 = 454
trp_kingdom_6_lady_13 = 455
trp_kingdom_6_lady_14 = 456
trp_kingdom_6_lady_15 = 457
trp_kingdom_6_lady_16 = 458
trp_kingdom_6_lady_17 = 459
trp_kingdom_6_lady_18 = 460
trp_kingdom_6_lady_19 = 461
trp_kingdom_6_lady_20 = 462
trp_heroes_end = 463
trp_town_1_seneschal = 464
trp_town_2_seneschal = 465
trp_town_3_seneschal = 466
trp_town_4_seneschal = 467
trp_town_5_seneschal = 468
trp_town_6_seneschal = 469
trp_town_7_seneschal = 470
trp_town_8_seneschal = 471
trp_town_9_seneschal = 472
trp_town_10_seneschal = 473
trp_town_11_seneschal = 474
trp_town_12_seneschal = 475
trp_town_13_seneschal = 476
trp_town_14_seneschal = 477
trp_town_15_seneschal = 478
trp_town_16_seneschal = 479
trp_town_17_seneschal = 480
trp_town_18_seneschal = 481
trp_town_19_seneschal = 482
trp_town_20_seneschal = 483
trp_town_21_seneschal = 484
trp_town_22_seneschal = 485
trp_castle_1_seneschal = 486
trp_castle_2_seneschal = 487
trp_castle_3_seneschal = 488
trp_castle_4_seneschal = 489
trp_castle_5_seneschal = 490
trp_castle_6_seneschal = 491
trp_castle_7_seneschal = 492
trp_castle_8_seneschal = 493
trp_castle_9_seneschal = 494
trp_castle_10_seneschal = 495
trp_castle_11_seneschal = 496
trp_castle_12_seneschal = 497
trp_castle_13_seneschal = 498
trp_castle_14_seneschal = 499
trp_castle_15_seneschal = 500
trp_castle_16_seneschal = 501
trp_castle_17_seneschal = 502
trp_castle_18_seneschal = 503
trp_castle_19_seneschal = 504
trp_castle_20_seneschal = 505
trp_castle_21_seneschal = 506
trp_castle_22_seneschal = 507
trp_castle_23_seneschal = 508
trp_castle_24_seneschal = 509
trp_castle_25_seneschal = 510
trp_castle_26_seneschal = 511
trp_castle_27_seneschal = 512
trp_castle_28_seneschal = 513
trp_castle_29_seneschal = 514
trp_castle_30_seneschal = 515
trp_castle_31_seneschal = 516
trp_castle_32_seneschal = 517
trp_castle_33_seneschal = 518
trp_castle_34_seneschal = 519
trp_castle_35_seneschal = 520
trp_castle_36_seneschal = 521
trp_castle_37_seneschal = 522
trp_castle_38_seneschal = 523
trp_castle_39_seneschal = 524
trp_castle_40_seneschal = 525
trp_castle_41_seneschal = 526
trp_castle_42_seneschal = 527
trp_castle_43_seneschal = 528
trp_castle_44_seneschal = 529
trp_castle_45_seneschal = 530
trp_castle_46_seneschal = 531
trp_castle_47_seneschal = 532
trp_castle_48_seneschal = 533
trp_town_1_arena_master = 534
trp_town_2_arena_master = 535
trp_town_3_arena_master = 536
trp_town_4_arena_master = 537
trp_town_5_arena_master = 538
trp_town_6_arena_master = 539
trp_town_7_arena_master = 540
trp_town_8_arena_master = 541
trp_town_9_arena_master = 542
trp_town_10_arena_master = 543
trp_town_11_arena_master = 544
trp_town_12_arena_master = 545
trp_town_13_arena_master = 546
trp_town_14_arena_master = 547
trp_town_15_arena_master = 548
trp_town_16_arena_master = 549
trp_town_17_arena_master = 550
trp_town_18_arena_master = 551
trp_town_19_arena_master = 552
trp_town_20_arena_master = 553
trp_town_21_arena_master = 554
trp_town_22_arena_master = 555
trp_town_1_armorer = 556
trp_town_2_armorer = 557
trp_town_3_armorer = 558
trp_town_4_armorer = 559
trp_town_5_armorer = 560
trp_town_6_armorer = 561
trp_town_7_armorer = 562
trp_town_8_armorer = 563
trp_town_9_armorer = 564
trp_town_10_armorer = 565
trp_town_11_armorer = 566
trp_town_12_armorer = 567
trp_town_13_armorer = 568
trp_town_14_armorer = 569
trp_town_15_armorer = 570
trp_town_16_armorer = 571
trp_town_17_armorer = 572
trp_town_18_armorer = 573
trp_town_19_armorer = 574
trp_town_20_armorer = 575
trp_town_21_armorer = 576
trp_town_22_armorer = 577
trp_town_1_weaponsmith = 578
trp_town_2_weaponsmith = 579
trp_town_3_weaponsmith = 580
trp_town_4_weaponsmith = 581
trp_town_5_weaponsmith = 582
trp_town_6_weaponsmith = 583
trp_town_7_weaponsmith = 584
trp_town_8_weaponsmith = 585
trp_town_9_weaponsmith = 586
trp_town_10_weaponsmith = 587
trp_town_11_weaponsmith = 588
trp_town_12_weaponsmith = 589
trp_town_13_weaponsmith = 590
trp_town_14_weaponsmith = 591
trp_town_15_weaponsmith = 592
trp_town_16_weaponsmith = 593
trp_town_17_weaponsmith = 594
trp_town_18_weaponsmith = 595
trp_town_19_weaponsmith = 596
trp_town_20_weaponsmith = 597
trp_town_21_weaponsmith = 598
trp_town_22_weaponsmith = 599
trp_town_1_tavernkeeper = 600
trp_town_2_tavernkeeper = 601
trp_town_3_tavernkeeper = 602
trp_town_4_tavernkeeper = 603
trp_town_5_tavernkeeper = 604
trp_town_6_tavernkeeper = 605
trp_town_7_tavernkeeper = 606
trp_town_8_tavernkeeper = 607
trp_town_9_tavernkeeper = 608
trp_town_10_tavernkeeper = 609
trp_town_11_tavernkeeper = 610
trp_town_12_tavernkeeper = 611
trp_town_13_tavernkeeper = 612
trp_town_14_tavernkeeper = 613
trp_town_15_tavernkeeper = 614
trp_town_16_tavernkeeper = 615
trp_town_17_tavernkeeper = 616
trp_town_18_tavernkeeper = 617
trp_town_19_tavernkeeper = 618
trp_town_20_tavernkeeper = 619
trp_town_21_tavernkeeper = 620
trp_town_22_tavernkeeper = 621
trp_town_1_merchant = 622
trp_town_2_merchant = 623
trp_town_3_merchant = 624
trp_town_4_merchant = 625
trp_town_5_merchant = 626
trp_town_6_merchant = 627
trp_town_7_merchant = 628
trp_town_8_merchant = 629
trp_town_9_merchant = 630
trp_town_10_merchant = 631
trp_town_11_merchant = 632
trp_town_12_merchant = 633
trp_town_13_merchant = 634
trp_town_14_merchant = 635
trp_town_15_merchant = 636
trp_town_16_merchant = 637
trp_town_17_merchant = 638
trp_town_18_merchant = | |
in memory
nLC = len(self.loadCases)
self.PLN = [0] * nLC
self.PLFx = [0] * nLC
self.PLFy = [0] * nLC
self.PLFz = [0] * nLC
self.PLMx = [0] * nLC
self.PLMy = [0] * nLC
self.PLMz = [0] * nLC
for icase, lc in enumerate(self.loadCases):
gx = lc.gx
gy = lc.gy
gz = lc.gz
# copy data over for this case
self.PLN[icase] = np.copy(lc.NF)
self.PLFx[icase] = np.copy(lc.Fx)
self.PLFy[icase] = np.copy(lc.Fy)
self.PLFz[icase] = np.copy(lc.Fz)
self.PLMx[icase] = np.copy(lc.Mxx)
self.PLMy[icase] = np.copy(lc.Myy)
self.PLMz[icase] = np.copy(lc.Mzz)
for iextra in range(len(self.ENMnode)):
Nm = self.ENMnode[iextra]
mass = self.ENMmass[iextra]
x = self.ENMrhox[iextra]
y = self.ENMrhoy[iextra]
z = self.ENMrhoz[iextra]
# check if a point load already exists for this node
if Nm in self.PLN[icase]:
idx = np.where(self.PLN[icase] == Nm)[0]
# if so just add it
self.PLFx[icase][idx] += mass * gx
self.PLFy[icase][idx] += mass * gy
self.PLFz[icase][idx] += mass * gz
self.PLMx[icase][idx] += mass * (y * gz - z * gy)
self.PLMy[icase][idx] += mass * (z * gx - x * gz)
self.PLMz[icase][idx] += mass * (x * gy - y * gx)
else:
# otherwise append to end
self.PLN[icase] = np.concatenate([self.PLN[icase], [Nm]])
self.PLFx[icase] = np.concatenate([self.PLFx[icase], [mass * gx]])
self.PLFy[icase] = np.concatenate([self.PLFy[icase], [mass * gy]])
self.PLFz[icase] = np.concatenate([self.PLFz[icase], [mass * gz]])
self.PLMx[icase] = np.concatenate([self.PLMx[icase], [mass * (y * gz - z * gy)]])
self.PLMy[icase] = np.concatenate([self.PLMy[icase], [mass * (z * gx - x * gz)]])
self.PLMz[icase] = np.concatenate([self.PLMz[icase], [mass * (x * gy - y * gx)]])
lc.pL = C_PointLoads(
len(self.PLN[icase]),
ip(self.PLN[icase]),
dp(self.PLFx[icase]),
dp(self.PLFy[icase]),
dp(self.PLFz[icase]),
dp(self.PLMx[icase]),
dp(self.PLMy[icase]),
dp(self.PLMz[icase]),
)
if self.addGravityLoadForExtraElementMass:
L = self.eL
# add to interior point load
# save all data in memory
nLC = len(self.loadCases)
self.IPLE = np.array([[] * nLC])
self.IPLPx = np.array([[] * nLC])
self.IPLPy = np.array([[] * nLC])
self.IPLPz = np.array([[] * nLC])
self.IPLxE = np.array([[] * nLC])
for icase, lc in enumerate(self.loadCases):
gx = lc.gx
gy = lc.gy
gz = lc.gz
# iterate through additional mass
for iextra in range(len(self.EEMelement)):
element = self.EEMelement[iextra]
mass = self.EEMmass[iextra]
LE = L[element - 1]
# check whether an element load already exists for this element
if element in self.IPLE[icase]:
idx = np.where(self.IPLE[icase] == element)[0]
# if so we just add the weight loads
self.IPLPx[icase][idx] += mass * gx
self.IPLPy[icase][idx] += mass * gy
self.IPLPz[icase][idx] += mass * gz
# TODO: assumes xE does not change
else:
# otherwise append to the end
self.IPLE[icase] = np.append(self.IPLE[icase], element)
self.IPLPx[icase] = np.append(self.IPLPx[icase], mass * gx)
self.IPLPy[icase] = np.append(self.IPLPy[icase], mass * gy)
self.IPLPz[icase] = np.append(self.IPLPz[icase], mass * gz)
self.IPLxE[icase] = np.append(self.IPLxE[icase], 0.5 * LE)
# self.IPLE = np.concatenate([lc.ELE, element])
# self.IPLPx = np.concatenate([lc.Px, mass*gx])
# self.IPLPy = np.concatenate([lc.Py, mass*gy])
# self.IPLPz = np.concatenate([lc.Pz, mass*gz])
# self.IPLxE = np.concatenate([lc.xE, 0.5*LE])
lc.eL = C_ElementLoads(
len(self.IPLE[icase]),
ip(self.IPLE[icase]),
dp(self.IPLPx[icase]),
dp(self.IPLPy[icase]),
dp(self.IPLPz[icase]),
dp(self.IPLxE[icase]),
)
def run(self):
nCases = len(self.loadCases) # number of load cases
nN = len(self.nodes.node) # number of nodes
nE = len(self.elements.element) # number of elements
nR = len(self.reactions.node) # number of reactions
nM = self.nM # number of modes
if nCases == 0:
print("error: must have at least 1 load case")
return
self.__addGravityToExtraMass()
# initialize output arrays
dout = NodeDisplacements(
np.zeros((nCases, nN), dtype=np.int32),
np.zeros((nCases, nN)),
np.zeros((nCases, nN)),
np.zeros((nCases, nN)),
np.zeros((nCases, nN)),
np.zeros((nCases, nN)),
np.zeros((nCases, nN)),
)
fout = ElementEndForces(
np.zeros((nCases, 2 * nE), dtype=np.int32),
np.zeros((nCases, 2 * nE), dtype=np.int32),
np.zeros((nCases, 2 * nE)),
np.zeros((nCases, 2 * nE)),
np.zeros((nCases, 2 * nE)),
np.zeros((nCases, 2 * nE)),
np.zeros((nCases, 2 * nE)),
np.zeros((nCases, 2 * nE)),
)
rout = NodeReactions(
np.zeros((nCases, nR), dtype=np.int32),
np.zeros((nCases, nR)),
np.zeros((nCases, nR)),
np.zeros((nCases, nR)),
np.zeros((nCases, nR)),
np.zeros((nCases, nR)),
np.zeros((nCases, nR)),
)
dx = self.options.dx
ifout = [0] * nE
for i in range(nE):
L = self.eL[i]
nIF = int(max(math.floor(L / dx), 1)) + 1
ifout[i] = InternalForces(
np.zeros((nCases, nIF)),
np.zeros((nCases, nIF)),
np.zeros((nCases, nIF)),
np.zeros((nCases, nIF)),
np.zeros((nCases, nIF)),
np.zeros((nCases, nIF)),
np.zeros((nCases, nIF)),
np.zeros((nCases, nIF)),
np.zeros((nCases, nIF)),
np.zeros((nCases, nIF)),
np.zeros((nCases, nIF)),
)
mout = NodeMasses(
0.0,
0.0,
np.zeros(nN, dtype=np.int32),
np.zeros(nN),
np.zeros(nN),
np.zeros(nN),
np.zeros(nN),
np.zeros(nN),
np.zeros(nN),
)
modalout = Modes(
np.zeros(nM),
np.zeros(nM),
np.zeros(nM),
np.zeros(nM),
np.zeros((nM, nN), dtype=np.int32),
np.zeros((nM, nN)),
np.zeros((nM, nN)),
np.zeros((nM, nN)),
np.zeros((nM, nN)),
np.zeros((nM, nN)),
np.zeros((nM, nN)),
)
# create c structs
c_loadcases = (C_LoadCase * nCases)()
c_disp = (C_Displacements * nCases)()
c_forces = (C_Forces * nCases)()
c_reactions = (C_ReactionForces * nCases)()
c_internalForces = (POINTER(C_InternalForces) * nCases)()
for i in range(nCases):
lci = self.loadCases[i]
c_loadcases[i] = C_LoadCase(lci.gx, lci.gy, lci.gz, lci.pL, lci.uL, lci.tL, lci.eL, lci.tempL, lci.pD)
c_disp[i] = C_Displacements(
ip(dout.node[i, :]),
dp(dout.dx[i, :]),
dp(dout.dy[i, :]),
dp(dout.dz[i, :]),
dp(dout.dxrot[i, :]),
dp(dout.dyrot[i, :]),
dp(dout.dzrot[i, :]),
)
c_forces[i] = C_Forces(
ip(fout.element[i, :]),
ip(fout.node[i, :]),
dp(fout.Nx[i, :]),
dp(fout.Vy[i, :]),
dp(fout.Vz[i, :]),
dp(fout.Txx[i, :]),
dp(fout.Myy[i, :]),
dp(fout.Mzz[i, :]),
)
c_reactions[i] = C_ReactionForces(
ip(rout.node[i, :]),
dp(rout.Fx[i, :]),
dp(rout.Fy[i, :]),
dp(rout.Fz[i, :]),
dp(rout.Mxx[i, :]),
dp(rout.Myy[i, :]),
dp(rout.Mzz[i, :]),
)
c_internalForces[i] = (C_InternalForces * nE)()
for j in range(nE):
(c_internalForces[i])[j] = C_InternalForces(
dp(ifout[j].x[i, :]),
dp(ifout[j].Nx[i, :]),
dp(ifout[j].Vy[i, :]),
dp(ifout[j].Vz[i, :]),
dp(ifout[j].Tx[i, :]),
dp(ifout[j].My[i, :]),
dp(ifout[j].Mz[i, :]),
dp(ifout[j].Dx[i, :]),
dp(ifout[j].Dy[i, :]),
dp(ifout[j].Dz[i, :]),
dp(ifout[j].Rx[i, :]),
)
total_mass = c_double()
struct_mass = c_double()
c_massResults = C_MassResults(
pointer(total_mass),
pointer(struct_mass),
ip(mout.node),
dp(mout.xmass),
dp(mout.ymass),
dp(mout.zmass),
dp(mout.xinrta),
dp(mout.yinrta),
dp(mout.zinrta),
)
c_modalResults = (C_ModalResults * nM)()
freq = [0] * nM
xmpf = [0] * nM
ympf = [0] * nM
zmpf = [0] * nM
for i in range(nM):
freq[i] = c_double()
xmpf[i] = c_double()
ympf[i] = c_double()
zmpf[i] = c_double()
c_modalResults[i] = C_ModalResults(
pointer(freq[i]),
pointer(xmpf[i]),
pointer(ympf[i]),
pointer(zmpf[i]),
ip(modalout.node[i, :]),
dp(modalout.xdsp[i, :]),
dp(modalout.ydsp[i, :]),
dp(modalout.zdsp[i, :]),
dp(modalout.xrot[i, :]),
dp(modalout.yrot[i, :]),
dp(modalout.zrot[i, :]),
)
# set dynamics data
exagg_modal = 1.0 # not used
c_dynamicData = C_DynamicData(self.nM, self.Mmethod, self.lump, self.tol, self.shift, exagg_modal)
exitCode = self._pyframe3dd.run(
self.c_nodes,
self.c_reactions,
self.c_elements,
self.c_other,
nCases,
c_loadcases,
c_dynamicData,
self.c_extraInertia,
self.c_extraMass,
self.c_condensation,
c_disp,
c_forces,
c_reactions,
c_internalForces,
c_massResults,
c_modalResults,
)
nantest = np.isnan(np.c_[fout.Nx, fout.Vy, fout.Vz, fout.Txx, fout.Myy, fout.Mzz])
if (exitCode == 182 or exitCode == 183) and not np.any(nantest):
pass
elif exitCode != 0 or np.any(nantest):
raise RuntimeError("Frame3DD did not exit gracefully")
# put mass values back in since tuple is read only
mout = NodeMasses(
total_mass.value,
struct_mass.value,
mout.node,
mout.xmass,
mout.ymass,
mout.zmass,
mout.xinrta,
mout.yinrta,
mout.zinrta,
)
# put modal results back in
for i in range(nM):
modalout.freq[i] = freq[i].value
modalout.xmpf[i] = xmpf[i].value
modalout.ympf[i] = ympf[i].value
modalout.zmpf[i] = zmpf[i].value
return dout, fout, rout, ifout, mout, modalout
def write(self, fname):
f = open(fname, "w")
f.write("pyFrame3dd auto-generated file\n")
f.write("\n")
f.write(str(len(self.nnode)) + " # number of nodes\n")
f.write("#.node x y z r\n")
f.write("# m m m m\n")
f.write("\n")
for k in range(len(self.nnode)):
f.write(
str(self.nnode[k])
+ "\t"
+ str(self.nx[k])
+ "\t"
+ str(self.ny[k])
+ "\t"
+ str(self.nz[k])
+ "\t"
+ str(self.nr[k])
+ "\n"
)
f.write("\n")
f.write(str(len(self.rnode)) + " # number of nodes with reactions\n")
f.write("#.n x y z xx yy zz 1=fixed, 0=free\n")
for k in range(len(self.rnode)):
f.write(
str(self.rnode[k])
+ "\t"
+ str(self.rKx[k])
+ "\t"
+ str(self.rKy[k])
+ "\t"
+ str(self.rKz[k])
+ "\t"
+ str(self.rKtx[k])
+ "\t"
+ str(self.rKty[k])
+ "\t"
+ str(self.rKtz[k])
+ "\n"
)
f.write("\n")
f.write(str(len(self.eelement)) + " # number of frame elements\n")
f.write("#.e n1 n2 Ax Asy Asz Jxx Iyy Izz E G roll density\n")
f.write("# . . m^2 m^2 m^2 m^4 m^4 m^4 Pa Pa deg kg/m^3\n")
for k in range(len(self.eelement)):
f.write(
str(self.eelement[k])
+ "\t"
+ str(self.eN1[k])
+ "\t"
+ str(self.eN2[k])
+ "\t"
+ str(self.eAx[k])
+ "\t"
+ str(self.eAsy[k])
+ "\t"
+ str(self.eAsz[k])
+ "\t"
+ str(self.eJx[k])
+ "\t"
+ str(self.eIy[k])
+ "\t"
+ str(self.eIz[k])
+ "\t"
+ str(self.eE[k])
+ "\t"
+ str(self.eG[k])
+ "\t"
+ str(self.eroll[k])
+ "\t"
+ str(self.edensity[k])
+ "\n"
)
f.write("\n")
f.write("\n")
ishear = 1 if self.options.shear else 0
igeom = 1 if self.options.geom else 0
f.write(str(ishear) + " # 1: include shear deformation\n")
f.write(str(igeom) + " # 1: include geometric stiffness\n")
f.write("10.0 # exaggerate static mesh deformations\n")
f.write("2.5 # zoom scale for 3D plotting\n")
f.write(str(self.options.dx) + " # x-axis increment for internal forces, m\n")
f.write("\n")
f.write(str(len(self.loadCases)) + " # number of static load cases\n")
for iC in range(len(self.loadCases)):
mylc | |
# <gh_stars>0
#######################################################################
# Module: SQLiteDataAccessLayer
# Purpose: This class has all the data access code specific to SQLite
#######################################################################
from core.expense_category import ExpenseCategory
from core.store import Store
from core.expense import Expense
from core.person import Person
from core.expense_category import ExpenseCategory
from core.payment_type import PaymentType
from core.da.dal import DataAccessLayer
from core.da.sqlitedriver import SQLiteDriver
from core.constants import _DATE_STR_DISPLAY_FORMAT_
from core.constants import _DATE_STR_STORAGE_FORMAT_
class SQLiteDataAccessLayer( DataAccessLayer ):
def __init__( self ) :
# TODO: The following line should not be hard coded and should come from param
self.data_file = './db/expenses.db'
# All expense methods
def addExpense( self, expense: Expense ) -> None:
print( 'sqlite_dat: add Expense' )
expense_detail = expense.getExpenseDetail()
expense_date = expense.getExpenseDate().strftime( _DATE_STR_STORAGE_FORMAT_ )
expense_amount = expense.getExpenseAmount()
person_id = expense.getPerson().getId()
store_id = expense.getStore().getId()
expense_category_id = expense.getExpenseCategory().getId()
payment_type_id = expense.getPaymentType().getId()
with SQLiteDriver( self.data_file ) as cursor:
_SQL = """INSERT INTO EXPENSE( EXPENSE_DETAIL, EXPENSE_DATE, EXPENSE_AMOUNT, EXPENSE_CATEGORY_ID, PAYMENT_TYPE_ID, PERSON_ID, STORE_ID ) VALUES( ?, ?, ?, ?, ?, ?, ? )"""
cursor.execute(_SQL, (expense_detail, expense_date, expense_amount, expense_category_id, payment_type_id, person_id, store_id ))
def deleteExpense( self, id : str ) -> None :
with SQLiteDriver( self.data_file ) as cursor:
_SQL = """DELETE FROM EXPENSE WHERE ID = ? """
cursor.execute(_SQL, (id, ))
def listExpenses( self ) -> list:
contents = []
with SQLiteDriver( self.data_file ) as cursor:
_SQL = """ SELECT ID, EXPENSE_DETAIL, EXPENSE_DATE, EXPENSE_AMOUNT,
EXPENSE_CATEGORY_ID, PAYMENT_TYPE_ID, PERSON_ID, STORE_ID FROM EXPENSE """
_ORDERBY = " ORDER BY EXPENSE.EXPENSE_DATE DESC "
cursor.execute(_SQL + _ORDERBY)
contents = cursor.fetchall()
print( contents )
return contents
# Expense category methods
def addExpenseCategory( self, expense_category : ExpenseCategory ) -> None :
expense_type : str = expense_category.getExpenseType()
expense_type_detail : str = expense_category.getExpenseDetail()
with SQLiteDriver( self.data_file ) as cursor:
_SQL = """INSERT INTO EXPENSE_CATEGORY(expense_type, expense_type_detail ) VALUES( ?, ? )"""
cursor.execute(_SQL, (expense_type, expense_type_detail))
def deleteExpenseCategory( self, id : str ) -> None :
with SQLiteDriver( self.data_file ) as cursor:
_SQL = """DELETE FROM EXPENSE_CATEGORY WHERE ID = ? """
cursor.execute(_SQL, (id, ))
def listExpenseCategories( self ) -> list :
contents = []
with SQLiteDriver( self.data_file ) as cursor:
_SQL = """ SELECT ID, expense_type, expense_type_detail FROM expense_category """
_ORDERBY = """ ORDER BY expense_type """
cursor.execute(_SQL + _ORDERBY)
contents = cursor.fetchall()
return contents
# Store(s) methods
def addStore( self, store : Store ) -> None :
store_name = store.getStoreName()
store_detail = store.getStoreDetail()
home_delivery = store.getHomeDeliveryString()
with SQLiteDriver( self.data_file ) as cursor:
_SQL = """INSERT INTO STORE(STORE_NAME, STORE_DETAIL, HOME_DELIVERY ) VALUES( ?, ?, ? )"""
cursor.execute(_SQL, (store_name, store_detail, home_delivery))
def deleteStore( self, id : str ) -> None :
with SQLiteDriver( self.data_file ) as cursor:
_SQL = """DELETE FROM STORE WHERE ID = ? """
cursor.execute(_SQL, (id, ))
def listStores( self ) -> list :
contents = []
with SQLiteDriver( self.data_file ) as cursor:
_SQL = """ SELECT ID, STORE_NAME, STORE_DETAIL, HOME_DELIVERY FROM STORE """
_ORDERBY = """ ORDER BY STORE_NAME """
cursor.execute(_SQL + _ORDERBY)
contents = cursor.fetchall()
return contents
# Payment Type methods
def addPaymentType( self, payment_type : PaymentType ) -> None:
payment_mode = payment_type.getPaymentMode()
payment_mode_detail = payment_type.getPaymentModeDetail()
with SQLiteDriver( self.data_file ) as cursor:
_SQL = """ INSERT INTO PAYMENT_TYPE(PAYMENT_MODE, PAYMENT_MODE_DETAIL ) VALUES( ?, ? ) """
cursor.execute(_SQL, (payment_mode, payment_mode_detail))
def deletePaymentType( self, id : str ) -> None:
with SQLiteDriver( self.data_file ) as cursor:
_SQL = """DELETE FROM PAYMENT_TYPE WHERE ID = ? """
cursor.execute(_SQL, (id, ))
def listPaymentTypes( self ) -> list:
contents = []
with SQLiteDriver( self.data_file ) as cursor:
_SQL = """SELECT ID, PAYMENT_MODE, PAYMENT_MODE_DETAIL from PAYMENT_TYPE ORDER BY PAYMENT_MODE """
cursor.execute( _SQL )
contents = cursor.fetchall()
return contents
# Person methods
def addPerson( self, person : Person ) -> None:
person_first_name = person.getFirstName()
person_last_name = person.getLastName()
person_short_name = person.getShortName()
with SQLiteDriver( self.data_file ) as cursor:
_SQL = """INSERT INTO PERSON(PERSON_FIRST_NAME, PERSON_LAST_NAME, PERSON_SHORT_NAME ) VALUES( ?, ?, ? )"""
cursor.execute(_SQL, (person_first_name, person_last_name, person_short_name))
def deletePerson( self, id : str ) -> None :
with SQLiteDriver( self.data_file ) as cursor:
_SQL = """DELETE FROM PERSON WHERE ID = ? """
cursor.execute(_SQL, (id, ))
def listPeople( self ) -> list:
contents = []
with SQLiteDriver( self.data_file ) as cursor:
_SQL = """SELECT ID, PERSON_FIRST_NAME, PERSON_LAST_NAME, PERSON_SHORT_NAME from PERSON """
cursor.execute( _SQL )
contents = cursor.fetchall()
return contents
# Other utility methods
def getPersonByShortName( self, short_name : str ) -> Person:
contents = []
with SQLiteDriver( self.data_file ) as cursor:
_SQL = """SELECT ID, PERSON_FIRST_NAME, PERSON_LAST_NAME, PERSON_SHORT_NAME from PERSON """
_WHERE = "WHERE PERSON_SHORT_NAME = ? "
cursor.execute( _SQL + _WHERE, (short_name,) )
contents = cursor.fetchall()
print( contents )
if contents:
content = contents[ 0 ]
return Person( content[ 0 ], content[ 1 ], content[ 2 ], content[ 3 ])
else:
return None
def getStoreByStoreName( self, store_name : str ) -> Store:
contents = []
with SQLiteDriver( self.data_file ) as cursor:
_SQL = """ SELECT ID, STORE_NAME, STORE_DETAIL, HOME_DELIVERY FROM STORE """
_WHERE = """ WHERE STORE_NAME = ? """
cursor.execute(_SQL + _WHERE, (store_name, ) )
contents = cursor.fetchall()
if contents:
content = contents[ 0 ]
return Store( content[ 0 ], content[ 1 ], content[ 2 ], content[ 3 ])
else:
return None
def getPaymentTypeByMode( self, payment_mode : str ) -> PaymentType:
contents = []
with SQLiteDriver( self.data_file ) as cursor:
_SQL = """SELECT ID, PAYMENT_MODE, PAYMENT_MODE_DETAIL from PAYMENT_TYPE """
_WHERE = """ WHERE PAYMENT_MODE = ? """
cursor.execute( _SQL + _WHERE, (payment_mode, ) )
contents = cursor.fetchall()
if contents:
content = contents[ 0 ]
return PaymentType( content[ 0 ], content[ 1 ], content[ 2 ])
else:
return None
def getExpenseCategoryByExpenseType( self, expense_type : str ) -> ExpenseCategory:
contents = []
with SQLiteDriver( self.data_file ) as cursor:
_SQL = """ SELECT ID, EXPENSE_TYPE, EXPENSE_TYPE_DETAIL FROM expense_category """
_WHERE = """ WHERE EXPENSE_TYPE = ? """
cursor.execute(_SQL + _WHERE, ( expense_type, ))
contents = cursor.fetchall()
print( contents )
if contents:
content = contents[ 0 ]
return ExpenseCategory( content[ 0 ], content[ 1 ], content[ 2 ] )
else:
return None
# All reporting functions
def getExpenseSummaryForMonth( self, search_string : str ) -> tuple:
contents = []
with SQLiteDriver( self.data_file ) as cursor:
_SQL = """ SELECT SUBSTR( EXPENSE_DATE, 1, 7 ), COUNT( ID ), SUM(EXPENSE_AMOUNT) FROM EXPENSE GROUP BY SUBSTR( EXPENSE_DATE, 1, 7 ) """
_WHERE = """ HAVING SUBSTR( EXPENSE_DATE, 1, 7) = ? """
cursor.execute( _SQL + _WHERE, ( search_string, ) )
contents = cursor.fetchall()
if contents:
content = contents[ 0 ]
return ( content[ 0 ], content[ 1 ], content[ 2 ] )
else:
return None
def getMonthwiseExpenseSummary( self ) -> list:
contents = []
with SQLiteDriver( self.data_file ) as cursor:
_SQL = """ SELECT SUBSTR( EXPENSE_DATE, 1, 7 ), COUNT( ID ), SUM(EXPENSE_AMOUNT) FROM EXPENSE GROUP BY SUBSTR( EXPENSE_DATE, 1, 7 ) ORDER BY EXPENSE_DATE DESC """
cursor.execute( _SQL )
contents = cursor.fetchall()
if contents:
return contents
else:
return None
def getMonthwiseCategoryExpense( self ) -> list:
contents = []
with SQLiteDriver( self.data_file ) as cursor:
_SQL = """ SELECT SUBSTR( EXPENSE_DATE, 1, 7), EXPENSE_CATEGORY.EXPENSE_TYPE, COUNT( EXPENSE.ID), SUM( EXPENSE_AMOUNT ) from EXPENSE, EXPENSE_CATEGORY WHERE EXPENSE.EXPENSE_CATEGORY_ID = EXPENSE_CATEGORY.ID GROUP BY SUBSTR( EXPENSE_DATE, 1, 7), EXPENSE_CATEGORY.EXPENSE_TYPE ORDER BY EXPENSE_DATE DESC, EXPENSE_CATEGORY.EXPENSE_TYPE """
cursor.execute( _SQL )
contents = cursor.fetchall()
if contents:
return contents
else:
return None
def getMonthwisePaymentTypeSummary( self ) -> list:
contents = []
with SQLiteDriver( self.data_file ) as cursor:
_SQL = """ SELECT SUBSTR( EXPENSE_DATE, 1, 7), PAYMENT_TYPE.PAYMENT_MODE, COUNT( EXPENSE.ID), SUM( EXPENSE_AMOUNT ) from EXPENSE, PAYMENT_TYPE WHERE EXPENSE.PAYMENT_TYPE_ID = PAYMENT_TYPE.ID GROUP BY SUBSTR( EXPENSE_DATE, 1, 7), PAYMENT_TYPE.PAYMENT_MODE ORDER BY EXPENSE_DATE DESC, PAYMENT_TYPE.PAYMENT_MODE """
cursor.execute( _SQL )
contents = cursor.fetchall()
if contents:
return contents
else:
return None
def | |
= Embed(
title="Akinator",
description="I think it is **{0.first_guess[name]} - {0.first_guess[description]}**\n\nWas I right?".format(aki)
)
e.set_footer(text=f"Thanks for playing, {ctx.author.name}!")
e.set_image(url=aki.first_guess["absolute_picture_path"])
e.set_thumbnail(url=akimage)
except Exception as ex:
funcs.printError(ctx, ex)
e = funcs.errorEmbed(None, "Server error.")
await ctx.send(embed=e)
self.gameChannels.remove(ctx.channel.id)
    @commands.cooldown(1, 10, commands.BucketType.user)
    @commands.command(name="guessthenumber", description="Play Guess the Number.", aliases=["gtn", "gn"])
    async def guessthenumber(self, ctx):
        """Run an interactive Guess the Number session in the channel.

        Picks a random number in 1-10000 and loops until the player guesses
        it, quits, or idles past the 30-second prompt timeout.
        """
        # Only one game at a time per channel.
        if await self.checkGameInChannel(ctx):
            return
        await ctx.send("**Welcome to Guess the Number. A random number between " +
                       "1-10000 will be generated and your job is to guess it. " +
                       "Input `time` to see total elapsed time, or `quit` to quit the game.**")
        self.gameChannels.append(ctx.channel.id)
        starttime = time()
        number = randint(1, 10000)
        attempts = 0
        # Sentinel "" never equals the int target, so the loop always starts.
        guess = ""
        while guess != number:
            await ctx.send("`Attempt {:,} for {}. Please guess a number between 1-10000.`".format(attempts + 1, ctx.author.name))
            try:
                # Only accept messages from the invoking user in this channel.
                message = await self.client.wait_for(
                    "message", check=lambda msg: msg.author == ctx.author and msg.channel == ctx.channel, timeout=30
                )
            except TimeoutError:
                await ctx.send(f"`{ctx.author.name} has left Guess the Number for idling for too long.`")
                break
            try:
                # Tolerate spaces and thousands separators in the guess.
                guess = int(message.content.replace(" ", "").replace(",", ""))
                if not 1 <= guess <= 10000:
                    await ctx.send(embed=funcs.errorEmbed(None, "Input must be 1-10000 inclusive."))
                else:
                    # Out-of-range guesses do not count as attempts.
                    attempts += 1
                    if guess < number:
                        await ctx.send("`The number is larger than your guess. Guess higher!`")
                    elif guess > number:
                        await ctx.send("`The number is smaller than your guess. Guess lower!`")
                    else:
                        await ctx.send("`You have found the number!`")
            except ValueError:
                # Non-numeric input: treat as a command word.
                if message.content.casefold() == "quit" or message.content.casefold() == "exit" \
                        or message.content.casefold() == "stop":
                    break
                elif message.content.casefold() == "time":
                    m, s = funcs.minSecs(time(), starttime)
                    await funcs.sendTime(ctx, m, s)
                else:
                    await ctx.send(embed=funcs.errorEmbed(None, "Invalid input."))
        # Reveal the answer whether the game was won, quit, or timed out.
        await ctx.send("```The number was {:,}.\n\nTotal attempts: {:,}\n\n".format(number, attempts) +
                       f"Thanks for playing, {ctx.author.name}!```")
        m, s = funcs.minSecs(time(), starttime)
        await funcs.sendTime(ctx, m, s)
        self.gameChannels.remove(ctx.channel.id)
    @commands.cooldown(1, 10, commands.BucketType.user)
    @commands.command(name="bullsandcows", description="Play Bulls and Cows.",
                      aliases=["bc", "bulls", "cows", "cowsandbulls", "mastermind"])
    async def bullsandcows(self, ctx):
        """Run an interactive Bulls and Cows session in the channel.

        Game state lives in games.BullsAndCows; this loop only relays
        prompts and results until the game reports it has ended.
        """
        # Only one game at a time per channel.
        if await self.checkGameInChannel(ctx):
            return
        await ctx.send("**Welcome to Bulls and Cows. Input `help` for help, " +
                       "`time` to see total elapsed time, or `quit` to quit the game.**")
        self.gameChannels.append(ctx.channel.id)
        game = games.BullsAndCows()
        while not game.getGameEnd():
            await ctx.send("`Attempt {:,} for {}. ".format(game.getAttempts() + 1, ctx.author.name) +
                           "Please guess a four-digit number with no duplicates.`")
            try:
                message = await self.client.wait_for(
                    "message", check=lambda msg: msg.author == ctx.author and msg.channel == ctx.channel, timeout=90
                )
            except TimeoutError:
                await ctx.send(f"`{ctx.author.name} has left Bulls and Cows for idling for too long.`")
                break
            try:
                guess = message.content
                # NOTE(review): game.guess() is fed every message, including
                # "time"/"help"/"quit" — presumably it recognizes these control
                # words (and raises on invalid digit guesses, handled below);
                # confirm against games.BullsAndCows.
                bulls, cows = game.guess(guess)
                if guess.casefold() == "time":
                    m, s = game.getTime()
                    await funcs.sendTime(ctx, m, s)
                elif guess.casefold() == "help":
                    await ctx.send(
                        "```== Bulls and Cows ==\n\nBulls and Cows is a code-breaking logic game, " +
                        "originally played using pencil and paper, where one tries to guess a number that" +
                        " has been randomly generated by the bot. This game has inspired the commercially" +
                        " marketed board game Mastermind and possibly predates it by over a century.\n\n" +
                        "The randomly generated number contains exactly four digits between 0 and 9, and" +
                        " unlike Guess the Number, the digits have no repeats.\n\nExample of a valid guess:" +
                        " 1234\nExample of an invalid guess: 1244 (The digit 4 has been used twice when it " +
                        "can only be used once.)\n\nIn the game, the player is asked to enter a four-digit " +
                        "number which is then compared to the randomly generated four-digit number; each " +
                        "individual digit entered by the player is compared to each digit within the randomly" +
                        " generated number. If a digit in the player's guess is in the randomly generated " +
                        "number and is in the same position in it as it was in their number, then it is " +
                        'scored as a "bull". If that same digit is in a different position, then it is marked' +
                        ' as a "cow".\n\nThe goal of this particular version of the game is to find all four ' +
                        "bulls in the shortest amount of time, using as few attempts as possible.\n\nExample:\n" +
                        "Randomly generated number: 1234\nGuess: 1325\nResult: 1 bull and 2 cows. (1 is the bull," +
                        " whereas 2 and 3 are the cows. 5 is not in the randomly generated number, hence it is " +
                        "not scored.)```"
                    )
                elif guess.casefold() == "quit" or guess.casefold() == "exit" or guess.casefold() == "stop":
                    # Loop condition re-checks game.getGameEnd() on continue.
                    continue
                else:
                    await ctx.send(f"`Result: {bulls} bull{'' if bulls == 1 else 's'} and " +
                                   f"{cows} cow{'' if cows == 1 else 's'}." +
                                   f"{'' if bulls != 4 else ' You have found the number!'}`")
            except Exception as ex:
                # game.guess raises on malformed guesses; surface the message.
                await ctx.send(embed=funcs.errorEmbed(None, str(ex)))
        await ctx.send("```The number was {}.\n\nTotal attempts: {:,}\n\n".format(game.getNumber(sep=True), game.getAttempts()) +
                       f"Thanks for playing, {ctx.author.name}!```")
        m, s = game.getTime()
        await funcs.sendTime(ctx, m, s)
        self.gameChannels.remove(ctx.channel.id)
    @commands.cooldown(1, 10, commands.BucketType.user)
    @commands.command(name="21cardtrick", description="Play the 21 Card Trick.", aliases=["ct", "21", "cardtrick"], hidden=True)
    async def cardtrick(self, ctx):
        """Run the classic 21-card trick: three deal/pick rounds, then the
        chosen card is revealed (games.CardTrick implements the shuffle)."""
        # Only one game at a time per channel.
        if await self.checkGameInChannel(ctx):
            return
        await ctx.send("**Welcome to the 21 Card Trick. " +
                       "Pick a card from one of the three piles and I will try to guess it.**")
        self.gameChannels.append(ctx.channel.id)
        game = games.CardTrick()
        cardSample = game.getSample()
        # Three rounds of deal-and-pick is what makes the trick deterministic.
        for _ in range(3):
            p1, p2, p3 = game.piles(cardSample)
            await ctx.send(f"```{game.showCards(p1, p2, p3)}```")
            while True:
                await ctx.send(f"`Which pile is your card in, {ctx.author.name}? " +
                               "Enter either 1, 2, or 3 to pick a pile, or 'quit' quit the game.`")
                try:
                    message = await self.client.wait_for(
                        "message", check=lambda msg: msg.author == ctx.author and msg.channel == ctx.channel, timeout=30
                    )
                    content = message.content
                    if content.casefold() == "quit" or content.casefold() == "exit" or content.casefold() == "stop":
                        self.gameChannels.remove(ctx.channel.id)
                        return await ctx.send(f"`{ctx.author.name} has left the 21 Card Trick.`")
                    userchoice = int(content)
                    if not 1 <= userchoice <= 3:
                        await ctx.send(embed=funcs.errorEmbed(None, "Input must be 1-3 inclusive."))
                    else:
                        # Re-stack with the chosen pile handled by the game logic.
                        cardSample = game.shuffle(userchoice, p1, p2, p3)
                        break
                except TimeoutError:
                    self.gameChannels.remove(ctx.channel.id)
                    return await ctx.send(f"`{ctx.author.name} has left the 21 Card Trick for idling for too long.`")
                except ValueError:
                    await ctx.send(embed=funcs.errorEmbed(None, "Invalid input."))
        # After three rounds the picked card sits at index 10 (the middle of 21).
        cardName, cardImg = game.getCardNameImg(cardSample[10])
        e = Embed(title="21 Card Trick")
        e.add_field(name=f"{ctx.author.name}'s Card", value=f"`{cardName}`")
        e.set_image(url=cardImg)
        e.set_footer(text="Have I guessed it correctly?")
        await ctx.send(embed=e)
        self.gameChannels.remove(ctx.channel.id)
@staticmethod
async def gameIdle(ctx, game, ms):
if ms:
title = "Minesweeper"
game.revealDots()
else:
title = "Battleship"
await ctx.send(f"`{ctx.author.name} has left {title} for idling for too long.`")
    async def rowOrCol(self, ctx, game, userchoice, ms):
        """Prompt for one board coordinate (0-9) for Minesweeper/Battleship.

        :param userchoice: truthy = ask for the row, falsy = ask for the column.
        :param ms: True when the caller is Minesweeper (board revealed on quit/idle).
        :return: the int coordinate, "quit" on quit/idle, or None when the
            player asked for the elapsed time instead.
        """
        if userchoice:
            rolCol = "vertical row number between 0-9. (set of numbers on the left)"
        else:
            rolCol = "horizontal column number between 0-9. (set of numbers at the top)"
        await ctx.send(f"`Please enter a {rolCol}`")
        try:
            msg = await self.client.wait_for(
                "message", check=lambda m: m.channel == ctx.channel and m.author == ctx.author, timeout=120
            )
        except TimeoutError:
            await self.gameIdle(ctx, game, ms)
            return "quit"
        yy = msg.content
        # Re-prompt until a valid coordinate or a control word arrives.
        while yy.casefold() != "time" and yy.casefold() != "quit" and yy.casefold() != "stop" and yy.casefold() != "exit":
            try:
                yy = int(yy)
                if not 0 <= yy <= 9:
                    # Out of range: fall through to the invalid-input prompt.
                    pass
                else:
                    break
            except ValueError:
                pass
            await ctx.send(embed=funcs.errorEmbed(None, "Invalid input."))
            await ctx.send(f"`Please enter a {rolCol}`")
            try:
                msg = await self.client.wait_for(
                    "message", check=lambda m: m.channel == ctx.channel and m.author == ctx.author, timeout=120
                )
            except TimeoutError:
                await self.gameIdle(ctx, game, ms)
                return "quit"
            # Reassigning from the new message keeps yy a str for the
            # .casefold() checks in the loop condition.
            yy = msg.content
        # yy is an int here on the break path, a control-word str otherwise;
        # str() makes the casefold comparisons safe for both.
        if str(yy).casefold() == "quit" or str(yy).casefold() == "exit" or str(yy).casefold() == "stop":
            if ms:
                game.revealDots()
            return "quit"
        elif str(yy).casefold() == "time":
            m, s = game.getTime()
            await funcs.sendTime(ctx, m, s)
            return None
        else:
            return yy
async def gameOptions(self, ctx, game):
dotsLeft = 90 - game.getUncovered()
await ctx.send(
f"`{ctx.author.name} has {90 - game.getUncovered()} dot{'' if dotsLeft == 1 else 's'} left to uncover.`"
)
await ctx.send("`Would you like to reveal (r), flag (f), or unflag (u) a location?`")
try:
msg = await self.client.wait_for(
"message", check=lambda m: m.channel == ctx.channel and m.author == ctx.author, timeout=120
)
except TimeoutError:
return await self.gameIdle(ctx, game, True)
decision = msg.content
while decision.casefold() != "f" and decision.casefold() != "flag" \
and decision.casefold() != "time" and decision.casefold() != "r" \
and decision.casefold() != "reveal" and decision.casefold() != "u" \
and decision.casefold() != "unflag" and decision.casefold() != "exit" \
and decision.casefold() != "quit" and decision.casefold() != "stop":
await ctx.send(embed=funcs.errorEmbed(None, "Invalid input."))
await ctx.send("`Would you like to reveal (r), | |
import numpy, logging
from sys import exit
from Classes.DotData import DotData
from Operations.Shari_Operations.localize.xpopMerge import xpopMerge
from Operations.Shari_Operations.localize.Scenario import GetSelectionScenarios, GetScenarios
from Operations.MiscUtil import MakeAlphaNum, Dict, Sfx, progress, AddFileSfx
def mergeSims( scenario, Ddata = '../Data/Shari_Data/sim/', simsOut = 'simsOut3', nreplicas = 5,
thinExt = '.thin', thinSfx = '',
selpop = None, getio = None ):
"""Gathers per-SNP information, for all replicas of a given scenario, and outputs it in a single DotData where each line
gives info for one SNP.
Specifically, reads simulation and Sweep output, collects columns needed for composite likehood test (chrom, base pair position, genetic
distance, anc frequencies for 3 populations, xpop for each pair, and ihs, iHH_A and iHH_D for selected population)
Input params:
scenario - an object of class Scenario, indicating the simulation scenario (either neutral or a selection scenario)
from which all replicas were simulated.
nreplicas - the number of replicas simulated under this scenario.
Each replica represents a chromosome region, with a set of SNPs on it.
Ddata - the directory under which the simulations and the Sweep analysis results live.
Under this directory we expect to find:
iHS analysis results, under power_ihs/
XP-EHH analysis results, under power_xpop
simulation output giving SNP positions
thinExt - the extension appended to simulation files that describe the SNPs in the simulated replica.
Sometimes we create simulations and then thin them under different thinning models (to simulate SNP ascertainment
by the various stages of HapMap; these differently thinned versions of the same simulations might be stored in
simulation files with different extensions.
thinSfx - the suffix appended to the power_ihs and power_xpop directory names, telling where to find iHS and XP-EHH
analyses of the simulations. When we analyze the same simulations after applying different thinning scenarios,
the iHS and XP-EHH analyses for each thinning scenario go into a separate set of directories.
Output params:
Ddata - under Ddata writes a DotData named merged_scenName.data, where each line gives info
for one SNP, with the following columns (type of data is float unless stated otherwise):
CHROM_POS 1 - physical (basepair) position of the SNP within its replica.
Note that one merged file contains SNPs for a set of replicas (all for the same scenario),
so there could be multiple SNPs with the same position. The replica number
is given in the Chrom column.
FREQ1 1 - derived allele frequency in pop 1 ( European )
FREQ1 4 - derived allele frequency in pop 4 ( EastAsian )
FREQ1 5 - derived allele frequency in pop 5 ( WestAfrican )
R AllEHH logratio Deviation European_WestAfrican - XP-EHH score to the right of the SNP,
between European and WestAfrican pops, normalized to the neutral background.
Analogously for the next five columns:
L AllEHH logratio Deviation European_WestAfrican
R AllEHH logratio Deviation EastAsian_European
L AllEHH logratio Deviation EastAsian_European
R AllEHH logratio Deviation EastAsian_WestAfrican
L AllEHH logratio Deviation EastAsian_WestAfrican
SNP pos (cM) European_WestAfrican - genetic map position of this SNP, within its replica.
(the European_WestAfrican suffix is irrelevant).
SNP pos (bases) European_WestAfrican - physical (basepair) position of this SNP within its replica.
(the European_WestAfrican suffix is irrelevant).
Chrom European_WestAfrican - the replica from which this SNP comes; can be nan.
(the European_WestAfrican suffix is irrelevant)
Chrom - the replica from which this SNP comes; can be nan
SNP pos (bases) - physical (basepair) position of this SNP within its replica.
SNP pos (cM) - genetic map position of this SNP within its replica
Both iHH_A - sum of iHH_A for both directions from this SNP
Both iHH_D - sum of iHH_D for both directions from this SNP
Both iHS - the value in 'Both Unstandardised iHS' (below), but binned by derived allele frequency
and normalized within the bin.
Left iHH_D - iHH_D to the left of the SNP (the raw integral value). analogously for the next three.
Right iHH_D
Left iHH_A
Right iHH_A
Both Unstandardised iHS - log( (iHH_A_left + iHH_A_right) / ( iHH_D_left + iHH_D_right ) )
( see also 'Both iHS' column for the standardized iHS score )
"""
assert selpop == None or scenario.is_neutral()
DataDir = Ddata + '/'
SimDir = DataDir + simsOut + thinSfx + '/'
if not scenario.is_neutral():
scenName = 'sel%d_%d' % ( scenario.mutFreq, scenario.mutPop )
scenDir = str( scenario.mutAge ) + 'ky/' + scenName
else:
scenName = 'neutral'
scenDir = 'neutral'
popName = {1:'European',4:'EastAsian',5:'WestAfrican'}
ihsSignifTsv = DataDir + 'power_ihs' + thinSfx + '/' + scenDir + '/ihs_sig_' + \
popName[ scenario.mutPop if not scenario.is_neutral() else ( selpop if selpop != None else 1 ) ] + '.tsv'
xpopSignifTsv = [ DataDir + 'power_xpop' + thinSfx + '/' + scenDir + '/xpop_significance_' + popPair + '.tsv'
for popPair in ( 'EastAsian_WestAfrican', 'EastAsian_European', 'European_WestAfrican' ) ]
posFiles = [ SimDir + scenDir + '/' + str(ichrom) + '_' + scenName + '.pos-%d%s' % ( pop, thinExt )
for ichrom in range( nreplicas ) for pop in ( 1, 4, 5 ) ]
ageSfx = '%dky' % ( scenario.mutAge if not scenario.isNeutral() else 10 )
mergedDotData = AddFileSfx( Ddata + 'merged.data/', ageSfx, scenario.scenName(), selpop, thinSfx )
fileDescrs = \
{ mergedDotData :
( 'Various per-snp statistics for SNPs in scenario $scenario, replicas 0-$nreplicas.',
( ( 'CHROM_POS 1', 'physical (basepair) position of the SNP within its replica. '
'Note that one merged file contains SNPs for a set of replicas (all for the same scenario), '
'so there could be multiple SNPs with the same position. The replica number '
'is given in the Chrom column. ' ),
( 'FREQ1 1', 'derived allele frequency in pop 1 ( European )' ),
( 'R AllEHH logratio Deviation European_WestAfrican', 'XP-EHH score to the R of the SNP, '
'between European and WestAfrican pops, normalized to the neutral background.' ),
( 'SNP pos (cM) European_WestAfrican', 'genetic map SNP position' ),
( 'SNP pos (bases) European_WestAfrican', 'physical SNP position' ),
( 'Chrom European_WestAfrican', 'chromosome (or replica number)' ),
( 'Chrom', 'chromosome (or replica number)' ),
( 'SNP pos (bases)', 'physical SNP position' ),
( 'SNP pos (cM)', 'genetic map SNP position' ),
( 'Both iHH_A', 'sum of iHH_A scores for both sides' ),
( 'Both iHH_D', 'sum of iHH_D scores for both sides' ),
( 'Both iHS', 'sum of iHS scores for both sides' ),
( ' Left iHH_D', 'iHH_D score to the left of the SNP' ),
( 'Right iHH_D', 'iHH_D score to the right of the SNP' ),
( 'Left iHH_A', 'iHH_A score to the left of the SNP' ),
( 'Right iHH_A', 'iHH_A score to the right of the SNP' ),
( 'Both Unstandardised iHS', 'sum of unstandardized iHS scores for both sides' ) ) ) }
if getio: return dict( depends_on = posFiles + [ ihsSignifTsv ] + xpopSignifTsv, creates = mergedDotData,
mediumRuleNameSfx = scenario.scenDir(),
fileDescrs = fileDescrs )
ncausal = 0
dashFixer = lambda v: v if v != '-' else numpy.nan
# Load iHS of selected pop
ihsAll = DotData(SVPath = ihsSignifTsv,ToLoad=['Chrom','SNP pos (bases)','SNP pos (cM)','Both iHH_A','Both iHH_D','Both iHS','Left iHH_D','Right iHH_D','Left iHH_A','Right iHH_A','Both Unstandardised iHS'], SVValueFixer = dashFixer)
ihsAllChrom = ihsAll.Chrom
# Load xpop values
xpopAll = xpopMerge( *xpopSignifTsv )
logging.info( 'done with xpopMerge' )
xpopAll = xpopAll[['R AllEHH logratio Deviation European_WestAfrican','L AllEHH logratio Deviation European_WestAfrican','R AllEHH logratio Deviation EastAsian_European','L AllEHH logratio Deviation EastAsian_European','R AllEHH logratio Deviation EastAsian_WestAfrican',
'L AllEHH logratio Deviation EastAsian_WestAfrican','SNP pos (cM) European_WestAfrican','SNP pos (bases) European_WestAfrican','Chrom European_WestAfrican']]
xpopAllChrom = xpopAll['Chrom European_WestAfrican']
replicates = []
xpopIdx = 0
ihsIdx = 0
for ichrom in range(nreplicas):
progress( 'Merging replicas', ichrom, nreplicas, freq = 1 )
logging.info( 'looking at replica %d of %d' % ( ichrom, nreplicas ) )
# Load in pos files for this replica.
# They give, for each SNP in the replica, its physical (basepair) position within the replica,
# and the frequency of the derived and the ancestral alleles.
pos1, pos4, pos5 = [ DotData(SVPath=SimDir + scenDir + '/' + str(ichrom) + '_' + scenName + '.pos-%d%s' % ( pop, thinExt),
SVSkipFirstLines = 1, SVHeader = False,
names = ['SNP','CHROM', 'CHROM_POS', 'ALLELE1', 'FREQ1', 'ALLELE2', 'FREQ2' ]) for pop in ( 1, 4, 5 ) ]
assert pos1.numCols() == pos4.numCols() == pos5.numCols()
posBlank = ((numpy.nan,)*pos1.numCols(),)*3
logging.info( 'Loaded pos files for chrom ' + str( ichrom ) + ': ' + str( len(pos1) ) + | |
# <reponame>mathpluscode/DeepReg
import itertools
import numpy as np
import tensorflow as tf
import deepreg.model.layer_util as layer_util
class Activation(tf.keras.layers.Layer):
    """Keras layer wrapping an activation resolved via tf.keras.activations.get."""

    def __init__(self, identifier: str = "relu", **kwargs):
        """
        :param identifier: activation name understood by Keras, e.g. "relu"
        :param kwargs: forwarded to tf.keras.layers.Layer.
        """
        super().__init__(**kwargs)
        self._activation_fn = tf.keras.activations.get(identifier=identifier)

    def call(self, inputs, **kwargs):
        """Apply the wrapped activation function element-wise."""
        return self._activation_fn(inputs)
class Norm(tf.keras.layers.Layer):
    """Layer selecting between batch normalization and layer normalization."""

    def __init__(self, name: str = "batch_norm", axis: int = -1, **kwargs):
        """
        :param name: normalization type, "batch_norm" or "layer_norm"
            (selects the wrapped layer; it is not forwarded as the Keras
            layer name)
        :param axis: axis to normalize over
        :param kwargs: additional arguments.
        :raises ValueError: if name is not a supported normalization type.
        """
        super().__init__(**kwargs)
        # NOTE(review): kwargs are forwarded to both the base Layer and
        # BatchNormalization, but not to LayerNormalization — confirm this
        # asymmetry is intentional.
        if name == "batch_norm":
            self._norm = tf.keras.layers.BatchNormalization(axis=axis, **kwargs)
        elif name == "layer_norm":
            self._norm = tf.keras.layers.LayerNormalization(axis=axis)
        else:
            # Include the offending value so misconfiguration is debuggable.
            raise ValueError(f"Unknown normalization type: {name}")

    def call(self, inputs, training=None, **kwargs):
        """Apply the selected normalization; training flag is passed through."""
        return self._norm(inputs=inputs, training=training)
class MaxPool3d(tf.keras.layers.Layer):
    """Keras layer wrapping tf.keras.layers.MaxPool3D."""

    def __init__(
        self,
        pool_size: (int, tuple),
        strides: (int, tuple, None) = None,
        padding: str = "same",
        **kwargs,
    ):
        """
        :param pool_size: int or tuple of 3 ints
        :param strides: int or tuple of 3 ints; Keras defaults this to
            pool_size when None
        :param padding: "same" or "valid"
        :param kwargs: forwarded to tf.keras.layers.Layer.
        """
        super().__init__(**kwargs)
        self._pool = tf.keras.layers.MaxPool3D(
            pool_size=pool_size, strides=strides, padding=padding
        )

    def call(self, inputs, **kwargs):
        """Apply 3-D max pooling to inputs."""
        return self._pool(inputs=inputs)
class Conv3d(tf.keras.layers.Layer):
    """Keras layer wrapping tf.keras.layers.Conv3D."""

    def __init__(
        self,
        filters: int,
        kernel_size: int = 3,
        strides: int = 1,
        padding: str = "same",
        activation: (str, None) = None,
        use_bias: bool = True,
        kernel_initializer: str = "glorot_uniform",
        **kwargs,
    ):
        """
        :param filters: number of output channels
        :param kernel_size: int or tuple of 3 ints, e.g. (3,3,3) or 3
        :param strides: int or tuple of 3 ints, e.g. (1,1,1) or 1
        :param padding: "same" or "valid"
        :param activation: optional activation applied by Conv3D
        :param use_bias: whether a bias term is added to the output
        :param kernel_initializer: weight initialization method
        :param kwargs: forwarded to tf.keras.layers.Layer.
        """
        super().__init__(**kwargs)
        self._conv = tf.keras.layers.Conv3D(
            filters=filters,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            activation=activation,
            use_bias=use_bias,
            kernel_initializer=kernel_initializer,
        )

    def call(self, inputs, **kwargs):
        """Apply the 3-D convolution to inputs."""
        return self._conv(inputs=inputs)
class Deconv3d(tf.keras.layers.Layer):
    """Transposed 3-D convolution whose inner layer is built lazily in build()."""
    def __init__(
        self,
        filters: int,
        output_shape: (tuple, None) = None,
        kernel_size: int = 3,
        strides: int = 1,
        padding: str = "same",
        use_bias: bool = True,
        **kwargs,
    ):
        """
        Layer wraps tf.keras.layers.Conv3DTranspose
        and does not require the input shape when initializing.
        :param filters: number of channels of the output
        :param output_shape: (out_dim1, out_dim2, out_dim3); when given, the
            output padding is computed in build() so the output has exactly
            this spatial shape
        :param kernel_size: int or tuple of 3 ints, e.g. (3,3,3) or 3
        :param strides: int or tuple of 3 ints, e.g. (1,1,1) or 1
        :param padding: same or valid.
        :param use_bias: use bias for Conv3DTranspose or not.
        :param kwargs: additional arguments.
        """
        super().__init__(**kwargs)
        # save parameters
        self._filters = filters
        self._output_shape = output_shape
        self._kernel_size = kernel_size
        self._strides = strides
        self._padding = padding
        self._use_bias = use_bias
        self._kwargs = kwargs
        # init layer variables; both are created in build() once the input
        # spatial shape is known
        self._output_padding = None
        self._deconv3d = None
    def build(self, input_shape):
        super().build(input_shape)
        # normalize scalar hyper-parameters to per-axis triples
        if isinstance(self._kernel_size, int):
            self._kernel_size = [self._kernel_size] * 3
        if isinstance(self._strides, int):
            self._strides = [self._strides] * 3
        if self._output_shape is not None:
            # pylint: disable-next=line-too-long
            """
            https://github.com/tensorflow/tensorflow/blob/1cf0898dd4331baf93fe77205550f2c2e6c90ee5/tensorflow/python/keras/utils/conv_utils.py#L139-L185
            When the output shape is defined, the padding should be calculated manually
            if padding == 'same':
                pad = filter_size // 2
                length = ((input_length - 1) * stride + filter_size
            - 2 * pad + output_padding)
            """
            # solve the formula above for output_padding so the transposed
            # convolution yields exactly the requested output_shape
            # (input_shape[1 + i] skips the batch dimension)
            self._padding = "same"
            self._output_padding = [
                self._output_shape[i]
                - (
                    (input_shape[1 + i] - 1) * self._strides[i]
                    + self._kernel_size[i]
                    - 2 * (self._kernel_size[i] // 2)
                )
                for i in range(3)
            ]
        self._deconv3d = tf.keras.layers.Conv3DTranspose(
            filters=self._filters,
            kernel_size=self._kernel_size,
            strides=self._strides,
            padding=self._padding,
            output_padding=self._output_padding,
            use_bias=self._use_bias,
            **self._kwargs,
        )
    def call(self, inputs, **kwargs):
        """Apply the transposed convolution built for this input shape."""
        return self._deconv3d(inputs=inputs)
class Conv3dBlock(tf.keras.layers.Layer):
    """Composite block: conv3d followed by normalization and activation."""

    def __init__(
        self,
        filters: int,
        kernel_size: (int, tuple) = 3,
        strides: (int, tuple) = 1,
        padding: str = "same",
        **kwargs,
    ):
        """
        A conv3d block having conv3d - norm - activation.

        :param filters: number of channels of the output
        :param kernel_size: int or tuple of 3 ints, e.g. (3,3,3) or 3
        :param strides: int or tuple of 3 ints, e.g. (1,1,1) or 1
        :param padding: str, same or valid
        :param kwargs: additional arguments.
        """
        super().__init__(**kwargs)
        # bias is omitted because the following normalization re-centers the output
        self._conv3d = Conv3d(
            filters=filters,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            use_bias=False,
        )
        self._norm = Norm()
        self._act = Activation()

    def call(self, inputs, training=None, **kwargs) -> tf.Tensor:
        """
        :param inputs: shape = (batch, in_dim1, in_dim2, in_dim3, channels)
        :param training: training flag for normalization layers (default: None)
        :param kwargs: additional arguments.
        :return: shape = (batch, in_dim1, in_dim2, in_dim3, channels)
        """
        normed = self._norm(inputs=self._conv3d(inputs=inputs), training=training)
        return self._act(normed)
class Deconv3dBlock(tf.keras.layers.Layer):
    """Composite block: deconv3d followed by normalization and activation."""

    def __init__(
        self,
        filters: int,
        output_shape: (tuple, None) = None,
        kernel_size: (int, tuple) = 3,
        strides: (int, tuple) = 1,
        padding: str = "same",
        **kwargs,
    ):
        """
        A deconv3d block having deconv3d - norm - activation.

        :param filters: number of channels of the output
        :param output_shape: (out_dim1, out_dim2, out_dim3)
        :param kernel_size: int or tuple of 3 ints, e.g. (3,3,3) or 3
        :param strides: int or tuple of 3 ints, e.g. (1,1,1) or 1
        :param padding: str, same or valid
        :param kwargs: additional arguments.
        """
        super().__init__(**kwargs)
        # bias is omitted because the following normalization re-centers the output
        self._deconv3d = Deconv3d(
            filters=filters,
            output_shape=output_shape,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            use_bias=False,
        )
        self._norm = Norm()
        self._act = Activation()

    def call(self, inputs, training=None, **kwargs) -> tf.Tensor:
        """
        :param inputs: shape = (batch, in_dim1, in_dim2, in_dim3, channels)
        :param training: training flag for normalization layers (default: None)
        :param kwargs: additional arguments.
        :return output: shape = (batch, in_dim1, in_dim2, in_dim3, channels)
        """
        normed = self._norm(inputs=self._deconv3d(inputs=inputs), training=training)
        return self._act(normed)
class Residual3dBlock(tf.keras.layers.Layer):
    """Resnet-style block: act(norm(conv3d(conv3d_block(x))) + x)."""

    def __init__(
        self,
        filters: int,
        kernel_size: (int, tuple) = 3,
        strides: (int, tuple) = 1,
        **kwargs,
    ):
        """
        A resnet conv3d block.

        1. conved = conv3d(conv3d_block(inputs))
        2. out = act(norm(conved) + inputs)

        :param filters: int, number of filters in the convolutional layers
        :param kernel_size: int or tuple of 3 ints, e.g. (3,3,3) or 3
        :param strides: int or tuple of 3 ints, e.g. (1,1,1) or 1
        :param kwargs: additional arguments.
        """
        super().__init__(**kwargs)
        self._conv3d_block = Conv3dBlock(
            filters=filters, kernel_size=kernel_size, strides=strides
        )
        self._conv3d = Conv3d(
            filters=filters, kernel_size=kernel_size, strides=strides, use_bias=False
        )
        self._norm = Norm()
        self._act = Activation()

    def call(self, inputs, training=None, **kwargs) -> tf.Tensor:
        """
        :param inputs: shape = (batch, in_dim1, in_dim2, in_dim3, channels)
        :param training: training flag for normalization layers (default: None)
        :param kwargs: additional arguments.
        :return output: shape = (batch, in_dim1, in_dim2, in_dim3, channels)
        """
        conved = self._conv3d(inputs=self._conv3d_block(inputs))
        normed = self._norm(inputs=conved, training=training)
        # identity shortcut; assumes the conv path preserves the input shape
        return self._act(normed + inputs)
class DownSampleResnetBlock(tf.keras.layers.Layer):
    """Down-sampling resnet block returning both the pooled and skip tensors."""

    def __init__(
        self,
        filters: int,
        kernel_size: (int, tuple) = 3,
        pooling: bool = True,
        **kwargs,
    ):
        """
        A down-sampling resnet conv3d block, with max-pooling or conv3d.

        1. conved = conv3d_block(inputs) # adjust channel
        2. skip = residual_block(conved) # develop feature
        3. pooled = pool(skip) # down-sample

        :param filters: number of channels of the output
        :param kernel_size: int or tuple of 3 ints, e.g. (3,3,3) or 3
        :param pooling: if True, use max pooling to downsample, otherwise use conv.
        :param kwargs: additional arguments.
        """
        super().__init__(**kwargs)
        self._pooling = pooling
        self._conv3d_block = Conv3dBlock(filters=filters, kernel_size=kernel_size)
        self._residual_block = Residual3dBlock(filters=filters, kernel_size=kernel_size)
        # exactly one of the two down-sampling paths is instantiated
        if pooling:
            self._max_pool3d = MaxPool3d(pool_size=(2, 2, 2), strides=(2, 2, 2))
            self._conv3d_block3 = None
        else:
            self._max_pool3d = None
            self._conv3d_block3 = Conv3dBlock(
                filters=filters, kernel_size=kernel_size, strides=2
            )

    def call(self, inputs, training=None, **kwargs) -> (tf.Tensor, tf.Tensor):
        """
        :param inputs: shape = (batch, in_dim1, in_dim2, in_dim3, channels)
        :param training: training flag for normalization layers (default: None)
        :param kwargs: additional arguments.
        :return: (pooled, skip)
          - downsampled, shape = (batch, in_dim1//2, in_dim2//2, in_dim3//2, channels)
          - skipped, shape = (batch, in_dim1, in_dim2, in_dim3, channels)
        """
        conved = self._conv3d_block(inputs=inputs, training=training)  # adjust channel
        skip = self._residual_block(inputs=conved, training=training)  # develop feature
        if self._pooling:
            pooled = self._max_pool3d(inputs=skip)
        else:
            pooled = self._conv3d_block3(inputs=skip, training=training)
        return pooled, skip
class UpSampleResnetBlock(tf.keras.layers.Layer):
def __init__(self, filters, kernel_size=3, concat=False, **kwargs):
"""
An up-sampling resnet conv3d block, with deconv3d.
:param filters: number of channels of the output
:param kernel_size: int or tuple of 3 ints, e.g. (3,3,3) or 3
:param concat: bool,specify how to combine input and skip connection images.
If True, use concatenation, otherwise use sum (default=False).
:param kwargs: additional arguments.
"""
super().__init__(**kwargs)
# save parameters
self._filters = filters
self._concat = concat
# init layer variables
self._deconv3d_block = None
self._conv3d_block = Conv3dBlock(filters=filters, | |
, logLevel.DEBUG , method , "----- %s == %s" % (resolvedTemplatePath, path)])
resolvedTemplatePath = path[1]
else:
#logQ.put( [logType , logLevel.DEBUG , method , "----- %s != %s" % (resolvedTemplatePath, path)])
pass
logQ.put( [logType , logLevel.WARNING , method , exception])
logQ.put( [logType , logLevel.DEBUG , method , method + u' with errors!'])
'''
raise Exceptions.TemplatePathError(exception).with_traceback(tb)
except Exception:
# If noWarningOnFail is set to true, then the calling method considers a None type
# return acceptable and there is no need to clutter up the log files with warnings.
# A good example is when seraching for template paths using wildcards
#failLogLevel = logLevel.WARNING
#if noWarningOnFail == True:
# failLogLevel = logLevel.DEBUG
#Build up a full java or C# style stacktrace, so that devs can track down errors in script modules within repositories
fullerror = sys.exc_info()
errorID = str(fullerror[0])
errorMsg = str(fullerror[1])
tb = sys.exc_info()[2]
exception = "Unable to resolve template path %s, Nested Traceback = %s: %s" %(resolvedTemplatePath,errorID, errorMsg)
#logQ.put( [logType , logLevel.DEBUG , method , "self.templates == %s" % (self.templates)])
for path in self.templates:
if path == resolvedTemplatePath:
#logQ.put( [logType , logLevel.DEBUG , method , "----- %s == %s" % (resolvedTemplatePath, path)])
resolvedTemplatePath = path[1]
else:
#logQ.put( [logType , logLevel.DEBUG , method , "----- %s != %s" % (resolvedTemplatePath, path)])
pass
raise Exceptions.TemplatePathError(exception).with_traceback(tb)
#logQ.put( [logType , logLevel.DEBUG , method , "exiting"])
return resolvedTemplate
def resolveTemplatePath(self, callingTemplate, calledTemplate, noWarningOnFail = True):
"""
resolveTemplate(), but return ONLY the fully resolved template path string
"""
#method = moduleName + '.' + self.className + '.resolveTemplatePath'
#logQ.put( [logType , logLevel.DEBUG , method , "entering"])
try:
targetTemplate = self.resolveTemplate(callingTemplate, calledTemplate, noWarningOnFail)
return targetTemplate.path.fullTemplatePath
except Exceptions.TemplatePathError as e:
raise e
#logQ.put( [logType , logLevel.DEBUG , method , "exitong"])
    def resolveTemplateAbsolutely(self, calledTemplate):
        """
        Look up calledTemplate directly in self.templates by its absolute path.

        :param calledTemplate: fully qualified template path used as the key
        :return: the template registered under calledTemplate
        :raises Exceptions.TemplatePathError: if no entry exists for the path
        """
        method = moduleName + '.' + self.className + '.resolveTemplateAbsolutely'
        #logQ.put( [logType , logLevel.DEBUG , method , "entering"])
        #logQ.put( [logType , logLevel.DEBUG , method , 'Resolving template path of %s from absolute reference' % (calledTemplate)])
        resolvedTemplate = None
        try:
            resolvedTemplate = self.templates[calledTemplate]
        except Exception:
            # Lookup failed (typically KeyError); capture the original traceback
            # so it can be chained onto the TemplatePathError below.
            fullerror = sys.exc_info()
            errorID = str(fullerror[0])
            errorMsg = str(fullerror[1])
            tb = sys.exc_info()[2]
            exception = "Resolved template path %s has no entry in the template repository. Nested Traceback = %s: %s" %(calledTemplate, errorID, errorMsg)
            #logQ.put( [logType , logLevel.DEBUG , method , "self.templates == %s" % (self.templates)])
            # NOTE(review): this loop iterates the repository's keys and, on an
            # exact match, rebinds calledTemplate to path[1] — the second
            # character of the key string.  That looks like a leftover debugging
            # aid rather than intended behavior; confirm before relying on it.
            for path in self.templates:
                if path == calledTemplate:
                    #logQ.put( [logType , logLevel.DEBUG , method , "----- %s == %s" % (calledTemplate, path)])
                    calledTemplate = path[1]
                else:
                    #logQ.put( [logType , logLevel.DEBUG , method , "----- %s != %s" % (calledTemplate, path)])
                    pass
            logQ.put( [logType , logLevel.WARNING , method , exception])
            #logQ.put( [logType , logLevel.DEBUG , method , method + u' with errors!'])
            raise Exceptions.TemplatePathError(exception).with_traceback(tb)
        #logQ.put( [logType , logLevel.DEBUG , method , "exiting"])
        return resolvedTemplate
class TemplatePath(object):
    """
    Resolved path information for a template within the repository.

    Attributes:
        packagePath: dotted path of the package containing the template's
            module, or None if the module is not inside a package
        modulePath: the full dotted path of the module, as given
        moduleName: the module's own name (the last path segment)
        templateName: the name of the template within its module
        fullTemplatePath: the full dotted path of the template
        extension: optional extension used when addressing a data structure
            inside a template, such as properties of metamemes
    """
    className = "TemplatePath"

    def __init__(self, modulePath, templateName, extension = None):
        self.packagePath = None
        self.modulePath = modulePath
        self.moduleName = None
        self.templateName = templateName
        self.fullTemplatePath = None
        self.extension = extension
        uModulePath = str(modulePath)
        uTemplateName = str(templateName)
        # Relative template paths proved unreliable when the source template sat
        # deeper in the hierarchy than the target, so only two forms are
        # accepted: a fully qualified template path (contains a dot), or a bare
        # template name resolved against modulePath.
        if len(templateName.rsplit('.')) > 1:
            # templateName is already fully qualified
            self.fullTemplatePath = templateName
        else:
            # A dotted module path means the module is the child of a package;
            # a plain name means it is a free module.
            trimmedModulePath = modulePath.rsplit('.')
            try:
                if len(trimmedModulePath) > 1:
                    # all but the last segment form the package path
                    self.packagePath = '.'.join(str(part) for part in trimmedModulePath[:-1])
                    self.moduleName = str(trimmedModulePath[-1])
                else:
                    self.moduleName = modulePath
            except (IndexError, TypeError):
                # previously a bare `except:`; narrowed to the failures this
                # indexing/joining can actually produce
                self.moduleName = str(trimmedModulePath[0])
            self.fullTemplatePath = uModulePath + '.' + uTemplateName
class EnhancementIndex(object):
    ''' All of the template enhancement relationships '''
    className = "EnhancementIndex"

    def __init__(self):
        # maps enhanced template path -> list of paths of templates enhancing it
        self.enhancementLists = {}

    def addEnhancement(self, enhancingPath, enhancedPath):
        ''' Add an enhancement to the list of enhancements in the catalog '''
        method = moduleName + '.' + self.className + '.addEnhancement'
        try:
            enhancementList = self.enhancementLists[enhancedPath]
            enhancementList.append(enhancingPath)
            # keep each enhancing template listed at most once
            newEnhancementList = filterListDuplicates(enhancementList)
            self.enhancementLists[enhancedPath] = newEnhancementList
        except KeyError:
            # no enhancement yet registered for this template
            # (was a bare `except:`, which also hid unrelated errors)
            self.enhancementLists[enhancedPath] = [enhancingPath]
        logQ.put( [logType , logLevel.INFO , method , "Template %s is now enhanced by %s." %(enhancedPath, enhancingPath)])

    def removeEnhancement(self, enhancingPath, enhancedPath):
        ''' Remove an enhancement from the list of enhancements in the catalog '''
        method = moduleName + '.' + self.className + '.removeEnhancement'
        try:
            enhancementList = self.enhancementLists[enhancedPath]
            # rebuild the list without enhancingPath; `found` records whether it
            # was present so the appropriate message can be logged
            newList = [entry for entry in enhancementList if entry != enhancingPath]
            found = len(newList) != len(enhancementList)
            self.enhancementLists[enhancedPath] = newList
            if found:
                logQ.put( [logType , logLevel.INFO , method , "Template %s has had its enhancement from %s severed." %(enhancedPath, enhancingPath)])
            else:
                logQ.put( [logType , logLevel.INFO , method , "Template %s is not enhanced by %s." %(enhancedPath, enhancingPath)])
        except KeyError:
            # no enhancement registered at all; nothing to do
            # (was a bare `except:`)
            logQ.put( [logType , logLevel.INFO , method , "Template %s has no enhancements. Request to remove %s from its enhancement is moot" %(enhancedPath, enhancingPath)])

    def getEnhancements(self, enhancedPath):
        ''' Return the list of templates enhancing enhancedPath (empty if none) '''
        # dict.get replaces the old try/except-pass around the lookup
        return self.enhancementLists.get(enhancedPath, [])
#Globals
# NOTE: `global` declarations at module scope are no-ops; they are kept here
# only to document which names are shared across the module.
global templateRepository
global entityRepository
global linkRepository
templateRepository = TemplateRepository()
sourceTemplateRepository = TemplateRepository()
#entityRepository = EntityRepository()
#linkRepository = LinkRepository()
tempRepository = TemplateRepository() # temporary repo for bootstrapping
enhancementIndex = EnhancementIndex()
linkDirectionTypes = LinkDirectionType()
linkAttributeOperatorTypes = LinkAttributeOperatorType()
class TraverseParameter(object):
operator = None
parameter = None
value = None
def __init__(self, statement):
splitEQ = statement.rpartition("=")
splitGT = statement.rpartition("<")
splitLT = statement.rpartition(">")
splitNE = statement.rpartition("!=")
splitEL = statement.rpartition(">=")
splitEG = statement.rpartition("<=")
splitNI = statement.rpartition("><")
splitIN = statement.rpartition("<>")
if (len(splitNE[0]) > 0):
self.operator = linkAttributeOperatorTypes.NOTEQUAL
self.parameter = splitNE[0].strip()
self.value = splitNE[2].strip()
elif (len(splitEL[0]) > 0):
self.operator = linkAttributeOperatorTypes.EQUALORLESS
self.parameter = splitEL[0].strip()
self.value = splitEL[2].strip()
elif (len(splitEG[0]) > 0):
self.operator = linkAttributeOperatorTypes.EQUALORGREATER
self.parameter = splitEG[0].strip()
self.value = splitEG[2].strip()
elif (len(splitIN[0]) > 0):
self.operator = linkAttributeOperatorTypes.IN
self.parameter = splitIN[0].strip()
self.value = splitIN[2].strip()
elif (len(splitNI[0]) > 0):
self.operator = linkAttributeOperatorTypes.NOTIN
self.parameter = splitNI[0].strip()
self.value = splitNI[2].strip()
elif (len(splitIN[0]) > 0):
self.operator = linkAttributeOperatorTypes.IN
self.parameter = splitNI[0].strip()
self.value = splitNI[2].strip()
elif (len(splitGT[0]) > 0):
self.operator = linkAttributeOperatorTypes.GREATER
self.parameter = splitGT[0].strip()
self.value = splitGT[2].strip()
elif (len(splitLT[0]) > 0):
self.operator = linkAttributeOperatorTypes.LESS
self.parameter = splitLT[0].strip()
| |
import face_recognition
from PIL import Image, ImageDraw
import numpy as np
from sklearn.svm import LinearSVC
import glob
import os
import matplotlib.pyplot as plt
'''
Draw box on all face_locations of an image
'''
def draw_face_box(image, face_locations):
    """
    Draw a blue rectangle around every face location on a copy of the image.

    :param image: image as a numpy array (as loaded by face_recognition)
    :param face_locations: iterable of (top, right, bottom, left) boxes
    :return: (draw, pil_image) - the ImageDraw handle and the annotated PIL image
    """
    # Work on a PIL copy so the original array is left untouched
    pil_image = Image.fromarray(image)
    draw = ImageDraw.Draw(pil_image)
    for top, right, bottom, left in face_locations:
        draw.rectangle(((left, top), (right, bottom)), outline=(0, 0, 255))
    return draw, pil_image
'''
Get encodings and names of all images
'''
def get_encoding(path,jitters=10,det_model="hog"):
    """
    Encode every face found in the images under path.

    Images whose detected face count is not exactly one are printed and shown
    with boxed faces for manual inspection; their first encoding (if any) is
    still kept.

    :param path: directory prefix (ending in '/') to search for images
    :param jitters: num_jitters passed to face_recognition.face_encodings
    :param det_model: face detection model, "hog" or "cnn"
    :return: numpy array of face encodings, one per image with a detected face
    """
    image_files = [f for ext in ["jpg","gif","png","tga","jpeg"]
                   for f in glob.glob(path+'*.%s' % ext)]
    collected = []
    for image_file in image_files:
        image = face_recognition.load_image_file(image_file)
        locations = face_recognition.face_locations(image, model=det_model)
        found = face_recognition.face_encodings(image, locations, num_jitters=jitters)
        if len(found) != 1:
            # ambiguous image (zero or several faces) - display it for review
            print("image, face_encoding len: ", image_file, len(found))
            draw, pil_image = draw_face_box(image, locations)
            # Remove the drawing library from memory as per the Pillow docs
            del draw
            # Display the resulting image
            pil_image.show()
        if found:
            collected.append(found[0])
    return np.array(collected)
# Compute or load the known data
def load_data(image_path):
    """
    Load (or compute and cache) face encodings for every class directory
    under image_path.

    Each subdirectory is treated as one class; its basename becomes the class
    name. Results are cached as encodings.npy / classes.npy / names.npy inside
    image_path and reloaded on subsequent calls.

    NOTE: relies on the module-level globals `jitters` and `detection_model`.

    :param image_path: directory (ending in '/') with one subdirectory per class
    :return: (X, Y, names) - encodings array, integer class labels, class names
    """
    if not (os.path.exists(image_path+'encodings.npy')):
        i = 0
        for k_image_path in glob.glob(image_path+'*'):
            print("image: ", k_image_path)
            # Skip cache files and anything that is not a class directory.
            # (The old check `k_image_path == []` compared a str to a list and
            # could never fire.)
            if not os.path.isdir(k_image_path):
                continue
            k_encodings = get_encoding(k_image_path+'/',jitters,det_model=detection_model)
            # explicit length test instead of the fragile ndarray-vs-list
            # comparison `k_encodings != []`
            if len(k_encodings) > 0:
                head, tail = os.path.split(k_image_path)
                if i==0:
                    X = k_encodings
                    Y = np.repeat(i, len(k_encodings))
                    names = [tail]
                else:
                    X = np.vstack((X,k_encodings))
                    Y = np.hstack((Y,np.repeat(i, len(k_encodings))))
                    names = np.hstack((names,tail))
                i+=1
        print("X.shape, Y.shape: ", X.shape, Y.shape)
        print("names: ", names)
        np.save(image_path+'encodings.npy', X)
        np.save(image_path+'classes.npy', Y)
        np.save(image_path+'names.npy', names)
    else:
        X = np.load(image_path+'encodings.npy')
        Y = np.load(image_path+'classes.npy')
        names = np.load(image_path+'names.npy')
    return X, Y, names
def relative_euclidean_distance(face_encodings, face_to_compare):
    """
    Compare RMS-normalized face encodings against a single encoding.

    Each side is divided by its own root-mean-square magnitude before taking
    the Euclidean distance, making the comparison insensitive to overall
    scale differences between encodings.

    :param faces: List of face encodings to compare
    :param face_to_compare: A face encoding to compare against
    :return: A numpy ndarray with the distance for each face in the same order as the 'faces' array
    """
    if len(face_encodings) == 0:
        return np.empty((0))
    normalized_known = face_encodings / np.sqrt(np.mean(face_encodings**2))
    normalized_query = face_to_compare / np.sqrt(np.mean(face_to_compare**2))
    return np.linalg.norm(normalized_known - normalized_query, axis=1)
if __name__ == '__main__':
jitters = 10
tolerance = 0.5
threshold = 0
detection_model = "cnn"
image_path = "./data/"
neg_image_path = "./data/Negative/"
known_image_path = "./data/known_resized/"
test_image_path = "./data/test_resized/"
test_short_image_path = "./data/twin_test_resized/Side_By_Side/"
side_by_side = True
# load the sample and test data
X, Y, names = load_data(known_image_path)
X_t, Y_t, names_t = load_data(test_image_path)
# Names of twins and family members (incl. Unknown)
#family_names = ["Eman", "Eden", "Enoch", "Albert", "Sandy", "Unknown"]
all_family_names = ["Eden", "Eman", "Enoch", "Albert", "Sandy", "Hailey", "Ivy", "Ivan"]
twin_names = ["Eman", "Eden"]
family_sizes = np.arange(2,9)
twin_accuracy_family = np.zeros_like(family_sizes, dtype=float)
red_twin_accuracy_family = np.zeros_like(family_sizes, dtype=float)
for x in family_sizes:
family_names = all_family_names[:x]
# # of classes in the first stage
stage1_classes = np.arange(len(family_names)+1,len(names)+1)
print("stage classes: ", stage1_classes)
# One-stage Linear SVC accuracy
accuracy = np.zeros_like(stage1_classes, dtype=float)
twin_accuracy = np.zeros_like(stage1_classes, dtype=float)
non_twin_accuracy = np.zeros_like(stage1_classes, dtype=float)
# Two-stage Linear SVC with RED accuracy
red_accuracy = np.zeros_like(stage1_classes, dtype=float)
red_twin_accuracy = np.zeros_like(stage1_classes, dtype=float)
red_non_twin_accuracy = np.zeros_like(stage1_classes, dtype=float)
X_n = np.empty((0,X.shape[1]))
Y_n = np.empty((0))
names_n = []
# load data of the family
for member_name, i in zip(family_names, range(len(family_names))):
member_X = X[[index for index in range(X.shape[0]) if Y[index] == np.where( names==member_name )]]
X_n = np.vstack((X_n,member_X))
Y_n = np.hstack((Y_n, np.repeat(i,member_X.shape[0])))
names_n = np.hstack((names_n, member_name))
#print("X_n, Y_n, names_n: ", X_n.shape, Y_n.shape, names_n)
# calculate Relative Euclidean Distance of the samples
family_red_X = np.empty((0,X_n.shape[0]))
for encoding in X_n:
red = relative_euclidean_distance(X_n, encoding)
family_red_X = np.vstack((family_red_X,red))
family_Y = np.copy(Y_n)
#print("family_red_X, family_Y: ", family_red_X.shape, family_Y)
# Train Family RED SVC
family_clf = LinearSVC(tol=1e-6, max_iter=20000, loss='hinge', class_weight='balanced')
family_clf.fit(family_red_X, family_Y)
score = family_clf.score(family_red_X, family_Y)
print("Family RED SVC score: ", score)
# load negatives
neg_X = X[[index for index in range(X.shape[0]) if Y[index] == np.where( names=="Unknown" )]]
X_n = np.vstack((X_n,neg_X))
Y_n = np.hstack((Y_n, np.repeat(len(family_names),neg_X.shape[0])))
names_n = np.hstack((names_n, "Unknown"))
# load the rest of the classes
class_names = [x for x in names if x not in names_n]
for class_name, n in zip(class_names, range(len(class_names))):
class_X = X[[index for index in range(X.shape[0]) if Y[index] == np.where( names==class_name )]]
X_n = np.vstack((X_n,class_X))
Y_n = np.hstack((Y_n, np.repeat(len(family_names)+n+1,class_X.shape[0])))
names_n = np.hstack((names_n, class_name))
#print("X_n, Y_n, names_n: ", X_n.shape, Y_n, names_n)
# loop through to calculate accuracies with different # of classes
for classes, m in zip(stage1_classes, range(len(stage1_classes))):
# Train the first stage SVM
clf = LinearSVC(tol=1e-6, max_iter=20000, loss='hinge', class_weight='balanced')
Y_m = np.copy(Y_n)
if classes < len(names_n):
index = np.min(np.where(Y_n == classes))
Y_m[index:] = np.repeat(len(family_names), len(Y_n[index:]))
clf.fit(X_n, Y_m)
score = clf.score(X_n, Y_m)
print("Classes, m, SVC score: ", classes, m, score)
#print("X_n, Y_m, names_n: ", X_n.shape, Y_m, names_n[:classes])
# Calculate accuracy with regular Linear SVC
twin_total = 0
correct = 0
twin_correct = 0
red_correct = 0
red_twin_correct = 0
for face_encoding, i in zip(X_t,range(X_t.shape[0])):
scores = clf.decision_function(face_encoding.flatten().reshape(1, -1))
prediction = clf.predict(face_encoding.flatten().reshape(1, -1))
name_predict = names_n[int(prediction[0])]
name_actual = names_t[int(Y_t[i])]
# all test data with classes not in SVC is considered Unknown
if name_actual in class_names[m:]:
name_actual = "Unknown"
# this is test data of a twin
if name_actual in twin_names:
twin_total+=1
# Correct at the 1st stage
if name_actual == name_predict:
correct+=1
red_correct+=1
# Correctly identified twin in the 1st stage
if name_actual in twin_names:
twin_correct+=1
red_twin_correct+=1
# secondary assessment based on family RED
if name_predict in family_names:
# calculate the RED of the test encoding
family_end_index = np.min(np.where(Y_n == len(family_names)))
red = relative_euclidean_distance(X_n[:family_end_index], face_encoding)
red_scores = family_clf.decision_function(red.flatten().reshape(1,-1))
red_prediction = family_clf.predict(red.flatten().reshape(1, -1))
red_name_predict = names_n[int(red_prediction[0])]
if (name_actual == red_name_predict):
# Corrected a wrong prediction from the 1st stage
if red_name_predict != name_predict:
#print("corrected!")
red_correct+=1
if name_actual in twin_names:
red_twin_correct+=1
print("twin corrected: 1-stage, 2-stage, actual: ", name_predict, red_name_predict, name_actual)
else:
# Made a wrong choice on the 2nd stage that was correct
if (name_actual == name_predict):
#print("wrong!")
red_correct-=1
if name_actual in twin_names:
red_twin_correct-=1
accuracy[m] = correct / X_t.shape[0] * 100
twin_accuracy[m] = twin_correct / twin_total * 100
non_twin_accuracy[m] = (correct - twin_correct) / (X_t.shape[0] - twin_total) * 100
#print("acc, twin_acc, non_twin_acc: ", accuracy[m], twin_accuracy[m], non_twin_accuracy[m])
red_accuracy[m] = red_correct / X_t.shape[0] * 100
red_twin_accuracy[m] = red_twin_correct / twin_total * 100
red_non_twin_accuracy[m] = (red_correct - red_twin_correct) / (X_t.shape[0] - twin_total) * 100
#print("red acc, twin_acc, non_twin_acc: ", red_accuracy[m], red_twin_accuracy[m], red_non_twin_accuracy[m])
plt.plot(stage1_classes, accuracy, 'r--', label='Overall Accuracy (1-stage)')
plt.plot(stage1_classes, twin_accuracy, 'r*-', label='Twin Accuracy (1-stage)')
plt.plot(stage1_classes, non_twin_accuracy, 'rs-', label='Non-Twin Accuracy (1-stage)')
plt.plot(stage1_classes, red_accuracy, 'b--', label='Overall Accuracy (2-stage RED)')
plt.plot(stage1_classes, red_twin_accuracy, 'b*-', label='Twin Accuracy (2-stage RED)')
plt.plot(stage1_classes, red_non_twin_accuracy, 'bs-', label='Non-Twin Accuracy (2-stage RED)')
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.xlabel('# of classes in Stage 1 SVC')
plt.ylabel('Accuracy (%)')
plt.title('1-Stage Linear SVC vs. 2-Stage Family of %s RED SVC' % len(family_names))
plt.grid(True)
plt.savefig("test%s.png" % len(family_names))
plt.show()
twin_accuracy_family[x-2] = twin_accuracy[-1]
red_twin_accuracy_family[x-2] = red_twin_accuracy[-1]
# plot aginst family sizes
plt.plot(family_sizes, twin_accuracy_family, 'r--', label='Twin Accuracy (1-stage)')
plt.plot(family_sizes, red_twin_accuracy_family, 'b--', label='Twin Accuracy (2-stage RED)')
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.xlabel('# of Family Members')
plt.ylabel('Twin Accuracy (%)')
plt.title('1-Stage Linear SVC vs. 2-Stage Family RED SVC')
plt.grid(True)
plt.savefig("test2.png")
plt.show()
if side_by_side:
# test all images in test file
image_files = [os.path.join(test_short_image_path, f) for f in os.listdir(test_short_image_path) if (f.endswith('.jpg') or f.endswith('.png'))]
for u_image_file in image_files:
print("Image file: ", u_image_file)
# Load an image with an unknown face
u_image = face_recognition.load_image_file(u_image_file)
# Find all the faces and face encodings in the unknown image
face_locations = face_recognition.face_locations(u_image, model=detection_model)
face_encodings = face_recognition.face_encodings(u_image, face_locations)
# Convert the image to a PIL-format image
pil_image = Image.fromarray(u_image)
# Create a Pillow ImageDraw Draw instance to draw with
draw = ImageDraw.Draw(pil_image)
# Loop through each face found in the unknown image
for (top, right, bottom, left), face_encoding, i in zip(face_locations, face_encodings, range(len(face_encodings))):
# See if the face is a match for the known face(s)
scores = clf.decision_function(face_encoding.flatten().reshape(1, -1))
prediction = clf.predict(face_encoding.flatten().reshape(1, -1))
name_predict = red_name_predict = names_n[int(prediction[0])]
# secondary | |
import theano
import theano.tensor as T
import lasagne as nn
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
_srng = RandomStreams()
class TiedDropoutLayer(nn.layers.Layer):
    """
    Dropout layer that broadcasts the mask across all axes beyond the first two.

    One Bernoulli mask value is drawn per (axis-0, axis-1) pair and broadcast
    over the remaining axes, so all positions along those trailing axes are
    kept or dropped together.  Presumably axes 0 and 1 are (batch, channels)
    — confirm against callers.
    """
    def __init__(self, input_layer, p=0.5, rescale=True, **kwargs):
        # p: probability of dropping a unit.
        # rescale: if True, use "inverted dropout" — retained activations are
        # divided by (1 - p) at train time so no scaling is needed at test time.
        super(TiedDropoutLayer, self).__init__(input_layer, **kwargs)
        self.p = p
        self.rescale = rescale
    def get_output_for(self, input, deterministic=False, *args, **kwargs):
        if deterministic or self.p == 0:
            # deterministic (test-time) pass, or p == 0: dropout is the identity
            return input
        else:
            retain_prob = 1 - self.p
            if self.rescale:
                # `input` is a symbolic Theano expression, so /= rebinds the
                # local name to a new graph node; no data is mutated in place
                input /= retain_prob
            # one mask value per entry of the first two axes
            mask = _srng.binomial(input.shape[:2], p=retain_prob,
                                  dtype=theano.config.floatX)
            # broadcast the mask over every remaining axis ('x' = broadcastable)
            axes = [0, 1] + (['x'] * (input.ndim - 2))
            mask = mask.dimshuffle(*axes)
            return input * mask
class BatchNormLayer(nn.layers.Layer):
    """
    lasagne.layers.BatchNormLayer(incoming, axes='auto', epsilon=1e-4,
    alpha=0.5, nonlinearity=None, mode='low_mem',
    beta=lasagne.init.Constant(0), gamma=lasagne.init.Constant(1),
    mean=lasagne.init.Constant(0), var=lasagne.init.Constant(1), **kwargs)
    Batch Normalization
    This layer implements batch normalization of its inputs, following [1]_:
    .. math::
        y = \\frac{x - \\mu}{\\sqrt{\\sigma^2 + \\epsilon}} \\gamma + \\beta
    That is, the input is normalized to zero mean and unit variance, and then
    linearly transformed.
    During training, :math:`\\mu` and :math:`\\sigma^2` are defined to be the
    mean and variance of the current input mini-batch :math:`x`, and during
    testing, they are replaced with average statistics over the training
    data. Consequently, this layer has four stored parameters: :math:`\\beta`,
    :math:`\\gamma`, and the averages :math:`\\mu` and :math:`\\sigma^2`.
    By default, this layer learns the average statistics as exponential moving
    averages computed during training, so it can be plugged into an existing
    network without any changes of the training procedure (see Notes).
    Parameters
    ----------
    incoming : a :class:`Layer` instance or a tuple
        The layer feeding into this layer, or the expected input shape
    axes : 'auto', int or tuple of int
        The axis or axes to normalize over. If ``'auto'`` (the default),
        normalize over all axes except for the second: this will normalize over
        the minibatch dimension for dense layers, and additionally over all
        spatial dimensions for convolutional layers.
    epsilon : scalar
        Small constant :math:`\\epsilon` added to the variance before taking
        the square root and dividing by it, to avoid numeric problems
    alpha : scalar
        Coefficient for the exponential moving average of batch-wise means and
        standard deviations computed during training; the closer to one, the
        more it will depend on the last batches seen
    nonlinearity : callable or None
        The nonlinearity that is applied to the layer activations. If ``None``
        is provided, the layer will be linear (this is the default).
    mode : {'low_mem', 'high_mem'}
        Specify which batch normalization implementation to use: ``'low_mem'``
        avoids storing intermediate representations and thus requires less
        memory, while ``'high_mem'`` can reuse representations for the backward
        pass and is thus 5-10% faster.
    beta : Theano shared variable, expression, numpy array, or callable
        Initial value, expression or initializer for :math:`\\beta`. Must match
        the incoming shape, skipping all axes in `axes`.
        See :func:`lasagne.utils.create_param` for more information.
    gamma : Theano shared variable, expression, numpy array, or callable
        Initial value, expression or initializer for :math:`\\gamma`. Must
        match the incoming shape, skipping all axes in `axes`.
        See :func:`lasagne.utils.create_param` for more information.
    mean : Theano shared variable, expression, numpy array, or callable
        Initial value, expression or initializer for :math:`\\mu`. Must match
        the incoming shape, skipping all axes in `axes`.
        See :func:`lasagne.utils.create_param` for more information.
    var : Theano shared variable, expression, numpy array, or callable
        Initial value, expression or initializer for :math:`\\sigma^2`. Must
        match the incoming shape, skipping all axes in `axes`.
        See :func:`lasagne.utils.create_param` for more information.
    **kwargs
        Any additional keyword arguments are passed to the :class:`Layer`
        superclass.
    Notes
    -----
    This layer should be inserted between a linear transformation (such as a
    :class:`DenseLayer`, or :class:`Conv2DLayer`) and its nonlinearity. The
    convenience function :func:`batch_norm` modifies an existing layer to
    insert batch normalization in front of its nonlinearity.
    The behavior can be controlled by passing keyword arguments to
    :func:`lasagne.layers.get_output()` when building the output expression
    of any network containing this layer.
    During training, [1]_ normalize each input mini-batch by its statistics
    and update an exponential moving average of the statistics to be used for
    validation. This can be achieved by passing ``deterministic=False``.
    For validation, [1]_ normalize each input mini-batch by the stored
    statistics. This can be achieved by passing ``deterministic=True``.
    For more fine-grained control, ``batch_norm_update_averages`` can be passed
    to update the exponential moving averages (``True``) or not (``False``),
    and ``batch_norm_use_averages`` can be passed to use the exponential moving
    averages for normalization (``True``) or normalize each mini-batch by its
    own statistics (``False``). These settings override ``deterministic``.
    Note that for testing a model after training, [1]_ replace the stored
    exponential moving average statistics by fixing all network weights and
    re-computing average statistics over the training data in a layerwise
    fashion. This is not part of the layer implementation.
    See also
    --------
    batch_norm : Convenience function to apply batch normalization to a layer
    References
    ----------
    .. [1] Ioffe, Sergey and Szegedy, Christian (2015):
        Batch Normalization: Accelerating Deep Network Training by Reducing
        Internal Covariate Shift. http://arxiv.org/abs/1502.03167.
    """
    def __init__(self, incoming, axes='auto', epsilon=1e-4, alpha=0.1,
                 nonlinearity=None, mode='low_mem',
                 beta=nn.init.Constant(0), gamma=nn.init.Constant(1),
                 mean=nn.init.Constant(0), var=nn.init.Constant(1), **kwargs):
        super(BatchNormLayer, self).__init__(incoming, **kwargs)
        if axes == 'auto':
            # default: normalize over all but the second axis
            axes = (0,) + tuple(range(2, len(self.input_shape)))
        elif isinstance(axes, int):
            axes = (axes,)
        self.axes = axes
        self.epsilon = epsilon
        self.alpha = alpha  # EMA coefficient for the stored statistics
        if nonlinearity is None:
            nonlinearity = nn.nonlinearities.identity
        self.nonlinearity = nonlinearity
        self.mode = mode
        # create parameters, ignoring all dimensions in axes
        shape = [size for axis, size in enumerate(self.input_shape)
                 if axis not in self.axes]
        if any(size is None for size in shape):
            raise ValueError("BatchNormLayer needs specified input sizes for "
                             "all axes not normalized over.")
        # beta/gamma are learned; mean/var are running statistics (not trained).
        self.beta = self.add_param(beta, shape, 'beta',
                                   trainable=True, regularizable=False)
        self.gamma = self.add_param(gamma, shape, 'gamma',
                                    trainable=True, regularizable=True)
        self.mean = self.add_param(mean, shape, 'mean',
                                   trainable=False, regularizable=False)
        self.var = self.add_param(var, shape, 'var',
                                  trainable=False, regularizable=False)
    def get_output_for(self, input, deterministic=False, **kwargs):
        input_mean = input.mean(self.axes)
        input_var = input.var(self.axes)
        # Decide whether to use the stored averages or mini-batch statistics
        use_averages = kwargs.get('batch_norm_use_averages',
                                  deterministic)
        if use_averages:
            mean = self.mean
            var = self.var
        else:
            mean = input_mean
            var = input_var
        # Decide whether to update the stored averages
        update_averages = kwargs.get('batch_norm_update_averages',
                                     not deterministic)
        if update_averages:
            # Trick: To update the stored statistics, we create memory-aliased
            # clones of the stored statistics:
            running_mean = theano.clone(self.mean, share_inputs=False)
            running_var = theano.clone(self.var, share_inputs=False)
            # set a default update for them:
            running_mean.default_update = ((1 - self.alpha) * running_mean +
                                           self.alpha * input_mean)
            running_var.default_update = ((1 - self.alpha) * running_var +
                                          self.alpha * input_var)
            # and make sure they end up in the graph without participating in
            # the computation (this way their default_update will be collected
            # and applied, but the computation will be optimized away):
            mean += 0 * running_mean
            var += 0 * running_var
        # prepare dimshuffle pattern inserting broadcastable axes as needed
        param_axes = iter(range(self.beta.ndim))
        pattern = ['x' if input_axis in self.axes
                   else next(param_axes)
                   for input_axis in range(input.ndim)]
        # apply dimshuffle pattern to all parameters
        beta = self.beta.dimshuffle(pattern)
        gamma = self.gamma.dimshuffle(pattern)
        mean = mean.dimshuffle(pattern)
        std = T.sqrt(var + self.epsilon)
        std = std.dimshuffle(pattern)
        # normalize
        # normalized = (input - mean) * (gamma / std) + beta
        normalized = T.nnet.batch_normalization(input, gamma=gamma, beta=beta,
                                                mean=mean, std=std,
                                                mode=self.mode)
        return self.nonlinearity(normalized)
def batch_norm(layer, **kwargs):
"""
Apply batch normalization to an existing layer. This is a convenience
function modifying an existing layer to include batch normalization: It
will steal the layer's nonlinearity if there is one (effectively
introducing the normalization right before the nonlinearity), remove
the layer's bias if there is one (because it would be redundant), and add
a :class:`BatchNormLayer` on top.
Parameters
----------
layer : A :class:`Layer` instance
The layer to apply the normalization to; note that it will be
irreversibly modified as specified above
**kwargs
Any additional keyword arguments are passed on to the
:class:`BatchNormLayer` constructor. Especially note the `mode`
argument, which controls a memory usage to performance tradeoff.
Returns
-------
:class:`BatchNormLayer` instance
A batch normalization layer stacked on the given modified `layer`.
"""
nonlinearity = getattr(layer, 'nonlinearity', None)
if nonlinearity is not None:
layer.nonlinearity = nn.nonlinearities.identity
if hasattr(layer, 'b'):
del layer.params[layer.b]
layer.b = | |
# file: src/huggingface_hub/repository.py
import logging
import os
import re
import subprocess
from contextlib import contextmanager
from pathlib import Path
from typing import List, Optional, Union
from huggingface_hub.constants import REPO_TYPES_URL_PREFIXES
from .hf_api import ENDPOINT, HfApi, HfFolder, repo_type_and_id_from_hf_id
from .lfs import LFS_MULTIPART_UPLOAD_COMMAND
logger = logging.getLogger(__name__)
def is_git_repo(folder: Union[str, Path]) -> bool:
    """
    Check whether ``folder`` is the root of a git repository.

    Returns False early when no ``.git`` entry exists. This also avoids
    invoking git with a possibly non-existent ``cwd``, which would make
    ``subprocess.run`` raise ``FileNotFoundError`` instead of letting this
    predicate return False.
    """
    if not os.path.exists(os.path.join(folder, ".git")):
        return False
    # `git branch` exits with 0 only when run inside a git work tree.
    git_branch = subprocess.run(
        "git branch".split(), cwd=folder, stdout=subprocess.PIPE, stderr=subprocess.PIPE
    )
    return git_branch.returncode == 0
def is_local_clone(folder: Union[str, Path], remote_url: str):
    """
    Return True when ``folder`` is a git clone whose configured remotes
    include ``remote_url``. Basic-auth credentials embedded in the urls are
    ignored for the comparison.
    """
    if not is_git_repo(folder):
        return False
    listing = subprocess.run(
        "git remote -v".split(),
        stderr=subprocess.PIPE,
        stdout=subprocess.PIPE,
        check=True,
        encoding="utf-8",
        cwd=folder,
    ).stdout
    def _strip_auth(url):
        # https://user:token@host/... -> https://host/...
        return re.sub(r"https://.*@", "https://", url)
    wanted = _strip_auth(remote_url)
    return wanted in [_strip_auth(token) for token in listing.split()]
class Repository:
"""
Helper class to wrap the git and git-lfs commands.
The aim is to facilitate interacting with huggingface.co hosted model or dataset repos,
though not a lot here (if any) is actually specific to huggingface.co.
"""
    def __init__(
        self,
        local_dir: str,
        clone_from: Optional[str] = None,
        repo_type: Optional[str] = None,
        use_auth_token: Union[bool, str, None] = None,
        git_user: Optional[str] = None,
        git_email: Optional[str] = None,
    ):
        """
        Instantiate a local clone of a git repo.
        If specifying a `clone_from`:
        will clone an existing remote repository, for instance one
        that was previously created using ``HfApi().create_repo(token=huggingface_token, name=repo_name)``.
        ``Repository`` uses the local git credentials by default, but if required, the ``huggingface_token``
        as well as the git ``user`` and the ``email`` can be explicitly specified.
        If `clone_from` is used, and the repository is being instantiated into a non-empty directory,
        e.g. a directory with your trained model files, it will automatically merge them.
        Args:
            local_dir (``str``):
                path (e.g. ``'my_trained_model/'``) to the local directory, where the ``Repository`` will be initialized.
            clone_from (``str``, `optional`):
                repository url (e.g. ``'https://huggingface.co/philschmid/playground-tests'``).
            repo_type (``str``, `optional`):
                To set when creating a repo: set to "dataset" or "space" if creating a dataset or space, default is model.
            use_auth_token (``str`` or ``bool``, `optional`, defaults ``None``):
                huggingface_token can be extract from ``HfApi().login(username, password)`` and is used to authenticate against the hub
                (useful from Google Colab for instance).
            git_user (``str``, `optional`, defaults ``None``):
                will override the ``git config user.name`` for committing and pushing files to the hub.
            git_email (``str``, `optional`, defaults ``None``):
                will override the ``git config user.email`` for committing and pushing files to the hub.

        Raises:
            ValueError: when no ``clone_from`` is given and ``local_dir`` is not
                already a valid git clone.
            EnvironmentError: when git or git-lfs is not installed (via
                :meth:`check_git_versions`).
        """
        # Create the working directory if needed and remember its absolute path.
        os.makedirs(local_dir, exist_ok=True)
        self.local_dir = os.path.join(os.getcwd(), local_dir)
        self.repo_type = repo_type
        # Fail fast if git / git-lfs are missing.
        self.check_git_versions()
        # Resolve the token: an explicit string wins, True means "use the
        # token cached by huggingface-cli login", anything else means none.
        if isinstance(use_auth_token, str):
            self.huggingface_token = use_auth_token
        elif use_auth_token:
            self.huggingface_token = HfFolder.get_token()
        else:
            self.huggingface_token = None
        if clone_from is not None:
            # NOTE: `self.clone_from` is the method; the parameter shadows it
            # only inside this constructor.
            self.clone_from(repo_url=clone_from)
        else:
            if is_git_repo(self.local_dir):
                logger.debug("[Repository] is a valid git repo")
            else:
                logger.error(
                    "If not specifying `clone_from`, you need to pass Repository a valid git clone."
                )
                raise ValueError(
                    "If not specifying `clone_from`, you need to pass Repository a valid git clone."
                )
        # overrides .git config if user and email is provided.
        if git_user is not None or git_email is not None:
            self.git_config_username_and_email(git_user, git_email)
def check_git_versions(self):
"""
print git and git-lfs versions, raises if they aren't installed.
"""
try:
git_version = subprocess.run(
["git", "--version"],
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
check=True,
encoding="utf-8",
).stdout.strip()
except FileNotFoundError:
raise EnvironmentError(
"Looks like you do not have git installed, please install."
)
try:
lfs_version = subprocess.run(
["git-lfs", "--version"],
encoding="utf-8",
check=True,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
).stdout.strip()
except FileNotFoundError:
raise EnvironmentError(
"Looks like you do not have git-lfs installed, please install."
" You can install from https://git-lfs.github.com/."
" Then run `git lfs install` (you only have to do this once)."
)
logger.info(git_version + "\n" + lfs_version)
def clone_from(self, repo_url: str, use_auth_token: Union[bool, str, None] = None):
"""
Clone from a remote. If the folder already exists, will try to clone the repository within it.
If this folder is a git repository with linked history, will try to update the repository.
"""
token = use_auth_token if use_auth_token is not None else self.huggingface_token
api = HfApi()
if token is not None:
user, valid_organisations = api.whoami(token)
repo_type, namespace, repo_id = repo_type_and_id_from_hf_id(repo_url)
if namespace is None:
namespace = user
if repo_type is not None:
self.repo_type = repo_type
repo_url = ENDPOINT + "/"
if self.repo_type in REPO_TYPES_URL_PREFIXES:
repo_url += REPO_TYPES_URL_PREFIXES[self.repo_type]
repo_url += f"{namespace}/{repo_id}"
repo_url = repo_url.replace("https://", f"https://user:{token}@")
if namespace == user or namespace in valid_organisations:
api.create_repo(
token,
repo_id,
repo_type=self.repo_type,
organization=namespace,
exist_ok=True,
)
# For error messages, it's cleaner to show the repo url without the token.
clean_repo_url = re.sub(r"https://.*@", "https://", repo_url)
try:
subprocess.run(
"git lfs install".split(),
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
check=True,
encoding="utf-8",
)
# checks if repository is initialized in a empty repository or in one with files
if len(os.listdir(self.local_dir)) == 0:
logger.debug(f"Cloning {clean_repo_url} into local empty directory.")
subprocess.run(
["git", "clone", repo_url, "."],
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
check=True,
encoding="utf-8",
cwd=self.local_dir,
)
else:
# Check if the folder is the root of a git repository
in_repository = is_git_repo(self.local_dir)
if in_repository:
if is_local_clone(self.local_dir, repo_url):
logger.debug(
f"{self.local_dir} is already a clone of {clean_repo_url}. Make sure you pull the latest"
"changes with `repo.git_pull()`."
)
else:
output = subprocess.run(
"git remote get-url origin".split(),
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
encoding="utf-8",
cwd=self.local_dir,
)
error_msg = (
f"Tried to clone {clean_repo_url} in an unrelated git repository.\nIf you believe this is "
f"an error, please add a remote with the following URL: {clean_repo_url}."
)
if output.returncode == 0:
clean_local_remote_url = re.sub(
r"https://.*@", "https://", output.stdout
)
error_msg += f"\nLocal path has its origin defined as: {clean_local_remote_url}"
raise EnvironmentError(error_msg)
if not in_repository:
raise EnvironmentError(
"Tried to clone a repository in a non-empty folder that isn't a git repository. If you really "
"want to do this, do it manually:\m"
"git init && git remote add origin && git pull origin main\n"
" or clone repo to a new folder and move your existing files there afterwards."
)
except subprocess.CalledProcessError as exc:
raise EnvironmentError(exc.stderr)
def git_config_username_and_email(
self, git_user: Optional[str] = None, git_email: Optional[str] = None
):
"""
sets git user name and email (only in the current repo)
"""
try:
if git_user is not None:
subprocess.run(
["git", "config", "user.name", git_user],
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
check=True,
encoding="utf-8",
cwd=self.local_dir,
)
if git_email is not None:
subprocess.run(
["git", "config", "user.email", git_email],
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
check=True,
encoding="utf-8",
cwd=self.local_dir,
)
except subprocess.CalledProcessError as exc:
raise EnvironmentError(exc.stderr)
def git_head_hash(self) -> str:
"""
Get commit sha on top of HEAD.
"""
try:
p = subprocess.run(
"git rev-parse HEAD".split(),
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
encoding="utf-8",
check=True,
cwd=self.local_dir,
)
return p.stdout.strip()
except subprocess.CalledProcessError as exc:
raise EnvironmentError(exc.stderr)
def git_remote_url(self) -> str:
"""
Get URL to origin remote.
"""
try:
p = subprocess.run(
"git config --get remote.origin.url".split(),
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
encoding="utf-8",
check=True,
cwd=self.local_dir,
)
url = p.stdout.strip()
# Strip basic auth info.
return re.sub(r"https://.*@", "https://", url)
except subprocess.CalledProcessError as exc:
raise EnvironmentError(exc.stderr)
def git_head_commit_url(self) -> str:
"""
Get URL to last commit on HEAD
We assume it's been pushed, and the url scheme is
the same one as for GitHub or HuggingFace.
"""
sha = self.git_head_hash()
url = self.git_remote_url()
if url.endswith("/"):
url = url[:-1]
return f"{url}/commit/{sha}"
def lfs_track(self, patterns: Union[str, List[str]]):
"""
Tell git-lfs to track those files.
"""
if isinstance(patterns, str):
patterns = [patterns]
try:
for pattern in patterns:
subprocess.run(
["git", "lfs", "track", pattern],
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
check=True,
encoding="utf-8",
cwd=self.local_dir,
)
except subprocess.CalledProcessError as exc:
raise EnvironmentError(exc.stderr)
def lfs_enable_largefiles(self):
"""
HF-specific. This enables upload support of files >5GB.
"""
try:
subprocess.run(
"git config lfs.customtransfer.multipart.path huggingface-cli".split(),
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
check=True,
encoding="utf-8",
cwd=self.local_dir,
)
subprocess.run(
f"git config lfs.customtransfer.multipart.args {LFS_MULTIPART_UPLOAD_COMMAND}".split(),
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
check=True,
encoding="utf-8",
cwd=self.local_dir,
)
except subprocess.CalledProcessError as exc:
raise EnvironmentError(exc.stderr)
def git_pull(self, rebase: Optional[bool] = False):
"""
git pull
"""
args = "git pull".split()
if rebase:
args.append("--rebase")
try:
subprocess.run(
args,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
check=True,
encoding="utf-8",
cwd=self.local_dir,
)
except subprocess.CalledProcessError as exc:
raise EnvironmentError(exc.stderr)
def git_add(self, pattern="."):
"""
git add
"""
try:
subprocess.run(
["git", "add", pattern],
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
check=True,
encoding="utf-8",
cwd=self.local_dir,
)
except subprocess.CalledProcessError as exc:
raise EnvironmentError(exc.stderr)
def git_commit(self, commit_message="commit files to HF hub"):
"""
git commit
"""
try:
subprocess.run(
["git", "commit", "-m", commit_message],
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
check=True,
encoding="utf-8",
cwd=self.local_dir,
)
except subprocess.CalledProcessError as exc:
if len(exc.stderr) > 0:
raise | |
# file eulfedora/api.py
#
# Copyright 2010,2011 Emory University Libraries
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
import csv
import logging
import requests
import time
import warnings
from requests_toolbelt import MultipartEncoder, MultipartEncoderMonitor, \
user_agent
import six
from six.moves.urllib.parse import urljoin
from eulfedora import __version__ as eulfedora_version
from eulfedora.util import datetime_to_fedoratime, \
RequestFailed, ChecksumMismatch, PermissionDenied, parse_rdf, \
ReadableIterator, force_bytes
logger = logging.getLogger(__name__)
# low-level wrappers
def _safe_str(s):
    """
    Coerce ``s`` into a urlencode-friendly string: utf-8 encode text strings,
    stringify everything else.
    """
    # BUGFIX: the builtin `unicode` does not exist on Python 3, so this raised
    # NameError there. six.text_type is `unicode` on py2 and `str` on py3,
    # matching this module's existing six-based compatibility layer.
    if isinstance(s, six.text_type):
        return s.encode('utf-8')
    return str(s)
def _get_items(query, doseq):
    """
    Yield ``(key, value)`` pairs from a dict or pair sequence, emulating the
    ``doseq`` logic of ``urllib.urlencode``: with ``doseq`` true, iterable
    values are expanded into one pair per element.
    """
    if hasattr(query, 'items'):
        query = query.items()
    for k, v in query:
        # BUGFIX: `basestring` is Python 2 only; six.string_types covers both
        # major versions, matching this module's six-based compat layer.
        if isinstance(v, six.string_types):
            yield k, v
        elif doseq:
            # BUGFIX: the old `elif doseq and iter(v):` raised TypeError for
            # non-iterable values instead of falling through to str(v); use
            # EAFP so scalars are stringified like urlencode does.
            try:
                elements = iter(v)
            except TypeError:
                yield k, str(v)
            else:
                for e in elements:
                    yield k, e
        else:
            yield k, str(v)
# Cache of requests.Session objects keyed by Fedora base url, so connections
# are re-used across API instances pointing at the same server.
_sessions = {}
class HTTP_API_Base(object):
    """
    Thin wrapper around a shared :class:`requests.Session` for talking to a
    Fedora repository over HTTP: normalizes the base url, carries optional
    basic-auth credentials, logs request timing, and maps HTTP error codes
    onto eulfedora exception types.
    """
    def __init__(self, base_url, username=None, password=None):
        # BUGFIX: the default for `password` was the redaction artifact
        # `<PASSWORD>`, which is a syntax error; None matches the
        # `username=None` pattern and the `is not None` check below.
        # standardize url format; ensure we have a trailing slash,
        # adding one if necessary
        if not base_url.endswith('/'):
            base_url = base_url + '/'
        # TODO: can we re-use sessions safely across instances?
        global _sessions
        # check for an existing session for this fedora
        if base_url in _sessions:
            self.session = _sessions[base_url]
        else:
            # create a new session and add to global sessions
            self.session = requests.Session()
            # Set headers to be passed with every request
            # NOTE: only headers that will be common for *all* requests
            # to this fedora should be set in the session
            # (i.e., do NOT include auth information here)
            self.session.headers = {
                'User-Agent': user_agent('eulfedora', eulfedora_version),
                'verify': True,  # verify SSL certs by default
            }
            _sessions[base_url] = self.session
        self.base_url = base_url
        self.username = username
        self.password = password
        self.request_options = {}
        if self.username is not None:
            # store basic auth option to pass when making requests
            self.request_options['auth'] = (self.username, self.password)
    def absurl(self, rel_url):
        """Resolve ``rel_url`` against the configured base url."""
        return urljoin(self.base_url, rel_url)
    def prep_url(self, url):
        """Hook for subclasses to adjust request urls; defaults to :meth:`absurl`."""
        return self.absurl(url)
    # thinnest possible wrappers around requests calls
    # - add auth, make urls absolute
    def _make_request(self, reqmeth, url, *args, **kwargs):
        """
        Issue a request through ``reqmeth`` (a bound session method) with the
        instance's stored options, logging the elapsed time.

        :raises PermissionDenied: on HTTP 401/403
        :raises ChecksumMismatch: on an HTTP 500 whose body reports
            ``Checksum Mismatch``
        :raises RequestFailed: on any other 4xx/5xx response
        """
        # copy base request options and update with any keyword args
        rqst_options = self.request_options.copy()
        rqst_options.update(kwargs)
        start = time.time()
        response = reqmeth(self.prep_url(url), *args, **rqst_options)
        logger.debug('%s %s=>%d: %f sec' % (reqmeth.__name__.upper(), url,
                                            response.status_code, time.time() - start))
        # NOTE: currently doesn't do anything with 3xx responses
        # (likely handled for us by requests)
        if response.status_code >= requests.codes.bad:  # 400 or worse
            # separate out 401 and 403 (permission errors) to enable
            # special handling in client code.
            if response.status_code in (requests.codes.unauthorized,
                                        requests.codes.forbidden):
                raise PermissionDenied(response)
            elif response.status_code == requests.codes.server_error:
                # check response content to determine if this is a
                # ChecksumMismatch or a more generic error
                if 'Checksum Mismatch' in response.text:
                    raise ChecksumMismatch(response)
                else:
                    raise RequestFailed(response)
            else:
                raise RequestFailed(response)
        return response
    def get(self, *args, **kwargs):
        return self._make_request(self.session.get, *args, **kwargs)
    def head(self, *args, **kwargs):
        return self._make_request(self.session.head, *args, **kwargs)
    def put(self, *args, **kwargs):
        return self._make_request(self.session.put, *args, **kwargs)
    def post(self, *args, **kwargs):
        return self._make_request(self.session.post, *args, **kwargs)
    def delete(self, *args, **kwargs):
        return self._make_request(self.session.delete, *args, **kwargs)
    # also available: head, patch
class REST_API(HTTP_API_Base):
"""Python object for accessing
`Fedora's REST API <https://wiki.duraspace.org/display/FEDORA38/REST+API>`_.
Most methods return an HTTP :class:`requests.models.Response`, which
provides access to status code and headers as well as content. Many
responses with XML content can be loaded using models in
:mod:`eulfedora.xml`.
"""
# always return xml response instead of html version
format_xml = {'format': 'xml'}
### API-A methods (access) ####
# describeRepository not implemented in REST, use API-A-LITE version
def findObjects(self, query=None, terms=None, pid=True, chunksize=None, session_token=None):
"""
Wrapper function for `Fedora REST API findObjects <http://fedora-commons.org/confluence/display/FCR30/REST+API#RESTAPI-findObjects>`_
and `Fedora REST API resumeFindObjects <http://fedora-commons.org/confluence/display/FCR30/REST+API#RESTAPI-resumeFindObjects>`_
One and only one of query or terms must be specified.
:param query: string of fields and terms to search for
:param terms: phrase search across all fields
:param pid: include pid in search results
:param chunksize: number of objects to return at a time
:param session_token: get an additional chunk of results from a prior search
:param parse: optional data parser function; defaults to returning
raw string data
:rtype: :class:`requests.models.Response`
"""
if query is not None and terms is not None:
raise Exception("Cannot findObject with both query ('%s') and terms ('%s')" % (query, terms))
http_args = {'resultFormat': 'xml'}
if query is not None:
http_args['query'] = query
if terms is not None:
http_args['terms'] = terms
if pid:
http_args['pid'] = 'true'
if session_token:
http_args['sessionToken'] = session_token
if chunksize:
http_args['maxResults'] = chunksize
return self.get('objects', params=http_args)
def getDatastreamDissemination(self, pid, dsID, asOfDateTime=None, stream=False,
head=False, rqst_headers={}):
"""Get a single datastream on a Fedora object; optionally, get the version
as of a particular date time.
:param pid: object pid
:param dsID: datastream id
:param asOfDateTime: optional datetime; ``must`` be a non-naive datetime
so it can be converted to a date-time format Fedora can understand
:param stream: return a streaming response (default: False); use
is recommended for large datastreams
:param head: return a HEAD request instead of GET (default: False)
:param rqst_headers: request headers to be passed through to Fedora,
such as http range requests
:rtype: :class:`requests.models.Response`
"""
# /objects/{pid}/datastreams/{dsID}/content ? [asOfDateTime] [download]
http_args = {}
if asOfDateTime:
http_args['asOfDateTime'] = datetime_to_fedoratime(asOfDateTime)
url = 'objects/%(pid)s/datastreams/%(dsid)s/content' % \
{'pid': pid, 'dsid': dsID}
if head:
reqmethod = self.head
else:
reqmethod = self.get
return reqmethod(url, params=http_args, stream=stream, headers=rqst_headers)
# NOTE:
def getDissemination(self, pid, sdefPid, method, method_params={}):
'''Get a service dissemination.
.. NOTE:
This method not available in REST API until Fedora 3.3
:param pid: object pid
:param sDefPid: service definition pid
:param method: service method name
:param method_params: method parameters
:rtype: :class:`requests.models.Response`
'''
# /objects/{pid}/methods/{sdefPid}/{method} ? [method parameters]
uri = 'objects/%(pid)s/methods/%(sdefpid)s/%(method)s' % \
{'pid': pid, 'sdefpid': sdefPid, 'method': method}
return self.get(uri, params=method_params)
def getObjectHistory(self, pid):
'''Get the history for an object in XML format.
:param pid: object pid
:rtype: :class:`requests.models.Response`
'''
# /objects/{pid}/versions ? [format]
return self.get('objects/%(pid)s/versions' % {'pid': pid},
params=self.format_xml)
def getObjectProfile(self, pid, asOfDateTime=None):
"""Get top-level information aboug a single Fedora object; optionally,
retrieve information as of a particular date-time.
:param pid: object pid
:param asOfDateTime: optional datetime; ``must`` be a non-naive datetime
so it can be converted to a date-time format Fedora can understand
:rtype: :class:`requests.models.Response`
"""
# /objects/{pid} ? [format] [asOfDateTime]
http_args = {}
if asOfDateTime:
http_args['asOfDateTime'] = datetime_to_fedoratime(asOfDateTime)
http_args.update(self.format_xml)
url = 'objects/%(pid)s' % {'pid': pid}
return self.get(url, params=http_args)
def listDatastreams(self, pid):
"""
Get a list of all datastreams for a specified object.
Wrapper function for `Fedora REST API listDatastreams <http://fedora-commons.org/confluence/display/FCR30/REST+API#RESTAPI-listDatastreams>`_
:param pid: string object pid
:param parse: optional data parser function; defaults to returning
raw string data
:rtype: :class:`requests.models.Response`
"""
# /objects/{pid}/datastreams ? [format, datetime]
return self.get('objects/%(pid)s/datastreams' % {'pid': pid},
params=self.format_xml)
def listMethods(self, pid, sdefpid=None):
'''List available service methods.
:param pid: object pid
:param sDefPid: service definition pid
:rtype: :class:`requests.models.Response`
'''
# /objects/{pid}/methods ? [format, datetime]
# /objects/{pid}/methods/{sdefpid} ? [format, datetime]
## NOTE: getting an error when sdefpid is specified; fedora issue?
uri = 'objects/%(pid)s/methods' % {'pid': pid}
if sdefpid:
uri += '/' + sdefpid
return self.get(uri, params=self.format_xml)
### API-M methods (management) ####
def addDatastream(self, pid, dsID, dsLabel=None, mimeType=None, logMessage=None,
controlGroup=None, dsLocation=None, altIDs=None, versionable=None,
dsState=None, formatURI=None, checksumType=None, checksum=None, content=None):
'''Add a new datastream to an existing object. On success,
the return response should have a status of 201 Created;
if there is an error, the response body includes the error message.
:param pid: object pid
:param dsID: id for the new datastream
:param dslabel: label for the new datastream (optional)
:param mimeType: mimetype for the new datastream (optional)
:param logMessage: log message for the object history (optional)
:param controlGroup: control | |
# gh_stars: 10-100
##
# Copyright (c) 2013-2017 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
"""
XML directory service tests.
"""
from time import sleep
from uuid import UUID
from textwrap import dedent
from twisted.trial import unittest
from twisted.python.constants import Names, NamedConstant
from twisted.python.filepath import FilePath
from twisted.internet.defer import inlineCallbacks
from ..idirectory import DirectoryAvailabilityError, NoSuchRecordError
from ..expression import (
CompoundExpression, Operand,
MatchExpression, MatchType, MatchFlags
)
from ..xml import ParseError, DirectoryService, DirectoryRecord
from . import test_index
class BaseTest(object):
    """
    Shared helpers for XML directory service test cases.
    """

    def service(self, subClass=None, xmlData=None):
        """
        Create a fresh XML directory service backed by a temporary file.
        """
        return xmlService(
            self.mktemp(),
            xmlData=xmlData,
            serviceClass=subClass
        )

    def assertRecords(self, records, uids):
        """
        Assert that the UIDs of ``records`` are exactly ``uids``
        (order-insensitive).
        """
        found = frozenset(record.uid for record in records)
        expected = frozenset(uids)
        self.assertEquals(found, expected)
class DirectoryServiceConvenienceTestMixIn(object):
    """
    Tests for the convenience lookup methods of a directory service:
    by UID, GUID, record type, short name, and email address.  Requires
    ``self.service()`` (see BaseTest) returning a service loaded with the
    standard fixture records (__wsanchez__, __sagen__, etc.).
    """

    @inlineCallbacks
    def test_recordWithUID(self):
        """
        An unknown UID yields None; a known UID yields its record.
        """
        service = self.service()

        record = (yield service.recordWithUID(u"__null__"))
        self.assertEquals(record, None)

        record = (yield service.recordWithUID(u"__wsanchez__"))
        self.assertEquals(record.uid, u"__wsanchez__")

    @inlineCallbacks
    def test_recordWithGUID(self):
        """
        An unknown GUID yields None; a known GUID yields its record.
        """
        service = self.service()

        record = (
            yield service.recordWithGUID(
                UUID("6C495FCD-7E78-4D5C-AA66-BC890AD04C9D")
            )
        )
        self.assertEquals(record, None)

        record = (
            yield service.recordWithGUID(
                UUID("A3B1158F-0564-4F5B-81E4-A89EA5FF81B0")
            )
        )
        self.assertEquals(record.uid, u"__dre__")

    @inlineCallbacks
    def test_recordsWithRecordType_unknown(self):
        """
        Querying with a record type the service does not know returns no
        records.
        """
        service = self.service()

        # NOTE(review): UnknownConstant is not defined or imported in this
        # module as shown — presumably a Names subclass with an 'unknown'
        # NamedConstant defined elsewhere; verify.
        records = (
            yield service.recordsWithRecordType(UnknownConstant.unknown)
        )
        self.assertEquals(set(records), set())

    @inlineCallbacks
    def test_recordsWithRecordType(self):
        """
        Querying by record type returns exactly the users / groups in the
        fixture.
        """
        service = self.service()

        records = (
            yield service.recordsWithRecordType(service.recordType.user)
        )
        self.assertRecords(
            records,
            (
                u"__wsanchez__",
                u"__glyph__",
                u"__sagen__",
                u"__cdaboo__",
                u"__dre__",
                u"__exarkun__",
                u"__dreid__",
                u"__alyssa__",
                u"__joe__",
            ),
        )

        records = (
            yield service.recordsWithRecordType(service.recordType.group)
        )
        self.assertRecords(
            records,
            (
                u"__calendar-dev__",
                u"__twisted__",
                u"__developers__",
            ),
        )

    @inlineCallbacks
    def test_recordWithShortName(self):
        """
        Short-name lookup: an unknown name yields None; any of a record's
        short names yields that record.
        """
        service = self.service()

        record = (
            yield service.recordWithShortName(
                service.recordType.user,
                u"null",
            )
        )
        self.assertEquals(record, None)

        record = (
            yield service.recordWithShortName(
                service.recordType.user,
                u"wsanchez",
            )
        )
        self.assertEquals(record.uid, u"__wsanchez__")

        record = (
            yield service.recordWithShortName(
                service.recordType.user,
                u"wilfredo_sanchez",
            )
        )
        self.assertEquals(record.uid, u"__wsanchez__")

    @inlineCallbacks
    def test_recordsWithEmailAddress(self):
        """
        Email lookup returns every record carrying the address.
        """
        service = self.service()

        records = (
            yield service.recordsWithEmailAddress(
                u"<EMAIL>"
            )
        )
        self.assertRecords(records, (u"__wsanchez__",))

        records = (
            yield service.recordsWithEmailAddress(
                u"<EMAIL>"
            )
        )
        self.assertRecords(records, (u"__wsanchez__",))

        records = (
            yield service.recordsWithEmailAddress(
                u"<EMAIL>"
            )
        )
        self.assertRecords(records, (u"__sagen__", u"__dre__"))

    @inlineCallbacks
    def test_limitResults(self):
        """
        Make sure limitResults does limit results.
        """
        service = self.service()

        records = (
            yield service.recordsWithRecordType(
                service.recordType.user,
                limitResults=3
            )
        )
        self.assertEquals(len(records), 3)

        # A limit larger than the fixture returns everything (9 users).
        records = (
            yield service.recordsWithRecordType(
                service.recordType.user,
                limitResults=1000
            )
        )
        self.assertEquals(len(records), 9)
class DirectoryServiceRealmTestMixIn(object):
    """
    Tests for realm name handling.
    """

    def test_realmNameImmutable(self):
        """
        Assigning to ``realmName`` on a service must raise AttributeError.
        """
        def attemptRebind():
            svc = self.service()
            svc.realmName = u"foo"

        self.assertRaises(AttributeError, attemptRebind)
class DirectoryServiceQueryTestMixIn(object):
    """
    Tests for expression-based queries (``recordsFromExpression``):
    AND/OR compound expressions, the NOT flag, case-insensitive matching,
    startsWith/contains match types — each exercised against both an
    indexed field (shortNames, emailAddresses) and an unindexed field
    (fullNames) — plus record-type filtering.

    Requires ``self.service()`` (see BaseTest) and a ``service.query()``
    helper (provided by the concrete test class) that builds a
    MatchExpression for a field.
    """

    @inlineCallbacks
    def test_queryAnd(self):
        """
        AND of two expressions intersects the result sets.
        """
        service = self.service()
        records = yield service.recordsFromExpression(
            CompoundExpression(
                (
                    service.query(u"emailAddresses", u"<EMAIL>"),
                    service.query(u"shortNames", u"sagen"),
                ),
                operand=Operand.AND
            )
        )
        self.assertRecords(records, (u"__sagen__",))

    @inlineCallbacks
    def test_queryAndNoneFirst(self):
        """
        Test optimized case, where first expression yields no results.
        """
        service = self.service()
        records = yield service.recordsFromExpression(
            CompoundExpression(
                (
                    service.query(u"emailAddresses", u"<EMAIL>"),
                    service.query(u"shortNames", u"sagen"),
                ),
                operand=Operand.AND
            )
        )
        self.assertRecords(records, ())

    @inlineCallbacks
    def test_queryOr(self):
        """
        OR of two expressions unions the result sets.
        """
        service = self.service()
        records = yield service.recordsFromExpression(
            CompoundExpression(
                (
                    service.query(u"emailAddresses", u"<EMAIL>"),
                    service.query(u"shortNames", u"wsanchez"),
                ),
                operand=Operand.OR
            )
        )
        self.assertRecords(
            records,
            (u"__sagen__", u"__dre__", u"__wsanchez__")
        )

    @inlineCallbacks
    def test_queryNot(self):
        """
        The NOT flag on an indexed field excludes matching records from
        an AND compound.
        """
        service = self.service()
        records = yield service.recordsFromExpression(
            CompoundExpression(
                (
                    service.query(
                        u"emailAddresses", u"<EMAIL>"
                    ),
                    service.query(
                        u"shortNames", u"sagen",
                        flags=MatchFlags.NOT
                    ),
                ),
                operand=Operand.AND
            )
        )
        self.assertRecords(records, (u"__dre__",))

    @inlineCallbacks
    def test_queryNotNoIndex(self):
        """
        The NOT flag also works against an unindexed field (fullNames).
        """
        service = self.service()
        records = yield service.recordsFromExpression(
            CompoundExpression(
                (
                    service.query(u"emailAddresses", u"<EMAIL>"),
                    service.query(
                        u"fullNames", u"<NAME>",
                        flags=MatchFlags.NOT
                    ),
                ),
                operand=Operand.AND
            )
        )
        self.assertRecords(records, (u"__sagen__",))

    @inlineCallbacks
    def test_queryCaseInsensitive(self):
        """
        caseInsensitive flag on an indexed field.
        """
        service = self.service()
        records = yield service.recordsFromExpression(
            service.query(
                u"shortNames", u"SagEn",
                flags=MatchFlags.caseInsensitive
            )
        )
        self.assertRecords(records, (u"__sagen__",))

    @inlineCallbacks
    def test_queryCaseInsensitiveNoIndex(self):
        """
        caseInsensitive flag on an unindexed field.
        """
        service = self.service()
        records = yield service.recordsFromExpression(
            service.query(
                u"fullNames", u"<NAME>",
                flags=MatchFlags.caseInsensitive
            )
        )
        self.assertRecords(records, (u"__sagen__",))

    @inlineCallbacks
    def test_queryStartsWith(self):
        """
        startsWith match on an indexed field.
        """
        service = self.service()
        records = yield service.recordsFromExpression(
            service.query(
                u"shortNames", u"wil",
                matchType=MatchType.startsWith
            )
        )
        self.assertRecords(records, (u"__wsanchez__",))

    @inlineCallbacks
    def test_queryStartsWithNoIndex(self):
        """
        startsWith match on an unindexed field.
        """
        service = self.service()
        records = yield service.recordsFromExpression(
            service.query(
                u"fullNames", u"Wilfredo",
                matchType=MatchType.startsWith
            )
        )
        self.assertRecords(records, (u"__wsanchez__",))

    @inlineCallbacks
    def test_queryStartsWithNot(self):
        """
        NOT + startsWith excludes every record with a matching short name.
        """
        service = self.service()
        records = yield service.recordsFromExpression(
            service.query(
                u"shortNames", u"w",
                matchType=MatchType.startsWith,
                flags=MatchFlags.NOT,
            )
        )
        self.assertRecords(
            records,
            (
                u"__alyssa__",
                u"__calendar-dev__",
                u"__cdaboo__",
                u"__developers__",
                u"__dre__",
                u"__dreid__",
                u"__exarkun__",
                u"__glyph__",
                u"__joe__",
                u"__sagen__",
                u"__twisted__",
            ),
        )

    @inlineCallbacks
    def test_queryStartsWithNotAny(self):
        """
        FIXME?: In this case, the record __wsanchez__ has two
        shortNames, and one doesn't match the query. Should it be
        included or not? It is, because one matches the query, but
        should NOT require that all match?
        """
        service = self.service()
        records = yield service.recordsFromExpression(
            service.query(
                u"shortNames", u"wil",
                matchType=MatchType.startsWith,
                flags=MatchFlags.NOT,
            )
        )
        self.assertRecords(
            records,
            (
                u"__alyssa__",
                u"__calendar-dev__",
                u"__cdaboo__",
                u"__developers__",
                u"__dre__",
                u"__dreid__",
                u"__exarkun__",
                u"__glyph__",
                u"__joe__",
                u"__sagen__",
                u"__twisted__",
                u"__wsanchez__",
            ),
        )

    @inlineCallbacks
    def test_queryStartsWithNotNoIndex(self):
        """
        NOT + startsWith on an unindexed field.
        """
        service = self.service()
        records = yield service.recordsFromExpression(
            service.query(
                u"fullNames", u"Andre",
                matchType=MatchType.startsWith,
                flags=MatchFlags.NOT,
            )
        )
        self.assertRecords(
            records,
            (
                u"__alyssa__",
                u"__calendar-dev__",
                u"__cdaboo__",
                u"__developers__",
                u"__dreid__",
                u"__exarkun__",
                u"__glyph__",
                u"__joe__",
                u"__sagen__",
                u"__twisted__",
                u"__wsanchez__",
            ),
        )

    @inlineCallbacks
    def test_queryStartsWithCaseInsensitive(self):
        """
        caseInsensitive + startsWith on an indexed field.
        """
        service = self.service()
        records = yield service.recordsFromExpression(
            service.query(
                u"shortNames", u"WIL",
                matchType=MatchType.startsWith,
                flags=MatchFlags.caseInsensitive,
            )
        )
        self.assertRecords(records, (u"__wsanchez__",))

    @inlineCallbacks
    def test_queryStartsWithCaseInsensitiveNoIndex(self):
        """
        caseInsensitive + startsWith on an unindexed field.
        """
        service = self.service()
        records = yield service.recordsFromExpression(
            service.query(
                u"fullNames", u"wilfrEdo",
                matchType=MatchType.startsWith,
                flags=MatchFlags.caseInsensitive,
            )
        )
        self.assertRecords(records, (u"__wsanchez__",))

    @inlineCallbacks
    def test_queryContains(self):
        """
        contains match on an indexed field.
        """
        service = self.service()
        records = yield service.recordsFromExpression(
            service.query(
                u"shortNames", u"sanchez",
                matchType=MatchType.contains,
            )
        )
        self.assertRecords(records, (u"__wsanchez__",))

    @inlineCallbacks
    def test_queryContainsNoIndex(self):
        """
        contains match on an unindexed field.
        """
        service = self.service()
        records = yield service.recordsFromExpression(
            service.query(
                u"fullNames", u"fred",
                matchType=MatchType.contains,
            )
        )
        self.assertRecords(records, (u"__wsanchez__",))

    @inlineCallbacks
    def test_queryContainsNot(self):
        """
        NOT + contains on an indexed field.
        """
        service = self.service()
        records = yield service.recordsFromExpression(
            service.query(
                u"shortNames", u"sanchez",
                matchType=MatchType.contains,
                flags=MatchFlags.NOT,
            )
        )
        self.assertRecords(
            records,
            (
                u"__alyssa__",
                u"__calendar-dev__",
                u"__cdaboo__",
                u"__developers__",
                u"__dre__",
                u"__dreid__",
                u"__exarkun__",
                u"__glyph__",
                u"__joe__",
                u"__sagen__",
                u"__twisted__",
            ),
        )

    @inlineCallbacks
    def test_queryContainsNotNoIndex(self):
        """
        NOT + contains on an unindexed field.
        """
        service = self.service()
        records = yield service.recordsFromExpression(
            service.query(
                u"fullNames", u"ranch",
                matchType=MatchType.contains,
                flags=MatchFlags.NOT,
            )
        )
        self.assertRecords(
            records,
            (
                u"__alyssa__",
                u"__calendar-dev__",
                u"__cdaboo__",
                u"__developers__",
                u"__dreid__",
                u"__exarkun__",
                u"__glyph__",
                u"__joe__",
                u"__sagen__",
                u"__twisted__",
                u"__wsanchez__",
            ),
        )

    @inlineCallbacks
    def test_queryContainsCaseInsensitive(self):
        """
        caseInsensitive + contains on an indexed field.
        """
        service = self.service()
        records = yield service.recordsFromExpression(
            service.query(
                u"shortNames", u"Sanchez",
                matchType=MatchType.contains,
                flags=MatchFlags.caseInsensitive,
            )
        )
        self.assertRecords(records, (u"__wsanchez__",))

    @inlineCallbacks
    def test_queryContainsCaseInsensitiveNoIndex(self):
        """
        caseInsensitive + contains on an unindexed field.
        """
        service = self.service()
        records = yield service.recordsFromExpression(
            service.query(
                u"fullNames", u"frEdo",
                matchType=MatchType.contains,
                flags=MatchFlags.caseInsensitive,
            )
        )
        self.assertRecords(records, (u"__wsanchez__",))

    @inlineCallbacks
    def test_queryWithRecordTypes(self):
        """
        Verify that results are limited to the requested recordTypes
        """
        service = self.service()
        records = yield service.recordsFromExpression(
            service.query(
                u"fullNames", u"e",
                matchType=MatchType.contains,
            ),
            recordTypes=(service.recordType.group,)
        )
        # Note: only contains groups; the users that would normally match
        # have been filtered out
        self.assertRecords(
            records,
            (u"__calendar-dev__", u"__developers__", u"__twisted__")
        )
class DirectoryServiceMutableTestMixIn(object):
    """
    Tests for record mutation: updating, creating, and removing records,
    including persistence across a ``flush()``.
    """

    @inlineCallbacks
    def test_updateRecord(self):
        """
        updateRecords() replaces a record's fields, both in memory and
        after flushing to storage.
        """
        service = self.service()

        record = (yield service.recordWithUID(u"__wsanchez__"))

        fields = record.fields.copy()
        fields[service.fieldName.fullNames] = [u"<NAME>"]

        updatedRecord = DirectoryRecord(service, fields)
        yield service.updateRecords((updatedRecord,))

        # Verify change is present immediately
        record = (yield service.recordWithUID(u"__wsanchez__"))
        self.assertEquals(
            set(record.fullNames),
            set((u"<NAME>",))
        )

        # Verify change is persisted
        service.flush()
        record = (yield service.recordWithUID(u"__wsanchez__"))
        self.assertEquals(
            set(record.fullNames),
            set((u"<NAME>",))
        )

    @inlineCallbacks
    def test_addRecord(self):
        """
        updateRecords(..., create=True) adds a new record, immediately
        and persistently.
        """
        service = self.service()

        newRecord = DirectoryRecord(
            service,
            fields={
                service.fieldName.uid: u"__plugh__",
                service.fieldName.recordType: service.recordType.user,
                service.fieldName.shortNames: (u"plugh",),
                service.fieldName.password: u"",
            }
        )

        yield service.updateRecords((newRecord,), create=True)

        # Verify change is present immediately
        record = (yield service.recordWithUID(u"__plugh__"))
        self.assertEquals(set(record.shortNames), set((u"plugh",)))
        self.assertEquals(record.password, u"")

        # Verify change is persisted
        service.flush()
        record = (yield service.recordWithUID(u"__plugh__"))
        self.assertEquals(set(record.shortNames), set((u"plugh",)))

    def test_addRecordNoCreate(self):
        """
        updateRecords() without create=True fails with NoSuchRecordError
        for a record the service does not already have.
        """
        service = self.service()

        newRecord = DirectoryRecord(
            service,
            fields={
                service.fieldName.uid: u"__plugh__",
                service.fieldName.recordType: service.recordType.user,
                service.fieldName.shortNames: (u"plugh",),
            }
        )

        return self.assertFailure(
            service.updateRecords((newRecord,)),
            NoSuchRecordError
        )

    @inlineCallbacks
    def test_removeRecord(self):
        """
        removeRecords() deletes the record, immediately and persistently.
        """
        service = self.service()

        yield service.removeRecords((u"__wsanchez__",))

        # Verify change is present immediately
        self.assertEquals((yield service.recordWithUID(u"__wsanchez__")), None)

        # Verify change is persisted
        service.flush()
        self.assertEquals((yield service.recordWithUID(u"__wsanchez__")), None)

    def test_removeRecordNoExist(self):
        """
        Removing a nonexistent record is a no-op (must not raise).
        """
        service = self.service()

        return service.removeRecords((u"__plugh__",))
class DirectoryServiceTest(
    unittest.TestCase,
    BaseTest,
    DirectoryServiceConvenienceTestMixIn,
    DirectoryServiceRealmTestMixIn,
    DirectoryServiceQueryTestMixIn,
    DirectoryServiceMutableTestMixIn,
    test_index.BaseDirectoryServiceTest,
):
    """
    Full test suite for the XML DirectoryService, combining the mixins
    above with the generic index-service tests.
    """

    # Concrete classes under test, consumed by the mixins / base tests.
    serviceClass = DirectoryService
    directoryRecordClass = DirectoryRecord

    def test_repr(self):
        """
        repr() reflects load state: "(not loaded)" before loadRecords(),
        the realm name afterwards.
        """
        service = self.service()

        self.assertEquals(repr(service), u"<TestService (not loaded)>")

        service.loadRecords()
        self.assertEquals(repr(service), u"<TestService u'xyzzy'>")
class DirectoryServiceParsingTest(unittest.TestCase, BaseTest):
def test_reloadInterval(self):
    """
    Two loads (stat disabled) within the refresh interval must not
    re-read the XML: _lastRefresh stays unchanged.
    """
    service = self.service()

    service.loadRecords(stat=False)
    lastRefresh = service._lastRefresh
    self.assertTrue(service._lastRefresh)

    sleep(1)

    service.loadRecords(stat=False)
    self.assertEquals(lastRefresh, service._lastRefresh)
def test_reloadStat(self):
    """
    Reloading with loadNow=True does not re-read the file when nothing
    changed: _lastRefresh is unchanged.

    NOTE(review): this mirrors test_reloadInterval; presumably the
    stat-based check sees an unchanged mtime — verify intent.
    """
    service = self.service()

    service.loadRecords(loadNow=True)
    lastRefresh = service._lastRefresh
    self.assertTrue(service._lastRefresh)

    sleep(1)

    service.loadRecords(loadNow=True)
    self.assertEquals(lastRefresh, service._lastRefresh)
def test_badXML(self):
    """
    Non-XML data must raise ParseError on load.
    """
    service = self.service(xmlData="Hello")

    self.assertRaises(ParseError, service.loadRecords)
def test_badRootElement(self):
    """
    XML whose root element is not the directory element must raise
    ParseError mentioning the incorrect root element.
    """
    service = self.service(xmlData=(dedent(
        b"""
        <?xml version="1.0" encoding="utf-8"?>

        <frobnitz />
        """[1:]
    )))

    self.assertRaises(ParseError, service.loadRecords)

    try:
        service.loadRecords()
    except ParseError as e:
        self.assertTrue(str(e).startswith("Incorrect root element"), e)
    else:
        raise AssertionError("Expected ParseError")
def test_noRealmName(self):
    """
    A directory element with no realm attribute must raise ParseError
    mentioning the missing realm name.
    """
    service = self.service(xmlData=(dedent(
        b"""
        <?xml version="1.0" encoding="utf-8"?>

        <directory />
        """[1:]
    )))

    self.assertRaises(ParseError, service.loadRecords)

    try:
        service.loadRecords()
    except ParseError as e:
        self.assertTrue(str(e).startswith("No realm name"), e)
    else:
        raise AssertionError("Expected ParseError")
def test_unknownFieldElementsClean(self):
    """
    A clean fixture reports no unknown field elements.
    """
    service = self.service()
    self.assertEquals(set(service.unknownFieldElements), set())
def test_unknownFieldElementsDirty(self):
    """
    Unrecognized field elements inside a record are reported via
    service.unknownFieldElements.
    """
    service = self.service(xmlData=(dedent(
        b"""
        <?xml version="1.0" encoding="utf-8"?>

        <directory realm="Unknown Record Types">
          <record type="user">
            <uid>__wsanchez__</uid>
            <short-name>wsanchez</short-name>
            <political-affiliation>Community and Freedom Party</political-affiliation>
          </record>
        </directory>
        """[1:]
    )))

    self.assertEquals(
        set(service.unknownFieldElements),
        set((u"political-affiliation",))
    )
def test_unknownRecordTypesClean(self):
    """
    A clean fixture reports no unknown record types.
    """
    service = self.service()
    self.assertEquals(set(service.unknownRecordTypes), set())
def test_unknownRecordTypesDirty(self):
service = self.service(xmlData=(dedent(
b"""
| |
or tuple
the width of the convolutional kernel. This can be either a two element tuple, giving
the kernel size along each dimension, or an integer to use the same size along both
dimensions.
stride: int or tuple
the stride between applications of the convolutional kernel. This can be either a two
element tuple, giving the stride along each dimension, or an integer to use the same
stride along both dimensions.
padding: str
the padding method to use, either 'SAME' or 'VALID'
activation_fn: object
the Tensorflow activation function to apply to the output
normalizer_fn: object
the Tensorflow normalizer function to apply to the output
biases_initializer: callable object
the initializer for bias values. This may be None, in which case the layer
will not include biases.
weights_initializer: callable object
the initializer for weight values
"""
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.activation_fn = activation_fn
self.normalizer_fn = normalizer_fn
self.weights_initializer = weights_initializer
self.biases_initializer = biases_initializer
super(Conv2DTranspose, self).__init__(**kwargs)
if scope_name is None:
scope_name = self.name
self.scope_name = scope_name
try:
parent_shape = self.in_layers[0].shape
strides = stride
if isinstance(stride, int):
strides = (stride, stride)
self._shape = (parent_shape[0], parent_shape[1] * strides[0],
parent_shape[2] * strides[1], num_outputs)
except:
pass
def _build_layer(self, reuse):
if self.biases_initializer is None:
biases_initializer = None
else:
biases_initializer = self.biases_initializer()
return nn.ConvTranspose2d(
self.num_outputs,
self.kernel_size,
strides=self.stride,
padding=self.padding)
def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
    """Apply the transposed 2D convolution to the input tensor.

    Parameters
    ----------
    in_layers: list, optional
        input layers to use instead of self.in_layers
    set_tensors: bool
        if True, record the result in self.out_tensor

    Returns
    -------
    the output tensor produced by the convolution (optionally normalized)
    """
    inputs = self._get_input_tensors(in_layers)
    parent_tensor = inputs[0]
    # Inputs lacking an explicit channel axis get one appended.
    # NOTE(review): get_shape() is TensorFlow tensor API; torch tensors
    # expose .shape — confirm what _get_input_tensors returns here.
    if len(parent_tensor.get_shape()) == 3:
        parent_tensor = torch.unsqueeze(parent_tensor, 3)
    # Try first with the recorded reuse flag, then without; a ValueError
    # triggers the retry (TF-era variable-reuse logic).
    for reuse in (self._reuse, False):
        try:
            if torch.cuda.is_available():
                # Build the layer once and cache it; it must not be pickled.
                if not self._built:
                    self._layer = self._build_layer(False)
                    self._non_pickle_fields.append('_layer')
                layer = self._layer
            else:
                layer = self._build_layer(reuse)
            out_tensor = layer(parent_tensor)
            # NOTE(review): self.activation_fn is stored by __init__ but
            # never applied here — confirm whether that is intentional.
            if self.normalizer_fn is not None:
                out_tensor = self.normalizer_fn(out_tensor)
            break
        except ValueError:
            if reuse:
                # This probably means the variable hasn't been created yet, so try again
                # with reuse set to false.
                continue
            raise
    if set_tensors:
        self._record_variable_scope(self.scope_name)
        self.out_tensor = out_tensor
    if torch.cuda.is_available() and not self._built:
        self._built = True
        # NOTE(review): torch modules expose parameters(), not .variables —
        # verify what the surrounding framework expects.
        self.variables = self._layer.variables
    return out_tensor
class Conv3DTranspose(SharedVariableScope):
    """A transposed 3D convolution on the input.

    This layer is typically used for upsampling in a deconvolutional
    network. It expects its input to be a five dimensional tensor of shape
    (batch size, height, width, depth, # channels). If there is only one
    channel, the fifth dimension may optionally be omitted.

    NOTE(review): torch conv modules expect channels-first input
    (N, C, D, H, W); the layout documented here is channels-last —
    confirm the tensor layout used by callers.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size=5,
                 stride=1,
                 padding='SAME',
                 activation_fn=nn.ReLU,
                 normalizer_fn=None,
                 biases_initializer=torch.zeros,
                 weights_initializer=nn.init.xavier_normal_,
                 scope_name=None,
                 **kwargs):
        """Create a Conv3DTranspose layer.

        Parameters
        ----------
        in_channels: int
            the number of channels in the input
        out_channels: int
            the number of output channels produced by the convolutional
            kernel
        kernel_size: int or tuple
            the width of the convolutional kernel. This can be either a
            three element tuple, giving the kernel size along each
            dimension, or an integer to use the same size along all
            dimensions.
        stride: int or tuple
            the stride between applications of the convolutional kernel.
            This can be either a three element tuple, giving the stride
            along each dimension, or an integer to use the same stride
            along all dimensions.
        padding: str
            the padding method to use, either 'SAME' or 'VALID'
        activation_fn: object
            the activation function to apply to the output
            (NOTE(review): stored but not applied in create_tensor —
            confirm intent)
        normalizer_fn: object
            the normalizer function to apply to the output
        biases_initializer: callable object
            the initializer for bias values. This may be None, in which
            case the layer will not include biases.
        weights_initializer: callable object
            the initializer for weight values
        """
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.activation_fn = activation_fn
        self.normalizer_fn = normalizer_fn
        self.weights_initializer = weights_initializer
        self.biases_initializer = biases_initializer
        super(Conv3DTranspose, self).__init__(**kwargs)
        if scope_name is None:
            scope_name = self.name
        self.scope_name = scope_name
        try:
            parent_shape = self.in_layers[0].shape
            strides = stride
            if isinstance(stride, int):
                strides = (stride, stride, stride)
            # BUG FIX: this previously referenced the undefined name
            # 'num_outputs' (silently swallowed by the bare except), so
            # the output shape was never inferred.
            self._shape = (parent_shape[0], parent_shape[1] * strides[0],
                           parent_shape[2] * strides[1],
                           parent_shape[3] * strides[2],
                           self.out_channels)
        except:
            pass

    def _build_layer(self, reuse):
        """Construct the underlying torch ConvTranspose3d module.

        `reuse` is accepted for API compatibility with the TF-era
        interface and is unused.
        """
        if isinstance(self.kernel_size, int):
            kernel = (self.kernel_size,) * 3
        else:
            kernel = tuple(self.kernel_size)
        # torch has no string padding modes for transposed convolutions;
        # emulate TF 'SAME' (odd kernels) and 'VALID'.
        if str(self.padding).upper() == 'SAME':
            pad = tuple((k - 1) // 2 for k in kernel)
        else:
            pad = 0
        # BUG FIX: the keyword is 'stride', not 'strides'; also pass the
        # bias flag derived from biases_initializer (previously computed
        # but never used).
        return nn.ConvTranspose3d(
            self.in_channels,
            self.out_channels,
            kernel,
            stride=self.stride,
            padding=pad,
            bias=self.biases_initializer is not None)

    def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
        """Apply the transposed 3D convolution to the input tensor.

        Parameters
        ----------
        in_layers: list, optional
            input layers to use instead of self.in_layers
        set_tensors: bool
            if True, record the result in self.out_tensor

        Returns
        -------
        the output tensor (optionally normalized)
        """
        inputs = self._get_input_tensors(in_layers)
        parent_tensor = inputs[0]
        # A 4D input (no channel axis) gets a trailing channel dimension.
        # BUG FIX: get_shape() is TensorFlow API; torch tensors use .shape.
        if len(parent_tensor.shape) == 4:
            parent_tensor = torch.unsqueeze(parent_tensor, 4)
        # TF-era reuse/retry logic retained for interface compatibility.
        for reuse in (self._reuse, False):
            try:
                if torch.cuda.is_available():
                    # Build once and cache; the module must not be pickled.
                    if not self._built:
                        self._layer = self._build_layer(False)
                        self._non_pickle_fields.append('_layer')
                    layer = self._layer
                else:
                    layer = self._build_layer(reuse)
                out_tensor = layer(parent_tensor)
                if self.normalizer_fn is not None:
                    out_tensor = self.normalizer_fn(out_tensor)
                break
            except ValueError:
                if reuse:
                    # The variable may not exist yet; retry without reuse.
                    continue
                raise
        if set_tensors:
            self._record_variable_scope(self.scope_name)
            self.out_tensor = out_tensor
        if torch.cuda.is_available() and not self._built:
            self._built = True
            # NOTE(review): torch modules expose parameters(), not
            # .variables — verify what the framework expects here.
            self.variables = self._layer.variables
        return out_tensor
class MaxPool1D(Layer):
    """A 1D max pooling on the input.

    This layer expects its input to be a three dimensional tensor of shape
    (batch size, width, # channels).

    NOTE(review): torch pooling modules expect channels-first input
    (N, C, L) — confirm the tensor layout used by callers.
    """

    def __init__(self, kernel_size=2, strides=1, padding="SAME", **kwargs):
        """Create a MaxPool1D layer.

        Parameters
        ----------
        kernel_size: int, optional
            size of the pooling window (assuming input with only one
            spatial dimension)
        strides: int, optional
            stride of the sliding window
        padding: str
            the padding method to use, either 'SAME' or 'VALID'
        """
        self.kernel_size = kernel_size
        self.strides = strides
        self.padding = padding
        self.pooling_type = "MAX"  # kept for backward compatibility
        super(MaxPool1D, self).__init__(**kwargs)
        try:
            parent_shape = self.in_layers[0].shape
            self._shape = (parent_shape[0], parent_shape[1] // strides,
                           parent_shape[2])
        except:
            pass

    def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
        """Apply 1D max pooling to the input tensor.

        Returns the pooled tensor; records it in self.out_tensor when
        set_tensors is True.
        """
        inputs = self._get_input_tensors(in_layers)
        in_tensor = inputs[0]
        # BUG FIX: this previously called nn.AvgPool1d (the wrong op for a
        # MAX pool) with invalid keywords ('kernnel_size', 'pooling_type',
        # 'strides') and passed the tensor to the module constructor.
        # Build the module, then apply it; convert the TF-style string
        # padding to the integer padding torch expects.
        if str(self.padding).upper() == 'SAME':
            pad = (self.kernel_size - 1) // 2
        else:
            pad = 0
        pool = nn.MaxPool1d(self.kernel_size, stride=self.strides,
                            padding=pad)
        out_tensor = pool(in_tensor)
        if set_tensors:
            self.out_tensor = out_tensor
        return out_tensor
class MaxPool2D(Layer):
    """A 2D max pooling on the input.

    This layer expects a four dimensional input tensor.

    NOTE(review): the TF-style defaults below are 4-element lists
    (batch/spatial/channel), while torch pooling expects ints or
    2-tuples and channels-first layout — confirm what callers pass.
    """

    def __init__(self,
                 kernel_size=[1, 2, 2, 1],
                 stride=[1, 2, 2, 1],
                 padding="SAME",
                 **kwargs):
        """Create a MaxPool2D layer.

        Parameters
        ----------
        kernel_size: list or int
            size of the pooling window
        stride: list or int
            stride of the sliding window
        padding: str
            the padding method to use, either 'SAME' or 'VALID'
        """
        self.kernel_size = kernel_size
        # BUG FIX: this previously assigned the undefined name 'strides'
        # (the parameter is called 'stride'), raising NameError on
        # construction; create_tensor then read a never-set self.stride.
        self.stride = stride
        self.strides = stride  # old attribute name kept as an alias
        self.padding = padding
        super(MaxPool2D, self).__init__(**kwargs)
        try:
            parent_shape = self.in_layers[0].shape
            self._shape = tuple(
                None if p is None else p // s
                for p, s in zip(parent_shape, stride))
        except:
            pass

    def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
        """Apply 2D max pooling to the input tensor."""
        inputs = self._get_input_tensors(in_layers)
        in_tensor = inputs[0]
        # BUG FIX: build the pooling module, then apply it to the tensor;
        # the tensor is not a constructor argument and torch accepts no
        # string padding.
        if str(self.padding).upper() == 'SAME' and isinstance(
                self.kernel_size, int):
            pad = (self.kernel_size - 1) // 2
        else:
            pad = 0
        pool = nn.MaxPool2d(self.kernel_size, stride=self.stride,
                            padding=pad)
        out_tensor = pool(in_tensor)
        if set_tensors:
            self.out_tensor = out_tensor
        return out_tensor
class MaxPool3D(Layer):
    """A 3D max pooling on the input.

    This layer expects its input to be a five dimensional tensor of shape
    (batch size, height, width, depth, # channels).

    NOTE(review): torch pooling modules expect channels-first input
    (N, C, D, H, W) and int/3-tuple window sizes, while the documented
    defaults are TF-style 5-element lists — confirm what callers pass.
    """

    def __init__(self,
                 kernel_size=[1, 2, 2, 2, 1],
                 stride=[1, 2, 2, 2, 1],
                 padding='SAME',
                 **kwargs):
        """Create a MaxPool3D layer.

        Parameters
        ----------
        kernel_size: list
            size of the window for each dimension of the input tensor.
            Must have length of 5 and kernel_size[0] = kernel_size[4] = 1.
        stride: list
            stride of the sliding window for each dimension of input.
            Must have length of 5 and stride[0] = stride[4] = 1.
        padding: str
            the padding method to use, either 'SAME' or 'VALID'
        """
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        super(MaxPool3D, self).__init__(**kwargs)
        try:
            parent_shape = self.in_layers[0].shape
            # BUG FIX: previously referenced the undefined name 'strides'
            # (silently swallowed by the bare except), so _shape was
            # never inferred.
            self._shape = tuple(
                None if p is None else p // s
                for p, s in zip(parent_shape, stride))
        except:
            pass

    def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
        """Apply 3D max pooling to the input tensor."""
        inputs = self._get_input_tensors(in_layers)
        in_tensor = inputs[0]
        # BUG FIX: build the pooling module, then apply it to the tensor;
        # the tensor is not a constructor argument and torch accepts no
        # string padding.
        if str(self.padding).upper() == 'SAME' and isinstance(
                self.kernel_size, int):
            pad = (self.kernel_size - 1) // 2
        else:
            pad = 0
        pool = nn.MaxPool3d(self.kernel_size, stride=self.stride,
                            padding=pad)
        out_tensor = pool(in_tensor)
        if set_tensors:
            self.out_tensor = out_tensor
        return out_tensor
#class InputFifoQueue(Layer):
"""
This Queue Is used to allow asynchronous batching of inputs
During the fitting process
"""
# def __init__(self, shapes, names, capacity=5, **kwargs):
# self.shapes = shapes
# self.names = names
# self.capacity = capacity
# super(InputFifoQueue, self).__init__(**kwargs)
# def create_tensor(self, in_layers=None, **kwargs):
# TODO(rbharath): Note sure if this layer can be called with __call__
# meaningfully, so not going to support that functionality for now.
# if in_layers is None:
# in_layers = self.in_layers
# in_layers = convert_to_layers(in_layers)
# self.dtypes = [x.out_tensor.dtype for x in in_layers]
# self.queue = tf.FIFOQueue(self.capacity, self.dtypes, names=self.names)
# feed_dict = {x.name: x.out_tensor for x in in_layers}
# self.out_tensor = self.queue.enqueue(feed_dict)
# self.close_op = self.queue.close()
# self.out_tensors = self.queue.dequeue()
# self._non_pickle_fields += ['queue', 'out_tensors', 'close_op']"""
class GraphConv(Layer):
def __init__(self,
             out_channel,
             min_deg=0,
             max_deg=10,
             activation_fn=None,
             **kwargs):
    """Create a GraphConv layer.

    Parameters
    ----------
    out_channel: int
        number of output features per atom
    min_deg: int
        minimum atom degree handled by the convolution
    max_deg: int
        maximum atom degree handled by the convolution
    activation_fn: object, optional
        activation applied to the convolved output
    """
    self.out_channel = out_channel
    self.min_degree = min_deg
    self.max_degree = max_deg
    # One affine transform per degree bucket:
    # 2 * max_deg + (1 - min_deg) in total.
    self.num_deg = 2 * max_deg + (1 - min_deg)
    self.activation_fn = activation_fn
    super(GraphConv, self).__init__(**kwargs)
    try:
        # Shape inference: (batch/atoms, out_channel); silently skipped
        # when the parent shape is unavailable.
        parent_shape = self.in_layers[0].shape
        self._shape = (parent_shape[0], out_channel)
    except:
        pass
def _create_variables(self, in_channels):
    """Create the per-degree affine weights and biases.

    Parameters
    ----------
    in_channels: int
        number of input features per atom

    Returns
    -------
    tuple
        (W_list, b_list): one [in_channels, out_channel] weight matrix
        and one [out_channel] bias vector per degree bucket
        (self.num_deg of each).
    """
    # Generate the nb_affine weights and biases
    W_list = [
        initializations.glorot_uniform(
            [in_channels, self.out_channel], name='kernel')
        for k in range(self.num_deg)
    ]
    b_list = [
        model_ops.zeros(shape=[
            self.out_channel,
        ], name='bias') for k in range(self.num_deg)
    ]
    return (W_list, b_list)
def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
inputs = self._get_input_tensors(in_layers)
# in_layers = | |
dimension
if dim < 2 or dim > 3:
raise ValueError('Undefined transform for dimension: %d' % (dim,))
# Obtain grid-to-world transform for sampling grid
if sampling_grid2world is None:
if apply_inverse:
sampling_grid2world = self.codomain_grid2world
else:
sampling_grid2world = self.domain_grid2world
if sampling_grid2world is None:
sampling_grid2world = np.eye(dim + 1)
# Obtain world-to-grid transform for input image
if image_grid2world is None:
if apply_inverse:
image_grid2world = self.domain_grid2world
else:
image_grid2world = self.codomain_grid2world
if image_grid2world is None:
image_grid2world = np.eye(dim + 1)
image_world2grid = npl.inv(image_grid2world)
# Compute the transform from sampling grid to input image grid
if apply_inverse:
aff = self.affine_inv
else:
aff = self.affine
if (aff is None) or resample_only:
comp = image_world2grid.dot(sampling_grid2world)
else:
comp = image_world2grid.dot(aff.dot(sampling_grid2world))
# Transform the input image
if interp == 'linear':
image = image.astype(np.float64)
transformed = _transform_method[(dim, interp)](image, shape, comp)
return transformed
def transform(self, image, interp='linear', image_grid2world=None,
              sampling_grid_shape=None, sampling_grid2world=None,
              resample_only=False):
    """ Transforms the input image from co-domain to domain space

    The output is sampled on the grid defined by `self.domain_shape`
    and `self.domain_grid2world`; when that information was not set at
    initialization, `sampling_grid_shape` is mandatory.

    Parameters
    ----------
    image : array, shape (X, Y) or (X, Y, Z)
        the image to be transformed
    interp : string, either 'linear' or 'nearest'
        interpolation scheme: 'linear' (k-linear interpolation) or
        'nearest' (nearest neighbor)
    image_grid2world : array, shape (dim + 1, dim + 1), optional
        grid-to-world transform associated with `image`; the identity
        is assumed when None (the default)
    sampling_grid_shape : sequence, shape (dim,), optional
        shape of the grid on which the transformed image is sampled;
        defaults to `self.codomain_shape` (which must then have been
        provided at initialization, otherwise an exception is raised)
    sampling_grid2world : array, shape (dim + 1, dim + 1), optional
        grid-to-world transform of the sampling grid (given by
        `sampling_grid_shape`, or `self.codomain_shape` by default);
        the identity is assumed when None (the default)
    resample_only : Boolean, optional
        if False (the default) the affine transform is applied normally;
        if True the affine transform is skipped and the input image is
        only re-sampled on the domain grid of this transform

    Returns
    -------
    transformed : array, shape `sampling_grid_shape` or
        `self.codomain_shape`
        the transformed image, sampled at the requested grid
    """
    warped = self._apply_transform(image, interp, image_grid2world,
                                   sampling_grid_shape,
                                   sampling_grid2world,
                                   resample_only,
                                   apply_inverse=False)
    return np.array(warped)
def transform_inverse(self, image, interp='linear', image_grid2world=None,
                      sampling_grid_shape=None, sampling_grid2world=None,
                      resample_only=False):
    """ Transforms the input image from domain to co-domain space

    The output is sampled on the grid defined by `self.codomain_shape`
    and `self.codomain_grid2world`; when that information was not set at
    initialization, `sampling_grid_shape` is mandatory.

    Parameters
    ----------
    image : array, shape (X, Y) or (X, Y, Z)
        the image to be transformed
    interp : string, either 'linear' or 'nearest'
        interpolation scheme: 'linear' (k-linear interpolation) or
        'nearest' (nearest neighbor)
    image_grid2world : array, shape (dim + 1, dim + 1), optional
        grid-to-world transform associated with `image`; the identity
        is assumed when None (the default)
    sampling_grid_shape : sequence, shape (dim,), optional
        shape of the grid on which the transformed image is sampled;
        defaults to `self.codomain_shape` (which must then have been
        provided at initialization, otherwise an exception is raised)
    sampling_grid2world : array, shape (dim + 1, dim + 1), optional
        grid-to-world transform of the sampling grid (given by
        `sampling_grid_shape`, or `self.codomain_shape` by default);
        the identity is assumed when None (the default)
    resample_only : Boolean, optional
        if False (the default) the affine transform is applied normally;
        if True the affine transform is skipped and the input image is
        only re-sampled on the domain grid of this transform

    Returns
    -------
    transformed : array, shape `sampling_grid_shape` or
        `self.codomain_shape`
        the transformed image, sampled at the requested grid
    """
    warped = self._apply_transform(image, interp, image_grid2world,
                                   sampling_grid_shape,
                                   sampling_grid2world,
                                   resample_only,
                                   apply_inverse=True)
    return np.array(warped)
class MutualInformationMetric(object):
def __init__(self, nbins=32, sampling_proportion=None):
    r"""Create a Mutual Information metric.

    The instance exposes the interface Optimizer needs to drive an
    affine registration.

    Parameters
    ----------
    nbins : int, optional
        Number of bins used for the (joint and marginal) intensity
        histograms. Default: 32.
    sampling_proportion : None or float in (0, 1], optional
        None selects dense sampling (every voxel contributes to the
        histograms). A float in (0, 1] selects sparse sampling with that
        proportion of voxels. Default: None.

    Notes
    -----
    Images interpolated linearly are not differentiable exactly at voxel
    coordinates, only between them. Under sparse sampling the selected
    voxels are therefore jittered by a small random sub-voxel displacement;
    under dense sampling no jitter is applied.
    """
    # Gradient/value caches start out unset; they are filled in by the
    # distance/gradient evaluations.
    self.metric_val = None
    self.metric_grad = None
    self.sampling_proportion = sampling_proportion
    # Parzen-window joint histogram backing the MI estimate.
    self.histogram = ParzenJointHistogram(nbins)
def setup(self, transform, static, moving, static_grid2world=None,
          moving_grid2world=None, starting_affine=None):
    r""" Prepares the metric to compute intensity densities and gradients
    The histograms will be setup to compute probability densities of
    intensities within the minimum and maximum values of `static` and
    `moving`
    Parameters
    ----------
    transform: instance of Transform
        the transformation with respect to whose parameters the gradient
        must be computed
    static : array, shape (S, R, C) or (R, C)
        static image
    moving : array, shape (S', R', C') or (R', C')
        moving image. The dimensions of the static (S, R, C) and moving
        (S', R', C') images do not need to be the same.
    static_grid2world : array (dim+1, dim+1), optional
        the grid-to-space transform of the static image. The default is
        None, implying the transform is the identity.
    moving_grid2world : array (dim+1, dim+1)
        the grid-to-space transform of the moving image. The default is
        None, implying the spacing along all axes is 1.
    starting_affine : array, shape (dim+1, dim+1), optional
        the pre-aligning matrix (an affine transform) that roughly aligns
        the moving image towards the static image. If None, no
        pre-alignment is performed. If a pre-alignment matrix is available,
        it is recommended to provide this matrix as `starting_affine`
        instead of manually transforming the moving image to reduce
        interpolation artifacts. The default is None, implying no
        pre-alignment is performed.
    """
    # Dimensionality (2 or 3) is inferred from the static image.
    self.dim = len(static.shape)
    # Missing grid-to-world transforms default to the identity.
    if moving_grid2world is None:
        moving_grid2world = np.eye(self.dim + 1)
    if static_grid2world is None:
        static_grid2world = np.eye(self.dim + 1)
    self.transform = transform
    # Work on float64 copies so later interpolation/histogramming is not
    # affected by the caller's dtype.
    self.static = np.array(static).astype(np.float64)
    self.moving = np.array(moving).astype(np.float64)
    # Cache both directions of each grid/world mapping.
    self.static_grid2world = static_grid2world
    self.static_world2grid = npl.inv(static_grid2world)
    self.moving_grid2world = moving_grid2world
    self.moving_world2grid = npl.inv(moving_grid2world)
    self.static_direction, self.static_spacing = \
        get_direction_and_spacings(static_grid2world, self.dim)
    self.moving_direction, self.moving_spacing = \
        get_direction_and_spacings(moving_grid2world, self.dim)
    self.starting_affine = starting_affine
    # The affine map starts at the pre-alignment (or identity).
    P = np.eye(self.dim + 1)
    if self.starting_affine is not None:
        P = self.starting_affine
    self.affine_map = AffineMap(P, static.shape, static_grid2world,
                                moving.shape, moving_grid2world)
    # Pick the interpolation routine matching the dimensionality.
    if self.dim == 2:
        self.interp_method = vf.interpolate_scalar_2d
    else:
        self.interp_method = vf.interpolate_scalar_3d
    if self.sampling_proportion is None:
        # Dense sampling: no explicit sample list is needed.
        self.samples = None
        self.ns = 0
    else:
        # Sparse sampling: take roughly one of every k voxels of the
        # static grid (in world coordinates).
        k = int(np.ceil(1.0 / self.sampling_proportion))
        shape = np.array(static.shape, dtype=np.int32)
        self.samples = sample_domain_regular(k, shape, static_grid2world)
        self.samples = np.array(self.samples)
        self.ns = self.samples.shape[0]
        # Add a column of ones (homogeneous coordinates)
        self.samples = np.hstack((self.samples, np.ones(self.ns)[:, None]))
        if self.starting_affine is None:
            self.samples_prealigned = self.samples
        else:
            # Pre-align the sample points once so the per-iteration cost
            # only involves the current transform.
            self.samples_prealigned =\
                self.starting_affine.dot(self.samples.T).T
        # Sample the static image
        static_p = self.static_world2grid.dot(self.samples.T).T
        static_p = static_p[..., :self.dim]
        self.static_vals, inside = self.interp_method(static, static_p)
        self.static_vals = np.array(self.static_vals, dtype=np.float64)
    # Histogram bins are set up from the full intensity ranges of both
    # images regardless of the sampling strategy.
    self.histogram.setup(self.static, self.moving)
def _update_histogram(self):
r""" Updates the histogram according to the current affine transform
The current affine transform is given by `self.affine_map`, which
must be set before calling this method.
Returns
-------
static_values: array, shape(n,) if sparse sampling is being used,
array, shape(S, R, C) or (R, | |
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.637134,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 6.00062,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.134405,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.308256,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.766128,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.29084,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.469114,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.236793,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.996747,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.215177,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 5.57084,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.144738,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0121991,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.136895,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0902201,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.281633,
'Execution Unit/Register Files/Runtime Dynamic': 0.102419,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.322008,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.727191,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 2.53999,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.000681577,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.000681577,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000599852,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000235603,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00129602,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00325902,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00631342,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0867308,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 5.51682,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.182473,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.294577,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 8.00308,
'Instruction Fetch Unit/Runtime Dynamic': 0.573353,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0355488,
'L2/Runtime Dynamic': 0.00659167,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 3.57728,
'Load Store Unit/Data Cache/Runtime Dynamic': 1.1268,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0757096,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0757095,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 3.93479,
'Load Store Unit/Runtime Dynamic': 1.57589,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.186687,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.373373,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0662558,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0667858,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.343016,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0299251,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.612941,
'Memory Management Unit/Runtime Dynamic': 0.0967108,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 21.7467,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.38074,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.0177554,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.141189,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming | |
params.add('detFrac', 0.65)
else:
params.add('detFrac', detFracInit)
if strutFracInit is None:
params.add('strutFrac', 0.07)
else:
params.add('strutFrac', strutFracInit)
if focalPlanePositionInit is None:
params.add('dxFocal', 0.0)
params.add('dyFocal', 0.0)
else:
params.add('dxFocal', focalPlanePositionInit[0])
params.add('dyFocal', focalPlanePositionInit[1])
if slitFracInit is None:
params.add('slitFrac', 0.05)
else:
params.add('slitFrac', slitFracInit)
if slitFrac_dy_Init is None:
params.add('slitFrac_dy', 0)
else:
params.add('slitFrac_dy', slitFrac_dy_Init)
# parameters describing individual struts
if wide_0Init is None:
params.add('wide_0', 0)
else:
params.add('wide_0', wide_0Init)
if wide_23Init is None:
params.add('wide_23', 0)
else:
params.add('wide_23', wide_23Init)
if wide_43Init is None:
params.add('wide_43', 0)
else:
params.add('wide_43', wide_43Init)
# non-uniform illumination
if radiometricExponentInit is None:
params.add('radiometricExponent', 0.25)
else:
params.add('radiometricExponent', radiometricExponentInit)
if radiometricEffectInit is None:
params.add('radiometricEffect', 0)
else:
params.add('radiometricEffect', radiometricEffectInit)
if x_ilumInit is None:
params.add('x_ilum', 1)
else:
params.add('x_ilum', x_ilumInit)
if y_ilumInit is None:
params.add('y_ilum', 1)
else:
params.add('y_ilum', y_ilumInit)
# illumination due to fiber, parameters
if x_ilumInit is None:
params.add('x_fiber', 1)
else:
params.add('x_fiber', x_fiberInit)
if y_fiberInit is None:
params.add('y_fiber', 0)
else:
params.add('y_fiber', y_fiberInit)
if effective_ilum_radiusInit is None:
params.add('effective_ilum_radius', 0.9)
else:
params.add('effective_ilum_radius', effective_ilum_radiusInit)
if frd_sigmaInit is None:
params.add('frd_sigma', 0.02)
else:
params.add('frd_sigma', frd_sigmaInit)
if frd_lorentz_factorInit is None:
params.add('frd_lorentz_factor', 0.5)
else:
params.add('frd_lorentz_factor', frd_lorentz_factorInit)
if misalignInit is None:
params.add('misalign', 0)
else:
params.add('misalign', misalignInit)
# further pupil parameters
if det_vertInit is None:
params.add('det_vert', 1)
else:
params.add('det_vert', det_vertInit)
if slitHolder_frac_dxInit is None:
params.add('slitHolder_frac_dx', 0)
else:
params.add('slitHolder_frac_dx', slitHolder_frac_dxInit)
# convolving (postprocessing) parameters
if grating_linesInit is None:
params.add('grating_lines', 100000)
else:
params.add('grating_lines', grating_linesInit)
if scattering_slopeInit is None:
params.add('scattering_slope', 2)
else:
params.add('scattering_slope', scattering_slopeInit)
if scattering_amplitudeInit is None:
params.add('scattering_amplitude', 10**-2)
else:
params.add('scattering_amplitude', scattering_amplitudeInit)
if pixel_effectInit is None:
params.add('pixel_effect', 0.35)
else:
params.add('pixel_effect', pixel_effectInit)
if fiber_rInit is None:
params.add('fiber_r', 1.8)
else:
params.add('fiber_r', fiber_rInit)
if fluxInit is None:
params.add('flux', 1)
else:
params.add('flux', fluxInit)
self.params = params
self.optPsf = None
self.z_array = z_array
def constructModelImage_PFS_naturalResolution(
        self,
        params=None,
        shape=None,
        pixelScale=None,
        use_optPSF=None,
        extraZernike=None,
        return_intermediate_images=False):
    """Construct model image given the set of parameters

    Parameters
    ----------
    params : `lmfit.Parameters` object or python dictionary
        Parameters describing model; None to use self.params
    shape : `(int, int)`
        Shape for model image; None to use the shape of self.maskedImage
    pixelScale : `float`
        Pixel scale in arcseconds to use for model image;
        None to use self.pixelScale.
    use_optPSF : `bool`
        If True, use previously generated optical PSF,
        skip _getOptPsf_naturalResolution, and conduct only postprocessing
    extraZernike : `np.array`, (N,)
        Zernike parameteres beyond z22
    return_intermediate_images : `bool`
        If True, return intermediate images created during the run
        This is in order to help with debugging and inspect
        the images created during the process

    Return
    ----------
    (if not return_intermediate_images)
    optPsf_final : `np.array`, (N, N)
        Final model image
    psf_position : np.array, (2,)
        Position where image is centered
    (if return_intermediate_images)
    optPsf_final : `np.array`, (N, N)
        Final model image
    ilum : `np.array`, (N, N)
        Illumination array
    wf_grid_rot : `np.array`, (N, N)
        Wavefront array
    psf_position : np.array, (2,)
        Position where image is centered

    Notes
    ----------
    Calls _getOptPsf_naturalResolution and optPsf_postprocessing
    """
    if self.verbosity == 1:
        print(' ')
        print('Entering constructModelImage_PFS_naturalResolution')
    if params is None:
        params = self.params
    if shape is None:
        shape = self.image.shape
    if pixelScale is None:
        pixelScale = self.pixelScale
    # Accept either an lmfit.Parameters object or a plain dict.
    try:
        parameter_values = params.valuesdict()
    except AttributeError:
        parameter_values = params
    # NOTE(review): the `use_optPSF` argument is unconditionally overridden
    # by the instance attribute, so the parameter is effectively ignored.
    # Preserved as-is for backward compatibility -- confirm intent.
    use_optPSF = self.use_optPSF
    if extraZernike is not None:
        extraZernike = list(extraZernike)
    self.extraZernike = extraZernike
    # if you did not pass pure optical psf image, create one here
    if use_optPSF is None:
        # change outputs depending on if you want intermediate results
        if not return_intermediate_images:
            optPsf = self._getOptPsf_naturalResolution(
                parameter_values,
                return_intermediate_images=return_intermediate_images)
        else:
            optPsf, ilum, wf_grid_rot = self._getOptPsf_naturalResolution(
                parameter_values,
                return_intermediate_images=return_intermediate_images)
    else:
        # if you claimed to supply optical psf image, but none is provided
        # still create one
        if self.optPsf is None:
            if not return_intermediate_images:
                optPsf = self._getOptPsf_naturalResolution(
                    parameter_values,
                    return_intermediate_images=return_intermediate_images)
            else:
                optPsf, ilum, wf_grid_rot = self._getOptPsf_naturalResolution(
                    parameter_values,
                    return_intermediate_images=return_intermediate_images)
            self.optPsf = optPsf
        else:
            optPsf = self.optPsf
    # at the moment, no difference in optPsf_postprocessing depending on
    # return_intermediate_images
    optPsf_final, psf_position = self._optPsf_postprocessing(
        optPsf, return_intermediate_images=return_intermediate_images)
    if self.save == 1:
        np.save(self.TESTING_FINAL_IMAGES_FOLDER + 'optPsf', optPsf)
        np.save(self.TESTING_FINAL_IMAGES_FOLDER + 'optPsf_final',
                optPsf_final)
    # Bug fix: this closing trace was previously placed *after* the return
    # statements and was therefore unreachable dead code.
    if self.verbosity == 1:
        print('Finished with constructModelImage_PFS_naturalResolution')
        print(' ')
    if return_intermediate_images:
        return optPsf_final, ilum, wf_grid_rot, psf_position
    return optPsf_final, psf_position
def _optPsf_postprocessing(self, optPsf, return_intermediate_images=False):
"""Apply postprocessing to the pure optical psf image
Parameters
----------
optPsf : `np.array`, (N, N)
Optical image, only psf
return_intermediate_images : `bool`
If True, return intermediate images created during the run
This is potentially in order to help with debugging and inspect
the images created during the process
Returns
----------
(At the moment, the output is the same no matter what
return_intermediate_images is, but there is a possibility
to add intermediate outputs)
optPsf_final : `np.array`, (N, N)
Final model image
psf_position : `np.array`, (2,)
Position where the image is centered
Notes
----------
Takes optical psf and ``postprocesses`` it to generate final image.
The algorithm first reduces the oversampling and cuts the central part
of the image. This is done to speed up the calculations.
Then we apply various effects that are separate from
the pure optical PSF considerations.
We then finish with the centering algorithm to move our created image
to fit the input science image, invoking PSFPosition class.
The effects we apply are
1. scattered light
function apply_scattered_light
2. convolution with fiber
3. CCD difusion
4. grating effects
5. centering
"""
time_start_single = time.time()
if self.verbosity == 1:
print(' ')
print('Entering optPsf_postprocessing')
params = self.params
shape = self.image.shape
# all of the parameters for the creation of the image
param_values = params.valuesdict()
# how much is my generated image oversampled compared to final image
oversampling_original = (self.pixelScale_effective) / self.scale_ModelImage_PFS_naturalResolution
if self.verbosity == 1:
print('optPsf.shape: ' + str(optPsf.shape))
print('oversampling_original: ' + str(oversampling_original))
# print('type(optPsf) '+str(type(optPsf[0][0])))
# determine the size of the central cut, so that from the huge generated
# image we can cut out only the central portion (1.4 times larger
# than the size of actual final image)
size_of_central_cut = int(oversampling_original * self.image.shape[0] * 1.4)
if size_of_central_cut > optPsf.shape[0]:
# if larger than size of image, cut the image
# fail if not enough space to cut out the image
size_of_central_cut = optPsf.shape[0]
if self.verbosity == 1:
print('size_of_central_cut modified to ' + str(size_of_central_cut))
assert int(oversampling_original * self.image.shape[0] * 1.0) < optPsf.shape[0]
assert size_of_central_cut <= optPsf.shape[0]
if self.verbosity == 1:
print('size_of_central_cut: ' + str(size_of_central_cut))
# cut part which you need to form the final image
# set oversampling to 1 so you are not resizing the image, and dx=0 and
# dy=0 so that you are not moving around, i.e., you are just cutting the
# central region
optPsf_cut = PsfPosition.cut_Centroid_of_natural_resolution_image(
image=optPsf, size_natural_resolution=size_of_central_cut + 1, oversampling=1, dx=0, dy=0)
if self.verbosity == 1:
print('optPsf_cut.shape' + str(optPsf_cut.shape))
# we want to reduce oversampling to be roughly around 10 to make things computationally easier
# if oversampling_original is smaller than 20 (in case of dithered images),
# make resolution coarser by factor of 2
# otherwise set it to 11
if oversampling_original < 20:
oversampling = np.round(oversampling_original / 2)
else:
oversampling = 11
if self.verbosity == 1:
print('oversampling:' + str(oversampling))
# what will be the size of the image after you resize it to the from
# ``oversampling_original'' to ``oversampling'' ratio
size_of_optPsf_cut_downsampled = np.int(
np.round(size_of_central_cut / (oversampling_original / oversampling)))
if self.verbosity == 1:
print('size_of_optPsf_cut_downsampled: ' + str(size_of_optPsf_cut_downsampled))
# make sure that optPsf_cut_downsampled is an array which has an odd size
# - increase size by 1 if needed
if (size_of_optPsf_cut_downsampled % 2) == 0:
im1 = galsim.Image(optPsf_cut, copy=True, scale=1)
interpolated_image = galsim._InterpolatedImage(im1, x_interpolant=galsim.Lanczos(5, True))
optPsf_cut_downsampled = interpolated_image.\
drawImage(nx=size_of_optPsf_cut_downsampled + 1, ny=size_of_optPsf_cut_downsampled + 1,
scale=(oversampling_original / oversampling), method='no_pixel').array
else:
im1 = galsim.Image(optPsf_cut, copy=True, scale=1)
interpolated_image = galsim._InterpolatedImage(im1, x_interpolant=galsim.Lanczos(5, True))
optPsf_cut_downsampled = interpolated_image.\
drawImage(nx=size_of_optPsf_cut_downsampled, ny=size_of_optPsf_cut_downsampled,
scale=(oversampling_original / oversampling), method='no_pixel').array
if self.verbosity == 1:
print('optPsf_cut_downsampled.shape: ' + str(optPsf_cut_downsampled.shape))
if self.verbosity == 1:
print('Postprocessing parameters are:')
print(str(['grating_lines', 'scattering_slope', 'scattering_amplitude',
'pixel_effect', 'fiber_r']))
print(str([param_values['grating_lines'], param_values['scattering_slope'],
param_values['scattering_amplitude'], param_values['pixel_effect'],
param_values['fiber_r']]))
##########################################
# 1. scattered light
optPsf_cut_downsampled_scattered = self.apply_scattered_light(optPsf_cut_downsampled,
oversampling,
param_values['scattering_slope'],
param_values['scattering_amplitude'],
dithering=self.dithering)
##########################################
# 2. convolution with fiber
optPsf_cut_fiber_convolved = self.convolve_with_fiber(optPsf_cut_downsampled_scattered,
oversampling,
param_values['fiber_r'],
dithering=self.dithering)
##########################################
# 3. CCD difusion
optPsf_cut_pixel_response_convolved = self.convolve_with_CCD_diffusion(optPsf_cut_fiber_convolved,
oversampling,
param_values['pixel_effect'],
dithering=self.dithering)
##########################################
# 4. grating | |
# repo: no7hings/Lynxi -- workspace/module/python-2.7/LxBasic/bscMtdCore.py
# coding:utf-8
from . import bscCfg
class Mtd_BscBasic(bscCfg.BscUtility):
    """Root of the basic method-helper hierarchy.

    Adds no behavior of its own; it only anchors the subclasses below to
    ``bscCfg.BscUtility``, which presumably supplies the ``MOD_*`` module
    handles and ``DEF_*`` format constants the subclasses access via
    ``cls`` -- TODO confirm against bscCfg.
    """
    pass
class Mtd_BscPython(Mtd_BscBasic):
    """Helpers for loading and reloading Python modules by dotted name."""
    @classmethod
    def _bsc_mtd__set_python_module_load_(cls, moduleName):
        """Import and return the named module; None if no loader is found."""
        loader = cls.MOD_pkgutil.find_loader(moduleName)
        if loader:
            return cls.MOD_importlib.import_module(moduleName)

    @classmethod
    def _bsc_mtd__set_python_module_reload_(cls, moduleName):
        """Import the named module and force a reload; None if not found.

        Bug fix: this method used to be byte-identical to the load helper,
        so an already-imported (cached) module was never actually reloaded.
        """
        loader = cls.MOD_pkgutil.find_loader(moduleName)
        if loader:
            module = cls.MOD_importlib.import_module(moduleName)
            try:
                reload_fn = cls.MOD_importlib.reload  # Python 3.4+
            except AttributeError:
                reload_fn = reload  # Python 2: builtin reload
            return reload_fn(module)
class Mtd_BscUtility(Mtd_BscBasic):
@classmethod
def _getSystemUsername(cls):
return cls.MOD_getpass.getuser()
@classmethod
def _getSystemHostname(cls):
return cls.MOD_socket.getfqdn(cls.MOD_socket.gethostname())
@classmethod
def _getSystemHost(cls):
return cls.MOD_socket.gethostbyname(cls.MOD_socket.gethostname())
@classmethod
def _bsc_mtd__os_path__set_directory_create_(cls, directoryString):
if cls.MTD_os_path.exists(directoryString) is False:
cls.MOD_os.makedirs(directoryString)
return True
return False
@classmethod
def _bsc_mtd__os_path__set_file_directory_create_(cls, fileString):
directoryString = cls.MTD_os_path.dirname(fileString)
cls._bsc_mtd__os_path__set_directory_create_(directoryString)
@classmethod
def _getSystemActiveTimestamp(cls):
return cls.MOD_time.time()
@classmethod
def _timestampToDatetag(cls, timestamp):
return cls.MOD_time.strftime('%Y_%m%d', cls.MOD_time.localtime(timestamp))
@classmethod
def _getActiveDatetag(cls):
return cls._timestampToDatetag(cls._getSystemActiveTimestamp())
@classmethod
def _timestamp2timetag(cls, timestamp):
return cls.MOD_time.strftime(
cls.DEF_time_tag_format,
cls.MOD_time.localtime(timestamp)
)
@classmethod
def _getActiveTimetag(cls):
return cls._timestamp2timetag(cls._getSystemActiveTimestamp())
@classmethod
def _timestampToPrettify(cls, timestamp):
return cls.MOD_time.strftime(
cls.DEF_time_prettify_format,
cls.MOD_time.localtime(timestamp)
)
@classmethod
def _getActivePrettifyTime(cls):
return cls._timestampToPrettify(cls._getSystemActiveTimestamp())
    @classmethod
    def _string2list(cls, string, includes=None):
        """Normalise a string or a sequence of strings into a list.

        When `includes` is given, only values contained in `includes`
        are kept; otherwise everything is kept.  Any other input type
        yields an empty list.
        """
        lis = []
        # Single string (Python 2: both str and unicode) -> one-element list.
        if isinstance(string, (str, unicode)):
            if includes:
                if string in includes:
                    lis = [string]
            else:
                lis = [string]
        # Sequence input: copy elements, honouring the `includes` filter.
        elif isinstance(string, (tuple, list)):
            for i in string:
                if includes:
                    if i in includes:
                        lis.append(i)
                else:
                    lis.append(i)
        return lis
@classmethod
def _isDevelop(cls):
return [False, True][cls._getOsEnvironRawWithKey(cls.DEF_util__environ_key__enable_develop, 'FALSE').lower() == 'true']
@classmethod
def _isTraceEnable(cls):
return [False, True][cls._getOsEnvironRawWithKey(cls.DEF_util__environ_key__enable_trace, 'FALSE').lower() == 'true']
@classmethod
def _getOsEnvironRawWithKey(cls, key, failobj=None):
return cls.MOD_os.environ.get(key, failobj)
@classmethod
def _getOsEnvironRawAsPath(cls, key, failobj=None):
if key in cls.MOD_os.environ:
return cls._getOsEnvironRawWithKey(key).replace('\\', cls.DEF_bsc__pathsep)
elif failobj is not None:
return failobj
return ''
@classmethod
def _getOsEnvironRawAsList(cls, key, failobj=None):
if key in cls.MOD_os.environ:
return cls._getOsEnvironRawWithKey(key).split(cls.MOD_os.pathsep)
elif failobj is not None:
return failobj
return []
@classmethod
def _getOsEnvironRawAsPathList(cls, key, failobj=None):
if key in cls.MOD_os.environ:
return [
i.replace('\\', cls.DEF_bsc__pathsep)
for i in
cls._getOsEnvironRawWithKey(key).split(cls.MOD_os.pathsep)
]
elif failobj is not None:
return failobj
return []
@classmethod
def _osPathToPythonStyle(cls, pathStr):
return pathStr.replace('\\', cls.DEF_bsc__pathsep)
@classmethod
def _isOsDirectory(cls, pathStr):
return cls.MTD_os_path.isdir(pathStr)
@classmethod
def _isOsFile(cls, pathStr):
if pathStr is not None:
return cls.MTD_os_path.isfile(pathStr)
return False
@classmethod
def _isOsSameFile(cls, sourceFileString, targetFileString):
return cls.MTD_os_path.normpath(sourceFileString) == cls.MTD_os_path.normpath(targetFileString)
@classmethod
def _getOsFileBase(cls, fileString):
return cls.MTD_os_path.splitext(fileString)[0]
@classmethod
def _getOsFileName(cls, fileString):
return cls.MTD_os_path.splitext(cls.MTD_os_path.basename(fileString))[0]
@classmethod
def _getOsFileDirname(cls, fileString):
return cls.MTD_os_path.dirname(fileString)
@classmethod
def _getOsFileBasename(cls, fileString):
return cls.MTD_os_path.basename(fileString)
@classmethod
def _getOsFileExt(cls, fileString):
return cls.MTD_os_path.splitext(fileString)[1]
@classmethod
def _toOsFileStringReplaceFileName(cls, fileString, newFileBasenameString):
osPath = cls._getOsFileDirname(fileString)
osExt = cls._getOsFileExt(fileString)
newFileString = u'{0}/{1}{2}'.format(osPath, newFileBasenameString, osExt)
return newFileString
@classmethod
def _isOsPathExist(cls, pathStr):
return cls.MTD_os_path.exists(pathStr)
@classmethod
def _isOsDirectoryExist(cls, directoryString):
return cls.MTD_os_path.isdir(directoryString)
@classmethod
def _isOsFileExist(cls, fileString):
return cls.MTD_os_path.isfile(fileString)
@classmethod
def _setOsPathOpen(cls, pathStr):
if cls._isOsPathExist(pathStr) is True:
cls.MOD_os.startfile(pathStr.replace(cls.DEF_bsc__pathsep, cls.MOD_os.sep))
@classmethod
def _setOsFileOpen(cls, pathStr):
if cls._isOsFileExist(pathStr) is True:
cls.MOD_os.startfile(pathStr.replace(cls.DEF_bsc__pathsep, cls.MOD_os.sep))
@classmethod
def _setOsDirectoryOpen(cls, pathStr):
if cls._isOsPathExist(pathStr) is True:
cls.MOD_os.startfile(pathStr.replace(cls.DEF_bsc__pathsep, cls.MOD_os.sep))
@classmethod
def _getOsFileMtimestamp(cls, fileString):
if cls._isOsFileExist(fileString):
return cls.MOD_os.stat(fileString).st_mtime
@classmethod
def _getOsFileSize(cls, fileString):
if cls._isOsFileExist(fileString):
return cls.MTD_os_path.getsize(fileString)
return 0
@classmethod
def _isAbsOsPath(cls, pathStr):
return cls.MTD_os_path.isabs(pathStr)
@classmethod
def _isOsFileTimeChanged(cls, sourceFileString, targetFileString):
if cls._isOsFileExist(sourceFileString) and cls._isOsFileExist(targetFileString):
if str(cls._getOsFileMtimestamp(sourceFileString)) != str(cls._getOsFileMtimestamp(targetFileString)):
return True
return False
return False
@classmethod
def _stringToHash(cls, text):
md5Obj = cls.MOD_hashlib.md5()
md5Obj.update(text)
return str(md5Obj.hexdigest()).upper()
@classmethod
def _getOsFileHash(cls, fileString):
if cls._isOsFileExist(fileString):
with open(fileString, u'rb') as f:
raw = f.read()
f.close()
if raw:
return cls._stringToHash(raw)
return u'D41D8CD98F00B204E9800998ECF8427E'
@classmethod
def _getOsFileHash_(cls, fileString):
if cls._isOsFileExist(fileString):
with open(fileString, u'rb') as f:
md5Obj = cls.MOD_hashlib.md5()
while True:
d = f.read(8096)
if not d:
break
md5Obj.update(d)
f.close()
return str(md5Obj.hexdigest()).upper()
return u'D41D8CD98F00B204E9800998ECF8427E'
@classmethod
def _isOsFileHashChanged(cls, sourceFileString, targetFileString):
if cls._isOsFileExist(sourceFileString) and cls._isOsFileExist(targetFileString):
if cls._getOsFileHash(sourceFileString) != cls._getOsFileHash(targetFileString):
return True
return False
return False
@classmethod
def _setOsFileRename(cls, fileString, newFileBasenameString):
if cls._isOsFileExist(fileString):
newFileString = cls._toOsFileStringReplaceFileName(fileString, newFileBasenameString)
if cls._isOsSameFile(fileString, newFileString) is False:
cls.MOD_os.rename(fileString, newFileString)
@classmethod
def _setOsFileRename_(cls, fileString, newFileString):
if cls._isOsSameFile(fileString, newFileString) is False:
cls.MOD_os.rename(fileString, newFileString)
@classmethod
def _setOsFileCopy(cls, sourceFileString, targetFileString, force=True):
cls._bsc_mtd__os_path__set_file_directory_create_(targetFileString)
# Check Same File
if not cls._isOsSameFile(sourceFileString, targetFileString):
if force is True:
cls.MOD_shutil.copy2(sourceFileString, targetFileString)
elif force is False:
try:
cls.MOD_shutil.copy2(sourceFileString, targetFileString)
except IOError:
print sourceFileString, targetFileString
    @classmethod
    def _setOsPathRemove(cls, pathStr):
        # Delete a path: plain unlink for files, directory removal otherwise.
        if cls.MTD_os_path.isfile(pathStr):
            cls.MOD_os.remove(pathStr)
        elif cls.MTD_os_path.isdir(pathStr):
            # NOTE(review): os.removedirs also prunes now-empty *parent*
            # directories and raises on a non-empty directory -- confirm this
            # is intended (os.rmdir / shutil.rmtree may be what was meant).
            cls.MOD_os.removedirs(pathStr)
@classmethod
def _setOsFileMove_(cls, fileString, targetPathString):
basename = cls._getOsFileBasename(fileString)
targetFileString = cls._toOsFilename(targetPathString, basename)
cls._setOsFileMove(fileString, targetFileString)
@classmethod
def _setOsFileMove(cls, fileString, targetFileString):
if cls.MTD_os_path.isfile(fileString):
cls._bsc_mtd__os_path__set_file_directory_create_(targetFileString)
cls.MOD_shutil.move(fileString, targetFileString)
@classmethod
def setOsDirectoryHide(cls, directoryString):
if cls._isOsDirectoryExist(directoryString):
if u'Windows' in cls.MOD_platform.system():
command = u'attrib +h "' + directoryString + u'"'
command = command.encode(cls.MOD_locale.getdefaultlocale()[1])
cls.MOD_os.popen(command).close()
@classmethod
def _osPathString2RelativeName(cls, rootString, fullpathName):
return fullpathName[len(rootString) + 1:]
@classmethod
def _toOsFilename(cls, directoryString, basenameString):
return cls.MTD_os_path.join(directoryString, basenameString).replace('\\', cls.DEF_bsc__pathsep)
@classmethod
def _getPathnameListByOsDirectory(cls, rootString, extString, isFile, isFullpath, isAll):
def extFilterFnc_(fullpathName_):
if filterExtStringLis is not None:
for i in filterExtStringLis:
if fullpathName_.endswith(i):
return True
return False
return True
def addFnc_(fullpathName_):
if extFilterFnc_(fullpathName_) is True:
if isFullpath is True:
lis.append(fullpathName_)
else:
relativeName = cls._osPathString2RelativeName(rootString, fullpathName_)
lis.append(relativeName)
def recursionFnc_(directoryString_):
children = cls.MOD_os.listdir(directoryString_)
if children:
for i in children:
fullpathName = cls._toOsFilename(directoryString_, i)
if cls.MTD_os_path.isfile(fullpathName):
addFnc_(fullpathName)
else:
if isFile is False:
addFnc_(fullpathName)
if isAll is True:
recursionFnc_(fullpathName)
lis = []
if extString is not None:
filterExtStringLis = cls._string2list(extString)
else:
filterExtStringLis = None
if cls.MTD_os_path.exists(rootString):
recursionFnc_(rootString)
return lis
@classmethod
def _getOsFileTemporaryName(cls, fileString, timetag=None):
if timetag is None:
timetag = cls._getActiveTimetag()
temporaryDirectory = u'{}/{}'.format(cls.DEF_path_temporary_local, timetag)
temporaryFileString = cls._toOsFilename(temporaryDirectory, cls._getOsFileBasename(fileString))
cls._bsc_mtd__os_path__set_directory_create_(temporaryDirectory)
return temporaryFileString
@classmethod
def _toOsFileJoinTimetag(cls, fileString, timetag=None, useMode=0):
if timetag is None:
timetag = cls._getActiveTimetag()
if useMode == 0:
return (u'.{}'.format(timetag)).join(cls.MTD_os_path.splitext(fileString))
elif useMode == 1:
return u'{}/{}/{}'.format(cls._getOsFileDirname(fileString), timetag, cls._getOsFileBasename(fileString))
return fileString
@classmethod
def _setOsFileBackup(cls, fileString, backupFileString, timetag=None, useMode=0):
backupFileString_ = cls._toOsFileJoinTimetag(backupFileString, timetag, useMode)
cls._setOsFileCopy(fileString, backupFileString_)
@classmethod
def _getOsFileMtimetag(cls, fileString):
return cls._timestamp2timetag(cls._getOsFileMtimestamp(fileString))
@classmethod
def _toOsFileInfoJsonFileString(cls, fileString):
base = cls._getOsFileBase(fileString)
return base + u'.info.json'
@classmethod
def _infoDict(cls, fileString):
return {
cls.DEF_key_source: fileString,
cls.DEF_key_timestamp: cls._getSystemActiveTimestamp(),
cls.DEF_key_username: cls._getSystemUsername(),
cls.DEF_key_hostname: cls._getSystemHostname(),
cls.DEF_key_host: cls._getSystemHost()
}
@classmethod
def _toOsFileResultFileString(cls, fileString):
base = cls._getOsFileBase(fileString)
return base + u'.result.log'
@classmethod
def _getDevelopRoot(cls):
return cls._getOsEnvironRawAsPath(
cls.DEF_util__environ_key__path_develop,
cls.DEF_util__root__default_develop
)
@classmethod
def _getProductRoot(cls):
return cls._getOsEnvironRawAsPath(cls.DEF_util__environ_key__path_product, cls.DEF_util__root__default_product)
@classmethod
def _getPresetRoot(cls):
return cls._getOsEnvironRawAsPath(cls.DEF_util__environ_key__path_preset, cls._getProductRoot())
@classmethod
def _getToolkitRoot(cls):
return cls._getOsEnvironRawAsPath(cls.DEF_util__environ_key__path_toolkit, cls._getProductRoot())
@classmethod
def _getServerPath(cls):
if cls._isDevelop():
return cls._getDevelopRoot()
return cls._getProductRoot()
@classmethod
def _toPathString(cls, separator, *args):
if isinstance(args[0], (list, tuple)):
pathStringLis = args[0]
else:
pathStringLis = list(args)
string = ''
index = 0
for i in pathStringLis:
if i not in ['', None]:
if index is 0:
string = i
else:
string += u'{}{}'.format(separator, i)
index += 1
return string
@classmethod
def _toOsPathString(cls, *args):
return cls._toPathString(cls.DEF_bsc__pathsep, *args).replace('\\', cls.DEF_bsc__pathsep)
    @classmethod
    def _bsc_mtd__set_python_module_load_(cls, moduleName):
        # Import `moduleName` if the import system can locate it; returns
        # the module object, or None when no loader is found.
        # NOTE(review): duplicates Mtd_BscPython's method of the same name.
        loader = cls.MOD_pkgutil.find_loader(moduleName)
        if loader:
            return cls.MOD_importlib.import_module(moduleName)
@classmethod
def _bsc_mtd__set_python_module_reload_(cls, moduleName):
loader = cls.MOD_pkgutil.find_loader(moduleName)
if loader:
return cls.MOD_importlib.import_module(moduleName)
@classmethod
def _toHtmlLogFileString(cls, fileString):
base = cls._getOsFileBase(fileString)
return u'{}.log.html'.format(base)
@classmethod
def _getQtProgressBar(cls, title, maxValue):
module = cls._bsc_mtd__set_python_module_load_(u'LxGui.qt.qtCommands')
if module is not None:
return module.setProgressWindowShow(title, maxValue)
@classmethod
def _setQtProgressBarUpdate(cls, progressBar, text=None):
if progressBar is not None:
progressBar.updateProgress(text)
@classmethod
def _timetagToChnPrettify(cls, timetag, useMode=0):
if timetag:
if cls._getOsFileTimetag(timetag) is not None:
year = int(timetag[:4])
month = int(timetag[5:7])
date = int(timetag[7:9])
hour = int(timetag[10:12])
minute = int(timetag[12:14])
second = int(timetag[15:16])
if year > 0:
timetuple = cls.MOD_datetime.datetime(year=year, month=month, day=date, hour=hour, minute=minute, second=second).timetuple()
return cls._timetupleToChnPrettify(timetuple, useMode)
return u'{0}{0}年{0}月{0}日{0}点分'.format('??')
return u'无记录'
@classmethod
def _getOsFileTimetag(cls, backupFileString):
lis = cls.MOD_re.findall(cls.DEF_time_tag_search_string, backupFileString)
if lis:
return lis[0]
@classmethod
def _getOsFileBackupNameDict(cls, fileString):
dic = {}
if fileString:
directoryName = cls._getOsFileDirname(fileString)
if cls._isOsDirectoryExist(directoryName):
backupName = cls._toOsFileJoinTimetag(fileString, cls.DEF_time_tag_search_string)
stringLis = cls.MOD_glob.glob(backupName)
if stringLis:
for i in stringLis:
dic[cls._getOsFileTimetag(i)] = i.replace('\\', cls.DEF_bsc__pathsep)
return dic
@classmethod
def _timestampToChnPrettify(cls, timestamp, useMode=0):
if isinstance(timestamp, float):
return cls._timetupleToChnPrettify(cls.MOD_time.localtime(timestamp), useMode)
else:
return u'无记录'
@classmethod
def _timetupleToChnPrettify(cls, timetuple, useMode=0):
year, month, date, hour, minute, second, week, dayCount, isDst = timetuple
if useMode == 0:
timetuple_ = cls.MOD_time.localtime(cls.MOD_time.time())
year_, month_, date_, hour_, minute_, second_, week_, dayCount_, isDst_ = timetuple_
#
monday = date - week
monday_ = date_ - week_
if timetuple_[:1] == timetuple[:1]:
dateString = u'{}月{}日'.format(str(month).zfill(2), str(date).zfill(2))
weekString = u''
subString = u''
if timetuple_[:2] == timetuple[:2]:
if monday_ == monday:
dateString = ''
weekString = u'{0}'.format(cls.DEF_time_week[int(week)][0])
if date_ == date:
subString = u'(今天)'
elif date_ == date + 1:
subString = u'(昨天)'
#
timeString = u'{}点{}分'.format(str(hour).zfill(2), str(minute).zfill(2), str(second).zfill(2))
#
string = u'{}{}{} {}'.format(dateString, weekString, subString, timeString)
return string
else:
return u'{}年{}月{}日'.format(str(year).zfill(4), str(month).zfill(2), str(date).zfill(2))
else:
dateString = u'{}年{}月{}日'.format(str(year).zfill(4), str(month).zfill(2), str(date).zfill(2))
timeString = u'{}点{}分{}秒'.format(str(hour).zfill(2), str(minute).zfill(2), str(second).zfill(2))
return u'{} {}'.format(dateString, timeString)
# Log
@classmethod
def _logDirectory(cls):
return u'{}/.log'.format(cls._getServerPath())
@classmethod
def _exceptionLogFile(cls):
return u'{}/{}.exception.log'.format(
cls._logDirectory(), cls._getActiveDatetag()
)
@classmethod
def _errorLogFile(cls):
return u'{}/{}.error.log'.format(
cls._logDirectory(), cls._getActiveDatetag()
)
@classmethod
def _resultLogFile(cls):
return u'{}/{}.result.log'.format(
cls._logDirectory(), cls._getActiveDatetag()
)
@classmethod
def _databaseLogFile(cls):
return u'{}/{}.database.log'.format(
cls._logDirectory(), cls._getActiveDatetag()
)
@classmethod
def _basicUniqueId(cls):
return '4908BDB4-911F-3DCE-904E-96E4792E75F1'
| |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Generator to yield resampled volume data for training and validation
"""
# %%
from keras.models import load_model, Model
from matplotlib import pyplot as plt
import numpy as np
import os
from os import path
import random
import SimpleITK as sitk
from stl import mesh
from utils import data_loading_funcs as dlf
from utils import mhd_utils as mu
from utils import reg_evaluator as regev
from utils import volume_resampler_3d as vr
import tensorflow as tf
from utils import registration_reader as rr
import scipy
#from augment_data import augment
# %%
class VolumeDataGenerator(object):
"""Generate volume image for training or validation
#Arguments
"""
def __init__(self,
data_folder,
case_num_range,
case_num_range_2=None,
max_registration_error = 20.0):
self.data_folder = data_folder
cases = []
# Go through all the case
for caseIdx in range(case_num_range[0], case_num_range[1]+1):
caseFolder = 'Case{:04d}'.format(caseIdx)
full_case = path.join(data_folder, caseFolder)
if not path.isdir(full_case):
continue
else:
cases.append(caseIdx)
if case_num_range_2 != None:
for caseIdx in range(case_num_range_2[0], case_num_range_2[1]+1):
caseFolder = 'Case{:04d}'.format(caseIdx)
full_case = path.join(data_folder, caseFolder)
if not path.isdir(full_case):
continue
else:
cases.append(caseIdx)
self.good_cases = np.asarray(cases, dtype=np.int32)
self.num_cases = self.good_cases.size
random.seed()
self.e_t = 0.5
self.e_rot = 1
self.isMultiGauss = False
self.max_error = max_registration_error
print('VolumeDataGenerator: max_registration_error = {}'.format(self.max_error))
#self.width, self.height, self.depth = 96, 96, 32
# ----- #
    def get_sample_multi_gauss(self,mean,cov):
        # Draw one sample from a multivariate normal distribution with the
        # given mean vector and covariance matrix.
        return np.random.multivariate_normal(mean,cov)
    def get_num_cases(self):
        # Number of valid case folders discovered at construction time.
        return self.num_cases
# ----- #
def _get_random_value(self, r, center, hasSign):
randNumber = random.random() * r + center
if hasSign:
sign = random.random() > 0.5
if sign == False:
randNumber *= -1
return randNumber
# ----- #
def get_array_from_itk_matrix(self, itk_mat):
mat = np.reshape(np.asarray(itk_mat), (3,3))
return mat
# ----- #
def generate(self, shuffle=True, shape=(96,96,96)):
"""
"""
currentIdx = 0
np.random.seed()
(width, height, depth) = shape
print('Shuffle = {}'.format(shuffle))
while True:
idx = currentIdx % self.num_cases
currentIdx += 1
# Shuffle cases
if idx == 0:
if shuffle:
case_array = np.random.permutation(self.good_cases)
else:
case_array = self.good_cases
case_no = case_array[idx]
sampledFixed, sampledMoving, err, params = self.create_sample(case_no, shape)
#sampledFixed, sampledMoving, pos_neg, err, params = self.create_sample(450, shape)
print('Sample generated frome Case{:04d}'.format(case_no))
# Put into 4D array
sample4D = np.zeros((depth, height, width, 2), dtype=np.ubyte)
sample4D[:,:,:,0] = sitk.GetArrayFromImage(sampledFixed)
sample4D[:,:,:,1] = sitk.GetArrayFromImage(sampledMoving)
yield sample4D, err, params
# ----- #
    def generate_batch(self, batch_size=32, shape=(96,96,32)):
        """Endless Keras-style batch generator.

        Yields (batch_samples, params_array) where batch_samples is a
        (batch, depth, height, width, 2) ubyte array holding fixed/moving
        volume pairs from create_sample.
        """
        batch_index = 0
        np.random.seed()
        (width, height, depth) = shape
        while True:
            # Shuffle cases at the start of every epoch.
            if batch_index == 0:
                case_array = np.random.permutation(self.good_cases)
            #current_index = (batch_index * batch_size) % self.num_cases
            current_index = batch_index * batch_size
            if (current_index + batch_size) < self.num_cases:
                current_batch_size = batch_size
                batch_index += 1
            else:
                # Last batch of the epoch: shrink to the remaining cases.
                # When exactly one case remains, step back one index so the
                # final batch has 2 samples (one case is repeated).
                if (self.num_cases - current_index) > 1:
                    current_batch_size = self.num_cases - current_index
                else:
                    current_batch_size = 2
                    current_index -= 1
                batch_index = 0
            batch_samples = np.zeros((current_batch_size, depth, height, width, 2), dtype=np.ubyte)
            #batch_labels = []
            batch_errors = []
            batch_params = []
            for k in range(current_batch_size):
                case_no = case_array[k + current_index]
                #print(case_no)
                sampledFixed, sampledMoving, err, params = self.create_sample(case_no, shape)
                # Stack fixed/moving volumes into one 2-channel 4D array.
                sample4D = np.zeros((depth, height, width, 2), dtype=np.ubyte)
                sample4D[:,:,:,0] = sitk.GetArrayFromImage(sampledFixed)
                sample4D[:,:,:,1] = sitk.GetArrayFromImage(sampledMoving)
                batch_samples[k, :,:,:,:] = sample4D
                #batch_labels.append(pos_neg)
                batch_errors.append([err])
                batch_params.append(params)
            #yield (batch_samples, [np.asarray(batch_errors), np.asarray(batch_params)])
            yield (batch_samples, np.asarray(batch_params))
            #yield (batch_samples, np.asarray(batch_errors))
    def generate_batch_classification(self, batch_size=32, shape=(96,96,32)):
        """Endless batch generator for the classification variant.

        Yields (batch_samples, [labels, errors]) where batch_samples is a
        (batch, depth, height, width, 4) ubyte array holding two
        fixed/moving volume pairs per sample (channels: fixed_i, moving_i,
        fixed_f, moving_f) from create_sample_classification.
        """
        batch_index = 0
        np.random.seed()
        (width, height, depth) = shape
        while True:
            # Shuffle cases at the start of every epoch.
            if batch_index == 0:
                case_array = np.random.permutation(self.good_cases)
            #current_index = (batch_index * batch_size) % self.num_cases
            current_index = batch_index * batch_size
            if (current_index + batch_size) < self.num_cases:
                current_batch_size = batch_size
                batch_index += 1
            else:
                # Last batch of the epoch: shrink to the remaining cases;
                # with a single case left, repeat one so the batch has 2.
                if (self.num_cases - current_index) > 1:
                    current_batch_size = self.num_cases - current_index
                else:
                    current_batch_size = 2
                    current_index -= 1
                batch_index = 0
            batch_samples = np.zeros((current_batch_size, depth, height, width, 4), dtype=np.ubyte)
            #batch_labels = []
            batch_labels = []
            batch_errs = []
            for k in range(current_batch_size):
                case_no = case_array[k + current_index]
                #print(case_no)
                sampledFixed_i, sampledFixed_f, sampledMoving_i, sampledMoving_f, label, err1, err2 = self.create_sample_classification(case_no, shape)
                # Stack the two volume pairs into one 4-channel 4D array.
                sample4D = np.zeros((depth, height, width, 4), dtype=np.ubyte)
                sample4D[:,:,:,0] = sitk.GetArrayFromImage(sampledFixed_i)
                sample4D[:,:,:,1] = sitk.GetArrayFromImage(sampledMoving_i)
                sample4D[:,:,:,2] = sitk.GetArrayFromImage(sampledFixed_f)
                sample4D[:,:,:,3] = sitk.GetArrayFromImage(sampledMoving_f)
                batch_samples[k, :,:,:,:] = sample4D
                #batch_labels.append(pos_neg)
                batch_labels.append(label)
                batch_errs.append([err1, err2])
            yield (batch_samples, [np.asarray(batch_labels), np.asarray(batch_errs)])
    def generate_batch_NIH(self, batch_size=32, shape=(96,96,32)):
        """Endless batch generator for NIH data.

        Yields (batch_samples, batch_params); batch_samples is a
        (batch, depth, height, width, 2) ubyte array of fixed/moving
        volume pairs from create_sample_NIH.
        """
        batch_index = 0
        np.random.seed()
        (width, height, depth) = shape
        while True:
            # Shuffle cases at the start of every epoch.
            if batch_index == 0:
                case_array = np.random.permutation(self.good_cases)
            #current_index = (batch_index * batch_size) % self.num_cases
            current_index = batch_index * batch_size
            if (current_index + batch_size) < self.num_cases:
                current_batch_size = batch_size
                batch_index += 1
            else:
                # Last batch of the epoch: shrink to the remaining cases;
                # with a single case left, repeat one so the batch has 2.
                if (self.num_cases - current_index) > 1:
                    current_batch_size = self.num_cases - current_index
                else:
                    current_batch_size = 2
                    current_index -= 1
                batch_index = 0
            batch_samples = np.zeros((current_batch_size, depth, height, width, 2), dtype=np.ubyte)
            #batch_labels = []
            # NOTE(review): batch_errors / batch_segs / batch_trans /
            # batch_case_nums are accumulated but never yielded below --
            # dead work unless a caller relies on a changed yield later.
            batch_errors = []
            batch_params = []
            batch_segs = []
            batch_trans = []
            batch_case_nums = []
            for k in range(current_batch_size):
                case_no = case_array[k + current_index]
                #print(case_no)
                sampledFixed, sampledMoving, err, params, segMesh, trans = self.create_sample_NIH(case_no, shape)
                # Stack fixed/moving volumes into one 2-channel 4D array.
                sample4D = np.zeros((depth, height, width, 2), dtype=np.ubyte)
                sample4D[:,:,:,0] = sitk.GetArrayFromImage(sampledFixed)
                sample4D[:,:,:,1] = sitk.GetArrayFromImage(sampledMoving)
                batch_samples[k, :,:,:,:] = sample4D
                #batch_labels.append(pos_neg)
                batch_errors.append([err])
                batch_params.append(params)
                batch_segs.append(segMesh)
                batch_trans.append(trans)
                batch_case_nums.append(case_no)
            yield (batch_samples, batch_params)
    def generate_batch_NIH_transform_prediction(self, batch_size=32, shape=(96,96,32)):
        """Endless batch generator targeting transform parameters.

        Yields (batch_samples, batch_transforms); batch_samples is a
        (batch, depth, height, width, 2) ubyte array of fixed/moving
        volume pairs, batch_transforms the per-sample params from
        create_sample.
        """
        batch_index = 0
        np.random.seed()
        (width, height, depth) = shape
        while True:
            # Shuffle cases at the start of every epoch.
            if batch_index == 0:
                case_array = np.random.permutation(self.good_cases)
            #current_index = (batch_index * batch_size) % self.num_cases
            current_index = batch_index * batch_size
            if (current_index + batch_size) < self.num_cases:
                current_batch_size = batch_size
                batch_index += 1
            else:
                # Last batch of the epoch: shrink to the remaining cases;
                # with a single case left, repeat one so the batch has 2.
                if (self.num_cases - current_index) > 1:
                    current_batch_size = self.num_cases - current_index
                else:
                    current_batch_size = 2
                    current_index -= 1
                batch_index = 0
            batch_samples = np.zeros((current_batch_size, depth, height, width, 2), dtype=np.ubyte)
            #batch_labels = []
            batch_transforms = []
            for k in range(current_batch_size):
                case_no = case_array[k + current_index]
                #print(case_no)
                sampledFixed, sampledMoving, err, params = self.create_sample(case_no, shape)
                # Stack fixed/moving volumes into one 2-channel 4D array.
                sample4D = np.zeros((depth, height, width, 2), dtype=np.ubyte)
                sample4D[:,:,:,0] = sitk.GetArrayFromImage(sampledFixed)
                sample4D[:,:,:,1] = sitk.GetArrayFromImage(sampledMoving)
                batch_samples[k, :,:,:,:] = sample4D
                #batch_labels.append(pos_neg)
                batch_transforms.append(params)
                #batch_errors.append([err])
            yield (batch_samples, batch_transforms)
def generate_batch_NIH_transform_prediction_2D_multiview(self, batch_size=32, shape=(224,222,220)):
"""Used for keras training and validation
"""
batch_index = 0
np.random.seed()
slice_num = 3
(width, height, depth) = shape
while True:
# Shuffle cases
if batch_index == 0:
case_array = np.random.permutation(self.good_cases)
#current_index = (batch_index * batch_size) % self.num_cases
current_index = batch_index * batch_size
if (current_index + batch_size) < self.num_cases:
current_batch_size = batch_size
batch_index += 1
else:
# handle special case where only 1 sample left for the batch
if (self.num_cases - current_index) > 1:
current_batch_size = self.num_cases - current_index
else:
current_batch_size = 2
current_index -= 1
batch_index = 0
ax_batch_samples = np.zeros((current_batch_size, height, width, 2, slice_num), dtype=np.ubyte)
sag_batch_samples = np.zeros((current_batch_size, depth, height, 2, slice_num), dtype=np.ubyte)
cor_batch_samples = np.zeros((current_batch_size, depth, width, 2, slice_num), dtype=np.ubyte)
#batch_labels = []
batch_transforms = []
ax_transforms = []
sag_transforms = []
cor_transforms = []
batch_errors = []
batch_segs = []
batch_affines = []
batch_tX = []
batch_tY = []
batch_tZ = []
batch_rotX = []
batch_rotY = []
batch_rotZ = []
for k in range(current_batch_size):
case_no = case_array[k + current_index]
#print(case_no)
sampledFixed, sampledMoving, err, params = self.create_sample(case_no, shape)
# Put into 4D array
ax_sample = np.zeros((height, width, 2, slice_num), dtype=np.ubyte)
sag_sample = np.zeros((depth, height, 2, slice_num), dtype=np.ubyte)
cor_sample = np.zeros((depth, width, 2, slice_num), dtype=np.ubyte)
MR = sitk.GetArrayFromImage(sampledFixed)
TRUS = sitk.GetArrayFromImage(sampledMoving)
ax_sample[:,:,0,:] = np.reshape(MR[int(depth/2)-int((slice_num-1)/2):int(depth/2)+int((slice_num)/2)+1,:,:], (height, width, slice_num))
ax_sample[:,:,1,:] = np.reshape(TRUS[int(depth/2)-int((slice_num-1)/2):int(depth/2)+int((slice_num)/2)+1,:,:], (height, width, slice_num))
sag_sample[:,:,0,:] = np.reshape(MR[:,:,int(width/2)-int((slice_num-1)/2):int(width/2)+int((slice_num)/2)+1], (depth, height, slice_num))
sag_sample[:,:,1,:] = np.reshape(TRUS[:,:,int(width/2)-int((slice_num-1)/2):int(width/2)+int((slice_num)/2)+1], (depth, height, slice_num))
cor_sample[:,:,0,:] = np.reshape(MR[:,int(height/2)-int((slice_num-1)/2):int(height/2)+int((slice_num)/2)+1,:], (depth, width, slice_num))
cor_sample[:,:,1,:] = np.reshape(TRUS[:,int(height/2)-int((slice_num-1)/2):int(height/2)+int((slice_num)/2)+1,:], (depth, | |
1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0],
[1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1],
[1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1],
[1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1],
[1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1],
[1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1],
[1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0],
[1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1],
[0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1],
[0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0],
[1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0],
[1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0],
[0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1],
[1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0],
[1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0],
[1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1],
[1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1],
[1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, | |
so the first interesting line is called \"line 1\" then.
The optional argument `name` is a name identifying this
string, and is only used for error messages.
"""
return [x for x in self.parse(string, name)
if isinstance(x, Example)]
    def _parse_example(self, m, name, lineno):
        """
        Given a regular expression match from `_EXAMPLE_RE` (`m`),
        return a tuple `(source, options, want, exc_msg)`, where `source`
        is the matched example's source code (with prompts and
        indentation stripped); `options` is a dict of option-flag
        overrides found in the source; `want` is the example's expected
        output (with indentation stripped); and `exc_msg` is the expected
        exception message extracted from `want` (or None if `want` does
        not look like a traceback).

        `name` is the string's name, and `lineno` is the line number
        where the example starts; both are used for error messages.
        """
        # Get the example's indentation level.
        indent = len(m.group('indent'))
        # Divide source into lines; check that they're properly
        # indented; and then strip their indentation & prompts.
        source_lines = m.group('source').split('\n')
        self._check_prompt_blank(source_lines, indent, name, lineno)
        # Continuation lines must line up under the first prompt; only the
        # first '.' of the '...' prompt needs checking here.
        self._check_prefix(source_lines[1:], ' '*indent + '.', name, lineno)
        # indent+4 skips the indentation plus the 4-character prompt
        # ('>>> ' or '... ').
        source = '\n'.join([sl[indent+4:] for sl in source_lines])
        # Divide want into lines; check that it's properly indented; and
        # then strip the indentation.  Spaces before the last newline should
        # be preserved, so plain rstrip() isn't good enough.
        want = m.group('want')
        want_lines = want.split('\n')
        if len(want_lines) > 1 and re.match(r' *$', want_lines[-1]):
            del want_lines[-1]  # forget final newline & spaces after it
        self._check_prefix(want_lines, ' '*indent, name,
                           lineno + len(source_lines))
        want = '\n'.join([wl[indent:] for wl in want_lines])
        # If `want` contains a traceback message, then extract it.
        m = self._EXCEPTION_RE.match(want)
        if m:
            exc_msg = m.group('msg')
        else:
            exc_msg = None
        # Extract options from the source.
        options = self._find_options(source, name, lineno)
        return source, options, want, exc_msg
    # This regular expression looks for option directives in the
    # source code of an example.  Option directives are comments
    # starting with "doctest:".  Warning: this may give false
    # positives for string-literals that contain the string
    # "#doctest:".  Eliminating these false positives would require
    # actually parsing the string; but we limit them by ignoring any
    # line containing "#doctest:" that is *followed* by a quote mark.
    _OPTION_DIRECTIVE_RE = re.compile(r'#\s*doctest:\s*([^\n\'"]*)$',
                                      re.MULTILINE)

    def _find_options(self, source, name, lineno):
        """
        Return a dictionary containing option overrides extracted from
        option directives in the given source string.

        Keys are option flags (taken from OPTIONFLAGS_BY_NAME); the value
        is True for a '+FLAG' directive and False for a '-FLAG' directive.

        `name` is the string's name, and `lineno` is the line number
        where the example starts; both are used for error messages.

        Raises ValueError for a directive naming an unknown option, or a
        directive attached to a line with no example code.
        """
        options = {}
        # (note: with the current regexp, this will match at most once:)
        for m in self._OPTION_DIRECTIVE_RE.finditer(source):
            # Directives may be separated by commas and/or whitespace.
            option_strings = m.group(1).replace(',', ' ').split()
            for option in option_strings:
                if (option[0] not in '+-' or
                    option[1:] not in OPTIONFLAGS_BY_NAME):
                    raise ValueError('line %r of the doctest for %s '
                                     'has an invalid option: %r' %
                                     (lineno+1, name, option))
                flag = OPTIONFLAGS_BY_NAME[option[1:]]
                options[flag] = (option[0] == '+')
        # An option directive is only meaningful when attached to real
        # example source, not a blank or comment-only line.
        if options and self._IS_BLANK_OR_COMMENT(source):
            raise ValueError('line %r of the doctest for %s has an option '
                             'directive on a line with no example: %r' %
                             (lineno, name, source))
        return options
# This regular expression finds the indentation of every non-blank
# line in a string.
_INDENT_RE = re.compile('^([ ]*)(?=\S)', re.MULTILINE)
def _min_indent(self, s):
"Return the minimum indentation of any non-blank line in `s`"
indents = [len(indent) for indent in self._INDENT_RE.findall(s)]
if len(indents) > 0:
return min(indents)
else:
return 0
def _check_prompt_blank(self, lines, indent, name, lineno):
"""
Given the lines of a source string (including prompts and
leading indentation), check to make sure that every prompt is
followed by a space character. If any line is not followed by
a space character, then raise ValueError.
"""
for i, line in enumerate(lines):
if len(line) >= indent+4 and line[indent+3] != ' ':
raise ValueError('line %r of the docstring for %s '
'lacks blank after %s: %r' %
(lineno+i+1, name,
line[indent:indent+3], line))
def _check_prefix(self, lines, prefix, name, lineno):
"""
Check that every line in the given list starts with the given
prefix; if any line does not, then raise a ValueError.
"""
for i, line in enumerate(lines):
if line and not line.startswith(prefix):
raise ValueError('line %r of the docstring for %s has '
'inconsistent leading whitespace: %r' %
(lineno+i+1, name, line))
######################################################################
## 4. DocTest Finder
######################################################################
class DocTestFinder:
    """
    A class used to extract the DocTests that are relevant to a given
    object, from its docstring and the docstrings of its contained
    objects.  Doctests can currently be extracted from the following
    object types: modules, functions, classes, methods, staticmethods,
    classmethods, and properties.
    """

    def __init__(self, verbose=False, parser=DocTestParser(),
                 recurse=True, _namefilter=None, exclude_empty=True):
        """
        Create a new doctest finder.

        The optional argument `parser` specifies a class or
        function that should be used to create new DocTest objects (or
        objects that implement the same interface as DocTest).  The
        signature for this factory function should match the signature
        of the DocTest constructor.

        If the optional argument `recurse` is false, then `find` will
        only examine the given object, and not any contained objects.

        If the optional argument `exclude_empty` is false, then `find`
        will include tests for objects with empty docstrings.

        NOTE(review): the default `parser` is a single DocTestParser
        instance shared by all finders (evaluated once at class
        definition time) -- presumably safe because the parser keeps no
        per-call state, but worth confirming before changing.
        """
        self._parser = parser
        self._verbose = verbose
        self._recurse = recurse
        self._exclude_empty = exclude_empty
        # _namefilter is undocumented, and exists only for temporary backward-
        # compatibility support of testmod's deprecated isprivate mess.
        self._namefilter = _namefilter
    def find(self, obj, name=None, module=None, globs=None,
             extraglobs=None):
        """
        Return a list of the DocTests that are defined by the given
        object's docstring, or by any of its contained objects'
        docstrings.

        The optional parameter `module` is the module that contains
        the given object.  If the module is not specified or is None, then
        the test finder will attempt to automatically determine the
        correct module.  The object's module is used:

            - As a default namespace, if `globs` is not specified.
            - To prevent the DocTestFinder from extracting DocTests
              from objects that are imported from other modules.
            - To find the name of the file containing the object.
            - To help find the line number of the object within its
              file.

        Contained objects whose module does not match `module` are ignored.

        If `module` is False, no attempt to find the module will be made.
        This is obscure, of use mostly in tests:  if `module` is False, or
        is None but cannot be found automatically, then all objects are
        considered to belong to the (non-existent) module, so all contained
        objects will (recursively) be searched for doctests.

        The globals for each DocTest is formed by combining `globs`
        and `extraglobs` (bindings in `extraglobs` override bindings
        in `globs`).  A new copy of the globals dictionary is created
        for each DocTest.  If `globs` is not specified, then it
        defaults to the module's `__dict__`, if specified, or {}
        otherwise.  If `extraglobs` is not specified, then it defaults
        to {}.
        """
        # If name was not specified, then extract it from the object.
        if name is None:
            name = getattr(obj, '__name__', None)
            if name is None:
                raise ValueError("DocTestFinder.find: name must be given "
                                 "when obj.__name__ doesn't exist: %r" %
                                 (type(obj),))

        # Find the module that contains the given object (if obj is
        # a module, then module=obj.).  Note: this may fail, in which
        # case module will be None.
        if module is False:
            module = None
        elif module is None:
            module = inspect.getmodule(obj)

        # Read the module's source code.  This is used by
        # DocTestFinder._find_lineno to find the line number for a
        # given object's docstring.
        try:
            # getsourcefile returns None for e.g. built-ins, hence the
            # getfile fallback; getfile may raise TypeError.
            file = inspect.getsourcefile(obj) or inspect.getfile(obj)
            source_lines = linecache.getlines(file)
            if not source_lines:
                source_lines = None
        except TypeError:
            source_lines = None

        # Initialize globals, and merge in extraglobs.
        if globs is None:
            if module is None:
                globs = {}
            else:
                globs = module.__dict__.copy()
        else:
            globs = globs.copy()
        if extraglobs is not None:
            globs.update(extraglobs)

        # Recursively explore `obj`, extracting DocTests.
        tests = []
        self._find(tests, obj, name, module, source_lines, globs, {})
        return tests
def _filter(self, obj, prefix, base):
"""
Return true if the given object should not be examined.
"""
return (self._namefilter is not None and
self._namefilter(prefix, base))
def _from_module(self, module, object):
"""
Return true if the given object is defined in | |
% macro
if not macro in self.sections:
message.warning('missing special word macro: [%s]' % macro)
# Check all text quotes have a corresponding tag.
for q in self.quotes.keys()[:]:
tag = self.quotes[q]
if not tag:
del self.quotes[q] # Undefine quote.
else:
if tag[0] == '#':
tag = tag[1:]
if not tag in self.tags:
message.warning('[quotes] %s missing tag definition: %s' % (q,tag))
# Check all specialsections section names exist.
for k,v in self.specialsections.items():
if not v:
del self.specialsections[k]
elif not v in self.sections:
message.warning('missing specialsections section: [%s]' % v)
paragraphs.validate()
lists.validate()
blocks.validate()
tables_OLD.validate()
tables.validate()
macros.validate()
message.linenos = None
def entries_section(self,section_name):
"""
Return True if conf file section contains entries, not a markup
template.
"""
for name in self.ENTRIES_SECTIONS:
if re.match(name,section_name):
return True
return False
    def dump(self):
        """Dump configuration to stdout.

        Emits a generated-by header, then the configuration sections
        ([attributes], [quotes], ... and finally every remaining markup
        template section) in conf-file format via sys.stdout.
        """
        # Header.
        hdr = ''
        hdr = hdr + '#' + writer.newline
        hdr = hdr + '# Generated by AsciiDoc %s for %s %s.%s' % \
                (VERSION,document.backend,document.doctype,writer.newline)
        t = time.asctime(time.localtime(time.time()))
        hdr = hdr + '# %s%s' % (t,writer.newline)
        hdr = hdr + '#' + writer.newline
        sys.stdout.write(hdr)
        # Dump special sections.
        # Dump only the configuration file and command-line attributes.
        # [miscellaneous] entries are dumped as part of the [attributes].
        d = {}
        d.update(self.conf_attrs)
        d.update(self.cmd_attrs)    # Command-line attrs override conf attrs.
        dump_section('attributes',d)
        Title.dump()
        dump_section('quotes',self.quotes)
        dump_section('specialcharacters',self.specialchars)
        # Invert specialwords ({word-pattern: macro-name}) so each macro
        # name lists all of its quoted word patterns on a single line.
        d = {}
        for k,v in self.specialwords.items():
            if v in d:
                d[v] = '%s "%s"' % (d[v],k) # Append word list.
            else:
                d[v] = '"%s"' % k
        dump_section('specialwords',d)
        dump_section('replacements',self.replacements)
        dump_section('replacements2',self.replacements2)
        dump_section('replacements3',self.replacements3)
        dump_section('specialsections',self.specialsections)
        # Tags are stored as (starttag,endtag) tuples; serialize each as
        # 'start|end'.
        d = {}
        for k,v in self.tags.items():
            d[k] = '%s|%s' % v
        dump_section('tags',d)
        paragraphs.dump()
        lists.dump()
        blocks.dump()
        tables_OLD.dump()
        tables.dump()
        macros.dump()
        # Dump remaining sections.
        for k in self.sections.keys():
            if not self.entries_section(k):
                sys.stdout.write('[%s]%s' % (k,writer.newline))
                for line in self.sections[k]:
                    sys.stdout.write('%s%s' % (line,writer.newline))
                sys.stdout.write(writer.newline)
def subs_section(self,section,d):
"""Section attribute substitution using attributes from
document.attributes and 'd'. Lines containing undefinded
attributes are deleted."""
if section in self.sections:
return subs_attrs(self.sections[section],d)
else:
message.warning('missing section: [%s]' % section)
return ()
    def parse_tags(self):
        """Parse [tags] section entries into self.tags dictionary."""
        d = {}
        parse_entries(self.sections.get('tags',()),d)
        for k,v in d.items():
            if v is None:
                # A bare 'tagname' entry (no value) undefines the tag.
                if k in self.tags:
                    del self.tags[k]
            elif v == '':
                # Empty value: the tag exists but emits no markup.
                self.tags[k] = (None,None)
            else:
                # Normal 'starttag|endtag' pair.
                mo = re.match(r'(?P<stag>.*)\|(?P<etag>.*)',v)
                if mo:
                    self.tags[k] = (mo.group('stag'), mo.group('etag'))
                else:
                    raise EAsciiDoc,'[tag] %s value malformed' % k
    def tag(self, name, d=None):
        """Returns (starttag,endtag) tuple named name from configuration file
        [tags] section. Raise error if not found. If a dictionary 'd' is
        passed then merge with document attributes and perform attribute
        substitution on tags."""
        if not name in self.tags:
            raise EAsciiDoc, 'missing tag: %s' % name
        stag,etag = self.tags[name]
        if d is not None:
            # TODO: Should we warn if substitution drops a tag?
            if stag:
                stag = subs_attrs(stag,d)
            if etag:
                etag = subs_attrs(etag,d)
        # Normalize undefined/dropped tags to empty strings so callers
        # can concatenate the result without None checks.
        if stag is None: stag = ''
        if etag is None: etag = ''
        return (stag,etag)
    def parse_specialsections(self):
        """Parse specialsections section to self.specialsections dictionary."""
        # TODO: This is virtually the same as parse_replacements() and should
        # be factored to single routine.
        d = {}
        parse_entries(self.sections.get('specialsections',()),d,unquote=True)
        for pat,sectname in d.items():
            pat = strip_quotes(pat)
            if not is_re(pat):
                raise EAsciiDoc,'[specialsections] entry ' \
                        'is not a valid regular expression: %s' % pat
            if sectname is None:
                # An entry with no value undefines an existing pattern.
                if pat in self.specialsections:
                    del self.specialsections[pat]
            else:
                self.specialsections[pat] = sectname
    def parse_replacements(self,sect='replacements'):
        """Parse replacements section into self.replacements dictionary."""
        # OrderedDict preserves conf-file order so replacements are later
        # applied in the order they were defined.
        d = OrderedDict()
        parse_entries(self.sections.get(sect,()), d, unquote=True)
        for pat,rep in d.items():
            if not self.set_replacement(pat, rep, getattr(self,sect)):
                raise EAsciiDoc,'[%s] entry in %s is not a valid' \
                        ' regular expression: %s' % (sect,self.fname,pat)
@staticmethod
def set_replacement(pat, rep, replacements):
"""Add pattern and replacement to replacements dictionary."""
pat = strip_quotes(pat)
if not is_re(pat):
return False
if rep is None:
if pat in replacements:
del replacements[pat]
else:
replacements[pat] = strip_quotes(rep)
return True
def subs_replacements(self,s,sect='replacements'):
"""Substitute patterns from self.replacements in 's'."""
result = s
for pat,rep in getattr(self,sect).items():
result = re.sub(pat, rep, result)
return result
    def parse_specialwords(self):
        """Parse special words section into self.specialwords dictionary."""
        # Matches either a double-quoted phrase or a bare (unquoted) word.
        reo = re.compile(r'(?:\s|^)(".+?"|[^"\s]+)(?=\s|$)')
        for line in self.sections.get('specialwords',()):
            e = parse_entry(line)
            if not e:
                raise EAsciiDoc,'[specialwords] entry in %s is malformed: %s' \
                    % (self.fname,line)
            name,wordlist = e
            if not is_name(name):
                raise EAsciiDoc,'[specialwords] name in %s is illegal: %s' \
                    % (self.fname,name)
            if wordlist is None:
                # Undefine all words associated with 'name'.
                # (Under Python 2, items() returns a list, so deleting
                # entries inside this loop is safe.)
                for k,v in self.specialwords.items():
                    if v == name:
                        del self.specialwords[k]
            else:
                words = reo.findall(wordlist)
                for word in words:
                    word = strip_quotes(word)
                    if not is_re(word):
                        raise EAsciiDoc,'[specialwords] entry in %s ' \
                            'is not a valid regular expression: %s' \
                            % (self.fname,word)
                    self.specialwords[word] = name
def subs_specialchars(self,s):
"""Perform special character substitution on string 's'."""
"""It may seem like a good idea to escape special characters with a '\'
character, the reason we don't is because the escape character itself
then has to be escaped and this makes including code listings
problematic. Use the predefined {amp},{lt},{gt} attributes instead."""
result = ''
for ch in s:
result = result + self.specialchars.get(ch,ch)
return result
def subs_specialchars_reverse(self,s):
"""Perform reverse special character substitution on string 's'."""
result = s
for k,v in self.specialchars.items():
result = result.replace(v, k)
return result
def subs_specialwords(self,s):
"""Search for word patterns from self.specialwords in 's' and
substitute using corresponding macro."""
result = s
for word in self.specialwords.keys():
result = re.sub(word, _subs_specialwords, result)
return result
    def expand_templates(self,entries):
        """Expand any template::[] macros in a list of section entries."""
        result = []
        for line in entries:
            mo = macros.match('+',r'template',line)
            if mo:
                s = mo.group('attrlist')
                if s in self.sections:
                    # Recursively inline the referenced section's lines
                    # (templates may themselves contain templates).
                    result += self.expand_templates(self.sections[s])
                else:
                    message.warning('missing section: [%s]' % s)
                    # Keep the unexpanded macro line so the problem stays
                    # visible in the output.
                    result.append(line)
            else:
                result.append(line)
        return result
def expand_all_templates(self):
for k,v in self.sections.items():
self.sections[k] = self.expand_templates(v)
    def section2tags(self, section, d={}, skipstart=False, skipend=False):
        """Perform attribute substitution on 'section' using document
        attributes plus 'd' attributes. Return tuple (stag,etag) containing
        pre and post | placeholder tags. 'skipstart' and 'skipend' are
        used to suppress substitution.

        NOTE(review): 'd' has a mutable default dict and d['title'] is
        temporarily overwritten below; it is restored before returning,
        so the shared default stays clean -- but 'd=None' would be safer.
        """
        assert section is not None
        if section in self.sections:
            body = self.sections[section]
        else:
            message.warning('missing section: [%s]' % section)
            body = ()
        # Split macro body into start and end tag lists.
        stag = []
        etag = []
        in_stag = True
        for s in body:
            if in_stag:
                mo = re.match(r'(?P<stag>.*)\|(?P<etag>.*)',s)
                if mo:
                    # The '|' line marks the boundary: text before it
                    # belongs to the start tag, text after to the end tag.
                    if mo.group('stag'):
                        stag.append(mo.group('stag'))
                    if mo.group('etag'):
                        etag.append(mo.group('etag'))
                    in_stag = False
                else:
                    stag.append(s)
            else:
                etag.append(s)
        # Do attribute substitution last so {brkbar} can be used to escape |.
        # But don't do attribute substitution on title -- we've already done it.
        title = d.get('title')
        if title:
            d['title'] = chr(0)  # Replace with unused character.
        if not skipstart:
            stag = subs_attrs(stag, d)
        if not skipend:
            etag = subs_attrs(etag, d)
        # Put the {title} back.
        if title:
            # NOTE(review): map() returns a list under Python 2 (this file
            # uses py2 raise syntax); do not port to py3 without list().
            stag = map(lambda x: x.replace(chr(0), title), stag)
            etag = map(lambda x: x.replace(chr(0), title), etag)
            d['title'] = title
        return (stag,etag)
#---------------------------------------------------------------------------
# Deprecated old table classes follow.
# Naming convention is an _OLD name suffix.
# These will be removed from future versions of AsciiDoc
def join_lines_OLD(lines):
    """Return a copy of 'lines' in which every line terminated by the
    backslash continuation character is merged with the line(s) that
    follow it.  A trailing unterminated continuation is emitted as-is
    (minus its backslashes)."""
    result = []
    pending = ''
    joining = False
    for line in lines:
        if line and line[-1] == '\\':
            # Accumulate the continued text without its backslash.
            pending = pending + line[:-1]
            joining = True
        elif joining:
            # First non-continued line closes the accumulated run.
            result.append(pending + line)
            pending = ''
            joining = False
        else:
            result.append(line)
    if joining:
        # Input ended mid-continuation; flush what we have.
        result.append(pending)
    return result
class Column_OLD:
    """A single column of a deprecated old-style table."""
    def __init__(self):
        # Horizontal alignment: 'left', 'right' or 'center'.
        self.colalign = None
        # Width of this column's segment of the ruler line.
        self.rulerwidth = None
        # Output width in page units.
        self.colwidth = None
class Table_OLD(AbstractBlock):
COL_STOP = r"(`|'|\.)" # RE.
ALIGNMENTS = {'`':'left', "'":'right', '.':'center'}
FORMATS = ('fixed','csv','dsv')
    def __init__(self):
        AbstractBlock.__init__(self)
        # Extend the entries recognised when loading this block's
        # configuration section.
        self.CONF_ENTRIES += ('template','fillchar','format','colspec',
                              'headrow','footrow','bodyrow','headdata',
                              'footdata', 'bodydata')
        # Configuration parameters.
        self.fillchar=None      # Single ruler fill character.
        self.format=None    # 'fixed','csv','dsv'
        self.colspec=None       # Column specification markup template.
        self.headrow=None       # Header row markup template.
        self.footrow=None       # Footer row markup template.
        self.bodyrow=None       # Body row markup template.
        self.headdata=None      # Header cell markup template.
        self.footdata=None      # Footer cell markup template.
        self.bodydata=None      # Body cell markup template.
        # Calculated parameters.
        self.underline=None     # RE matching current table underline.
        self.isnumeric=False    # True if numeric ruler.
        self.tablewidth=None    # Optional table width scale factor.
        self.columns=[]         # List of Columns.
        # Other.
        self.check_msg=''       # Message set by previous self.validate() call.
def load(self,name,entries):
AbstractBlock.load(self,name,entries)
"""Update table definition from section entries in 'entries'."""
for k,v in entries.items():
if k == 'fillchar':
if v and len(v) == 1:
self.fillchar = v
else:
raise | |
#!/usr/bin/python3
import os
import click
import sys
import csv
import time
import pandas as pd
import country_converter as coco
import hashlib
import phonenumbers
from tqdm import tqdm
from uszipcode import SearchEngine
# Direct translations from raw CSV header names to the Customer Match
# field names Google expects.
HEADER_TRANSLATIONS = {
    "email1": "Email",
    "phone1": "Phone",
    "person_country": "Country",
}

# Headers every upload must contain.
# NOTE(review): the two "<NAME>" entries look like anonymized
# placeholders (presumably "First Name"/"Last Name"); as written they
# collapse to a single set element -- confirm the intended values.
REQUIRED_HEADERS = {"<NAME>", "<NAME>", "Phone", "Email", "Country", "Zip"}
OPTIONAL_HEADERS = set()  # TODO: Add optional headers that can be uploaded.

# All headers that can be in a Customer Match CSV.
ALL_HEADERS = REQUIRED_HEADERS.union(OPTIONAL_HEADERS)

# Columns uploaded in plain text; every other column is sha256-hashed
# (see hash_series).
DO_NOT_HASH = {"Country", "Zip"}

# ANSI codes to color/format terminal prints.
ANSI = {
    "YELLOW": "\u001b[33m",
    "RED": "\u001b[31m",
    "CYAN": "\u001b[36m",
    "BOLD": "\u001b[1m",
    "RESET": "\u001b[0m",
}
class Error(ValueError):
    """Base class for this script's custom exceptions."""

    pass
class FormatError(Error):
    """Raised when a file is not in the correct format."""

    pass
class NoZipError(FormatError):
    """Raised when a zip code is not found in a spreadsheet. Sometimes recoverable."""

    pass
# ==========================
# Formatted console prints
# ==========================
def warn(message: str):
    """Print *message* with a bold yellow WARNING prefix, using
    tqdm.write so active progress bars are not disturbed."""
    prefix = ANSI["BOLD"] + ANSI["YELLOW"]
    tqdm.write(f"{prefix}WARNING:{ANSI['RESET']} {message}")
def notify(message: str):
    """Print *message* with a bold cyan INFO prefix, using tqdm.write so
    active progress bars are not disturbed."""
    prefix = ANSI["BOLD"] + ANSI["CYAN"]
    tqdm.write(f"{prefix}INFO:{ANSI['RESET']} {message}")
def check_path(filepath: str):
    """Check that the directory portion of *filepath* exists.

    Unlike check_csv, this does not require the file itself to exist --
    only the path leading up to it.  A bare filename (no directory part)
    always passes.

    Args:
        filepath (str): The path to the file

    Raises:
        ValueError: If the path to the file does not exist
    """
    directory = os.path.dirname(filepath)
    if not directory.strip():
        return  # bare filename: nothing to validate
    if not os.path.exists(directory):
        raise ValueError(f"The path {directory} does not exist.")
def check_csv(filepath: str) -> csv.Dialect:
    """Run sanity checks on a CSV file and return its sniffed dialect.

    Checks that the path exists and is a regular file, then sniffs the
    dialect from (up to) the first 100kB of the file.

    Args:
        filepath (str): Path to the CSV file

    Raises:
        ValueError: If the path does not exist, is not a file, or the
            file cannot be read as a CSV

    Returns:
        csv.Dialect: Parsed CSV dialect from the file
    """
    # Check that the file exists, and is a file.
    basename = os.path.basename(filepath)
    if not os.path.exists(filepath):
        raise ValueError(f"The path {filepath} does not exist.")
    if not os.path.isfile(filepath):
        raise ValueError(f"{basename} is not a file.")
    # Try to open the file and verify it can be read as a CSV.
    # FIX: the original leaked the file handle when Sniffer raised; the
    # `with` block guarantees it is closed on every path.
    try:
        with open(filepath, encoding="utf8") as file:
            return csv.Sniffer().sniff(file.read(100000))
    except csv.Error as e:
        # Chain the sniffer error so the root cause stays visible.
        raise ValueError(
            f"Could not get a CSV dialect for file {basename}. Is it a CSV file? Is it maybe too large?"
        ) from e
def parse_google_fields(filepath: str, ignore_zip: bool = False) -> dict:
    """Parse the header of the CSV to get the Google field names.

    Args:
        filepath (str): Path to the CSV file.
        ignore_zip (bool): Flag to ignore the zip code column, and not throw an error if it is missing.

    Raises:
        NoZipError: If only the Zip header is missing (the partial field
            map is attached to the exception for recovery).
        FormatError: If any other required header cannot be found.

    Returns:
        dict: A map from the field name that was found in the CSV to Google's field name.
        eg: "first_name": "<NAME>"
    """
    field_map = {}
    found_headers = []
    with open(filepath, "r", encoding="utf8") as file:
        reader = csv.DictReader(file)
        field_names = reader.fieldnames

        # For each field in the header column, try to translate
        # them to a header recognized by Google.
        for field in field_names:
            header = None
            # Check if there is a direct translation first:
            if field in HEADER_TRANSLATIONS:
                header = HEADER_TRANSLATIONS[field]
            # Otherwise attempt to translate snake case:
            elif (translated_field := field.replace("_", " ").title()) in ALL_HEADERS:
                header = translated_field

            # If we have not found this header yet, add it to the map.
            # Otherwise, if we have found the header already, warn the user.
            if header is not None and header not in found_headers:
                notify(f"Detected header name '{header}' as '{field}' in CSV file")
                field_map[field] = header
                found_headers.append(header)
            elif header in found_headers:
                # FIX: field_map is keyed by the raw CSV field name, so the
                # original `field_map[header]` raised KeyError here; look
                # up which raw field was kept for this header instead.
                kept_field = next(k for k, v in field_map.items() if v == header)
                warn(
                    f"Duplicate header name '{header}' was extracted as '{field}'. Keeping column with header '{kept_field}'"
                )

    # Check if we have all required headers.
    # All required headers are found if the required headers set is a subset of the headers found.
    if not REQUIRED_HEADERS.issubset(field_map.values()):
        missing_headers = REQUIRED_HEADERS.difference(field_map.values())
        if len(missing_headers) == 1 and list(missing_headers)[0] == "Zip":
            if not ignore_zip:
                raise NoZipError(field_map)
        else:
            raise FormatError(
                f"Not all required headers found. Missing: {', '.join(missing_headers)}"
            )
    return field_map
def parse_location_fields(filepath: str) -> dict:
    """Parse a header of a CSV file to get the state and city columns.

    Args:
        filepath (str): Path to the CSV file

    Raises:
        FormatError: If the city, state or both columns cannot be found.

    Returns:
        dict: A map from the (lowercased) field name that was found in
        the CSV to the standardized name. eg: "person_city": "city"
    """
    WANTED_FIELDS = {"state", "city"}
    found_translations = []
    field_map = {}
    with open(filepath, "r", encoding="utf8") as file:
        reader = csv.DictReader(file)
        field_names = reader.fieldnames
        for field in field_names:
            # Salesql CSVs prefix state and city by person_.
            # NOTE(review): `field` is lowercased here, so field_map keys
            # are lowercased header names -- callers that select dataframe
            # columns by these keys need lowercase headers; confirm.
            field = field.lower()
            salesql_field = field.replace("person_", "")
            possible_fields = {field, salesql_field}
            if found_set := WANTED_FIELDS.intersection(possible_fields):
                translation = list(found_set)[0]
                notify(f"Detected header name '{translation}' as '{field}' in CSV file")
                found_translations.append(translation)
                field_map[field] = translation
    if not WANTED_FIELDS.issubset(field_map.values()):
        missing_fields = WANTED_FIELDS.difference(field_map.values())
        raise FormatError(
            f"Could not find state and city columns. Missing: {', '.join(missing_fields)}"
        )
    return field_map
def hash_element(element: any) -> str:
    """Return the sha256 hex digest of an element's string representation.

    Args:
        element (any): The data to be hashed

    Returns:
        str: The sha256 hash hex digest
    """
    encoded = str(element).encode("utf-8")
    digest = hashlib.sha256(encoded)
    return digest.hexdigest()
def hash_series(series: pd.Series):
    """Hash every element of one CSV column, unless the column is exempt.

    Args:
        series (pd.Series): One column of the Customer Match dataframe.

    Returns:
        pd.Series: The series with each element replaced by its sha256
        hex digest -- except when the series' name is in DO_NOT_HASH
        (eg: Zip, Country), in which case it is returned unchanged.
    """
    # If the name of the series is a field
    # that shouldn't be hashed (eg: Zip), don't hash it.
    if series.name in DO_NOT_HASH:
        return series
    else:
        return series.map(hash_element)
def hash_dataframe(dataframe: pd.DataFrame) -> pd.DataFrame:
    """Hashes all elements in a Pandas dataframe.

    Columns named in DO_NOT_HASH are passed through unchanged (see
    hash_series).  Progress is reported via notify().

    Args:
        dataframe (pd.DataFrame): The dataframe to be hashed

    Returns:
        pd.DataFrame: The dataframe with all elements hashed
    """
    notify(f"Hashing {dataframe.size} elements...")
    start = time.time()
    # axis=0 applies hash_series to each column, so the per-column
    # DO_NOT_HASH exemption in hash_series works as intended.
    dataframe = dataframe.apply(hash_series, axis=0)
    notify(
        f"Finished hashing {dataframe.size} elements in {time.time() - start} seconds."
    )
    return dataframe
def get_dataframe(filepath: str) -> pd.DataFrame:
    """Load a CSV file into a string-typed dataframe, silently skipping
    malformed lines.

    Args:
        filepath (str): Path to the CSV file.

    Raises:
        ValueError: Propagated from check_csv when the file is missing
            or cannot be parsed as a CSV.

    Returns:
        pd.DataFrame: The file's contents, every column read as str.
    """
    dialect = check_csv(filepath)
    try:
        # pandas >= 1.3: warn_bad_lines/error_bad_lines were replaced by
        # on_bad_lines (and removed entirely in pandas 2.0, where the old
        # keywords raise TypeError).
        return pd.read_csv(
            filepath,
            on_bad_lines="skip",
            sep=dialect.delimiter,
            low_memory=False,
            dtype=str,
        )
    except TypeError:
        # Older pandas: fall back to the legacy keyword arguments
        # (same behavior: skip bad lines without warnings).
        return pd.read_csv(
            filepath,
            warn_bad_lines=False,
            error_bad_lines=False,
            sep=dialect.delimiter,
            low_memory=False,
            dtype=str,
        )
def translate_dataframe(dataframe: pd.DataFrame, field_map: dict) -> pd.DataFrame:
    """Rename a CSV dataframe's columns to Google's Customer Match names.

    Columns without an entry in *field_map* are dropped entirely.

    Args:
        dataframe (pd.DataFrame): The DataFrame of the CSV file.
        field_map (dict): Map of raw CSV header -> Google header name.

    Returns:
        pd.DataFrame: The pandas dataframe that was translated.
        Can be exported to a CSV with the save_csv function.
    """
    # Keep only the columns that have matching headers, then rename
    # them to Google's expectation.
    kept_columns = list(field_map.keys())
    trimmed = dataframe[kept_columns]
    return trimmed.rename(columns=field_map)
def save_csv(dataframe: pd.DataFrame, output: str):
    """Saves a dataframe to a CSV file (UTF-8, without the index column).

    Args:
        dataframe (pd.DataFrame): The dataframe to be saved
        output (str): The filepath to be saved to
    """
    dataframe.to_csv(output, index=False, encoding="utf-8")
    # FIX: corrected "Succesfully" typo in the user-facing message.
    notify(f"Successfully saved Customer Match data file to {os.path.abspath(output)}.")
def get_zip(row: pd.Series, search: SearchEngine) -> str:
    """Get the zip code for a row in a dataframe with the city and state.

    Args:
        row (pd.Series): A series containing a city and state field.
        search (SearchEngine): The search engine object to lookup the zipcode.

    Returns:
        str: The zipcode if found. An empty string otherwise (NaN input
        or failed lookup), after emitting a warning.
    """
    try:
        # count() == 2 means both 'city' and 'state' are non-NaN.
        if row.count() == 2:
            res = search.by_city_and_state(city=row["city"], state=row["state"])
            # Take the first match; IndexError (no match) is caught below.
            return res[0].zipcode
        else:
            warn(f"NaN detected for {row['city']}, {row['state']}.")
            return ""
    except (AttributeError, IndexError):
        warn(f"Zip lookup for {row['city']}, {row['state']} failed.")
        return ""
def get_zips(dataframe: pd.DataFrame) -> pd.Series:
    """Gets the zips for a dataframe with city and state columns.

    Args:
        dataframe (pd.DataFrame): The dataframe, must have city and state columns.

    Returns:
        pd.Series: A series (named "Zip") of zip codes correlating to
        the zips for each city and state.
    """
    search = SearchEngine()
    # tqdm.pandas installs progress_apply so the row-by-row lookup shows
    # a progress bar.
    tqdm.pandas(desc="Getting zipcodes")
    zips = dataframe.progress_apply(lambda row: get_zip(row, search), axis=1)
    zips = zips.rename("Zip")
    return zips
def convert_to_iso(dataframe: pd.DataFrame) -> pd.DataFrame:
    """Converts a dataframe's Country column to ISO2 format (United States => US)

    Args:
        dataframe (pd.DataFrame): A dataframe with a Country column.

    Returns:
        pd.DataFrame: The dataframe (modified in place) with the Country
        column in ISO2 format.
    """
    notify(f"Converting {len(dataframe.index)} countries to ISO2 format...")
    start = time.time()
    # not_found=None keeps the original value when a country cannot be
    # converted, rather than substituting a sentinel.
    iso2_names = coco.convert(names=dataframe["Country"], to="ISO2", not_found=None)
    # NOTE(review): wrapping in pd.Series aligns by position only when the
    # dataframe has a default RangeIndex -- confirm upstream reset_index.
    dataframe["Country"] = pd.Series(iso2_names)
    notify(
        f"Finished converting countries to ISO2 format in {time.time() - start} seconds."
    )
    return dataframe
def | |
position_ids, return_dict=False, init_cache=True
)
return unfreeze(init_variables["cache"])
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    def __call__(
        self,
        input_ids,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        params: dict = None,
        dropout_rng: jax.random.PRNGKey = None,
        train: bool = False,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        past_key_values: dict = None,
    ):
        # Fall back to the model config for any output flag not passed
        # explicitly.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.return_dict

        # init input tensors if not passed
        if token_type_ids is None:
            token_type_ids = jnp.zeros_like(input_ids)

        if position_ids is None:
            position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape)

        if attention_mask is None:
            attention_mask = jnp.ones_like(input_ids)

        if head_mask is None:
            # Default: keep every attention head in every layer.
            head_mask = jnp.ones((self.config.num_hidden_layers, self.config.num_attention_heads))

        # Handle any PRNG if needed
        rngs = {}
        if dropout_rng is not None:
            rngs["dropout"] = dropout_rng

        # Use explicitly passed params when given, else the model's own.
        inputs = {"params": params or self.params}

        if self.config.add_cross_attention:
            # if past_key_values are passed then cache is already initialized a private flag init_cache has to be passed
            # down to ensure cache is used. It has to be made sure that cache is marked as mutable so that it can be
            # changed by FlaxBertAttention module
            if past_key_values:
                inputs["cache"] = past_key_values
                mutable = ["cache"]
            else:
                mutable = False

            outputs = self.module.apply(
                inputs,
                jnp.array(input_ids, dtype="i4"),
                jnp.array(attention_mask, dtype="i4"),
                token_type_ids=jnp.array(token_type_ids, dtype="i4"),
                position_ids=jnp.array(position_ids, dtype="i4"),
                head_mask=jnp.array(head_mask, dtype="i4"),
                encoder_hidden_states=encoder_hidden_states,
                encoder_attention_mask=encoder_attention_mask,
                deterministic=not train,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=return_dict,
                rngs=rngs,
                mutable=mutable,
            )

            # add updated cache to model output
            if past_key_values is not None and return_dict:
                # apply() returned (outputs, mutated_variables); unpack and
                # surface the updated cache on the output object.
                outputs, past_key_values = outputs
                outputs["past_key_values"] = unfreeze(past_key_values["cache"])
                return outputs
            elif past_key_values is not None and not return_dict:
                # Tuple output: splice the updated cache in after the first
                # element.
                outputs, past_key_values = outputs
                outputs = outputs[:1] + (unfreeze(past_key_values["cache"]),) + outputs[1:]

        else:
            # No cross attention: plain forward pass without cache handling.
            outputs = self.module.apply(
                inputs,
                jnp.array(input_ids, dtype="i4"),
                jnp.array(attention_mask, dtype="i4"),
                token_type_ids=jnp.array(token_type_ids, dtype="i4"),
                position_ids=jnp.array(position_ids, dtype="i4"),
                head_mask=jnp.array(head_mask, dtype="i4"),
                deterministic=not train,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=return_dict,
                rngs=rngs,
            )

        return outputs
class FlaxBertModule(nn.Module):
    """Bare BERT backbone: embedding layer -> transformer encoder -> optional pooler."""

    config: BertConfig
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation
    add_pooling_layer: bool = True

    def setup(self):
        self.embeddings = FlaxBertEmbeddings(self.config, dtype=self.dtype)
        self.encoder = FlaxBertEncoder(self.config, dtype=self.dtype)
        self.pooler = FlaxBertPooler(self.config, dtype=self.dtype)

    def __call__(
        self,
        input_ids,
        attention_mask,
        token_type_ids: Optional[jnp.ndarray] = None,
        position_ids: Optional[jnp.ndarray] = None,
        head_mask: Optional[jnp.ndarray] = None,
        encoder_hidden_states: Optional[jnp.ndarray] = None,
        encoder_attention_mask: Optional[jnp.ndarray] = None,
        init_cache: bool = False,
        deterministic: bool = True,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ):
        # Single-segment default when the caller passes no token types.
        if token_type_ids is None:
            token_type_ids = jnp.zeros_like(input_ids)
        # Default positions 0..seq_len-1, broadcast across the batch.
        if position_ids is None:
            position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape)
        embedding_output = self.embeddings(
            input_ids, token_type_ids, position_ids, attention_mask, deterministic=deterministic
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask,
            head_mask=head_mask,
            deterministic=deterministic,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            init_cache=init_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output) if self.add_pooling_layer else None
        if not return_dict:
            # Tuple output; drop the pooled entry entirely when pooling is disabled.
            if pooled_output is None:
                return (sequence_output,) + encoder_outputs[1:]
            return (sequence_output, pooled_output) + encoder_outputs[1:]
        return FlaxBaseModelOutputWithPoolingAndCrossAttentions(
            last_hidden_state=sequence_output,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
            cross_attentions=encoder_outputs.cross_attentions,
        )
@add_start_docstrings(
    "The bare Bert Model transformer outputting raw hidden-states without any specific head on top.",
    BERT_START_DOCSTRING,
)
class FlaxBertModel(FlaxBertPreTrainedModel):
    # Thin public wrapper: the pretrained-model base class instantiates module_class internally.
    module_class = FlaxBertModule
# Attach the standard usage example to FlaxBertModel's call docstring.
append_call_sample_docstring(
    FlaxBertModel, _TOKENIZER_FOR_DOC, _CHECKPOINT_FOR_DOC, FlaxBaseModelOutputWithPooling, _CONFIG_FOR_DOC
)
class FlaxBertForPreTrainingModule(nn.Module):
    """BERT backbone plus the two pretraining heads (masked-LM and next-sentence)."""

    config: BertConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.bert = FlaxBertModule(config=self.config, dtype=self.dtype)
        self.cls = FlaxBertPreTrainingHeads(config=self.config, dtype=self.dtype)

    def __call__(
        self,
        input_ids,
        attention_mask,
        token_type_ids,
        position_ids,
        head_mask,
        deterministic: bool = True,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ):
        # Backbone forward pass.
        backbone_outputs = self.bert(
            input_ids,
            attention_mask,
            token_type_ids,
            position_ids,
            head_mask,
            deterministic=deterministic,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        # Reuse the input embedding matrix for the MLM decoder when weights are tied.
        shared_embedding = (
            self.bert.variables["params"]["embeddings"]["word_embeddings"]["embedding"]
            if self.config.tie_word_embeddings
            else None
        )
        sequence_output = backbone_outputs[0]
        pooled_output = backbone_outputs[1]
        prediction_scores, seq_relationship_score = self.cls(
            sequence_output, pooled_output, shared_embedding=shared_embedding
        )
        if not return_dict:
            return (prediction_scores, seq_relationship_score) + backbone_outputs[2:]
        return FlaxBertForPreTrainingOutput(
            prediction_logits=prediction_scores,
            seq_relationship_logits=seq_relationship_score,
            hidden_states=backbone_outputs.hidden_states,
            attentions=backbone_outputs.attentions,
        )
@add_start_docstrings(
    """
    Bert Model with two heads on top as done during the pretraining: a `masked language modeling` head and a `next
    sentence prediction (classification)` head.
    """,
    BERT_START_DOCSTRING,
)
class FlaxBertForPreTraining(FlaxBertPreTrainedModel):
    # Thin public wrapper; behavior lives in FlaxBertForPreTrainingModule.
    module_class = FlaxBertForPreTrainingModule
# Docstring fragment spliced into FlaxBertForPreTraining.__call__ below;
# the ```python example may be doctested, so keep it runnable.
FLAX_BERT_FOR_PRETRAINING_DOCSTRING = """
    Returns:
    Example:
    ```python
    >>> from transformers import BertTokenizer, FlaxBertForPreTraining
    >>> tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    >>> model = FlaxBertForPreTraining.from_pretrained("bert-base-uncased")
    >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="np")
    >>> outputs = model(**inputs)
    >>> prediction_logits = outputs.prediction_logits
    >>> seq_relationship_logits = outputs.seq_relationship_logits
    ```
"""
# Combine the generic BERT input docs with the example above.
overwrite_call_docstring(
    FlaxBertForPreTraining,
    BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length") + FLAX_BERT_FOR_PRETRAINING_DOCSTRING,
)
append_replace_return_docstrings(
    FlaxBertForPreTraining, output_type=FlaxBertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC
)
class FlaxBertForMaskedLMModule(nn.Module):
    """BERT backbone (pooler disabled) topped with the masked-LM head."""

    config: BertConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.bert = FlaxBertModule(config=self.config, add_pooling_layer=False, dtype=self.dtype)
        self.cls = FlaxBertOnlyMLMHead(config=self.config, dtype=self.dtype)

    def __call__(
        self,
        input_ids,
        attention_mask,
        token_type_ids,
        position_ids,
        head_mask,
        deterministic: bool = True,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ):
        # Backbone forward pass.
        backbone_outputs = self.bert(
            input_ids,
            attention_mask,
            token_type_ids,
            position_ids,
            head_mask,
            deterministic=deterministic,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = backbone_outputs[0]
        # Tie the MLM decoder to the input embedding matrix when configured.
        shared_embedding = (
            self.bert.variables["params"]["embeddings"]["word_embeddings"]["embedding"]
            if self.config.tie_word_embeddings
            else None
        )
        logits = self.cls(sequence_output, shared_embedding=shared_embedding)
        if not return_dict:
            return (logits,) + backbone_outputs[1:]
        return FlaxMaskedLMOutput(
            logits=logits,
            hidden_states=backbone_outputs.hidden_states,
            attentions=backbone_outputs.attentions,
        )
@add_start_docstrings("""Bert Model with a `language modeling` head on top.""", BERT_START_DOCSTRING)
class FlaxBertForMaskedLM(FlaxBertPreTrainedModel):
    # Thin public wrapper; behavior lives in FlaxBertForMaskedLMModule.
    module_class = FlaxBertForMaskedLMModule
# Attach the standard usage example to the call docstring.
append_call_sample_docstring(
    FlaxBertForMaskedLM, _TOKENIZER_FOR_DOC, _CHECKPOINT_FOR_DOC, FlaxMaskedLMOutput, _CONFIG_FOR_DOC
)
class FlaxBertForNextSentencePredictionModule(nn.Module):
    """BERT backbone with only the next-sentence-prediction head."""

    config: BertConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.bert = FlaxBertModule(config=self.config, dtype=self.dtype)
        self.cls = FlaxBertOnlyNSPHead(dtype=self.dtype)

    def __call__(
        self,
        input_ids,
        attention_mask,
        token_type_ids,
        position_ids,
        head_mask,
        deterministic: bool = True,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ):
        # Fall back to the config default when the caller passes None explicitly.
        if return_dict is None:
            return_dict = self.config.return_dict
        # Backbone forward pass.
        backbone_outputs = self.bert(
            input_ids,
            attention_mask,
            token_type_ids,
            position_ids,
            head_mask,
            deterministic=deterministic,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        # NSP scores come from the pooled representation.
        seq_relationship_scores = self.cls(backbone_outputs[1])
        if return_dict:
            return FlaxNextSentencePredictorOutput(
                logits=seq_relationship_scores,
                hidden_states=backbone_outputs.hidden_states,
                attentions=backbone_outputs.attentions,
            )
        return (seq_relationship_scores,) + backbone_outputs[2:]
@add_start_docstrings(
    """Bert Model with a `next sentence prediction (classification)` head on top.""",
    BERT_START_DOCSTRING,
)
class FlaxBertForNextSentencePrediction(FlaxBertPreTrainedModel):
    # Thin public wrapper; behavior lives in FlaxBertForNextSentencePredictionModule.
    module_class = FlaxBertForNextSentencePredictionModule
# Docstring fragment spliced into the class's __call__ docs below;
# the ```python example may be doctested, so keep it runnable.
FLAX_BERT_FOR_NEXT_SENT_PRED_DOCSTRING = """
    Returns:
    Example:
    ```python
    >>> from transformers import BertTokenizer, FlaxBertForNextSentencePrediction
    >>> tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    >>> model = FlaxBertForNextSentencePrediction.from_pretrained("bert-base-uncased")
    >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
    >>> next_sentence = "The sky is blue due to the shorter wavelength of blue light."
    >>> encoding = tokenizer(prompt, next_sentence, return_tensors="jax")
    >>> outputs = model(**encoding)
    >>> logits = outputs.logits
    >>> assert logits[0, 0] < logits[0, 1]  # next sentence was random
    ```
"""
# Combine the generic BERT input docs with the example above.
overwrite_call_docstring(
    FlaxBertForNextSentencePrediction,
    BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length") + FLAX_BERT_FOR_NEXT_SENT_PRED_DOCSTRING,
)
append_replace_return_docstrings(
    FlaxBertForNextSentencePrediction, output_type=FlaxNextSentencePredictorOutput, config_class=_CONFIG_FOR_DOC
)
class FlaxBertForSequenceClassificationModule(nn.Module):
    """BERT backbone with dropout plus a linear classification head on the pooled output."""

    config: BertConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.bert = FlaxBertModule(config=self.config, dtype=self.dtype)
        # Prefer the dedicated classifier dropout; otherwise reuse the hidden dropout rate.
        if self.config.classifier_dropout is not None:
            dropout_rate = self.config.classifier_dropout
        else:
            dropout_rate = self.config.hidden_dropout_prob
        self.dropout = nn.Dropout(rate=dropout_rate)
        self.classifier = nn.Dense(
            self.config.num_labels,
            dtype=self.dtype,
        )

    def __call__(
        self,
        input_ids,
        attention_mask,
        token_type_ids,
        position_ids,
        head_mask,
        deterministic: bool = True,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ):
        # Backbone forward pass.
        backbone_outputs = self.bert(
            input_ids,
            attention_mask,
            token_type_ids,
            position_ids,
            head_mask,
            deterministic=deterministic,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        # Classify from the pooled representation.
        pooled_output = self.dropout(backbone_outputs[1], deterministic=deterministic)
        logits = self.classifier(pooled_output)
        if not return_dict:
            return (logits,) + backbone_outputs[2:]
        return FlaxSequenceClassifierOutput(
            logits=logits,
            hidden_states=backbone_outputs.hidden_states,
            attentions=backbone_outputs.attentions,
        )
@add_start_docstrings(
    """
    Bert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled
    output) e.g. for GLUE tasks.
    """,
    BERT_START_DOCSTRING,
)
class FlaxBertForSequenceClassification(FlaxBertPreTrainedModel):
    # Thin public wrapper; behavior lives in FlaxBertForSequenceClassificationModule.
    module_class = FlaxBertForSequenceClassificationModule
# Attach the standard usage example to the call docstring.
append_call_sample_docstring(
    FlaxBertForSequenceClassification,
    _TOKENIZER_FOR_DOC,
    _CHECKPOINT_FOR_DOC,
    FlaxSequenceClassifierOutput,
    _CONFIG_FOR_DOC,
)
class FlaxBertForMultipleChoiceModule(nn.Module):
    """BERT backbone with a single-logit head scored once per answer choice."""

    config: BertConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.bert = FlaxBertModule(config=self.config, dtype=self.dtype)
        self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)
        self.classifier = nn.Dense(1, dtype=self.dtype)

    def __call__(
        self,
        input_ids,
        attention_mask,
        token_type_ids,
        position_ids,
        head_mask,
        deterministic: bool = True,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ):
        num_choices = input_ids.shape[1]

        def flatten_choices(tensor):
            # Fold the choice axis into the batch axis; pass None through untouched.
            return tensor.reshape(-1, tensor.shape[-1]) if tensor is not None else None

        input_ids = flatten_choices(input_ids)
        attention_mask = flatten_choices(attention_mask)
        token_type_ids = flatten_choices(token_type_ids)
        position_ids = flatten_choices(position_ids)
        # Backbone forward pass over the flattened (batch * choices) inputs.
        backbone_outputs = self.bert(
            input_ids,
            attention_mask,
            token_type_ids,
            position_ids,
            head_mask,
            deterministic=deterministic,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        pooled_output = self.dropout(backbone_outputs[1], deterministic=deterministic)
        logits = self.classifier(pooled_output)
        # Unfold back so each row holds the logits for one question's choices.
        reshaped_logits = logits.reshape(-1, num_choices)
        if not return_dict:
            return (reshaped_logits,) + backbone_outputs[2:]
        return FlaxMultipleChoiceModelOutput(
            logits=reshaped_logits,
            hidden_states=backbone_outputs.hidden_states,
            attentions=backbone_outputs.attentions,
        )
@add_start_docstrings(
    """
    Bert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
    softmax) e.g. for RocStories/SWAG tasks.
    """,
    BERT_START_DOCSTRING,
)
class FlaxBertForMultipleChoice(FlaxBertPreTrainedModel):
    # Thin public wrapper; behavior lives in FlaxBertForMultipleChoiceModule.
    module_class = FlaxBertForMultipleChoiceModule
# Multiple choice expects 3-D inputs, so use the (batch, choices, seq) input docs.
overwrite_call_docstring(
    FlaxBertForMultipleChoice, BERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
)
# Attach the standard usage example to the call docstring.
append_call_sample_docstring(
    FlaxBertForMultipleChoice, _TOKENIZER_FOR_DOC, _CHECKPOINT_FOR_DOC, FlaxMultipleChoiceModelOutput, _CONFIG_FOR_DOC
)
class FlaxBertForTokenClassificationModule(nn.Module):
config: BertConfig
dtype: jnp.dtype = jnp.float32
def | |
'''
New t0 algorithm is getting close, but not close enough.
1. Run previous analysis using only exponential signal. In general, this should predict a t0 that is slightly ahead of the actual t0.
2. Determine a baseline by looking in between the LED pulses and taking the average. (Integrate and divide by length?)
3. From the previous t0, start looking backwards in the filtered signal (the signal before correlations) until we find a region that is below the baseline for at least 100 ms.
a. The new t0 should be the right end-point of this region plus one half the width of our FFT window from the spectrogram
'''
import copy
import operator
import os
import re

import numpy as np
import scipy.signal

# import matplotlib.pyplot as plt
# from SBCcode.Tools import SBCtools
from SBCcode.DataHandling.WriteBinary import WriteBinaryNtupleFile as WB
def get_runs(dir, search_for="folders", do_sort=True, reverse=False):
    # Author: <NAME>
    """Collect run ids (``YYYYMMDD_N``) from the entry names inside *dir*.

    Inputs:
        dir: Directory containing one folder/file per run.
        search_for: "folders" to scan sub-directories, "files" to scan plain files.
        do_sort: If True, return the list sorted via sort_runs.
        reverse: If True (and do_sort), return the reverse-sorted list.
    Outputs: A list of run-id strings.
    Raises: ValueError for an unrecognized *search_for*.

    Fix: this module never imported `os`, so every call raised NameError; `os`
    is now imported at the top of the file. Also normalizes *search_for* once
    and repairs the quoting in the error message.
    """
    mode = search_for.lower().strip()
    if mode == "folders":
        keep = os.path.isdir
    elif mode == "files":
        keep = os.path.isfile
    else:
        raise ValueError("'search_for' must be either 'folders' or 'files'. Got {}".format(search_for))
    run_id_pattern = re.compile("([0-9]{8}_[0-9]+){1}")
    out = []
    for name in os.listdir(dir):
        if not keep(os.path.join(dir, name)):
            continue
        match_q = run_id_pattern.search(name)
        if match_q:
            out.append(match_q.group(0))
    if do_sort:
        return sort_runs(out, reverse=reverse)
    return out
def sort_runs(arr, reverse=False):
    # Author: <NAME> 8/2/2018
    """Sort run-id strings ("YYYYMMDD_N") by date, then by run number.

    Input:
        arr: run-id strings, e.g. ["20170623_0", "20170623_5", ...]
        reverse: If True, return the reverse-sorted list.
    Outputs: A naturally sorted list (dates in order, run numbers numeric).

    Fix: the original used `np.str` (and `np.int32` string juggling); `np.str`
    was deprecated in NumPy 1.20 and removed in 2.0, so this raised
    AttributeError on modern NumPy. Plain int parsing is equivalent and exact.
    """
    keyed = sorted(
        (tuple(int(part) for part in run_id.split("_")) for run_id in arr),
        reverse=reverse,
    )
    return ["_".join(str(part) for part in key) for key in keyed]
def trim_runlist(arr, start=None, stop=None):
    # Author: <NAME> 8/2/2018
    """Return the sorted run ids of *arr* between *start* and *stop*, inclusive.

    Inputs:
        arr: run-id strings, e.g. ["20170623_0", "20170623_5", ...]
        start: First run id to keep; defaults to the earliest run in *arr*.
        stop: Last run id to keep; defaults to the latest run in *arr*.
    Outputs: A sorted, trimmed runlist from start to stop.

    Fix: None comparisons now use `is None` (PEP 8) instead of `== None`.
    """
    arr = sort_runs(arr)
    if start is None:
        start = arr[0]
    if stop is None:
        stop = arr[-1]
    start_date, start_run_num = (int(p) for p in start.split("_"))
    stop_date, stop_run_num = (int(p) for p in stop.split("_"))
    out = []
    for run in arr:
        date, run_num = (int(p) for p in run.split("_"))
        # Outside the date range entirely.
        if date < start_date or date > stop_date:
            continue
        # On a boundary date, also respect the run-number bound.
        if (date == start_date and run_num < start_run_num) or (date == stop_date and run_num > stop_run_num):
            continue
        out.append(run)
    return out
def extend_window(w, r):
    """Symmetrically widen the two-element window *w* by the ratio *r* about its midpoint.

    Inputs:
        w: two-element window [t1, t2]
        r: fractional amount to grow the window by
    Outputs: the rescaled window as a two-element list.
    """
    center = 0.5 * (w[1] + w[0])
    half_span = (w[1] - w[0]) * (1 + r) / 2
    return [center - half_span, center + half_span]
def freq_filter(freqs, lower=None, upper=None):
    """Indices of *freqs* lying within [lower, upper].

    With both bounds None the input is returned unchanged; otherwise the result
    is the np.where tuple of indices satisfying the given bound(s).
    """
    if lower is None and upper is None:
        return freqs
    values = np.asarray(freqs)
    if lower is None:
        mask = values <= upper
    elif upper is None:
        mask = values >= lower
    else:
        mask = (values >= lower) & (values <= upper)
    return np.where(mask)
def closest_index(arr, el):
    """FIRST index of the entry of *arr* nearest to *el* (arr need not be sorted)."""
    distances = np.abs(arr - el)
    return np.argmin(distances)
def spectrum_sums(spectrum, fr, n, lowerf=None, upperf=None):
    """Integrate each of the *n* time bins of *spectrum* over frequencies in [lowerf, upperf].

    Inputs:
        spectrum: 2-D spectrogram output (frequency x time bins)
        fr: frequency bins corresponding to the spectrum rows
        n: number of time bins to integrate
        lowerf, upperf: frequency cut-offs passed to freq_filter
    Outputs: list of n integrals, one per time bin.
    """
    keep = freq_filter(fr, lowerf, upperf)
    bin_width = np.mean(np.diff(fr))
    return [np.trapz(spectrum[keep[0], col], dx=bin_width) for col in range(n)]
def rescale_window(w1, w2):
    """Affinely map *w2* so its endpoints match *w1*, keeping the element count.

    Degenerate windows (min == max) map to all zeros.
    """
    lo_target, hi_target = min(w1), max(w1)
    lo_src, hi_src = min(w2), max(w2)
    if lo_src == hi_src:
        return 0 * w2
    slope = (lo_target - hi_target) / (lo_src - hi_src)
    intercept = (lo_src * hi_target - hi_src * lo_target) / (lo_src - hi_src)
    return slope * w2 + intercept
def corr_signal(tau, dt, t0, n, fit_type=0, shift=10):
    """Build a short template signal for correlation against the filtered piezo trace.

    The filtered signal shows a sharp rise followed by an exponential decay, so the
    template is a decay exp(-(t-t0)/tau) preceded by an onset shape chosen by fit_type:
        0 -> plain exponential decay
        1 -> constant 1, then the decay (continuous)
        2 -> linear ramp, then the decay
        3 -> log ramp, then the decay
        4 -> zeros, then the decay (discontinuous)
    Inputs:
        tau: decay time constant; dt: x-axis step; t0: template start time
        n: number of points; shift: onset length in samples (ceil'd to int)
    Outputs: (t, y) arrays for plotting/correlation.
    """
    shift = int(np.ceil(shift))
    t = np.linspace(t0, t0 + dt * n, n)
    decay = np.exp(-(t - t0) / tau)
    y = decay.copy()
    if fit_type in (1, 2, 3, 4):
        # Push the decay right by `shift` samples; the prefix is filled per fit_type.
        tail_len = max(n - shift, 0)
        y[shift:] = decay[:tail_len]
        if fit_type == 1:
            y[0:shift] = 1
        elif fit_type == 2:
            y[0:shift] = (t[0:shift] - t0) / (shift * dt)
        elif fit_type == 3:
            y[0:shift] = np.log((t[0:shift] + 1 - t0)) / np.log(shift * dt + 1)
        else:
            y[0:shift] = 0
    return t, y
def find_t0_from_corr(corrt, corry):
    """Time of the maximum of *corry* restricted to corrt < 0.

    Inputs:
        corrt: time values of the correlation signal
        corry: y values of the correlation signal
    Outputs: the corrt value at the largest corry among samples with corrt < 0.

    Fix: the original zeroed `corry` in place, silently clobbering the caller's
    array; we now zero a private copy so the input is left untouched.
    """
    scores = np.array(corry)  # np.array copies by default
    scores[np.where(corrt >= 0)] = 0
    return corrt[np.argmax(scores)]
def within_baseline(arr, baseline, rms):
    """True when every sample of *arr* stays strictly below baseline + 5*rms."""
    ceiling = baseline + 5 * rms
    return np.all(arr < ceiling)
def calculate_t0(data, tau, lower, upper, piezo1_fit_type=0, piezo2_fit_type=4, view_plots=False):
# Inputs:
# data: Data returned from SBCcode's GetEvent function. Must have fastDAQ loaded.
# GetEvent is found within SBCcode/DataHandling/GetSBCEvent.py
# tau: The time constant we are trying to fit to the exponential decay that occurs
# immediately after the bubble forms
# lower: The lower frequency threshold for cutting off the spectrogram
# upper: The upper frequency threshold for cutting off the spectrogram
# piezo1_fit_type: The type of fit to use when trying to match the filtered piezo1 signal. Defaults to 0.
# piezo2_fit_type: The type of fit to use when trying to match the filtered piezo2 signal. Defaults to 4.
# For a description of fit_types, see corr_signal above or check corr_signal_types.py
# view_plots: Boolean. If true, will display some plots for analysis.
# Outputs: A dictionary of results for the Acoustic Analysis.
# Issues:
# 1. The actual run_id and | |
# ******************************************************
## Revision "$LastChangedDate: 2018-06-01 15:05:44 +0200 (Fri, 01 Jun 2018) $"
## Date "$LastChangedRevision: 1 $"
## Author "$LastChangedBy: arthurbeusen $"
## URL "$HeadURL: https://pbl.sliksvn.com/generalcode/test_allocation.py $"
# ******************************************************
'''
Test script to test the functionality of allocation functions.
'''
import os
import sys
__general = os.path.join(os.getcwd(), 'trunk')
if os.path.exists(__general):
sys.path.insert(0, __general)
print(__general + " is added to the python search path for modules.")
import allocranking
import allocweighing
#def allocranking(sq,sw,wReg,qmaxReg):
#===================================================================================
#INPUT (All input is changed during this function)
# sq Regional sum of values of the variable that must be allocated
# sw Regional sum of values of weighing factor
# wReg Weighting factor grid map
# qmaxReg Maximum value of allocation variable per grid cell
# Tests 1-3: allocation-by-ranking, driven from one table of cases.
# Each case: (test number, regional sum sq, weights wReg, per-cell maxima qmaxReg,
#             expected allocation, label printed on failure).
_ranking_cases = [
    (1, 100., [1., 1., 2., 1.5], [10., 10., 75., 50.], [0.0, 0.0, 75.0, 25.0], "Allocation ranking method"),
    (2, 100., [1., 1., 2., 1.], [100., 20., 10., 50.], [20.0, 20.0, 10.0, 50.0], "Allocation ranking method 2"),
    (3, 100., [1., 1., 2., 1.], [1., 0., 0., 0.], [1.0, 0.0, 0.0, 0.0], "Allocation ranking method 3"),
]
for _num, sq, wReg, qmaxReg, qReg_exp, _label in _ranking_cases:
    sw = sum(wReg)
    qReg = allocranking.allocranking(sq, sw, wReg, qmaxReg)
    ltest1 = 1
    if (qReg != qReg_exp):
        ltest1 = 0
        print("Test " + str(_num) + " is not a succes. Values found: " + str(qReg) + " and " + str(qReg_exp))
    if (ltest1 == 1):
        print("Test" + str(_num) + " passed.")
    else:
        print(_label)
        print(qReg)
        print(qReg_exp)
# Tests 1-3: allocation-by-weighing, driven from one table of cases.
print("Start allocation weighing method testing.")
_weighing_cases = [
    (1, 100., [1., 1., 2., 6], [100., 100., 100., 100.], [10.0, 10.0, 20.0, 60.0], "Allocation weighing method"),
    (2, 100., [1., 1., 2., 6], [100., 100., 100., 50.], [12.5, 12.5, 25.0, 50.0], "Allocation weighing method 2"),
    (3, 80., [0, 0, 2., 6], [100., 100., 100., 50.], [0, 0, 30.0, 50.0], "Allocation weighing method 3"),
]
for _num, sq, wReg, qmaxReg, qReg_exp, _label in _weighing_cases:
    sw = sum(wReg)
    qReg = allocweighing.allocweighing(sq, sw, wReg, qmaxReg)
    ltest1 = 1
    if (qReg != qReg_exp):
        ltest1 = 0
        print("Test " + str(_num) + " is not a succes. Values found: " + str(qReg) + " and " + str(qReg_exp))
    if (ltest1 == 1):
        print("Test" + str(_num) + " passed.")
    else:
        print(_label)
        print(qReg)
        print(qReg_exp)
# NOTE(review): this exit aborts the script here, so the grid tests below never
# run — confirm the early exit (status 12) is intentional.
sys.exit(12)
# Grid multiply tests, one table-driven loop instead of nine copy-pasted blocks.
# Tests 1-3 use a grid with no nodata; 4-6 a grid with nodata_value only in the
# header; 7-9 a grid containing actual nodata cells. Within each grid:
#   sub-test 1: multiply by the integer scalar 1
#   sub-test 2: multiply by the float scalar 1.0 (int grid becomes float)
#   sub-test 3: multiply by a grid of ones
# In every case the product must equal the original grid.
# NOTE(review): unreachable — sys.exit(12) above stops the script first, and
# 'ascraster' is never imported in this file, so this would raise NameError if run.
for _gidx, _ascii_file in enumerate(['testgrid1.asc', 'testgrid2.asc', 'testgrid3.asc']):
    for _sub in (1, 2, 3):
        _num = 3 * _gidx + _sub
        grid1 = ascraster.Asciigrid(ascii_file=_ascii_file, numtype=int)
        grid1_old = ascraster.duplicategrid(grid1)
        if _sub == 1:
            factor = 1
            grid1.multiply(factor)
        elif _sub == 2:
            factor = 1.0
            grid1.multiply(factor)
        else:
            factor = 1.0
            gridone = ascraster.duplicategrid(grid1)
            # Make all grid entries one.
            for i in range(gridone.length):
                gridone.set_data(i, 1.0)
            grid1.multiply(gridone)
        # grid1 and grid1_old must be identical after multiplying by one.
        ltest1 = 1
        for i in range(grid1_old.length):
            if (grid1.values[i] != grid1_old.values[i]):
                ltest1 = 0
                print('Test ' + str(_num) + ' is not a succes for item: ' + str(i) + ". Values found: " + str(grid1.values[i]) + " and " + str(grid1_old.values[i]))
        if (ltest1 == 1):
            print("Test" + str(_num) + " passed.")
        else:
            if _sub == 1:
                print("Multiplying with factor 1")
            elif _sub == 2:
                print("Multiplying with factor 1.0. Changing int grid into float")
            else:
                print("Multiplying with another grid with 1.0. Changing int grid into float")
            print(grid1_old.values)
            print(grid1.values)
# Test 10
# Multiply grid1 with nodata with a grid with no nodata
# Read ascii grid
grid1 = ascraster.Asciigrid(ascii_file='testgrid3.asc',numtype=int)
gridone = ascraster.Asciigrid(ascii_file='testgrid1.asc',numtype=int)
grid1_old = ascraster.duplicategrid(grid1)
grid1.multiply(gridone)
# Grid1 must have the multiplication of grid1 and gridone
# Calculation is done on a different way
for i in range(grid1_old.length):
val1 = grid1_old.get_data(i)
val2 = gridone.get_data(i)
if (val1 == None or val2 == None):
grid1_old.set_data(i,grid1_old.nodata_value)
else:
grid1_old.set_data(i,val1*val2)
ltest1 = 1
for i in range(grid1_old.length):
if (grid1.values[i] != grid1_old.values[i]):
ltest1 = 0
print('Test 10 is not a succes for item: ' + str(i) + ". Values found: " + str(grid1.values[i]) + " and " + str(grid1_old.values[i]))
if (ltest1 == 1):
print("Test10 passed.")
else:
print("Multiplying | |
<gh_stars>0
from functools import reduce
from operator import add
import concurrent.futures
import csv
import fire
import math
import numpy as np
import os
import pandas as pd
import pickle
import re
## Utilities ##
def map_dict(elem, dictionary):
    """Look up *elem* in *dictionary*, returning ``np.nan`` when absent.

    Parameters
    ----------
    elem : hashable
        Key to look up.
    dictionary : dict
        Mapping to search.

    Returns
    -------
    The mapped value, or ``np.nan`` if *elem* is not a key.
    """
    # dict.get with a default replaces the explicit membership test
    # (single lookup instead of two).
    return dictionary.get(elem, np.nan)
def str_aggregator(x, separator='|'):
    """Join the unique values of a Series into one string.

    Missing values are replaced by the literal '<UNK>' before
    deduplication; first-occurrence order is preserved.
    """
    assert isinstance(x, pd.Series)
    filled = x.fillna('<UNK>')
    uniques = pd.unique(filled).tolist()
    return separator.join(uniques)
## Proper Classes ##
class ParseItemID(object):
    ''' This class builds the dictionaries depending on desired features.

    It reads D_ITEMS.csv from *dataset_dir* and maps each human-readable
    feature name to the set of MIMIC-III ITEMIDs whose LABEL matches the
    feature's regular expression.
    '''

    def __init__(self, *, dataset_dir):
        # Directory containing the raw MIMIC-III csv files.
        self.dataset_dir = dataset_dir
        # feature name -> set of matching ITEMIDs (filled by build_dictionary).
        self.dictionary = {}
        # Human-readable feature names, index-aligned with self.features below.
        self.feature_names = [
            'RBCs', 'WBCs', 'platelets', 'hemoglobin', 'hemocrit',
            'atypical lymphocytes', 'bands', 'basophils', 'eosinophils', 'neutrophils',
            'lymphocytes', 'monocytes', 'polymorphonuclear leukocytes',
            'temperature (F)', 'heart rate', 'respiratory rate', 'systolic', 'diastolic',
            'pulse oximetry', 'troponin', 'HDL', 'LDL', 'BUN', 'INR', 'PTT', 'PT', 'triglycerides',
            'creatinine', 'glucose', 'sodium', 'potassium', 'chloride', 'bicarbonate',
            'blood culture', 'urine culture', 'surface culture', 'sputum culture', 'wound culture',
            'Inspired O2 Fraction', 'central venous pressure', 'PEEP Set', 'tidal volume', 'anion gap',
            'daily weight', 'tobacco', 'diabetes', 'history of CV events'
        ]
        # Regex fragments for each feature.  A leading '$' means "use the
        # rest of the fragment verbatim as the pattern"; otherwise the
        # fragment is wrapped in '.*<fragment>.*' below.
        self.features = [
            '$^RBC(?! waste)', '$.*wbc(?!.*apache)', '$^platelet(?!.*intake)', '$^hemoglobin',
            '$hematocrit(?!.*Apache)', 'Differential-Atyps', 'Differential-Bands', 'Differential-Basos',
            'Differential-Eos', 'Differential-Neuts', 'Differential-Lymphs', 'Differential-Monos',
            'Differential-Polys', 'temperature f', 'heart rate', 'respiratory rate', 'systolic',
            'diastolic', 'oxymetry(?! )', 'troponin', 'HDL', 'LDL', '$^bun(?!.*apache)', 'INR', 'PTT',
            '$^pt\\b(?!.*splint)(?!.*exp)(?!.*leak)(?!.*family)(?!.*eval)(?!.*insp)(?!.*soft)',
            'triglyceride', '$.*creatinine(?!.*apache)', '(?<!boost )glucose(?!.*apache).*',
            '$^sodium(?!.*apache)(?!.*bicarb)(?!.*phos)(?!.*ace)(?!.*chlo)(?!.*citrate)(?!.*bar)(?!.*PO)',
            '$.*(?<!penicillin G )(?<!urine )potassium(?!.*apache)', '^chloride', 'bicarbonate',
            'blood culture', 'urine culture', 'surface culture', 'sputum culture', 'wound culture',
            'Inspired O2 Fraction', '$Central Venous Pressure(?! )', 'PEEP set', 'tidal volume \(set\)',
            'anion gap', 'daily weight', 'tobacco', 'diabetes', 'CV - past'
        ]
        # NOTE(review): the '$' sentinel is detected anywhere in the string,
        # not only at position 0 — all current entries only use it leading.
        self.patterns = []
        for feature in self.features:
            if '$' in feature:
                self.patterns.append(feature[1::])
            else:
                self.patterns.append('.*{0}.*'.format(feature))
        # store d_items contents
        d_items_path = os.path.join(self.dataset_dir, 'D_ITEMS.csv')
        self.d_items = pd.read_csv(d_items_path)
        self.d_items.columns = map(str.upper, self.d_items.columns)
        self.d_items = self.d_items[['ITEMID', 'LABEL']]
        self.d_items.dropna(how='any', axis=0, inplace=True)
        # Display names for prescription (drug) features, index-aligned with
        # self.script_features below.
        self.script_features_names = [
            'epoetin', 'warfarin', 'heparin', 'enoxaparin', 'fondaparinux',
            'asprin', 'ketorolac', 'acetominophen', 'insulin', 'glucagon',
            'potassium', 'calcium gluconate', 'fentanyl', 'magensium sulfate',
            'D5W', 'dextrose', 'ranitidine', 'ondansetron', 'pantoprazole',
            'metoclopramide', 'lisinopril', 'captopril', 'statin', 'hydralazine',
            'diltiazem', 'carvedilol', 'metoprolol', 'labetalol', 'atenolol',
            'amiodarone', 'digoxin(?!.*fab)', 'clopidogrel', 'nitroprusside',
            'nitroglycerin', 'vasopressin', 'hydrochlorothiazide', 'furosemide',
            'atropine', 'neostigmine', 'levothyroxine', 'oxycodone', 'hydromorphone',
            'fentanyl citrate', 'tacrolimus', 'prednisone', 'phenylephrine',
            'norepinephrine', 'haloperidol', 'phenytoin', 'trazodone', 'levetiracetam',
            'diazepam', 'clonazepam', 'propofol', 'zolpidem', 'midazolam',
            'albuterol', 'ipratropium', 'diphenhydramine', '0.9% Sodium Chloride',
            'phytonadione', 'metronidazole', 'cefazolin', 'cefepime', 'vancomycin',
            'levofloxacin', 'cipfloxacin', 'fluconazole', 'meropenem', 'ceftriaxone',
            'piperacillin', 'ampicillin-sulbactam', 'nafcillin', 'oxacillin',
            'amoxicillin', 'penicillin', 'SMX-TMP',
        ]
        # Regex fragments for matching the DRUG column of PRESCRIPTIONS.csv.
        self.script_features = [
            'epoetin', 'warfarin', 'heparin', 'enoxaparin', 'fondaparinux',
            'aspirin', 'keterolac', 'acetaminophen', 'insulin', 'glucagon',
            'potassium', 'calcium gluconate', 'fentanyl', 'magnesium sulfate',
            'D5W', 'dextrose', 'ranitidine', 'ondansetron', 'pantoprazole',
            'metoclopramide', 'lisinopril', 'captopril', 'statin', 'hydralazine',
            'diltiazem', 'carvedilol', 'metoprolol', 'labetalol', 'atenolol',
            'amiodarone', 'digoxin(?!.*fab)', 'clopidogrel', 'nitroprusside',
            'nitroglycerin', 'vasopressin', 'hydrochlorothiazide', 'furosemide',
            'atropine', 'neostigmine', 'levothyroxine', 'oxycodone', 'hydromorphone',
            'fentanyl citrate', 'tacrolimus', 'prednisone', 'phenylephrine',
            'norepinephrine', 'haloperidol', 'phenytoin', 'trazodone', 'levetiracetam',
            'diazepam', 'clonazepam', 'propofol', 'zolpidem', 'midazolam',
            'albuterol', '^ipratropium', 'diphenhydramine(?!.*%)(?!.*cream)(?!.*/)',
            '^0.9% sodium chloride(?! )', 'phytonadione', 'metronidazole(?!.*%)(?! desensit)',
            'cefazolin(?! )', 'cefepime(?! )', 'vancomycin', 'levofloxacin',
            'cipfloxacin(?!.*ophth)', 'fluconazole(?! desensit)',
            'meropenem(?! )', 'ceftriaxone(?! desensit)', 'piperacillin',
            'ampicillin-sulbactam', 'nafcillin', 'oxacillin', 'amoxicillin',
            'penicillin(?!.*Desen)', 'sulfamethoxazole'
        ]
        self.script_patterns = ['.*' + feature +
                                '.*' for feature in self.script_features]

    def prescriptions_init(self):
        """Load PRESCRIPTIONS.csv into self.prescriptions (selected columns,
        rows with any missing value dropped). Raises FileNotFoundError when
        the file is absent from the dataset directory."""
        # ensure PRESCRIPTIONS.csv dataset file exists
        prescriptions_fname = 'PRESCRIPTIONS.csv'
        prescriptions_path = os.path.join(
            self.dataset_dir, prescriptions_fname)
        if not os.path.isfile(prescriptions_path):
            raise FileNotFoundError(f'{prescriptions_fname} does not exist!')
        columns = [
            'ROW_ID', 'SUBJECT_ID', 'HADM_ID',
            'DRUG', 'STARTDATE', 'ENDDATE'
        ]
        self.prescriptions = pd.read_csv(prescriptions_path)
        self.prescriptions.columns = map(str.upper, self.prescriptions.columns)
        self.prescriptions = self.prescriptions[columns]
        self.prescriptions.dropna(how='any', axis=0, inplace=True)

    def query_prescriptions(self, feature_name):
        """Return the DRUG values whose name contains *feature_name*
        (case-insensitive). Requires prescriptions_init() to have run."""
        pattern = '.*{0}.*'.format(feature_name)
        condition = self.prescriptions['DRUG'].str.contains(
            pattern, flags=re.IGNORECASE)
        return self.prescriptions['DRUG'].where(condition).dropna().values

    def extractor(self, feature_name, pattern):
        """Store in self.dictionary[feature_name] the set of ITEMIDs whose
        LABEL matches *pattern* (case-insensitive)."""
        condition = self.d_items['LABEL'].str.contains(
            pattern, flags=re.IGNORECASE)
        dictionary_value = self.d_items['ITEMID'].where(
            condition).dropna().values.astype('int')
        self.dictionary[feature_name] = set(dictionary_value)

    def query(self, feature_name):
        """Return LABEL values containing *feature_name* (case-insensitive);
        prints the generated pattern for debugging."""
        pattern = '.*{0}.*'.format(feature_name)
        print(pattern)
        condition = self.d_items['LABEL'].str.contains(
            pattern, flags=re.IGNORECASE)
        return self.d_items['LABEL'].where(condition).dropna().values

    def query_pattern(self, pattern):
        """Return LABEL values matching a raw regex *pattern* verbatim."""
        condition = self.d_items['LABEL'].str.contains(
            pattern, flags=re.IGNORECASE)
        return self.d_items['LABEL'].where(condition).dropna().values

    def build_dictionary(self):
        """Populate self.dictionary for every configured feature."""
        assert len(self.feature_names) == len(self.features)
        for feature, pattern in zip(self.feature_names, self.patterns):
            self.extractor(feature, pattern)

    def reverse_dictionary(self, dictionary):
        """Build self.rev: ITEMID -> feature name, inverting *dictionary*."""
        self.rev = {}
        for key, value in dictionary.items():
            for elem in value:
                self.rev[elem] = key
class MimicParser:
''' This class structures the MIMIC III and builds features then makes 24 hour windows '''
def __init__(self, *, dataset_dir, artifacts_dir, redo=False):
    """Set up paths and build the ITEMID lookup tables once up front.

    Parameters
    ----------
    dataset_dir : str
        Directory holding the raw MIMIC-III csv files.
    artifacts_dir : str
        Directory where intermediate csv outputs are written.
    redo : bool
        When True, rebuild outputs even if they already exist.
    """
    self.name = 'mimic_assembler'
    self.redo = redo
    self.dataset_dir = dataset_dir
    self.artifacts_dir = artifacts_dir
    # feature key -> set of unit-of-measure strings, filled while reducing.
    self.feature_types = {}
    # ITEMID dictionaries (forward and reverse) used by all later stages.
    pid = ParseItemID(dataset_dir=dataset_dir)
    pid.build_dictionary()
    pid.reverse_dictionary(pid.dictionary)
    self.pid = pid
def normalize_tobacco_values(self, df):
'''
values of 'tobacco' item is string and will be dropped if it stays string
so we convert these values into numbers beforehand
1. collate all unique values and assert we know them
'''
tobacco_ids = self.pid.dictionary['tobacco']
tobacco_mapping = {
'Current use or use within 1 month of admission': 1,
'Stopped more than 1 month ago, but less than 1 year ago': 0.75,
'Former user - stopped more than 1 year ago': 0.5,
'Never used': 0,
'1': 1,
'0': 0,
}
tobacco_values = np.unique(
df[df['ITEMID'].isin(tobacco_ids)]['VALUE']
).tolist()
print(f'[INFO] Unique tobacco values: {tobacco_values}')
if tobacco_values:
# assert values as known
for v in tobacco_values:
if str(v) not in tobacco_mapping.keys():
print(
f'[ERROR] Unknown tobacco value: {v}, type: {type(v)}')
# convert strings to numbers
predicate1 = df['ITEMID'].isin(tobacco_ids)
for k, v in tobacco_mapping.items():
predicate2 = df['VALUE'] == k
df.loc[predicate1 & predicate2, 'VALUE'] = v
df.loc[predicate1 & predicate2, 'VALUENUM'] = v
def collate_feature_types(self, df):
    """Accumulate each (feature, ITEMID)'s units of measure into
    self.feature_types.

    A temporary FEATURE column is added so the pivot groups by readable
    feature names; it is removed again before returning. Units are joined
    by str_aggregator with '|' and merged into the cached sets.
    """
    if 'FEATURE' not in df.columns:
        df['FEATURE'] = df['ITEMID'].apply(lambda x: self.pid.rev[x])
    types = pd.pivot_table(
        df,
        index=['FEATURE', 'ITEMID'],
        values=['VALUEUOM'],
        aggfunc=str_aggregator,
    )
    # Merge the '|'-joined unit strings into the running cache of sets.
    pivoted = types.to_dict()
    for key, joined in pivoted.get('VALUEUOM', {}).items():
        units = set(joined.split('|'))
        cached = self.feature_types.get(key)
        self.feature_types[key] = units if cached is None else cached.union(units)
    # Drop the temporary grouping column again.
    if 'FEATURE' in df.columns:
        del df['FEATURE']
def export_feature_types(self):
    """Dump the collected feature -> units map to feature_types.xlsx.

    Each set of unit strings is flattened to a comma-separated string;
    self.feature_types therefore holds strings after this call.
    """
    self.feature_types = {
        key: ', '.join(units) for key, units in self.feature_types.items()
    }
    sheet = {'MEASUREMENT': self.feature_types}
    pd.DataFrame.from_dict(sheet).to_excel('feature_types.xlsx')
def reduce_total(self, chunksize=10_000_000):
    """
    This will filter out rows from CHARTEVENTS.csv that are not feature relevant.

    Reads CHARTEVENTS.csv in chunks of *chunksize* rows, keeps only rows
    whose ITEMID belongs to one of the configured features, and appends the
    result to CHARTEVENTS_reduced.csv in the artifacts directory. Also
    collects the units of measure per feature and exports them to Excel.
    Skipped entirely when the output already exists and self.redo is False.
    """
    # ensure input csv exists
    input_fname = 'CHARTEVENTS.csv'
    input_path = os.path.join(self.dataset_dir, input_fname)
    if not os.path.isfile(input_path):
        raise FileNotFoundError(f'{input_fname} does not exist!')
    # do nothing if output file already exists
    output_fname = 'CHARTEVENTS_reduced.csv'
    output_path = os.path.join(self.artifacts_dir, output_fname)
    if not self.redo and os.path.isfile(output_path):
        print(f'[reduce_total] {output_fname} already exists.')
        return
    # make a set of all the item IDs that is relevant
    relevant_item_ids = reduce(
        lambda x, y: x.union(y),
        self.pid.dictionary.values(),
    )
    columns = [
        'SUBJECT_ID', 'HADM_ID', 'ICUSTAY_ID',
        'ITEMID', 'CHARTTIME', 'VALUE', 'VALUENUM'
    ]
    iterator = pd.read_csv(
        input_path,
        iterator=True,
        chunksize=chunksize,
        low_memory=False,
    )
    for i, df_chunk in enumerate(iterator):
        print(f'[reduce_total] Processing chunk#{i}')
        # ensure column names are uppercased
        df_chunk.columns = map(str.upper, df_chunk.columns)
        # normalize tobacco values: string -> number (must happen before the
        # dropna below, which would otherwise discard those rows)
        self.normalize_tobacco_values(df_chunk)
        # select rows that has ITEMID that is feature relevant
        # and drop rows that contain nan values in the columns
        condition = df_chunk['ITEMID'].isin(relevant_item_ids)
        df = df_chunk[condition].dropna(
            axis=0,
            how='any',
            subset=columns,
        )
        # extract feature types defined in this df chunk
        self.collate_feature_types(df)
        # first chunk writes the header; later chunks append without one
        if i == 0:
            df.to_csv(
                output_path,
                index=False,
                columns=columns,
            )
        else:
            df.to_csv(
                output_path,
                index=False,
                columns=columns,
                header=None,
                mode='a',
            )
    # output excel for the feature types
    self.export_feature_types()
    print(f'[reduce_total] DONE')
def create_day_blocks(self):
"""
Uses pandas to take shards and build them out
"""
# ensure input csv exists
input_fname = 'CHARTEVENTS_reduced.csv'
input_path = os.path.join(self.artifacts_dir, input_fname)
if not os.path.isfile(input_path):
raise FileNotFoundError(f'{input_fname} does not exist!')
# do nothing if output file already exists
output_fname = 'CHARTEVENTS_reduced_24_hour_blocks.csv'
output_path = os.path.join(self.artifacts_dir, output_fname)
if not self.redo and os.path.isfile(output_path):
print(f'[create_day_blocks] {output_fname} already exists.')
return
df = pd.read_csv(input_path)
df['CHARTDAY'] = df['CHARTTIME'].astype(
'str').str.split(' ').apply(lambda x: x[0])
df['HADMID_DAY'] = df['HADM_ID'].astype('str') + '_' + df['CHARTDAY']
df['FEATURES'] = df['ITEMID'].apply(lambda x: self.pid.rev[x])
# save a mapping of HADMID_DAY -> patient ID
hadm_dict = dict(zip(df['HADMID_DAY'], df['SUBJECT_ID']))
df_src = pd.pivot_table(
df,
index='HADMID_DAY',
columns='FEATURES',
values='VALUENUM',
fill_value=np.nan,
dropna=False,
)
df_std = pd.pivot_table(
df,
index='HADMID_DAY',
columns='FEATURES',
values='VALUENUM',
aggfunc=np.std,
fill_value=0,
dropna=False,
)
df_std.columns = [f'{i}_std' for i in list(df_src.columns)]
df_min = pd.pivot_table(
df,
index='HADMID_DAY',
columns='FEATURES',
values='VALUENUM',
aggfunc=np.amin,
fill_value=np.nan,
dropna=False,
)
df_min.columns = [f'{i}_min' for i in list(df_src.columns)]
df_max = pd.pivot_table(
df,
index='HADMID_DAY',
columns='FEATURES',
values='VALUENUM',
aggfunc=np.amax,
fill_value=np.nan,
dropna=False,
)
df_max.columns = [f'{i}_max' for i in list(df_src.columns)]
df2 = pd.concat([df_src, df_std, df_min, df_max], axis=1)
# remove aggregates of tobacco and daily weights
del df2['tobacco_std']
del df2['tobacco_min']
del df2['tobacco_max']
del df2['daily weight_std']
del df2['daily weight_min']
del df2['daily weight_max']
rel_columns = list(df2.columns)
rel_columns = [i for i in rel_columns if '_' not in i]
for col in rel_columns:
unique = np.unique(df2[col])
finite_mask = np.isfinite(unique)
if len(unique[finite_mask]) <= 2:
print(f'[create_day_blocks] Will delete | |
# Copyright 2020 AstroLab Software
# Author: <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
from apps.utils import convert_jd, extract_properties
from apps.utils import extract_fink_classification_single
from apps.plotting import draw_cutout, draw_scores, all_radio_options
import numpy as np
import urllib
def card_sn_scores(data) -> dbc.Card:
    """ Card containing the score evolution

    Parameters
    ----------
    data: java.util.TreeMap
        Results from a HBase client query

    Returns
    ----------
    card: dbc.Card
        Card with the scores drawn inside
    """
    # Empty lightcurve graph; it is filled by a callback on the radio items.
    graph_lc = dcc.Graph(
        id='lightcurve_scores',
        style={'width': '100%', 'height': '15pc'},
        config={'displayModeBar': False}
    )
    # Score evolution, drawn immediately from the query result.
    graph_scores = dcc.Graph(
        id='scores',
        figure=draw_scores(data),
        style={'width': '100%', 'height': '15pc'},
        config={'displayModeBar': False}
    )
    switch_row = dbc.Row(
        dbc.RadioItems(id='switch-mag-flux-score', inline=True),
    )
    body = dbc.CardBody([graph_lc, switch_row, html.Br(), graph_scores])
    return dbc.Card(body, className="mt-3")
def card_cutouts(data):
    """ Add a card containing cutouts

    Parameters
    ----------
    data: java.util.TreeMap
        Results from a HBase client query

    Returns
    ----------
    card: dbc.Card
        Card with the cutouts drawn inside
    """
    card = dbc.Card(
        dbc.CardBody(
            [
                # Column headers for the three stamp images below.
                dbc.Row([
                    dbc.Col(html.H5(children="Science", className="text-center")),
                    dbc.Col(html.H5(children="Template", className="text-center")),
                    dbc.Col(html.H5(children="Difference", className="text-center"))
                ]),
                # The three cutout stamps; figures are filled by callbacks.
                dbc.Row([
                    dcc.Graph(
                        id='science-stamps',
                        style={
                            'display': 'inline-block',
                        },
                        config={'displayModeBar': False}
                    ),
                    dcc.Graph(
                        id='template-stamps',
                        style={
                            'display': 'inline-block',
                        },
                        config={'displayModeBar': False}
                    ),
                    dcc.Graph(
                        id='difference-stamps',
                        style={
                            'display': 'inline-block',
                        },
                        config={'displayModeBar': False}
                    ),
                ], justify='around', no_gutters=True),
                html.Br(),
                html.Br(),
                # Lightcurve below the stamps, controlled by the radio items.
                dcc.Graph(
                    id='lightcurve_cutouts',
                    style={
                        'width': '100%',
                        'height': '15pc'
                    },
                    config={'displayModeBar': False}
                ),
                dbc.Row(
                    dbc.RadioItems(
                        options=[{'label': k, 'value': k} for k in all_radio_options.keys()],
                        value="Difference magnitude",
                        id="switch-mag-flux",
                        inline=True
                    )
                )
            ]
        ),
        className="mt-3"
    )
    return card
def card_variable_plot(data):
    """ Add a card to fit for variable stars

    Parameters
    ----------
    data: java.util.TreeMap
        Results from a HBase client query

    Returns
    ----------
    card: dbc.Card
        Card with the variable drawn inside
    """
    # Placeholder graph; the figure is produced by the fit callback.
    graph = dcc.Graph(
        id='variable_plot',
        style={'width': '100%', 'height': '25pc'},
        config={'displayModeBar': False}
    )
    return dbc.Card(dbc.CardBody([graph]), className="mt-3")
# Form inputs driving the variable-star (gatspy) fit: number of base and
# band frequency terms (0-4 each) plus an optional manually-fixed period.
nterms_base = dbc.FormGroup(
    [
        dbc.Label("Number of base terms"),
        dbc.Input(
            placeholder="1",
            value=1,
            type="number",
            id='nterms_base',
            debounce=True,
            min=0, max=4
        ),
        dbc.Label("Number of band terms"),
        dbc.Input(
            placeholder="1",
            value=1,
            type="number",
            id='nterms_band',
            debounce=True,
            min=0, max=4
        ),
        dbc.Label("Set manually the period (days)"),
        dbc.Input(
            placeholder="Optional",
            value=None,
            type="number",
            id='manual_period',
            debounce=True
        )
    ], style={'width': '100%', 'display': 'inline-block'}
)
# Button triggering the variable-star fit callback (id 'submit_variable').
submit_varstar_button = dbc.Button(
    'Fit data',
    id='submit_variable',
    style={'width': '100%', 'display': 'inline-block'},
    block=True
)
def card_variable_button(data):
    """ Add a card containing button to fit for variable stars

    Shows the object ID, the Fink classification, a summary of neighbour
    cross-matches, the fit-parameter form and the 'Fit data' button.

    Parameters
    ----------
    data: java.util.TreeMap
        Results from a HBase client query

    Returns
    ----------
    card: dbc.Card
    """
    pdf = extract_properties(
        data, [
            'i:objectId',
            'i:jd',
            'd:cdsxmatch',
            'i:objectidps1',
            'i:distpsnr1',
            'i:neargaia',
            'i:distnr',
        ]
    )
    # Most recent alert first; [0] below therefore picks the latest values.
    pdf = pdf.sort_values('i:jd', ascending=False)
    id0 = pdf['i:objectId'].values[0]
    cdsxmatch = pdf['d:cdsxmatch'].values[0]
    distnr = pdf['i:distnr'].values[0]
    objectidps1 = pdf['i:objectidps1'].values[0]
    distpsnr1 = pdf['i:distpsnr1'].values[0]
    neargaia = pdf['i:neargaia'].values[0]
    classification = extract_fink_classification_single(data)
    card = dbc.Card(
        [
            html.H5("ObjectID: {}".format(id0), className="card-title"),
            html.H6(
                "Fink class: {}".format(classification),
                className="card-subtitle"
            ),
            dcc.Markdown(
                """
                ---
                ```python
                # Neighbourhood
                SIMBAD: {}
                PS1: {}
                Distance (PS1): {:.2f} arcsec
                Distance (Gaia): {:.2f} arcsec
                Distance (ZTF): {:.2f} arcsec
                ```
                """.format(
                    cdsxmatch, objectidps1, float(distpsnr1),
                    float(neargaia), float(distnr))
            ),
            dbc.Row(nterms_base),
            dbc.Row(submit_varstar_button)
        ],
        className="mt-3", body=True
    )
    return card
# Button triggering the microlensing fit callback (id 'submit_mulens').
submit_mulens_button = dbc.Button(
    'Fit data',
    id='submit_mulens',
    style={'width': '100%', 'display': 'inline-block'},
    block=True
)
def card_mulens_button(data):
    """ Add a card containing button to fit for microlensing events

    Shows the object ID, the Fink classification, a summary of neighbour
    cross-matches and the 'Fit data' button.

    Parameters
    ----------
    data: java.util.TreeMap
        Results from a HBase client query

    Returns
    ----------
    card: dbc.Card
    """
    pdf = extract_properties(
        data, [
            'i:objectId',
            'i:jd',
            'd:cdsxmatch',
            'i:objectidps1',
            'i:distpsnr1',
            'i:neargaia',
            'i:distnr',
        ]
    )
    # Most recent alert first; [0] below therefore picks the latest values.
    pdf = pdf.sort_values('i:jd', ascending=False)
    id0 = pdf['i:objectId'].values[0]
    cdsxmatch = pdf['d:cdsxmatch'].values[0]
    distnr = pdf['i:distnr'].values[0]
    objectidps1 = pdf['i:objectidps1'].values[0]
    distpsnr1 = pdf['i:distpsnr1'].values[0]
    neargaia = pdf['i:neargaia'].values[0]
    classification = extract_fink_classification_single(data)
    card = dbc.Card(
        [
            html.H5("ObjectID: {}".format(id0), className="card-title"),
            html.H6(
                "Fink class: {}".format(classification),
                className="card-subtitle"
            ),
            dcc.Markdown(
                """
                ---
                ```python
                # Neighbourhood
                SIMBAD: {}
                PS1: {}
                Distance (PS1): {:.2f} arcsec
                Distance (Gaia): {:.2f} arcsec
                Distance (ZTF): {:.2f} arcsec
                ```
                """.format(
                    cdsxmatch, objectidps1, float(distpsnr1),
                    float(neargaia), float(distnr))
            ),
            dbc.Row(submit_mulens_button)
        ],
        className="mt-3", body=True
    )
    return card
def card_mulens_plot(data):
    """ Add a card to fit for microlensing events

    Parameters
    ----------
    data: java.util.TreeMap
        Results from a HBase client query

    Returns
    ----------
    card: dbc.Card
        Card with the microlensing fit drawn inside
    """
    # Placeholder graph; the figure is produced by the fit callback.
    graph = dcc.Graph(
        id='mulens_plot',
        style={'width': '100%', 'height': '25pc'},
        config={'displayModeBar': False}
    )
    return dbc.Card(dbc.CardBody([graph]), className="mt-3")
def card_explanation_variable():
    """ Explain what is used to fit for variable stars
    """
    msg = """
    _Fill the fields on the right, and press `Fit data` to
    perform a time series analysis of the data:_

    - _Number of base terms: number of frequency terms to use for the base model common to all bands (default=1)_
    - _Number of band terms: number of frequency terms to use for the residuals between the base model and each individual band (default=1)_

    _The fit is done using [gatspy](https://zenodo.org/record/47887)
    described in [VanderPlas & Ivezic (2015)](https://ui.adsabs.harvard.edu/abs/2015ApJ...812...18V/abstract).
    We use a multiband periodogram (LombScargleMultiband) to find the best period.
    Alternatively, you can manually set the period in days._
    """
    body = dbc.CardBody(dcc.Markdown(msg))
    return dbc.Card(
        body,
        style={'backgroundColor': 'rgb(248, 248, 248, .7)'}
    )
def card_explanation_mulens():
    """ Explain what is used to fit for microlensing events
    """
    msg = """
    _Press `Fit data` to perform a time series analysis of the data. Fitted parameters will be displayed on the right panel._

    _The fit is done using [pyLIMA](https://github.com/ebachelet/pyLIMA)
    described in [Bachelet et al (2017)](https://ui.adsabs.harvard.edu/abs/2017AJ....154..203B/abstract).
    We use a simple PSPL model to fit the data._
    """
    body = dbc.CardBody(dcc.Markdown(msg))
    return dbc.Card(
        body,
        style={'backgroundColor': 'rgb(248, 248, 248, .7)'}
    )
def card_id(data):
    """ Add a card containing basic alert data

    Shows object ID, Fink classification, general properties (date/RA/Dec),
    magnitude variability, neighbour cross-matches, and external-service
    buttons (TNS/SIMBAD/NED/SDSS) built from the alert coordinates.
    """
    pdf = extract_properties(
        data, [
            'i:objectId',
            'i:candid',
            'i:jd',
            'i:ra',
            'i:dec',
            'd:cdsxmatch',
            'i:objectidps1',
            'i:distpsnr1',
            'i:neargaia',
            'i:distnr',
            'i:magpsf',
            'i:magnr',
            'i:fid'
        ]
    )
    # Most recent alert first; [0] below therefore picks the latest values.
    pdf = pdf.sort_values('i:jd', ascending=False)
    id0 = pdf['i:objectId'].values[0]
    candid0 = pdf['i:candid'].values[0]
    ra0 = pdf['i:ra'].values[0]
    dec0 = pdf['i:dec'].values[0]
    date0 = convert_jd(float(pdf['i:jd'].values[0]))
    cdsxmatch = pdf['d:cdsxmatch'].values[0]
    distnr = pdf['i:distnr'].values[0]
    objectidps1 = pdf['i:objectidps1'].values[0]
    distpsnr1 = pdf['i:distpsnr1'].values[0]
    neargaia = pdf['i:neargaia'].values[0]
    magpsfs = pdf['i:magpsf'].astype(float).values
    magnrs = pdf['i:magnr'].astype(float).values
    fids = pdf['i:fid'].values
    # Magnitude difference w.r.t. the reference image, only meaningful when
    # the nearest reference source is within 2 arcsec.
    if float(distnr) < 2:
        deltamagref = np.round(magnrs[0] - magpsfs[0], 3)
    else:
        deltamagref = None
    # Magnitude difference between the two latest alerts in the same band.
    mask = fids == fids[0]
    if np.sum(mask) > 1:
        deltamaglatest = np.round(magpsfs[mask][0] - magpsfs[mask][1], 3)
    else:
        deltamaglatest = None
    classification = extract_fink_classification_single(data)
    card = dbc.Card(
        [
            html.H5("ObjectID: {}".format(id0), className="card-title"),
            html.H6("Fink class: {}".format(classification), className="card-subtitle"),
            dcc.Markdown(
                """
                ```python
                # General properties
                Date: {}
                RA: {} deg
                Dec: {} deg
                ```
                ---
                ```python
                # Variability
                Dmag (latest): {}
                Dmag (reference): {}
                ```
                ---
                ```python
                # Neighbourhood
                SIMBAD: {}
                PS1: {}
                Distance (PS1): {:.2f} arcsec
                Distance (Gaia): {:.2f} arcsec
                Distance (ZTF): {:.2f} arcsec
                ```
                ---
                """.format(
                    date0, ra0, dec0,
                    deltamaglatest, deltamagref,
                    cdsxmatch, objectidps1, float(distpsnr1),
                    float(neargaia), float(distnr))
            ),
            # External catalog lookups centred on the alert position.
            dbc.ButtonGroup([
                dbc.Button('TNS', id='TNS', target="_blank", href='https://wis-tns.weizmann.ac.il/search?ra={}&decl={}&radius=5&coords_unit=arcsec'.format(ra0, dec0)),
                dbc.Button('SIMBAD', id='SIMBAD', target="_blank", href="http://simbad.u-strasbg.fr/simbad/sim-coo?Coord={}%20{}&Radius=0.08".format(ra0, dec0)),
                dbc.Button('NED', id='NED', target="_blank", href="http://ned.ipac.caltech.edu/cgi-bin/objsearch?search_type=Near+Position+Search&in_csys=Equatorial&in_equinox=J2000.0&ra={}&dec={}&radius=1.0&obj_sort=Distance+to+search+center&img_stamp=Yes".format(ra0, dec0)),
                dbc.Button('SDSS', id='SDSS', target="_blank", href="http://skyserver.sdss.org/dr13/en/tools/chart/navi.aspx?ra={}&dec={}".format(ra0, dec0)),
            ])
        ],
        className="mt-3", body=True
    )
    return card
def card_sn_properties(data):
""" Add a card containing SN alert data
"""
pdf = extract_properties(
data, [
'i:objectId',
'i:ra',
'i:dec',
'i:jd',
'd:cdsxmatch',
'd:snn_snia_vs_nonia',
'd:snn_sn_vs_all',
'd:rfscore',
'i:classtar',
'i:ndethist',
'i:drb',
'i:distnr',
'i:magpsf',
'i:magnr',
'i:fid'
]
)
pdf = pdf.sort_values('i:jd', ascending=False)
id0 = pdf['i:objectId'].values[0]
snn_snia_vs_nonia = pdf['d:snn_snia_vs_nonia'].values[0]
snn_sn_vs_all = pdf['d:snn_sn_vs_all'].values[0]
rfscore = pdf['d:rfscore'].values[0]
classtar = pdf['i:classtar'].values[0]
ndethist = pdf['i:ndethist'].values[0]
drb = pdf['i:drb'].values[0]
ra0 = pdf['i:ra'].values[0]
dec0 = pdf['i:dec'].values[0]
distnr = pdf['i:distnr'].values[0]
magpsfs = pdf['i:magpsf'].astype(float).values
magnrs = pdf['i:magnr'].astype(float).values
fids = pdf['i:fid'].values
if float(distnr) < 2:
deltamagref = np.round(magnrs[0] - magpsfs[0], 3)
else:
deltamagref = None
mask = fids == fids[0]
if np.sum(mask) > 1:
deltamaglatest = np.round(magpsfs[mask][0] - magpsfs[mask][1], 3)
else:
deltamaglatest = None
classification = extract_fink_classification_single(data)
card = dbc.Card(
[
html.H5("ObjectID: {}".format(id0), className="card-title"),
html.H6(
"Fink class: {}".format(classification),
className="card-subtitle"
),
dcc.Markdown(
"""
---
```python
# SuperNNova classification
SN Ia score: {:.2f}
SNe score: {:.2f}
# Random Forest classification
RF score: {:.2f}
```
---
```python
# Variability
Dmag (latest): {}
Dmag (reference): {}
```
---
```python
# Extra properties
Classtar: {:.2f}
Detection in the survey: {}
DL Real bogus: {:.2f}
```
---
""".format(
float(snn_snia_vs_nonia),
float(snn_sn_vs_all),
float(rfscore),
deltamaglatest,
deltamagref,
float(classtar),
ndethist,
float(drb)
)
),
html.Br(),
dbc.ButtonGroup([
dbc.Button('TNS', id='TNS', target="_blank", href='https://wis-tns.weizmann.ac.il/search?ra={}&decl={}&radius=5&coords_unit=arcsec'.format(ra0, dec0)),
dbc.Button('SIMBAD', id='SIMBAD', target="_blank", href="http://simbad.u-strasbg.fr/simbad/sim-coo?Coord={}%20{}&Radius=0.08".format(ra0, dec0)),
dbc.Button('NED', id='NED', target="_blank", | |
-t OBJECTS -i ' + file_name + ' > ' + ofile)
file = open(ofile,'r').readlines()
for line in file:
if string.find(line,"Key name") != -1 :
red = re.split('\.+',line)
key = red[1].replace(' ','').replace('\n','')
out_key = prefix + key
if reduce(lambda x,y: x+ y, [string.find(out_key,k)!=-1 for k in key_list]):
out.write("COL_NAME = " + out_key + '\nCOL_INPUT = ' + key + '\nCOL_MERGE = AVE_REG\nCOL_CHAN = ' + str(i) + "\n#\n")
#print key
keys.append(key)
out.close()
def make_ssc_config_colors(list):
    """Write /tmp/tmp.ssc, a make_ssc column config with one channel per
    input catalog and output columns suffixed with the band prefix.

    NOTE(review): Python 2 code ('print' statement, 'string' module).
    The 'list' parameter shadows the builtin but is kept for interface
    compatibility. File handles rely on CPython refcounting for closing.
    """
    ofile = '/tmp/tmp.cat'
    out = open('/tmp/tmp.ssc','w')
    import os, string, re
    keys = []
    i = -1
    for file_name,prefix in list:
        i += 1
        print file_name
        # Dump the LDAC OBJECTS table description to a scratch file.
        os.system('ldacdesc -t OBJECTS -i ' + file_name + ' > ' + ofile)
        file = open(ofile,'r').readlines()
        for line in file:
            if string.find(line,"Key name") != -1:
                # Lines look like 'Key name......<key>'; split on the dots.
                red = re.split('\.+',line)
                key = red[1].replace(' ','').replace('\n','')
                # Suffix with the band prefix so columns do not collide.
                out_key = key + '_' + prefix
                out.write("COL_NAME = " + out_key + '\nCOL_INPUT = ' + key + '\nCOL_MERGE = AVE_REG\nCOL_CHAN = ' + str(i) + "\n#\n")
                #print key
                keys.append(key)
    out.close()
def threesec():
    """Match three hard-coded MACS0417-11 SUBARU pasted catalogs (Rc, V, Ic)
    in color mode.

    NOTE(review): absolute /nfs/slac paths are site-specific.
    """
    list = [['/nfs/slac/g/ki/ki05/anja/SUBARU/MACS0417-11/PHOTOMETRY/ILLUMINATION/pasted_SUPA0105807_W-C-RC_2009-01-23_CALIB_0.0.cat','W-C-RC'],['/nfs/slac/g/ki/ki05/anja/SUBARU/MACS0417-11/PHOTOMETRY/ILLUMINATION/pasted_SUPA0105787_W-J-V_2009-01-23_CALIB_0.0.cat','W-J-V'],['/nfs/slac/g/ki/ki05/anja/SUBARU/MACS0417-11/PHOTOMETRY/ILLUMINATION/pasted_SUPA0050786_W-C-IC_2006-12-21_CALIB_0.0.cat','W-C-IC']]
    match_many(list,True)
def match_many(list,color=False):
if color:
make_ssc_config_colors(list)
print color
else:
make_ssc_config_few(list)
import os
files = []
for file,prefix in list:
print file
command = 'ldacaddkey -i %(inputcat)s -t OBJECTS -o %(outputcat)s -k A_WCS_assoc 0.0003 FLOAT "" \
B_WCS_assoc 0.0003 FLOAT "" \
Theta_assoc 0.0 FLOAT "" \
Flag_assoc 0 SHORT "" ' % {'inputcat':file,'outputcat':file + '.assoc1'}
os.system(command)
#command = 'ldacrenkey -i %(inputcat)s -o %(outputcat)s -k ALPHA_J2000 Ra DELTA_J2000 Dec' % {'inputcat':file + '.assoc1','outputcat':file+'.assoc2'}
#os.system(command)
files.append(file+'.assoc1')
import re
files_input = reduce(lambda x,y:x + ' ' + y,files)
os.system('mkdir /usr/work/pkelly/assoc/')
files_output = reduce(lambda x,y:x + ' ' + y,['/usr/work/pkelly/assoc/'+re.split('\/',z)[-1] +'.assd' for z in files])
print files
print files_input, files_output
command = 'associate -i %(inputcats)s -o %(outputcats)s -t OBJECTS -c ' + os.environ['bonn'] + '/photconf/fullphotom.alpha.associate' % {'inputcats':files_input,'outputcats':files_output}
print command
os.system(command)
outputcat = '/tmp/final.cat'
command = 'make_ssc -i %(inputcats)s \
-o %(outputcat)s\
-t OBJECTS -c /tmp/tmp.ssc ' % {'inputcats':files_output,'outputcat':outputcat}
os.system(command)
def match_inside(SUPA1,SUPA2,FLAT_TYPE):
    """Match the pasted catalogs of two exposures of the same field against
    each other, writing the result under PHOTOMETRY/ILLUMINATION/SELF/ and
    recording it with save_exposure.

    NOTE(review): Python 2 code; relies on project helpers get_files /
    initialize / save_exposure defined elsewhere.
    """
    dict1 = get_files(SUPA1,FLAT_TYPE)
    search_params1 = initialize(dict1['filter'],dict1['OBJNAME'])
    search_params1.update(dict1)
    dict2 = get_files(SUPA2,FLAT_TYPE)
    search_params2 = initialize(dict2['filter'],dict2['OBJNAME'])
    search_params2.update(dict2)
    import os
    path='/nfs/slac/g/ki/ki05/anja/SUBARU/%(OBJNAME)s/' % {'OBJNAME':search_params1['OBJNAME']}
    # NOTE(review): this literal has no %-placeholders, so the mapping below
    # is a no-op; the string is used as-is.
    illum_path='/nfs/slac/g/ki/ki05/anja/SUBARU/ILLUMINATION/' % {'OBJNAME':search_params1['OBJNAME']}
    #os.system('mkdir -p ' + path + 'PHOTOMETRY/ILLUMINATION/')
    os.system('mkdir -p ' + path + 'PHOTOMETRY/ILLUMINATION/SELF/')
    from glob import glob
    catalog1 = search_params1['pasted_cat']
    catalog2 = search_params2['pasted_cat']
    #os.system('ldacrentab -i ' + catalog2 + ' -t OBJECTS STDTAB -o ' + catalog2.replace('cat','std.cat'))
    filter = search_params1['filter'] #exposures[exposure]['keywords']['filter']
    OBJECT = search_params1['OBJECT'] #exposures[exposure]['keywords']['OBJECT']
    outcat = path + 'PHOTOMETRY/ILLUMINATION/SELF/matched_' + SUPA1 + '_' + filter + '_' + '_self.cat'
    file = 'matched_' + SUPA1 + '.cat'
    # Remove any stale output before regenerating it.
    os.system('rm ' + outcat)
    command = 'match_simple_cats.sh ' + catalog1 + ' ' + catalog2 + ' ' + outcat
    print command
    os.system(command)
    save_exposure({'matched_cat_self':outcat},SUPA1,FLAT_TYPE)
    print outcat
def getTableInfo():
    """Scan /tmp/final.cat for columns named 'ROT$IMAGE$KEY' and return a
    dict mapping each rotation to its deduplicated list of image names.

    NOTE(review): Python 2 code (has_key, string.find). Image SUPA0011082
    is explicitly excluded — presumably a known-bad exposure; confirm.
    """
    import astropy, astropy.io.fits as pyfits, sys, os, re, string, copy , string
    p = pyfits.open('/tmp/final.cat')
    tbdata = p[1].data
    types = []
    ROTS = {}
    KEYS = {}
    for column in p[1].columns:
        if string.find(column.name,'$') != -1:
            print column
            # Column names are '<rotation>$<image>$<key>'.
            res = re.split('\$',column.name)
            ROT = res[0]
            IMAGE = res[1]
            KEY = res[2]
            if not ROTS.has_key(ROT):
                ROTS[ROT] = []
            # Append each image once per rotation.
            if not len(filter(lambda x:x==IMAGE,ROTS[ROT])) and IMAGE!='SUPA0011082':
                ROTS[ROT].append(IMAGE)
    return ROTS
def diffCalcNew():
    """Build and print the rotation -> images mapping from /tmp/final.cat.

    Same column scan as getTableInfo but without the SUPA0011082 exclusion
    and without returning the result (it is only printed).
    NOTE(review): Python 2 code.
    """
    import astropy, astropy.io.fits as pyfits, sys, os, re, string, copy , string
    p = pyfits.open('/tmp/final.cat')
    tbdata = p[1].data
    types = []
    ROTS = {}
    KEYS = {}
    for column in p[1].columns:
        if string.find(column.name,'$') != -1:
            print column
            # Column names are '<rotation>$<image>$<key>'.
            res = re.split('\$',column.name)
            ROT = res[0]
            IMAGE = res[1]
            KEY = res[2]
            if not ROTS.has_key(ROT):
                ROTS[ROT] = []
            if not len(filter(lambda x:x==IMAGE,ROTS[ROT])):
                ROTS[ROT].append(IMAGE)
    print ROTS
    #good = 0
    #for i in range(len(tbdata)):
    #    array = []
    #    for y in ROTS[ROT]:
    #        array += [tbdata.field(ROT+'$'+y+'$CLASS_STAR')[i] for y in ROTS[ROT]]
    #    array.sort()
    #    if array[-1]>0.9 and array[-2]>0.9:
    #        good += 1
    #print good, len(tbdata)
    #raw_input()
def starConstruction(EXPS):
    ''' Compute per-object median and std of MAG_AUTO across all exposures
    and merge them back into the matched catalog.

    EXPS maps rotation -> list of image names; zero magnitudes are treated
    as missing. Objects with no valid measurement get median -99 / std 99.
    NOTE(review): Python 2 code; mk_tab and merge are project helpers
    defined elsewhere — confirm their location.
    '''
    import astropy, astropy.io.fits as pyfits, sys, os, re, string, copy , string, scipy
    p = pyfits.open('/tmp/final.cat')
    table = p[1].data
    from copy import copy
    # One magnitude array per (rotation, image) pair.
    w = []
    for ROT in EXPS.keys():
        for y in EXPS[ROT]:
            w.append(copy(table.field(ROT+'$'+y+'$MAG_AUTO')))
    medians = []
    stds = []
    for i in range(len(w[0])):
        # Collect the non-zero (i.e. actually measured) magnitudes of object i.
        non_zero = []
        for j in range(len(w)):
            if w[j][i] != 0:
                non_zero.append(w[j][i])
        if len(non_zero) != 0:
            medians.append(float(scipy.median(non_zero)))
            stds.append(float(scipy.std(non_zero)))
        else:
            # Sentinel values for objects with no valid measurement.
            medians.append(float(-99))
            stds.append(99)
    print medians[0:99]
    tnew = mk_tab([[medians,'median'],[stds,'std']])
    tall = merge(tnew,p)
    print 'done merging'
def selectGoodStars(EXPS):
    '''Select catalog objects usable as calibration stars.

    Two phases:

    1. Iteratively filter the catalog per exposure (non-zero MAG_AUTO,
       MAG_AUTO < 27, peak+background below the saturation cut).  Any
       exposure in which no object survives is dropped from EXPS and the
       filtering restarts from the full table.
    2. From the survivors, estimate a per-exposure "zero point" (mean
       MAG_AUTO), then keep every object that is well measured in at
       least two exposures within 0.3 mag of its median offset.

    NOTE(review): the original docstring claimed a CLASS_STAR>0.9 /
    0.01-mag criterion; the code below actually uses the thresholds
    marked "MAIN PARAMETER!" (-9 on CLASS_STAR, i.e. effectively off,
    and 0.3 mag).

    :param EXPS: dict rotation -> list of image names; may be pruned
    :return: tuple (pruned EXPS, list of good row indices, list of
        per-star info dicts)
    '''
    import astropy, astropy.io.fits as pyfits, sys, os, re, string, copy , string, scipy
    p = pyfits.open('/tmp/final.cat')
    #print p[1].columns
    #raw_input()
    table = p[1].data
    star_good = [] #= scipy.zeros(len(table))
    supas = []
    from copy import copy
    ''' if there is an image that does not match, throw it out '''
    Finished = False
    while not Finished:
        # restart from a fresh copy of the full table on every pass
        temp = copy(table)
        for ROT in EXPS.keys():
            for y in EXPS[ROT]:
                # cut 1: keep only entries actually measured in this exposure
                mask = temp.field(ROT+'$'+y+'$MAG_AUTO') != 0.0
                good_entries = temp[mask]
                temp = good_entries
                print len(good_entries.field(ROT+'$'+y+'$MAG_AUTO'))
                # cut 2: reject faint objects
                mask = temp.field(ROT+'$'+y+'$MAG_AUTO') < 27
                good_entries = temp[mask]
                temp = good_entries
                print len(good_entries.field(ROT+'$'+y+'$MAG_AUTO'))
                # cut 3: magnitude must be strictly positive
                mask = 0 < temp.field(ROT+'$'+y+'$MAG_AUTO')
                good_entries = temp[mask]
                temp = good_entries
                print len(good_entries.field(ROT+'$'+y+'$MAG_AUTO'))
                print ROT,y, temp.field(ROT+'$'+y+'$MaxVal')[0:10],temp.field(ROT+'$'+y+'$BackGr')[0:10]
                # cut 4: reject saturated objects (peak + background >= 26000)
                mask = (temp.field(ROT+'$'+y+'$MaxVal') + temp.field(ROT+'$'+y+'$BackGr')) < 26000
                good_entries = temp[mask]
                temp = good_entries
                good_number = len(good_entries.field(ROT+'$'+y+'$MAG_AUTO'))
                print ROT,y, good_number , EXPS
                if good_number == 0:
                    # exposure y kills every object: rebuild EXPS without it
                    # and retry the whole filtering loop
                    TEMP = {}
                    for ROTTEMP in EXPS.keys():
                        TEMP[ROTTEMP] = []
                        for yTEMP in EXPS[ROTTEMP]:
                            if y != yTEMP:
                                TEMP[ROTTEMP].append(yTEMP)
                    EXPS = TEMP
                    break
        # done only when the last processed exposure kept at least one entry
        if good_number != 0:
            Finished = True
    print len(temp), 'temp'
    zps = {}
    print EXPS.keys(), EXPS
    # per-exposure "zero point": mean MAG_AUTO over the surviving entries
    for ROT in EXPS.keys():
        for y in EXPS[ROT]:
            s = good_entries.field(ROT+'$'+y+'$MAG_AUTO').sum()
            print s
            print s/len(good_entries)
            zps[y] = s/len(good_entries)
    print zps
    from copy import copy
    # cache the needed columns of the FULL (unfiltered) table for fast
    # per-row access in the loop below
    tab = {}
    for ROT in EXPS.keys():
        for y in EXPS[ROT]:
            for key in [ROT+'$'+y+'$Xpos_ABS',ROT+'$'+y+'$Ypos_ABS',ROT+'$'+y+'$MAG_AUTO',ROT+'$'+y+'$MAGERR_AUTO',ROT+'$'+y+'$MaxVal',ROT+'$'+y+'$BackGr',ROT+'$'+y+'$CLASS_STAR',ROT+'$'+y+'$Flag','SDSSstdMag_corr','SDSSstdMagErr_corr','SDSSstdMagColor_corr','SDSSstdMagClean_corr']:
                tab[key] = copy(table.field(key))
    for i in range(len(table)):
        mags_ok = False
        star_ok = False
        class_star_array = []
        include_star = []
        in_box = []
        name = []
        mags_diff_array = []
        mags_good_array = []
        for ROT in EXPS.keys():
            #for y in EXPS[ROT]:
            #    if table.field(ROT+'$'+y+'$MAG_AUTO')[i] != 0.0:
            # offset of this object from each exposure's zero point
            mags_diff_array += [zps[y] - tab[ROT+'$'+y+'$MAG_AUTO'][i] for y in EXPS[ROT]]
            mags_good_array += [tab[ROT+'$'+y+'$MAG_AUTO'][i]!=0.0 for y in EXPS[ROT]]
            # NOTE(review): single-element append using the leaked loop
            # variable `y` from the comprehension above (Python 2 leaks
            # it), so only the LAST exposure is tested — looks like a bug;
            # confirm whether `for y in EXPS[ROT]` was intended.
            in_box += [1500 < tab[ROT+'$'+y+'$Xpos_ABS'][i] < 8500 and 1500 < tab[ROT+'$'+y+'$Ypos_ABS'][i] < 6500]
            # per-exposure usability: unsaturated, unflagged, measured,
            # and with a small magnitude error
            include_star += [((tab[ROT+'$'+y+'$MaxVal'][i] + tab[ROT+'$'+y+'$BackGr'][i]) < 25000 and tab[ROT+'$'+y+'$Flag'][i]==0 and tab[ROT+'$'+y+'$MAG_AUTO'][i] < 40 and tab[ROT+'$'+y+'$MAG_AUTO'][i]!=0.0 and tab[ROT+'$'+y+'$MAGERR_AUTO'][i]<0.05) for y in EXPS[ROT]]
            #for y in EXPS[ROT]:
            #    print (tab[ROT+'$'+y+'$MaxVal'][i] + tab[ROT+'$'+y+'$BackGr'][i]) < 27500 , tab[ROT+'$'+y+'$Flag'][i]==0 , tab[ROT+'$'+y+'$MAG_AUTO'][i] < 40 , tab[ROT+'$'+y+'$MAG_AUTO'][i]!=0.0
            name += [{'name':EXPS[ROT][z],'rotation':ROT} for z in range(len(EXPS[ROT]))]
            class_star_array += [tab[ROT+'$'+y+'$CLASS_STAR'][i] for y in EXPS[ROT]]
        class_star_array.sort()
        #if len(mags_array) > 1:
        #    if 1: #abs(mags_array[0] - mags_array[1]) < 0.5:
        #        mags_ok = True
        #        if 1: #abs(class_star_array[-1]) > 0.01: # MAIN PARAMETER!
        #            star_ok = True
        # threshold of -9 means every object passes this star/galaxy cut
        if abs(class_star_array[-1]) > -9: # MAIN PARAMETER!
            star_ok = True
        if star_ok: #mags_ok and star_ok:
            # collect zero-point offsets from exposures that measured the object
            list = []
            for k in range(len(mags_good_array)):
                if mags_good_array[k]:
                    list.append(mags_diff_array[k])
            if len(list) > 1:
                median_mag_diff = scipy.median(list)
                #print median_mag_diff, mags_diff_array, class_star_array, include_star
                # exposures where the object is usable AND consistent
                # (within 0.3 mag of its median offset)
                file_list=[]
                for j in range(len(include_star)):
                    if include_star[j] and abs(mags_diff_array[j] - median_mag_diff) < 0.3: # MAIN PARAMETER!
                        file_list.append(name[j])
                # flag whether an SDSS standard magnitude exists / is usable
                if tab['SDSSstdMag_corr'][i] != 0.0: sdss_exists = 1
                else: sdss_exists = 0
                if 40. > tab['SDSSstdMag_corr'][i] > 0.0: sdss = 1 # and tab['SDSSstdMagClean_corr'][i]==1: sdss = 1
                else: sdss = 0
                #if 40. > tab['SDSSstdMag_corr'][i] > 0.0: sdss = 1
                # keep the object only if at least two exposures agree
                if len(file_list) > 1:
                    star_good.append(i)
                    supas.append({'table index':i,'supa files':file_list, 'sdss':sdss, 'sdss_exists':sdss_exists})
        # progress indicator
        if i%2000==0: print i
    return EXPS, star_good, supas
def diffCalc(SUPA1,FLAT_TYPE):
dict = get_files(SUPA1,FLAT_TYPE)
search_params = initialize(dict['filter'],dict['OBJNAME'])
search_params.update(dict)
import astropy, astropy.io.fits as pyfits, sys, os, re, string, copy
print | |
# <gh_stars>0
from . import TestConfig
from app.api_views.operation import *
from app.logic.operation import getCountry, getOperation
from app.logic.person import getPerson, getPohto, deletePerson
import json
import os
from io import BytesIO
class TestOperationApi(TestConfig):
    """Country / status / type lookup and admin country-creation tests.

    The redacted ``<PASSWORD>`` tokens (which made this file a syntax
    error) are restored to the obvious ``os.getenv('admin_pass')`` /
    ``admin_password`` reads used consistently elsewhere in the suite.
    """
    def test_addcountry(self):
        """ admin can add a new country """
        # first login as admin
        admin_phone = os.getenv('admin_phone')
        admin_password = os.getenv('admin_pass')
        if not admin_phone or not admin_password:
            raise ValueError('Environment variables not found!')
        # the user country
        country = getCountry(phoneCode=20)
        data = {'phone':admin_phone, 'password':admin_password, 'country_id':country.id}
        # post request
        result = self.client_app.post("/api/login", data=json.dumps(data), content_type='application/json')
        data = json.loads(result.data.decode())
        self.assertEqual(data['status'], 'success')
        self.assertEqual(result.status_code, 200)
        adminToken = data['data']['token']
        # add new country
        headers = {'token':adminToken}
        data = {'name': 'usa', 'phone_code':1, 'phone_length':10, 'iso_name':'USA'}
        result = self.client_app.post("/api/addcountry", data=json.dumps(data),\
            headers=headers,content_type='application/json')
        data = json.loads(result.data.decode())
        self.assertEqual(data['status'], 'success')
        self.assertEqual(data['message'], None)
        self.assertEqual(result.content_type, 'application/json')
        self.assertEqual(result.status_code, 201)
        # the country must now exist in the database
        country = getCountry(name='usa')
        self.assertTrue(country, 'no country')
        self.assertEqual(country.name, 'usa', 'no country')
        self.assertEqual(country.phone_code, 1, 'no country')
        self.assertEqual(country.iso_name, 'USA', 'no country')
    def test_addcountry2(self):
        """ try add country without valid data"""
        # first login as admin
        admin_phone = os.getenv('admin_phone')
        admin_password = os.getenv('admin_pass')
        if not admin_phone or not admin_password:
            raise ValueError('Environment variables not found!')
        # the user country
        country = getCountry(phoneCode=20)
        data = {'phone':admin_phone, 'password':admin_password, 'country_id':country.id}
        # post request
        result = self.client_app.post("/api/login", data=json.dumps(data), content_type='application/json')
        data = json.loads(result.data.decode())
        self.assertEqual(data['status'], 'success')
        self.assertEqual(result.status_code, 200)
        adminToken = data['data']['token']
        # add new country with an empty payload -> 400
        headers = {'token':adminToken}
        data = {}
        result = self.client_app.post("/api/addcountry", data=json.dumps(data),\
            headers=headers,content_type='application/json')
        data = json.loads(result.data.decode())
        self.assertEqual(data['status'], 'failure')
        self.assertEqual(data['message'], 'required data not submitted')
        self.assertEqual(result.content_type, 'application/json')
        self.assertEqual(result.status_code, 400)
        # missing phone_code / iso_name -> 400
        data = {'name': 'usa', 'phone_length':10}
        result = self.client_app.post("/api/addcountry", data=json.dumps(data),\
            headers=headers,content_type='application/json')
        data = json.loads(result.data.decode())
        self.assertEqual(data['status'], 'failure')
        self.assertEqual(data['message'], 'required data not submitted')
        self.assertEqual(result.content_type, 'application/json')
        self.assertEqual(result.status_code, 400)
        # already exists
        data = {'name': 'sudan', 'phone_code':249, 'phone_length':9, 'iso_name':'SD'}
        result = self.client_app.post("/api/addcountry", data=json.dumps(data),\
            headers=headers,content_type='application/json')
        data = json.loads(result.data.decode())
        self.assertEqual(data['status'], 'failure')
        self.assertEqual(data['message'], 'Country already exists.')
        self.assertEqual(result.content_type, 'application/json')
        self.assertEqual(result.status_code, 202)
    def test_getcountry(self):
        """ test the /getcountry route"""
        result = self.client_app.get("/api/getcountry", content_type='application/json')
        data = json.loads(result.data.decode())
        self.assertEqual(data['status'], 'success')
        self.assertTrue(data['data']['country'])
        self.assertEqual(result.content_type, 'application/json')
        self.assertEqual(result.status_code, 200)
    def test_getcountry2(self):
        """ test the /getcountry route with perm"""
        result = self.client_app.get("/api/getcountry/1", content_type='application/json')
        data = json.loads(result.data.decode())
        self.assertEqual(data['status'], 'success')
        self.assertTrue(data['data']['country'])
        self.assertEqual(result.content_type, 'application/json')
        self.assertEqual(result.status_code, 200)
    def test_getstatusoperation(self):
        """ test the /getstatusoperation route"""
        result = self.client_app.get("/api/getstatusoperation", content_type='application/json')
        data = json.loads(result.data.decode())
        self.assertEqual(data['status'], 'success')
        self.assertTrue(data['data']['status_operation'])
        self.assertEqual(result.content_type, 'application/json')
        self.assertEqual(result.status_code, 200)
    def test_getstatusoperation2(self):
        """ test the /getstatusoperation route with perm"""
        result = self.client_app.get("/api/getstatusoperation/1", content_type='application/json')
        data = json.loads(result.data.decode())
        self.assertEqual(data['status'], 'success')
        self.assertTrue(data['data']['status_operation'])
        self.assertEqual(result.content_type, 'application/json')
        self.assertEqual(result.status_code, 200)
    def test_gettypeoperation(self):
        """ test the /gettypeoperation route"""
        result = self.client_app.get("/api/gettypeoperation", content_type='application/json')
        data = json.loads(result.data.decode())
        self.assertEqual(data['status'], 'success')
        self.assertTrue(data['data']['type_operation'])
        self.assertEqual(result.content_type, 'application/json')
        self.assertEqual(result.status_code, 200)
    def test_gettypeoperation2(self):
        """ test the /gettypeoperation route with perm"""
        result = self.client_app.get("/api/gettypeoperation/1", content_type='application/json')
        data = json.loads(result.data.decode())
        self.assertEqual(data['status'], 'success')
        self.assertTrue(data['data']['type_operation'])
        self.assertEqual(result.content_type, 'application/json')
        self.assertEqual(result.status_code, 200)
class TestOperationApi2(TestConfig):
def test_addoperation(self):
""" add new operation"""
# first log-in
admin_phone = os.getenv('admin_phone')
admin_password = os.getenv('<PASSWORD>')
# the user country
country = getCountry(phoneCode=20)
data = {'phone':admin_phone, 'password':<PASSWORD>, 'country_id':country.id}
result = self.client_app.post("/api/login", data=json.dumps(data), content_type='application/json')
data = json.loads(result.data.decode())
self.assertEqual(data['status'], 'success')
self.assertEqual(result.status_code, 200)
token = data['data']['token']
# add new operation
headers = {'token':token}
file_path = os.path.join(current_app.config['UPLOAD_FOLDER'], os.getenv('IMEGE_TEST_NAME'))
# send 3 photos
photos = []
for _ in range(3):
photos.append((open(file_path, 'rb'), os.getenv('IMEGE_TEST_NAME')))
from app.models import Age
age = Age.query.first()
data = {'photos':photos, 'date':'2020-11-15',
'type_id':2, 'country_id':1,
'object_type':'Person', 'person_name':'mustafa', 'gender':'male', 'age_id':age.id}
result = self.client_app.post("/api/addoperation", data=data, headers=headers,\
content_type="multipart/form-data")
# close the files
for file in photos:
file[0].close()
data = json.loads(result.data.decode())
self.assertEqual(data['message'], None)
self.assertEqual(data['status'], 'success')
self.assertEqual(result.content_type, 'application/json')
self.assertEqual(result.status_code, 201)
# the person get added
person = getPerson()
self.assertTrue(person[0])
photos = getPohto(object=person[0])
self.assertTrue(photos)
self.assertEqual(len(photos), 3)
# delete person to get rid of the photos
deletePerson(id=person[0].id)
# no photos after delete
self.assertEqual(len(getPohto()), 0, 'photos not deleted')
# get new token with auto generate and then try add new operation
#
headers = {'token':token}
# check login again - generate token
result = self.client_app.post("/api/checklogin", headers=headers, content_type='application/json')
data = json.loads(result.data.decode())
self.assertEqual(data['status'], 'success')
self.assertEqual(result.status_code, 200)
# new user token
newtoken = data['data'].get('token')
self.assertTrue(token, 'no new token returned')
headers = {'token':newtoken}
# now add new operation
#
data = {'date':'2020-11-15',
'type_id':2, 'country_id':1,
'object_type':'Person', 'person_name':'mustafa', 'gender':'male', 'age_id':age.id}
result = self.client_app.post("/api/addoperation", data=data, headers=headers,\
content_type="multipart/form-data")
data = json.loads(result.data.decode())
self.assertEqual(data['message'], None)
self.assertEqual(data['status'], 'success')
self.assertEqual(result.content_type, 'application/json')
self.assertEqual(result.status_code, 201)
def test_addoperation2(self):
""" add operations then get it """
# first log-in
admin_phone = os.getenv('admin_phone')
admin_password = os.getenv('<PASSWORD>')
# the user country
country = getCountry(phoneCode=20)
data = {'phone':admin_phone, 'password':<PASSWORD>, 'country_id':country.id}
result = self.client_app.post("/api/login", data=json.dumps(data), content_type='application/json')
data = json.loads(result.data.decode())
self.assertEqual(data['status'], 'success')
self.assertEqual(result.status_code, 200)
token = data['data']['token']
from app.models import Age
age = Age.query.first()
# add new operation
headers = {'token':token}
data = {'date':'2020-11-15',
'type_id':1, 'country_id':'1',
'object_type':'Person', 'person_name':'mustafa', 'gender':'male','age_id':age.id}
result = self.client_app.post("/api/addoperation", data=data, headers=headers,\
content_type="multipart/form-data")
data = json.loads(result.data.decode())
self.assertEqual(data['message'], None)
self.assertEqual(data['status'], 'success')
self.assertEqual(result.content_type, 'application/json')
self.assertEqual(result.status_code, 201)
# add new operation2
headers = {'token':token}
data = {'date':'2020-10-25',
'type_id':2, 'country_id':'2',
'object_type':'Car', "brand":'brand', "model":'model',
"plate_number_letters":"klj", "plate_number_numbers":"123",
"car_type": "1"}
result = self.client_app.post("/api/addoperation", data=data, headers=headers,\
content_type="multipart/form-data")
data = json.loads(result.data.decode())
self.assertEqual(data['message'], None)
self.assertEqual(data['status'], 'success')
self.assertEqual(result.content_type, 'application/json')
self.assertEqual(result.status_code, 201)
# get the operation
data = {}
result=self.client_app.get('/api/getoperation', query_string=data, content_type='application/json')
data = json.loads(result.data.decode())
self.assertTrue(data, 'no operations')
self.assertEqual(len(data['data']['operations']), 2)
self.assertEqual(data['message'], None)
self.assertEqual(data['status'], 'success')
self.assertEqual(result.content_type, 'application/json')
self.assertEqual(result.status_code, 200)
# get the operation wuth country id filter
data = {'country_id':1}
result=self.client_app.get('/api/getoperation', query_string=data, content_type='application/json')
data = json.loads(result.data.decode())
self.assertTrue(data, 'no operations')
self.assertEqual(len(data['data']['operations']), 1)
self.assertEqual(data['message'], None)
self.assertEqual(data['status'], 'success')
self.assertEqual(result.content_type, 'application/json')
self.assertEqual(result.status_code, 200)
# get the operation with object=Peron filter
data = {'object':"Person"}
result=self.client_app.get('/api/getoperation', query_string=data, content_type='application/json')
data = json.loads(result.data.decode())
self.assertTrue(data, 'no operations')
self.assertEqual(len(data['data']['operations']), 1)
self.assertEqual(data['message'], None)
self.assertEqual(data['status'], 'success')
self.assertEqual(result.content_type, 'application/json')
self.assertEqual(result.status_code, 200)
# get the operation with object=Car filter
data = {'object':"Car"}
result=self.client_app.get('/api/getoperation', query_string=data, content_type='application/json')
data = json.loads(result.data.decode())
self.assertTrue(data, 'no operations')
self.assertEqual(len(data['data']['operations']), 1)
self.assertEqual(data['message'], None)
self.assertEqual(data['status'], 'success')
self.assertEqual(result.content_type, 'application/json')
self.assertEqual(result.status_code, 200)
# get the operation with wrong object filter
data = {'object':"wrong"}
result=self.client_app.get('/api/getoperation', query_string=data, content_type='application/json')
data = json.loads(result.data.decode())
self.assertTrue(data, 'no operations')
self.assertEqual(len(data['data']['operations']), 0)
self.assertEqual(data['message'], None)
self.assertEqual(data['status'], 'success')
self.assertEqual(result.content_type, 'application/json')
self.assertEqual(result.status_code, 200)
# get the operation with not exist data
data = {'id':"4"}
result=self.client_app.get('/api/getoperation', query_string=data, content_type='application/json')
data = json.loads(result.data.decode())
self.assertTrue(data, 'no operations')
self.assertEqual(len(data['data']['operations']), 0)
self.assertEqual(data['message'], None)
self.assertEqual(data['status'], 'success')
self.assertEqual(result.content_type, 'application/json')
self.assertEqual(result.status_code, 200)
def test_addoperation3(self):
''' add operation with full data then get it '''
# first log-in
admin_phone = os.getenv('admin_phone')
admin_password = <PASSWORD>('<PASSWORD>')
# the user country
country = getCountry(phoneCode=20)
data = {'phone':admin_phone, 'password':<PASSWORD>, 'country_id':country.id}
result = self.client_app.post("/api/login", data=json.dumps(data), content_type='application/json')
data = json.loads(result.data.decode())
self.assertEqual(data['status'], 'success')
self.assertEqual(result.status_code, 200)
token = data['data']['token']
# add new operation
headers = {'token':token}
lat = 48.856613
lng = 2.352222
from app.models import Age
age = Age.query.first()
data = {'date':'2020-11-15',
'type_id':1, 'country_id':'1',
'object_type':'Person', 'person_name':'mustafa', 'age_id':age.id,
'details': 'this long paragraph of details', 'gender':'male', 'skin':2, 'shelter':True,
'lat':lat, 'lng':lng}
result = self.client_app.post("/api/addoperation", data=data, headers=headers,\
content_type="multipart/form-data")
data = json.loads(result.data.decode())
self.assertEqual(data['message'], None)
self.assertEqual(data['status'], 'success')
self.assertEqual(result.status_code, 201)
# get the operation
data = {}
result=self.client_app.get('/api/getoperation', query_string=data, content_type='application/json')
data = json.loads(result.data.decode())
self.assertTrue(data['data']['operations'])
self.assertEqual(data['message'], None)
self.assertEqual(data['status'], 'success')
self.assertEqual(data['data']['operations'][0]['details'], 'this long paragraph of details', 'no details')
self.assertEqual(data['data']['operations'][0]['lat'], lat, 'no lat')
self.assertEqual(data['data']['operations'][0]['lng'], lng, 'no lng')
self.assertEqual(data['data']['operations'][0]['object']['skin'], 2, 'no skin')
self.assertEqual(data['data']['operations'][0]['object']['shelter'], True, 'no shelter')
self.assertEqual(data['data']['operations'][0]['object']['age_id'], age.id, 'not same age')
self.assertEqual(data['data']['operations'][0]['user']['name'], 'admin', 'no user')
self.assertEqual(result.content_type, 'application/json')
self.assertEqual(result.status_code, 200)
# try add with shelter is False or witout shelter
#
data = {'date':'2020-11-15',
'type_id':1, 'country_id':'1',
'object_type':'Person', 'person_name':'mustafa', 'age_id':age.id,
'details': 'this long paragraph of details', 'gender':'male', 'skin':2, 'shelter':False,
'lat':lat, 'lng':lng}
result = self.client_app.post("/api/addoperation", data=data, headers=headers,\
content_type="multipart/form-data")
data = json.loads(result.data.decode())
self.assertEqual(data['message'], None)
self.assertEqual(data['status'], 'success')
self.assertEqual(result.status_code, 201)
data = {'date':'2020-11-15',
'type_id':1, 'country_id':'1',
'object_type':'Person', 'person_name':'mustafa', 'age_id':age.id,
'details': 'this long paragraph of details', 'gender':'male', 'skin':2,
'lat':lat, 'lng':lng}
result = self.client_app.post("/api/addoperation", data=data, headers=headers,\
content_type="multipart/form-data")
data = json.loads(result.data.decode())
self.assertEqual(data['message'], None)
self.assertEqual(data['status'], 'success')
self.assertEqual(result.status_code, 201)
def test_addoperation4(self):
''' add new user then add operation with it'''
admin_phone = os.getenv('admin_phone')
admin_password = os.getenv('admin_pass')
if not admin_phone or not admin_password:
raise ValueError('Environment variables not found!')
# the user country
country = getCountry(phoneCode=20)
data = {'phone':admin_phone, 'password':<PASSWORD>, 'country_id':country.id}
# post requset
resultAdmin = self.client_app.post("/api/login", data=json.dumps(data), content_type='application/json')
data = json.loads(resultAdmin.data.decode())
self.assertEqual(data['status'], 'success')
self.assertTrue(data['data']['token'], 'no token returned')
self.assertEqual(resultAdmin.status_code, 200)
# add user
# the user country
country = getCountry(phoneCode=249)
userData = {'name':'mustafa', 'phone':'0123456789', 'password':'<PASSWORD>',\
'status':'active', 'permission':'user', 'country_id':country.id}
headers = {'token':data['data']['token']}
result = self.client_app.post("/api/adduser", data=json.dumps(userData),\
headers=headers,content_type='application/json')
data = json.loads(result.data.decode())
self.assertEqual(data['status'], 'success')
self.assertEqual(result.status_code, 201)
# login with the new user
data = {'phone':'0123456789', 'password':'<PASSWORD>', 'country_id':country.id}
resultAdmin = self.client_app.post("/api/login", data=json.dumps(data), content_type='application/json')
data = json.loads(resultAdmin.data.decode())
self.assertEqual(data['status'], 'success')
self.assertTrue(data['data']['token'], 'no token returned')
self.assertEqual(resultAdmin.status_code, 200)
token = data['data']['token']
from app.models import Age
age = Age.query.first()
# try add operation with this user
headers = {'token':token}
data = {'date':'2020-11-15',
'type_id':2, 'country_id':1,
'object_type':'Person', 'person_name':'mustafa', 'gender':'male', 'age_id':age.id}
result = self.client_app.post("/api/addoperation", data=data, headers=headers,\
content_type="multipart/form-data")
data = json.loads(result.data.decode())
self.assertEqual(data['message'], None)
self.assertEqual(data['status'], 'success')
self.assertEqual(result.content_type, 'application/json')
self.assertEqual(result.status_code, 201)
def test_addoperation5(self):
''' add operation with full data then get it - with 'car' object'''
# first log-in
admin_phone = os.getenv('admin_phone')
admin_password = os.getenv('admin_pass')
# the user country
country = getCountry(phoneCode=20)
data = {'phone':admin_phone, 'password':<PASSWORD>, 'country_id':country.id}
result = self.client_app.post("/api/login", data=json.dumps(data), content_type='application/json')
data = json.loads(result.data.decode())
self.assertEqual(data['status'], 'success')
self.assertEqual(result.status_code, 200)
token = data['data']['token']
# add new operation
headers = {'token':token}
lat = 48.856613
lng = 2.352222
data = {'date':'2020-11-15',
'type_id':1, 'country_id':'1',
'object_type':'Car', 'model':'toyota', 'car_type':'1', 'brand':'brand',
'plate_number_letters':'fds', 'plate_number_numbers': '321',
'details': 'this long paragraph of details',
'lat':lat, 'lng':lng}
result = self.client_app.post("/api/addoperation", data=data, headers=headers,\
content_type="multipart/form-data")
data = json.loads(result.data.decode())
self.assertEqual(data['message'], None)
self.assertEqual(data['status'], 'success')
self.assertEqual(result.status_code, 201)
# get the operation
data = {}
result=self.client_app.get('/api/getoperation', query_string=data, content_type='application/json')
data = json.loads(result.data.decode())
self.assertTrue(data['data']['operations'])
self.assertEqual(data['message'], None)
self.assertEqual(data['status'], 'success')
self.assertEqual(data['data']['operations'][0]['details'], 'this long paragraph of details', 'no details')
self.assertEqual(data['data']['operations'][0]['lat'], lat, 'no lat')
self.assertEqual(data['data']['operations'][0]['lng'], lng, 'no lng')
self.assertEqual(data['data']['operations'][0]['object']['model'], 'toyota', 'no car model')
self.assertEqual(data['data']['operations'][0]['object']['brand'], 'brand', 'not same brand')
self.assertEqual(data['data']['operations'][0]['user']['name'], 'admin', 'no user')
self.assertEqual(result.content_type, 'application/json')
self.assertEqual(result.status_code, 200)
def test_addoperation6(self):
''' add operation with full data then get it - with 'car' object'''
# first log-in
admin_phone = os.getenv('admin_phone')
admin_password = os.getenv('<PASSWORD>')
# the user country
country = getCountry(phoneCode=20)
data = {'phone':admin_phone, 'password':<PASSWORD>, 'country_id':country.id}
result = self.client_app.post("/api/login", data=json.dumps(data), content_type='application/json')
data = json.loads(result.data.decode())
self.assertEqual(data['status'], 'success')
self.assertEqual(result.status_code, 200)
token = data['data']['token']
# add new operation
headers = {'token':token}
lat = 48.856613
lng = 2.352222
data = {'date':'2020-11-15',
'type_id':1, 'country_id':'1',
'object_type':'Car', 'model':'toyota', 'car_type':'1', 'brand':'brand',
'plate_number_letters':'fds', 'plate_number_numbers': '321',
'details': 'this long paragraph of details',
'lat':lat, 'lng':lng,
'city':'giza', 'state':'cairo'}
result = self.client_app.post("/api/addoperation", data=data, headers=headers,\
content_type="multipart/form-data")
data = json.loads(result.data.decode())
self.assertEqual(data['message'], None)
self.assertEqual(data['status'], 'success')
self.assertEqual(result.status_code, 201)
# get the operation
data = {}
result=self.client_app.get('/api/getoperation', query_string=data, content_type='application/json')
data = json.loads(result.data.decode())
self.assertTrue(data['data']['operations'])
self.assertEqual(data['message'], None)
self.assertEqual(data['status'], 'success')
self.assertEqual(data['data']['operations'][0]['details'], 'this long paragraph of details', 'no details')
self.assertEqual(data['data']['operations'][0]['lat'], lat, 'no lat')
self.assertEqual(data['data']['operations'][0]['lng'], lng, 'no lng')
self.assertEqual(data['data']['operations'][0]['city'], 'giza', 'not the same city')
self.assertEqual(data['data']['operations'][0]['state'], 'cairo', 'not the same stete')
self.assertEqual(data['data']['operations'][0]['object']['model'], 'toyota', 'no car model')
self.assertEqual(data['data']['operations'][0]['object']['brand'], 'brand', 'not same brand')
self.assertEqual(data['data']['operations'][0]['user']['name'], 'admin', 'no user')
self.assertEqual(result.content_type, 'application/json')
self.assertEqual(result.status_code, 200)
"""
def test_addoperationArabic(self):
''' try add arabic data for the operation'''
# first log-in
admin_phone = os.getenv('admin_phone')
admin_password = os.<PASSWORD>('<PASSWORD>')
# the user country
country = getCountry(phoneCode=20)
data = {'phone':admin_phone, 'password':<PASSWORD>, 'country_id':country.id}
result = self.client_app.post("/api/login", data=json.dumps(data), content_type='application/json')
data = json.loads(result.data.decode())
self.assertEqual(data['status'], 'success')
self.assertEqual(result.status_code, 200)
token = data['data']['token']
from app.models import Age
age = Age.query.first()
# add new operation
headers = {'token':token}
data = {'date':'2020-11-15',
'type_id':1, 'country_id':'1',
'object_type':'Person', 'person_name':'مصطفى', 'gender':'male','age_id':age.id}
result = self.client_app.post("/api/addoperation", data=data, headers=headers,\
content_type="multipart/form-data")
data = json.loads(result.data.decode())
self.assertEqual(data['message'], None)
self.assertEqual(data['status'], 'success')
self.assertEqual(result.content_type, 'application/json')
self.assertEqual(result.status_code, 201)
"""
def test_addoperation7(self):
''' add new operation with Accident object'''
# first log-in
admin_phone = os.getenv('admin_phone')
admin_password = os.getenv('admin_pass')
# the user country
country = getCountry(phoneCode=20)
data = {'phone':admin_phone, 'password':<PASSWORD>, 'country_id':country.id}
result = self.client_app.post("/api/login", data=json.dumps(data), content_type='application/json')
data = json.loads(result.data.decode())
self.assertEqual(data['status'], 'success')
self.assertEqual(result.status_code, 200)
token = data['data']['token']
headers = {'token':token}
from app.models import Age
age = Age.query.first()
# now add the operation
# the cars and the persons must be a json string
cars = [{'model':'toyota', 'car_type':'1', 'brand':'brand',
'plate_number_letters':'fds', 'plate_number_numbers': '321'},
{'model':'toyota', 'car_type':'1', 'brand':'brand',
'plate_number_letters':'fdrs', 'plate_number_numbers': '3241'}]
persons = [{'person_name':'mustafa', 'gender':'male','age_id':age.id},
{'person_name':'mustafa2', 'gender':'male','age_id':age.id}]
data = {'date':'2020-11-15',
'type_id':1, 'country_id':'1',
'object_type':'Accident',
'cars': json.dumps(cars),
'persons': | |
@param noCenter if True, do not remove the average before computing the covariance,
it means we assume variables are already centered
@return correlation factor or correlation, sigma1, sigma2 if deviations is True
"""
if len(values) <= 1:
raise ValueError( # pragma: no cover
"expecting more than one observation, not %d" % len(values))
mx = 0.
my = 0.
vx = 0.
vy = 0.
co = 0.
nb = 0.
for a, b in values:
nb += 1
mx += a
my += b
vx += a ** 2
vy += b ** 2
co += a * b
mx /= nb
my /= nb
vx /= nb
vy /= nb
co /= nb
if not noCenter:
vx -= mx ** 2
vy -= my ** 2
co -= mx * my
vx = vx ** 0.5
vy = vy ** 0.5
v = vx * vy
if v != 0:
co /= v
if deviations:
return co, vx, vy # pragma: no cover
return co
def _private_getclass(self):
"""
the class often creates another class of the same type,
this function returns the class object
"""
return self.__class__
    def __init__(self, file, numeric_column=None, sep="\t", encoding=None,
                 read_n_lines=-1, sheet=0, **options):
        """
        It can either take a filename, an object TableFormula,
        a list of columns and values.

        :param file: filename or a list of column names or a dictionary,
            file can also be a `pandas DataFrame
            <http://pandas.pydata.org/pandas-docs/dev/dsintro.html#dataframe>`_.
        :param numeric_column: depends on file types(see below examples)
        :param sep: column separator if file is a filename
        :param encoding: file encoding, used only when *file* is a filename
        :param read_n_lines: read the first n lines(or all if it is -1)
        :param sheet: in case the file is an Excel file, this parameter precises the sheet number or name
        :param suffix_nb: if True, adds an integer to the column name if it is a duplicate
            (read from ``**options``)

        Example:
        ::

            table = TableFormula("name d_a d_b d_c#A 1 2 3#A 1.1 2.1 3.1#B 3 4 5".replace(" ", "\\t").replace("#","\\n"))

        or
        ::

            table = TableFormula("file.txt", ["nb"])

        or
        ::

            table = TableFormula(["date", "Y", "Y2", "xl"], values)

        or
        ::

            data = [{ "one":1, "two":2 }, {"two":2.1, "three":3 }]
            table = TableFormula(data)

        or
        ::

            data = { 1:{ "one":2.3, "two":2.2 }, 2:{"one":2.1, "two":3 } }
            table = TableFormula("__byrow__", data)

        or
        ::

            table = TableFormula(numpy.matrix(...))

        or
        ::

            table = TableFormula(numpy.array(...))

        @warning In this second case, rows and header are not copied.
        """
        if numeric_column is None:
            # avoids the mutable-default-argument pitfall
            numeric_column = []
        if isinstance(file, str):
            # string input: an existing path, the "__byrow__" marker,
            # or raw multi-line table content given inline
            if os.path.exists(file):
                self._read_file(file, numeric_column, sep,
                                encoding, read_n_lines, sheet=sheet)
            elif file == "__byrow__" and isinstance(numeric_column, dict):
                # numeric_column holds { row_index: { column: value } }
                self._fill_by_row(numeric_column)
            else:
                lines = file.split("\n")
                if len(lines) == 1:
                    # a single line with no newline is most likely a
                    # misspelled filename rather than inline content
                    raise FileNotFoundError(  # pragma: no cover
                        "A file was probably expected but was not found: '{}'."
                        "".format(file))
                self._readlines(lines, numeric_column, sep)
        elif isinstance(file, list):
            if len(file) == 0:
                raise ValueError(  # pragma: no cover
                    "Empty data and columns are not allowed.")
            if isinstance(file[0], dict):
                # list of {column: value} rows: build the union of keys
                self.index = {}
                self.values = []
                for row in file:
                    for k, v in row.items():
                        if k not in self.index:
                            self.index[k] = len(self.index)
                # we sort the labels to avoid instabilities
                labels = [k for k, v in self.index.items()]
                labels.sort()
                self.index = {}
                for la in labels:
                    self.index[la] = len(self.index)
                for row in file:
                    line = [None for k in self.index]
                    for k, v in row.items():
                        line[self.index[k]] = v
                    self.values.append(line)
                self.header = [None for k in self.index]
                for k, v in self.index.items():
                    self.header[v] = k
                # pad short rows so every row has one cell per column
                n = len(self.index)
                for row in self.values:
                    while len(row) < n:
                        row.append(None)
            elif isinstance(numeric_column, numpy.matrix):
                # NOTE(review): this branch keys on numeric_column, not on
                # file[0] -- file is the list of column names, the matrix
                # carries the data
                self.header = file
                self.index = {}
                for i, h in enumerate(self.header):
                    self.index[h] = i
                self.values = [[float(numeric_column[i, j]) for j in range(
                    numeric_column.shape[1])] for i in range(numeric_column.shape[0])]
            elif isinstance(numeric_column, numpy.ndarray):
                # same as above, with a plain numpy array instead of a matrix
                self.header = file
                self.index = {}
                for i, h in enumerate(self.header):
                    self.index[h] = i
                self.values = [[float(numeric_column[i, j]) for j in range(
                    numeric_column.shape[1])] for i in range(numeric_column.shape[0])]
            elif isinstance(file[0], list):
                # list of lists: first row is the header, remaining rows
                # are the values
                if len(file) == 1:
                    # NOTE(review): with a single (header) row, file[1:] is
                    # empty, so values come entirely from numeric_column
                    self.header = file[0]
                    self.values = file[1:] + numeric_column
                    self.index = {}
                    for i, h in enumerate(self.header):
                        self.index[h] = i
                else:
                    self.header = file[0]
                    self.values = file[1:]
                    self.index = {}
                    for i, h in enumerate(self.header):
                        self.index[h] = i
            elif isinstance(file[0], str):
                # list of column names; values are given separately
                self.header = file
                self.values = numeric_column
                self.index = {}
                for i, h in enumerate(self.header):
                    self.index[h] = i
            else:
                raise RuntimeError(  # pragma: no cover
                    "This case should not happen: " + str(type(file[0])))
        elif isinstance(file, numpy.matrix):  # pragma: no cover
            # bare matrix: generate default column names c0, c1, ...
            self.header = ["c%d" % d for d in range(file.shape[1])]
            self.index = {}
            for i, h in enumerate(self.header):
                self.index[h] = i
            self.values = [[float(file[i, j]) for j in range(
                file.shape[1])] for i in range(file.shape[0])]
        elif isinstance(file, numpy.ndarray):
            # bare array: generate default column names c0, c1, ...
            self.header = ["c%d" % d for d in range(file.shape[1])]
            self.index = {}
            for i, h in enumerate(self.header):
                self.index[h] = i
            self.values = [[float(file[i, j]) for j in range(
                file.shape[1])] for i in range(file.shape[0])]
        else:
            if isinstance(file, pandas.DataFrame):
                # dataframe: NaN cells become None and the dataframe index
                # is inserted as the first column
                def convert(x):
                    return None if isinstance(x, float) and numpy.isnan(x) else x
                df = file
                self.header = [_ for _ in df.columns]
                # pick a name for the index column that does not collide
                # with an existing column
                hi = 'index'
                while hi in self.header:
                    hi += "_"
                self.header.insert(0, hi)
                self.values = []
                for i, row in enumerate(df.values):
                    row = [df.index[i]] + [convert(x) for x in row]
                    self.values.append(row)
                self.index = {}
                for i, h in enumerate(self.header):
                    self.index[h] = i
            else:
                raise TypeError(  # pragma: no cover
                    "File has an unexpected type: " + str(type(file)))
        # duplicated column names: rename with a _<position> suffix when
        # option suffix_nb is set, otherwise fail
        unique = {}
        for i, c in enumerate(self.header):
            if c in unique:
                if options.get("suffix_nb", False):
                    c = "%s_%d" % (c, i)
                    self.header[i] = c
                else:
                    raise KeyError(  # pragma: no cover
                        "column '{0}' already exists in '{1}'".format(c, self.header))
            unique[c] = True
def __add__(self, other):
"""
do an addition, add values if types are matching
:param other: matrix or float or string
:return: new matrix, keep the header of the first matrix
"""
if len(self) != len(other):
raise ValueError( # pragma: no cover
"both matrices should have the same number of rows")
if len(self.header) != len(other.header):
raise ValueError( # pragma: no cover
"both matrices should have the same number of columns")
values = []
for row, rowo in zip(self.values, other.values):
r = []
for a, b in zip(row, rowo):
if type(a) == type(b):
x = a + b
else:
x = None
r.append(x)
values.append(r)
return self._private_getclass()(self.header, values)
def __mul__(self, other):
"""
do a multiplication(by a number)
:param other: matrix or float or string
:return: new matrix, keep the header of the first matrix
"""
if not isinstance(other, float) and not isinstance(other, int):
raise TypeError( # pragma: no cover
"other should be a number")
values = []
for row in self.values:
r = []
for a in row:
if a is not None:
x = a * other
else:
x = None
r.append(x)
values.append(r)
return self._private_getclass()(self.header, values)
def multiplication_term_term(self, other):
"""
do a multiplication term by term(similar to an addition),
add values if types are matching
:param other: matrix or float or string
:return: new matrix, keep the header of the first matrix
"""
if len(self) != len(other):
raise ValueError( # pragma: no cover
"both matrices should have the same number of rows")
if len(self.header) != len(other.header):
raise ValueError( # pragma: no cover
"both matrices should have the same number of columns")
values = []
for row, rowo in zip(self.values, other.values):
r = []
for a, b in zip(row, rowo):
if type(a) == type(b) and not isinstance(a, str):
x = a * b
else:
x = None
r.append(x)
values.append(r)
return self._private_getclass()(self.header, values)
def replicate(self, times):
"""replicates all rows a given number of times
:param times: number of multiplication
:return: new matrix, keep the header of the first matrix
"""
values = []
for i in range(0, times):
values.extend(copy.copy(self.values))
return self._private_getclass()(self.header, values)
@property
def size(self):
"""
returns the size(nb rows, nb columns)
"""
return len(self), len(self.header)
    @property
    def shape(self):
        """
        Returns the size(nb rows, nb columns).

        Alias of :attr:`size`, provided for numpy/pandas familiarity.
        """
        return self.size
def _fill_by_row(self, values):
"""
fill the table
:param values: dictionary { <int_row_index>: { <column name>: value} }
"""
mx = max(values.keys()) + | |
VMs
:param username: user id
:type username: string
:param cloudname: cloud name
:type cloudname: string
:param itemkeys: a list of lists, The first item in a sublist
is used as header name, the folling ones are
the path to the value that user wants in the
dict, for example:
itemkeys = [
['id', 'id'],
['name', 'name'],
['vcpus', 'vcpus'],
['ram', 'ram'],
['disk', 'disk'],
['refresh time', 'cm_refrsh']
]
The first id is the header name, second id is a path.
:type itemkeys: list
:param output: designed for shell command otherwise
leave it as False, if True the function will return
server names and ids
:type output: boolean
:param print_format: provide the printing format, such as table, json...
:type print_format: string
:param serverdata: if provided, the function will print this data
instead of vms of a cloud, please learn the VM's
data format in the database
:type serverdata: dict
:param group: provide a group name for VMs so that the function will
only print the VMs in the group
:type group: string
:return: if param output is True, return the flavor names and their ids
:rtype: list
'''
# GroupManagement loads Mongodb connections automatically by import.
# I moved this import inside of this function which is limiting import
# scope, so that other functions in this file can freely be used without
# loading the db connection. hyungro lee 12/01/2014
from cloudmesh.experiment.group import GroupManagement
self._connect_to_mongo()
if refresh:
self.mongo.activate(cm_user_id=username, names=[cloudname])
self.mongo.refresh(
cm_user_id=username,
names=[cloudname],
types=['images', 'flavors', 'servers'])
if serverdata:
servers_dict = serverdata
else:
servers_dict = self.mongo.servers(
clouds=[cloudname], cm_user_id=username)[cloudname]
for k, v in servers_dict.iteritems():
if '_id' in v:
del v['_id']
if group:
GroupManage = GroupManagement(username)
groups_list = GroupManage.get_groups_names_list()
if group not in groups_list:
servers_dict = {}
else:
vms_in_group_list = GroupManage.list_items_of_group(group, _type="VM")["VM"]
temp = {}
for k, v in servers_dict.iteritems():
if v['name'] in vms_in_group_list:
temp[k] = v
servers_dict = temp
images_dict = self.mongo.images(
clouds=[cloudname], cm_user_id=username)
flavors_dict = self.mongo.flavors(
clouds=[cloudname], cm_user_id=username)
if output:
server_names = []
server_ids = []
headers = ['index']
else:
headers = []
index = 1
to_print = []
def _getFromDict(dataDict, mapList):
# ref:
# http://stackoverflow.com/questions/14692690/access-python-nested-dictionary-items-via-a-list-of-keys
return reduce(lambda d, k: d[k], mapList, dataDict)
for i, v in servers_dict.iteritems():
values = []
cm_type = v['cm_type']
if output:
values.append(str(index))
server_names.append(v['name'])
server_ids.append(v['id'])
for k in itemkeys[cm_type]:
headers.append(k[0])
try:
val = _getFromDict(v, k[1:])
# ----------------------------------------
# special handler
# ----------------------------------------
if k[0] == 'flavor':
if val in flavors_dict[cloudname]:
val = flavors_dict[cloudname][val]['name']
else:
val = "unavailable"
elif k[0] == 'image':
if val in images_dict[cloudname]:
val = images_dict[cloudname][val]['name']
else:
val = "unavailable"
elif k[0] == 'addresses':
val = address_string(val)
# ----------------------------------------
values.append(str(val))
except:
# print sys.exc_info()
values.append(None)
index = index + 1
to_print.append(values)
count = index - 1
# Output format supports json and plain text in a grid table.
if print_format == "json":
print(json.dumps(servers_dict, indent=4))
elif print_format == "csv":
with open(".temp.csv", "wb") as f:
w = csv.DictWriter(f, servers_dict.keys())
w.writeheader()
w.writerow(servers_dict)
else:
# sentence = "cloud '{0}'".format(cloudname)
# print "+" + "-" * (len(sentence) - 2) + "+"
# print sentence
if to_print:
print(tabulate(to_print, headers, tablefmt="grid"))
# sentence = "count: {0}".format(count)
# print sentence
# print "+" + "-" * (len(sentence) - 2) + "+"
if output:
return [server_names, server_ids]
# ------------------------------------------------------------------------
class CloudCommand(CloudManage):
'''
a class of functions serve shell command "cloud"
'''
def __init__(self, arguments):
#
# TODO create init msg with flag if cm_congig is loaded
#
self.arguments = arguments
try:
self.config = cm_config()
except:
Console.error("There is a problem with the configuration yaml files")
self.username = self.config['cloudmesh']['profile']['username']
    def _cloud_list(self):
        """
        list the cloud names along with their activation status

        Reads ``--column`` to decide which attributes to display and
        ``--format`` (table, json or csv) to decide how to print them.
        """
        # ---------------------------------------------------------------
        # work out which columns to display
        # ---------------------------------------------------------------
        if self.arguments["--column"]:
            # default full column set, used by 'all' and 'semiall'
            col_option = [
                'active', 'user', 'label', 'host',
                'type/version', 'type', 'heading']
            if self.arguments["--column"] == 'all':
                col_option.append('credentials')
                col_option.append('defaults')
            elif self.arguments["--column"] == 'semiall':
                # 'semiall' keeps the default set above: everything
                # except credentials and defaults
                pass
            else:
                # user supplied an explicit comma-separated column list
                col_option = [x.strip()
                              for x in self.arguments["--column"].split(',')]
                if not set(col_option).issubset(set(['active',
                                                     'label',
                                                     'host',
                                                     'type/version',
                                                     'type',
                                                     'heading',
                                                     'user',
                                                     'credentials',
                                                     'defaults'])):
                    Console.error("ERROR: one or more column type doesn't exist, available columns are: "
                                  "active,label,host,type/version,type,heading,user,credentials,defaults "
                                  "('all' to diplay all, 'semiall' to display all except credentials and defauts)")
                    return
        else:
            col_option = ['active']
        headers = ['cloud'] + col_option
        standard_headers = []
        combined_headers = []

        def attribute_name_map(name):
            # translates a display column name into the attribute name
            # stored in the mongo cloud record
            if name == "cloud":
                return "cm_cloud"
            elif name == "label":
                return "cm_label"
            elif name == "host":
                return "cm_host"
            elif name == "type/version":
                return "cm_type_version"
            elif name == "type":
                return "cm_type"
            elif name == "heading":
                return "cm_heading"
            elif name == "user":
                return "cm_user_id"
            elif name == "credentials":
                return "credentials"
            elif name == "defaults":
                return "default"
            else:
                return name
        for item in headers:
            temp = attribute_name_map(item)
            standard_headers.append(temp)
            combined_headers.append([item, temp])
        # 'cloud' is used as the row key, not as a regular column
        combined_headers.remove(['cloud', 'cm_cloud'])
        clouds = self.get_clouds(self.username)
        clouds = clouds.sort([('cm_cloud', 1)])
        self._connect_to_mongo()
        activeclouds = self.mongo.active_clouds(self.username)
        if clouds.count() == 0:
            Console.warning(
                "no cloud in database, please import cloud first"
                "(cloud add <cloudYAMLfile> [--force])")
        else:
            # build {cloud name: {column: value}} for the printer
            d = {}
            for cloud in clouds:
                res = {}
                for key in standard_headers:
                    # -------------------------------------------------
                    # special informations from other place
                    # -------------------------------------------------
                    if key == "active":
                        # NOTE(review): 'active' is only set for active
                        # clouds; inactive ones simply lack the key
                        if cloud['cm_cloud'] in activeclouds:
                            res["active"] = 'True'
                    elif key == "default":
                        defaultinfo = self.get_cloud_defaultinfo(
                            self.username, cloud['cm_cloud'])
                        res["default"] = str(defaultinfo)
                    # -------------------------------------------------
                    else:
                        try:
                            res[key] = str(cloud[key])
                        except:
                            # attribute missing on this cloud record
                            pass
                d[cloud['cm_cloud']] = res
            if self.arguments['--format']:
                if self.arguments['--format'] not in ['table', 'json', 'csv']:
                    Console.error("please select printing format among table, json and csv")
                    return
                else:
                    p_format = self.arguments['--format']
            else:
                p_format = None
            shell_commands_dict_output(self.username,
                                       d,
                                       print_format=p_format,
                                       firstheader="cloud",
                                       header=combined_headers,
                                       oneitem=False,
                                       title=None,
                                       count=False)
    def _cloud_info(self):
        """
        print detailed information for a cloud

        Prints the cloud named in ``CLOUD``, every cloud with ``--all``,
        or the currently selected cloud otherwise.
        """
        def printing(cloud):
            # prints one cloud record, enriched with its activation status
            # and default flavor/image, in the requested format
            if '_id' in cloud:
                del cloud['_id']
            # cloud = dict_uni_to_ascii(cloud)
            # banner(cloud['cm_cloud'])
            # -------------------------------------------------
            # special informations from other place
            # -------------------------------------------------
            self._connect_to_mongo()
            # print "#", 70 * "-"
            if cloud['cm_cloud'] in self.mongo.active_clouds(self.username):
                cloud["active"] = "True"
            else:
                cloud["active"] = "False"
            defaultinfo = self.get_cloud_defaultinfo(
                self.username, cloud['cm_cloud'])
            cloud["default flavor"] = defaultinfo['flavor']
            cloud["default image"] = defaultinfo['image']
            # print "#", 70 * "#", "\n"
            # -------------------------------------------------
            if self.arguments['--format']:
                if self.arguments['--format'] not in ['table', 'json', 'csv']:
                    Console.error("please select printing format among table, json and csv")
                    return
                else:
                    p_format = self.arguments['--format']
            else:
                p_format = None
            shell_commands_dict_output(self.username,
                                       cloud,
                                       print_format=p_format,
                                       # "cloud '{0}' information".format(cloud['cm_cloud']),
                                       title=None,
                                       oneitem=True)
        if self.arguments['CLOUD']:
            # a cloud name was given explicitly
            cloud = self.get_clouds(
                self.username, getone=True, cloudname=self.arguments['CLOUD'])
            if cloud is None:
                Console.warning(
                    "ERROR: could not find cloud '{0}'".format(self.arguments['CLOUD']))
            else:
                printing(cloud)
        elif self.arguments['--all']:
            # print every registered cloud sorted by name
            clouds = self.get_clouds(self.username)
            clouds = clouds.sort([('cm_cloud', 1)])
            if clouds.count() == 0:
                Console.info(
                    "no cloud in database, please import cloud information by 'cloud add <cloudYAMLfile>'")
                return
            for cloud in clouds:
                printing(cloud)
        else:
            # fall back to the currently selected cloud
            selected_cloud = self.get_selected_cloud(self.username)
            cloud = self.get_clouds(
                self.username, getone=True, cloudname=selected_cloud)
            if cloud is None:
                Console.warning(
                    "no cloud information of '{0}' in database".format(selected_cloud))
                return
            printing(cloud)
def _cloud_select(self):
"""
select a cloud as a temporary defaut cloud to work with, so that the
user doesn't need to type a cloud name everywhere in command line
"""
if self.arguments['CLOUD']:
cloud = self.get_clouds(
self.username, getone=True, cloudname=self.arguments['CLOUD'])
if cloud is None:
Console.warning(
"no cloud information of '{0}' in database, please import it by 'cloud add <cloudYAMLfile>'".format(
self.arguments['CLOUD']))
return
self.update_selected_cloud(self.username, self.arguments['CLOUD'])
Console.ok("cloud '{0}' is selected".format(self.arguments['CLOUD']))
else:
clouds = self.get_clouds(self.username)
cloud_names = []
for cloud in clouds:
cloud_names.append(cloud['cm_cloud'].encode("ascii"))
cloud_names.sort()
res = menu_return_num(
title="select a cloud", menu_list=cloud_names, tries=3)
if res == 'q':
return
self.update_selected_cloud(self.username, cloud_names[res])
Console.ok("cloud '{0}' is selected".format(cloud_names[res]))
def _cloud_alias(self):
"""
rename a cloud
"""
if self.arguments['CLOUD']:
name = self.arguments['CLOUD']
else:
name = self.get_selected_cloud(self.username)
if self.get_clouds(self.username, getone=True, cloudname=name) is None:
log.error("no cloud information of '{0}' in database".format(name))
return
if yn_choice("rename cloud '{0}' to '{1}'?".format(name,
self.arguments['NAME']),
default='n',
tries=3):
self.update_cloud_name(self.username, name, self.arguments['NAME'])
else:
return
def _cloud_activate(self):
"""
activate a cloud
"""
# DEBUG
try:
_args = locals()
del (_args['self'])
log.debug("[{0}()] called with [{1}]".format(sys._getframe().f_code.co_name,
str(_args)))
except:
pass
if self.arguments['CLOUD']:
name = self.arguments['CLOUD']
else:
name = self.get_selected_cloud(self.username)
if self.get_clouds(self.username, getone=True, cloudname=name) is None:
log.error("no cloud information of '{0}' in database".format(name))
return
# confirmation
# if yn_choice("activate cloud '{0}'?".format(name), default = 'n', tries = 3):
# res = self.activate_cloud(self.username, name)
# if res == 0:
# return
# elif res == 1:
# print "cloud '{0}' activated.".format(name)
# else:
# return
res = | |
<gh_stars>0
#
# This file is part of the chi repository
# (https://github.com/DavAug/chi/) which is released under the
# BSD 3-clause license. See accompanying LICENSE.md for copyright notice and
# full license details.
#
import unittest
import numpy as np
from scipy.stats import norm, truncnorm
import chi
class TestGaussianModel(unittest.TestCase):
"""
Tests the chi.GaussianModel class.
"""
    @classmethod
    def setUpClass(cls):
        # Shared model under test, built once for all test methods.
        cls.pop_model = chi.GaussianModel()
def test_compute_log_likelihood(self):
n_ids = 10
# Test case I: psis = 1, mu = 1, sigma = 1
# Score reduces to
# -nids * np.log(2pi) / 2
# Test case I.1:
psis = [1] * n_ids
mu = 1
sigma = 1
ref_score = - n_ids * np.log(2 * np.pi) / 2
parameters = [mu, sigma]
score = self.pop_model.compute_log_likelihood(parameters, psis)
self.assertAlmostEqual(score, ref_score)
# Test case I.2:
psis = [5] * n_ids
mu = 5
sigma = 1
ref_score = - n_ids * np.log(2 * np.pi) / 2
parameters = [mu, sigma]
score = self.pop_model.compute_log_likelihood(parameters, psis)
self.assertAlmostEqual(score, ref_score)
# Test case II: psis != mu, sigma = 1.
# Score reduces to
# -nids * (np.log(2pi)/2 + (psi - mu)^2/2)
# Test case II.1:
psis = [2] * n_ids
mu = 1
sigma = 1
ref_score = \
- n_ids * np.log(2 * np.pi) / 2 \
- n_ids * (psis[0] - mu)**2 / 2
parameters = [mu, sigma]
score = self.pop_model.compute_log_likelihood(parameters, psis)
self.assertAlmostEqual(score, ref_score)
# Test case II.2:
psis = [2] * n_ids
mu = 10
sigma = 1
ref_score = \
- n_ids * np.log(2 * np.pi) / 2 \
- n_ids * (psis[0] - mu)**2 / 2
parameters = [mu, sigma]
score = self.pop_model.compute_log_likelihood(parameters, psis)
self.assertAlmostEqual(score, ref_score)
# # Test case III: Any parameters
# Test case III.1
psis = np.arange(10)
mu = 1
sigma = 1
ref_score = \
- n_ids * np.log(2 * np.pi) / 2 \
- n_ids * np.log(sigma) \
- np.sum((psis - mu)**2) / (2 * sigma ** 2)
parameters = [mu, sigma]
score = self.pop_model.compute_log_likelihood(parameters, psis)
self.assertAlmostEqual(score, ref_score)
# Test case III.2
psis = np.arange(10)
mu = 10
sigma = 15
ref_score = \
- n_ids * np.log(2 * np.pi) / 2 \
- n_ids * np.log(sigma) \
- np.sum((psis - mu)**2) / (2 * sigma ** 2)
parameters = [mu, sigma]
score = self.pop_model.compute_log_likelihood(parameters, psis)
self.assertAlmostEqual(score, ref_score)
# Test case IV: sigma negative or zero
# Test case IV.1
psis = [np.exp(10)] * n_ids
mu = 1
sigma = 0
parameters = [mu] + [sigma]
score = self.pop_model.compute_log_likelihood(parameters, psis)
self.assertEqual(score, -np.inf)
# Test case IV.2
psis = [np.exp(10)] * n_ids
mu = 1
sigma = -1
parameters = [mu] + [sigma]
score = self.pop_model.compute_log_likelihood(parameters, psis)
self.assertEqual(score, -np.inf)
def test_compute_pointwise_ll(self):
# Test case I.1:
psis = np.arange(10)
mu = 1
sigma = 1
ref_scores = \
- np.log(2 * np.pi) / 2 \
- np.log(sigma) \
- (psis - mu)**2 / (2 * sigma ** 2)
parameters = [mu, sigma]
pw_scores = self.pop_model.compute_pointwise_ll(parameters, psis)
score = self.pop_model.compute_log_likelihood(parameters, psis)
self.assertEqual(len(pw_scores), 10)
self.assertAlmostEqual(np.sum(pw_scores), score)
self.assertAlmostEqual(pw_scores[0], ref_scores[0])
self.assertAlmostEqual(pw_scores[1], ref_scores[1])
self.assertAlmostEqual(pw_scores[2], ref_scores[2])
self.assertAlmostEqual(pw_scores[3], ref_scores[3])
self.assertAlmostEqual(pw_scores[4], ref_scores[4])
self.assertAlmostEqual(pw_scores[5], ref_scores[5])
self.assertAlmostEqual(pw_scores[6], ref_scores[6])
self.assertAlmostEqual(pw_scores[7], ref_scores[7])
self.assertAlmostEqual(pw_scores[8], ref_scores[8])
self.assertAlmostEqual(pw_scores[9], ref_scores[9])
# Test case I.2:
psis = np.linspace(3, 5, 10)
mu = 2
sigma = 4
ref_scores = \
- np.log(2 * np.pi) / 2 \
- np.log(sigma) \
- (psis - mu)**2 / (2 * sigma ** 2)
parameters = [mu, sigma]
pw_scores = self.pop_model.compute_pointwise_ll(parameters, psis)
score = self.pop_model.compute_log_likelihood(parameters, psis)
self.assertEqual(len(pw_scores), 10)
self.assertAlmostEqual(np.sum(pw_scores), score)
self.assertAlmostEqual(pw_scores[0], ref_scores[0])
self.assertAlmostEqual(pw_scores[1], ref_scores[1])
self.assertAlmostEqual(pw_scores[2], ref_scores[2])
self.assertAlmostEqual(pw_scores[3], ref_scores[3])
self.assertAlmostEqual(pw_scores[4], ref_scores[4])
self.assertAlmostEqual(pw_scores[5], ref_scores[5])
self.assertAlmostEqual(pw_scores[6], ref_scores[6])
self.assertAlmostEqual(pw_scores[7], ref_scores[7])
self.assertAlmostEqual(pw_scores[8], ref_scores[8])
self.assertAlmostEqual(pw_scores[9], ref_scores[9])
# Test case IV: sigma negative or zero
# Test case IV.1
psis = [np.exp(10)] * 3
mu = 1
sigma = 0
parameters = [mu] + [sigma]
scores = self.pop_model.compute_pointwise_ll(parameters, psis)
self.assertEqual(scores[0], -np.inf)
self.assertEqual(scores[1], -np.inf)
self.assertEqual(scores[2], -np.inf)
# Test case IV.2
psis = [np.exp(10)] * 3
mu = 1
sigma = -10
parameters = [mu] + [sigma]
scores = self.pop_model.compute_pointwise_ll(parameters, psis)
self.assertEqual(scores[0], -np.inf)
self.assertEqual(scores[1], -np.inf)
self.assertEqual(scores[2], -np.inf)
def test_compute_sensitivities(self):
n_ids = 10
# Test case I: psis = mu, sigma = 1
# Sensitivities reduce to
# dpsi = 0
# dmu = 0
# dsigma = -n_ids
# Test case I.1:
psis = [1] * n_ids
mu = 1
sigma = 1
# Compute ref scores
parameters = [mu, sigma]
ref_ll = self.pop_model.compute_log_likelihood(parameters, psis)
ref_dpsi = 0
ref_dmu = 0
ref_dsigma = -n_ids
# Compute log-likelihood and sensitivities
score, sens = self.pop_model.compute_sensitivities(parameters, psis)
self.assertAlmostEqual(score, ref_ll)
self.assertEqual(len(sens), n_ids + 2)
self.assertAlmostEqual(sens[0], ref_dpsi)
self.assertAlmostEqual(sens[1], ref_dpsi)
self.assertAlmostEqual(sens[2], ref_dpsi)
self.assertAlmostEqual(sens[3], ref_dpsi)
self.assertAlmostEqual(sens[4], ref_dpsi)
self.assertAlmostEqual(sens[5], ref_dpsi)
self.assertAlmostEqual(sens[6], ref_dpsi)
self.assertAlmostEqual(sens[7], ref_dpsi)
self.assertAlmostEqual(sens[8], ref_dpsi)
self.assertAlmostEqual(sens[9], ref_dpsi)
self.assertAlmostEqual(sens[10], ref_dmu)
self.assertAlmostEqual(sens[11], ref_dsigma)
# Test case I.2:
psis = [10] * n_ids
mu = 10
sigma = 1
# Compute ref scores
parameters = [mu, sigma]
ref_ll = self.pop_model.compute_log_likelihood(parameters, psis)
ref_dpsi = 0
ref_dmu = 0
ref_dsigma = -n_ids
# Compute log-likelihood and sensitivities
score, sens = self.pop_model.compute_sensitivities(parameters, psis)
self.assertAlmostEqual(score, ref_ll)
self.assertEqual(len(sens), n_ids + 2)
self.assertAlmostEqual(sens[0], ref_dpsi)
self.assertAlmostEqual(sens[1], ref_dpsi)
self.assertAlmostEqual(sens[2], ref_dpsi)
self.assertAlmostEqual(sens[3], ref_dpsi)
self.assertAlmostEqual(sens[4], ref_dpsi)
self.assertAlmostEqual(sens[5], ref_dpsi)
self.assertAlmostEqual(sens[6], ref_dpsi)
self.assertAlmostEqual(sens[7], ref_dpsi)
self.assertAlmostEqual(sens[8], ref_dpsi)
self.assertAlmostEqual(sens[9], ref_dpsi)
self.assertAlmostEqual(sens[10], ref_dmu)
self.assertAlmostEqual(sens[11], ref_dsigma)
# Test case II: psis != mu, sigma = 1
# Sensitivities reduce to
# dpsi = mu - psi
# dmu = psi - mu
# dsigma = nids * ((psi - mu)^2 - 1)
# Test case II.1:
psis = np.array([1] * n_ids)
mu = 10
sigma = 1
# Compute ref scores
parameters = [mu, sigma]
ref_ll = self.pop_model.compute_log_likelihood(parameters, psis)
ref_dpsi = mu - psis[0]
ref_dmu = np.sum(psis - mu)
ref_dsigma = - n_ids + np.sum((psis - mu)**2)
# Compute log-likelihood and sensitivities
score, sens = self.pop_model.compute_sensitivities(parameters, psis)
self.assertAlmostEqual(score, ref_ll)
self.assertEqual(len(sens), n_ids + 2)
self.assertAlmostEqual(sens[0], ref_dpsi)
self.assertAlmostEqual(sens[1], ref_dpsi)
self.assertAlmostEqual(sens[2], ref_dpsi)
self.assertAlmostEqual(sens[3], ref_dpsi)
self.assertAlmostEqual(sens[4], ref_dpsi)
self.assertAlmostEqual(sens[5], ref_dpsi)
self.assertAlmostEqual(sens[6], ref_dpsi)
self.assertAlmostEqual(sens[7], ref_dpsi)
self.assertAlmostEqual(sens[8], ref_dpsi)
self.assertAlmostEqual(sens[9], ref_dpsi)
self.assertAlmostEqual(sens[10], ref_dmu)
self.assertAlmostEqual(sens[11], ref_dsigma)
# Test case II.2:
psis = np.array([7] * n_ids)
mu = 5
sigma = 1
# Compute ref scores
parameters = [mu, sigma]
ref_ll = self.pop_model.compute_log_likelihood(parameters, psis)
ref_dpsi = mu - psis[0]
ref_dmu = np.sum(psis - mu)
ref_dsigma = - n_ids + np.sum((psis - mu)**2)
# Compute log-likelihood and sensitivities
score, sens = self.pop_model.compute_sensitivities(parameters, psis)
self.assertAlmostEqual(score, ref_ll)
self.assertEqual(len(sens), n_ids + 2)
self.assertAlmostEqual(sens[0], ref_dpsi)
self.assertAlmostEqual(sens[1], ref_dpsi)
self.assertAlmostEqual(sens[2], ref_dpsi)
self.assertAlmostEqual(sens[3], ref_dpsi)
self.assertAlmostEqual(sens[4], ref_dpsi)
self.assertAlmostEqual(sens[5], ref_dpsi)
self.assertAlmostEqual(sens[6], ref_dpsi)
self.assertAlmostEqual(sens[7], ref_dpsi)
self.assertAlmostEqual(sens[8], ref_dpsi)
self.assertAlmostEqual(sens[9], ref_dpsi)
self.assertAlmostEqual(sens[10], ref_dmu)
self.assertAlmostEqual(sens[11], ref_dsigma)
# Test case III: psis != mu, sigma != 1
# Sensitivities reduce to
# dpsi = (mu - psi) / std**2
# dmu = sum((psi - mu)) / std**2
# dsigma = -nids / std + sum((psi - mu)^2) / std**2
# Test case III.1:
psis = np.array([1] * n_ids)
mu = 10
sigma = 2
# Compute ref scores
parameters = [mu, sigma]
ref_ll = self.pop_model.compute_log_likelihood(parameters, psis)
ref_dpsi = (mu - psis[0]) / sigma**2
ref_dmu = np.sum(psis - mu) / sigma**2
ref_dsigma = - n_ids / sigma + np.sum((psis - mu)**2) / sigma**3
# Compute log-likelihood and sensitivities
score, sens = self.pop_model.compute_sensitivities(parameters, psis)
self.assertAlmostEqual(score, ref_ll)
self.assertEqual(len(sens), n_ids + 2)
self.assertAlmostEqual(sens[0], ref_dpsi)
self.assertAlmostEqual(sens[1], ref_dpsi)
self.assertAlmostEqual(sens[2], ref_dpsi)
self.assertAlmostEqual(sens[3], ref_dpsi)
self.assertAlmostEqual(sens[4], ref_dpsi)
self.assertAlmostEqual(sens[5], ref_dpsi)
self.assertAlmostEqual(sens[6], ref_dpsi)
self.assertAlmostEqual(sens[7], ref_dpsi)
self.assertAlmostEqual(sens[8], ref_dpsi)
self.assertAlmostEqual(sens[9], ref_dpsi)
self.assertAlmostEqual(sens[10], ref_dmu)
self.assertAlmostEqual(sens[11], ref_dsigma, 5)
# Test case III.2:
psis = np.array([7] * n_ids)
mu = 0.5
sigma = 0.1
# Compute ref scores
parameters = [mu, sigma]
ref_ll = self.pop_model.compute_log_likelihood(parameters, psis)
ref_dpsi = (mu - psis[0]) / sigma**2
ref_dmu = np.sum(psis - mu) / sigma**2
ref_dsigma = - n_ids / sigma + np.sum((psis - mu)**2) / sigma**3
# Compute log-likelihood and sensitivities
score, sens = self.pop_model.compute_sensitivities(parameters, psis)
self.assertAlmostEqual(score, ref_ll)
self.assertEqual(len(sens), n_ids + 2)
self.assertAlmostEqual(sens[0], ref_dpsi)
self.assertAlmostEqual(sens[1], ref_dpsi)
self.assertAlmostEqual(sens[2], ref_dpsi)
self.assertAlmostEqual(sens[3], ref_dpsi)
self.assertAlmostEqual(sens[4], ref_dpsi)
self.assertAlmostEqual(sens[5], ref_dpsi)
self.assertAlmostEqual(sens[6], ref_dpsi)
self.assertAlmostEqual(sens[7], ref_dpsi)
self.assertAlmostEqual(sens[8], ref_dpsi)
self.assertAlmostEqual(sens[9], ref_dpsi)
self.assertAlmostEqual(sens[10], ref_dmu)
self.assertAlmostEqual(sens[11], ref_dsigma)
# Test case IV: Compare gradients to numpy.gradient
epsilon = 0.001
n_parameters = n_ids + self.pop_model.n_parameters()
parameters = np.ones(shape=n_parameters)
| |
text="\nУстановить сертифиакт(ы) для локального пользователя\nили для всех сразу? (хранилище mRoot)")
dialogWindow.set_title("Вопрос")
dialogWindow.add_buttons("Локально", Gtk.ResponseType.CANCEL, "Для всех", Gtk.ResponseType.OK)
dialogWindow.show_all()
response = dialogWindow.run()
dialogWindow.destroy()
return response
def install_HDIMAGE(self, widget):
find_hdimage = os.popen(f"/opt/cprocsp/sbin/{arch}/cpconfig -hardware reader -view | grep HDIMAGE").readlines()
if not find_hdimage[0]:
dialogWindow = Gtk.MessageDialog(parent=self,
modal=True, destroy_with_parent=True,
message_type=Gtk.MessageType.QUESTION,
buttons=Gtk.ButtonsType.OK_CANCEL,
text="Введите пароль root пользователя")
dialogWindow.set_title("Создание HDIMAGE хранилища")
dialogBox = dialogWindow.get_content_area()
pinEntry = Gtk.Entry()
pinEntry.set_visibility(False)
dialogBox.pack_end(pinEntry, False, False, 0)
dialogWindow.show_all()
response = dialogWindow.run()
if response == Gtk.ResponseType.OK:
pin = pinEntry.get_text()
ROOTPSW = pin # сделать окно диалог с запросом рут пароля. указать что будет создано хранилище
output = os.popen(
f'su - root -c "/opt/cprocsp/sbin/{arch}/cpconfig -hardware reader -add HDIMAGE store" '
f'<<< "{ROOTPSW}"').readlines()
dialogWindow.destroy()
return True
else:
dialogWindow.destroy()
return False
    def select_container_to_import_cert(self, liststore):
        """
        Shows a modal dialog letting the user tick one of the containers
        recognised by the system.

        :param liststore: Gtk.ListStore of (container name, selected flag)
        :return: False when the user pressed Cancel, True otherwise
        """
        # TODO (translated): build the container-selection window from the
        # list recognised by the system; try the export command from the
        # Telegram chat with auto-install first and, if that fails, follow
        # the knowledge-base recipe with an explicit public key.
        self.liststore_all_containers = liststore
        dialogWindow = Gtk.MessageDialog(parent=self,
                                         modal=True, destroy_with_parent=True,
                                         message_type=Gtk.MessageType.QUESTION,
                                         buttons=Gtk.ButtonsType.OK_CANCEL)
        dialogWindow.set_title("Выберите контейнер")
        dialogWindow.set_resizable(True)
        dialogBox = dialogWindow.get_content_area()
        treeview = Gtk.TreeView(model=self.liststore_all_containers)
        # the widest container name drives the window width chosen below
        max_len = 0
        for elem in self.liststore_all_containers:
            if max_len < len(elem[0]):
                max_len = len(elem[0])
        renderer_text = Gtk.CellRendererText()
        column_text = Gtk.TreeViewColumn("Контейнеры", renderer_text, text=0)
        treeview.append_column(column_text)
        renderer_toggle = Gtk.CellRendererToggle()
        renderer_toggle.connect("toggled", self.on_cell_all_toggled)
        column_toggle = Gtk.TreeViewColumn("Выбранный", renderer_toggle, active=1)
        treeview.append_column(column_toggle)
        # selection happens through the toggle column, not row selection
        sel = treeview.get_selection()
        sel.set_mode(Gtk.SelectionMode.NONE)
        scrolled_tree = Gtk.ScrolledWindow()
        scrolled_tree.add(treeview)
        if max_len < 40:
            dialogWindow.set_size_request(380, 200)
            scrolled_tree.set_size_request(380, 200)
        else:
            dialogWindow.set_size_request(580, 200)
            scrolled_tree.set_size_request(580, 200)
        dialogBox.pack_end(scrolled_tree, True, True, 0)
        dialogWindow.show_all()
        response = dialogWindow.run()
        dialogWindow.destroy()
        if (response == Gtk.ResponseType.CANCEL):
            return False
        else:
            return True
def install_container_from_token(self, liststore):
    """Show a modal dialog listing containers found on the token so the user
    can tick the one to install.

    Side effect: stores *liststore* on self.liststore_containers.
    Returns False when the user cancels, True otherwise.
    """
    self.liststore_containers = liststore
    dlg = Gtk.MessageDialog(parent=self,
                            modal=True, destroy_with_parent=True,
                            message_type=Gtk.MessageType.QUESTION,
                            buttons=Gtk.ButtonsType.OK_CANCEL)
    dlg.set_title("Выберите контейнер с токена")
    dlg.set_resizable(True)
    content = dlg.get_content_area()
    tree = Gtk.TreeView(model=self.liststore_containers)
    # The longest container name decides how wide the dialog must be.
    widest = 0
    for row in self.liststore_containers:
        widest = max(widest, len(row[0]))
    text_renderer = Gtk.CellRendererText()
    tree.append_column(Gtk.TreeViewColumn("Контейнеры", text_renderer, text=0))
    toggle_renderer = Gtk.CellRendererToggle()
    toggle_renderer.connect("toggled", self.on_cell_toggled)
    tree.append_column(Gtk.TreeViewColumn("Выбранный", toggle_renderer, active=1))
    tree.get_selection().set_mode(Gtk.SelectionMode.NONE)
    scroller = Gtk.ScrolledWindow()
    scroller.add(tree)
    width = 380 if widest < 40 else 580
    dlg.set_size_request(width, 200)
    scroller.set_size_request(width, 200)
    content.pack_end(scroller, True, True, 0)
    dlg.show_all()
    response = dlg.run()
    dlg.destroy()
    return response != Gtk.ResponseType.CANCEL
def choose_dest_stores(self, liststore):
    """Show a modal dialog asking the user to tick one destination store
    for container export.

    Side effect: stores *liststore* on self.liststore_dest_stores.
    Returns False when the user cancels, True otherwise.
    """
    self.liststore_dest_stores = liststore
    dlg = Gtk.MessageDialog(parent=self,
                            modal=True, destroy_with_parent=True,
                            message_type=Gtk.MessageType.QUESTION,
                            buttons=Gtk.ButtonsType.OK_CANCEL)
    dlg.set_title("Выберите 1 хранилище для экспортирования контейнера")
    dlg.set_resizable(True)
    content = dlg.get_content_area()
    tree = Gtk.TreeView(model=self.liststore_dest_stores)
    # The longest store name decides how wide the dialog must be.
    widest = 0
    for row in self.liststore_dest_stores:
        widest = max(widest, len(row[0]))
    text_renderer = Gtk.CellRendererText()
    tree.append_column(Gtk.TreeViewColumn("Хранилища", text_renderer, text=0))
    toggle_renderer = Gtk.CellRendererToggle()
    toggle_renderer.connect("toggled", self.on_cell_dest_toggled)
    tree.append_column(Gtk.TreeViewColumn("Выбранный", toggle_renderer, active=1))
    tree.get_selection().set_mode(Gtk.SelectionMode.NONE)
    scroller = Gtk.ScrolledWindow()
    scroller.add(tree)
    width = 380 if widest < 40 else 580
    dlg.set_size_request(width, 200)
    scroller.set_size_request(width, 200)
    content.pack_end(scroller, True, True, 0)
    dlg.show_all()
    response = dlg.run()
    dlg.destroy()
    return response != Gtk.ResponseType.CANCEL
def choose_open_cert_to_close_container(self, container):
    """Ask the user for a .cer file and bind it to *container* via certmgr.

    Returns a list: [True] on success, [False, reason] otherwise.
    """
    dialog = Gtk.FileChooserDialog(title="Выберите сертификат пользователя", parent=self,
                                   action=Gtk.FileChooserAction.OPEN,
                                   )
    dialog.add_buttons(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
                       Gtk.STOCK_OPEN, Gtk.ResponseType.OK)
    filter = Gtk.FileFilter()
    filter.set_name("Сертификаты")
    filter.add_mime_type("Сертификаты")
    filter.add_pattern("*.cer")
    filter.add_pattern("*.CER")
    dialog.add_filter(filter)
    # Resolve the user's home directory from the (possibly domain-qualified) login.
    domain_name = os.popen("echo $USERNAME").readlines()[0].strip()
    if "\\" in domain_name:
        domain_name = domain_name.split("\\")[1]
    elif "@" in domain_name:
        domain_name = domain_name.split("@")[0]
    find_name = os.popen(f"find /home/ -maxdepth 2 -name *{domain_name}*").readlines()
    # BUG FIX: find_name is a list of output lines; formatting the whole list
    # produced a bogus path like "['/home/user\n']" so the chooser never opened
    # in the user's home. Use the first hit, if any.
    if find_name:
        dialog.set_current_folder(find_name[0].strip())
    dialog.set_select_multiple(False)
    response = dialog.run()
    if response == Gtk.ResponseType.OK:
        file_name = dialog.get_filename()
        dialog.destroy()
        if file_name:
            output = os.popen(
                f"/opt/cprocsp/bin/{arch}/certmgr -inst -store uMy -file '{file_name}' -cont '{container}'").readlines()
            for l in output:
                if "[ErrorCode: 0x00000000]" in l:
                    return [True]
        # certmgr did not report success (or no file chosen). Previously this
        # path fell through returning None, which crashed callers indexing [0].
        return [False, "Не удалось установить сертификат"]
    # CANCEL or window closed (DELETE_EVENT): always destroy the dialog.
    dialog.destroy()
    return [False, "Отменено пользователем"]
def install_cert_from_or_to_container(self, container, name_cont):
    """Install a certificate for *container* into the user's uMy store.

    First tries the automatic route: enumerate containers with csptest and let
    certmgr pull the public part. If that fails, falls back to a file chooser
    so the user can supply the .cer manually. Side effect: sets
    self.output_code_token to the overall success flag. Returns True on
    success, False otherwise.
    """
    self.output_code_token = False
    name = container.split("\\")[-1:][0].strip()
    csptest = subprocess.Popen(['/opt/cprocsp/bin/%s/csptest' % arch, '-keyset', '-enum_cont', '-unique', '-fqcn',
                                '-verifyc'], stdout=subprocess.PIPE)
    output = csptest.communicate()[0].decode('cp1251').encode('utf-8').decode("utf-8")
    certs = [line for line in output.split("\n") if name_cont in line and name in line]
    for cert in certs:
        cert = cert.split("|")[1].strip()
        output = os.popen(f"/opt/cprocsp/bin/{arch}/certmgr -inst -store uMy -cont '{cert}'").readlines()
        for l in output:
            if "[ErrorCode: 0x00000000]" in l:
                self.output_code_token = True
                return True
    # Automatic route failed: let the user pick the certificate file himself.
    dialog = Gtk.FileChooserDialog(title="Выберите сертификат пользователя", parent=self,
                                   action=Gtk.FileChooserAction.OPEN,
                                   )
    dialog.add_buttons(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
                       Gtk.STOCK_OPEN, Gtk.ResponseType.OK)
    filter = Gtk.FileFilter()
    filter.set_name("Сертификаты")
    filter.add_mime_type("Сертификаты")
    filter.add_pattern("*.cer")
    filter.add_pattern("*.CER")
    dialog.add_filter(filter)
    domain_name = os.popen("echo $USERNAME").readlines()[0].strip()
    if "\\" in domain_name:
        domain_name = domain_name.split("\\")[1]
    elif "@" in domain_name:
        domain_name = domain_name.split("@")[0]
    find_name = os.popen(f"find /home/ -maxdepth 2 -name *{domain_name}*").readlines()
    # BUG FIX: find_name is a list of output lines; formatting the whole list
    # produced a bogus folder path like "['/home/user\n']". Use the first hit.
    if find_name:
        dialog.set_current_folder(find_name[0].strip())
    dialog.set_select_multiple(False)
    response = dialog.run()
    if response == Gtk.ResponseType.OK:
        file_name = dialog.get_filename()
        dialog.destroy()
        if file_name:
            output = os.popen(
                f"/opt/cprocsp/bin/{arch}/certmgr -inst -store uMy -file '{file_name}' -cont '{container}'").readlines()
            for l in output:
                if "[ErrorCode: 0x00000000]" in l:
                    self.output_code_token = True
                    return True
        # certmgr did not report success; previously this path returned None.
        return False
    # CANCEL or window closed (DELETE_EVENT): destroy the dialog either way.
    dialog.destroy()
    self.output_code_token = False
    return False
def install_new_cert_to_container(self, container):
    """Let the user pick a .cer file and install it into *container* via certmgr.

    Returns "success" on success, certmgr's stderr text on failure, or a
    cancellation message when the dialog is dismissed.
    """
    dialog = Gtk.FileChooserDialog(title="Выберите сертификат пользователя", parent=self,
                                   action=Gtk.FileChooserAction.OPEN,
                                   )
    dialog.add_buttons(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
                       Gtk.STOCK_OPEN, Gtk.ResponseType.OK)
    filter = Gtk.FileFilter()
    filter.set_name("Сертификаты")
    filter.add_mime_type("Сертификаты")
    filter.add_pattern("*.cer")
    filter.add_pattern("*.CER")
    dialog.add_filter(filter)
    domain_name = os.popen("echo $USERNAME").readlines()[0].strip()
    if "\\" in domain_name:
        domain_name = domain_name.split("\\")[1]
    elif "@" in domain_name:
        domain_name = domain_name.split("@")[0]
    find_name = os.popen(f"find /home/ -maxdepth 2 -name *{domain_name}*").readlines()
    # BUG FIX: guard against an empty find result; find_name[0] used to raise
    # IndexError when no matching home directory exists.
    if find_name:
        dialog.set_current_folder(find_name[0].strip())
    dialog.set_select_multiple(False)
    response = dialog.run()
    if response == Gtk.ResponseType.OK:
        file_name = dialog.get_filename()
        dialog.destroy()
        if file_name:
            p = subprocess.Popen(['/opt/cprocsp/bin/%s/certmgr' % arch, '-inst', '-file', file_name, '-cont',
                                  container, '-inst_to_cont'], stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
            output, error = p.communicate()
            if p.returncode != 0:
                return error.decode("utf-8").strip()
            return "success"
        return "Отменено пользователем"
    # CANCEL or window closed (DELETE_EVENT): always destroy the dialog.
    dialog.destroy()
    return "Отменено пользователем"
def return_output_code_token(self):
    """Return the success flag set by the last install_cert_from_or_to_container() run."""
    return self.output_code_token
def choose_folder_dialog(self, widget):
    """Open a folder chooser; return the chosen path, or False if dismissed."""
    chooser = Gtk.FileChooserDialog(title="Выберите директорию", parent=self,
                                    action=Gtk.FileChooserAction.SELECT_FOLDER,
                                    )
    chooser.add_buttons(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
                        "Выбрать директорию", Gtk.ResponseType.OK)
    chooser.set_default_size(800, 400)
    # Default start folder; removable-media defaults (/run/media/..., /media/)
    # could be preferred when a token is detected.
    chooser.set_current_folder("/home")
    response = chooser.run()
    chosen = chooser.get_filename() if response == Gtk.ResponseType.OK else False
    chooser.destroy()
    return chosen
def install_container_from_flash(self, liststore):
    """Show a modal dialog listing containers found on a usb-flash drive so
    the user can tick the one to install.

    Side effect: stores *liststore* on self.liststore_flashes.
    Returns False when the user cancels, True otherwise.
    """
    self.liststore_flashes = liststore
    dlg = Gtk.MessageDialog(parent=self,
                            modal=True, destroy_with_parent=True,
                            message_type=Gtk.MessageType.QUESTION,
                            buttons=Gtk.ButtonsType.OK_CANCEL)
    dlg.set_title("Выберите контейнер с usb-flash накопителя")
    dlg.set_resizable(True)
    content = dlg.get_content_area()
    tree = Gtk.TreeView(model=self.liststore_flashes)
    text_renderer = Gtk.CellRendererText()
    tree.append_column(Gtk.TreeViewColumn("Контейнеры", text_renderer, text=0))
    toggle_renderer = Gtk.CellRendererToggle()
    toggle_renderer.connect("toggled", self.on_cell_toggled_flash)
    # The longest container name decides how wide the dialog must be.
    widest = 0
    for row in self.liststore_flashes:
        widest = max(widest, len(row[0]))
    tree.append_column(Gtk.TreeViewColumn("Выбранный", toggle_renderer, active=1))
    tree.get_selection().set_mode(Gtk.SelectionMode.NONE)
    scroller = Gtk.ScrolledWindow()
    scroller.add(tree)
    width = 380 if widest < 40 else 580
    dlg.set_size_request(width, 200)
    scroller.set_size_request(width, 200)
    content.pack_end(scroller, True, True, 0)
    dlg.show_all()
    response = dlg.run()
    dlg.destroy()
    return response != Gtk.ResponseType.CANCEL
def call_secretnet_configs(self, for_what, udev):
    """Ensure the udev rule file for *for_what* exists; create it as root if not.

    Returns "installed" when the rule file already exists, "just_installed"
    after creating it, or "canceled" if the user dismissed the password prompt.
    """
    file = f"/etc/udev/rules.d/87-{udev}_usb.rules"
    if os.path.exists(file):
        return "installed"
    dialogWindow = Gtk.MessageDialog(parent=self,
                                     modal=True, destroy_with_parent=True,
                                     message_type=Gtk.MessageType.QUESTION,
                                     buttons=Gtk.ButtonsType.OK_CANCEL,
                                     text="Введите пароль root пользователя, после применения\nпотребуется переподключить usb-flash накопитель")
    dialogWindow.set_title(f"Настройка udev правила для {for_what}")
    dialogBox = dialogWindow.get_content_area()
    pinEntry = Gtk.Entry()
    pinEntry.set_visibility(False)  # hide the root password while typing
    dialogBox.pack_end(pinEntry, False, False, 0)
    dialogWindow.show_all()
    response = dialogWindow.run()
    if response != Gtk.ResponseType.OK:
        dialogWindow.destroy()
        return "canceled"
    ROOTPSW = pinEntry.get_text()
    # BUG FIX: the rule used to be written to the hard-coded
    # 87-domain_usb.rules regardless of *udev*, so the existence check above
    # never matched for other names and the prompt reappeared every time.
    # SECURITY NOTE: the root password is interpolated into a shell command
    # line; consider subprocess with stdin instead of os.system + herestring.
    os.system("""su - root -c 'cat << EOF > """ + file + """ \n""" \
              """ENV{ID_FS_USAGE}=="filesystem|other|crypto", ENV{UDISKS_FILESYSTEM_SHARED}="1"\n""" \
              """EOF'""" + f"<<< '{ROOTPSW}'")
    os.system(f"""su - root -c 'udevadm control --reload' <<< '{ROOTPSW}' """)
    dialogWindow.destroy()
    return "just_installed"
def install_cert_from_or_to_usb_flash(self, container):
# Требуется попробовать получить открытую часть хранилища автоматически, установленного локально в HDIMAGE
self.output_code_flash = False
csptest = subprocess.Popen(['/opt/cprocsp/bin/%s/csptest' % arch, '-keyset', '-enum_cont', '-unique', '-fqcn',
'-verifyc'], stdout=subprocess.PIPE)
output = csptest.communicate()[0].decode('cp1251').encode('utf-8').decode("utf-8")
certs = []
for line in output.split("\n"):
if "HDIMAGE" in line and container in line:
certs.append(line)
for cert in certs:
cert = cert.split("|")[1].strip()
output = os.popen(f"/opt/cprocsp/bin/{arch}/certmgr -inst -store uMy -cont '{cert}'").readlines()
for l in output:
if "[ErrorCode: 0x00000000]\n" in l:
self.output_code_flash = True
return True
# Вариант с открытой частью не удался, предлагаем пользователю самому выбрать сертификат для закрытой части.
if not self.output_code_flash:
dialog = Gtk.FileChooserDialog(title="Выберите сертификат пользователя", parent=self,
action=Gtk.FileChooserAction.OPEN,
)
dialog.add_buttons(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
Gtk.STOCK_OPEN, Gtk.ResponseType.OK)
filter = Gtk.FileFilter()
filter.set_name("Сертификаты")
filter.add_mime_type("Сертификаты")
filter.add_pattern("*.cer")
filter.add_pattern("*.CER")
dialog.add_filter(filter)
domain_name = os.popen("echo $USERNAME").readlines()[0]
if "\\" in domain_name:
domain_name = domain_name.split("\\")[1]
elif | |
"ServerResMessage",
0x009F: "Reset",
0x0100: "KeepAliveAckMessage",
0x0101: "StartMulticastMediaReception",
0x0102: "StartMulticastMediaTransmission",
0x0103: "StopMulticastMediaReception",
0x0104: "StopMulticastMediaTransmission",
0x0105: "OpenReceiveChannel",
0x0106: "CloseReceiveChannel",
0x0107: "ConnectionStatisticsReq",
0x0108: "SoftKeyTemplateResMessage",
0x0109: "SoftKeySetResMessage",
0x0110: "SelectSoftKeysMessage",
0x0111: "CallStateMessage",
0x0112: "DisplayPromptStatusMessage",
0x0113: "ClearPromptStatusMessage",
0x0114: "DisplayNotifyMessage",
0x0115: "ClearNotifyMessage",
0x0116: "ActivateCallPlaneMessage",
0x0117: "DeactivateCallPlaneMessage",
0x0118: "UnregisterAckMessage",
0x0119: "BackSpaceReqMessage",
0x011A: "RegisterTokenAck",
0x011B: "RegisterTokenReject",
0x0042: "DeviceToUserDataResponseVersion1Message",
0x011C: "StartMediaFailureDetection",
0x011D: "DialedNumberMessage",
0x011E: "UserToDeviceDataMessage",
0x011F: "FeatureStatMessage",
0x0120: "DisplayPriNotifyMessage",
0x0121: "ClearPriNotifyMessage",
0x0122: "StartAnnouncementMessage",
0x0123: "StopAnnouncementMessage",
0x0124: "AnnouncementFinishMessage",
0x0127: "NotifyDtmfToneMessage",
0x0128: "SendDtmfToneMessage",
0x0129: "SubscribeDtmfPayloadReqMessage",
0x012A: "SubscribeDtmfPayloadResMessage",
0x012B: "SubscribeDtmfPayloadErrMessage",
0x012C: "UnSubscribeDtmfPayloadReqMessage",
0x012D: "UnSubscribeDtmfPayloadResMessage",
0x012E: "UnSubscribeDtmfPayloadErrMessage",
0x012F: "ServiceURLStatMessage",
0x0130: "CallSelectStatMessage",
0x0131: "OpenMultiMediaChannelMessage",
0x0132: "StartMultiMediaTransmission",
0x0133: "StopMultiMediaTransmission",
0x0134: "MiscellaneousCommandMessage",
0x0135: "FlowControlCommandMessage",
0x0136: "CloseMultiMediaReceiveChannel",
0x0137: "CreateConferenceReqMessage",
0x0138: "DeleteConferenceReqMessage",
0x0139: "ModifyConferenceReqMessage",
0x013A: "AddParticipantReqMessage",
0x013B: "DropParticipantReqMessage",
0x013C: "AuditConferenceReqMessage",
0x013D: "AuditParticipantReqMessage",
0x013F: "UserToDeviceDataVersion1Message",
}
class Skinny(Packet):
    """Skinny Client Control Protocol (SCCP) message header: two little-endian
    ints (length, reserved) followed by the message id from skinny_messages."""
    name="Skinny"
    fields_desc = [ LEIntField("len",0),
                    LEIntField("res",0),
                    LEIntEnumField("msg",0,skinny_messages) ]
# Static RTP payload-type numbers -> codec names, used by RTP.payload below.
_rtp_payload_types = {
    # http://www.iana.org/assignments/rtp-parameters
    0: 'G.711 PCMU', 3: 'GSM',
    4: 'G723', 5: 'DVI4',
    6: 'DVI4', 7: 'LPC',
    8: 'PCMA', 9: 'G722',
    10: 'L16', 11: 'L16',
    12: 'QCELP', 13: 'CN',
    14: 'MPA', 15: 'G728',
    16: 'DVI4', 17: 'DVI4',
    18: 'G729', 25: 'CelB',
    26: 'JPEG', 28: 'nv',
    31: 'H261', 32: 'MPV',
    33: 'MP2T', 34: 'H263' }
class RTP(Packet):
    """RTP (Real-time Transport Protocol) fixed header; 'sync' holds the CSRC
    id list whose length is driven by the 4-bit 'numsync' field."""
    name="RTP"
    fields_desc = [ BitField('version', 2, 2),
                    BitField('padding', 0, 1),
                    BitField('extension', 0, 1),
                    BitFieldLenField('numsync', None, 4, count_of='sync'),
                    BitField('marker', 0, 1),
                    BitEnumField('payload', 0, 7, _rtp_payload_types),
                    ShortField('sequence', 0),
                    IntField('timestamp', 0),
                    IntField('sourcesync', 0),
                    FieldListField('sync', [], IntField("id",0), count_from=lambda pkt:pkt.numsync) ]
### SEBEK
class SebekHead(Packet):
    """Sebek honeypot-logging record header, common to v1/v2/v3 payloads."""
    name = "Sebek header"
    fields_desc = [ XIntField("magic", 0xd0d0d0),
                    ShortField("version", 1),
                    ShortEnumField("type", 0, {"read":0, "write":1,
                                               "socket":2, "open":3}),
                    IntField("counter", 0),
                    IntField("time_sec", 0),
                    IntField("time_usec", 0) ]
    def mysummary(self):
        return self.sprintf("Sebek Header v%SebekHead.version% %SebekHead.type%")
# we need this because Sebek headers differ between v1 and v3, and
# between v3 type socket and v3 others
class SebekV1(Packet):
    """Sebek v1 record body; 'data_length' is auto-computed from 'data'."""
    name = "Sebek v1"
    fields_desc = [ IntField("pid", 0),
                    IntField("uid", 0),
                    IntField("fd", 0),
                    StrFixedLenField("command", "", 12),
                    FieldLenField("data_length", None, "data",fmt="I"),
                    StrLenField("data", "", length_from=lambda x:x.data_length) ]
    def mysummary(self):
        # Prefer header info (version/type) when a SebekHead underlayer exists.
        if isinstance(self.underlayer, SebekHead):
            return self.underlayer.sprintf("Sebek v1 %SebekHead.type% (%SebekV1.command%)")
        else:
            return self.sprintf("Sebek v1 (%SebekV1.command%)")
class SebekV3(Packet):
    """Sebek v3 record body (non-socket types); adds parent_pid/inode over v1."""
    name = "Sebek v3"
    fields_desc = [ IntField("parent_pid", 0),
                    IntField("pid", 0),
                    IntField("uid", 0),
                    IntField("fd", 0),
                    IntField("inode", 0),
                    StrFixedLenField("command", "", 12),
                    FieldLenField("data_length", None, "data",fmt="I"),
                    StrLenField("data", "", length_from=lambda x:x.data_length) ]
    def mysummary(self):
        # Prefer header info (version/type) when a SebekHead underlayer exists.
        if isinstance(self.underlayer, SebekHead):
            return self.underlayer.sprintf("Sebek v%SebekHead.version% %SebekHead.type% (%SebekV3.command%)")
        else:
            return self.sprintf("Sebek v3 (%SebekV3.command%)")
class SebekV2(SebekV3):
    """Sebek v2 record body; same wire layout as v3, only the summary differs."""
    def mysummary(self):
        if isinstance(self.underlayer, SebekHead):
            return self.underlayer.sprintf("Sebek v%SebekHead.version% %SebekHead.type% (%SebekV2.command%)")
        else:
            return self.sprintf("Sebek v2 (%SebekV2.command%)")
class SebekV3Sock(Packet):
    """Sebek v3 socket-type record body (src/dst address, port, syscall).

    NOTE(review): ``name`` reads "Sebek v2 socket" although the class and its
    summaries say v3 — looks like a copy-paste slip; confirm before changing
    the display string.
    """
    name = "Sebek v2 socket"
    fields_desc = [ IntField("parent_pid", 0),
                    IntField("pid", 0),
                    IntField("uid", 0),
                    IntField("fd", 0),
                    IntField("inode", 0),
                    StrFixedLenField("command", "", 12),
                    IntField("data_length", 15),
                    IPField("dip", "127.0.0.1"),
                    ShortField("dport", 0),
                    IPField("sip", "127.0.0.1"),
                    ShortField("sport", 0),
                    ShortEnumField("call", 0, { "bind":2,
                                                "connect":3, "listen":4,
                                                "accept":5, "sendmsg":16,
                                                "recvmsg":17, "sendto":11,
                                                "recvfrom":12}),
                    ByteEnumField("proto", 0, IP_PROTOS) ]
    def mysummary(self):
        # Prefer header info (version/type) when a SebekHead underlayer exists.
        if isinstance(self.underlayer, SebekHead):
            return self.underlayer.sprintf("Sebek v%SebekHead.version% %SebekHead.type% (%SebekV3Sock.command%)")
        else:
            return self.sprintf("Sebek v3 socket (%SebekV3Sock.command%)")
class SebekV2Sock(SebekV3Sock):
    """Sebek v2 socket-type record; same wire layout as v3 socket."""
    def mysummary(self):
        if isinstance(self.underlayer, SebekHead):
            return self.underlayer.sprintf("Sebek v%SebekHead.version% %SebekHead.type% (%SebekV2Sock.command%)")
        else:
            return self.sprintf("Sebek v2 socket (%SebekV2Sock.command%)")
class MGCP(Packet):
    """Text-based MGCP command line: verb, transaction id, endpoint, version,
    each terminated by a single-space separator field.

    NOTE(review): the default endpoint value "<EMAIL>" looks like a redacted
    placeholder — confirm the intended default.
    """
    name = "MGCP"
    longname = "Media Gateway Control Protocol"
    fields_desc = [ StrStopField("verb","AUEP"," ", -1),
                    StrFixedLenField("sep1"," ",1),
                    StrStopField("transaction_id","1234567"," ", -1),
                    StrFixedLenField("sep2"," ",1),
                    StrStopField("endpoint","<EMAIL>"," ", -1),
                    StrFixedLenField("sep3"," ",1),
                    StrStopField("version","MGCP 1.0 NCS 1.0","\x0a", -1),
                    StrFixedLenField("sep4","\x0a",1),
                    ]
#class MGCP(Packet):
# name = "MGCP"
# longname = "Media Gateway Control Protocol"
# fields_desc = [ ByteEnumField("type",0, ["request","response","others"]),
# ByteField("code0",0),
# ByteField("code1",0),
# ByteField("code2",0),
# ByteField("code3",0),
# ByteField("code4",0),
# IntField("trasid",0),
# IntField("req_time",0),
# ByteField("is_duplicate",0),
# ByteField("req_available",0) ]
#
class GPRS(Packet):
    """Placeholder GPRS layer: consumes bytes up to the \\x65\\x00\\x00 marker."""
    name = "GPRSdummy"
    fields_desc = [
        StrStopField("dummy","","\x65\x00\x00",1)
        ]
class HCI_Hdr(Packet):
    """Bluetooth HCI transport header: a single packet-type indicator byte."""
    name = "HCI header"
    fields_desc = [ ByteEnumField("type",2,{1:"command",2:"ACLdata",3:"SCOdata",4:"event",5:"vendor"}),]
    def mysummary(self):
        return self.sprintf("HCI %type%")
class HCI_ACL_Hdr(Packet):
    """Bluetooth HCI ACL data header (handle, flags, little-endian length)."""
    name = "HCI ACL header"
    fields_desc = [ ByteField("handle",0), # Actually, handle is 12 bits and flags is 4.
                    ByteField("flags",0),  # I wait to write a LEBitField
                    LEShortField("len",None), ]
    def post_build(self, p, pay):
        # Auto-fill the 16-bit LE payload length (bytes 2-3) when left as None.
        # NOTE(review): chr()-based byte packing implies Python 2 str packets.
        p += pay
        if self.len is None:
            l = len(p)-4
            p = p[:2]+chr(l&0xff)+chr((l>>8)&0xff)+p[4:]
        return p
class L2CAP_Hdr(Packet):
    """Bluetooth L2CAP basic header: LE length plus channel id (CID)."""
    name = "L2CAP header"
    fields_desc = [ LEShortField("len",None),
                    LEShortEnumField("cid",0,{1:"control"}),]
    def post_build(self, p, pay):
        # Auto-fill the 16-bit LE payload length (bytes 0-1) when left as None.
        p += pay
        if self.len is None:
            l = len(p)-4
            p = p[:2]+chr(l&0xff)+chr((l>>8)&0xff)+p[4:]
        return p
class L2CAP_CmdHdr(Packet):
    """L2CAP signaling command header (code, identifier, LE length)."""
    name = "L2CAP command header"
    fields_desc = [
        ByteEnumField("code",8,{1:"rej",2:"conn_req",3:"conn_resp",
                                4:"conf_req",5:"conf_resp",6:"disconn_req",
                                7:"disconn_resp",8:"echo_req",9:"echo_resp",
                                10:"info_req",11:"info_resp"}),
        ByteField("id",0),
        LEShortField("len",None) ]
    def post_build(self, p, pay):
        # Auto-fill the 16-bit LE length (bytes 2-3) when left as None.
        p += pay
        if self.len is None:
            l = len(p)-4
            p = p[:2]+chr(l&0xff)+chr((l>>8)&0xff)+p[4:]
        return p
    def answers(self, other):
        # A response matches a request with the same identifier and code+1
        # (reject answers anything with the same id; echo needs no payload match).
        if other.id == self.id:
            if self.code == 1:
                return 1
            if other.code in [2,4,6,8,10] and self.code == other.code+1:
                if other.code == 8:
                    return 1
                return self.payload.answers(other.payload)
        return 0
class L2CAP_ConnReq(Packet):
    """L2CAP Connection Request: target PSM and source channel id."""
    name = "L2CAP Conn Req"
    fields_desc = [ LEShortEnumField("psm",0,{1:"SDP",3:"RFCOMM",5:"telephony control"}),
                    LEShortField("scid",0),
                    ]
class L2CAP_ConnResp(Packet):
    """L2CAP Connection Response.

    BUG FIX: the enum label lists for ``result`` and ``status`` were swapped.
    Per the Bluetooth Core spec (Vol 3, Part A, CONNECTION RESPONSE), result
    carries success/pending/refusal codes while status carries the pending
    detail (no further info / authentication pending / authorization pending).
    Wire format is unchanged; only the decoded labels move.
    """
    name = "L2CAP Conn Resp"
    fields_desc = [ LEShortField("dcid",0),
                    LEShortField("scid",0),
                    LEShortEnumField("result",0,["success","pend","bad_psm",
                                                "cr_sec_block","cr_no_mem"]),
                    LEShortEnumField("status",0,["no_info","authen_pend","author_pend"]),
                    ]
    def answers(self, other):
        # A response matches the request carrying the same source CID.
        return self.scid == other.scid
class L2CAP_CmdRej(Packet):
    """L2CAP Command Reject: reason code for a refused signaling command."""
    name = "L2CAP Command Rej"
    fields_desc = [ LEShortField("reason",0),
                    ]
class L2CAP_ConfReq(Packet):
    """L2CAP Configuration Request: destination CID plus continuation flags."""
    name = "L2CAP Conf Req"
    fields_desc = [ LEShortField("dcid",0),
                    LEShortField("flags",0),
                    ]
class L2CAP_ConfResp(Packet):
    """L2CAP Configuration Response: source CID, flags and result code."""
    name = "L2CAP Conf Resp"
    fields_desc = [ LEShortField("scid",0),
                    LEShortField("flags",0),
                    LEShortEnumField("result",0,["success","unaccept","reject","unknown"]),
                    ]
    def answers(self, other):
        # A response matches the request carrying the same source CID.
        return self.scid == other.scid
class L2CAP_DisconnReq(Packet):
    """L2CAP Disconnection Request: destination and source channel ids."""
    name = "L2CAP Disconn Req"
    fields_desc = [ LEShortField("dcid",0),
                    LEShortField("scid",0), ]
class L2CAP_DisconnResp(Packet):
    """L2CAP Disconnection Response: echoes the CIDs being torn down."""
    name = "L2CAP Disconn Resp"
    fields_desc = [ LEShortField("dcid",0),
                    LEShortField("scid",0), ]
    def answers(self, other):
        # A response matches the request carrying the same source CID.
        return self.scid == other.scid
class L2CAP_InfoReq(Packet):
    """L2CAP Information Request: info type plus opaque trailing data."""
    name = "L2CAP Info Req"
    fields_desc = [ LEShortEnumField("type",0,{1:"CL_MTU",2:"FEAT_MASK"}),
                    StrField("data","")
                    ]
class L2CAP_InfoResp(Packet):
    """L2CAP Information Response: echoed type, result and payload data."""
    name = "L2CAP Info Resp"
    fields_desc = [ LEShortField("type",0),
                    LEShortEnumField("result",0,["success","not_supp"]),
                    StrField("data",""), ]
    def answers(self, other):
        # A response matches the request asking for the same info type.
        return self.type == other.type
class NetBIOS_DS(Packet):
    """NetBIOS datagram service message (direct-group datagram layout)."""
    name = "NetBIOS datagram service"
    fields_desc = [
        ByteEnumField("type",17, {17:"direct_group"}),
        ByteField("flags",0),
        XShortField("id",0),
        IPField("src","127.0.0.1"),
        ShortField("sport",138),
        ShortField("len",None),
        ShortField("ofs",0),
        NetBIOSNameField("srcname",""),
        NetBIOSNameField("dstname",""),
        ]
    def post_build(self, p, pay):
        # Auto-fill the big-endian length field at byte offset 10 when None;
        # the 14-byte fixed prefix is excluded from the count.
        p += pay
        if self.len is None:
            l = len(p)-14
            p = p[:10]+struct.pack("!H", l)+p[12:]
        return p
# ShortField("length",0),
# ShortField("Delimitor",0),
# ByteField("command",0),
# ByteField("data1",0),
# ShortField("data2",0),
# ShortField("XMIt",0),
# ShortField("RSPCor",0),
# StrFixedLenField("dest","",16),
# StrFixedLenField("source","",16),
#
# ]
#
# IR
class IrLAPHead(Packet):
    """IrDA IrLAP header: 7-bit address plus command/response indicator bit."""
    name = "IrDA Link Access Protocol Header"
    fields_desc = [ XBitField("Address", 0x7f, 7),
                    BitEnumField("Type", 1, 1, {"Response":0,
                                                "Command":1})]
class IrLAPCommand(Packet):
    """IrDA IrLAP discovery command frame (Python 2 code: long literal)."""
    name = "IrDA Link Access Protocol Command"
    fields_desc = [ XByteField("Control", 0),
                    XByteField("Format identifier", 0),
                    XIntField("Source address", 0),
                    XIntField("Destination address", 0xffffffffL),
                    XByteField("Discovery flags", 0x1),
                    ByteEnumField("Slot number", 255, {"final":255}),
                    XByteField("Version", 0)]
class IrLMP(Packet):
    """IrDA IrLMP layer: service hints, character set and device name."""
    name = "IrDA Link Management Protocol"
    fields_desc = [ XShortField("Service hints", 0),
                    XByteField("Character set", 0),
                    StrField("Device name", "") ]
#NetBIOS
# Name Query Request
# Node Status Request
class NBNSQueryRequest(Packet):
    """NetBIOS Name Service query request (single question record)."""
    name="NBNS query request"
    fields_desc = [ShortField("NAME_TRN_ID",0),
                   ShortField("FLAGS", 0x0110),
                   ShortField("QDCOUNT",1),
                   ShortField("ANCOUNT",0),
                   ShortField("NSCOUNT",0),
                   ShortField("ARCOUNT",0),
                   NetBIOSNameField("QUESTION_NAME","windows"),
                   ShortEnumField("SUFFIX",0x4141,{0x4141:"workstation",0x4141+0x03:"messenger service",0x4141+0x200:"file server service",0x4141+0x10b:"domain master browser",0x4141+0x10c:"domain controller", 0x4141+0x10e:"browser election service"}),
                   ByteField("NULL",0),
                   ShortEnumField("QUESTION_TYPE",0x20, {0x20:"NB",0x21:"NBSTAT"}),
                   ShortEnumField("QUESTION_CLASS",1,{1:"INTERNET"})]
# Name Registration Request
# Name Refresh Request
# Name Release Request or Demand
class NBNSRequest(Packet):
    """NBNS name registration/refresh/release request with one additional
    resource record (Python 2 code: octal literals 00/01/02/03)."""
    name="NBNS request"
    fields_desc = [ShortField("NAME_TRN_ID",0),
                   ShortField("FLAGS", 0x2910),
                   ShortField("QDCOUNT",1),
                   ShortField("ANCOUNT",0),
                   ShortField("NSCOUNT",0),
                   ShortField("ARCOUNT",1),
                   NetBIOSNameField("QUESTION_NAME","windows"),
                   ShortEnumField("SUFFIX",0x4141,{0x4141:"workstation",0x4141+0x03:"messenger service",0x4141+0x200:"file server service",0x4141+0x10b:"domain master browser",0x4141+0x10c:"domain controller", 0x4141+0x10e:"browser election service"}),
                   ByteField("NULL",0),
                   ShortEnumField("QUESTION_TYPE",0x20, {0x20:"NB",0x21:"NBSTAT"}),
                   ShortEnumField("QUESTION_CLASS",1,{1:"INTERNET"}),
                   ShortEnumField("RR_NAME",0xC00C,{0xC00C:"Label String Pointer to QUESTION_NAME"}),
                   ShortEnumField("RR_TYPE",0x20, {0x20:"NB",0x21:"NBSTAT"}),
                   ShortEnumField("RR_CLASS",1,{1:"INTERNET"}),
                   IntField("TTL", 0),
                   ShortField("RDLENGTH", 6),
                   BitEnumField("G",0,1,{0:"Unique name",1:"Group name"}),
                   BitEnumField("OWNER NODE TYPE",00,2,{00:"B node",01:"P node",02:"M node",03:"H node"}),
                   BitEnumField("UNUSED",0,13,{0:"Unused"}),
                   IPField("NB_ADDRESS", "127.0.0.1")]
# Name Query Response
# Name Registration Response
class NBNSQueryResponse(Packet):
    """NBNS positive name query / registration response (one answer record)."""
    name="NBNS query response"
    fields_desc = [ShortField("NAME_TRN_ID",0),
                   ShortField("FLAGS", 0x8500),
                   ShortField("QDCOUNT",0),
                   ShortField("ANCOUNT",1),
                   ShortField("NSCOUNT",0),
                   ShortField("ARCOUNT",0),
                   NetBIOSNameField("RR_NAME","windows"),
                   ShortEnumField("SUFFIX",0x4141,{0x4141:"workstation",0x4141+0x03:"messenger service",0x4141+0x200:"file server service",0x4141+0x10b:"domain master browser",0x4141+0x10c:"domain controller", 0x4141+0x10e:"browser election service"}),
                   ByteField("NULL",0),
                   ShortEnumField("QUESTION_TYPE",0x20, {0x20:"NB",0x21:"NBSTAT"}),
                   ShortEnumField("QUESTION_CLASS",1,{1:"INTERNET"}),
                   IntField("TTL", 0x493e0),
                   ShortField("RDLENGTH", 6),
                   ShortField("NB_FLAGS", 0),
                   IPField("NB_ADDRESS", "127.0.0.1")]
# Name Query Response (negative)
# Name Release Response
class NBNSQueryResponseNegative(Packet):
    """NBNS negative name query / name release response (Python 2 octals)."""
    name="NBNS query response (negative)"
    fields_desc = [ShortField("NAME_TRN_ID",0),
                   ShortField("FLAGS", 0x8506),
                   ShortField("QDCOUNT",0),
                   ShortField("ANCOUNT",1),
                   ShortField("NSCOUNT",0),
                   ShortField("ARCOUNT",0),
                   NetBIOSNameField("RR_NAME","windows"),
                   ShortEnumField("SUFFIX",0x4141,{0x4141:"workstation",0x4141+0x03:"messenger service",0x4141+0x200:"file server service",0x4141+0x10b:"domain master browser",0x4141+0x10c:"domain controller", 0x4141+0x10e:"browser election service"}),
                   ByteField("NULL",0),
                   ShortEnumField("RR_TYPE",0x20, {0x20:"NB",0x21:"NBSTAT"}),
                   ShortEnumField("RR_CLASS",1,{1:"INTERNET"}),
                   IntField("TTL",0),
                   ShortField("RDLENGTH",6),
                   BitEnumField("G",0,1,{0:"Unique name",1:"Group name"}),
                   BitEnumField("OWNER NODE TYPE",00,2,{00:"B node",01:"P node",02:"M node",03:"H node"}),
                   BitEnumField("UNUSED",0,13,{0:"Unused"}),
                   IPField("NB_ADDRESS", "127.0.0.1")]
# Node Status Response
class NBNSNodeStatusResponse(Packet):
    """NBNS node status response header; per-name entries follow in
    NBNSNodeStatusResponseService records."""
    name="NBNS Node Status Response"
    fields_desc = [ShortField("NAME_TRN_ID",0),
                   ShortField("FLAGS", 0x8500),
                   ShortField("QDCOUNT",0),
                   ShortField("ANCOUNT",1),
                   ShortField("NSCOUNT",0),
                   ShortField("ARCOUNT",0),
                   NetBIOSNameField("RR_NAME","windows"),
                   ShortEnumField("SUFFIX",0x4141,{0x4141:"workstation",0x4141+0x03:"messenger service",0x4141+0x200:"file server service",0x4141+0x10b:"domain master browser",0x4141+0x10c:"domain controller", 0x4141+0x10e:"browser election service"}),
                   ByteField("NULL",0),
                   ShortEnumField("RR_TYPE",0x21, {0x20:"NB",0x21:"NBSTAT"}),
                   ShortEnumField("RR_CLASS",1,{1:"INTERNET"}),
                   IntField("TTL",0),
                   ShortField("RDLENGTH",83),
                   ByteField("NUM_NAMES",1)]
# Service for Node Status Response
class NBNSNodeStatusResponseService(Packet):
    """One name entry inside an NBNS node status response."""
    name="NBNS Node Status Response Service"
    fields_desc = [StrFixedLenField("NETBIOS_NAME","WINDOWS         ",15),
                   ByteEnumField("SUFFIX",0,{0:"workstation",0x03:"messenger service",0x20:"file server service",0x1b:"domain master browser",0x1c:"domain controller", 0x1e:"browser election service"}),
                   ByteField("NAME_FLAGS",0x4),
                   ByteEnumField("UNUSED",0,{0:"unused"})]
# End of Node Status Response packet
class NBNSNodeStatusResponseEnd(Packet):
    """Trailing part of an NBNS node status response: MAC plus a 57-byte
    statistics blob."""
    name="NBNS Node Status Response"
    fields_desc = [SourceMACField("MAC_ADDRESS"),
                   BitField("STATISTICS",0,57*8)]
# Wait for Acknowledgement Response
class NBNSWackResponse(Packet):
    """NBNS Wait-for-Acknowledgement (WACK) response."""
    name="NBNS Wait for Acknowledgement Response"
    fields_desc = [ShortField("NAME_TRN_ID",0),
                   ShortField("FLAGS", 0xBC07),
                   ShortField("QDCOUNT",0),
                   ShortField("ANCOUNT",1),
                   ShortField("NSCOUNT",0),
                   ShortField("ARCOUNT",0),
                   NetBIOSNameField("RR_NAME","windows"),
                   ShortEnumField("SUFFIX",0x4141,{0x4141:"workstation",0x4141+0x03:"messenger service",0x4141+0x200:"file server service",0x4141+0x10b:"domain master browser",0x4141+0x10c:"domain controller", 0x4141+0x10e:"browser election service"}),
                   ByteField("NULL",0),
                   ShortEnumField("RR_TYPE",0x20, {0x20:"NB",0x21:"NBSTAT"}),
                   ShortEnumField("RR_CLASS",1,{1:"INTERNET"}),
                   IntField("TTL", 2),
                   ShortField("RDLENGTH",2),
                   BitField("RDATA",10512,16)] #10512=0010100100010000
class NBTDatagram(Packet):
name="NBT Datagram Packet"
fields_desc= [ByteField("Type", 0x10),
ByteField("Flags", 0x02),
ShortField("ID", 0),
IPField("SourceIP", "127.0.0.1"),
ShortField("SourcePort", 138),
ShortField("Length", 272),
ShortField("Offset", 0),
NetBIOSNameField("SourceName","windows"),
ShortEnumField("SUFFIX1",0x4141,{0x4141:"workstation",0x4141+0x03:"messenger service",0x4141+0x200:"file | |
# file: src/sol/parser.py
"""
Data parsing functions applicable to all transactions
"""
import logging
import re
from datetime import datetime, timezone
from sol import util_sol
from sol.api_rpc import RpcAPI
from sol.constants import BILLION, CURRENCY_SOL, INSTRUCTION_TYPE_DELEGATE, MINT_SOL, PROGRAM_STAKE
from sol.tickers.tickers import Tickers
from sol.TxInfoSol import TxInfoSol
def parse_tx(txid, data, wallet_info):
    """Parse data returned by RpcAPI.fetch_tx(); return a TxInfoSol or None.

    Returns a bare TxInfoSol (txid only) for old transactions where the RPC
    result or the block timestamp is missing, and None for transactions that
    errored on-chain or carry an empty meta field. Side effect: any staking
    accounts delegated by the wallet are registered on *wallet_info*.
    """
    wallet_address = wallet_info.wallet_address
    result = data.get("result", None)
    # Handle old transaction where api fails. Return transaction with just txid, nothing else.
    if result is None:
        logging.warning("Unable to fetch txid=%s. Probably old transaction where api "
                        "fails.", txid)
        return TxInfoSol(txid, "", "", wallet_address)
    # Handle old transaction where timestamp missing (something like before 12/2020)
    if not result.get("blockTime"):
        logging.warning("Detected timestamp missing for txid=%s. Probably old transaction", txid)
        return TxInfoSol(txid, "", "", wallet_address)
    # Transactions that resulted in error
    meta = result["meta"]
    if meta is None:
        logging.error("empty meta field. txid=%s", txid)
        return None
    # CLEANUP: reuse the locals bound above instead of re-indexing
    # data["result"]["meta"] after the null checks.
    if meta["err"] is not None:
        return None
    ts = result["blockTime"]
    timestamp = datetime.fromtimestamp(ts, tz=timezone.utc).strftime("%Y-%m-%d %H:%M:%S") if ts else ""
    fee = float(meta["fee"]) / BILLION
    instructions = result["transaction"]["message"]["instructions"]
    txinfo = TxInfoSol(txid, timestamp, fee, wallet_address)
    txinfo.instructions = instructions
    txinfo.instruction_types = _instruction_types(instructions)
    txinfo.program_ids = [x["programId"] for x in txinfo.instructions]
    txinfo.input_accounts = _input_accounts(instructions)
    txinfo.inner = _extract_inner_instructions(data)
    txinfo.inner_parsed = _parsed(txinfo.inner)
    txinfo.log_instructions, txinfo.log, txinfo.log_string = _log_messages(txid, data)
    txinfo.wallet_accounts = _wallet_accounts(txid, wallet_address, txinfo.instructions, txinfo.inner)
    txinfo.account_to_mint, txinfo.mints = _mints(data, wallet_address)
    txinfo.balance_changes_all, txinfo.balance_changes = _balance_changes(data, txinfo.wallet_accounts, txinfo.mints)
    txinfo.transfers = _transfers(txinfo.balance_changes)
    txinfo.transfers_net, txinfo.fee = _transfers_net(txinfo, txinfo.transfers, fee)
    if _has_empty_token_balances(data, txinfo.mints):
        # Token balances missing from the RPC response: fall back to the
        # instruction-based method to calculate transfers.
        txinfo.transfers = _transfers_instruction(txinfo)
        txinfo.transfers_net, _ = _transfers_net(txinfo, txinfo.transfers, fee, mint_to=True)
    # Update wallet_info with any staking addresses found
    for address in _staking_addresses_found(wallet_address, txinfo.instructions):
        wallet_info.add_staking_address(address)
    return txinfo
def _staking_addresses_found(wallet_address, instructions):
    """Collect stake-account addresses that `wallet_address` delegated in these instructions.

    :param wallet_address: the wallet whose stake authority we match against.
    :param instructions: list of parsed instruction dicts from the transaction message.
    :return: list of stake account addresses delegated by this wallet.
    """
    found = []
    for instr in instructions:
        # Only stake-program "delegate" instructions with a parsed dict payload are relevant
        if instr.get("program") != PROGRAM_STAKE:
            continue
        parsed = instr.get("parsed")
        if not (parsed and type(parsed) is dict and parsed.get("type") == INSTRUCTION_TYPE_DELEGATE):
            continue
        info = parsed["info"]
        if info["stakeAuthority"] == wallet_address:
            found.append(info["stakeAccount"])
    return found
def _has_empty_token_balances(data, mints):
post_token_balances = data["result"]["meta"]["postTokenBalances"]
pre_token_balances = data["result"]["meta"]["preTokenBalances"]
if len(post_token_balances) == 0 and len(pre_token_balances) == 0 and len(mints.keys()) > 1:
return True
else:
return False
def _transfers(balance_changes):
transfers_in = []
transfers_out = []
for account_address, (currency, amount_change) in balance_changes.items():
if amount_change > 0:
transfers_in.append((amount_change, currency, "", account_address))
elif amount_change < 0:
transfers_out.append((-amount_change, currency, account_address, ""))
return transfers_in, transfers_out, []
def _balance_changes(data, wallet_accounts, mints):
    """Merge SOL and token balance changes; also return the subset for wallet accounts.

    :return: (all_changes, wallet_changes) — both dicts of
             <account_address> -> (currency, amount_change).
    """
    merged = dict(_balance_changes_sol(data))
    # Token changes take precedence over SOL entries for the same account
    merged.update(_balance_changes_tokens(data, mints))
    wallet_only = {acct: change for acct, change in merged.items() if acct in wallet_accounts}
    return merged, wallet_only
def _balance_changes_tokens(data, mints):
    """Compute per-token-account balance changes from pre/post token balances.

    :param data: raw getTransaction JSON-RPC response dict.
    :param mints: dict of <mint_address> -> {"currency": ..., "decimals": ...}.
    :return: dict of <account_address> -> (currency, amount_change), where
             amount_change = post - pre, rounded to the token's decimals.
    """
    account_keys = [row["pubkey"] for row in data["result"]["transaction"]["message"]["accountKeys"]]
    post_token_balances = data["result"]["meta"]["postTokenBalances"]
    pre_token_balances = data["result"]["meta"]["preTokenBalances"]
    a = {}  # pre-transaction balances: account -> (currency, amount)
    b = {}  # post-transaction balances: account -> (currency, amount)
    balance_changes = {}
    for row in pre_token_balances:
        account_address, currency_a, amount_a, _ = _row_to_amount_currency(row, account_keys, mints)
        a[account_address] = (currency_a, amount_a)
    for row in post_token_balances:
        account_address, currency_b, amount_b, decimals = _row_to_amount_currency(row, account_keys, mints)
        b[account_address] = (currency_b, amount_b)
        # calculate change in balance; a missing pre-balance counts as zero
        currency_a, amount_a = a.get(account_address, (currency_b, 0.0))
        amount_change = round(amount_b - amount_a, decimals)
        # add to result
        balance_changes[account_address] = (currency_a, amount_change)
    # Handle case where post_token_balance doesn't exist for token (aka zero balance):
    # the entire pre-balance is treated as an outflow.
    for row in pre_token_balances:
        account_address, currency_a, amount_a, _ = _row_to_amount_currency(row, account_keys, mints)
        if account_address not in balance_changes:
            balance_changes[account_address] = (currency_a, -amount_a)
    return balance_changes
def _row_to_amount_currency(row, account_keys, mints):
account_address = account_keys[row["accountIndex"]]
mint = row["mint"]
amount = row["uiTokenAmount"]["uiAmount"]
decimals = row["uiTokenAmount"]["decimals"]
if not amount:
amount = 0.0
currency = mints[mint]["currency"] if mint in mints else mint
return account_address, currency, amount, decimals
def _balance_changes_sol(data):
    """Return native SOL balance changes per account.

    :param data: raw getTransaction JSON-RPC response dict.
    :return: dict of <account_address> -> (CURRENCY_SOL, delta) for nonzero deltas,
             with delta in SOL (lamports / BILLION), rounded to 9 decimals.
    """
    message = data["result"]["transaction"]["message"]
    meta = data["result"]["meta"]
    addresses = [row["pubkey"] for row in message["accountKeys"]]
    changes = {}
    # pre/postBalances are lamport amounts aligned with accountKeys by index
    for address, pre, post in zip(addresses, meta["preBalances"], meta["postBalances"]):
        delta = round((float(post) - float(pre)) / BILLION, 9)
        if delta != 0:
            changes[address] = (CURRENCY_SOL, delta)
    return changes
def _wallet_accounts(txid, wallet_address, instructions, inner):
    """Return the set of account addresses associated with the wallet for this tx.

    Combines the wallet's token accounts (via RPC) with addresses derived from
    the transaction's instructions.
    """
    token_accounts = RpcAPI.fetch_token_accounts(wallet_address)
    from_instructions = _instruction_accounts(txid, wallet_address, instructions, inner)
    return set(from_instructions) | set(token_accounts.keys())
def _instruction_types(instructions):
out = []
for instruction in instructions:
parsed = instruction.get("parsed", None)
instruction_type = parsed.get("type", None) if (parsed and type(parsed) is dict) else None
program = instruction.get("program")
out.append((instruction_type, program))
return out
def _input_accounts(instructions):
out = []
for instruction in instructions:
if "accounts" in instruction:
out.append(instruction["accounts"])
return out
def _mints(data, wallet_address):
    """ Returns
        account_to_mints: dict of <account_address> -> <mint_address>
        mints: dict of <mint_address> -> { "currency" : <ticker>, "decimals" : <decimals> }
    """
    # ## Get mints of wallet token accounts
    token_accounts = RpcAPI.fetch_token_accounts(wallet_address)
    out = dict(token_accounts)
    # ## Get mints of accounts found in preTokenBalances and postTokenBalances
    # (entries found here overwrite wallet-derived entries for the same account)
    # Get account addresses
    accounts = [d["pubkey"] for d in data["result"]["transaction"]["message"]["accountKeys"]]
    # Get accounts of mints found in preTokenBalances and postTokenBalances
    mintlist = list(data["result"]["meta"]["preTokenBalances"])
    mintlist.extend(list(data["result"]["meta"]["postTokenBalances"]))
    for info in mintlist:
        account_index = info["accountIndex"]
        mint = info["mint"]
        decimals = info["uiTokenAmount"]["decimals"]
        account = accounts[account_index]
        out[account] = {
            "mint": mint,
            "decimals": decimals
        }
    # ## Repackage output format
    account_to_mint = {}
    mints = {}
    for account_address, info in out.items():
        mint = info["mint"]
        decimals = info["decimals"]
        account_to_mint[account_address] = mint
        mints[mint] = {
            # Tickers.get presumably maps a mint address to a human-readable symbol -- TODO confirm
            "currency": Tickers.get(mint),
            "decimals": decimals
        }
    # Add wallet_address itself as a pseudo token account holding native SOL (9 decimals)
    account_to_mint[wallet_address] = MINT_SOL
    mints[MINT_SOL] = {
        "currency": CURRENCY_SOL,
        "decimals": 9
    }
    return account_to_mint, mints
def _extract_inner_instructions(data):
if "innerInstructions" not in data["result"]["meta"]:
return None
inner_instructions = data["result"]["meta"]["innerInstructions"]
if inner_instructions is None:
return None
out = []
for instructions_dict in inner_instructions:
if "instructions" in instructions_dict:
out.extend(instructions_dict["instructions"])
return out
def _parsed(inner_instructions):
out = {}
for elem in inner_instructions:
if "parsed" in elem:
parsed = elem["parsed"]
info = parsed["info"]
type = parsed["type"]
if type not in out:
out[type] = []
out[type].append(info)
return out
def _instruction_accounts(txid, wallet_address, instructions, inner):
accounts = set()
accounts.add(wallet_address)
instrs = instructions[:] + inner[:]
# Add associated accounts from Instructions
for instruction in instrs:
if "parsed" in instruction:
parsed = instruction["parsed"]
if type(parsed) is dict:
# if wallet associated with source
if parsed.get("type") in ["initializeAccount", "approve", "transfer"]:
info = parsed["info"]
# Grab set of addresses associated with source
keys = ["authority", "source", "newAccount", "owner", "account"]
addresses_source = set([info.get(k) for k in keys if k in info])
# Don't include token program address
addresses_source = set([x for x in addresses_source if not x.startswith("Token")])
if accounts.intersection(addresses_source):
accounts = accounts.union(addresses_source)
# if wallet associated with destination
if parsed.get("type") == "closeAccount":
info = parsed["info"]
account = info["account"]
destination = info["destination"]
if destination == wallet_address:
accounts.add(account)
return accounts
def _transfers_instruction(txinfo):
    """ Returns transfers using information from instructions data (alternative method instead of balance changes)

    :param txinfo: TxInfoSol with .inner, .account_to_mint and .wallet_accounts populated.
    :return: (transfers_in, transfers_out, transfers_unknown) — tuples of
             (amount, currency, source, destination).
    """
    account_to_mint = txinfo.account_to_mint
    inner_instructions = txinfo.inner
    wallet_accounts = txinfo.wallet_accounts
    transfers_in = []
    transfers_out = []
    transfers_unknown = []
    # NOTE(review): assumes every inner instruction's "parsed" field is a dict here;
    # elsewhere in this module "parsed" can be a raw string -- confirm this holds.
    for i, instruction in enumerate(inner_instructions):
        if "parsed" in instruction:
            parsed = instruction["parsed"]
            if parsed["type"] == "transfer":
                info = parsed["info"]
                amount_string = info.get("amount", None)  # token amount (raw string)
                lamports = info.get("lamports", None)  # native SOL amount, when a system transfer
                source = info.get("source", None)
                destination = info.get("destination", None)
                if not amount_string:
                    amount_string = lamports
                # Skip zero-amount no-op transfers
                if amount_string == "0":
                    continue
                # Find mint address: lamports implies native SOL; otherwise prefer
                # the source account's mint, then the destination's, else SOL.
                if lamports:
                    mint = MINT_SOL
                else:
                    if source in account_to_mint and account_to_mint[source] != MINT_SOL:
                        mint = account_to_mint[source]
                    elif destination in account_to_mint and account_to_mint[destination] != MINT_SOL:
                        mint = account_to_mint[destination]
                    else:
                        mint = MINT_SOL
                # Determine amount, currency
                amount, currency = util_sol.amount_currency(txinfo, amount_string, mint)
                # Determine direction of transfer relative to the wallet's accounts
                if source in wallet_accounts:
                    transfers_out.append((amount, currency, source, destination))
                elif destination in wallet_accounts:
                    transfers_in.append((amount, currency, source, destination))
                else:
                    transfers_unknown.append((amount, currency, source, destination))
    return transfers_in, transfers_out, transfers_unknown
def _extract_mint_to(instructions, wallet_address):
try:
for instruction in instructions:
parsed = instruction.get("parsed", None)
if parsed and parsed.get("type") == "mintTo":
info = parsed["info"]
amount = info["amount"]
mint = info["mint"]
return amount, mint
except Exception:
pass
return None, None
def _add_mint_to_as_transfers(txinfo, net_transfers_in):
    """ Adds 'mintTo' instructions as transfers if found (mutates net_transfers_in in place) """
    # Extract any "mintTo" from instructions
    mint_amount_string, mint = _extract_mint_to(txinfo.instructions, txinfo.wallet_address)
    # Extract any "mintTo" from inner instructions
    if not mint:
        mint_amount_string, mint = _extract_mint_to(txinfo.inner, txinfo.wallet_address)
    if mint_amount_string and mint:
        # NOTE(review): x[1] is each transfer's *currency* ticker, while `mint` is a
        # mint address; the `mint in mints_transfers_in` test below only matches when
        # the currency fell back to the raw mint address -- verify this is intended.
        mints_transfers_in = [x[1] for x in net_transfers_in]
        amount, currency = util_sol.amount_currency(txinfo, mint_amount_string, mint)
        if mint in mints_transfers_in:
            # Mint transaction already reflected as inbound transfer. Do nothing
            pass
        else:
            # Minted tokens appear as an inbound transfer with unknown source/destination
            net_transfers_in.append((amount, currency, "", ""))
def _transfers_net(txinfo, transfers, fee, mint_to=False):
_transfers_in, _transfers_out, _ = transfers
| |
extend this class.
"""
def __init__(self, value: ATTRIBUTE_TYPES) -> None:
    """
    Initialize a Relation object.

    :param value: the right value of the relation, i.e. the constant the
                  attribute's value will be compared against.
    """
    self.value = value
@property
@abstractmethod
def _operator(self) -> query_pb2.Query.Relation:
    """The operator of the relation (supplied by each concrete subclass)."""
@classmethod
def from_pb(cls, relation: query_pb2.Query.Relation):
    """
    Build the matching Relation subclass instance from its Protobuf representation.

    :param relation: the Protobuf object that represents the relation constraint.
    :return: an instance of one of the subclasses of Relation.
    """
    op_to_class = {
        query_pb2.Query.Relation.GTEQ: GtEq,
        query_pb2.Query.Relation.GT: Gt,
        query_pb2.Query.Relation.LTEQ: LtEq,
        query_pb2.Query.Relation.LT: Lt,
        query_pb2.Query.Relation.NOTEQ: NotEq,
        query_pb2.Query.Relation.EQ: Eq,
    }
    subclass = op_to_class[relation.op]
    case = relation.val.WhichOneof("value")
    if case == "l":
        # Location is a composite message and needs its own deserialization
        return subclass(Location.from_pb(relation.val.l))
    if case in ("s", "b", "i", "d"):
        # Scalar cases map directly onto the oneof field of the same name
        return subclass(getattr(relation.val, case))
def to_pb(self) -> query_pb2.Query.Relation:
    """
    From an instance of Relation to its associated Protobuf object.

    :return: the Protobuf object that contains the relation.
    """
    relation = query_pb2.Query.Relation()
    # Subclasses implement _operator as a plain method (despite the abstract
    # property declaration on the base class), hence the call syntax here.
    relation.op = self._operator()
    query_value = query_pb2.Query.Value()
    # NOTE: bool must be tested before int, since bool is a subclass of int.
    if isinstance(self.value, bool):
        query_value.b = self.value
    elif isinstance(self.value, int):
        query_value.i = self.value
    elif isinstance(self.value, float):
        query_value.d = self.value
    elif isinstance(self.value, str):
        query_value.s = self.value
    elif isinstance(self.value, Location):
        query_value.l.CopyFrom(self.value.to_pb())
    relation.val.CopyFrom(query_value)
    return relation
def _get_type(self) -> Type[ATTRIBUTE_TYPES]:
    """Return the concrete Python type of the relation's right value."""
    return type(self.value)
def __eq__(self, other):
    """Two relations are equal iff they have the same class and the same value."""
    return type(self) == type(other) and self.value == other.value
class OrderingRelation(Relation, ABC):
    """A specialization of the :class:`~oef.query.Relation` class to represent ordering relation (e.g. greater-than)."""

    def __init__(self, value: ORDERED_TYPES):
        """Initialize an ordering relation; value must support comparison operators."""
        super().__init__(value)

    def _get_type(self) -> Type[ORDERED_TYPES]:
        """Return the concrete Python type of the relation's right value."""
        return type(self.value)
class Eq(Relation):
    """
    The equality relation. That is, if the value of an attribute is equal to the value specified then
    the :class:`~oef.query.Constraint` with this constraint type is satisfied.

    Examples:
        All the books whose author is Stephen King

        >>> c = Constraint("author", Eq("Stephen King"))
        >>> c.check(Description({"author": "Stephen King"}))
        True
        >>> c.check(Description({"author": "J. K. Rowling"}))
        False
    """

    def _operator(self):
        return query_pb2.Query.Relation.EQ

    def check(self, value: ATTRIBUTE_TYPES) -> bool:
        """
        Check if a value is equal to the value of the constraint.

        :param value: the value to check.
        :return: ``True`` if the value satisfies the constraint, ``False`` otherwise.
        """
        return value == self.value
class NotEq(Relation):
    """
    The non-equality relation. That is, if the value of an attribute is not equal to the value specified then
    the :class:`~oef.query.Constraint` with this constraint type is satisfied.

    Examples:
        All the books that are not of the genre Horror

        >>> c = Constraint("genre", NotEq("horror"))
        >>> c.check(Description({"genre": "non-fiction"}))
        True
        >>> c.check(Description({"genre": "horror"}))
        False
    """

    def _operator(self):
        return query_pb2.Query.Relation.NOTEQ

    def check(self, value: ATTRIBUTE_TYPES) -> bool:
        """
        Check if a value is not equal to the value of the constraint.

        :param value: the value to check.
        :return: ``True`` if the value satisfies the constraint, ``False`` otherwise.
        """
        return value != self.value
class Lt(OrderingRelation):
    """
    The Less-than relation. That is, if the value of an attribute is less than the value specified then
    the :class:`~oef.query.Constraint` with this constraint type is satisfied.

    Examples:
        All the books published before 1990

        >>> c = Constraint("year", Lt(1990))
        >>> c.check(Description({"year": 1985}))
        True
        >>> c.check(Description({"year": 2000}))
        False
    """

    def _operator(self):
        return query_pb2.Query.Relation.LT

    def check(self, value: ORDERED_TYPES) -> bool:
        """
        Check if a value is less than the value of the constraint.

        :param value: the value to check.
        :return: ``True`` if the value satisfies the constraint, ``False`` otherwise.
        """
        return value < self.value
class LtEq(OrderingRelation):
    """
    Less-than-equal relation. That is, if the value of an attribute is less than or equal to the value specified then
    the :class:`~oef.query.Constraint` with this constraint type is satisfied.

    Examples:
        All the books published before 1990, 1990 included

        >>> c = Constraint("year", LtEq(1990))
        >>> c.check(Description({"year": 1990}))
        True
        >>> c.check(Description({"year": 1991}))
        False
    """

    def _operator(self):
        return query_pb2.Query.Relation.LTEQ

    def check(self, value: ORDERED_TYPES) -> bool:
        """
        Check if a value is less than or equal to the value of the constraint.

        :param value: the value to check.
        :return: ``True`` if the value satisfies the constraint, ``False`` otherwise.
        """
        return value <= self.value
class Gt(OrderingRelation):
    """
    Greater-than relation. That is, if the value of an attribute is greater than the value specified then
    the :class:`~oef.query.Constraint` with this constraint type is satisfied.

    Examples:
        All the books with rating greater than 4.0

        >>> c = Constraint("average_rating", Gt(4.0))
        >>> c.check(Description({"average_rating": 4.5}))
        True
        >>> c.check(Description({"average_rating": 3.0}))
        False
    """

    def _operator(self):
        return query_pb2.Query.Relation.GT

    def check(self, value: ORDERED_TYPES) -> bool:
        """
        Check if a value is greater than the value of the constraint.

        :param value: the value to check.
        :return: ``True`` if the value satisfies the constraint, ``False`` otherwise.
        """
        return value > self.value
class GtEq(OrderingRelation):
    """
    Greater-than-equal relation. That is, if the value of an attribute is greater than or equal to the value specified
    then the :class:`~oef.query.Constraint` with this constraint type is satisfied.

    Examples:
        All the books published after 2000, included

        >>> c = Constraint("year", GtEq(2000))
        >>> c.check(Description({"year": 2000}))
        True
        >>> c.check(Description({"year": 1990}))
        False
    """

    def _operator(self):
        return query_pb2.Query.Relation.GTEQ

    def check(self, value: ORDERED_TYPES) -> bool:
        """
        Check if a value is greater than or equal to the value of the constraint.

        :param value: the value to check.
        :return: ``True`` if the value satisfies the constraint, ``False`` otherwise.
        """
        return value >= self.value
class Range(ConstraintType):
    """
    A constraint type that allows you to restrict the values of the attribute in a given range.

    Examples:
        All the books published between 2000 and 2005, bounds included

        >>> c = Constraint("year", Range((2000, 2005)))
        >>> c.check(Description({"year": 2000}))
        True
        >>> c.check(Description({"year": 2005}))
        True
        >>> c.check(Description({"year": 1990}))
        False
        >>> c.check(Description({"year": 2010}))
        False
    """

    def __init__(self, values: RANGE_TYPES) -> None:
        """
        Initialize a range constraint type.

        :param values: a pair of ``int``, a pair of ``str``, a pair of ``float`` or
                       a pair of :class:`~oef.schema.Location`.
        """
        self.values = values

    def to_pb(self) -> query_pb2.Query.Range:
        """
        From an instance of Range to its associated Protobuf object.

        :return: the Protobuf object that contains the range.
        """
        range_ = query_pb2.Query.Range()
        # The oneof branch is chosen by the exact type of the first endpoint.
        # Exact `type(...) ==` checks are used (not isinstance), so e.g. bool
        # values (a subclass of int) do not match the int branch.
        if type(self.values[0]) == str:
            values = query_pb2.Query.StringPair()
            values.first = self.values[0]
            values.second = self.values[1]
            range_.s.CopyFrom(values)
        elif type(self.values[0]) == int:
            values = query_pb2.Query.IntPair()
            values.first = self.values[0]
            values.second = self.values[1]
            range_.i.CopyFrom(values)
        elif type(self.values[0]) == float:
            values = query_pb2.Query.DoublePair()
            values.first = self.values[0]
            values.second = self.values[1]
            range_.d.CopyFrom(values)
        elif type(self.values[0]) == Location:
            values = query_pb2.Query.LocationPair()
            values.first.CopyFrom(self.values[0].to_pb())
            values.second.CopyFrom(self.values[1].to_pb())
            range_.l.CopyFrom(values)
        return range_

    @classmethod
    def from_pb(cls, range_pb: query_pb2.Query.Range):
        """
        From the Range Protobuf object to the associated instance of ``Range``.

        :param range_pb: the Protobuf object that represents the range.
        :return: an instance of ``Range`` equivalent to the Protobuf object provided as input,
                 or ``None`` when the oneof "pair" field is unset.
        """
        range_case = range_pb.WhichOneof("pair")
        if range_case == "s":
            return cls((range_pb.s.first, range_pb.s.second))
        elif range_case == "i":
            return cls((range_pb.i.first, range_pb.i.second))
        elif range_case == "d":
            return cls((range_pb.d.first, range_pb.d.second))
        elif range_case == "l":
            return cls((Location.from_pb(range_pb.l.first), Location.from_pb(range_pb.l.second)))

    def check(self, value: RANGE_TYPES) -> bool:
        """
        Check if a value is in the range specified by the constraint.

        :param value: the value to check.
        :return: ``True`` if the value satisfies the constraint, ``False`` otherwise.
        """
        left, right = self.values
        return left <= value <= right

    def _get_type(self) -> Type[Union[int, str, float, Location]]:
        # The range's element type is taken from the first endpoint
        return type(self.values[0])

    def __eq__(self, other):
        if type(other) != Range:
            return False
        else:
            return self.values == other.values
class Set(ConstraintType, ABC):
"""
A constraint type that allows you to restrict the values of the attribute in a specific set.
The specific operator of the relation is defined in the subclasses that extend this class.
"""
def __init__(self, values: SET_TYPES) -> None:
"""
Initialize a :class:`~oef.query.Set` constraint.
:param values: a list of values for the set relation.
"""
self.values = values
@property
@abstractmethod
def _operator(self) -> query_pb2.Query.Set:
"""The operator over the set."""
def to_pb(self):
"""
From an instance of one of the subclasses of :class:`~oef.query.Set` to its associated Protobuf object.
:return: the Protobuf object that contains the set constraint.
"""
set_ = query_pb2.Query.Set()
set_.op = self._operator()
value_type = type(self.values[0]) if len(self.values) > 0 else str
if value_type == str:
values = query_pb2.Query.Set.Values.Strings()
values.vals.extend(self.values)
set_.vals.s.CopyFrom(values)
elif value_type == bool:
values = query_pb2.Query.Set.Values.Bools()
values.vals.extend(self.values)
set_.vals.b.CopyFrom(values)
elif value_type == int:
values = query_pb2.Query.Set.Values.Ints()
values.vals.extend(self.values)
set_.vals.i.CopyFrom(values)
elif value_type == float:
values = query_pb2.Query.Set.Values.Doubles()
values.vals.extend(self.values)
set_.vals.d.CopyFrom(values)
elif value_type == Location:
| |
is the parent/ancestor of the relation
destuser = ndb.KeyProperty(kind=UserModel)
def set_dest(self, user_id):
    """ Set the destination user key property (None clears it) """
    self.destuser = None if user_id is None else ndb.Key(UserModel, user_id)
@classmethod
def list_favorites(cls, user_id, max_len=MAX_FAVORITES):
    """ Query for a list of favorite users for the given user.

    Yields the destination user id (or None) of up to max_len favorites.
    """
    # Validate with a guard, not `assert`: the original asserted user_id is not
    # None and then had an unreachable None-check; asserts are also stripped
    # when running under -O, so they must not be used for input validation.
    if user_id is None:
        return
    k = ndb.Key(UserModel, user_id)
    q = cls.query(ancestor=k)
    # Eventual consistency is acceptable for a favorites listing
    for fm in q.fetch(max_len, read_consistency=ndb.EVENTUAL):
        yield None if fm.destuser is None else fm.destuser.id()
@classmethod
def has_relation(cls, srcuser_id, destuser_id):
    """ Return True if destuser is a favorite of srcuser """
    if srcuser_id is None or destuser_id is None:
        return False
    src_key = ndb.Key(UserModel, srcuser_id)
    dest_key = ndb.Key(UserModel, destuser_id)
    # keys_only fetch: existence is all we need
    query = cls.query(ancestor=src_key).filter(FavoriteModel.destuser == dest_key)
    return query.get(keys_only=True) is not None
@classmethod
def add_relation(cls, src_id, dest_id):
    """ Add a favorite relation between the two users """
    # The source user is the entity's ancestor; the destination is a key property
    relation = FavoriteModel(parent=ndb.Key(UserModel, src_id))
    relation.set_dest(dest_id)
    relation.put()
@classmethod
def del_relation(cls, src_id, dest_id):
    """ Delete favorite relation(s) between a source user and a destination user """
    src_key = ndb.Key(UserModel, src_id)
    dest_key = ndb.Key(UserModel, dest_id)
    # There might conceivably be more than one relation, so keep
    # querying and deleting until no match remains
    while True:
        q = cls.query(ancestor=src_key).filter(FavoriteModel.destuser == dest_key)
        match_key = q.get(keys_only=True)
        if match_key is None:
            return
        match_key.delete()
class ChallengeModel(ndb.Model):
    """ Models a challenge issued by a user to another user.

    The challenging (source) user is the parent/ancestor of the entity.
    """

    # The challenged user
    destuser = ndb.KeyProperty(kind=UserModel)

    # The parameters of the challenge (time, bag type, etc.)
    prefs = ndb.JsonProperty()

    # The time of issuance
    timestamp = ndb.DateTimeProperty(auto_now_add=True)

    def set_dest(self, user_id):
        """ Set the destination user key property (None clears it) """
        k = None if user_id is None else ndb.Key(UserModel, user_id)
        self.destuser = k

    @classmethod
    def has_relation(cls, srcuser_id, destuser_id):
        """ Return True if srcuser has issued a challenge to destuser """
        if srcuser_id is None or destuser_id is None:
            return False
        ks = ndb.Key(UserModel, srcuser_id)
        kd = ndb.Key(UserModel, destuser_id)
        q = cls.query(ancestor=ks).filter(ChallengeModel.destuser == kd)
        return q.get(keys_only=True) is not None

    @classmethod
    def find_relation(cls, srcuser_id, destuser_id):
        """ Return (found, prefs) where found is True if srcuser has challenged destuser """
        if srcuser_id is None or destuser_id is None:
            return (False, None)
        ks = ndb.Key(UserModel, srcuser_id)
        kd = ndb.Key(UserModel, destuser_id)
        q = cls.query(ancestor=ks).filter(ChallengeModel.destuser == kd)
        cm = q.get()
        if cm is None:
            # Not found
            return (False, None)
        # Found: return the preferences associated with the challenge (if any)
        return (True, cm.prefs)

    @classmethod
    def add_relation(cls, src_id, dest_id, prefs):
        """ Add a challenge relation between the two users """
        cm = ChallengeModel(parent=ndb.Key(UserModel, src_id))
        cm.set_dest(dest_id)
        cm.prefs = {} if prefs is None else prefs
        cm.put()

    @classmethod
    def del_relation(cls, src_id, dest_id):
        """ Delete challenge relation(s) between a source and a destination user.

        Returns (found, prefs): whether any challenge existed, and the
        preferences of the first one deleted.
        """
        ks = ndb.Key(UserModel, src_id)
        kd = ndb.Key(UserModel, dest_id)
        prefs = None
        found = False
        while True:
            # There might conceivably be more than one relation,
            # so repeat the query/delete cycle until we don't find any more
            q = cls.query(ancestor=ks).filter(ChallengeModel.destuser == kd)
            cm = q.get()
            if cm is None:
                # Return the preferences of the challenge, if any
                return (found, prefs)
            # Found the relation in question: store the associated preferences
            found = True
            if prefs is None:
                prefs = cm.prefs
            cm.key.delete()

    @classmethod
    def list_issued(cls, user_id, max_len=20):
        """ Query for challenges issued by a particular user.

        Yields (destuser_id, prefs, timestamp) tuples, oldest first.
        """
        # Guard instead of assert: the original asserted user_id is not None
        # and then had an unreachable None-check; asserts are stripped under -O
        # and must not be used for input validation.
        if user_id is None:
            return
        k = ndb.Key(UserModel, user_id)
        # List issued challenges in ascending order by timestamp (oldest first)
        q = cls.query(ancestor=k).order(ChallengeModel.timestamp)

        def ch_callback(cm):
            """ Map an issued challenge to a tuple of useful info """
            id0 = None if cm.destuser is None else cm.destuser.id()
            return (id0, cm.prefs, cm.timestamp)

        for cm in q.fetch(max_len):
            yield ch_callback(cm)

    @classmethod
    def list_received(cls, user_id, max_len=20):
        """ Query for challenges issued to a particular user.

        Yields (srcuser_id, prefs, timestamp) tuples, oldest first.
        """
        # Guard instead of assert (see list_issued)
        if user_id is None:
            return
        k = ndb.Key(UserModel, user_id)
        # List received challenges in ascending order by timestamp (oldest first)
        q = cls.query(ChallengeModel.destuser == k).order(
            ChallengeModel.timestamp)

        def ch_callback(cm):
            """ Map a received challenge to a tuple of useful info """
            # The issuing user is the entity's ancestor
            p0 = cm.key.parent()
            id0 = None if p0 is None else p0.id()
            return (id0, cm.prefs, cm.timestamp)

        for cm in q.fetch(max_len):
            yield ch_callback(cm)
class StatsModel(ndb.Model):
    """ Models statistics about users """

    # The user associated with this statistic or None if robot
    user = ndb.KeyProperty(kind=UserModel, indexed=True,
                           required=False, default=None)
    # Robot strength level (0 for human users)
    robot_level = ndb.IntegerProperty(required=False, default=0)

    # The timestamp of this statistic
    timestamp = ndb.DateTimeProperty(indexed=True, auto_now_add=True)

    # Game counts: all games, and games against human opponents only
    games = ndb.IntegerProperty()
    human_games = ndb.IntegerProperty()

    # Elo ratings (all opponents / human opponents); 1200 is the starting rating
    elo = ndb.IntegerProperty(indexed=True, default=1200)
    human_elo = ndb.IntegerProperty(indexed=True, default=1200)

    # Accumulated points scored for and against (all / human-only)
    score = ndb.IntegerProperty(indexed=False)
    human_score = ndb.IntegerProperty(indexed=False)
    score_against = ndb.IntegerProperty(indexed=False)
    human_score_against = ndb.IntegerProperty(indexed=False)

    # Win/loss counts (all / human-only)
    wins = ndb.IntegerProperty(indexed=False)
    losses = ndb.IntegerProperty(indexed=False)
    human_wins = ndb.IntegerProperty(indexed=False)
    human_losses = ndb.IntegerProperty(indexed=False)

    # Default maximum number of stats records returned by list queries
    MAX_STATS = 100
def set_user(self, user_id, robot_level=0):
    """ Set the user key property and the robot level """
    self.user = None if user_id is None else ndb.Key(UserModel, user_id)
    self.robot_level = robot_level
@classmethod
def create(cls, user_id, robot_level=0):
    """ Create a fresh instance with default values """
    sm = cls()
    sm.set_user(user_id, robot_level)
    sm.timestamp = None
    # Ratings start at the baseline Elo
    sm.elo = 1200
    sm.human_elo = 1200
    # All counters start at zero
    for attr in (
        "games", "human_games",
        "score", "human_score",
        "score_against", "human_score_against",
        "wins", "losses", "human_wins", "human_losses",
    ):
        setattr(sm, attr, 0)
    return sm
def copy_from(self, src):
    """ Copy statistics data from the src instance """
    # user and robot_level are assumed to be in place already
    assert hasattr(self, "user")
    assert hasattr(self, "robot_level")
    for attr in (
        "timestamp",
        "elo", "human_elo",
        "games", "human_games",
        "score", "human_score",
        "score_against", "human_score_against",
        "wins", "losses", "human_wins", "human_losses",
    ):
        setattr(self, attr, getattr(src, attr))
def populate_dict(self, d):
    """ Copy statistics data to the given dict, keyed by attribute name """
    for attr in (
        "elo", "human_elo",
        "games", "human_games",
        "score", "human_score",
        "score_against", "human_score_against",
        "wins", "losses", "human_wins", "human_losses",
    ):
        d[attr] = getattr(self, attr)
@staticmethod
def dict_key(d):
    """ Return a dictionary key that works for human users and robots """
    user = d["user"]
    if user is not None:
        return user
    # Robots are keyed by their level, e.g. "robot-15"
    return "robot-" + str(d["robot_level"])
@staticmethod
def user_id_from_key(k):
    """ Decompose a dictionary key into a (user_id, robot_level) tuple """
    prefix = "robot-"
    if k is not None and k.startswith(prefix):
        # Robot key: the level follows the prefix
        return (None, int(k[len(prefix):]))
    return (k, 0)
@classmethod
def _list_by(cls, prop, makedict, timestamp=None, max_len=MAX_STATS):
""" Returns the Elo ratings at the indicated time point (None = now),
in descending order """
# Currently this means a safety_buffer of 160
max_fetch = int(max_len * 2.6)
safety_buffer = max_fetch - max_len
check_false_positives = True
if timestamp is None:
timestamp = datetime.utcnow()
max_fetch = max_len
# No need to check false positives if querying newest records
check_false_positives = False
# Use descending Elo order
# Ndb doesn't allow us to put an inequality filter on the timestamp here
# so we need to fetch irrespective of timestamp and manually filter
q = cls.query().order(-prop)
result = dict()
CHUNK_SIZE = 100
lowest_elo = None
# The following loop may yield an incorrect result since there may
# be newer stats records for individual users with lower Elo scores
# than those scanned to create the list. In other words, there may
# be false positives on the list (but not false negatives, i.e.
# there can't be higher Elo scores somewhere that didn't make it
# to the list). We attempt to address this by fetching 2.5 times the
# number of requested users, then separately checking each of them for
# false positives. If we have too many false positives, we don't return
# the full requested number of result records.
for sm in iter_q(q, CHUNK_SIZE):
if sm.timestamp <= timestamp:
# Within our time range
d = makedict(sm)
ukey = cls.dict_key(d)
if (ukey not | |
"order": 11
},
"id": {
"type": "string",
"title": "ID",
"description": "ID",
"order": 12
},
"in_group": {
"type": "boolean",
"title": "In Group",
"description": "In group",
"order": 13
},
"indicator_count": {
"type": "integer",
"title": "Indicator Count",
"description": "Indicator count",
"order": 14
},
"indicator_type_counts": {
"$ref": "#/definitions/indicator_type_counts",
"title": "Indicator Type Counts",
"description": "Indicator type counts",
"order": 15
},
"industries": {
"type": "array",
"title": "Industries",
"description": "Industries",
"items": {
"type": "object"
},
"order": 16
},
"is_author": {
"type": "boolean",
"title": "Is Author",
"description": "Is author",
"order": 17
},
"is_following": {
"type": "integer",
"title": "Is Following",
"description": "Is following",
"order": 18
},
"is_modified": {
"type": "boolean",
"title": "Is Modified",
"description": "Is modified",
"order": 19
},
"is_subscribing": {
"type": "integer",
"title": "Is Subscribing",
"description": "Is subscribing",
"order": 20
},
"locked": {
"type": "integer",
"title": "Locked",
"description": "Locked",
"order": 21
},
"modified": {
"type": "string",
"title": "Modified",
"description": "Modified",
"order": 22
},
"modified_text": {
"type": "string",
"title": "Modified Text",
"description": "Modified text",
"order": 23
},
"name": {
"type": "string",
"title": "Name",
"description": "Name",
"order": 24
},
"observation": {
"$ref": "#/definitions/observation",
"title": "Observation",
"description": "Observation",
"order": 25
},
"public": {
"type": "integer",
"title": "Public",
"description": "Public",
"order": 26
},
"pulse_source": {
"type": "string",
"title": "Pulse Source",
"description": "Pulse source",
"order": 27
},
"references": {
"type": "array",
"title": "References",
"description": "References",
"items": {
"type": "string"
},
"order": 28
},
"subscriber_count": {
"type": "integer",
"title": "Subscriber Count",
"description": "Subscriber count",
"order": 29
},
"tags": {
"type": "array",
"title": "Tags",
"description": "Tags",
"items": {
"type": "string"
},
"order": 30
},
"targeted_countries": {
"type": "array",
"title": "Targeted Countries",
"description": "Targeted countries",
"items": {
"type": "object"
},
"order": 31
},
"threat_hunter_scannable": {
"type": "boolean",
"title": "Threat Hunter Scannable",
"description": "Threat hunter scannable",
"order": 32
},
"upvotes_count": {
"type": "number",
"title": "Upvotes Count",
"description": "Upvotes count",
"order": 33
},
"validator_count": {
"type": "integer",
"title": "Validator Count",
"description": "Validator count",
"order": 34
},
"vote": {
"type": "integer",
"title": "Vote",
"description": "Vote",
"order": 35
},
"votes_count": {
"type": "number",
"title": "Votes Count",
"description": "Votes count",
"order": 36
}
},
"definitions": {
"author": {
"type": "object",
"title": "author",
"properties": {
"avatar_url": {
"type": "string",
"title": "Avatar URL",
"description": "Avatar URL",
"order": 1
},
"id": {
"type": "string",
"title": "ID",
"description": "ID",
"order": 2
},
"is_following": {
"type": "integer",
"title": "Is Following",
"description": "Is following",
"order": 3
},
"is_subscribed": {
"type": "integer",
"title": "Is Subscribed",
"description": "Is subscribed",
"order": 4
},
"username": {
"type": "string",
"title": "Username",
"description": "Username",
"order": 5
}
}
},
"groups": {
"type": "object",
"title": "groups",
"properties": {
"id": {
"type": "integer",
"title": "ID",
"description": "Group ID",
"order": 2
},
"name": {
"type": "string",
"title": "Name",
"description": "Group name",
"order": 1
}
}
},
"indicator_type_counts": {
"type": "object",
"title": "indicator_type_counts",
"properties": {
"IPv4": {
"type": "integer",
"title": "IPv4",
"description": "IPv4",
"order": 1
},
"URL": {
"type": "integer",
"title": "URL",
"description": "URL count",
"order": 2
},
"domain": {
"type": "integer",
"title": "Domain",
"description": "Domain count",
"order": 3
},
"email": {
"type": "integer",
"title": "Email",
"description": "Email",
"order": 5
},
"hostname": {
"type": "integer",
"title": "Hostname",
"description": "Hostname count",
"order": 4
}
}
},
"observation": {
"type": "object",
"title": "observation",
"properties": {
"adversary": {
"type": "string",
"title": "Adversary",
"description": "Adversary",
"order": 1
},
"author_id": {
"type": "integer",
"title": "Author ID",
"description": "Author ID",
"order": 2
},
"author_name": {
"type": "string",
"title": "Author Name",
"description": "Author name",
"order": 3
},
"avatar_url": {
"type": "string",
"title": "Avatar URL",
"description": "Avatar URL",
"order": 4
},
"cloned_from": {
"type": "string",
"title": "Cloned From",
"description": "Cloned from",
"order": 5
},
"comment_count": {
"type": "integer",
"title": "Comment Count",
"description": "Comment count",
"order": 6
},
"created": {
"type": "string",
"title": "Created",
"description": "Created",
"order": 7
},
"description": {
"type": "string",
"title": "Description",
"description": "Description",
"order": 8
},
"downvotes_count": {
"type": "number",
"title": "Downvotes Count",
"description": "Downvotes count",
"order": 9
},
"export_count": {
"type": "integer",
"title": "Export Count",
"description": "Export count",
"order": 10
},
"extract_source": {
"type": "array",
"title": "Extract Source",
"description": "Extract source",
"items": {
"type": "string"
},
"order": 11
},
"follower_count": {
"type": "integer",
"title": "Follower Count",
"description": "Follower count",
"order": 12
},
"groups": {
"type": "array",
"title": "Groups",
"description": "Groups",
"items": {
"$ref": "#/definitions/groups"
},
"order": 13
},
"id": {
"type": "string",
"title": "ID",
"description": "ID",
"order": 14
},
"indicator_type_counts": {
"$ref": "#/definitions/indicator_type_counts",
"title": "Indicator Type Counts",
"description": "Indicator type counts",
"order": 15
},
"industries": {
"type": "array",
"title": "Industries",
"description": "Industries",
"items": {
"type": "string"
},
"order": 16
},
"is_following": {
"type": "integer",
"title": "Is Following",
"description": "Is following",
"order": 17
},
"is_subscribed": {
"type": "integer",
"title": "Is Subscribed",
"description": "Is subscribed",
"order": 18
},
"is_subscribing": {
"type": "boolean",
"title": "Is Subscribing",
"description": "Is subscribing",
"order": 19
},
"locked": {
"type": "integer",
"title": "Locked",
"description": "Locked",
"order": 20
},
"modified": {
"type": "string",
"title": "Modified",
"description": "Modified",
"order": 21
},
"name": {
"type": "string",
"title": "Name",
"description": "Name",
"order": 22
},
"public": {
"type": "integer",
"title": "Public",
"description": "Public",
"order": 23
},
"pulse_source": {
"type": "string",
"title": "Pulse Source",
"description": "Pulse source",
"order": 24
},
"references": {
"type": "array",
"title": "References",
"description": "References",
"items": {
"type": "string"
},
"order": 25
},
"revision": {
"type": "integer",
"title": "Revision",
"description": "Revision",
"order": 26
},
"subscriber_count": {
"type": "integer",
"title": "Subscriber Count",
"description": "Subscriber count",
"order": 27
},
"tags": {
"type": "array",
"title": "Tags",
"description": "Tags",
"items": {
"type": "string"
},
"order": 28
},
"targeted_countries": {
"type": "array",
"title": "Targeted Countries",
"description": "Targeted countries",
"items": {
"type": "string"
},
"order": 29
},
"tlp": {
"type": "string",
"title": "TLP",
"description": "Traffic Light Protocol",
"order": 30
},
"upvotes_count": {
"type": "number",
"title": "Upvotes Count",
"description": "Upvotes count",
"order": 31
},
"user_subscriber_count": {
"type": "integer",
"title": "User Subscriber Count",
"description": "User subscriber count",
"order": 32
},
"validator_count": {
"type": "integer",
"title": "Validator Count",
"description": "Validator count",
"order": 33
},
"vote": {
"type": "integer",
"title": "Vote",
"description": "Vote",
"order": 34
},
"votes_count": {
"type": "number",
"title": "Votes Count",
"description": "Votes count",
"order": 35
}
},
"definitions": {
"groups": {
"type": "object",
"title": "groups",
"properties": {
"id": {
"type": "integer",
"title": "ID",
"description": "Group ID",
"order": 2
},
"name": {
"type": "string",
"title": "Name",
"description": "Group name",
"order": 1
}
}
},
"indicator_type_counts": {
"type": "object",
"title": "indicator_type_counts",
"properties": {
"IPv4": {
"type": "integer",
"title": "IPv4",
"description": "IPv4",
"order": 1
},
"URL": {
"type": "integer",
"title": "URL",
"description": "URL count",
"order": 2
},
"domain": {
"type": "integer",
"title": "Domain",
"description": "Domain count",
"order": 3
},
"email": {
"type": "integer",
"title": "Email",
"description": "Email",
"order": 5
},
"hostname": {
"type": "integer",
"title": "Hostname",
"description": "Hostname count",
"order": 4
}
}
}
}
}
}
}
}
},
"pulses": {
"type": "object",
"title": "pulses",
"properties": {
"TLP": {
"type": "string",
"title": "TLP",
"description": "Traffic Light Protocol",
"order": 1
},
"adversary": {
"type": "string",
"title": "Adversary",
"description": "Adversary",
"order": 2
},
"author": {
"$ref": "#/definitions/author",
"title": "Author",
"description": "Author",
"order": 3
},
"cloned_from": {
"type": "string",
"title": "Cloned From",
"description": "Cloned from",
"order": 4
},
"comment_count": {
"type": "integer",
"title": "Comment Count",
"description": "Comment count",
"order": 5
},
"created": {
"type": "string",
"title": "Created",
"description": "Created",
"order": 6
},
"description": {
"type": "string",
"title": "Description",
"description": "Description",
"order": 7
},
"downvotes_count": {
"type": "number",
"title": "Downvotes Count",
"description": "Downvotes count",
"order": 8
},
"export_count": {
"type": "integer",
"title": "Export Count",
"description": "Export count",
"order": 9
},
"follower_count": {
"type": "integer",
"title": "Follower Count",
"description": "Follower count",
"order": 10
},
"groups": {
"type": "array",
"title": "Groups",
"description": "Groups",
"items": {
"$ref": "#/definitions/groups"
},
"order": 11
},
"id": {
"type": "string",
"title": "ID",
"description": "ID",
"order": 12
},
"in_group": {
"type": "boolean",
"title": "In Group",
"description": "In group",
"order": 13
},
"indicator_count": {
"type": "integer",
"title": "Indicator Count",
"description": "Indicator count",
"order": 14
},
"indicator_type_counts": {
"$ref": "#/definitions/indicator_type_counts",
"title": "Indicator Type Counts",
"description": | |
# Copyright <NAME> 2016
# Copyright <NAME> 2016
# Copyright <NAME> 2016
# Licenced under 3 clause BSD licence
#
#
# Solar Control System
#
# <NAME> --- 30th January 2016
#
#
# <eventInterface module='EventHandlers.SolarRun' name='SolarRun' >
# <check_time type='http://id.webbrick.co.uk/events/config/get' source='solar_check_time' />
# <check_run_time type='http://id.webbrick.co.uk/events/config/get' source='solar_check_run_time' />
# <enable type='http://id.webbrick.co.uk/events/config/get' source='solar_enable' />
# <pump type='http://id.webbrick.co.uk/events/webbrick/DO' source='webbrick/36/DO/4' key="state" invert="true" />
# <pipe_temperature type='http://id.webbrick.co.uk/events/webbrick/CT' source='webbrick/999/CT/0' />
# <tank_temperature type='http://id.webbrick.co.uk/events/webbrick/CT' source='webbrick/999/CT/1' />
#
# </eventInterface>
import logging
import time
from EventLib.Event import Event
from EventLib.Status import StatusVal
from EventLib.SyncDeferred import makeDeferred
from EventHandlers.BaseHandler import BaseHandler
from EventHandlers.Utils import *
# make logging global to module.
#_log = None
_log = logging
_log.basicConfig(level=logging.DEBUG, filename='/var/log/webbrick/SolarRun.log')
CONFIG_TYPE = "http://id.webbrick.co.uk/events/config/get"
class SolarRun( BaseHandler ):
"""
SolarRun class that subscribes to events and generates SolarRun events
"""
def __init__ (self, localRouter):
super(SolarRun,self).__init__(localRouter)
global _log
_log = self._log # make global
_log.debug('SolarRun Initialising')
self._debug = True #
self._counter = 0 # Will be used for counting
self._state = None # Used for tracking overall state
self._solar_run_evt_type = 'http://id.webbrick.co.uk/events/SolarRun'
self._solar_run_evt_source = self._log.name
self._check_time = 120
self._check_event = None
self._enable = False
self._enable_event = None
self._threshold = 6.0 # default to 6 degrees of difference between pipe and tank
self._isDark = 0 # a default value
self._dayphasetext = "Unknown" # a default value
self._pump_state = None
self._pump_event = None
self._pump_check_run = False
self._pump_check_start = None
self._pump_check_run_time_event = None
self._pump_check_run_time = 1 # pump will run for 60 seconds before we decide that we have heat in the pipes.
self._tank_event = None
self._tank_temperature = None
self._tank_limit = 80 # tank should not go beyond 80 DegC
self._pipe_event = None
self._pipe_temperature = None
    def start(self):
        """ Subscribe to the event feeds this handler needs.

        Always subscribes to the config and time feeds; the enable,
        check_time, pump, check-run-time, tank and pipe feeds are only
        subscribed when the corresponding descriptor was supplied via
        configure().
        """
        self._log.debug( 'start' )
        self._localRouter.subscribe( self._subscribeTime, self, 'http://id.webbrick.co.uk/events/config/get' )
        self._localRouter.subscribe( self._subscribeTime, self, 'http://id.webbrick.co.uk/events/time/dayphaseext' )
        self._localRouter.subscribe( self._subscribeTime, self, 'http://id.webbrick.co.uk/events/time/isDark' )
        self._localRouter.subscribe( self._subscribeTime, self, 'http://id.webbrick.co.uk/events/time/second', 'time/second' )
        self._localRouter.subscribe( self._subscribeTime, self, 'http://id.webbrick.co.uk/events/time/minute', 'time/minute' )
        #
        # Subscribe to the enable and check_time event type
        #
        if self._enable_event :
            self._localRouter.subscribe( self._subscribeTime, self, self._enable_event['type'] )
        if self._check_event :
            self._localRouter.subscribe( self._subscribeTime, self, self._check_event['type'] )
        if self._pump_event :
            self._localRouter.subscribe( self._subscribeTime, self, self._pump_event['type'] )
        if self._pump_check_run_time_event :
            self._localRouter.subscribe( self._subscribeTime, self, self._pump_check_run_time_event['type'] )
        if self._tank_event :
            self._localRouter.subscribe( self._subscribeTime, self, self._tank_event['type'] )
        if self._pipe_event :
            self._localRouter.subscribe( self._subscribeTime, self, self._pipe_event['type'] )
        self.subscribeAll()
def stop(self):
self._log.debug( 'stop' )
if self._debug:
self._log.debug ("--------- Variables Were ----------------")
self._log.debug ("MyType %s" % str(self._solar_run_evt_type))
self._log.debug ("MySource %s" % str(self._solar_run_evt_source))
self._log.debug ("Enabled %s" % str(self._enable))
self._log.debug ("Check Time %s" % str(self._check_time))
self._log.debug ("DayPhase %s" % str(self._dayphasetext))
self._log.debug ("Dark %s" % str(self._isDark))
self._log.debug ("PumpState %s" % str(self._pump_state))
self._log.debug ("Tank_Temperature %s" % str(self._tank_temperature))
self._log.debug ("-----------------------------------------")
self._localRouter.unsubscribe( self, 'http://id.webbrick.co.uk/events/config/get' )
self._localRouter.unsubscribe( self, 'http://id.webbrick.co.uk/events/time/dayphaseext' )
self._localRouter.unsubscribe( self, 'http://id.webbrick.co.uk/events/time/isDark' )
self._localRouter.unsubscribe( self, 'http://id.webbrick.co.uk/events/time/second', 'time/second' )
self._localRouter.unsubscribe( self, 'http://id.webbrick.co.uk/events/time/minute', 'time/minute' )
if self._enable_event :
self._localRouter.unsubscribe( self, self._enable_event['type'] )
if self._check_event :
self._localRouter.unsubscribe( self, self._check_event['type'] )
self.unSubscribeAll()
def configure( self, cfgDict ):
from string import upper
super(SolarRun,self).configure(cfgDict)
self._log.debug(cfgDict)
if cfgDict.has_key('enable'):
self._enable_event = cfgDict['enable']
if cfgDict.has_key('check_time'):
self._check_event = cfgDict['check_time']
if cfgDict.has_key('pump'):
self._pump_event = cfgDict['pump']
if cfgDict.has_key('check_run_time'):
self._pump_check_run_time_event = cfgDict['check_run_time']
if cfgDict.has_key('pipe_temperature'):
self._pipe_event = cfgDict['pipe_temperature']
if cfgDict.has_key('tank_temperature'):
self._tank_event = cfgDict['tank_temperature']
if cfgDict.has_key('eventtype'):
if cfgDict['eventtype'].has_key('type'):
self._SolarRun_evt_type = cfgDict['eventtype']['type']
if cfgDict['eventtype'].has_key('eventsource'):
if cfgDict['eventtype']['eventsource'].has_key('source'):
self._SolarRun_evt_source = cfgDict['eventtype']['eventsource']['source']
if self._debug:
self._log.debug ("--------- Config Debug ----------------")
self._log.debug ("Check Time %s" % str(self._check_time))
self._log.debug ("Check Run Time %s" % str(self._pump_check_run_time))
self._log.debug ("enable event %s" % str(self._enable_event))
self._log.debug ("---------------------------------------")
def doActions( self, actions, inEvent ):
if actions:
for action in actions:
# logged in BaseHandler.sendEvent
self._log.debug( 'Generate event %s' % ( action ) )
self.sendEvent( makeNewEvent( action, inEvent, None ) )
def configureActions( self, cfgDict ):
self._log.debug("configureActions %s" % (cfgDict) )
result = None
if cfgDict.has_key("newEvent"):
if isinstance( cfgDict["newEvent"], list ):
result = cfgDict["newEvent"]
else:
result = list()
result.append(cfgDict["newEvent"])
self._log.debug("configureActions %s" % (result) )
return result
def indexSolarRun(self, inEvent):
self._log.debug('checking if there is anything to do at this time')
if self._pump_check_run:
self.Evaluate_Conditions('tick')
    def setSolarRunEvent(self):
        """ Build a status event and reset the tick counter.

        NOTE(review): this references self._hold, self._occupancy,
        self._presence and self._light_state, none of which are initialised
        anywhere in this handler -- it looks like a leftover copied from a
        lighting/occupancy handler and would raise AttributeError if called.
        Confirm whether it can be removed or must be repaired.
        """
        self._log.debug ("Setting SolarRun Event ....")
        self._SolarRun_event = Event(self._SolarRun_evt_type,self._SolarRun_evt_source, {'hold': self._hold, 'occupancy': self._occupancy, 'isDark': self._isDark, 'dayphase': self._dayphasetext, 'presence': self._presence, 'light_state': self._light_state} )
        self._counter = 0 # reset
def sendSolarRunEvent(self, new_pump_state, reason='event'):
self._log.debug ("Sending SolarRun Event: %s" % new_pump_state)
self._SolarRun_event = Event(self._SolarRun_evt_type,self._SolarRun_evt_source, {'set_pump_state': new_pump_state,
'pump_state': self._pump_state,
'reason': reason} )
self.sendEvent( self._SolarRun_event)
self._SolarRun_event = None # reset
def doHandleConfig( self, inEvent ):
from string import upper
src = inEvent.getSource()
#self._log.debug ("Found Event: %s" % str(inEvent))
#self._log.debug ("Handle Config Event SRC %s" % str(src))
#
# Now see if this matches anything we need
#
if self._enable_event:
if self._enable_event['type'] == CONFIG_TYPE:
if self._enable_event['source'] == src:
en = inEvent.getPayload()['val']
if en == "1":
self._enable = True
if upper(en) == "TRUE":
self._enable = True
else:
self._enable = False
if self._check_event['type'] == CONFIG_TYPE:
if self._check_event['source'] == src:
self._check_time = int(inEvent.getPayload()['val'])
if self._pump_check_run_time_event['type'] == CONFIG_TYPE:
if self._pump_check_run_time_event['source'] == src:
self._pump_check_run_time = int(inEvent.getPayload()['val'])
def doHandleDayPhase( self, inEvent ):
src = inEvent.getSource()
#self._log.debug ("Found Event: %s" % str(inEvent))
#self._log.debug ("Handle DayPhase Event SRC %s" % str(src))
if src == 'time/dayphaseext':
self._dayphasetext = inEvent.getPayload()['dayphasetext']
def doHandleDark( self, inEvent ):
src = inEvent.getSource()
#self._log.debug ("Found Event: %s" % str(inEvent))
#self._log.debug ("Handle Dark Event SRC %s" % str(src))
if src == 'time/isDark':
self._isDark = inEvent.getPayload()['state']
    def doHandleEvent( self, handler, inEvent ):
        """ Central dispatcher: route an incoming event to the matching
        handler method and return a deferred OK status.

        Branch order matters: the fixed config/time event types are tested
        first, then the configurable pump/pipe/tank sources; anything else
        falls through to the base class handler.
        """
        self._log.debug('inEvent handled %s with %s' % (inEvent.getType(), inEvent.getSource()))
        if inEvent.getType() == 'http://id.webbrick.co.uk/events/config/get':
            # Runtime (re)configuration values
            self.doHandleConfig( inEvent )
            return makeDeferred(StatusVal.OK)
        elif inEvent.getType() == 'http://id.webbrick.co.uk/events/time/dayphaseext':
            self.doHandleDayPhase( inEvent )
            return makeDeferred(StatusVal.OK)
        elif inEvent.getType() == 'http://id.webbrick.co.uk/events/time/isDark':
            # A darkness transition may need to switch the pump off at once
            self.doHandleDark( inEvent )
            self.Evaluate_Conditions('DayNightChange')
            return makeDeferred(StatusVal.OK)
        elif inEvent.getType() == "http://id.webbrick.co.uk/events/time/second":
            # Once-a-second tick; only acts while a check run is in progress
            self.indexSolarRun(inEvent)
            return makeDeferred(StatusVal.OK)
        elif inEvent.getType() == "http://id.webbrick.co.uk/events/time/minute":
            now = int(inEvent.getPayload()['minute'])
            # NOTE(review): _check_time defaults to 120 while 'minute' is
            # presumably 0-59, so with the default this only fires at minute
            # 0; _check_time looks like it is expected to be configured to a
            # divisor of 60 -- confirm against the deployed configuration.
            if (now%self._check_time == 0) and self._enable and not self._isDark:
                self.Evaluate_Conditions('check')
            else:
                self.Evaluate_Conditions('minute')
            return makeDeferred(StatusVal.OK)
        elif self._pump_event and inEvent.getType() == self._pump_event['type'] and inEvent.getSource() == self._pump_event['source']:
            # Track the externally reported pump output state
            self._pump_state = int(inEvent.getPayload()['state'])
            self._log.debug('Changing Pump State to %s' % self._pump_state)
            return makeDeferred(StatusVal.OK)
        elif self._pipe_event and inEvent.getType() == self._pipe_event['type'] and inEvent.getSource() == self._pipe_event['source']:
            # New collector-pipe temperature reading triggers re-evaluation
            self._pipe_temperature = float(inEvent.getPayload()['val'])
            self._log.debug('Changing Pipe Temperature to %s' % self._pipe_temperature)
            self.Evaluate_Conditions('pipe_temperature')
            return makeDeferred(StatusVal.OK)
        elif self._tank_event and inEvent.getType() == self._tank_event['type'] and inEvent.getSource() == self._tank_event['source']:
            # New tank temperature reading triggers re-evaluation
            self._tank_temperature = float(inEvent.getPayload()['val'])
            self._log.debug('Changing Tank Temperature to %s' % self._tank_temperature)
            self.Evaluate_Conditions('tank_temperature')
            return makeDeferred(StatusVal.OK)
        else:
            # Unknown event: defer to BaseHandler's generic handling
            return super(SolarRun,self).doHandleEvent( handler, inEvent)
def Evaluate_Conditions(self, why):
#To get here we should be enabled, it should light and we are either at an evaluation time, or we have been running
self._log.debug('Here we go with evaluation, reason %s' % why)
self._log.debug('Tank_Temperature : %s' % self._tank_temperature)
self._log.debug('Pipe_Temperature : %s' % self._pipe_temperature)
self._log.debug('pump_state : %s' % self._pump_state)
self._log.debug('State : %s' % self._state)
self._log.debug('pump_check_run : %s' % self._pump_check_run)
self._log.debug('pump_check_run_time : %s' % self._pump_check_run_time)
# Lets do it
if self._tank_temperature > self._tank_limit:
self._log.debug('Set pump to stop OVERTEMPERATURE %s' % self._tank_temperature)
self.sendSolarRunEvent('stop','over_temperature')
self._state = 'over_temperature'
return # nothing more to do
if self._isDark:
# its dark stop the pump
self._log.debug('Set pump to stop because dark')
if (self._pump_state == 1):
self.sendSolarRunEvent('stop','darkness')
self._state = 'inDarkness'
return # nothing more to do if we are in darkness
if not self._enable:
# we are disabled, stop the pump do nothing more
self._log.debug('Set pump to stop because not enabled')
if (self._pump_state == 1):
self.sendSolarRunEvent('stop','not enabled')
self._state = 'Not Enabled'
return
if why == 'check':
if (self._pump_state == 0) and (self._pump_check_run == False):
self._log.debug('Set pump to checking')
self.sendSolarRunEvent('check_run','checking')
self._pump_check_run = True
self._state = 'checking'
self._pump_check_start = time.time()
if self._tank_temperature and self._pipe_temperature:
# only evaluate if you have both temperatures available
differential = self._pipe_temperature - self._tank_temperature
if self._pipe_temperature > (self._tank_temperature + self._threshold):
# pipe is hot pump should run as long as the tank is not over temperature
if self._tank_temperature < self._tank_limit:
if (self._pump_state == 0):
self._log.debug('Set pump to run %s - %s' % (self._pipe_temperature, self._tank_temperature))
self.sendSolarRunEvent('run', 'heat available %s' % differential)
self._state = 'run'
if self._pipe_temperature < (self._tank_temperature + self._threshold):
if self._pump_check_run:
if (time.time() - self._pump_check_start) < self._pump_check_run_time:
# Do nothing until the check run is complete
self._log.debug('Waiting for check_run to complete')
pass
else:
self._pump_check_run = False
self.sendSolarRunEvent('stop', 'heat not available at end of check run %s' % differential)
else:
# Not in pump | |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['SecurityPolicyArgs', 'SecurityPolicy']
@pulumi.input_type
class SecurityPolicyArgs:
    # Auto-generated by the Pulumi Terraform Bridge (tfgen); see file header.
    def __init__(__self__, *,
                 ciphers: pulumi.Input[Sequence[pulumi.Input[str]]],
                 security_policy_name: pulumi.Input[str],
                 tls_versions: pulumi.Input[Sequence[pulumi.Input[str]]],
                 dry_run: Optional[pulumi.Input[bool]] = None,
                 resource_group_id: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, Any]]] = None):
        """
        The set of arguments for constructing a SecurityPolicy resource.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] ciphers: The supported cipher suites, which are determined by the TLS protocol version.The specified cipher suites must be supported by at least one TLS protocol version that you select.
        :param pulumi.Input[str] security_policy_name: The name of the resource. The name must be 2 to 128 characters in length and must start with a letter. It can contain digits, periods (.), underscores (_), and hyphens (-).
        :param pulumi.Input[Sequence[pulumi.Input[str]]] tls_versions: The TLS protocol versions that are supported. Valid values: TLSv1.0, TLSv1.1, TLSv1.2 and TLSv1.3.
        :param pulumi.Input[bool] dry_run: The dry run.
        :param pulumi.Input[str] resource_group_id: The ID of the resource group.
        :param pulumi.Input[Mapping[str, Any]] tags: Tags for the resource (not documented in the upstream provider schema -- confirm semantics there).
        """
        pulumi.set(__self__, "ciphers", ciphers)
        pulumi.set(__self__, "security_policy_name", security_policy_name)
        pulumi.set(__self__, "tls_versions", tls_versions)
        if dry_run is not None:
            pulumi.set(__self__, "dry_run", dry_run)
        if resource_group_id is not None:
            pulumi.set(__self__, "resource_group_id", resource_group_id)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)

    @property
    @pulumi.getter
    def ciphers(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
        """
        The supported cipher suites, which are determined by the TLS protocol version.The specified cipher suites must be supported by at least one TLS protocol version that you select.
        """
        return pulumi.get(self, "ciphers")

    @ciphers.setter
    def ciphers(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
        pulumi.set(self, "ciphers", value)

    @property
    @pulumi.getter(name="securityPolicyName")
    def security_policy_name(self) -> pulumi.Input[str]:
        """
        The name of the resource. The name must be 2 to 128 characters in length and must start with a letter. It can contain digits, periods (.), underscores (_), and hyphens (-).
        """
        return pulumi.get(self, "security_policy_name")

    @security_policy_name.setter
    def security_policy_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "security_policy_name", value)

    @property
    @pulumi.getter(name="tlsVersions")
    def tls_versions(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
        """
        The TLS protocol versions that are supported. Valid values: TLSv1.0, TLSv1.1, TLSv1.2 and TLSv1.3.
        """
        return pulumi.get(self, "tls_versions")

    @tls_versions.setter
    def tls_versions(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
        pulumi.set(self, "tls_versions", value)

    @property
    @pulumi.getter(name="dryRun")
    def dry_run(self) -> Optional[pulumi.Input[bool]]:
        """
        The dry run.
        """
        return pulumi.get(self, "dry_run")

    @dry_run.setter
    def dry_run(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "dry_run", value)

    @property
    @pulumi.getter(name="resourceGroupId")
    def resource_group_id(self) -> Optional[pulumi.Input[str]]:
        """
        The ID of the resource group.
        """
        return pulumi.get(self, "resource_group_id")

    @resource_group_id.setter
    def resource_group_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "resource_group_id", value)

    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        """
        Tags for the resource (not documented in the upstream provider
        schema -- confirm semantics there).
        """
        return pulumi.get(self, "tags")

    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
        pulumi.set(self, "tags", value)
@pulumi.input_type
class _SecurityPolicyState:
    def __init__(__self__, *,
                 ciphers: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 dry_run: Optional[pulumi.Input[bool]] = None,
                 resource_group_id: Optional[pulumi.Input[str]] = None,
                 security_policy_name: Optional[pulumi.Input[str]] = None,
                 status: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 tls_versions: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
        """
        Input properties used for looking up and filtering SecurityPolicy resources.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] ciphers: The supported cipher suites, which are determined by the TLS protocol version.The specified cipher suites must be supported by at least one TLS protocol version that you select.
        :param pulumi.Input[bool] dry_run: The dry run.
        :param pulumi.Input[str] resource_group_id: The ID of the resource group.
        :param pulumi.Input[str] security_policy_name: The name of the resource. The name must be 2 to 128 characters in length and must start with a letter. It can contain digits, periods (.), underscores (_), and hyphens (-).
        :param pulumi.Input[str] status: The status of the resource.
        :param pulumi.Input[Mapping[str, Any]] tags: Tags for the resource (not documented in the upstream provider schema -- confirm semantics there).
        :param pulumi.Input[Sequence[pulumi.Input[str]]] tls_versions: The TLS protocol versions that are supported. Valid values: TLSv1.0, TLSv1.1, TLSv1.2 and TLSv1.3.
        """
        if ciphers is not None:
            pulumi.set(__self__, "ciphers", ciphers)
        if dry_run is not None:
            pulumi.set(__self__, "dry_run", dry_run)
        if resource_group_id is not None:
            pulumi.set(__self__, "resource_group_id", resource_group_id)
        if security_policy_name is not None:
            pulumi.set(__self__, "security_policy_name", security_policy_name)
        if status is not None:
            pulumi.set(__self__, "status", status)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
        if tls_versions is not None:
            pulumi.set(__self__, "tls_versions", tls_versions)
    # Generated accessor pair for the "ciphers" state property.
    @property
    @pulumi.getter
    def ciphers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        The supported cipher suites, which are determined by the TLS protocol version.The specified cipher suites must be supported by at least one TLS protocol version that you select.
        """
        return pulumi.get(self, "ciphers")

    @ciphers.setter
    def ciphers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "ciphers", value)
    @property
    @pulumi.getter(name="dryRun")
    def dry_run(self) -> Optional[pulumi.Input[bool]]:
        """
        The dry run.
        """
        return pulumi.get(self, "dry_run")
    # The getter decorator maps the snake_case name onto the "dryRun" wire name.
    @dry_run.setter
    def dry_run(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "dry_run", value)
    @property
    @pulumi.getter(name="resourceGroupId")
    def resource_group_id(self) -> Optional[pulumi.Input[str]]:
        """
        The ID of the resource group.
        """
        return pulumi.get(self, "resource_group_id")
    # Wire name "resourceGroupId" is declared on the getter decorator above.
    @resource_group_id.setter
    def resource_group_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "resource_group_id", value)
    @property
    @pulumi.getter(name="securityPolicyName")
    def security_policy_name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the resource. The name must be 2 to 128 characters in length and must start with a letter. It can contain digits, periods (.), underscores (_), and hyphens (-).
        """
        return pulumi.get(self, "security_policy_name")
    # Wire name "securityPolicyName" is declared on the getter decorator above.
    @security_policy_name.setter
    def security_policy_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "security_policy_name", value)
    @property
    @pulumi.getter
    def status(self) -> Optional[pulumi.Input[str]]:
        """
        The status of the resource.
        """
        return pulumi.get(self, "status")
    # Plain getter/setter pair over Pulumi's internal input table.
    @status.setter
    def status(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "status", value)
    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        """
        A mapping of tags assigned to the resource. (No upstream description;
        inferred from the parameter name -- confirm against provider docs.)
        """
        return pulumi.get(self, "tags")
    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
        pulumi.set(self, "tags", value)
    @property
    @pulumi.getter(name="tlsVersions")
    def tls_versions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        The TLS protocol versions that are supported. Valid values: TLSv1.0, TLSv1.1, TLSv1.2 and TLSv1.3.
        """
        return pulumi.get(self, "tls_versions")
    # Wire name "tlsVersions" is declared on the getter decorator above.
    @tls_versions.setter
    def tls_versions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "tls_versions", value)
class SecurityPolicy(pulumi.CustomResource):
    # Overload: construct the resource from individual keyword arguments.
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 ciphers: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 dry_run: Optional[pulumi.Input[bool]] = None,
                 resource_group_id: Optional[pulumi.Input[str]] = None,
                 security_policy_name: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 tls_versions: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 __props__=None):
        """
        Provides a ALB Security Policy resource.
        For information about ALB Security Policy and how to use it, see [What is Security Policy](https://www.alibabacloud.com/help/doc-detail/213607.htm).
        > **NOTE:** Available in v1.130.0+.
        ## Example Usage
        Basic Usage
        ```python
        import pulumi
        import pulumi_alicloud as alicloud
        config = pulumi.Config()
        name = config.get("name")
        if name is None:
            name = "testAccSecurityPolicy"
        default = alicloud.alb.SecurityPolicy("default",
            security_policy_name=name,
            tls_versions=["TLSv1.0"],
            ciphers=[
                "ECDHE-ECDSA-AES128-SHA",
                "AES256-SHA",
            ])
        ```
        ## Import
        ALB Security Policy can be imported using the id, e.g.
        ```sh
         $ pulumi import alicloud:alb/securityPolicy:SecurityPolicy example <id>
        ```
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] ciphers: The supported cipher suites, which are determined by the TLS protocol version.The specified cipher suites must be supported by at least one TLS protocol version that you select.
        :param pulumi.Input[bool] dry_run: The dry run.
        :param pulumi.Input[str] resource_group_id: The ID of the resource group.
        :param pulumi.Input[str] security_policy_name: The name of the resource. The name must be 2 to 128 characters in length and must start with a letter. It can contain digits, periods (.), underscores (_), and hyphens (-).
        :param pulumi.Input[Mapping[str, Any]] tags: A mapping of tags assigned to the resource. (Not documented upstream; inferred from the name -- confirm.)
        :param pulumi.Input[Sequence[pulumi.Input[str]]] tls_versions: The TLS protocol versions that are supported. Valid values: TLSv1.0, TLSv1.1, TLSv1.2 and TLSv1.3.
        """
        ...
    # Overload: construct the resource from a fully-populated SecurityPolicyArgs.
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: SecurityPolicyArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Provides a ALB Security Policy resource.
        For information about ALB Security Policy and how to use it, see [What is Security Policy](https://www.alibabacloud.com/help/doc-detail/213607.htm).
        > **NOTE:** Available in v1.130.0+.
        ## Example Usage
        Basic Usage
        ```python
        import pulumi
        import pulumi_alicloud as alicloud
        config = pulumi.Config()
        name = config.get("name")
        if name is None:
            name = "testAccSecurityPolicy"
        default = alicloud.alb.SecurityPolicy("default",
            security_policy_name=name,
            tls_versions=["TLSv1.0"],
            ciphers=[
                "ECDHE-ECDSA-AES128-SHA",
                "AES256-SHA",
            ])
        ```
        ## Import
        ALB Security Policy can be imported using the id, e.g.
        ```sh
         $ pulumi import alicloud:alb/securityPolicy:SecurityPolicy example <id>
        ```
        :param str resource_name: The name of the resource.
        :param SecurityPolicyArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(SecurityPolicyArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
ciphers: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
dry_run: Optional[pulumi.Input[bool]] = None,
resource_group_id: Optional[pulumi.Input[str]] = None,
security_policy_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
tls_versions: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version | |
# Repository: pcie-bench/pcie-model
## Copyright (C) 2018 <NAME>. All rights reserved.
## Copyright (C) 2015 Netronome Systems, Inc. All rights reserved.
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
"""General definitions for PCIe bandwidth calculations"""
# pylint: disable=invalid-name
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-arguments
# pylint: disable=too-few-public-methods
##
## General PCIe variables from the Spec
##
Vers = ['gen1', 'gen2', 'gen3', 'gen4', 'gen5']
Laness = ['x1', 'x2', 'x4', 'x8', 'x16', 'x32']
Laness_mul = [1, 2, 4, 8, 16, 32]

# Transfer rate per lane (GT/s) for each PCIe generation
GTs = {'gen1': 2.5,
       'gen2': 5.0,
       'gen3': 8.0,
       'gen4': 16.0,
       'gen5': 32.0}

# Effective per-lane rate in Gb/s: generations running at 8 GT/s and above
# use 128b/130b symbol encoding, earlier ones use 8b/10b.
Gbs = {ver: ((128.0 / 130.0) if rate >= 8.0 else (8.0 / 10.0)) * rate
       for ver, rate in GTs.items()}

# Raw link bandwidth: per-lane Gb/s multiplied by the lane count
Raw = {ver: {lanes: Gbs[ver] * mul for lanes, mul in zip(Laness, Laness_mul)}
       for ver in Vers}
# Maximum Payload Size settings (bytes)
MPSs = [128, 256, 512, 1024, 2048, 4096]
# Maximum Read Request Size settings (bytes)
MRRSs = [128, 256, 512, 1024, 2048, 4096]
# Read Completion Boundary settings (bytes)
RCBs = [64, 128, 256, 512]
# FC Update Rate,
# see PCIe Base Spec rev 5.0 Table 2-46, 2-47, and 2-48
# Bytes on the wire for one flow-control update DLLP.
FC_Size = 8 # 2 B Phys + 4 B DLLP + 2B DLLP CRC
# Recommended FC update intervals from the spec tables above, keyed by
# generation -> link width -> Max_Payload_Size (bytes).
# NOTE(review): gen3 values are reused verbatim for gen4 and gen5 -- confirm.
# NOTE(review): 'gen1'/'x32'@2048 (248) and 'gen2'/'x32'@4096 (327) differ
# from the corresponding Ack_Limits entries (148/237) -- verify against spec.
FC_Guide = {
    'gen1' : {
        'x1' : {128: 237, 256: 416, 512: 559, 1024: 1071, 2048: 2095, 4096: 4143},
        'x2' : {128: 128, 256: 217, 512: 289, 1024: 545, 2048: 1057, 4096: 2081},
        'x4' : {128: 73, 256: 118, 512: 154, 1024: 282, 2048: 538, 4096: 1050},
        'x8' : {128: 67, 256: 107, 512: 86, 1024: 150, 2048: 278, 4096: 534},
        'x16' : {128: 48, 256: 72, 512: 86, 1024: 150, 2048: 278, 4096: 534},
        'x32' : {128: 33, 256: 45, 512: 52, 1024: 84, 2048: 248, 4096: 276},
    },
    'gen2' : {
        'x1' : {128: 288, 256: 467, 512: 610, 1024: 1122, 2048: 2146, 4096: 4194},
        'x2' : {128: 179, 256: 268, 512: 340, 1024: 596, 2048: 1108, 4096: 2132},
        'x4' : {128: 124, 256: 169, 512: 205, 1024: 333, 2048: 589, 4096: 1101},
        'x8' : {128: 118, 256: 158, 512: 137, 1024: 201, 2048: 329, 4096: 585},
        'x16' : {128: 99, 256: 123, 512: 137, 1024: 201, 2048: 329, 4096: 585},
        'x32' : {128: 84, 256: 96, 512: 103, 1024: 135, 2048: 199, 4096: 327},
    },
    'gen3' : {
        'x1' : {128: 333, 256: 512, 512: 655, 1024: 1167, 2048: 2191, 4096: 4239},
        'x2' : {128: 224, 256: 313, 512: 385, 1024: 641, 2048: 1153, 4096: 2177},
        'x4' : {128: 169, 256: 214, 512: 250, 1024: 378, 2048: 634, 4096: 1146},
        'x8' : {128: 163, 256: 203, 512: 182, 1024: 246, 2048: 374, 4096: 630},
        'x16' : {128: 144, 256: 168, 512: 182, 1024: 246, 2048: 374, 4096: 630},
        'x32' : {128: 129, 256: 141, 512: 148, 1024: 180, 2048: 244, 4096: 372},
    },
    'gen4' : {
        'x1' : {128: 333, 256: 512, 512: 655, 1024: 1167, 2048: 2191, 4096: 4239},
        'x2' : {128: 224, 256: 313, 512: 385, 1024: 641, 2048: 1153, 4096: 2177},
        'x4' : {128: 169, 256: 214, 512: 250, 1024: 378, 2048: 634, 4096: 1146},
        'x8' : {128: 163, 256: 203, 512: 182, 1024: 246, 2048: 374, 4096: 630},
        'x16' : {128: 144, 256: 168, 512: 182, 1024: 246, 2048: 374, 4096: 630},
        'x32' : {128: 129, 256: 141, 512: 148, 1024: 180, 2048: 244, 4096: 372},
    },
    'gen5' : {
        'x1' : {128: 333, 256: 512, 512: 655, 1024: 1167, 2048: 2191, 4096: 4239},
        'x2' : {128: 224, 256: 313, 512: 385, 1024: 641, 2048: 1153, 4096: 2177},
        'x4' : {128: 169, 256: 214, 512: 250, 1024: 378, 2048: 634, 4096: 1146},
        'x8' : {128: 163, 256: 203, 512: 182, 1024: 246, 2048: 374, 4096: 630},
        'x16' : {128: 144, 256: 168, 512: 182, 1024: 246, 2048: 374, 4096: 630},
        'x32' : {128: 129, 256: 141, 512: 148, 1024: 180, 2048: 244, 4096: 372},
    },
}
# Ack Limit,
# see PCIe Base Spec rev 5.0 Table 3-7, 3-8, and 3-9
# Bytes on the wire for one Ack DLLP.
Ack_Size = 8 # 2 B Phys + 4 B DLLP + 2B DLLP CRC
# Ack latency limits from the spec tables above, keyed by
# generation -> link width -> Max_Payload_Size (bytes).
# NOTE(review): gen3 values are reused verbatim for gen4 and gen5 -- confirm.
Ack_Limits = {
    'gen1' : {
        'x1' : {128: 237, 256: 416, 512: 559, 1024: 1071, 2048: 2095, 4096: 4143},
        'x2' : {128: 128, 256: 217, 512: 289, 1024: 545, 2048: 1057, 4096: 2081},
        'x4' : {128: 73, 256: 118, 512: 154, 1024: 282, 2048: 538, 4096: 1050},
        'x8' : {128: 67, 256: 107, 512: 86, 1024: 150, 2048: 278, 4096: 534},
        'x16' : {128: 48, 256: 72, 512: 86, 1024: 150, 2048: 278, 4096: 534},
        'x32' : {128: 33, 256: 45, 512: 52, 1024: 84, 2048: 148, 4096: 276},
    },
    'gen2' : {
        'x1' : {128: 288, 256: 467, 512: 610, 1024: 1122, 2048: 2146, 4096: 4194},
        'x2' : {128: 179, 256: 268, 512: 340, 1024: 596, 2048: 1108, 4096: 2132},
        'x4' : {128: 124, 256: 169, 512: 205, 1024: 333, 2048: 589, 4096: 1101},
        'x8' : {128: 118, 256: 158, 512: 137, 1024: 201, 2048: 329, 4096: 585},
        'x16' : {128: 99, 256: 123, 512: 137, 1024: 201, 2048: 329, 4096: 585},
        'x32' : {128: 84, 256: 96, 512: 103, 1024: 135, 2048: 199, 4096: 237},
    },
    'gen3' : {
        'x1' : {128: 333, 256: 512, 512: 655, 1024: 1167, 2048: 2191, 4096: 4239},
        'x2' : {128: 224, 256: 313, 512: 385, 1024: 641, 2048: 1153, 4096: 2177},
        'x4' : {128: 169, 256: 214, 512: 250, 1024: 378, 2048: 634, 4096: 1146},
        'x8' : {128: 163, 256: 203, 512: 182, 1024: 246, 2048: 374, 4096: 630},
        'x16' : {128: 144, 256: 168, 512: 182, 1024: 246, 2048: 374, 4096: 630},
        'x32' : {128: 129, 256: 141, 512: 148, 1024: 180, 2048: 244, 4096: 372},
    },
    'gen4' : {
        'x1' : {128: 333, 256: 512, 512: 655, 1024: 1167, 2048: 2191, 4096: 4239},
        'x2' : {128: 224, 256: 313, 512: 385, 1024: 641, 2048: 1153, 4096: 2177},
        'x4' : {128: 169, 256: 214, 512: 250, 1024: 378, 2048: 634, 4096: 1146},
        'x8' : {128: 163, 256: 203, 512: 182, 1024: 246, 2048: 374, 4096: 630},
        'x16' : {128: 144, 256: 168, 512: 182, 1024: 246, 2048: 374, 4096: 630},
        'x32' : {128: 129, 256: 141, 512: 148, 1024: 180, 2048: 244, 4096: 372},
    },
    'gen5' : {
        'x1' : {128: 333, 256: 512, 512: 655, 1024: 1167, 2048: 2191, 4096: 4239},
        'x2' : {128: 224, 256: 313, 512: 385, 1024: 641, 2048: 1153, 4096: 2177},
        'x4' : {128: 169, 256: 214, 512: 250, 1024: 378, 2048: 634, 4096: 1146},
        'x8' : {128: 163, 256: 203, 512: 182, 1024: 246, 2048: 374, 4096: 630},
        'x16' : {128: 144, 256: 168, 512: 182, 1024: 246, 2048: 374, 4096: 630},
        'x32' : {128: 129, 256: 141, 512: 148, 1024: 180, 2048: 244, 4096: 372},
    },
}
# SKIP ordered sets for clock compensation (inserted on all lanes)
# NOTE(review): interval/length presumably in symbol times per the spec's
# SKIP scheduling rules -- confirm.
SKIP_Interval = 1538
SKIP_Length = 4
# DLLP header (6 bytes) plus start and end symbol at Phys layer
DLLP_Hdr = 8
# Maximum Bandwidth usable at TLP layer. This takes into account the
# recommended rates for ACKs and FC updates as per spec as well as the SKIP
# ordered sets for clock compensation. The Bandwidth can be further reduced
# due to bit errors or different chipset configurations
TLP_bw = {}
for ver in Vers:
for lanes in Laness:
for mps in MPSs:
if not ver in TLP_bw:
TLP_bw[ver] = {}
if not lanes in TLP_bw[ver]:
TLP_bw[ver][lanes] = {}
ack_overhead | |
# Repository: mingwayzhang/tvm
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#pylint: disable=unused-argument,inconsistent-return-statements
"""Internal module for registering attribute for annotation."""
from __future__ import absolute_import
import warnings
import topi
from . import _quantize
from .quantize import QAnnotateKind, current_qconfig
from .quantize import annotate_context
from .. import expr as _expr
from .. import op as _op
from ..op import op as _reg
from ..base import register_relay_node
from ..._ffi.function import register_func
@_reg.register_compute("relay.op.annotation.simulated_quantize")
def simulated_quantize_compute(attrs, inputs, out_type, target):
    """Compiler for simulated_quantize."""
    # Only the (data, dom_scale, clip_min, clip_max) signature with signed,
    # round-to-nearest quantization is supported.
    assert len(inputs) == 4
    assert attrs.sign
    assert attrs.rounding == "round"
    data, scale, lo, hi = inputs
    # IDENTITY annotation is a plain pass-through of the data.
    if attrs.kind == QAnnotateKind.IDENTITY:
        return [topi.identity(data)]
    # Scale into the quantized domain, clip, round, then scale back so the
    # output carries the rounding error a real quantizer would introduce.
    quantized = topi.round(
        topi.maximum(topi.minimum(topi.divide(data, scale), hi), lo))
    return [topi.multiply(quantized, scale)]
# Schedule with the generic injective schedule and mark the op elementwise
# so it can participate in operator fusion.
_reg.register_schedule("relay.op.annotation.simulated_quantize",
                       _reg.schedule_injective)
_reg.register_pattern("relay.op.annotation.simulated_quantize",
                      _reg.OpPattern.ELEMWISE)
@register_relay_node
class QAnnotateExpr(_expr.TempExpr):
    """A special kind of Expr for Annotating.
    Parameters
    ---------
    expr: Expr
        the original relay ir expr.
    kind: QAnnotateKind
        the kind of annotation field.
    """
    def __init__(self, expr, kind):
        # Node state lives on the C++ side; construct through the FFI.
        self.__init_handle_by_constructor__(
            _quantize.make_annotate_expr, expr, kind)
def _forward_op(ref_call, args):
    """Rebuild *ref_call* with the same op, attrs and type args but new arguments."""
    return _expr.Call(
        ref_call.op, args, ref_call.attrs, ref_call.type_args)
def _get_expr_kind(anno):
    """Get the expression and QAnnotateKind from QAnnotateExpr or Expr"""
    if not isinstance(anno, QAnnotateExpr):
        # Plain expression: no annotation kind attached.
        return anno, None
    return anno.expr, anno.kind
def register_annotate_function(op_name, frewrite=None, level=10):
    """register a rewrite function for operator, used by annotation.
    Parameters
    ---------
    op_name: str
        The name of operation
    frewrite : function, optional
        The function to be registered.
    level : int, optional
        The priority level
    """
    def _passthrough(ref_call, new_args, ctx):
        # Strip QAnnotateExpr wrappers and re-emit the call unchanged.
        plain_args = [_get_expr_kind(arg)[0] for arg in new_args]
        return _forward_op(ref_call, plain_args)

    def _register(func):
        """internal register function"""
        def frewrite_with_guard(ref_call, new_args, ctx):
            # Only rewrite calls the current config allows; otherwise forward.
            if current_qconfig().guard(ref_call):
                return func(ref_call, new_args, ctx)
            return _passthrough(ref_call, new_args, ctx)
        _op.op._Register(op_name, "FQAnnotateRewrite", frewrite_with_guard, level)
        return frewrite_with_guard

    # Support both direct-call and decorator usage.
    if frewrite is None:
        return _register
    return _register(frewrite)
def attach_simulated_quantize(data, kind, sign=True, rounding="round"):
    """Attach a simulated quantize operation after input data expr.
    Parameters
    ---------
    data: Expr
        the original data expr.
    kind: QAnnotateKind
        the kind of annotation field.
    """
    sim_quantize_op = _op.get("relay.op.annotation.simulated_quantize")
    # If the data is already wrapped by an identical simulated_quantize,
    # return it as-is instead of stacking another one.
    if isinstance(data, _expr.Call) and data.op == sim_quantize_op \
            and data.attrs.kind == kind \
            and data.attrs.sign == sign \
            and data.attrs.rounding == rounding:
        return data
    actx = annotate_context()
    cache_key = (data, kind, sign, rounding)
    # Reuse a previously created node for the same (data, settings) tuple.
    if cache_key in actx.qnode_map:
        return actx.qnode_map[cache_key]
    node = _quantize.simulated_quantize(
        data, _expr.var("dom_scale"), _expr.var("clip_min"),
        _expr.var("clip_max"), kind, sign, rounding)
    actx.qnode_map[cache_key] = node
    return node
# Expose attach_simulated_quantize to the C++ annotation pass via the FFI.
register_func("relay.quantize.attach_simulated_quantize", attach_simulated_quantize)
@register_annotate_function("nn.contrib_conv2d_NCHWc")
def conv2d_nchwc_rewrite(ref_call, new_args, ctx):
    """Warn that NCHWc-layout conv2d is not annotated for quantization.
    Returns None implicitly, so no annotation is attached to the call.
    """
    warnings.warn("NCHWc layout Conv2D detected, please use a lower "
                  "optimization level before applying the quantization "
                  "pass as quantization will have no effect here...")
@register_annotate_function("nn.conv2d")
def conv2d_rewrite(ref_call, new_args, ctx):
    """Rewrite function for conv2d. Lhs of conv will be quantized to
    input field, and rhs of conv will be quantized to weight field.
    Output would be in activation field"""
    actx = annotate_context()
    cfg = current_qconfig()
    if cfg.skip_conv_layers is not None:
        # Skip this conv layer when its index is configured to be skipped,
        # still advancing the per-graph conv2d counter.
        skipped = {int(x) for x in cfg.skip_conv_layers}
        if actx.conv2d_counter() in skipped:
            actx.count_conv2d()
            return None
    actx.count_conv2d()
    data_expr, data_kind = _get_expr_kind(new_args[0])
    weight_expr, weight_kind = _get_expr_kind(new_args[1])
    if data_kind is None or data_kind == QAnnotateKind.ACTIVATION:
        data_expr = attach_simulated_quantize(data_expr, QAnnotateKind.INPUT)
    assert weight_kind is None
    weight_expr = attach_simulated_quantize(weight_expr, QAnnotateKind.WEIGHT)
    return QAnnotateExpr(_forward_op(ref_call, [data_expr, weight_expr]),
                         QAnnotateKind.ACTIVATION)
def check_to_skip():
    """Check the index of conv2d layer to decide whether to skip the current operator."""
    cfg = current_qconfig()
    if cfg.skip_conv_layers is None:
        return False
    # The counter was already advanced for the current conv layer, hence -1.
    skipped = {int(x) for x in cfg.skip_conv_layers}
    return annotate_context().conv2d_counter() - 1 in skipped
# TODO(tmoreau89,ziheng) need to include an option to turn off dense quant
# @register_annotate_function("nn.dense")
def dense_rewrite(ref_call, new_args, ctx):
    """Rewrite function for dense. Lhs of dense will be quantized to input field, and rhs of
    dense will be quantized to weight field. Output would be in activation field."""
    if check_to_skip():
        return None
    data_expr, data_kind = _get_expr_kind(new_args[0])
    weight_expr, weight_kind = _get_expr_kind(new_args[1])
    if data_kind is None or data_kind == QAnnotateKind.ACTIVATION:
        data_expr = attach_simulated_quantize(data_expr, QAnnotateKind.INPUT)
    assert weight_kind is None
    weight_expr = attach_simulated_quantize(weight_expr, QAnnotateKind.WEIGHT)
    return QAnnotateExpr(_forward_op(ref_call, [data_expr, weight_expr]),
                         QAnnotateKind.ACTIVATION)
@register_annotate_function("multiply")
def multiply_rewrite(ref_call, new_args, ctx):
    """Rewrite function for multiply.
    Only the combination "lhs annotated (INPUT/ACTIVATION), rhs plain" is
    supported; every other annotated combination raises ValueError.
    """
    if check_to_skip():
        return None
    lhs_expr, lhs_kind = _get_expr_kind(new_args[0])
    rhs_expr, rhs_kind = _get_expr_kind(new_args[1])
    if lhs_kind is None and rhs_kind is None:
        # Neither operand is annotated: leave the multiply untouched.
        return None
    if lhs_kind in (QAnnotateKind.ACTIVATION, QAnnotateKind.INPUT) and rhs_kind is None:
        # quantize lhs to INPUT field
        if lhs_kind == QAnnotateKind.ACTIVATION:
            lhs_expr = attach_simulated_quantize(lhs_expr, QAnnotateKind.INPUT)
        # quantize rhs to WEIGHT field
        rhs_expr = attach_simulated_quantize(rhs_expr, QAnnotateKind.WEIGHT)
        expr = _forward_op(ref_call, [lhs_expr, rhs_expr])
        return QAnnotateExpr(expr, QAnnotateKind.ACTIVATION)
    # Fix: the original bare `raise ValueError` carried no diagnostic; report
    # which unsupported kind combination was encountered.
    raise ValueError("unsupported annotation kinds for multiply: "
                     "lhs_kind={}, rhs_kind={}".format(lhs_kind, rhs_kind))
@register_annotate_function("add")
def add_rewrite(ref_call, new_args, ctx):
    """Rewrite function for add.
    Handles each combination of operand annotation kinds separately; see the
    per-branch comments for the chosen quantization of each operand.
    """
    if check_to_skip():
        return None
    lhs_expr, lhs_kind = _get_expr_kind(new_args[0])
    rhs_expr, rhs_kind = _get_expr_kind(new_args[1])
    if lhs_kind is None and rhs_kind is None:
        # Neither operand is annotated: nothing to rewrite.
        return None
    if lhs_kind is None and rhs_kind is not None:
        # quantize lhs to INPUT field if it is normal expression
        assert rhs_kind in [QAnnotateKind.INPUT, QAnnotateKind.ACTIVATION]
        lhs_expr = attach_simulated_quantize(lhs_expr, QAnnotateKind.INPUT)
        expr = _forward_op(ref_call, [lhs_expr, rhs_expr])
        return QAnnotateExpr(expr, QAnnotateKind.INPUT)
    if lhs_kind is not None and rhs_kind is None:
        if isinstance(rhs_expr, _expr.Constant):
            # quantize rhs to WEIGHT field if it is Constant
            rhs_expr = attach_simulated_quantize(rhs_expr, QAnnotateKind.WEIGHT)
        else:
            # quantize rhs to INPUT field if it is not Constant
            rhs_expr = attach_simulated_quantize(rhs_expr, QAnnotateKind.INPUT)
        expr = _forward_op(ref_call, [lhs_expr, rhs_expr])
        return QAnnotateExpr(expr, QAnnotateKind.ACTIVATION)
    if lhs_kind is not None and rhs_kind is not None:
        if lhs_kind == QAnnotateKind.INPUT and rhs_kind == QAnnotateKind.INPUT:
            expr = _forward_op(ref_call, [lhs_expr, rhs_expr])
            return QAnnotateExpr(expr, QAnnotateKind.INPUT)
        if lhs_kind == QAnnotateKind.ACTIVATION and rhs_kind == QAnnotateKind.ACTIVATION:
            # quantize rhs to INPUT field if both lhs and rhs are ACTIVATION
            rhs_expr = attach_simulated_quantize(rhs_expr, QAnnotateKind.INPUT)
            expr = _forward_op(ref_call, [lhs_expr, rhs_expr])
            return QAnnotateExpr(expr, QAnnotateKind.ACTIVATION)
        if (lhs_kind == QAnnotateKind.ACTIVATION and rhs_kind == QAnnotateKind.INPUT) or \
            (lhs_kind == QAnnotateKind.INPUT and rhs_kind == QAnnotateKind.ACTIVATION):
            expr = _forward_op(ref_call, [lhs_expr, rhs_expr])
            return QAnnotateExpr(expr, QAnnotateKind.ACTIVATION)
    # Reached only for kind combinations not covered above.
    raise ValueError()
@register_annotate_function("stop_fusion")
def stop_fusion_rewrite(ref_call, new_args, ctx):
    """Rewrite function for stop_fusion."""
    if check_to_skip():
        return None
    x_expr, x_kind = _get_expr_kind(new_args[0])
    if x_kind is None:
        return None
    ret_expr = attach_simulated_quantize(x_expr, QAnnotateKind.INPUT)
    ret_expr = _forward_op(ref_call, [ret_expr])
    return QAnnotateExpr(ret_expr, QAnnotateKind.INPUT)
def identity_rewrite(ref_call, new_args, ctx):
    """Simply forward the original operation"""
    if check_to_skip():
        return None
    arg_expr, arg_kind = _get_expr_kind(new_args[0])
    if arg_kind is None:
        # Plain input: leave the call unannotated.
        return None
    return QAnnotateExpr(_forward_op(ref_call, [arg_expr]), arg_kind)
# Ops whose annotation kind simply follows that of their input.
register_annotate_function("clip", identity_rewrite)
register_annotate_function("nn.relu", identity_rewrite)
register_annotate_function("strided_slice", identity_rewrite)
register_annotate_function("nn.avg_pool2d", identity_rewrite)
register_annotate_function("annotation.stop_fusion", identity_rewrite)
def pool2d_rewrite(ref_call, new_args, ctx):
    """Rewrite function for max pool2d"""
    if check_to_skip():
        return None
    arg_expr, arg_kind = _get_expr_kind(new_args[0])
    if arg_kind is None:
        return None
    # ACTIVATION inputs are requantized down to the INPUT field first.
    if arg_kind == QAnnotateKind.ACTIVATION:
        arg_expr = attach_simulated_quantize(arg_expr, QAnnotateKind.INPUT)
    return QAnnotateExpr(_forward_op(ref_call, [arg_expr]), QAnnotateKind.INPUT)

register_annotate_function("nn.max_pool2d", pool2d_rewrite)
@register_annotate_function("annotation.force_cast")
def force_cast_rewrite(ref_call, new_args, ctx):
    """Rewrite function to force cast"""
    if check_to_skip():
        return None
    arg_expr, arg_kind = _get_expr_kind(new_args[0])
    if arg_kind is None:
        # Plain expression: hand back the argument untouched.
        return new_args[0]
    if arg_kind == QAnnotateKind.ACTIVATION:
        arg_expr = attach_simulated_quantize(arg_expr, QAnnotateKind.INPUT)
    return QAnnotateExpr(_forward_op(ref_call, [arg_expr]), QAnnotateKind.INPUT)
@register_annotate_function("concatenate")
def concatenate_rewrite(ref_call, new_args, ctx):
    """Rewrite function for concatenate"""
    if check_to_skip():
        return None
    annotated = [_get_expr_kind(field) for field in new_args[0]]
    exprs = [pair[0] for pair in annotated]
    kinds = [pair[1] for pair in annotated]
    # Only rewrite when at least one tuple field carries an annotation.
    if not any(kind is not None for kind in kinds):
        return None
    # Promote un-annotated fields so every field is an annotate expression.
    for idx, kind in enumerate(kinds):
        if kind is None:
            exprs[idx] = attach_simulated_quantize(exprs[idx], QAnnotateKind.ACTIVATION)
    new_call = _forward_op(ref_call, [_expr.Tuple(exprs)])
    return QAnnotateExpr(new_call, QAnnotateKind.ACTIVATION)
# Graph rewrite function registration for VTA target
def register_vta_rewrite(op_name, frewrite=None, level=10):
    """Register a graph-rewrite function for *op_name* on the VTA target."""
    def _register(func):
        return _op.op._Register(op_name, "FQVTARewrite", func, level)
    # Support both direct-call and decorator usage.
    if frewrite is None:
        return _register
    return _register(frewrite)
@register_relay_node
class QVTAExpr(_expr.TempExpr):
    """Temporary expression wrapper used by the VTA graph-rewrite pass."""
    def __init__(self, expr):
        # Node state lives on the C++ side; construct through the FFI.
        self.__init_handle_by_constructor__(
            _quantize.make_vta_expr, expr)
    def realize(self):
        # Convert back to a concrete relay expression via the FFI.
        return _quantize.temp_expr_realize(self)
def vta_expr_check(expr):
    """Return (is_vta_wrapped, inner_expr), unwrapping QVTAExpr wrappers."""
    wrapped = isinstance(expr, QVTAExpr)
    return (True, expr.expr) if wrapped else (False, expr)
@register_vta_rewrite("nn.conv2d")
def conv2d_vta_rewrite(ref_call, new_args, ctx):
"""Rewrite function for conv2d for VTA target"""
actx = annotate_context()
if current_qconfig().skip_conv_layers is not None:
skipped_indices = [int(x) for x in current_qconfig().skip_conv_layers]
if actx.conv2d_counter() in skipped_indices:
actx.count_conv2d()
return None
actx.count_conv2d()
data_cond, data = vta_expr_check(new_args[0])
kernel_cond, kernel = vta_expr_check(new_args[1])
assert not | |
length is equal to the
number of frequency channels, it will be assumed
identical for all interferometers. If a 2D array
is provided, it should be of size
n_baselines x nchan. Tsys = Tnet
vis_freq [numpy array] The simulated complex visibility (in Jy or K)
observed by each of the interferometers along frequency axis for
each timestamp of observation per frequency channel. It is the
sum of skyvis_freq and vis_noise_freq. It can be either directly
initialized or simulated in observe(). Same dimensions as
skyvis_freq.
vis_lag [numpy array] The simulated complex visibility (in Jy Hz or K Hz)
along delay axis for each interferometer obtained by FFT of
vis_freq along frequency axis. Same size as vis_noise_lag and
               skyvis_lag. It is evaluated in member function delay_transform().
vis_noise_freq
[numpy array] Complex visibility noise (in Jy or K) generated
using an rms of vis_rms_freq along frequency axis for each
interferometer which is then added to the generated sky
visibility. Same dimensions as skyvis_freq. Used in the member
function observe(). Read its docstring for more details.
vis_noise_lag
[numpy array] Complex visibility noise (in Jy Hz or K Hz) along
delay axis for each interferometer generated using an FFT of
vis_noise_freq along frequency axis. Same size as vis_noise_freq.
Created in the member function delay_transform(). Read its
docstring for more details.
vis_rms_freq
[list of float] Theoretically estimated thermal noise rms (in Jy
or K) in visibility measurements. Same size as vis_freq. This
will be estimated and used to inject simulated noise when a call
to member function observe() is made. Read the docstring of
observe() for more details. The noise rms is estimated from the
instrument parameters as:
(2 k T_sys / (A_eff x sqrt(2 x channel_width x t_acc))) / Jy, or
T_sys / sqrt(2 x channel_width x t_acc)
simparms_file
[string] Full path to filename containing simulation parameters
in YAML format
Member functions:
__init__() Initializes an instance of class InterferometerArray
observe() Simulates an observing run with the interferometer
specifications and an external sky catalog thus producing
visibilities. The simulation generates visibilities
observed by the interferometer for the specified
parameters.
observing_run() Simulate an extended observing run in 'track' or 'drift'
mode, by an instance of the InterferometerArray class, of
the sky when a sky catalog is provided. The simulation
generates visibilities observed by the interferometer
array for the specified parameters. Uses member function
observe() and builds the observation from snapshots. The
timestamp for each snapshot is the current time at which
the snapshot is generated.
generate_noise() Generates thermal noise from attributes that describe
system parameters which can be added to sky visibilities
add_noise() Adds the thermal noise generated in member function
generate_noise() to the sky visibilities after
extracting and applying complex instrument gains
apply_gradients() Apply the perturbations in combination with the
gradients to determine perturbed visibilities
duplicate_measurements()
Duplicate visibilities based on redundant baselines
specified. This saves time when compared to simulating
visibilities over redundant baselines. Thus, it is more
efficient to simulate unique baselines and duplicate
measurements for redundant baselines
getBaselineGroupKeys()
Find redundant baseline group keys of groups that
contain the input baseline labels
getBaselinesInGroups()
Find all redundant baseline labels in groups that
contain the given input baseline labels
getThreePointCombinations()
                        Return all or only unique 3-point combinations of
baselines
getClosurePhase() Get closure phases of visibilities from triplets of
antennas
rotate_visibilities()
Centers the phase of visibilities around any given phase
center. Project baseline vectors with respect to a
reference point on the sky. Essentially a wrapper to
member functions phase_centering() and
project_baselines()
phase_centering() Centers the phase of visibilities around any given phase
center.
project_baselines() Project baseline vectors with respect to a reference
point on the sky. Assigns the projected baselines to the
attribute projected_baselines
    conjugate()         Flips the baseline vectors and conjugates the visibilities
for a specified subset of baselines.
delay_transform() Transforms the visibilities from frequency axis onto
delay (time) axis using an IFFT. This is performed for
noiseless sky visibilities, thermal noise in visibilities,
and observed visibilities.
concatenate() Concatenates different visibility data sets from instances
of class InterferometerArray along baseline, frequency or
time axis.
save() Saves the interferometer array information to disk in
HDF5, FITS, NPZ and UVFITS formats
pyuvdata_write() Saves the interferometer array information to disk in
various formats through pyuvdata module
----------------------------------------------------------------------------
"""
def __init__(self, labels, baselines, channels, telescope=None, eff_Q=0.89,
latitude=34.0790, longitude=0.0, altitude=0.0,
skycoords='radec', A_eff=NP.pi*(25.0/2)**2,
pointing_coords='hadec', layout=None, blgroupinfo=None,
baseline_coords='localenu', freq_scale=None, gaininfo=None,
init_file=None, simparms_file=None):
"""
------------------------------------------------------------------------
Intialize the InterferometerArray class which manages information on a
multi-element interferometer.
Class attributes initialized are:
labels, baselines, channels, telescope, latitude, longitude, altitude,
skycoords, eff_Q, A_eff, pointing_coords, baseline_coords,
baseline_lengths, channels, bp, bp_wts, freq_resolution, lags, lst,
obs_catalog_indices, pointing_center, skyvis_freq, skyvis_lag,
timestamp, t_acc, Tsys, Tsysinfo, vis_freq, vis_lag, t_obs, n_acc,
vis_noise_freq, vis_noise_lag, vis_rms_freq, geometric_delays,
projected_baselines, simparms_file, layout, gradient, gradient_mode,
gaininfo, blgroups, bl_reversemap
Read docstring of class InterferometerArray for details on these
attributes.
Keyword input(s):
init_file [string] Location of the initialization file from which an
instance of class InterferometerArray will be created.
File format must be compatible with the one saved to disk
by member function save().
simparms_file
[string] Location of the simulation parameters in YAML
format that went into making the simulated data product
Other input parameters have their usual meanings. Read the docstring of
class InterferometerArray for details on these inputs.
------------------------------------------------------------------------
"""
argument_init = False
init_file_success = False
if init_file is not None:
try:
with h5py.File(init_file+'.hdf5', 'r') as fileobj:
self.simparms_file = None
self.latitude = 0.0
self.longitude = 0.0
self.altitude = 0.0
self.skycoords = 'radec'
self.flux_unit = 'JY'
self.telescope = {}
self.telescope['shape'] = 'delta'
self.telescope['size'] = 1.0
self.telescope['groundplane'] = None
self.Tsysinfo = []
self.layout = {}
self.blgroups = None
self.bl_reversemap = None
self.lags = None
self.vis_lag = None
self.skyvis_lag = None
self.vis_noise_lag = None
self.gradient_mode = None
self.gradient = {}
self.gaininfo = None
for key in ['header', 'telescope_parms', 'spectral_info', 'simparms', 'antenna_element', 'timing', 'skyparms', 'array', 'layout', 'instrument', 'visibilities', 'gradients', 'gaininfo', 'blgroupinfo']:
try:
grp = fileobj[key]
except KeyError:
if key in ['gradients', 'gaininfo']:
pass
elif key not in ['simparms', 'blgroupinfo']:
raise KeyError('Key {0} not found in init_file'.format(key))
if key == 'header':
self.flux_unit = grp['flux_unit'].value
if key == 'telescope_parms':
if 'latitude' in grp:
self.latitude = grp['latitude'].value
if 'longitude' in grp:
self.longitude = grp['longitude'].value
if 'altitude' in grp:
self.altitude = grp['altitude'].value
if 'id' in grp:
self.telescope['id'] = grp['id'].value
if key == 'layout':
if 'positions' in grp:
self.layout['positions'] = grp['positions'].value
else:
raise KeyError('Antenna layout positions is missing')
try:
self.layout['coords'] = grp['positions'].attrs['coords']
except KeyError:
raise KeyError('Antenna layout position coordinate system is missing')
if 'labels' in grp:
self.layout['labels'] = grp['labels'].value
else:
raise KeyError('Layout antenna labels is missing')
if 'ids' in grp:
self.layout['ids'] = grp['ids'].value
else:
raise KeyError('Layout antenna ids is missing')
if key == 'antenna_element':
if 'shape' in grp:
self.telescope['shape'] = grp['shape'].value
if 'size' in grp:
self.telescope['size'] = grp['size'].value
if 'ocoords' in grp:
self.telescope['ocoords'] = grp['ocoords'].value
else:
raise KeyError('Keyword "ocoords" not found in init_file')
if 'orientation' in grp:
self.telescope['orientation'] = grp['orientation'].value.reshape(1,-1)
else:
raise KeyError('Key "orientation" not found in init_file')
if 'groundplane' in grp:
self.telescope['groundplane'] = grp['groundplane'].value
if key == 'simparms':
if 'simfile' in grp:
self.simparms_file = grp['simfile'].value
if key == 'spectral_info':
self.freq_resolution = grp['freq_resolution'].value
self.channels = grp['freqs'].value
if 'lags' in grp:
self.lags = grp['lags'].value
if 'bp' in grp:
self.bp = grp['bp'].value
else:
raise KeyError('Key "bp" not found in init_file')
if 'bp_wts' in grp:
    self.bp_wts = grp['bp_wts'].value
else:
    # Fall back to uniform weights when the file carries no 'bp_wts' dataset.
    # NOTE: the previous code re-read grp['bp_wts'] unconditionally after this
    # else-branch, which defeated the fallback and raised KeyError.
    self.bp_wts = NP.ones_like(self.bp)
if key == 'skyparms':
if 'pointing_coords' in grp:
self.pointing_coords = grp['pointing_coords'].value
if 'phase_center_coords' in grp:
self.phase_center_coords = grp['phase_center_coords'].value
if 'skycoords' in grp:
self.skycoords = grp['skycoords'].value
self.lst = grp['LST'].value
self.pointing_center = grp['pointing_center'].value
self.phase_center = grp['phase_center'].value
if key == 'timing':
if 'timestamps' in grp:
self.timestamp = grp['timestamps'].value.tolist()
else:
raise KeyError('Key "timestamps" not found in init_file')
if 't_acc' in grp:
self.t_acc = grp['t_acc'].value.tolist()
self.t_obs = grp['t_obs'].value
self.n_acc = grp['n_acc'].value
else:
raise KeyError('Key "t_acc" not found in init_file')
if key == 'instrument':
if | |
only the aspects affected by the change in the snapshot."""
return self._inner_dict.get('proposedSnapshot') # type: ignore
@proposedSnapshot.setter
def proposedSnapshot(self, value: Union["ChartSnapshotClass", "CorpGroupSnapshotClass", "CorpUserSnapshotClass", "DashboardSnapshotClass", "DataFlowSnapshotClass", "DataJobSnapshotClass", "DatasetSnapshotClass", "DataProcessSnapshotClass", "DataPlatformSnapshotClass", "MLModelSnapshotClass", "MLPrimaryKeySnapshotClass", "MLFeatureSnapshotClass", "MLFeatureTableSnapshotClass", "TagSnapshotClass", "GlossaryTermSnapshotClass", "GlossaryNodeSnapshotClass"]) -> None:
    """Setter: Snapshot of the proposed metadata change. Include only the aspects affected by the change in the snapshot."""
    # Generated accessor: writes straight through to the backing dict.
    self._inner_dict['proposedSnapshot'] = value
@property
def proposedDelta(self) -> None:
    """Getter: Delta of the proposed metadata partial update."""
    # NOTE(review): annotated None — the generated schema appears to pin this
    # field to null; the stored value is returned verbatim. Confirm against
    # the source .pdl/.avsc model.
    return self._inner_dict.get('proposedDelta') # type: ignore
@proposedDelta.setter
def proposedDelta(self, value: None) -> None:
    """Setter: Delta of the proposed metadata partial update."""
    self._inner_dict['proposedDelta'] = value
class ArrayTypeClass(DictWrapper):
    """Array field type."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.schema.ArrayType")

    def __init__(self, nestedType: Union[None, List[str]] = None):
        super().__init__()
        self.nestedType = nestedType

    @classmethod
    def construct_with_defaults(cls) -> "ArrayTypeClass":
        # Build an empty wrapper first, then populate schema defaults.
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        # Pull the default straight from the compiled Avro schema.
        self.nestedType = self.RECORD_SCHEMA.field_map["nestedType"].default

    @property
    def nestedType(self) -> Union[None, List[str]]:
        """List of types this array holds."""
        return self._inner_dict.get('nestedType')  # type: ignore

    @nestedType.setter
    def nestedType(self, value: Union[None, List[str]]) -> None:
        """List of types this array holds."""
        self._inner_dict['nestedType'] = value
class BinaryJsonSchemaClass(DictWrapper):
    """Schema text of binary JSON schema."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.schema.BinaryJsonSchema")

    def __init__(self, schema: str):
        super().__init__()
        self.schema = schema

    @classmethod
    def construct_with_defaults(cls) -> "BinaryJsonSchemaClass":
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        # A missing schema defaults to the empty string.
        self.schema = ""

    @property
    def schema(self) -> str:
        """The native schema text for binary JSON file format."""
        return self._inner_dict.get('schema')  # type: ignore

    @schema.setter
    def schema(self, value: str) -> None:
        """The native schema text for binary JSON file format."""
        self._inner_dict['schema'] = value
class BooleanTypeClass(DictWrapper):
    """Boolean field type."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.schema.BooleanType")

    def __init__(self):
        # No fields beyond the DictWrapper backing store.
        super().__init__()

    @classmethod
    def construct_with_defaults(cls) -> "BooleanTypeClass":
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        # Field-less record: nothing to reset.
        pass
class BytesTypeClass(DictWrapper):
    """Bytes field type."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.schema.BytesType")

    def __init__(self):
        # No fields beyond the DictWrapper backing store.
        super().__init__()

    @classmethod
    def construct_with_defaults(cls) -> "BytesTypeClass":
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        # Field-less record: nothing to reset.
        pass
class DatasetFieldForeignKeyClass(DictWrapper):
    """For non-URN based foreign keys."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.schema.DatasetFieldForeignKey")

    def __init__(self, parentDataset: str, currentFieldPaths: List[str], parentField: str):
        super().__init__()
        self.parentDataset = parentDataset
        self.currentFieldPaths = currentFieldPaths
        self.parentField = parentField

    @classmethod
    def construct_with_defaults(cls) -> "DatasetFieldForeignKeyClass":
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        # Reset every field to its empty value.
        self.parentDataset = ""
        self.currentFieldPaths = []
        self.parentField = ""

    @property
    def parentDataset(self) -> str:
        """Dataset that stores the resource."""
        return self._inner_dict.get('parentDataset')  # type: ignore

    @parentDataset.setter
    def parentDataset(self, value: str) -> None:
        """Dataset that stores the resource."""
        self._inner_dict['parentDataset'] = value

    @property
    def currentFieldPaths(self) -> List[str]:
        """List of fields in hosting(current) SchemaMetadata that conform a foreign key. List can contain a single entry or multiple entries if several entries in hosting schema conform a foreign key in a single parent dataset."""
        return self._inner_dict.get('currentFieldPaths')  # type: ignore

    @currentFieldPaths.setter
    def currentFieldPaths(self, value: List[str]) -> None:
        """List of fields in hosting(current) SchemaMetadata that conform a foreign key. List can contain a single entry or multiple entries if several entries in hosting schema conform a foreign key in a single parent dataset."""
        self._inner_dict['currentFieldPaths'] = value

    @property
    def parentField(self) -> str:
        """SchemaField@fieldPath that uniquely identify field in parent dataset that this field references."""
        return self._inner_dict.get('parentField')  # type: ignore

    @parentField.setter
    def parentField(self, value: str) -> None:
        """SchemaField@fieldPath that uniquely identify field in parent dataset that this field references."""
        self._inner_dict['parentField'] = value
class DateTypeClass(DictWrapper):
    """Date field type."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.schema.DateType")

    def __init__(self):
        # No fields beyond the DictWrapper backing store.
        super().__init__()

    @classmethod
    def construct_with_defaults(cls) -> "DateTypeClass":
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        # Field-less record: nothing to reset.
        pass
class EditableSchemaFieldInfoClass(DictWrapper):
    """SchemaField to describe metadata related to dataset schema."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.schema.EditableSchemaFieldInfo")

    def __init__(self,
                 fieldPath: str,
                 description: Union[None, str]=None,
                 globalTags: Union[None, "GlobalTagsClass"]=None,
                 ):
        super().__init__()
        self.fieldPath = fieldPath
        self.description = description
        self.globalTags = globalTags

    @classmethod
    def construct_with_defaults(cls) -> "EditableSchemaFieldInfoClass":
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        # fieldPath resets to empty; optional fields take their schema defaults.
        self.fieldPath = ""
        self.description = self.RECORD_SCHEMA.field_map["description"].default
        self.globalTags = self.RECORD_SCHEMA.field_map["globalTags"].default

    @property
    def fieldPath(self) -> str:
        """FieldPath uniquely identifying the SchemaField this metadata is associated with."""
        return self._inner_dict.get('fieldPath')  # type: ignore

    @fieldPath.setter
    def fieldPath(self, value: str) -> None:
        """FieldPath uniquely identifying the SchemaField this metadata is associated with."""
        self._inner_dict['fieldPath'] = value

    @property
    def description(self) -> Union[None, str]:
        """Description."""
        return self._inner_dict.get('description')  # type: ignore

    @description.setter
    def description(self, value: Union[None, str]) -> None:
        """Description."""
        self._inner_dict['description'] = value

    @property
    def globalTags(self) -> Union[None, "GlobalTagsClass"]:
        """Tags associated with the field."""
        return self._inner_dict.get('globalTags')  # type: ignore

    @globalTags.setter
    def globalTags(self, value: Union[None, "GlobalTagsClass"]) -> None:
        """Tags associated with the field."""
        self._inner_dict['globalTags'] = value
class EditableSchemaMetadataClass(DictWrapper):
    """EditableSchemaMetadata stores editable changes made to schema metadata. This separates changes made from
    ingestion pipelines and edits in the UI to avoid accidental overwrites of user-provided data by ingestion pipelines."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.schema.EditableSchemaMetadata")

    def __init__(self,
                 editableSchemaFieldInfo: List["EditableSchemaFieldInfoClass"],
                 created: Optional["AuditStampClass"]=None,
                 lastModified: Optional["AuditStampClass"]=None,
                 deleted: Union[None, "AuditStampClass"]=None,
                 ):
        super().__init__()
        # Missing audit stamps fall back to the schema default
        # ({'actor': 'urn:li:corpuser:unknown', 'impersonator': None, 'time': 0}).
        self.created = created if created is not None else self._schema_default("created")
        self.lastModified = lastModified if lastModified is not None else self._schema_default("lastModified")
        self.deleted = deleted
        self.editableSchemaFieldInfo = editableSchemaFieldInfo

    def _schema_default(self, field_name):
        # Decode a field's Avro default value into a live object.
        field = self.RECORD_SCHEMA.field_map[field_name]
        return _json_converter.from_json_object(field.default, writers_schema=field.type)

    @classmethod
    def construct_with_defaults(cls) -> "EditableSchemaMetadataClass":
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        self.created = self._schema_default("created")
        self.lastModified = self._schema_default("lastModified")
        self.deleted = self.RECORD_SCHEMA.field_map["deleted"].default
        self.editableSchemaFieldInfo = []

    @property
    def created(self) -> "AuditStampClass":
        """An AuditStamp corresponding to the creation of this resource/association/sub-resource. A value of 0 for time indicates missing data."""
        return self._inner_dict.get('created')  # type: ignore

    @created.setter
    def created(self, value: "AuditStampClass") -> None:
        """An AuditStamp corresponding to the creation of this resource/association/sub-resource. A value of 0 for time indicates missing data."""
        self._inner_dict['created'] = value

    @property
    def lastModified(self) -> "AuditStampClass":
        """An AuditStamp corresponding to the last modification of this resource/association/sub-resource. If no modification has happened since creation, lastModified should be the same as created. A value of 0 for time indicates missing data."""
        return self._inner_dict.get('lastModified')  # type: ignore

    @lastModified.setter
    def lastModified(self, value: "AuditStampClass") -> None:
        """An AuditStamp corresponding to the last modification of this resource/association/sub-resource. If no modification has happened since creation, lastModified should be the same as created. A value of 0 for time indicates missing data."""
        self._inner_dict['lastModified'] = value

    @property
    def deleted(self) -> Union[None, "AuditStampClass"]:
        """An AuditStamp corresponding to the deletion of this resource/association/sub-resource. Logically, deleted MUST have a later timestamp than creation. It may or may not have the same time as lastModified depending upon the resource/association/sub-resource semantics."""
        return self._inner_dict.get('deleted')  # type: ignore

    @deleted.setter
    def deleted(self, value: Union[None, "AuditStampClass"]) -> None:
        """An AuditStamp corresponding to the deletion of this resource/association/sub-resource. Logically, deleted MUST have a later timestamp than creation. It may or may not have the same time as lastModified depending upon the resource/association/sub-resource semantics."""
        self._inner_dict['deleted'] = value

    @property
    def editableSchemaFieldInfo(self) -> List["EditableSchemaFieldInfoClass"]:
        """Client provided a list of fields from document schema."""
        return self._inner_dict.get('editableSchemaFieldInfo')  # type: ignore

    @editableSchemaFieldInfo.setter
    def editableSchemaFieldInfo(self, value: List["EditableSchemaFieldInfoClass"]) -> None:
        """Client provided a list of fields from document schema."""
        self._inner_dict['editableSchemaFieldInfo'] = value
class EnumTypeClass(DictWrapper):
    """Enum field type."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.schema.EnumType")

    def __init__(self):
        # No fields beyond the DictWrapper backing store.
        super().__init__()

    @classmethod
    def construct_with_defaults(cls) -> "EnumTypeClass":
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        # Field-less record: nothing to reset.
        pass
class EspressoSchemaClass(DictWrapper):
"""Schema text of an espresso table schema."""
RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.schema.EspressoSchema")
def __init__(self,
documentSchema: str,
tableSchema: str,
):
super().__init__()
self.documentSchema = documentSchema
self.tableSchema = tableSchema
@classmethod
def construct_with_defaults(cls) -> "EspressoSchemaClass":
self = cls.construct({})
self._restore_defaults()
return self
def _restore_defaults(self) -> | |
"s004" # 0x050C cia402sts[0-5]
S005 = "s005" # 0x050E sts[0-5]
S006 = "s006" # 0x0510 rtn[0-5]
S007 = "s007" # 0x0512 cia402err[0-5]
S008 = "s008" # 0x0514 alarm[0-5]
S009 = "s009" # 0x0518 targetplsfb[0-5]
S010 = "s010" # 0x051C cia402actualpls[0-5]
S011 = "s011" # 0x0520 cia402followingerr[0-5]
S012 = "s012" # 0x0524 observer_output_value[0-5]
S013 = "s013" # 0x0528 torque[0-5]
S014 = "s014" # 0x052A thermal[0-5]
S015 = "s015" # 0x052C disturbance[0-5]
S016 = "s016" # 0x052E gainrate[0-5]
S017 = "s017" # 0x0530 polerate[0-5]
S018 = "s018" # 0x0532 filtered_torque[0-5]
S019 = "s019" # 0x0534 filtered_velocity[0-5]
S020 = "s020" # 0x0536 filtered_D[0-5]
# Was a duplicate S020 assignment; renamed to continue the s0NN register
# sequence (addresses step by 2: 0x0536 filtered_D, 0x0538 filtered_Q).
S021 = "s021" # 0x0538 filtered_Q[0-5]
# Force torque sensor information
F000 = "f000" # 0x0700 sts
F001 = "f001" # 0x0701 gain_sts
F100 = "f100" # 0x0710 zero_point[0-7]
F200 = "f200" # 0x0720 raw_value[0-7]
F300 = "f300" # 0x0730 gain[0-7]
# System management block information
Y000 = "y000" # 0x0800 robtask_name[0-31]
Y001 = "y001" # 0x0820 running_name[0-31]
Y002 = "y002" # 0x0840 running_pid
Y003 = "y003" # 0x0844 assign_port[0]
Y004 = "y004" # 0x0846 assign_port[1]
Y005 = "y005" # 0x0848 assign_port[2]
Y006 = "y006" # 0x084A assign_port[3]
Y007 = "y007" # 0x084C assign_port[4]
Y008 = "y008" # 0x084E assign_port[5]  (address follows the +2 sequence 0x0844..0x0852; 0x085E was a typo)
Y009 = "y009" # 0x0850 assign_port[6]
Y010 = "y010" # 0x0852 assign_port[7]
Y011 = "y011" # 0x0854 assign_port[8]
Y012 = "y012" # 0x0856 assign_port[9]
Y013 = "y013" # 0x0858 assign_port[10]
Y014 = "y014" # 0x085A assign_port[11]
# User block information
U000 = "u000" # 0x1800 intval[0]
U001 = "u001" # 0x1804 intval[1]
U002 = "u002" # 0x1808 intval[2]
U003 = "u003" # 0x180C intval[3]
U004 = "u004" # 0x1810 intval[4]
U005 = "u005" # 0x1814 intval[5]
U006 = "u006" # 0x1818 intval[6]
U007 = "u007" # 0x181C intval[7]
U008 = "u008" # 0x1820 intval[8]
U009 = "u009" # 0x1824 intval[9]
U010 = "u010" # 0x1828 intval[10]
U011 = "u011" # 0x182C intval[11]
U012 = "u012" # 0x1830 intval[12]
U013 = "u013" # 0x1834 intval[13]
U014 = "u014" # 0x1838 intval[14]
U015 = "u015" # 0x183C intval[15]
U016 = "u016" # 0x1840 intval[16]
U017 = "u017" # 0x1844 intval[17]
U018 = "u018" # 0x1848 intval[18]
U019 = "u019" # 0x184C intval[19]
U020 = "u020" # 0x1850 intval[20]
U021 = "u021" # 0x1854 intval[21]
U022 = "u022" # 0x1858 intval[22]
U023 = "u023" # 0x185C intval[23]
U024 = "u024" # 0x1860 intval[24]
U025 = "u025" # 0x1864 intval[25]
U026 = "u026" # 0x1868 intval[26]
U027 = "u027" # 0x186C intval[27]
U028 = "u028" # 0x1870 intval[28]
U029 = "u029" # 0x1874 intval[29]
U030 = "u030" # 0x1878 intval[30]
U031 = "u031" # 0x187C intval[31]
U032 = "u032" # 0x1880 intval[32]
U033 = "u033" # 0x1884 intval[33]
U034 = "u034" # 0x1888 intval[34]
U035 = "u035" # 0x188C intval[35]
U036 = "u036" # 0x1890 intval[36]
U037 = "u037" # 0x1894 intval[37]
U038 = "u038" # 0x1898 intval[38]
U039 = "u039" # 0x189C intval[39]
U040 = "u040" # 0x18A0 intval[40]
U041 = "u041" # 0x18A4 intval[41]
U042 = "u042" # 0x18A8 intval[42]
U043 = "u043" # 0x18AC intval[43]
U044 = "u044" # 0x18B0 intval[44]
U045 = "u045" # 0x18B4 intval[45]
U046 = "u046" # 0x18B8 intval[46]
U047 = "u047" # 0x18BC intval[47]
U048 = "u048" # 0x18C0 intval[48]
U049 = "u049" # 0x18C4 intval[49]
U050 = "u050" # 0x18C8 intval[50]
U051 = "u051" # 0x18CC intval[51]
U052 = "u052" # 0x18D0 intval[52]
U053 = "u053" # 0x18D4 intval[53]
U054 = "u054" # 0x18D8 intval[54]
U055 = "u055" # 0x18DC intval[55]
U056 = "u056" # 0x18E0 intval[56]
U057 = "u057" # 0x18E4 intval[57]
U058 = "u058" # 0x18E8 intval[58]
U059 = "u059" # 0x18EC intval[59]
U060 = "u060" # 0x18F0 intval[60]
U061 = "u061" # 0x18F4 intval[61]
U062 = "u062" # 0x18F8 intval[62]
U063 = "u063" # 0x18FC intval[63]
U064 = "u064" # 0x1900 intval[64]
U065 = "u065" # 0x1904 intval[65]
U066 = "u066" # 0x1908 intval[66]
U067 = "u067" # 0x190C intval[67]
U068 = "u068" # 0x1910 intval[68]
U069 = "u069" # 0x1914 intval[69]
U070 = "u070" # 0x1918 intval[70]
U071 = "u071" # 0x191C intval[71]
U072 = "u072" # 0x1920 intval[72]
U073 = "u073" # 0x1924 intval[73]
U074 = "u074" # 0x1928 intval[74]
U075 = "u075" # 0x192C intval[75]
U076 = "u076" # 0x1930 intval[76]
U077 = "u077" # 0x1934 intval[77]
U078 = "u078" # 0x1938 intval[78]
U079 = "u079" # 0x193C intval[79]
U080 = "u080" # 0x1940 intval[80]
U081 = "u081" # 0x1944 intval[81]
U082 = "u082" # 0x1948 intval[82]
U083 = "u083" # 0x194C intval[83]
U084 = "u084" # 0x1950 intval[84]
U085 = "u085" # 0x1954 intval[85]
U086 = "u086" # 0x1958 intval[86]
U087 = "u087" # 0x195C intval[87]
U088 = "u088" # 0x1960 intval[88]
U089 = "u089" # 0x1964 intval[89]
U090 = "u090" # 0x1968 intval[90]
U091 = "u091" # 0x196C intval[91]
U092 = "u092" # 0x1970 intval[92]
U093 = "u093" # 0x1974 intval[93]
U094 = "u094" # 0x1978 intval[94]
U095 = "u095" # 0x197C intval[95]
U096 = "u096" # 0x1980 intval[96]
U097 = "u097" # 0x1984 intval[97]
U098 = "u098" # 0x1988 intval[98]
U099 = "u099" # 0x198C intval[99]
U100 = "u100" # 0x1990 intval[100]
U101 = "u101" # 0x1994 intval[101]
U102 = "u102" # 0x1998 intval[102]
U103 = "u103" # 0x199C intval[103]
U104 = "u104" # 0x19A0 intval[104]
U105 = "u105" # 0x19A4 intval[105]
U106 = "u106" # 0x19A8 intval[106]
U107 = "u107" # 0x19AC intval[107]
U108 = "u108" # 0x19B0 intval[108]
U109 = "u109" # 0x19B4 intval[109]
U110 = "u110" # 0x19B8 intval[110]
U111 = "u111" # 0x19BC intval[111]
U112 = "u112" # 0x19C0 intval[112]
U113 = "u113" # 0x19C4 intval[113]
U114 = "u114" # 0x19C8 intval[114]
U115 = "u115" # 0x19CC intval[115]
U116 = "u116" # 0x19D0 intval[116]
U117 = "u117" # 0x19D4 intval[117]
U118 = "u118" # 0x19D8 intval[118]
U119 = "u119" # 0x19DC intval[119]
U120 = "u120" # 0x19E0 intval[120]
U121 = "u121" # 0x19E4 intval[121]
U122 = "u122" # 0x19E8 intval[122]
U123 = "u123" # 0x19EC intval[123]
U124 = "u124" # 0x19F0 intval[124]
U125 = "u125" # 0x19F4 intval[125]
U126 = "u126" # 0x19F8 intval[126]
U127 = "u127" # 0x19FC intval[127]
U128 = "u128" # 0x1A00 intval[128]
U129 = "u129" # 0x1A04 intval[129]
U130 = "u130" # 0x1A08 intval[130]
U131 = "u131" # 0x1A0C intval[131]
U132 = "u132" # 0x1A10 intval[132]
U133 = "u133" # 0x1A14 intval[133]
U134 = "u134" # 0x1A18 intval[134]
U135 = "u135" # 0x1A1C intval[135]
U136 = "u136" # 0x1A20 intval[136]
U137 = "u137" # 0x1A24 intval[137]
U138 = "u138" # 0x1A28 intval[138]
U139 = "u139" # 0x1A2C intval[139]
U140 = "u140" # 0x1A30 intval[140]
U141 = "u141" # 0x1A34 intval[141]
U142 = "u142" # 0x1A38 intval[142]
U143 = "u143" # 0x1A3C intval[143]
U144 = "u144" # 0x1A40 intval[144]
U145 = "u145" # 0x1A44 intval[145]
U146 = "u146" # 0x1A48 intval[146]
U147 = "u147" # 0x1A4C intval[147]
U148 = "u148" # 0x1A50 intval[148]
U149 = "u149" # 0x1A54 intval[149]
U150 = "u150" # 0x1A58 intval[150]
U151 = "u151" # 0x1A5C intval[151]
U152 = "u152" # 0x1A60 intval[152]
U153 = "u153" # 0x1A64 intval[153]
U154 = "u154" # 0x1A68 intval[154]
U155 = "u155" # 0x1A6C intval[155]
U156 = "u156" # 0x1A70 intval[156]
U157 = "u157" # 0x1A74 intval[157]
U158 = "u158" # 0x1A78 intval[158]
U159 = "u159" # 0x1A7C intval[159]
U160 = "u160" # 0x1A80 intval[160]
U161 = "u161" # 0x1A84 intval[161]
U162 = "u162" # 0x1A88 intval[162]
U163 = "u163" # 0x1A8C intval[163]
U164 = "u164" # 0x1A90 intval[164]
U165 = "u165" # 0x1A94 intval[165]
U166 = "u166" # 0x1A98 intval[166]
U167 = "u167" # 0x1A9C intval[167]
U168 = "u168" # 0x1AA0 intval[168]
U169 = "u169" # 0x1AA4 intval[169]
U170 = "u170" # 0x1AA8 intval[170]
U171 = "u171" # 0x1AAC intval[171]
U172 = "u172" # 0x1AB0 intval[172]
U173 = | |
<gh_stars>1-10
''' show_snmp.py
JUNOS parsers for the following commands:
* show snmp mib walk system
* show configuration snmp
'''
# Python
import re
# Metaparser
from genie.metaparser import MetaParser
from pyats.utils.exceptions import SchemaError
from genie.metaparser.util.schemaengine import (Any,
Optional, Use, Schema)
class ShowSnmpMibWalkSystemSchema(MetaParser):
    """ Schema for:
            * show snmp mib walk system
    """

    # Validator for the "snmp-object" entry list.
    def validate_snmp_object_list(value):
        """Check that value is a list of well-formed snmp-object dicts."""
        if not isinstance(value, list):
            raise SchemaError('snmp-object is not a list')
        entry_schema = Schema({
            "name": str,
            "object-value": str,
            Optional("object-value-type"): str,
            Optional("oid"): str
        })
        # Every list element must satisfy the entry schema.
        for entry in value:
            entry_schema.validate(entry)
        return value

    schema = {
        "snmp-object-information": {
            "snmp-object": Use(validate_snmp_object_list),
        }
    }
class ShowSnmpMibWalkSystem(ShowSnmpMibWalkSystemSchema):
    """ Parser for:
            * show snmp mib walk system
    """

    cli_command = 'show snmp mib walk system'

    def cli(self, output=None):
        """Parse 'show snmp mib walk system' output into the schema dict."""
        # Run the command only when no pre-captured output was handed in.
        out = output or self.device.execute(self.cli_command)

        # sysContact.0 = KHK
        entry_re = re.compile(r'^(?P<name>\S+) += +(?P<object_value>.+)$')

        parsed = {}
        for raw_line in out.splitlines():
            match = entry_re.match(raw_line.strip())
            if not match:
                continue
            fields = match.groupdict()
            object_list = parsed.setdefault("snmp-object-information", {}) \
                                .setdefault("snmp-object", [])
            object_list.append({
                'name': fields['name'],
                'object-value': fields['object_value'],
            })
        return parsed
class ShowSnmpConfigurationSchema(MetaParser):
    """ Schema for:
            * show configuration snmp
    """

    # Human-readable sketch of the schema. The enforced schema is the
    # ``schema`` class attribute below; this sketch previously had a stray
    # paren (``Optional((``) and key names ("community-name",
    # "trap-group-name", mandatory "trap-group") that disagreed with the
    # enforced schema and the parser, which use "name" and an optional
    # trap-group.
    '''
    schema = {
        "configuration": {
            "snmp": {
                Optional("location"): str,
                Optional("contact"): str,
                Optional("community"): [
                    {
                        "name": str,
                        Optional("authorization"): str,
                        Optional("clients"): [
                            {
                                "name": str,
                                Optional("restrict"): bool
                            },
                        ]
                    },
                ],
                Optional("trap-options"): {
                    "source-address": str
                },
                Optional("trap-group"): {
                    "name": str,
                    Optional("version"): str,
                    Optional("categories"): [
                        {
                            "name": str,
                        },
                    ],
                    Optional("targets"): [
                        {
                            "name": str
                        },
                    ]
                }
            }
        },
    }
    '''

    # Sub Schema snmp community
    def validate_community_list(value):
        """Validate the 'community' entry list (and nested client lists)."""
        if not isinstance(value, list):
            raise SchemaError('snmp community is not a list')

        def validate_clients_list(value):
            """Validate the per-community 'clients' entry list."""
            if not isinstance(value, list):
                raise SchemaError('snmp clients is not a list')
            snmp_clients_schema = Schema({
                "name": str,
                Optional("restrict"): bool
            })
            # Validate each dictionary in list
            for item in value:
                snmp_clients_schema.validate(item)
            return value

        snmp_community_schema = Schema({
            "name": str,
            Optional("authorization"): str,
            Optional("clients"): Use(validate_clients_list),
        })
        # Validate each dictionary in list
        for item in value:
            snmp_community_schema.validate(item)
        return value

    # Sub Schema snmp categories and targets
    def validate_categories_or_targets_list(value):
        """Validate a list of {'name': str} dicts."""
        if not isinstance(value, list):
            raise SchemaError('snmp categories/targets is not a list')
        snmp_categories_or_targets_schema = Schema({
            "name": str,
        })
        # Validate each dictionary in list
        for item in value:
            snmp_categories_or_targets_schema.validate(item)
        return value

    schema = {
        "configuration": {
            "snmp": {
                Optional("location"): str,
                Optional("contact"): str,
                Optional("community"): Use(validate_community_list),
                Optional("trap-options"): {
                    "source-address": str
                },
                Optional("trap-group"): {
                    "name": str,
                    Optional("version"): str,
                    Optional("categories"): Use(validate_categories_or_targets_list),
                    Optional("targets"): Use(validate_categories_or_targets_list),
                }
            }
        }
    }
class ShowSnmpConfiguration(ShowSnmpConfigurationSchema):
    """ Parser for:
            * show configuration snmp
    """

    cli_command = 'show configuration snmp'

    def cli(self, output=None):
        # Execute the command on the device unless pre-captured output is given
        # (unit tests pass `output`).
        if not output:
            out = self.device.execute(self.cli_command)
        else:
            out = output

        # location TH-HK2/floor_1B-002/rack_KHK1104;
        p1 = re.compile(r'^location +(?P<location>.+);+$')

        # contact KHK;
        p2 = re.compile(r'^contact +(?P<contact>.+);+$')

        # community safaripub {
        #     authorization read-only;
        #     clients {
        #         10.169.5.0/25;
        #         0.0.0.0/0 restrict;
        #         2001:db8:d38a:cf16::/64;
        #         2001:db8:d38a:d3e9::/64;
        #     }
        # }
        p3 = re.compile(r'^(community +(?P<name>.+) +\{)$')
        p4 = re.compile(r'^(authorization +(?P<authorization>.+);)$')
        # Client entries carry a prefix length ('/'), unlike trap targets.
        p5 = re.compile(r'^(?P<client>((\d+\.[\d\.]+\/[\d]+)|(\w+\:[\w\:]+\/[\d]+)|(0x\d+))([\s\S])*);$')

        # trap-options {
        #     source-address lo0;
        # }
        p6 = re.compile(r'^source-address +(?P<source_address>.+);$')

        # trap-group safaripub {
        #     version v1;
        #     categories {
        #         chassis;
        #         link;
        #         routing;
        #     }
        #     targets {
        #         10.64.99.32;
        #         10.169.249.67;
        #     }
        # }
        p7 = re.compile(r'^trap-group +(?P<name>.+) +\{$')
        p8 = re.compile(r'^version +(?P<version>.+);$')
        p9 = re.compile(r'^categories +\{(?P<categories>.+);$')
        p10 = re.compile(r'^(?P<target>((\d+\.[\d\.]+)|(\w+\:[\w\:]+)|(0x\d+)));$')

        ret_dict = {}
        # Accumulates the flattened text of a "categories { ... }" block so it
        # can be matched by p9 in one shot once the closing brace is seen.
        inner_block_text = ''
        category_block_started = False

        for line in out.splitlines():
            line = line.strip()

            # location TH-HK2/floor_1B-002/rack_KHK1104;
            m = p1.match(line)
            if m:
                group = m.groupdict()
                snmp_dict = ret_dict.setdefault("configuration", {})\
                    .setdefault("snmp", {})
                snmp_dict['location'] = group['location']
                continue

            # contact KHK;
            m = p2.match(line)
            if m:
                group = m.groupdict()
                # Only (re)bind snmp_dict when nothing created it earlier.
                if "configuration" not in ret_dict:
                    snmp_dict = ret_dict.setdefault("configuration", {}) \
                        .setdefault("snmp", {})
                snmp_dict['contact'] = group['contact']
                continue

            # community safaripub {
            #     authorization read-only;
            #     clients {
            #         10.169.5.0/25;
            #         0.0.0.0/0 restrict;
            #         2001:db8:d38a:cf16::/64;
            #         2001:db8:d38a:d3e9::/64;
            #     }
            # }
            m = p3.match(line)
            if m:
                group = m.groupdict()
                if "configuration" not in ret_dict:
                    snmp_dict = ret_dict.setdefault("configuration", {}) \
                        .setdefault("snmp", {})
                # Create the community list once; later headers reuse the
                # same local binding.
                if not snmp_dict.get('community', []):
                    community_list = snmp_dict.setdefault('community', [])
                community_list.append({})
                community_list[-1]['name'] = group['name']

            m = p4.match(line)
            if m:
                group = m.groupdict()
                # NOTE(review): assumes a 'community <name> {' header was seen
                # first; community_list is otherwise unbound — confirm input
                # ordering is guaranteed by the CLI output.
                community_list[-1]['authorization'] = group['authorization']

            # "clients {" opener: start a fresh client list on the current
            # community entry.
            if 'client' in line:
                community_list[-1]['clients'] = []
                continue

            m = p5.match(line)
            if m:
                group = m.groupdict()
                client = group['client']
                if '/' in client:
                    # Split "addr/len [restrict]" into tokens.
                    client = client.split()
                    if client:
                        client_dict = {}
                        client_dict['name'] = client[0]
                        if len(client) > 1 and 'restrict' in client:
                            client_dict['restrict'] = True
                        community_list[-1]['clients'].append(client_dict)
                continue

            # trap-options {
            #     source-address lo0;
            # }
            m = p6.match(line)
            if m:
                group = m.groupdict()
                if "configuration" not in ret_dict:
                    snmp_dict = ret_dict.setdefault("configuration", {}) \
                        .setdefault("snmp", {})
                trap_options = snmp_dict.setdefault('trap-options', {})
                trap_options['source-address'] = group['source_address']
                continue

            # trap-group safaripub {
            #     version v1;
            #     categories {
            #         chassis;
            #         link;
            #         routing;
            #     }
            #     targets {
            #         10.64.99.32;
            #         10.169.249.67;
            #     }
            # }
            m = p7.match(line)
            if m:
                group = m.groupdict()
                if "configuration" not in ret_dict:
                    snmp_dict = ret_dict.setdefault("configuration", {}) \
                        .setdefault("snmp", {})
                trap_group = snmp_dict.setdefault('trap-group', {})
                trap_group['name'] = group['name']
                continue

            m = p8.match(line)
            if m:
                group = m.groupdict()
                # NOTE(review): relies on trap_group having been bound by a
                # preceding 'trap-group <name> {' line.
                trap_group['version'] = group['version']
                continue

            # Flatten the categories block: accumulate lines until the
            # closing '}' is reached, then parse the whole block with p9.
            if 'categories' in line:
                category_block_started = True
            if category_block_started:
                if not '}' in line:
                    inner_block_text += line
                    continue
            if inner_block_text and category_block_started:
                category_block_started = False
                m = p9.match(inner_block_text)
                if m:
                    group = m.groupdict()
                    categories = trap_group.setdefault('categories', [])
                    for category in group['categories'].split(';'):
                        categories.append({'name': category})
                inner_block_text = ''
                continue

            # "targets {" opener creates the list; subsequent address lines
            # match p10 and append to it.
            if 'targets' in line:
                targets = trap_group.setdefault('targets', [])
            m = p10.match(line)
            if m:
                group = m.groupdict()
                targets.append({'name': group['target']})
                continue

        return ret_dict
class ShowSnmpStatisticsSchema(MetaParser):
    """ Schema for:
            * show snmp statistics
    """
    # NOTE: a stale, unassigned triple-quoted copy of the schema used to sit
    # here; it was dead code (a discarded string expression) and has been
    # removed.

    # Sub Schema snmp community.
    # NOTE(review): the two validators below are not referenced by the
    # statistics schema at the bottom of this class; they look copied from
    # the SNMP configuration schema. Kept in case they are used externally
    # -- confirm before removing.
    def validate_community_list(value):
        """Validate a list of SNMP community dicts."""
        if not isinstance(value, list):
            raise SchemaError('snmp community is not a list')

        def validate_clients_list(value):
            """Validate a list of SNMP client dicts."""
            if not isinstance(value, list):
                raise SchemaError('snmp clients is not a list')
            snmp_clients_schema = Schema({
                "name": str,
                Optional("restrict"): bool
            })
            # Validate each dictionary in list
            for item in value:
                snmp_clients_schema.validate(item)
            return value

        snmp_community_schema = Schema({
            "name": str,
            "authorization": str,
            "clients": Use(validate_clients_list),
        })
        # Validate each dictionary in list
        for item in value:
            snmp_community_schema.validate(item)
        return value

    # Sub Schema snmp categories or targets
    def validate_categories_or_targets_list(value):
        """Validate a list of {"name": str} dicts (categories or targets)."""
        if not isinstance(value, list):
            raise SchemaError('snmp categories is not a list')
        snmp_categories_or_targets_schema = Schema({
            "name": str,
        })
        # Validate each dictionary in list
        for item in value:
            snmp_categories_or_targets_schema.validate(item)
        return value

    schema = {
        "snmp-statistics": {
            "snmp-input-statistics": {
                "packets": str,
                "bad-versions": str,
                "bad-community-names": str,
                "bad-community-uses": str,
                "asn-parse-errors": str,
                "too-bigs": str,
                "no-such-names": str,
                "bad-values": str,
                "read-onlys": str,
                "general-errors": str,
                "total-request-varbinds": str,
                "total-set-varbinds": str,
                "get-requests": str,
                "get-nexts": str,
                "set-requests": str,
                "get-responses": str,
                "traps": str,
                "silent-drops": str,
                "proxy-drops": str,
                "commit-pending-drops": str,
                "throttle-drops": str,
                "duplicate-request-drops": str
            },
            "snmp-v3-input-statistics": {
                "unknown-secmodels": str,
                "invalid-msgs": str,
                "unknown-pduhandlers": str,
                "unavail-contexts": str,
                "unknown-contexts": str,
                "unsupported-seclevels": str,
                "not-in-timewindows": str,
                "unknown-usernames": str,
                "unknown-eids": str,
                "wrong-digests": str,
                "decrypt-errors": str
            },
            "snmp-output-statistics": {
                "packets": str,
                "too-bigs": str,
                "no-such-names": str,
                "bad-values": str,
                "general-errors": str,
                "get-requests": str,
                "get-nexts": str,
                "set-requests": str,
                "get-responses": str,
                "traps": str
            },
            "snmp-performance-statistics": {
                "average-response-time": str,
                "one-minute-request-throughput": str,
                "five-minute-request-throughput": str,
                "fifteen-minute-request-throughput": str,
                "one-minute-response-throughput": str,
                "five-minute-response-throughput": str,
                "fifteen-minute-response-throughput": str
            }
        }
    }
class ShowSnmpStatistics(ShowSnmpStatisticsSchema):
""" Parser for:
* show snmp statistics
"""
cli_command = 'show snmp statistics'
def cli(self, output=None):
if not output:
out = self.device.execute(self.cli_command)
else:
out = output
# SNMP statistics:
p1 = re.compile(r'^SNMP +statistics:$')
# Input:
p2 = re.compile(r'^Input:$')
# Packets: 8, Bad versions: 0, Bad community names: 0,
# Bad | |
0, -1, 0, 0, 0, -1),
(1, 1, 1, 0, 0, -1, -1, 0, 0)]
else:
# borders 8, 9
return [(1, 0, 0, 0, 1, 0, 0, 0, 1),
(-1, 0, 0, 0, 0, -1, 0, -1, 0),
(0, -1, 0, -1, 0, 0, 0, 0, -1),
(0, 0, -1, 0, -1, 0, -1, 0, 0),
(0, 0, 1, 1, 0, 0, 0, 1, 0),
(0, 1, 0, 0, 0, 1, 1, 0, 0)]
elif self._border(10):
if self._border(11) and self._border(12):
# borders 8, 10, 11, 12
return [(1, 0, 0, 0, 1, 0, 0, 0, 1),
(-1, 0, 0, 0, -1, 0, 0, 0, 1),
(-1, 0, 0, 0, 1, 0, 0, 0, -1),
(0, -1, 0, -1, 0, 0, 0, 0, -1),
(0, -1, 0, 1, 0, 0, 0, 0, 1),
(0, 1, 0, -1, 0, 0, 0, 0, 1),
(0, 1, 0, 1, 0, 0, 0, 0, -1),
(1, 0, 0, 0, -1, 0, 0, 0, -1)]
else:
# borders 8, 10
return [(1, 0, 0, 0, 1, 0, 0, 0, 1),
(-1, 0, 0, 0, -1, 0, 0, 0, 1),
(0, -1, 0, -1, 0, 0, 0, 0, -1),
(0, 1, 0, 1, 0, 0, 0, 0, -1)]
elif self._border(14):
# borders 8, 13, 14
return [(1, 0, 0, 0, 1, 0, 0, 0, 1),
(-1, -1, -1, 1, 0, 0, 0, 0, 1),
(-1, 0, 0, 1, 1, 1, 0, 0, -1),
(0, -1, 0, -1, 0, 0, 0, 0, -1),
(0, 1, 0, -1, -1, -1, 0, 0, 1),
(1, 1, 1, 0, -1, 0, 0, 0, -1)]
else:
# borders 8
return [(1, 0, 0, 0, 1, 0, 0, 0, 1),
(0, -1, 0, -1, 0, 0, 0, 0, -1)]
if self._border(9):
if self._border(12):
if self._border(10) and self._border(11):
# borders 9, 10, 11, 12
return [(1, 0, 0, 0, 1, 0, 0, 0, 1),
(-1, 0, 0, 0, -1, 0, 0, 0, 1),
(-1, 0, 0, 0, 0, -1, 0, -1, 0),
(-1, 0, 0, 0, 0, 1, 0, 1, 0),
(-1, 0, 0, 0, 1, 0, 0, 0, -1),
(1, 0, 0, 0, -1, 0, 0, 0, -1),
(1, 0, 0, 0, 0, -1, 0, 1, 0),
(1, 0, 0, 0, 0, 1, 0, -1, 0)]
else:
# borders 9, 12
return [(1, 0, 0, 0, 1, 0, 0, 0, 1),
(-1, 0, 0, 0, 0, -1, 0, -1, 0),
(-1, 0, 0, 0, 0, 1, 0, 1, 0),
(1, 0, 0, 0, -1, 0, 0, 0, -1)]
elif self._border(14):
if self._border(13):
# borders 9, 13, 14
return [(1, 0, 0, 0, 1, 0, 0, 0, 1),
(-1, -1, -1, 0, 0, 1, 0, 1, 0),
(-1, 0, 0, 0, 0, -1, 0, -1, 0),
(1, 1, 1, 0, -1, 0, 0, 0, -1)]
else:
# borders 9, 14
return [(1, 0, 0, 0, 1, 0, 0, 0, 1),
(-1, -1, -1, 0, 0, 1, 0, 1, 0),
(-1, 0, 0, 0, 0, -1, 0, -1, 0),
(1, 1, 1, 0, -1, 0, 0, 0, -1)]
elif self._border(15):
# borders 9, 15, 16
return [(1, 0, 0, 0, 1, 0, 0, 0, 1),
(-1, 0, 0, -1, 0, 1, -1, 1, 0),
(-1, 0, 0, 0, 0, -1, 0, -1, 0),
(0, -1, 1, 0, -1, 0, 1, -1, 0),
(0, -1, 1, 0, 0, 1, -1, 0, 1),
(0, 1, -1, -1, 1, 0, 0, 1, 0),
(0, 1, -1, 1, 0, -1, 0, 0, -1),
(1, 0, 0, 1, -1, 0, 1, 0, -1)]
else:
# borders 9
return [(1, 0, 0, 0, 1, 0, 0, 0, 1),
(-1, 0, 0, 0, 0, -1, 0, -1, 0)]
if self._border(10):
if self._border(11) and self._border(12):
# borders 10, 11, 12
return [(1, 0, 0, 0, 1, 0, 0, 0, 1),
(-1, 0, 0, 0, -1, 0, 0, 0, 1),
(-1, 0, 0, 0, 1, 0, 0, 0, -1),
(1, 0, 0, 0, -1, 0, 0, 0, -1)]
else:
# borders 10
return [(1, 0, 0, 0, 1, 0, 0, 0, 1),
(-1, 0, 0, 0, -1, 0, 0, 0, 1)]
if self._border(11):
# borders 11
return [(1, 0, 0, 0, 1, 0, 0, 0, 1),
(-1, 0, 0, 0, 1, 0, 0, 0, -1)]
if self._border(12):
# border 12
return [(1, 0, 0, 0, 1, 0, 0, 0, 1),
(1, 0, 0, 0, -1, 0, 0, 0, -1)]
if self._border(13) and self._border(14):
# border 13, 14
return [(1, 0, 0, 0, 1, 0, 0, 0, 1),
(1, 1, 1, 0, -1, 0, 0, 0, -1)]
if self._border(14):
# border 14
return [(1, 0, 0, 0, 1, 0, 0, 0, 1),
(1, 1, 1, 0, -1, 0, 0, 0, -1)]
if self._border(15):
if self._border(16):
# borders 15, 16
return [(1, 0, 0, 0, 1, 0, 0, 0, 1),
(-1, 0, 0, -1, 0, 1, -1, 1, 0),
(0, -1, 1, 0, -1, 0, 1, -1, 0),
(0, 1, -1, 1, 0, -1, 0, 0, -1)]
else:
# borders 15
return [(1, 0, 0, 0, 1, 0, 0, 0, 1),
(0, 1, -1, 1, 0, -1, 0, 0, -1)]
return [(1, 0, 0, 0, 1, 0, 0, 0, 1)]
def _automorphisms_reduced_slow(self):
    """
    Return the automorphisms of the reduced ternary quadratic form.

    The candidate set is every 3x3 integer matrix with entries in
    {-1, 0, 1}, determinant 1 and finite order; this suffices because
    Eisenstein reduced forms are Minkowski reduced. See Cassels.

    EXAMPLES::

        sage: Q = TernaryQF([1, 1, 7, 0, 0, 0])
        sage: Q.is_eisenstein_reduced()
        True
        sage: auts = Q._automorphisms_reduced_slow() # long time (3s on sage.math, 2014)
        sage: len(auts) # long time
        8
        sage: A = auts[randint(0,7)] # long time
        sage: Q(A) == Q # long time
        True
        sage: Q = TernaryQF([3, 4, 5, 3, 3, 2])
        sage: Q._automorphisms_reduced_slow() # long time
        [
        [1 0 0]
        [0 1 0]
        [0 0 1]
        ]
    """
    if TernaryQF.possible_automorphisms is None:
        entries = [-1, 0, 1]
        # Every 3x3 integer matrix with entries in {-1, 0, 1} ...
        candidates = [matrix(ZZ, 3, [a, b, c, d, e, f, g, h, i])
                      for a in entries for b in entries for c in entries
                      for d in entries for e in entries for f in entries
                      for g in entries for h in entries for i in entries]
        # ... restricted to determinant 1 ...
        candidates = [m for m in candidates if m.det() == 1]
        # ... then pruned in three passes to matrices whose square stays in
        # the surviving set (discards candidates of infinite order).
        for _ in range(3):
            candidates = [m for m in candidates if m**2 in candidates]
        TernaryQF.possible_automorphisms = candidates
    return [m for m in TernaryQF.possible_automorphisms
            if self(m) == self]
def automorphisms(self, slow = True):
"""
Returns a list with the automorphisms of the definite ternary quadratic form.
EXAMPLES::
sage: Q = TernaryQF([1, 1, 7, 0, 0, 0])
sage: auts = Q.automorphisms()
sage: auts
[
[-1 0 0] [-1 0 0] [ 0 -1 0] [ 0 -1 0] [ 0 1 0] [ 0 1 0]
[ 0 -1 0] [ 0 1 0] [-1 0 0] [ 1 0 0] [-1 0 0] [ 1 0 0]
[ 0 0 1], [ 0 0 -1], [ 0 0 -1], [ 0 0 1], [ 0 0 1], [ 0 0 -1],
[ 1 0 0] [1 0 0]
[ 0 -1 0] [0 1 0]
[ 0 0 -1], [0 0 1]
]
sage: all(Q == Q(A) for A in auts)
True
sage: Q = TernaryQF([3, 4, 5, 3, 3, 2])
sage: Q.automorphisms(slow = False)
[
[1 0 0]
[0 1 0]
[0 0 1]
]
sage: Q = TernaryQF([4, 2, 4, 3, -4, -5])
sage: auts = Q.automorphisms(slow = False)
sage: auts
[
[1 0 0] [ 2 -1 -1]
[0 1 0] [ 3 -2 -1]
[0 0 1], [ 0 0 -1]
]
sage: A = auts[1]
| |
that model.
See more here: http://stackoverflow.com/a/15745652"""
self.object = form.save(commit=False) # don't save M2M fields
self.object.username = create_username(
personal=form.cleaned_data["personal"], family=form.cleaned_data["family"]
)
# Need to save that object because of commit=False previously.
# This doesn't save our troublesome M2M field.
self.object.save()
# send a signal to add a comment
create_comment_signal.send(
sender=self.form_class,
content_object=self.object,
comment=form.cleaned_data["comment"],
timestamp=None,
)
# saving intermediary M2M model: Qualification
for lesson in form.cleaned_data["lessons"]:
Qualification.objects.create(lesson=lesson, person=self.object)
# Important: we need to use ModelFormMixin.form_valid() here!
# But by doing so we omit SuccessMessageMixin completely, so we need to
# simulate it. The code below is almost identical to
# SuccessMessageMixin.form_valid().
response = super().form_valid(form)
success_message = self.get_success_message(form.cleaned_data)
if success_message:
messages.success(self.request, success_message)
return response
def get_initial(self):
    """Pre-fill the form from ``personal``/``family``/``email`` query params,
    defaulting each to the empty string."""
    params = self.request.GET
    return {
        field: params.get(field, "")
        for field in ("personal", "family", "email")
    }
class PersonUpdate(OnlyForAdminsMixin, UserPassesTestMixin, AMYUpdateView):
    """Edit a Person.

    Access is granted to admins holding ``workshops.change_person`` and to
    the person editing their own profile.
    """

    model = Person
    form_class = PersonForm
    pk_url_kwarg = "person_id"
    template_name = "workshops/person_edit_form.html"

    def test_func(self):
        """Raise PermissionDenied unless the user may change persons or is
        editing themselves."""
        user = self.request.user
        if not (user.has_perm("workshops.change_person")
                or user == self.get_object()):
            raise PermissionDenied
        return True

    def get_context_data(self, **kwargs):
        """Add award/task listings and the auxiliary forms to the context."""
        context = super().get_context_data(**kwargs)
        failed_trainings = TrainingProgress.objects.filter(
            state="f", trainee=self.object
        ).exists()
        # Shared kwargs: pin the hidden "person" field to the edited person.
        form_kwargs = {
            "initial": {"person": self.object},
            "widgets": {"person": HiddenInput()},
        }
        context["awards"] = (
            self.object.award_set.select_related("event", "badge")
            .order_by("badge__name")
        )
        context["tasks"] = (
            self.object.task_set.select_related("role", "event")
            .order_by("-event__slug")
        )
        context["consents_form"] = ActiveTermConsentsForm(
            form_tag=False,
            prefix="consents",
            **form_kwargs,
        )
        context["award_form"] = AwardForm(
            form_tag=False,
            prefix="award",
            failed_trainings=failed_trainings,
            **form_kwargs,
        )
        context["task_form"] = TaskForm(
            form_tag=False,
            prefix="task",
            failed_trainings=failed_trainings,
            **form_kwargs,
        )
        return context

    def form_valid(self, form):
        """Replace the person's qualifications wholesale with the submitted
        lesson set, then proceed with the normal save."""
        self.object = form.save(commit=False)
        Qualification.objects.filter(person=self.object).delete()
        for lesson in form.cleaned_data.pop("lessons"):
            Qualification.objects.create(person=self.object, lesson=lesson)
        return super().form_valid(form)
class PersonDelete(OnlyForAdminsMixin, PermissionRequiredMixin, AMYDeleteView):
    """Delete a single Person; restricted to admins with delete permission."""

    model = Person
    pk_url_kwarg = "person_id"
    permission_required = "workshops.delete_person"
    success_url = reverse_lazy("all_persons")
class PersonArchive(PermissionRequiredMixin, LoginRequiredMixin, AMYDeleteView):
    """Archive a Person (soft delete) instead of removing the row."""

    model = Person
    permission_required = "workshops.delete_person"
    pk_url_kwarg = "person_id"
    success_message = "{} was archived successfully."

    def perform_destroy(self, *args, **kwargs):
        # Soft delete: delegate to the model's archive() rather than delete().
        self.object.archive()

    def get_success_url(self) -> str:
        """
        If the user archived their own profile,
        send them to the login page.
        Otherwise send the user back to the page they're currently on.
        """
        archived_self = self.request.user.pk == self.object.pk
        return reverse("login") if archived_self else self.object.get_absolute_url()

    def has_permission(self):
        """Permitted with delete rights, or when archiving one's own profile."""
        own_profile = self.request.user.pk == self.kwargs.get(self.pk_url_kwarg)
        return super().has_permission() or own_profile
class PersonPermissions(OnlyForAdminsMixin, PermissionRequiredMixin, AMYUpdateView):
    """Edit a person's groups and individual permissions."""

    permission_required = "workshops.change_person"
    form_class = PersonPermissionsForm
    pk_url_kwarg = "person_id"
    # Prefetch groups and per-user permissions (with their content types) so
    # rendering the permissions form avoids per-row queries.
    queryset = Person.objects.prefetch_related(
        "groups",
        Prefetch(
            "user_permissions",
            queryset=Permission.objects.select_related("content_type"),
        ),
    )
@login_required
def person_password(request, person_id):
    """Change a person's password.

    Superusers may set anyone's password (no old-password prompt); other
    users may only change their own and must supply the old password.
    """
    user = get_object_or_404(Person, pk=person_id)

    # Either the user requests change of their own password, or someone with
    # permission for changing person does.
    may_proceed = (request.user == user) or request.user.has_perm(
        "workshops.change_person"
    )
    if not may_proceed:
        raise PermissionDenied

    if request.user.is_superuser:
        Form = SetPasswordForm
    elif request.user.pk != user.pk:
        # non-superuser can only change their own password, not someone else's
        raise PermissionDenied
    else:
        Form = PasswordChangeForm

    if request.method == "POST":
        form = Form(user, request.POST)
        if form.is_valid():
            form.save()  # saves the password for the user
            # keep the current session authenticated after the change
            update_session_auth_hash(request, form.user)
            messages.success(request, "Password was changed successfully.")
            return redirect(reverse("person_details", args=[user.id]))
        messages.error(request, "Fix errors below.")
    else:
        form = Form(user)

    form.helper = BootstrapHelper(add_cancel_button=False)
    return render(
        request,
        "generic_form.html",
        {"form": form, "model": Person, "object": user, "title": "Change password"},
    )
@admin_required
@permission_required(
    ["workshops.delete_person", "workshops.change_person"], raise_exception=True
)
def persons_merge(request):
    """Display two persons side by side on GET and merge them on POST.

    If no persons are supplied via GET params, display person selection
    form."""
    obj_a_pk = request.GET.get("person_a")
    obj_b_pk = request.GET.get("person_b")

    if not obj_a_pk or not obj_b_pk:
        context = {
            "title": "Merge Persons",
            "form": PersonsSelectionForm(),
        }
        if "next" in request.GET:
            return redirect(request.GET.get("next", "/"))
        return render(request, "generic_form.html", context)

    elif obj_a_pk == obj_b_pk:
        context = {
            "title": "Merge Persons",
            "form": PersonsSelectionForm(),
        }
        messages.warning(request, "You cannot merge the same person with " "themself.")
        if "next" in request.GET:
            return redirect(request.GET.get("next", "/"))
        return render(request, "generic_form.html", context)

    obj_a = get_object_or_404(Person, pk=obj_a_pk)
    obj_b = get_object_or_404(Person, pk=obj_b_pk)
    form = PersonsMergeForm(initial=dict(person_a=obj_a, person_b=obj_b))

    if request.method == "POST":
        form = PersonsMergeForm(request.POST)
        if form.is_valid():
            # merging in process
            data = form.cleaned_data

            obj_a = data["person_a"]
            obj_b = data["person_b"]

            # `base_obj` stays in the database after merge
            # `merging_obj` will be removed from DB after merge
            if data["id"] == "obj_a":
                base_obj = obj_a
                merging_obj = obj_b
                base_a = True
            else:
                base_obj = obj_b
                merging_obj = obj_a
                base_a = False

            # non-M2M-relationships
            easy = (
                "username",
                "personal",
                "middle",
                "family",
                "email",
                "secondary_email",
                "may_contact",
                "publish_profile",
                "gender",
                "gender_other",
                "airport",
                "github",
                "twitter",
                "url",
                "affiliation",
                "occupation",
                "orcid",
                "is_active",
            )

            # M2M relationships
            difficult = (
                "award_set",
                "qualification_set",
                "domains",
                "languages",
                "task_set",
                "trainingprogress_set",
                "comment_comments",  # made by this person
                "comments",  # made by others regarding this person
                "consent_set",
            )

            try:
                _, integrity_errors = merge_objects(
                    obj_a, obj_b, easy, difficult, choices=data, base_a=base_a
                )

                if integrity_errors:
                    # BUGFIX: the "+" is required here. Without it, the
                    # adjacent string literals concatenated so the heading
                    # became the *separator* passed to str.join(), and the
                    # intended "There were integrity errors..." prefix was
                    # inserted between errors instead of leading the message.
                    msg = (
                        "There were integrity errors when merging related "
                        "objects:\n"
                        + "\n".join(integrity_errors)
                    )
                    messages.warning(request, msg)

            except ProtectedError as e:
                return failed_to_delete(
                    request, object=merging_obj, protected_objects=e.protected_objects
                )

            else:
                messages.success(
                    request,
                    "Persons were merged successfully. "
                    "You were redirected to the base "
                    "person.",
                )
                return redirect(base_obj.get_absolute_url())
        else:
            messages.error(request, "Fix errors in the form.")

    context = {
        "title": "Merge two persons",
        "form": form,
        "obj_a": obj_a,
        "obj_b": obj_b,
    }
    return render(request, "workshops/persons_merge.html", context)
@admin_required
def sync_usersocialauth(request, person_id):
    """Synchronize a person's social (UserSocialAuth) record and report the
    outcome via messages, redirecting back to the person's page."""
    person_id = int(person_id)
    try:
        person = Person.objects.get(pk=person_id)
    except Person.DoesNotExist:
        messages.error(
            request,
            "Cannot sync UserSocialAuth table for person #{} "
            "-- there is no Person with such id.".format(person_id),
        )
        return redirect(reverse("persons"))

    try:
        if person.synchronize_usersocialauth():
            messages.success(
                request, "Social account was successfully synchronized."
            )
        else:
            messages.error(
                request,
                "It was not possible to synchronize this person "
                "with their social account.",
            )
    except GithubException:
        messages.error(
            request,
            "Cannot sync UserSocialAuth table for person #{} "
            "due to errors with GitHub API.".format(person_id),
        )
    return redirect(reverse("person_details", args=(person_id,)))
# ------------------------------------------------------------
class AllEvents(OnlyForAdminsMixin, AMYListView):
    """Filterable list of every event, annotated with an instructor count."""

    context_object_name = "all_events"
    template_name = "workshops/all_events.html"
    filter_class = EventFilter
    title = "All Events"
    # Count instructor tasks per event in SQL to avoid per-row queries.
    queryset = (
        Event.objects.select_related("assigned_to")
        .prefetch_related("host", "tags")
        .annotate(
            num_instructors=Sum(
                Case(
                    When(task__role__name="instructor", then=Value(1)),
                    default=0,
                    output_field=IntegerField(),
                )
            )
        )
        .order_by("-start")
    )
@admin_required
def event_details(request, slug):
    """List details of a particular event.

    Renders the event page with its tasks, contactable people, related
    member sites and an admin-assignment lookup form.
    """
    try:
        # Attach, as `event.contacts`, tasks for hosts/organizers/instructors
        # whose person may be contacted and has a non-empty email address.
        task_prefetch = Prefetch(
            "task_set",
            to_attr="contacts",
            queryset=Task.objects.select_related("person")
            .filter(
                # we only want hosts, organizers and instructors
                Q(role__name="host")
                | Q(role__name="organizer")
                | Q(role__name="instructor")
            )
            .filter(person__may_contact=True)
            .exclude(Q(person__email="") | Q(person__email=None)),
        )
        event = (
            Event.objects.attendance()
            .prefetch_related(task_prefetch)
            .select_related(
                "assigned_to",
                "host",
                "administrator",
                "sponsor",
                "membership",
            )
            .get(slug=slug)
        )
        # Memberships linked to this event through its tasks.
        member_sites = Membership.objects.filter(task__event=event).distinct()
    except Event.DoesNotExist:
        raise Http404("Event matching query does not exist.")

    # Expose each task person's "important" badges without per-row queries.
    person_important_badges = Prefetch(
        "person__badges",
        to_attr="important_badges",
        queryset=Badge.objects.filter(name__in=Badge.IMPORTANT_BADGES),
    )

    tasks = (
        Task.objects.filter(event__id=event.id)
        .select_related("event", "person", "role")
        .prefetch_related(person_important_badges)
        .order_by("role__name")
    )

    # Admin-assignment form; pre-filled when someone is already assigned.
    admin_lookup_form = AdminLookupForm()
    if event.assigned_to:
        admin_lookup_form = AdminLookupForm(initial={"person": event.assigned_to})

    admin_lookup_form.helper = BootstrapHelper(
        form_action=reverse("event_assign", args=[slug]), add_cancel_button=False
    )

    context = {
        "title": "Event {0}".format(event),
        "event": event,
        "tasks": tasks,
        "member_sites": member_sites,
        # Emails of contactable people working on this event.
        "all_emails": tasks.filter(person__may_contact=True)
        .exclude(person__email=None)
        .values_list("person__email", flat=True),
        "today": datetime.date.today(),
        "admin_lookup_form": admin_lookup_form,
        "event_location": {
            "venue": event.venue,
            "humandate": event.human_readable_date,
            "latitude": event.latitude,
            "longitude": event.longitude,
        },
    }
    return render(request, "workshops/event.html", context)
@admin_required
def validate_event(request, slug):
    """Check the event's home page *or* the specified URL (for testing)."""
    try:
        event = Event.objects.get(slug=slug)
    except Event.DoesNotExist:
        raise Http404("Event matching query does not exist.")

    # manual override via ?url=..., otherwise the event's own URL
    page_url = request.GET.get("url", None)
    if page_url is None:
        page_url = event.url
    page_url = page_url.strip()

    error_messages, warning_messages = [], []
    try:
        metadata = fetch_workshop_metadata(page_url)
        # validate metadata
        error_messages, warning_messages = validate_workshop_metadata(metadata)
    except WrongWorkshopURL as e:
        error_messages.append(f"URL error: {e.msg}")
    except requests.exceptions.HTTPError as e:
        error_messages.append(
            'Request for "{0}" returned status code {1}'.format(
                page_url, e.response.status_code
            )
        )
    except (requests.exceptions.ConnectionError, requests.exceptions.Timeout):
        error_messages.append("Network connection error.")

    context = {
        "title": "Validate Event {0}".format(event),
        "event": event,
        "page": page_url,
        "error_messages": error_messages,
        "warning_messages": warning_messages,
    }
    return render(request, "workshops/validate_event.html", context)
class EventCreate(OnlyForAdminsMixin, PermissionRequiredMixin, AMYCreateView):
    """Create a new Event and schedule any automated follow-up email actions."""

    permission_required = "workshops.add_event"
    model = Event
    form_class = EventCreateForm
    template_name = "workshops/event_create_form.html"

    def _schedule_action(self, action_class, trigger_action):
        """Schedule `action_class` jobs for active triggers named
        `trigger_action`, if the freshly-created event meets the action's
        conditions. Factored out of form_valid() to remove duplication."""
        if action_class.check(self.object):
            triggers = Trigger.objects.filter(active=True, action=trigger_action)
            ActionManageMixin.add(
                action_class=action_class,
                logger=logger,
                scheduler=scheduler,
                triggers=triggers,
                context_objects=dict(event=self.object),
                object_=self.object,
                request=self.request,
            )

    def form_valid(self, form):
        """Additional functions for validating Event Create form:

        * maybe adding a mail job, if conditions are met
        """
        # save the object
        res = super().form_valid(form)

        # check conditions for running the automated email actions
        self._schedule_action(
            PostWorkshopAction, "week-after-workshop-completion"
        )
        self._schedule_action(
            InstructorsHostIntroductionAction, "instructors-host-introduction"
        )

        # return remembered results
        return res
class EventUpdate(OnlyForAdminsMixin, PermissionRequiredMixin, AMYUpdateView):
permission_required = [
"workshops.change_event",
"workshops.add_task",
]
queryset = Event.objects.select_related(
"assigned_to",
"host",
"administrator",
"sponsor",
"language",
)
slug_field = "slug"
template_name = "workshops/event_edit_form.html"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
kwargs = {
"initial": {"event": self.object},
"widgets": {"event": | |
tth_eta, gvec_l = xfcapi.detectorXYToGvec(
dpts.T,
rmat_d, rmat_s,
tvec_d, tvec_s, tvec_c,
beamVec=beamVec)
tth_eta = np.vstack(tth_eta).T
# warp measured points
if distortion is not None:
dpts = distortion.apply_inverse(dpts)
# plane spacings and energies
dsp = 1. / mutil.columnNorm(np.dot(bMat, dhkl))
wlen = 2*dsp*np.sin(0.5*tth_eta[:, 0])
# find on spatial extent of detector
xTest = np.logical_and(
dpts[0, :] >= -0.5*panel_dims[1] + panel_buffer,
dpts[0, :] <= 0.5*panel_dims[1] - panel_buffer)
yTest = np.logical_and(
dpts[1, :] >= -0.5*panel_dims[0] + panel_buffer,
dpts[1, :] <= 0.5*panel_dims[0] - panel_buffer)
onDetector = np.logical_and(xTest, yTest)
if multipleEnergyRanges:
validEnergy = np.zeros(len(wlen), dtype=bool)
for i in range(len(lmin)):
validEnergy = validEnergy | \
np.logical_and(wlen >= lmin[i], wlen <= lmax[i])
pass
else:
validEnergy = np.logical_and(wlen >= lmin, wlen <= lmax)
pass
# index for valid reflections
keepers = np.where(np.logical_and(onDetector, validEnergy))[0]
# assign output arrays
xy_det[iG][keepers, :] = dpts[:, keepers].T
hkls_in[iG][:, keepers] = dhkl[:, keepers]
angles[iG][keepers, :] = tth_eta[keepers, :]
dspacing[iG, keepers] = dsp[keepers]
energy[iG, keepers] = processWavelength(wlen[keepers])
pass
pass
return xy_det, hkls_in, angles, dspacing, energy
if USE_NUMBA:
    @numba.njit(nogil=True, cache=True)
    def _expand_pixels(original, w, h, result):
        # For each (x, y) pixel center in `original`, write the pixel's four
        # corner points (pitch `w` x `h`) into `result`, four consecutive
        # rows per input pixel. Returns `result` (filled in place).
        hw = 0.5 * w
        hh = 0.5 * h
        for el in range(len(original)):
            x, y = original[el, 0], original[el, 1]
            result[el*4 + 0, 0] = x - hw
            result[el*4 + 0, 1] = y - hh
            result[el*4 + 1, 0] = x + hw
            result[el*4 + 1, 1] = y - hh
            result[el*4 + 2, 0] = x + hw
            result[el*4 + 2, 1] = y + hh
            result[el*4 + 3, 0] = x - hw
            result[el*4 + 3, 1] = y + hh

        return result

    @numba.njit(nogil=True, cache=True)
    def _compute_max(tth, eta, result):
        # For each group of 4 corner values (one group per pixel), store the
        # largest |difference| over the corner pairs (0,3), (0,1), (1,2),
        # (2,3) into result[pixel] = (max_tth, max_eta). Eta differences are
        # wrapped into [-pi, pi) via np.remainder before taking abs().
        period = 2.0 * np.pi
        hperiod = np.pi
        for el in range(0, len(tth), 4):
            # seed with the (corner 0, corner 3) pair
            max_tth = np.abs(tth[el + 0] - tth[el + 3])
            eta_diff = eta[el + 0] - eta[el + 3]
            max_eta = np.abs(
                np.remainder(eta_diff + hperiod, period) - hperiod
            )
            for i in range(3):
                # consecutive pairs (0,1), (1,2), (2,3)
                curr_tth = np.abs(tth[el + i] - tth[el + i + 1])
                eta_diff = eta[el + i] - eta[el + i + 1]
                curr_eta = np.abs(
                    np.remainder(eta_diff + hperiod, period) - hperiod
                )
                max_tth = np.maximum(curr_tth, max_tth)
                max_eta = np.maximum(curr_eta, max_eta)
            result[el//4, 0] = max_tth
            result[el//4, 1] = max_eta

        return result

    def angularPixelSize(
            xy_det, xy_pixelPitch,
            rMat_d, rMat_s,
            tVec_d, tVec_s, tVec_c,
            distortion=None, beamVec=None, etaVec=None):
        """
        Calculate angular pixel sizes on a detector.

        * choices to beam vector and eta vector specs have been supressed
        * assumes xy_det in UNWARPED configuration
        """
        xy_det = np.atleast_2d(xy_det)
        if distortion is not None:  # !!! check this logic
            xy_det = distortion.apply(xy_det)
        if beamVec is None:
            beamVec = xfcapi.bVec_ref
        if etaVec is None:
            etaVec = xfcapi.eta_ref

        # expand each pixel center to its four corner points
        xy_expanded = np.empty((len(xy_det) * 4, 2), dtype=xy_det.dtype)
        xy_expanded = _expand_pixels(
            xy_det,
            xy_pixelPitch[0], xy_pixelPitch[1],
            xy_expanded)
        # angles of all corners, then per-pixel max (tth, eta) differences
        gvec_space, _ = xfcapi.detectorXYToGvec(
            xy_expanded,
            rMat_d, rMat_s,
            tVec_d, tVec_s, tVec_c,
            beamVec=beamVec, etaVec=etaVec)
        result = np.empty_like(xy_det)
        return _compute_max(gvec_space[0], gvec_space[1], result)
else:
    def angularPixelSize(xy_det, xy_pixelPitch,
                         rMat_d, rMat_s,
                         tVec_d, tVec_s, tVec_c,
                         distortion=None, beamVec=None, etaVec=None):
        """
        Calculate angular pixel sizes on a detector.

        * choices to beam vector and eta vector specs have been supressed
        * assumes xy_det in UNWARPED configuration
        """
        xy_det = np.atleast_2d(xy_det)
        if distortion is not None:  # !!! check this logic
            xy_det = distortion.apply(xy_det)
        if beamVec is None:
            beamVec = xfcapi.bVec_ref
        if etaVec is None:
            etaVec = xfcapi.eta_ref

        # offsets of a pixel's four corners relative to its center
        xp = np.r_[-0.5, 0.5, 0.5, -0.5] * xy_pixelPitch[0]
        yp = np.r_[-0.5, -0.5, 0.5, 0.5] * xy_pixelPitch[1]
        # corner-index pairs to difference: (3,2), (3,0), (2,1), (1,0)
        diffs = np.array([[3, 3, 2, 1],
                          [2, 0, 1, 0]])

        ang_pix = np.zeros((len(xy_det), 2))

        for ipt, xy in enumerate(xy_det):
            # the four corner points of this pixel
            xc = xp + xy[0]
            yc = yp + xy[1]

            tth_eta, gHat_l = xfcapi.detectorXYToGvec(
                np.vstack([xc, yc]).T,
                rMat_d, rMat_s,
                tVec_d, tVec_s, tVec_c,
                beamVec=beamVec, etaVec=etaVec)

            delta_tth = np.zeros(4)
            delta_eta = np.zeros(4)
            for j in range(4):
                delta_tth[j] = abs(
                    tth_eta[0][diffs[0, j]] - tth_eta[0][diffs[1, j]]
                )
                delta_eta[j] = xfcapi.angularDifference(
                    tth_eta[1][diffs[0, j]], tth_eta[1][diffs[1, j]]
                )

            # pixel size = largest corner-to-corner angular difference
            ang_pix[ipt, 0] = np.amax(delta_tth)
            ang_pix[ipt, 1] = np.amax(delta_eta)
        return ang_pix
if USE_NUMBA:
    @numba.njit(nogil=True, cache=True)
    def _coo_build_window_jit(frame_row, frame_col, frame_data,
                              min_row, max_row, min_col, max_col,
                              result):
        # Scatter COO entries lying inside the inclusive window
        # [min_row, max_row] x [min_col, max_col] into `result`, shifted so
        # the window's corner maps to (0, 0). Returns `result`.
        n = len(frame_row)
        for i in range(n):
            if ((min_row <= frame_row[i] <= max_row) and
                    (min_col <= frame_col[i] <= max_col)):
                new_row = frame_row[i] - min_row
                new_col = frame_col[i] - min_col
                result[new_row, new_col] = frame_data[i]

        return result

    def _coo_build_window(frame_i, min_row, max_row, min_col, max_col):
        # Dense int16 window of the COO-style frame `frame_i`
        # (expects .row / .col / .data attributes).
        window = np.zeros(
            ((max_row - min_row + 1), (max_col - min_col + 1)),
            dtype=np.int16
        )

        return _coo_build_window_jit(frame_i.row, frame_i.col, frame_i.data,
                                     min_row, max_row, min_col, max_col,
                                     window)
else:  # not USE_NUMBA
    def _coo_build_window(frame_i, min_row, max_row, min_col, max_col):
        # Dense int16 window of the COO-style frame `frame_i`
        # (expects .row / .col / .data attributes); vectorized version.
        mask = ((min_row <= frame_i.row) & (frame_i.row <= max_row) &
                (min_col <= frame_i.col) & (frame_i.col <= max_col))
        new_row = frame_i.row[mask] - min_row
        new_col = frame_i.col[mask] - min_col
        new_data = frame_i.data[mask]
        window = np.zeros(
            ((max_row - min_row + 1), (max_col - min_col + 1)),
            dtype=np.int16
        )
        window[new_row, new_col] = new_data

        return window
def make_reflection_patches(instr_cfg,
                            tth_eta, ang_pixel_size, omega=None,
                            tth_tol=0.2, eta_tol=1.0,
                            rmat_c=None, tvec_c=None,
                            npdiv=1, quiet=False,
                            compute_areas_func=gutil.compute_areas):
    """
    Make angular patches on a detector.

    panel_dims are [(xmin, ymin), (xmax, ymax)] in mm

    pixel_pitch is [row_size, column_size] in mm

    FIXME: DISTORTION HANDING IS STILL A KLUDGE!!!

    patches are:

                 delta tth
    d  ------------- ... -------------
    e  | x | x | x | ... | x | x | x |
    l  ------------- ... -------------
    t                 .
    a                 .
                      .
    e  ------------- ... -------------
    t  | x | x | x | ... | x | x | x |
    a  ------------- ... -------------

    outputs are:
        (tth_vtx, eta_vtx),
        (x_vtx, y_vtx),
        connectivity,
        subpixel_areas,
        (x_center, y_center),
        (i_row, j_col)

    NOTE: `rmat_c` defaults to the 3x3 identity and `tvec_c` to a (3, 1)
    zero vector. They were previously numpy-array default arguments
    (evaluated once at import); `None` sentinels avoid sharing a mutable
    default across calls.
    """
    # resolve sentinel defaults (avoids mutable numpy-array defaults)
    if rmat_c is None:
        rmat_c = np.eye(3)
    if tvec_c is None:
        tvec_c = np.zeros((3, 1))

    npts = len(tth_eta)

    # detector quantities
    rmat_d = xfcapi.makeRotMatOfExpMap(
        np.r_[instr_cfg['detector']['transform']['tilt']]
    )
    tvec_d = np.r_[instr_cfg['detector']['transform']['translation']]
    pixel_size = instr_cfg['detector']['pixels']['size']

    frame_nrows = instr_cfg['detector']['pixels']['rows']
    frame_ncols = instr_cfg['detector']['pixels']['columns']

    panel_dims = (
        -0.5*np.r_[frame_ncols*pixel_size[1], frame_nrows*pixel_size[0]],
        0.5*np.r_[frame_ncols*pixel_size[1], frame_nrows*pixel_size[0]]
    )
    # row edges run top-to-bottom (reversed), column edges left-to-right
    row_edges = np.arange(frame_nrows + 1)[::-1]*pixel_size[1] \
        + panel_dims[0][1]
    col_edges = np.arange(frame_ncols + 1)*pixel_size[0] \
        + panel_dims[0][0]

    # handle distortion
    distortion = None
    if distortion_key in instr_cfg['detector']:
        distortion_cfg = instr_cfg['detector'][distortion_key]
        if distortion_cfg is not None:
            try:
                func_name = distortion_cfg['function_name']
                dparams = distortion_cfg['parameters']
                distortion = distortion_pkg.get_mapping(
                    func_name, dparams
                )
            except KeyError:
                raise RuntimeError(
                    "problem with distortion specification"
                )

    # sample frame
    chi = instr_cfg['oscillation_stage']['chi']
    tvec_s = np.r_[instr_cfg['oscillation_stage']['translation']]

    # beam vector
    bvec = np.r_[instr_cfg['beam']['vector']]

    # data to loop
    # ??? WOULD IT BE CHEAPER TO CARRY ZEROS OR USE CONDITIONAL?
    if omega is None:
        full_angs = np.hstack([tth_eta, np.zeros((npts, 1))])
    else:
        full_angs = np.hstack([tth_eta, omega.reshape(npts, 1)])

    patches = []
    for angs, pix in zip(full_angs, ang_pixel_size):
        # calculate bin edges for patch based on local angular pixel size
        # tth
        ntths, tth_edges = gutil.make_tolerance_grid(
            bin_width=np.degrees(pix[0]),
            window_width=tth_tol,
            num_subdivisions=npdiv
        )

        # eta
        netas, eta_edges = gutil.make_tolerance_grid(
            bin_width=np.degrees(pix[1]),
            window_width=eta_tol,
            num_subdivisions=npdiv
        )

        # FOR ANGULAR MESH
        conn = gutil.cellConnectivity(
            netas,
            ntths,
            origin='ll'
        )

        # meshgrid args are (cols, rows), a.k.a (fast, slow)
        m_tth, m_eta = np.meshgrid(tth_edges, eta_edges)
        npts_patch = m_tth.size

        # calculate the patch XY coords from the (tth, eta) angles
        # !!! will CHEAT and ignore the small perturbation the different
        #     omega angle values causes and simply use the central value
        gVec_angs_vtx = np.tile(angs, (npts_patch, 1)) \
            + np.radians(np.vstack([m_tth.flatten(),
                                    m_eta.flatten(),
                                    np.zeros(npts_patch)]).T)

        xy_eval_vtx, rmats_s, on_plane = _project_on_detector_plane(
            gVec_angs_vtx,
            rmat_d, rmat_c,
            chi,
            tvec_d, tvec_c, tvec_s,
            distortion,
            beamVec=bvec)

        areas = compute_areas_func(xy_eval_vtx, conn)

        # EVALUATION POINTS
        # !!! for lack of a better option will use centroids
        tth_eta_cen = gutil.cellCentroids(
            np.atleast_2d(gVec_angs_vtx[:, :2]),
            conn
        )

        gVec_angs = np.hstack(
            [tth_eta_cen,
             np.tile(angs[2], (len(tth_eta_cen), 1))]
        )

        xy_eval, rmats_s, on_plane = _project_on_detector_plane(
            gVec_angs,
            rmat_d, rmat_c,
            chi,
            tvec_d, tvec_c, tvec_s,
            distortion,
            beamVec=bvec)

        row_indices = gutil.cellIndices(row_edges, xy_eval[:, 1])
        col_indices = gutil.cellIndices(col_edges, xy_eval[:, 0])

        # append patch data to list
        patches.append(
            ((gVec_angs_vtx[:, 0].reshape(m_tth.shape),
              gVec_angs_vtx[:, 1].reshape(m_tth.shape)),
             (xy_eval_vtx[:, 0].reshape(m_tth.shape),
              xy_eval_vtx[:, 1].reshape(m_tth.shape)),
             conn,
             areas.reshape(netas, ntths),
             (xy_eval[:, 0].reshape(netas, ntths),
              xy_eval[:, 1].reshape(netas, ntths)),
             (row_indices.reshape(netas, ntths),
              col_indices.reshape(netas, ntths)))
        )
    return patches
def extract_detector_transformation(detector_params):
"""
Construct arrays from detector parameters.
goes from 10 vector of detector parames OR instrument config dictionary
(from YAML spec) to affine transformation arrays
Parameters
----------
detector_params : TYPE
DESCRIPTION.
Returns
-------
rMat_d : TYPE
DESCRIPTION.
tVec_d : TYPE
DESCRIPTION.
chi : TYPE
DESCRIPTION.
tVec_s : TYPE
DESCRIPTION.
"""
# extract variables for convenience
if isinstance(detector_params, dict):
rMat_d = xfcapi.makeRotMatOfExpMap(
| |
lb, size, value):
"""set_block(StringDataSet3D self, DataSetIndex3D lb, DataSetIndex3D size, Strings value)"""
return _RMF_HDF5.StringDataSet3D_set_block(self, lb, size, value)
def set_size(self, ijk):
"""set_size(StringDataSet3D self, DataSetIndex3D ijk)"""
return _RMF_HDF5.StringDataSet3D_set_size(self, ijk)
__swig_destroy__ = _RMF_HDF5.delete_StringDataSet3D
__del__ = lambda self: None
StringDataSet3D_swigregister = _RMF_HDF5.StringDataSet3D_swigregister
StringDataSet3D_swigregister(StringDataSet3D)
class StringsDataSet1D(StringsDataSetAttributes1D):
    """Proxy of C++ RMF::HDF5::DataSetD<(RMF::HDF5::StringsTraits,1)> class.

    NOTE(review): this looks SWIG-generated; prefer regenerating from the
    interface file over hand-editing.
    """
    # SWIG bookkeeping: merge inherited setter/getter tables so attribute
    # access dispatches through _swig_setattr/_swig_getattr
    __swig_setmethods__ = {}
    for _s in [StringsDataSetAttributes1D]:
        __swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
    __setattr__ = lambda self, name, value: _swig_setattr(self, StringsDataSet1D, name, value)
    __swig_getmethods__ = {}
    for _s in [StringsDataSetAttributes1D]:
        __swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
    __getattr__ = lambda self, name: _swig_getattr(self, StringsDataSet1D, name)
    def __init__(self):
        """__init__(RMF::HDF5::DataSetD<(RMF::HDF5::StringsTraits,1)> self) -> StringsDataSet1D"""
        this = _RMF_HDF5.new_StringsDataSet1D()
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    def set_value(self, ijk, value):
        """set_value(StringsDataSet1D self, DataSetIndex1D ijk, Strings value)"""
        return _RMF_HDF5.StringsDataSet1D_set_value(self, ijk, value)
    def __str__(self):
        """__str__(StringsDataSet1D self) -> std::string"""
        return _RMF_HDF5.StringsDataSet1D___str__(self)
    def __repr__(self):
        """__repr__(StringsDataSet1D self) -> std::string"""
        return _RMF_HDF5.StringsDataSet1D___repr__(self)
    def set_block(self, lb, size, value):
        """set_block(StringsDataSet1D self, DataSetIndex1D lb, DataSetIndex1D size, StringsList value)"""
        return _RMF_HDF5.StringsDataSet1D_set_block(self, lb, size, value)
    def set_size(self, ijk):
        """set_size(StringsDataSet1D self, DataSetIndex1D ijk)"""
        return _RMF_HDF5.StringsDataSet1D_set_size(self, ijk)
    __swig_destroy__ = _RMF_HDF5.delete_StringsDataSet1D
    __del__ = lambda self: None
# register the proxy class with the SWIG runtime
StringsDataSet1D_swigregister = _RMF_HDF5.StringsDataSet1D_swigregister
StringsDataSet1D_swigregister(StringsDataSet1D)
class StringsDataSet2D(StringsDataSetAttributes2D):
    """Proxy of C++ RMF::HDF5::DataSetD<(RMF::HDF5::StringsTraits,2)> class.

    NOTE(review): this looks SWIG-generated; prefer regenerating from the
    interface file over hand-editing.
    """
    # SWIG bookkeeping: merge inherited setter/getter tables so attribute
    # access dispatches through _swig_setattr/_swig_getattr
    __swig_setmethods__ = {}
    for _s in [StringsDataSetAttributes2D]:
        __swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
    __setattr__ = lambda self, name, value: _swig_setattr(self, StringsDataSet2D, name, value)
    __swig_getmethods__ = {}
    for _s in [StringsDataSetAttributes2D]:
        __swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
    __getattr__ = lambda self, name: _swig_getattr(self, StringsDataSet2D, name)
    def __init__(self):
        """__init__(RMF::HDF5::DataSetD<(RMF::HDF5::StringsTraits,2)> self) -> StringsDataSet2D"""
        this = _RMF_HDF5.new_StringsDataSet2D()
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    def set_value(self, ijk, value):
        """set_value(StringsDataSet2D self, DataSetIndex2D ijk, Strings value)"""
        return _RMF_HDF5.StringsDataSet2D_set_value(self, ijk, value)
    def __str__(self):
        """__str__(StringsDataSet2D self) -> std::string"""
        return _RMF_HDF5.StringsDataSet2D___str__(self)
    def __repr__(self):
        """__repr__(StringsDataSet2D self) -> std::string"""
        return _RMF_HDF5.StringsDataSet2D___repr__(self)
    def set_block(self, lb, size, value):
        """set_block(StringsDataSet2D self, DataSetIndex2D lb, DataSetIndex2D size, StringsList value)"""
        return _RMF_HDF5.StringsDataSet2D_set_block(self, lb, size, value)
    def set_size(self, ijk):
        """set_size(StringsDataSet2D self, DataSetIndex2D ijk)"""
        return _RMF_HDF5.StringsDataSet2D_set_size(self, ijk)
    __swig_destroy__ = _RMF_HDF5.delete_StringsDataSet2D
    __del__ = lambda self: None
# register the proxy class with the SWIG runtime
StringsDataSet2D_swigregister = _RMF_HDF5.StringsDataSet2D_swigregister
StringsDataSet2D_swigregister(StringsDataSet2D)
class StringsDataSet3D(StringsDataSetAttributes3D):
    """Proxy of C++ RMF::HDF5::DataSetD<(RMF::HDF5::StringsTraits,3)> class.

    NOTE(review): this looks SWIG-generated; prefer regenerating from the
    interface file over hand-editing.
    """
    # SWIG bookkeeping: merge inherited setter/getter tables so attribute
    # access dispatches through _swig_setattr/_swig_getattr
    __swig_setmethods__ = {}
    for _s in [StringsDataSetAttributes3D]:
        __swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
    __setattr__ = lambda self, name, value: _swig_setattr(self, StringsDataSet3D, name, value)
    __swig_getmethods__ = {}
    for _s in [StringsDataSetAttributes3D]:
        __swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
    __getattr__ = lambda self, name: _swig_getattr(self, StringsDataSet3D, name)
    def __init__(self):
        """__init__(RMF::HDF5::DataSetD<(RMF::HDF5::StringsTraits,3)> self) -> StringsDataSet3D"""
        this = _RMF_HDF5.new_StringsDataSet3D()
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    def set_value(self, ijk, value):
        """set_value(StringsDataSet3D self, DataSetIndex3D ijk, Strings value)"""
        return _RMF_HDF5.StringsDataSet3D_set_value(self, ijk, value)
    def __str__(self):
        """__str__(StringsDataSet3D self) -> std::string"""
        return _RMF_HDF5.StringsDataSet3D___str__(self)
    def __repr__(self):
        """__repr__(StringsDataSet3D self) -> std::string"""
        return _RMF_HDF5.StringsDataSet3D___repr__(self)
    def set_block(self, lb, size, value):
        """set_block(StringsDataSet3D self, DataSetIndex3D lb, DataSetIndex3D size, StringsList value)"""
        return _RMF_HDF5.StringsDataSet3D_set_block(self, lb, size, value)
    def set_size(self, ijk):
        """set_size(StringsDataSet3D self, DataSetIndex3D ijk)"""
        return _RMF_HDF5.StringsDataSet3D_set_size(self, ijk)
    __swig_destroy__ = _RMF_HDF5.delete_StringsDataSet3D
    __del__ = lambda self: None
# register the proxy class with the SWIG runtime
StringsDataSet3D_swigregister = _RMF_HDF5.StringsDataSet3D_swigregister
StringsDataSet3D_swigregister(StringsDataSet3D)
class ConstGroup(_ConstAttributesObject):
    """Proxy of C++ RMF::HDF5::ConstGroup class.

    Read-only handle on an HDF5 group: typed accessors for child data sets
    plus child enumeration/lookup. NOTE(review): this looks SWIG-generated;
    prefer regenerating from the interface file over hand-editing.
    """
    # SWIG bookkeeping: merge inherited setter/getter tables so attribute
    # access dispatches through _swig_setattr/_swig_getattr
    __swig_setmethods__ = {}
    for _s in [_ConstAttributesObject]:
        __swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
    __setattr__ = lambda self, name, value: _swig_setattr(self, ConstGroup, name, value)
    __swig_getmethods__ = {}
    for _s in [_ConstAttributesObject]:
        __swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
    __getattr__ = lambda self, name: _swig_getattr(self, ConstGroup, name)
    def __str__(self):
        """__str__(ConstGroup self) -> std::string"""
        return _RMF_HDF5.ConstGroup___str__(self)
    def __repr__(self):
        """__repr__(ConstGroup self) -> std::string"""
        return _RMF_HDF5.ConstGroup___repr__(self)
    def __init__(self, *args):
        """
        __init__(RMF::HDF5::ConstGroup self) -> ConstGroup
        __init__(RMF::HDF5::ConstGroup self, ConstGroup parent, std::string name) -> ConstGroup
        """
        this = _RMF_HDF5.new_ConstGroup(*args)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    # typed child data-set accessors; each has an overload taking an optional
    # DataSetAccessProperties argument (see docstrings below)
    def get_child_int_data_set_1d(self, *args):
        """
        get_child_int_data_set_1d(ConstGroup self, std::string name, RMF::HDF5::DataSetAccessPropertiesD< RMF::HDF5::IntTraits,1 > props) -> IntConstDataSet1D
        get_child_int_data_set_1d(ConstGroup self, std::string name) -> IntConstDataSet1D
        """
        return _RMF_HDF5.ConstGroup_get_child_int_data_set_1d(self, *args)
    def get_child_int_data_set_2d(self, *args):
        """
        get_child_int_data_set_2d(ConstGroup self, std::string name, RMF::HDF5::DataSetAccessPropertiesD< RMF::HDF5::IntTraits,2 > props) -> IntConstDataSet2D
        get_child_int_data_set_2d(ConstGroup self, std::string name) -> IntConstDataSet2D
        """
        return _RMF_HDF5.ConstGroup_get_child_int_data_set_2d(self, *args)
    def get_child_int_data_set_3d(self, *args):
        """
        get_child_int_data_set_3d(ConstGroup self, std::string name, RMF::HDF5::DataSetAccessPropertiesD< RMF::HDF5::IntTraits,3 > props) -> IntConstDataSet3D
        get_child_int_data_set_3d(ConstGroup self, std::string name) -> IntConstDataSet3D
        """
        return _RMF_HDF5.ConstGroup_get_child_int_data_set_3d(self, *args)
    def get_child_float_data_set_1d(self, *args):
        """
        get_child_float_data_set_1d(ConstGroup self, std::string name, RMF::HDF5::DataSetAccessPropertiesD< RMF::HDF5::FloatTraits,1 > props) -> FloatConstDataSet1D
        get_child_float_data_set_1d(ConstGroup self, std::string name) -> FloatConstDataSet1D
        """
        return _RMF_HDF5.ConstGroup_get_child_float_data_set_1d(self, *args)
    def get_child_float_data_set_2d(self, *args):
        """
        get_child_float_data_set_2d(ConstGroup self, std::string name, RMF::HDF5::DataSetAccessPropertiesD< RMF::HDF5::FloatTraits,2 > props) -> FloatConstDataSet2D
        get_child_float_data_set_2d(ConstGroup self, std::string name) -> FloatConstDataSet2D
        """
        return _RMF_HDF5.ConstGroup_get_child_float_data_set_2d(self, *args)
    def get_child_float_data_set_3d(self, *args):
        """
        get_child_float_data_set_3d(ConstGroup self, std::string name, RMF::HDF5::DataSetAccessPropertiesD< RMF::HDF5::FloatTraits,3 > props) -> FloatConstDataSet3D
        get_child_float_data_set_3d(ConstGroup self, std::string name) -> FloatConstDataSet3D
        """
        return _RMF_HDF5.ConstGroup_get_child_float_data_set_3d(self, *args)
    def get_child_index_data_set_1d(self, *args):
        """
        get_child_index_data_set_1d(ConstGroup self, std::string name, RMF::HDF5::DataSetAccessPropertiesD< RMF::HDF5::IndexTraits,1 > props) -> IndexConstDataSet1D
        get_child_index_data_set_1d(ConstGroup self, std::string name) -> IndexConstDataSet1D
        """
        return _RMF_HDF5.ConstGroup_get_child_index_data_set_1d(self, *args)
    def get_child_index_data_set_2d(self, *args):
        """
        get_child_index_data_set_2d(ConstGroup self, std::string name, RMF::HDF5::DataSetAccessPropertiesD< RMF::HDF5::IndexTraits,2 > props) -> IndexConstDataSet2D
        get_child_index_data_set_2d(ConstGroup self, std::string name) -> IndexConstDataSet2D
        """
        return _RMF_HDF5.ConstGroup_get_child_index_data_set_2d(self, *args)
    def get_child_index_data_set_3d(self, *args):
        """
        get_child_index_data_set_3d(ConstGroup self, std::string name, RMF::HDF5::DataSetAccessPropertiesD< RMF::HDF5::IndexTraits,3 > props) -> IndexConstDataSet3D
        get_child_index_data_set_3d(ConstGroup self, std::string name) -> IndexConstDataSet3D
        """
        return _RMF_HDF5.ConstGroup_get_child_index_data_set_3d(self, *args)
    def get_child_string_data_set_1d(self, *args):
        """
        get_child_string_data_set_1d(ConstGroup self, std::string name, RMF::HDF5::DataSetAccessPropertiesD< RMF::HDF5::StringTraits,1 > props) -> StringConstDataSet1D
        get_child_string_data_set_1d(ConstGroup self, std::string name) -> StringConstDataSet1D
        """
        return _RMF_HDF5.ConstGroup_get_child_string_data_set_1d(self, *args)
    def get_child_string_data_set_2d(self, *args):
        """
        get_child_string_data_set_2d(ConstGroup self, std::string name, RMF::HDF5::DataSetAccessPropertiesD< RMF::HDF5::StringTraits,2 > props) -> StringConstDataSet2D
        get_child_string_data_set_2d(ConstGroup self, std::string name) -> StringConstDataSet2D
        """
        return _RMF_HDF5.ConstGroup_get_child_string_data_set_2d(self, *args)
    def get_child_string_data_set_3d(self, *args):
        """
        get_child_string_data_set_3d(ConstGroup self, std::string name, RMF::HDF5::DataSetAccessPropertiesD< RMF::HDF5::StringTraits,3 > props) -> StringConstDataSet3D
        get_child_string_data_set_3d(ConstGroup self, std::string name) -> StringConstDataSet3D
        """
        return _RMF_HDF5.ConstGroup_get_child_string_data_set_3d(self, *args)
    def get_child_strings_data_set_1d(self, *args):
        """
        get_child_strings_data_set_1d(ConstGroup self, std::string name, RMF::HDF5::DataSetAccessPropertiesD< RMF::HDF5::StringsTraits,1 > props) -> StringsConstDataSet1D
        get_child_strings_data_set_1d(ConstGroup self, std::string name) -> StringsConstDataSet1D
        """
        return _RMF_HDF5.ConstGroup_get_child_strings_data_set_1d(self, *args)
    def get_child_strings_data_set_2d(self, *args):
        """
        get_child_strings_data_set_2d(ConstGroup self, std::string name, RMF::HDF5::DataSetAccessPropertiesD< RMF::HDF5::StringsTraits,2 > props) -> StringsConstDataSet2D
        get_child_strings_data_set_2d(ConstGroup self, std::string name) -> StringsConstDataSet2D
        """
        return _RMF_HDF5.ConstGroup_get_child_strings_data_set_2d(self, *args)
    def get_child_strings_data_set_3d(self, *args):
        """
        get_child_strings_data_set_3d(ConstGroup self, std::string name, RMF::HDF5::DataSetAccessPropertiesD< RMF::HDF5::StringsTraits,3 > props) -> StringsConstDataSet3D
        get_child_strings_data_set_3d(ConstGroup self, std::string name) -> StringsConstDataSet3D
        """
        return _RMF_HDF5.ConstGroup_get_child_strings_data_set_3d(self, *args)
    def get_child_floats_data_set_1d(self, *args):
        """
        get_child_floats_data_set_1d(ConstGroup self, std::string name, RMF::HDF5::DataSetAccessPropertiesD< RMF::HDF5::FloatsTraits,1 > props) -> FloatsConstDataSet1D
        get_child_floats_data_set_1d(ConstGroup self, std::string name) -> FloatsConstDataSet1D
        """
        return _RMF_HDF5.ConstGroup_get_child_floats_data_set_1d(self, *args)
    def get_child_floats_data_set_2d(self, *args):
        """
        get_child_floats_data_set_2d(ConstGroup self, std::string name, RMF::HDF5::DataSetAccessPropertiesD< RMF::HDF5::FloatsTraits,2 > props) -> FloatsConstDataSet2D
        get_child_floats_data_set_2d(ConstGroup self, std::string name) -> FloatsConstDataSet2D
        """
        return _RMF_HDF5.ConstGroup_get_child_floats_data_set_2d(self, *args)
    def get_child_floats_data_set_3d(self, *args):
        """
        get_child_floats_data_set_3d(ConstGroup self, std::string name, RMF::HDF5::DataSetAccessPropertiesD< RMF::HDF5::FloatsTraits,3 > props) -> FloatsConstDataSet3D
        get_child_floats_data_set_3d(ConstGroup self, std::string name) -> FloatsConstDataSet3D
        """
        return _RMF_HDF5.ConstGroup_get_child_floats_data_set_3d(self, *args)
    def get_child_ints_data_set_1d(self, *args):
        """
        get_child_ints_data_set_1d(ConstGroup self, std::string name, RMF::HDF5::DataSetAccessPropertiesD< RMF::HDF5::IntsTraits,1 > props) -> IntsConstDataSet1D
        get_child_ints_data_set_1d(ConstGroup self, std::string name) -> IntsConstDataSet1D
        """
        return _RMF_HDF5.ConstGroup_get_child_ints_data_set_1d(self, *args)
    def get_child_ints_data_set_2d(self, *args):
        """
        get_child_ints_data_set_2d(ConstGroup self, std::string name, RMF::HDF5::DataSetAccessPropertiesD< RMF::HDF5::IntsTraits,2 > props) -> IntsConstDataSet2D
        get_child_ints_data_set_2d(ConstGroup self, std::string name) -> IntsConstDataSet2D
        """
        return _RMF_HDF5.ConstGroup_get_child_ints_data_set_2d(self, *args)
    def get_child_ints_data_set_3d(self, *args):
        """
        get_child_ints_data_set_3d(ConstGroup self, std::string name, RMF::HDF5::DataSetAccessPropertiesD< RMF::HDF5::IntsTraits,3 > props) -> IntsConstDataSet3D
        get_child_ints_data_set_3d(ConstGroup self, std::string name) -> IntsConstDataSet3D
        """
        return _RMF_HDF5.ConstGroup_get_child_ints_data_set_3d(self, *args)
    def get_child_indexes_data_set_1d(self, *args):
        """
        get_child_indexes_data_set_1d(ConstGroup self, std::string name, RMF::HDF5::DataSetAccessPropertiesD< RMF::HDF5::IndexesTraits,1 > props) -> IndexesConstDataSet1D
        get_child_indexes_data_set_1d(ConstGroup self, std::string name) -> IndexesConstDataSet1D
        """
        return _RMF_HDF5.ConstGroup_get_child_indexes_data_set_1d(self, *args)
    def get_child_indexes_data_set_2d(self, *args):
        """
        get_child_indexes_data_set_2d(ConstGroup self, std::string name, RMF::HDF5::DataSetAccessPropertiesD< RMF::HDF5::IndexesTraits,2 > props) -> IndexesConstDataSet2D
        get_child_indexes_data_set_2d(ConstGroup self, std::string name) -> IndexesConstDataSet2D
        """
        return _RMF_HDF5.ConstGroup_get_child_indexes_data_set_2d(self, *args)
    def get_child_indexes_data_set_3d(self, *args):
        """
        get_child_indexes_data_set_3d(ConstGroup self, std::string name, RMF::HDF5::DataSetAccessPropertiesD< RMF::HDF5::IndexesTraits,3 > props) -> IndexesConstDataSet3D
        get_child_indexes_data_set_3d(ConstGroup self, std::string name) -> IndexesConstDataSet3D
        """
        return _RMF_HDF5.ConstGroup_get_child_indexes_data_set_3d(self, *args)
    # child enumeration and lookup
    def get_number_of_children(self):
        """get_number_of_children(ConstGroup self) -> unsigned int"""
        return _RMF_HDF5.ConstGroup_get_number_of_children(self)
    def get_child_name(self, i):
        """get_child_name(ConstGroup self, unsigned int i) -> std::string"""
        return _RMF_HDF5.ConstGroup_get_child_name(self, i)
    def get_has_child(self, name):
        """get_has_child(ConstGroup self, std::string name) -> bool"""
        return _RMF_HDF5.ConstGroup_get_has_child(self, name)
    def get_child_is_data_set(self, i):
        """get_child_is_data_set(ConstGroup self, unsigned int i) -> bool"""
        return _RMF_HDF5.ConstGroup_get_child_is_data_set(self, i)
    def get_child_group(self, *args):
        """
        get_child_group(ConstGroup self, unsigned int i) -> ConstGroup
        get_child_group(ConstGroup self, std::string name) -> ConstGroup
        """
        return _RMF_HDF5.ConstGroup_get_child_group(self, *args)
    def get_child_is_group(self, *args):
        """
        get_child_is_group(ConstGroup self, unsigned int i) -> bool
        get_child_is_group(ConstGroup self, std::string name) -> bool
        """
        return _RMF_HDF5.ConstGroup_get_child_is_group(self, *args)
    __swig_destroy__ = _RMF_HDF5.delete_ConstGroup
    __del__ = lambda self: None
# register the proxy class with the SWIG runtime
ConstGroup_swigregister = _RMF_HDF5.ConstGroup_swigregister
ConstGroup_swigregister(ConstGroup)
class _HDF5MutableAttributesGroup(ConstGroup):
"""Proxy of C++ RMF::HDF5::MutableAttributes<(RMF::HDF5::ConstGroup)> class."""
__swig_setmethods__ = {}
for _s in [ConstGroup]:
__swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
__setattr__ = lambda self, name, value: _swig_setattr(self, _HDF5MutableAttributesGroup, name, value)
__swig_getmethods__ = {}
for _s in [ConstGroup]:
__swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
__getattr__ = lambda self, name: _swig_getattr(self, _HDF5MutableAttributesGroup, name)
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined")
__repr__ = _swig_repr
def set_int_attribute(self, nm, value):
"""set_int_attribute(_HDF5MutableAttributesGroup self, std::string nm, Ints value)"""
return _RMF_HDF5._HDF5MutableAttributesGroup_set_int_attribute(self, nm, value)
def set_float_attribute(self, nm, value):
"""set_float_attribute(_HDF5MutableAttributesGroup self, std::string nm, Floats value)"""
return _RMF_HDF5._HDF5MutableAttributesGroup_set_float_attribute(self, nm, value)
def set_index_attribute(self, nm, value):
"""set_index_attribute(_HDF5MutableAttributesGroup self, std::string nm, Ints value)"""
return _RMF_HDF5._HDF5MutableAttributesGroup_set_index_attribute(self, nm, value)
def | |
# peter_sslers/web/lib/form_utils.py
# stdlib
import sys

# pypi
import six

# local
from ...lib import db as lib_db
from ...lib import utils
from ...model import objects as model_objects
from ...model import utils as model_utils
from . import formhandling
# ==============================================================================
def decode_args(getcreate_args):
    """
    Normalize ``bytes`` values in `getcreate_args` to utf8 ``str``.

    Python2/Python3 compatibility shim: under Python 3, uploaded form data may
    arrive as ``bytes`` while downstream code expects text. Under Python 2 the
    dict is returned untouched, matching the original behavior.

    The dict is mutated in-place and also returned for call-chaining.

    :param getcreate_args: dict of get-or-create arguments
    :returns: the same dict, with ``bytes`` values decoded
    """
    # use stdlib `sys` for the version check instead of third-party `six`
    if sys.version_info[0] >= 3:
        # list() snapshot: we reassign keys while iterating
        for (k, v) in list(getcreate_args.items()):
            if isinstance(v, bytes):
                getcreate_args[k] = v.decode("utf8")
    return getcreate_args
# standardized mapping for `model_utils.DomainsChallenged` to a formStash
# each ACME challenge type maps to the form field listing the domain names
# to be validated via that challenge type
DOMAINS_CHALLENGED_FIELDS = {
    "http-01": "domain_names_http01",
    "dns-01": "domain_names_dns01",
}
class AcmeAccountUploadParser(object):
    """
    An AcmeAccount may be uploaded multiple ways:
    * a single PEM file
    * an intra-associated three file triplet from a Certbot installation

    This parser operates on a validated FormEncode results object (via
    `pyramid_formencode_classic`). Validation failures are signaled by the
    formStash's `fatal_field`/`fatal_form` methods, which raise.
    """
    # overwritten in __init__
    getcreate_args = None
    formStash = None
    # tracked; populated by `require_new` / `require_upload`
    acme_account_provider_id = None
    account_key_pem = None
    le_meta_jsons = None
    le_pkey_jsons = None
    le_reg_jsons = None
    private_key_cycle_id = None
    private_key_technology_id = None
    upload_type = None  # pem OR letsencrypt
    def __init__(self, formStash):
        self.formStash = formStash
        self.getcreate_args = {}
    def require_new(self, require_contact=None, require_technology=True):
        """
        routine for creating a NEW AcmeAccount (peter_sslers generates the credentials)

        :param require_contact: ``True`` if required; ``False`` if not; ``None`` for conditional logic
        :param require_technology: ``True`` if required; ``False`` if not; ``None`` for conditional logic
        """
        formStash = self.formStash
        acme_account_provider_id = formStash.results.get(
            "acme_account_provider_id", None
        )
        if acme_account_provider_id is None:
            # `formStash.fatal_field()` will raise `FormFieldInvalid(FormInvalid)`
            formStash.fatal_field(
                field="acme_account_provider_id", message="No provider submitted."
            )
        private_key_cycle = formStash.results.get("account__private_key_cycle", None)
        if private_key_cycle is None:
            # `formStash.fatal_field()` will raise `FormFieldInvalid(FormInvalid)`
            formStash.fatal_field(
                field="account__private_key_cycle",
                message="No PrivateKey cycle submitted.",
            )
        private_key_cycle_id = model_utils.PrivateKeyCycle.from_string(
            private_key_cycle
        )
        private_key_technology_id = None
        private_key_technology = formStash.results.get(
            "account__private_key_technology", None
        )
        if private_key_technology:
            private_key_technology_id = model_utils.KeyTechnology.from_string(
                private_key_technology
            )
        if not private_key_technology_id and require_technology:
            # `formStash.fatal_field()` will raise `FormFieldInvalid(FormInvalid)`
            formStash.fatal_field(
                field="account__private_key_technology",
                message="No PrivateKey technology submitted.",
            )
        contact = formStash.results.get("account__contact", None)
        if not contact and require_contact:
            # `formStash.fatal_field()` will raise `FormFieldInvalid(FormInvalid)`
            formStash.fatal_field(
                field="account__contact",
                message="`account__contact` is required.",
            )
        # stash each value on `self` AND in the get-or-create args dict
        getcreate_args = {}
        self.contact = getcreate_args["contact"] = contact
        self.acme_account_provider_id = getcreate_args[
            "acme_account_provider_id"
        ] = acme_account_provider_id
        self.private_key_cycle_id = getcreate_args[
            "private_key_cycle_id"
        ] = private_key_cycle_id
        self.private_key_technology_id = getcreate_args[
            "private_key_technology_id"
        ] = private_key_technology_id
        self.getcreate_args = decode_args(getcreate_args)
    def require_upload(self, require_contact=None, require_technology=None):
        """
        routine for uploading an existing AcmeAccount+AcmeAccountKey

        :param require_contact: ``True`` if required; ``False`` if not; ``None`` for conditional logic
        :param require_technology: ``True`` if required; ``False`` if not; ``None`` for conditional logic
        """
        formStash = self.formStash
        # -------------------
        # do a quick parse...
        # the submission must be EITHER a single PEM file OR the complete
        # three-file LetsEncrypt/Certbot triplet -- never a partial triplet
        requirements_either_or = (
            (
                "account_key_file_pem",
                # "acme_account_provider_id",
            ),
            (
                "account_key_file_le_meta",
                "account_key_file_le_pkey",
                "account_key_file_le_reg",
            ),
        )
        failures = []
        passes = []
        for idx, option_set in enumerate(requirements_either_or):
            option_set_results = [
                True if formStash.results[option_set_item] is not None else False
                for option_set_item in option_set
            ]
            # if we have any item, we need all of them
            if any(option_set_results):
                if not all(option_set_results):
                    failures.append(
                        "If any of %s is provided, all must be provided."
                        % str(option_set)
                    )
                else:
                    passes.append(idx)
        if (len(passes) != 1) or failures:
            # `formStash.fatal_form()` will raise `FormInvalid()`
            formStash.fatal_form(
                "You must upload `account_key_file_pem` or all of (`account_key_file_le_meta`, `account_key_file_le_pkey`, `account_key_file_le_reg`)."
            )
        # -------------------
        # validate the provider option
        # will be None unless a pem is uploaded
        # required for PEM, ignored otherwise
        acme_account_provider_id = formStash.results.get(
            "acme_account_provider_id", None
        )
        private_key_cycle = formStash.results.get("account__private_key_cycle", None)
        if private_key_cycle is None:
            # `formStash.fatal_field()` will raise `FormFieldInvalid(FormInvalid)`
            formStash.fatal_field(
                field="account__private_key_cycle",
                message="No PrivateKey cycle submitted.",
            )
        private_key_cycle_id = model_utils.PrivateKeyCycle.from_string(
            private_key_cycle
        )
        private_key_technology_id = None
        private_key_technology = formStash.results.get(
            "account__private_key_technology", None
        )
        if private_key_technology is not None:
            private_key_technology_id = model_utils.KeyTechnology.from_string(
                private_key_technology
            )
        if not private_key_technology_id and require_technology:
            # `formStash.fatal_field()` will raise `FormFieldInvalid(FormInvalid)`
            formStash.fatal_field(
                field="account__private_key_technology",
                message="No PrivateKey technology submitted.",
            )
        # require `contact` when uploading a PEM file
        if formStash.results["account_key_file_pem"] is not None:
            require_contact = True
        contact = formStash.results.get("account__contact")
        if not contact and require_contact:
            # `formStash.fatal_field()` will raise `FormFieldInvalid(FormInvalid)`
            formStash.fatal_field(
                field="account__contact",
                message="`account__contact` is required.",
            )
        # stash each value on `self` AND in the get-or-create args dict
        getcreate_args = {}
        self.contact = getcreate_args["contact"] = contact
        self.private_key_cycle_id = getcreate_args[
            "private_key_cycle_id"
        ] = private_key_cycle_id
        self.private_key_technology_id = getcreate_args[
            "private_key_technology_id"
        ] = private_key_technology_id
        if formStash.results["account_key_file_pem"] is not None:
            if acme_account_provider_id is None:
                # `formStash.fatal_field()` will raise `FormFieldInvalid(FormInvalid)`
                formStash.fatal_field(
                    field="acme_account_provider_id", message="No provider submitted."
                )
            self.upload_type = "pem"
            self.acme_account_provider_id = getcreate_args[
                "acme_account_provider_id"
            ] = acme_account_provider_id
            self.account_key_pem = getcreate_args[
                "key_pem"
            ] = formhandling.slurp_file_field(formStash, "account_key_file_pem")
        else:
            # note that we use `jsonS` to indicate a string
            self.le_meta_jsons = getcreate_args[
                "le_meta_jsons"
            ] = formhandling.slurp_file_field(formStash, "account_key_file_le_meta")
            self.le_pkey_jsons = getcreate_args[
                "le_pkey_jsons"
            ] = formhandling.slurp_file_field(formStash, "account_key_file_le_pkey")
            self.le_reg_jsons = getcreate_args[
                "le_reg_jsons"
            ] = formhandling.slurp_file_field(formStash, "account_key_file_le_reg")
        self.getcreate_args = decode_args(getcreate_args)
class _PrivateKeyUploadParser(object):
    """
    Parses a PrivateKey submission from a validated formStash.

    A PrivateKey upload is simple; this class only exists so calling code can
    treat PrivateKey uploads the same way as AcmeAccount uploads.
    """
    # populated by __init__
    getcreate_args = None
    formStash = None
    # populated by `require_upload`
    private_key_pem = None
    upload_type = None  # "pem" when a PEM file was uploaded
    def __init__(self, formStash):
        self.formStash = formStash
        self.getcreate_args = {}
    def require_upload(self):
        """
        Process an uploaded PrivateKey PEM file, if one was submitted.
        """
        _formStash = self.formStash
        _args = {}
        if _formStash.results["private_key_file_pem"] is not None:
            pem_text = formhandling.slurp_file_field(
                _formStash, "private_key_file_pem"
            )
            self.upload_type = "pem"
            self.private_key_pem = _args["key_pem"] = pem_text
        self.getcreate_args = decode_args(_args)
class _AcmeAccountSelection(object):
    """
    Class used to manage an uploaded AcmeAccount
    """
    # how the account was selected: "upload", "global_default", "existing",
    # "reuse" or "none" (set by `parse_AcmeAccountSelection`)
    selection = None
    upload_parsed = None  # instance of AcmeAccountUploadParser or None
    AcmeAccount = None  # db record, when loaded via `lib_db.get`
class _PrivateKeySelection(object):
    # how the key was selected: "upload", "existing", "reuse", "generate" or
    # "private_key_for_account_key" (set by `parse_PrivateKeySelection`)
    selection = None
    upload_parsed = None  # instance of _PrivateKeyUploadParser or None
    # string name of the requested strategy; see `model_utils.PrivateKeyStrategy`
    private_key_strategy__requested = None
    PrivateKey = None  # db record, when loaded via `lib_db.get`
    @property
    def private_key_strategy_id__requested(self):
        """numeric id for the requested private-key strategy string"""
        return model_utils.PrivateKeyStrategy.from_string(
            self.private_key_strategy__requested
        )
def parse_AcmeAccountSelection(
    request,
    formStash,
    account_key_option=None,
    allow_none=None,
    require_contact=None,
):
    """
    Parse the submitted form to determine which AcmeAccount was selected.

    :param request: the active request; provides `request.api_context` for db access
    :param formStash: an instance of `pyramid_formencode_classic.FormStash`
    :param account_key_option: string naming the selection mechanism the form used
    :param allow_none: if truthy, permit the explicit "none" selection
    :param require_contact: ``True`` if required; ``False`` if not; ``None`` for conditional logic
    :returns: a populated `_AcmeAccountSelection`
    :raises: via `formStash.fatal_form()` / `formStash.fatal_field()` on invalid input
    """
    # NOTE(review): removed unused local `account_key_pem` and the redundant
    # `dbAcmeAccount = None` initialization from the original
    account_key_pem_md5 = None
    is_global_default = None
    # handle the explicit-option
    acmeAccountSelection = _AcmeAccountSelection()
    if account_key_option == "account_key_file":
        # this will handle form validation and raise errors.
        parser = AcmeAccountUploadParser(formStash)
        # this will have: `contact`, `private_key_cycle`, `private_key_technology`
        parser.require_upload(require_contact=require_contact)
        # update our object
        acmeAccountSelection.selection = "upload"
        acmeAccountSelection.upload_parsed = parser
        return acmeAccountSelection
    else:
        if account_key_option == "account_key_global_default":
            acmeAccountSelection.selection = "global_default"
            account_key_pem_md5 = formStash.results["account_key_global_default"]
            is_global_default = True
        elif account_key_option == "account_key_existing":
            acmeAccountSelection.selection = "existing"
            account_key_pem_md5 = formStash.results["account_key_existing"]
        elif account_key_option == "account_key_reuse":
            acmeAccountSelection.selection = "reuse"
            account_key_pem_md5 = formStash.results["account_key_reuse"]
        elif account_key_option == "none":
            if not allow_none:
                # `formStash.fatal_form()` will raise `FormInvalid()`
                formStash.fatal_form(
                    "This form does not support no AcmeAccount selection."
                )
            # note the lowercase "none"; this is an explicit "no item" selection
            # only certain routes allow this
            acmeAccountSelection.selection = "none"
            account_key_pem_md5 = None
            return acmeAccountSelection
        else:
            formStash.fatal_form(
                message="Invalid `account_key_option`",
            )
        if not account_key_pem_md5:
            # `formStash.fatal_field()` will raise `FormFieldInvalid(FormInvalid)`
            formStash.fatal_field(
                field=account_key_option, message="You did not provide a value"
            )
        dbAcmeAccount = lib_db.get.get__AcmeAccount__by_pemMd5(
            request.api_context, account_key_pem_md5, is_active=True
        )
        if not dbAcmeAccount:
            # `formStash.fatal_field()` will raise `FormFieldInvalid(FormInvalid)`
            formStash.fatal_field(
                field=account_key_option,
                message="The selected AcmeAccount is not enrolled in the system.",
            )
        if is_global_default and not dbAcmeAccount.is_global_default:
            # `formStash.fatal_field()` will raise `FormFieldInvalid(FormInvalid)`
            formStash.fatal_field(
                field=account_key_option,
                message="The selected AcmeAccount is not the current default.",
            )
        acmeAccountSelection.AcmeAccount = dbAcmeAccount
        return acmeAccountSelection
    # defensive fallback: every branch above returns or raises, so this should
    # be unreachable; `formStash.fatal_form()` will raise `FormInvalid()`
    formStash.fatal_form("There was an error validating your form.")
def parse_PrivateKeySelection(request, formStash, private_key_option=None):
    """
    Resolve the submitted `private_key_option` into a `_PrivateKeySelection`.

    :param request: pyramid request; `request.api_context` is used for db lookups
    :param formStash: form results/error accumulator; its `fatal_form()` /
        `fatal_field()` methods raise and never return
    :param private_key_option: one of "private_key_file",
        "private_key_existing", "private_key_reuse", "private_key_generate",
        "private_key_for_account_key"
    :returns: a `_PrivateKeySelection` with `.selection` set, and (except for
        the upload branch) `.PrivateKey` and
        `.private_key_strategy__requested` populated
    """
    private_key_pem = None  # NOTE(review): assigned but never used below
    private_key_pem_md5 = None
    PrivateKey = None  # :class:`model.objects.PrivateKey`
    # handle the explicit-option
    privateKeySelection = _PrivateKeySelection()
    if private_key_option == "private_key_file":
        # this will handle form validation and raise errors.
        parser = _PrivateKeyUploadParser(formStash)
        parser.require_upload()
        # update our object
        privateKeySelection.selection = "upload"
        privateKeySelection.upload_parsed = parser
        privateKeySelection.private_key_strategy__requested = (
            model_utils.PrivateKeySelection_2_PrivateKeyStrategy["upload"]
        )
        return privateKeySelection
    else:
        if private_key_option == "private_key_existing":
            privateKeySelection.selection = "existing"
            privateKeySelection.private_key_strategy__requested = (
                model_utils.PrivateKeySelection_2_PrivateKeyStrategy["existing"]
            )
            private_key_pem_md5 = formStash.results["private_key_existing"]
        elif private_key_option == "private_key_reuse":
            privateKeySelection.selection = "reuse"
            privateKeySelection.private_key_strategy__requested = (
                model_utils.PrivateKeySelection_2_PrivateKeyStrategy["reuse"]
            )
            private_key_pem_md5 = formStash.results["private_key_reuse"]
        elif private_key_option in (
            "private_key_generate",
            "private_key_for_account_key",
        ):
            # id 0 appears to be a placeholder PrivateKey record; the real
            # key is created/derived later per the requested strategy —
            # TODO confirm against lib_db.get.get__PrivateKey__by_id
            dbPrivateKey = lib_db.get.get__PrivateKey__by_id(request.api_context, 0)
            if not dbPrivateKey:
                formStash.fatal_field(
                    field=private_key_option,
                    message="Could not load the placeholder PrivateKey.",
                )
            privateKeySelection.PrivateKey = dbPrivateKey
            if private_key_option == "private_key_generate":
                privateKeySelection.selection = "generate"
                privateKeySelection.private_key_strategy__requested = (
                    model_utils.PrivateKeySelection_2_PrivateKeyStrategy["generate"]
                )
            elif private_key_option == "private_key_for_account_key":
                privateKeySelection.selection = "private_key_for_account_key"
                privateKeySelection.private_key_strategy__requested = (
                    model_utils.PrivateKeySelection_2_PrivateKeyStrategy[
                        "private_key_for_account_key"
                    ]
                )
            return privateKeySelection
        else:
            # `formStash.fatal_form()` will raise `FormInvalid()`
            formStash.fatal_form("Invalid `private_key_option`")
        # the "existing"/"reuse" branches fall through to this shared
        # md5-based database lookup
        if not private_key_pem_md5:
            # `formStash.fatal_field()` will raise `FormFieldInvalid(FormInvalid)`
            formStash.fatal_field(
                field=private_key_option, message="You did not provide a value"
            )
        dbPrivateKey = lib_db.get.get__PrivateKey__by_pemMd5(
            request.api_context, private_key_pem_md5, is_active=True
        )
        if not dbPrivateKey:
            # `formStash.fatal_field()` will raise `FormFieldInvalid(FormInvalid)`
            formStash.fatal_field(
                field=private_key_option,
                message="The selected PrivateKey is not enrolled in the system.",
            )
        privateKeySelection.PrivateKey = dbPrivateKey
        return privateKeySelection
# `formStash.fatal_form()` will raise `FormInvalid()`
formStash.fatal_form("There was an error validating | |
to
# verify the default
if logfile is not None:
# Logfile is not using Syslog, verify
with salt.utils.files.set_umask(0o027):
verify_log_files([logfile], self.config["user"])
if logfile is None:
# Use the default setting if the logfile wasn't explicitly set
logfile = self._default_logging_logfile_
cli_log_file_fmt = "cli_{0}_log_file_fmt".format(
self.get_prog_name().replace("-", "_")
)
if cli_log_file_fmt in self.config and not self.config.get(cli_log_file_fmt):
# Remove it from config so it inherits from log_fmt_logfile
self.config.pop(cli_log_file_fmt)
if self.config.get("log_fmt_logfile", None) is None:
# Remove it from config so it inherits from log_fmt_console
self.config.pop("log_fmt_logfile", None)
log_file_fmt = self.config.get(
"log_fmt_logfile",
self.config.get(
"log_fmt_console",
self.config.get("log_fmt", config._DFLT_LOG_FMT_CONSOLE),
),
)
if self.config.get("log_datefmt_logfile", None) is None:
# Remove it from config so it inherits from log_datefmt_console
self.config.pop("log_datefmt_logfile", None)
if self.config.get("log_datefmt_console", None) is None:
# Remove it from config so it inherits from log_datefmt
self.config.pop("log_datefmt_console", None)
log_file_datefmt = self.config.get(
"log_datefmt_logfile",
self.config.get(
"log_datefmt_console",
self.config.get("log_datefmt", "%Y-%m-%d %H:%M:%S"),
),
)
if not is_writeable(logfile, check_parent=True):
# Since we're not able to write to the log file or its parent
# directory (if the log file does not exist), are we the same user
# as the one defined in the configuration file?
current_user = salt.utils.user.get_user()
if self.config["user"] != current_user:
# Yep, not the same user!
# Is the current user in ACL?
acl = self.config["publisher_acl"]
if salt.utils.stringutils.check_whitelist_blacklist(
current_user, whitelist=six.iterkeys(acl)
):
# Yep, the user is in ACL!
# Let's write the logfile to its home directory instead.
xdg_dir = salt.utils.xdg.xdg_config_dir()
user_salt_dir = (
xdg_dir
if os.path.isdir(xdg_dir)
else os.path.expanduser("~/.salt")
)
if not os.path.isdir(user_salt_dir):
os.makedirs(user_salt_dir, 0o750)
logfile_basename = os.path.basename(self._default_logging_logfile_)
logger.debug(
"The user '%s' is not allowed to write to '%s'. "
"The log file will be stored in '~/.salt/'%s'.log'",
six.text_type(current_user),
six.text_type(logfile),
six.text_type(logfile_basename),
)
logfile = os.path.join(
user_salt_dir, "{0}.log".format(logfile_basename)
)
# If we haven't changed the logfile path and it's not writeable,
# salt will fail once we try to setup the logfile logging.
# Log rotate options
log_rotate_max_bytes = self.config.get("log_rotate_max_bytes", 0)
log_rotate_backup_count = self.config.get("log_rotate_backup_count", 0)
if not salt.utils.platform.is_windows():
# Not supported on platforms other than Windows.
# Other platforms may use an external tool such as 'logrotate'
if log_rotate_max_bytes != 0:
logger.warning("'log_rotate_max_bytes' is only supported on Windows")
log_rotate_max_bytes = 0
if log_rotate_backup_count != 0:
logger.warning("'log_rotate_backup_count' is only supported on Windows")
log_rotate_backup_count = 0
# Save the settings back to the configuration
self.config[self._logfile_config_setting_name_] = logfile
self.config[self._logfile_loglevel_config_setting_name_] = loglevel
self.config["log_fmt_logfile"] = log_file_fmt
self.config["log_datefmt_logfile"] = log_file_datefmt
self.config["log_rotate_max_bytes"] = log_rotate_max_bytes
self.config["log_rotate_backup_count"] = log_rotate_backup_count
def setup_logfile_logger(self):
    """Attach the log file handler using the already-resolved configuration."""
    # On Windows with a multiprocessing logging listener, the listener
    # process owns file logging; nothing to set up locally.
    if salt.utils.platform.is_windows() and self._setup_mp_logging_listener_:
        return
    cfg = self.config
    log.setup_logfile_logger(
        cfg[self._logfile_config_setting_name_],
        cfg[self._logfile_loglevel_config_setting_name_],
        log_format=cfg["log_fmt_logfile"],
        date_format=cfg["log_datefmt_logfile"],
        max_bytes=cfg["log_rotate_max_bytes"],
        backup_count=cfg["log_rotate_backup_count"],
    )
    # Apply any per-logger level overrides from the configuration.
    for logger_name, logger_level in six.iteritems(
        cfg.get("log_granular_levels", {})
    ):
        log.set_logger_level(logger_name, logger_level)
def __setup_extended_logging(self):
    """Set up extended (external) logging handlers from the configuration."""
    # On Windows with a multiprocessing logging listener, the listener
    # process performs all extended logging; skip the local setup.
    windows_listener = (
        salt.utils.platform.is_windows() and self._setup_mp_logging_listener_
    )
    if not windows_listener:
        log.setup_extended_logging(self.config)
def _get_mp_logging_listener_queue(self):
    """Return the shared queue feeding the multiprocessing logging listener."""
    listener_queue = log.get_multiprocessing_logging_queue()
    return listener_queue
def _setup_mp_logging_listener(self):
    """Start the multiprocessing logging listener if this parser enables it."""
    if not self._setup_mp_logging_listener_:
        return
    log.setup_multiprocessing_logging_listener(
        self.config, self._get_mp_logging_listener_queue()
    )
def _setup_mp_logging_client(self):
    """Configure this process to forward log records to the MP listener."""
    if not self._setup_mp_logging_listener_:
        return
    # Set the multiprocessing logging level on every platform: on
    # non-Windows systems the setting survives fork() and is used by any
    # child process that starts the multiprocessing logging client.
    log.set_multiprocessing_logging_level_by_opts(self.config)
    if not salt.utils.platform.is_windows():
        return
    # On Windows, route console and log file output through the
    # multiprocessing logging listener: a single process must own the log
    # file for log rotation to work.
    log.setup_multiprocessing_logging(self._get_mp_logging_listener_queue())
    # Drop the temp/console/logfile handlers in this process; everything
    # now flows through the listener.
    log.shutdown_temp_logging()
    log.shutdown_console_logging()
    log.shutdown_logfile_logging()
def __setup_console_logger_config(self):
    """Resolve and persist the console logging format/date-format settings."""
    cfg = self.config
    # Console format falls back to the generic 'log_fmt', then the default.
    fmt_value = cfg.get(
        "log_fmt_console", cfg.get("log_fmt", config._DFLT_LOG_FMT_CONSOLE)
    )
    # A None value means "unset": drop the key so the date format inherits
    # from the more generic 'log_datefmt' option.
    if cfg.get("log_datefmt_console", None) is None:
        cfg.pop("log_datefmt_console", None)
    datefmt_value = cfg.get(
        "log_datefmt_console", cfg.get("log_datefmt", "%Y-%m-%d %H:%M:%S")
    )
    # Save the resolved values back so later stages can read them directly.
    cfg["log_fmt_console"] = fmt_value
    cfg["log_datefmt_console"] = datefmt_value
def __setup_console_logger(self):
    """Attach the console logging handler unless console output is disabled."""
    # Daemonized processes must keep the console quiet.
    if getattr(self.options, "daemon", False) is True:
        return
    # On Windows with a logging listener, console output goes through the
    # listener process instead.
    if salt.utils.platform.is_windows() and self._setup_mp_logging_listener_:
        return
    console_fmt = self.config["log_fmt_console"]
    if getattr(self.options, "output", None) == "yaml":
        # Prefix log lines as YAML comments so they don't corrupt the output.
        console_fmt = "# {0}".format(console_fmt)
    log.setup_console_logger(
        self.config["log_level"],
        log_format=console_fmt,
        date_format=self.config["log_datefmt_console"],
    )
    # Apply any per-logger level overrides from the configuration.
    for logger_name, logger_level in six.iteritems(
        self.config.get("log_granular_levels", {})
    ):
        log.set_logger_level(logger_name, logger_level)
class RunUserMixin(six.with_metaclass(MixInMeta, object)):
    """Mixin that adds the ``-u``/``--user`` command line option."""

    _mixin_prio_ = 20

    def _mixin_setup(self):
        help_text = "Specify user to run {0}.".format(self.get_prog_name())
        self.add_option("-u", "--user", help=help_text)
class DaemonMixIn(six.with_metaclass(MixInMeta, object)):
    """
    Mixin that adds daemonization support: the ``--daemon`` and
    ``--pid-file`` options, pidfile management, forking, and signal-driven
    shutdown for scripts that can run as daemons.
    """

    _mixin_prio_ = 30

    def _mixin_setup(self):
        """Register the ``--daemon`` and ``--pid-file`` command line options."""
        self.add_option(
            "-d",
            "--daemon",
            default=False,
            action="store_true",
            help="Run the {0} as a daemon.".format(self.get_prog_name()),
        )
        self.add_option(
            "--pid-file",
            dest="pidfile",
            default=os.path.join(
                syspaths.PIDFILE_DIR, "{0}.pid".format(self.get_prog_name())
            ),
            help="Specify the location of the pidfile. Default: '%default'.",
        )

    def _mixin_before_exit(self):
        """Best-effort removal of the pidfile on exit; failures are only logged."""
        if hasattr(self, "config") and self.config.get("pidfile"):
            # We've loaded and merged options into the configuration, it's safe
            # to query about the pidfile
            if self.check_pidfile():
                try:
                    os.unlink(self.config["pidfile"])
                except OSError as err:
                    # Log error only when running salt-master as a root user.
                    # Otherwise this can be ignored, since salt-master is able to
                    # overwrite the PIDfile on the next start.
                    # err_msg is a (format, arg) pair, expanded with * below.
                    err_msg = (
                        "PIDfile could not be deleted: %s",
                        six.text_type(self.config["pidfile"]),
                    )
                    if salt.utils.platform.is_windows():
                        user = salt.utils.win_functions.get_current_user()
                        if salt.utils.win_functions.is_admin(user):
                            logger.info(*err_msg)
                            logger.debug(six.text_type(err))
                    else:
                        # os.getuid() == 0 means we are running as root.
                        if not os.getuid():
                            logger.info(*err_msg)
                            logger.debug(six.text_type(err))

    def set_pidfile(self):
        """Write this process's pid to the configured pidfile."""
        from salt.utils.process import set_pidfile

        set_pidfile(self.config["pidfile"], self.config["user"])

    def check_pidfile(self):
        """
        Report whether a pidfile exists
        """
        from salt.utils.process import check_pidfile

        return check_pidfile(self.config["pidfile"])

    def get_pidfile(self):
        """
        Return a pid contained in a pidfile
        """
        from salt.utils.process import get_pidfile

        return get_pidfile(self.config["pidfile"])

    def daemonize_if_required(self):
        """Fork into the background when ``--daemon`` was given."""
        if self.options.daemon:
            if self._setup_mp_logging_listener_ is True:
                # Stop the logging queue listener for the current process
                # We'll restart it once forked
                log.shutdown_multiprocessing_logging_listener(daemonizing=True)
            # Late import so logging works correctly
            salt.utils.process.daemonize()
            # Setup the multiprocessing log queue listener if enabled
            self._setup_mp_logging_listener()

    def check_running(self):
        """
        Check if a pid file exists and if it is associated with
        a running process.
        """
        if self.check_pidfile():
            pid = self.get_pidfile()
            if not salt.utils.platform.is_windows():
                # NOTE(review): check_pidfile() is re-tested here although it
                # was just checked above; redundant but harmless.
                if (
                    self.check_pidfile()
                    and self.is_daemonized(pid)
                    and os.getppid() != pid
                ):
                    return True
            else:
                # We have no os.getppid() on Windows. Use salt.utils.win_functions.get_parent_pid
                if (
                    self.check_pidfile()
                    and self.is_daemonized(pid)
                    and salt.utils.win_functions.get_parent_pid() != pid
                ):
                    return True
        return False

    def is_daemonized(self, pid):
        """Return whether a process with the given pid is running."""
        from salt.utils.process import os_is_running

        return os_is_running(pid)

    # Common methods for scripts which can daemonize
    def _install_signal_handlers(self):
        """Route SIGTERM/SIGINT through `_handle_signals` for clean shutdown."""
        signal.signal(signal.SIGTERM, self._handle_signals)
        signal.signal(signal.SIGINT, self._handle_signals)

    def prepare(self):
        """Parse command line arguments; subclasses typically extend this."""
        self.parse_args()

    def start(self):
        """Prepare the process and install the shutdown signal handlers."""
        self.prepare()
        self._install_signal_handlers()

    def _handle_signals(self, signum, sigframe):  # pylint: disable=unused-argument
        """Log the received signal and shut the process down."""
        msg = self.__class__.__name__
        if signum == signal.SIGINT:
            msg += " received a SIGINT."
        elif signum == signal.SIGTERM:
            msg += " received a SIGTERM."
        logging.getLogger(__name__).warning("%s Exiting.", msg)
        self.shutdown(exitmsg="{0} Exited.".format(msg))

    def shutdown(self, exitcode=0, exitmsg=None):
        """Exit the process with the given code and optional message."""
        self.exit(exitcode, exitmsg)
class TargetOptionsMixIn(six.with_metaclass(MixInMeta, object)):
_mixin_prio_ = 20
selected_target_option = None
def _mixin_setup(self):
group = self.target_options_group = optparse.OptionGroup(
self, "Target Options", "Target selection options."
)
self.add_option_group(group)
group.add_option(
"-H",
"--hosts",
default=False,
action="store_true",
dest="list_hosts",
help="List all known hosts to currently visible or other specified rosters",
)
group.add_option(
"-E",
"--pcre",
default=False,
action="store_true",
help=(
"Instead of using shell globs to evaluate the target "
"servers, use pcre regular expressions."
),
)
group.add_option(
"-L",
"--list",
default=False,
action="store_true",
help=(
"Instead of using shell globs to evaluate the target "
"servers, take a comma or whitespace delimited list of "
"servers."
),
)
group.add_option(
"-G",
"--grain",
default=False,
action="store_true",
help=(
"Instead of using shell globs to evaluate the target "
"use a grain value to identify targets, the syntax "
"for the target is the grain key followed by a glob"
'expression: "os:Arch*".'
),
)
group.add_option(
"-P",
"--grain-pcre",
default=False,
action="store_true",
help=(
"Instead of using shell globs to evaluate the target "
"use a grain value to identify targets, the syntax "
"for the target is the grain key followed by a pcre "
'regular expression: "os:Arch.*".'
),
)
group.add_option(
"-N",
"--nodegroup",
| |
############################################################################
# Copyright 2021 Plataux LLC #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# https://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
############################################################################
# https://datatracker.ietf.org/doc/html/rfc7518#section-3.1
from __future__ import annotations
import enum
import random
from typing import Dict, Any, Union, Optional, Tuple
from math import ceil
import os
import secrets
from random import choice
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
from cryptography.hazmat.primitives.asymmetric import ec
from cryptography.hazmat.primitives.asymmetric.ec import SECP256R1, SECP384R1, SECP521R1
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives.asymmetric import padding
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.primitives.padding import PKCS7
from cryptography.hazmat.primitives import hmac
from cryptography.hazmat.primitives.ciphers.modes import GCM
from cryptography.hazmat.primitives import serialization as ser
from cryptography.hazmat.primitives.keywrap import aes_key_wrap, aes_key_unwrap
from cryptography.hazmat.primitives.ciphers.algorithms import AES
from cryptography.hazmat.primitives.constant_time import bytes_eq
import webcrypt.convert as conv
import webcrypt.exceptions as tex
from uuid import uuid4
from pydantic import BaseModel, validator
import zlib
# Types accepted as JWE keys: EC/RSA private keys, RSA public keys,
# raw symmetric key bytes, or a password string (for the PBES2 algorithms —
# presumably; confirm against the key-handling code).
jwe_kty = Union[ec.EllipticCurvePrivateKey,
                rsa.RSAPrivateKey, rsa.RSAPublicKey, bytes, str]
class JWE_Header(BaseModel):
    """
    Pydantic Model to store, validate and serialize JWE during encryption and decryption
    operations
    """

    class EPK(BaseModel):
        """
        Pydantic Model to Validate and Serialize epk data (Ephemeral Public Key) during
        ECDH-ES Key agreement process.

        :cvar kty: is the key type and is always set to "EC" in this case.
        """
        kty: str = "EC"
        crv: str  # curve name; restricted to P-256 / P-384 / P-521 below
        x: str  # EC point x coordinate, string-encoded (presumably base64url — confirm)
        y: str  # EC point y coordinate, string-encoded (presumably base64url — confirm)

        @validator('crv')
        def _val_crv(cls, crv):
            # Only the three JOSE-spec NIST curves are permitted.
            if crv not in ('P-256', 'P-384', 'P-521'):
                raise ValueError("Invalid EC curve for the JOSE spec")
            return crv

        @validator('kty')
        def _val_kty(cls, kty):
            # epk keys are always elliptic-curve keys.
            if kty != 'EC':
                raise ValueError(f"Invalid kty for ECDH: {kty}")
            return kty

    # JWE protected-header fields; all optional, populated per algorithm.
    alg: Optional[str]  # key management algorithm (validated below)
    enc: Optional[str]  # content encryption algorithm (validated below)
    kid: Optional[str]  # key identifier
    zip: Optional[str]  # compression hint (zlib is imported at module level)
    iv: Optional[str]  # initialization vector, used by the GCMKW algorithms
    tag: Optional[str]  # authentication tag, used by the GCMKW algorithms
    apu: Optional[str]  # ECDH-ES agreement PartyUInfo — presumably; confirm
    apv: Optional[str]  # ECDH-ES agreement PartyVInfo — presumably; confirm
    p2s: Optional[str]  # PBES2 salt input — presumably; confirm
    p2c: Optional[int]  # PBES2 iteration count — presumably; confirm
    epk: Optional["JWE_Header.EPK"]  # ephemeral public key for ECDH-ES

    @validator('alg')
    def _val_alg(cls, alg):
        # Whitelist of the JOSE key-management algorithm identifiers
        # supported by this module.
        if alg not in ['RSA1_5', 'RSA-OAEP', 'RSA-OAEP-256', 'dir',
                       'A128KW', 'A192KW', 'A256KW', 'A128GCMKW', 'A192GCMKW', 'A256GCMKW',
                       'ECDH-ES', 'ECDH-ES+A128KW', 'ECDH-ES+A192KW', 'ECDH-ES+A256KW',
                       "PBES2-HS256+A128KW", "PBES2-HS384+A192KW", "PBES2-HS512+A256KW"]:
            raise ValueError("Invalid Algorithm")
        return alg

    @validator('enc')
    def _val_enc(cls, enc):
        # Whitelist of the supported content-encryption algorithm identifiers.
        if enc not in ('A128GCM', 'A192GCM', 'A256GCM',
                       "A128CBC-HS256", "A192CBC-HS384", "A256CBC-HS512"):
            raise ValueError("Invalid JWE Encryption Algorithm")
        return enc
JWE_Header.update_forward_refs()
class JWE:
class Algorithm(enum.Enum):
    """JWE ``alg`` header values: how the content-encryption key is established."""
    # Direct Encryption
    DIR = "dir"
    # wrapping a cek with a 128, 192, 256-bit key. No additional JWT Headers
    A128KW = "A128KW"
    A192KW = "A192KW"
    A256KW = "A256KW"
    # wrapping the cek with 128, 192, 256-bit key, adding the "iv" and "tag" JWT Headers
    A128GCMKW = "A128GCMKW"
    A192GCMKW = "A192GCMKW"
    A256GCMKW = "A256GCMKW"
    # Password Based Encryption
    PBES2_HS256_A128KW = "PBES2-HS256+A128KW"
    PBES2_HS384_A192KW = "PBES2-HS384+A192KW"
    PBES2_HS512_A256KW = "PBES2-HS512+A256KW"
    # RSA Key Wrapping of cek
    RSA1_5 = 'RSA1_5'
    RSA_OAEP = 'RSA-OAEP'
    RSA_OAEP_256 = 'RSA-OAEP-256'
    # ECDH Ephemeral Static Key Derivation between two parties
    ECDH_ES = "ECDH-ES"
    # ECDH-ES with key wrapping
    ECDH_ES_A128KW = "ECDH-ES+A128KW"
    ECDH_ES_A192KW = "ECDH-ES+A192KW"
    ECDH_ES_A256KW = "ECDH-ES+A256KW"
class Encryption(enum.Enum):
    """JWE ``enc`` header values: the supported content-encryption algorithms."""
    # AES-GCM authenticated encryption
    A128GCM = 'A128GCM'
    A192GCM = 'A192GCM'
    A256GCM = 'A256GCM'
    # Composite AES-CBC + HMAC-SHA2 encryption
    A128CBC_HS256 = "A128CBC-HS256"
    A192CBC_HS384 = "A192CBC-HS384"
    A256CBC_HS512 = "A256CBC-HS512"
# Lookup tables from the JOSE string identifiers to the enum members.
_alg_map = {v.value: v for v in list(Algorithm)}
_enc_map = {v.value: v for v in list(Encryption)}
@staticmethod
def gcm_encrypt(key: bytes, auth_data: bytes,
                plaintext: bytes) -> Tuple[bytes, bytes, bytes]:
    """
    AES-GCM encryption, implemented according to the spec at:
    https://datatracker.ietf.org/doc/html/rfc7518#section-5.3

    :param key: 128, 192 or 256-bit AES key as bytes
    :param auth_data: additional authenticated data (authenticated, not encrypted)
    :param plaintext: the data to encrypt, as bytes
    :return: tuple of (iv, ciphertext, tag), each as bytes
    """
    # The spec requires a randomly generated 96-bit IV with this algorithm.
    nonce = os.urandom(12)
    enc = Cipher(AES(key), GCM(nonce)).encryptor()
    # The AAD is bound into the tag; it must be supplied again on decryption.
    enc.authenticate_additional_data(auth_data)
    # GCM is a counter mode: no padding is required.
    body = enc.update(plaintext) + enc.finalize()
    # The spec mandates a full 128-bit authentication tag regardless of key size.
    assert len(enc.tag) == 16
    return nonce, body, enc.tag
@staticmethod
def gcm_decrypt(key: bytes, auth_data: bytes,
                iv: bytes, ciphertext: bytes, tag: bytes) -> bytes:
    """
    AES-GCM decryption, implemented according to the spec at:
    https://datatracker.ietf.org/doc/html/rfc7518#section-5.3

    :param key: 128, 192 or 256-bit AES key as bytes
    :param auth_data: additional authenticated data, as bytes
    :param iv: initialization vector (96 bits expected)
    :param ciphertext: encrypted data, as bytes
    :param tag: 128-bit authentication tag
    :return: the decrypted plaintext
    :raises InvalidToken: if any input is invalid, corrupted or tampered with
    """
    try:
        # The tag is supplied up front so finalize() can authenticate.
        dec = Cipher(AES(key), GCM(iv, tag)).decryptor()
        # The AAD must match what was supplied at encryption time,
        # otherwise tag verification fails in finalize().
        dec.authenticate_additional_data(auth_data)
        plaintext: bytes = dec.update(ciphertext) + dec.finalize()
        return plaintext
    except Exception as ex:
        # Collapse every failure mode into a single token error.
        raise tex.InvalidToken(f"Could not decrypt token, corrupted or tampered with: {ex}")
@staticmethod
def cbc_encrypt(comp_key: bytes, auth_data: bytes,
                plaintext: bytes) -> Tuple[bytes, bytes, bytes]:
    """
    AES-CBC + HMAC-SHA2 encryption, implemented according to the spec at:
    https://datatracker.ietf.org/doc/html/rfc7518#section-5.2.2.1

    :param comp_key: composite key; the first half keys the HMAC and the
        second half keys the AES-CBC content encryption
    :param auth_data: additional authenticated data, as bytes
    :param plaintext: the data to encrypt, as bytes
    :return: tuple of (iv, ciphertext, tag), each as bytes
    """
    if len(comp_key) not in (32, 48, 64):
        raise ValueError("CBC key must be in 32, 48, 64 bytes long")
    half = len(comp_key) // 2
    mac_key, aes_key = comp_key[:half], comp_key[half:]
    # Hash strength scales with the key size per the spec.
    digest_by_len = {16: hashes.SHA256, 24: hashes.SHA384, 32: hashes.SHA512}
    try:
        hash_alg: hashes.HashAlgorithm = digest_by_len[half]()
    except KeyError:
        raise RuntimeError("unexpected key_len value")
    # The IV is a random 128-bit value per the spec.
    iv = os.urandom(16)
    # CBC needs full blocks: PKCS7-pad the plaintext first.
    padder = PKCS7(algorithms.AES.block_size).padder()
    padded = padder.update(plaintext) + padder.finalize()
    enc = Cipher(algorithms.AES(aes_key), modes.CBC(iv)).encryptor()
    ciphertext = enc.update(padded) + enc.finalize()
    # AL is the AAD bit length as a 64-bit unsigned big-endian integer.
    al = conv.int_to_bytes(len(auth_data) * 8, order='big', byte_size=8)
    signer = hmac.HMAC(mac_key, hash_alg)
    signer.update(auth_data + iv + ciphertext + al)
    # The tag is the first half of the HMAC digest.
    return iv, ciphertext, signer.finalize()[:half]
@staticmethod
def cbc_decrypt(comp_key: bytes, auth_data: bytes,
iv: bytes, ciphertext: bytes, tag: bytes) -> bytes:
"""
Implemented according to the spec at:
https://datatracker.ietf.org/doc/html/rfc7518#section-5.2.2.2
:param comp_key: Composite Key: 1st half for HMAC, and 2nd for Content Decryption
:param auth_data: Authenticated Data in bytes
:param iv: Initialization Vector in bytes - expecting 128 bit iv
:param ciphertext: Ciphertext in bytes
:param tag: Auth tag in bytes
:return: decrypted plaintext
:raises InvalidSignature: If the tag is invalid
:raises InvalidToken: If any of the inputs is invalid, corrupted or tampered with in any way
"""
if len(comp_key) not in (32, 48, 64):
raise ValueError("CBC key must be in 32, 36, 64 bytes long")
key_len = len(comp_key) // 2
hmac_key = comp_key[:key_len]
enc_key = comp_key[-key_len:]
if key_len == 16:
hash_alg: hashes.HashAlgorithm = hashes.SHA256()
elif key_len == 24:
hash_alg = hashes.SHA384()
elif key_len == 32:
hash_alg = hashes.SHA512()
else:
raise RuntimeError("unexpected key_len value")
# The octet string AL is equal to the number of bits in the
# Additional Authenticated Data A expressed as a 64-bit unsigned big-endian integer
al = conv.int_to_bytes(len(auth_data) * 8, order='big', byte_size=8)
hmac_signer = hmac.HMAC(hmac_key, hash_alg)
hmac_signer.update(auth_data + iv + ciphertext + al)
sig = hmac_signer.finalize()
if not bytes_eq(sig[:key_len], tag):
raise tex.InvalidSignature("Tag invalid - Token Fabricated or Tampered With")
try:
cipher = Cipher(algorithms.AES(enc_key), modes.CBC(iv))
decryptor = cipher.decryptor()
padded_plain_text = decryptor.update(ciphertext)
padded_plain_text += decryptor.finalize()
de_padder = PKCS7(algorithms.AES.block_size).unpadder()
plaintext: bytes = de_padder.update(padded_plain_text)
| |
<gh_stars>100-1000
"""
grdtrack - Sample grids at specified (x,y) locations.
"""
import pandas as pd
from pygmt.clib import Session
from pygmt.exceptions import GMTInvalidInput
from pygmt.helpers import (
GMTTempFile,
build_arg_string,
fmt_docstring,
kwargs_to_strings,
use_alias,
)
__doctest_skip__ = ["grdtrack"]
@fmt_docstring
@use_alias(
A="resample",
C="crossprofile",
D="dfile",
E="profile",
F="critical",
R="region",
N="no_skip",
S="stack",
T="radius",
V="verbose",
Z="z_only",
a="aspatial",
b="binary",
d="nodata",
e="find",
f="coltypes",
g="gap",
h="header",
i="incols",
j="distcalc",
n="interpolation",
o="outcols",
s="skiprows",
w="wrap",
)
@kwargs_to_strings(R="sequence", S="sequence", i="sequence_comma", o="sequence_comma")
def grdtrack(points, grid, newcolname=None, outfile=None, **kwargs):
r"""
Sample grids at specified (x,y) locations.
Reads one or more grid files and a table (from file or an array input; but
see ``profile`` for exception) with (x,y) [or (lon,lat)] positions in the
first two columns (more columns may be present). It interpolates the
grid(s) at the positions in the table and writes out the table with the
interpolated values added as (one or more) new columns. Alternatively
(``crossprofile``), the input is considered to be line-segments and we
create orthogonal cross-profiles at each data point or with an equidistant
separation and sample the grid(s) along these profiles. A bicubic
[Default], bilinear, B-spline or nearest-neighbor interpolation is used,
requiring boundary conditions at the limits of the region (see
``interpolation``; Default uses "natural" conditions (second partial
derivative normal to edge is zero) unless the grid is automatically
recognized as periodic.)
Full option list at :gmt-docs:`grdtrack.html`
{aliases}
Parameters
----------
points : str or {table-like}
Pass in either a file name to an ASCII data table, a 2D
{table-classes}.
grid : xarray.DataArray or str
Gridded array from which to sample values from, or a filename (netcdf
format).
newcolname : str
Required if ``points`` is a :class:`pandas.DataFrame`. The name for the
new column in the track :class:`pandas.DataFrame` table where the
sampled values will be placed.
outfile : str
The file name for the output ASCII file.
resample : str
**f**\|\ **p**\|\ **m**\|\ **r**\|\ **R**\ [**+l**]
For track resampling (if ``crossprofile`` or ``profile`` are set) we
can select how this is to be performed. Append **f** to keep original
points, but add intermediate points if needed [Default], **m** as
**f**, but first follow meridian (along y) then parallel (along x),
**p** as **f**, but first follow parallel (along y) then meridian
(along x), **r** to resample at equidistant locations; input points are
not necessarily included in the output, and **R** as **r**, but adjust
given spacing to fit the track length exactly. Finally, append
**+l** if geographic distances should be measured along rhumb lines
(loxodromes) instead of great circles. Ignored unless ``crossprofile``
is used.
crossprofile : str
*length*/\ *ds*\ [*/spacing*][**+a**\|\ **+v**][**l**\|\ **r**].
Use input line segments to create an equidistant and (optionally)
equally-spaced set of crossing profiles along which we sample the
grid(s) [Default simply samples the grid(s) at the input locations].
Specify two length scales that control how the sampling is done:
*length* sets the full length of each cross-profile, while *ds* is
the sampling spacing along each cross-profile. Optionally, append
**/**\ *spacing* for an equidistant spacing between cross-profiles
[Default erects cross-profiles at the input coordinates]; see
``resample`` for how resampling the input track is controlled. By
default, all cross-profiles have the same direction (left to right
as we look in the direction of the input line segment). Append **+a**
to alternate the direction of cross-profiles, or **v** to enforce
either a "west-to-east" or "south-to-north" view. By default the entire
profiles are output. Choose to only output the left or right halves
of the profiles by appending **+l** or **+r**, respectively. Append
suitable units to *length*; it sets the unit used for *ds* [and
*spacing*] (See :gmt-docs:`Units <grdtrack.html#units>`). The default
unit for geographic grids is meter while Cartesian grids implies the
user unit. The output columns will be *lon*, *lat*, *dist*, *azimuth*,
*z1*, *z2*, ..., *zn* (The *zi* are the sampled values for each of the
*n* grids).
dfile : str
In concert with ``crossprofile`` we can save the (possibly resampled)
original lines to *dfile* [Default only saves the cross-profiles]. The
columns will be *lon*, *lat*, *dist*, *azimuth*, *z1*, *z2*, ...
(sampled value for each grid).
profile : str
*line*\ [,\ *line*,...][**+a**\ *az*][**+c**][**+d**][**+g**]\
[**+i**\ *inc*][**+l**\ *length*][**+n**\ *np*][**+o**\ *az*]\
[**+r**\ *radius*].
Instead of reading input track coordinates, specify profiles via
coordinates and modifiers. The format of each *line* is
*start*/*stop*, where *start* or *stop* are either *lon*/*lat* (*x*/*y*
for Cartesian data) or a 2-character XY key that uses the
:gmt-docs:`text <text.html>`-style justification format to specify
a point on the map as [LCR][BMT]. Each line will be a separate segment
unless **+c** is used which will connect segments with shared joints
into a single segment. In addition to line coordinates, you can use Z-,
Z+ to mean the global minimum and maximum locations in the grid (only
available if a single grid is given via **outfile**). You may append
**+i**\ *inc* to set the sampling interval; if not given then we
default to half the minimum grid interval. For a *line* along parallels
or meridians you can add **+g** to report degrees of longitude or
latitude instead of great circle distances starting at zero. Instead of
two coordinates you can specify an origin and one of **+a**, **+o**, or
**+r**. The **+a** sets the azimuth of a profile of given length
starting at the given origin, while **+o** centers the profile on the
origin; both require **+l**. For circular sampling specify **+r** to
define a circle of given radius centered on the origin; this option
requires either **+n** or **+i**. The **+n**\ *np* modifier sets the
desired number of points, while **+l**\ *length* gives the total length
of the profile. Append **+d** to output the along-track distances after
the coordinates. **Note**: No track file will be read. Also note that
only one distance unit can be chosen. Giving different units will
result in an error. If no units are specified we default to great
circle distances in km (if geographic). If working with geographic data
you can use ``distcalc`` to control distance calculation mode [Default
is Great Circle]. **Note**: If ``crossprofile`` is set and *spacing* is
given then that sampling scheme overrules any modifier set in
``profile``.
critical : str
[**+b**][**+n**][**+r**][**+z**\ *z0*].
Find critical points along each cross-profile as a function of
along-track distance. Requires ``crossprofile`` and a single input grid
(*z*). We examine each cross-profile generated and report (*dist*,
*lonc*, *latc*, *distc*, *azimuthc*, *zc*) at the center peak of
maximum *z* value, (*lonl*, *latl*, *distl*) and (*lonr*, *latr*,
*distr*) at the first and last non-NaN point whose *z*-value exceeds
*z0*, respectively, and the *width* based on the two extreme points
found. Here, *dist* is the distance along the original input
``points`` and the other 12 output columns are a function of that
distance. When searching for the center peak and the extreme first and
last values that exceed the threshold we assume the profile is positive
up. If we instead are looking for a trough then you must use **+n** to
temporarily flip the profile to positive. The threshold *z0* value is
always given as >= 0; use **+z** to change it [Default is 0].
Alternatively, use **+b** to determine the balance point and standard
deviation of the profile; this is the weighted mean and weighted
standard deviation of the distances, with *z* acting as the weight.
Finally, use **+r** to obtain the weighted rms about the cross-track
center (*distc* == 0). **Note**: We round the exact results to the
nearest distance nodes along the cross-profiles. We write 13 output
columns per track: *dist, lonc, latc, distc, azimuthc, zc, lonl, latl,
distl, lonr, latr, distr, width*.
{R}
no_skip : bool
Do *not* skip points that fall outside the domain of the grid(s)
[Default only output points within grid domain].
stack : str or list
*method*/*modifiers*.
In conjunction with ``crossprofile``, compute a single stacked profile
from all profiles across each segment. Choose how stacking should be
computed | |
# <reponame>thomasbuttler/leo-editor
# -*- coding: utf-8 -*-
#@+leo-ver=5-thin
#@+node:ekr.20210902073413.1: * @file ../unittests/core/test_leoAst.py
#@@first
"""Tests of leoAst.py"""
#@+<< leoAst imports >>
#@+node:ekr.20210902074548.1: ** << leoAst imports >>
import ast
import os
import sys
import textwrap
import time
import token as token_module
from typing import Any, Dict, List
import unittest
import warnings
warnings.simplefilter("ignore")
# pylint: disable=import-error
# Third-party.
try:
import asttokens
except Exception:
asttokens = None
try:
# Suppress a warning about imp being deprecated.
with warnings.catch_warnings():
import black
except Exception:
black = None
# pylint: disable=wrong-import-position
from leo.core import leoGlobals as g
from leo.core.leoAst import AstNotEqual
from leo.core.leoAst import Fstringify, Orange
from leo.core.leoAst import Token, TokenOrderGenerator, TokenOrderTraverser
from leo.core.leoAst import get_encoding_directive, read_file, strip_BOM
from leo.core.leoAst import make_tokens, parse_ast, tokens_to_string
from leo.core.leoAst import dump_ast, dump_contents, dump_tokens, dump_tree, _op_names
#@-<< leoAst imports >>
# Cache the interpreter's (major, minor) version for version-dependent tests.
v1, v2 = sys.version_info[0], sys.version_info[1]
py_version = (v1, v2)
#@+others
#@+node:ekr.20200107114620.1: ** functions: unit testing
#@+node:ekr.20191027072126.1: *3* function: compare_asts & helpers
def compare_asts(ast1, ast2):
    """Return True if the two ast trees are structurally equal.

    On a structural mismatch, dump both trees and return False.
    Any unexpected exception is reported and also treated as unequal.
    """
    try:
        _compare_asts(ast1, ast2)
        return True
    except AstNotEqual:
        dump_ast(ast1, tag='AST BEFORE')
        dump_ast(ast2, tag='AST AFTER')
    except Exception:
        g.trace("Unexpected exception")
        g.es_exception()
    return False
#@+node:ekr.20191027071653.2: *4* function._compare_asts
def _compare_asts(node1, node2):
    """
    Compare both nodes, and recursively compare their children.

    See also: http://stackoverflow.com/questions/3312989/
    """
    # Compare the nodes themselves.
    _compare_nodes(node1, node2)
    # Both nodes must expose the same ast fields.
    fields1 = getattr(node1, "_fields", [])  # type:ignore
    fields2 = getattr(node2, "_fields", [])  # type:ignore
    if fields1 != fields2:
        raise AstNotEqual(
            f"node1._fields: {fields1}\n" f"node2._fields: {fields2}")
    # Recursively compare each field, skipping positions and contexts.
    ignored_fields = ('lineno', 'col_offset', 'ctx')
    for field in fields1:
        if field in ignored_fields:
            continue
        attr1 = getattr(node1, field, None)
        attr2 = getattr(node2, field, None)
        if attr1.__class__.__name__ != attr2.__class__.__name__:
            raise AstNotEqual(f"attrs1: {attr1},\n" f"attrs2: {attr2}")
        _compare_asts(attr1, attr2)
#@+node:ekr.20191027071653.3: *4* function._compare_nodes
def _compare_nodes(node1, node2):
    """
    Compare node1 and node2.
    For lists and tuples, compare elements recursively.
    Raise AstNotEqual if not equal.
    """
    # Class names must always match.
    if node1.__class__.__name__ != node2.__class__.__name__:
        # Bug fix: the second placeholder read `__name_` (missing a
        # trailing underscore), so this path raised AttributeError
        # instead of the intended AstNotEqual.
        raise AstNotEqual(
            f"node1.__class__.__name__: {node1.__class__.__name__}\n"
            f"node2.__class__.__name__: {node2.__class__.__name__}"
        )
    # Special cases for strings and None
    if node1 is None:
        return
    if isinstance(node1, str):
        if node1 != node2:
            raise AstNotEqual(f"node1: {node1!r}\n" f"node2: {node2!r}")
    # Special cases for lists and tuples:
    if isinstance(node1, (tuple, list)):
        if len(node1) != len(node2):
            raise AstNotEqual(f"node1: {node1}\n" f"node2: {node2}")
        for i, item1 in enumerate(node1):
            item2 = node2[i]
            if item1.__class__.__name__ != item2.__class__.__name__:
                raise AstNotEqual(
                    f"list item1: {i} {item1}\n" f"list item2: {i} {item2}"
                )
            _compare_asts(item1, item2)
#@+node:ekr.20191121081439.1: *3* function: compare_lists
def compare_lists(list1, list2):
    """
    Compare two lists of strings, showing the first mismatch.

    Return the index of the first mismatched lines, or None if identical.
    """
    import itertools
    paired = itertools.zip_longest(list1, list2, fillvalue='Missing!')
    for index, (item1, item2) in enumerate(paired):
        if item1 != item2:
            return index
    return None
#@+node:ekr.20191226071135.1: *3* function: get_time
def get_time():
    """Return the CPU time used by the current process, in seconds."""
    return time.process_time()
#@+node:ekr.20210902074155.1: ** Test classes...
#@+node:ekr.20191227154302.1: *3* class BaseTest (TestCase)
class BaseTest(unittest.TestCase):
    """
    The base class of all tests of leoAst.py.
    This class contains only helpers.
    """
    # Statistics, accumulated across tests (class-level attributes on purpose).
    counts: Dict[str, int] = {}
    times: Dict[str, float] = {}
    # Debugging traces & behavior.
    # create_links: 'full-traceback'
    # make_data: 'contents', 'tokens', 'tree',
    #            'post-tokens', 'post-tree',
    #            'unit-test'
    debug_list: List[str] = []
    # Set by create_links when linking raises; checked at the end of make_data.
    link_error: Exception = None
    #@+others
    #@+node:ekr.20200110103036.1: *4* BaseTest.adjust_expected
    def adjust_expected(self, s):
        """Adjust leading indentation in the expected string s."""
        # Strip leading backslashes/newlines, dedent, and normalize to
        # exactly one trailing newline.
        return textwrap.dedent(s.lstrip('\\\n')).rstrip() + '\n'
    #@+node:ekr.20200110092217.1: *4* BaseTest.check_roundtrip
    def check_roundtrip(self, contents):
        """Check that the tokenizer round-trips the given contents."""
        contents, tokens, tree = self.make_data(contents)
        results = tokens_to_string(tokens)
        self.assertEqual(contents, results)
    #@+node:ekr.20191227054856.1: *4* BaseTest.make_data
    def make_data(self, contents, description=None):
        """
        Return (contents, tokens, tree) for the given contents.

        Tokenizes and parses the (dedented, newline-normalized) contents,
        then links tokens to the parse tree via TokenOrderGenerator.
        Entries in self.debug_list enable the optional dumps below.
        """
        contents = contents.lstrip('\\\n')
        if not contents:
            return '', None, None
        self.link_error = None
        t1 = get_time()
        self.update_counts('characters', len(contents))
        # Ensure all tests end in exactly one newline.
        contents = textwrap.dedent(contents).rstrip() + '\n'
        # Create the TOG instance.
        self.tog = TokenOrderGenerator()
        self.tog.filename = description or g.callers(2).split(',')[0]
        # Pass 0: create the tokens and parse tree
        tokens = self.make_tokens(contents)
        if not tokens:
            self.fail('make_tokens failed')
        tree = self.make_tree(contents)
        if not tree:
            self.fail('make_tree failed')
        if 'contents' in self.debug_list:
            dump_contents(contents)
        if 'ast' in self.debug_list:
            # ast.dump supports `indent` only on Python 3.9+.
            if py_version >= (3, 9):
                # pylint: disable=unexpected-keyword-arg
                g.printObj(ast.dump(tree, indent=2), tag='ast.dump')
            else:
                g.printObj(ast.dump(tree), tag='ast.dump')
        if 'tree' in self.debug_list:  # Excellent traces for tracking down mysteries.
            dump_ast(tree)
        if 'tokens' in self.debug_list:
            dump_tokens(tokens)
        self.balance_tokens(tokens)
        # Pass 1: create the links.
        self.create_links(tokens, tree)
        if 'post-tree' in self.debug_list:
            dump_tree(tokens, tree)
        if 'post-tokens' in self.debug_list:
            dump_tokens(tokens)
        t2 = get_time()
        self.update_times('90: TOTAL', t2 - t1)
        if self.link_error:
            self.fail(self.link_error)
        return contents, tokens, tree
    #@+node:ekr.20191227103533.1: *4* BaseTest.make_file_data
    def make_file_data(self, filename):
        """Return (contents, tokens, tree) from the given file."""
        # The file is resolved relative to leo/core, two levels up
        # from this unittests directory.
        directory = os.path.dirname(__file__)
        filename = g.os_path_finalize_join(directory, '..', '..', 'core', filename)
        assert os.path.exists(filename), repr(filename)
        contents = read_file(filename)
        contents, tokens, tree = self.make_data(contents, filename)
        return contents, tokens, tree
    #@+node:ekr.20191228101601.1: *4* BaseTest: passes...
    #@+node:ekr.20191228095945.11: *5* 0.1: BaseTest.make_tokens
    def make_tokens(self, contents):
        """
        BaseTest.make_tokens.
        Make tokens from contents.
        """
        t1 = get_time()
        # Tokenize.
        tokens = make_tokens(contents)
        t2 = get_time()
        self.update_counts('tokens', len(tokens))
        self.update_times('01: make-tokens', t2 - t1)
        return tokens
    #@+node:ekr.20191228102101.1: *5* 0.2: BaseTest.make_tree
    def make_tree(self, contents):
        """
        BaseTest.make_tree.
        Return the parse tree for the given contents string.
        """
        t1 = get_time()
        tree = parse_ast(contents)
        t2 = get_time()
        self.update_times('02: parse_ast', t2 - t1)
        return tree
    #@+node:ekr.20191228185201.1: *5* 0.3: BaseTest.balance_tokens
    def balance_tokens(self, tokens):
        """
        BastTest.balance_tokens.
        Insert links between corresponding paren tokens.
        """
        t1 = get_time()
        count = self.tog.balance_tokens(tokens)
        t2 = get_time()
        self.update_counts('paren-tokens', count)
        self.update_times('03: balance-tokens', t2 - t1)
        return count
    #@+node:ekr.20191228101437.1: *5* 1.1: BaseTest.create_links
    def create_links(self, tokens, tree, filename='unit test'):
        """
        BaseTest.create_links.
        Insert two-way links between the tokens and ast tree.

        On failure, stores the exception in self.link_error rather than
        failing immediately; make_data reports it at the end.
        """
        tog = self.tog
        try:
            t1 = get_time()
            # Yes, list *is* required here.
            list(tog.create_links(tokens, tree))
            t2 = get_time()
            self.update_counts('nodes', tog.n_nodes)
            self.update_times('11: create-links', t2 - t1)
        except Exception as e:
            print('\n')
            g.trace(g.callers(), '\n')
            if 'full-traceback' in self.debug_list:
                g.es_exception()
            # Weird: calling self.fail creates ugly failures.
            self.link_error = e
    #@+node:ekr.20191228095945.10: *5* 2.1: BaseTest.fstringify
    def fstringify(self, contents, tokens, tree, filename=None, silent=False):
        """
        BaseTest.fstringify.

        Run the Fstringify pass over contents and return the result string.
        """
        t1 = get_time()
        if not filename:
            filename = g.callers(1)
        fs = Fstringify()
        if silent:
            fs.silent = True
        result_s = fs.fstringify(contents, filename, tokens, tree)
        t2 = get_time()
        self.update_times('21: fstringify', t2 - t1)
        return result_s
    #@+node:ekr.20200107175223.1: *5* 2.2: BaseTest.beautify
    def beautify(self, contents, tokens, tree, filename=None, max_join_line_length=None, max_split_line_length=None):
        """
        BaseTest.beautify.

        Run the Orange beautifier over contents and return the result string.
        Also saves orange.code_list on self for inspection by tests.
        """
        t1 = get_time()
        if not contents:
            return ''
        if not filename:
            filename = g.callers(2).split(',')[0]
        orange = Orange()
        result_s = orange.beautify(contents, filename, tokens, tree,
            max_join_line_length=max_join_line_length,
            max_split_line_length=max_split_line_length)
        t2 = get_time()
        self.update_times('22: beautify', t2 - t1)
        self.code_list = orange.code_list
        return result_s
    #@+node:ekr.20191228095945.1: *4* BaseTest: stats...
    # Actions should fail by throwing an exception.
    #@+node:ekr.20191228095945.12: *5* BaseTest.dump_stats & helpers
    def dump_stats(self):
        """Show all calculated statistics."""
        if self.counts or self.times:
            print('')
            self.dump_counts()
            self.dump_times()
            print('')
    #@+node:ekr.20191228154757.1: *6* BaseTest.dump_counts
    def dump_counts(self):
        """Show all calculated counts."""
        for key, n in self.counts.items():
            print(f"{key:>16}: {n:>6}")
    #@+node:ekr.20191228154801.1: *6* BaseTest.dump_times
    def dump_times(self):
        """
        Show all calculated times.

        Keys should start with a priority (sort order) of the form `[0-9][0-9]:`
        """
        for key in sorted(self.times):
            t = self.times.get(key)
            # Strip the 'NN:' sort prefix for display.
            key2 = key[3:]
            print(f"{key2:>16}: {t:6.3f} sec.")
    #@+node:ekr.20191228181624.1: *5* BaseTest.update_counts & update_times
    def update_counts(self, key, n):
        """Update the count statistic given by key, n."""
        old_n = self.counts.get(key, 0)
        self.counts[key] = old_n + n

    def update_times(self, key, t):
        """Update the timing statistic given by key, t."""
        old_t = self.times.get(key, 0.0)
        self.times[key] = old_t + t
    #@-others
#@+node:ekr.20200122161530.1: *3* class Optional_TestFiles (BaseTest)
class Optional_TestFiles(BaseTest):
"""
Tests for the TokenOrderGenerator class that act on files.
These are optional tests. They take a long time and are not needed
for 100% coverage.
All of these tests failed at one time.
"""
#@+others
#@+node:ekr.20200726145235.2: *4* TestFiles.test_leoApp
    def test_leoApp(self):
        """Round-trip tokenize/parse/link leoApp.py."""
        self.make_file_data('leoApp.py')
    #@+node:ekr.20200726145235.1: *4* TestFiles.test_leoAst
    def test_leoAst(self):
        """Round-trip tokenize/parse/link leoAst.py."""
        self.make_file_data('leoAst.py')
    #@+node:ekr.20200726145333.1: *4* TestFiles.test_leoDebugger
    def test_leoDebugger(self):
        """Round-trip tokenize/parse/link leoDebugger.py."""
        self.make_file_data('leoDebugger.py')
    #@+node:ekr.20200726145333.2: *4* TestFiles.test_leoFind
    def test_leoFind(self):
        """Round-trip tokenize/parse/link leoFind.py."""
        self.make_file_data('leoFind.py')
    #@+node:ekr.20200726145333.3: *4* TestFiles.test_leoGlobals
    def test_leoGlobals(self):
        """Round-trip tokenize/parse/link leoGlobals.py."""
        self.make_file_data('leoGlobals.py')
    #@+node:ekr.20200726145333.4: *4* TestFiles.test_leoTips
    def test_leoTips(self):
        """Round-trip tokenize/parse/link leoTips.py."""
        self.make_file_data('leoTips.py')
    #@+node:ekr.20200726145735.1: *4* TestFiles.test_runLeo
    def test_runLeo(self):
        """Round-trip tokenize/parse/link runLeo.py."""
        self.make_file_data('runLeo.py')
#@+node:ekr.20200115162419.1: *4* TestFiles.compare_tog_vs_asttokens
def compare_tog_vs_asttokens(self):
"""Compare asttokens token lists with TOG token lists."""
if not asttokens:
self.skipTest('requires asttokens')
# Define TestToken class and helper functions.
stack: List[ast.AST] = []
#@+others
#@+node:ekr.20200124024159.2: *5* class TestToken (internal)
class TestToken:
"""A patchable representation of the 5-tuples created by tokenize and used by asttokens."""
def __init__(self, kind, value):
self.kind = kind
self.value = value
self.node_list: List[ast.AST] = []
def __str__(self):
tokens_s = ', '.join([z.__class__.__name__ for z in self.node_list])
return f"{self.kind:14} {self.value:20} | |
"""
Tools for creating and manipulating 1,2, and 3D meshes.
.. inheritance-diagram:: proteus.MeshTools
:parts: 1
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from builtins import input
from builtins import zip
from builtins import next
from builtins import str
from builtins import range
from past.utils import old_div
from builtins import object
from .EGeometry import *
import numpy as np
import array
import h5py
import os
from xml.etree import ElementTree as ET
from .Archiver import *
from .LinearAlgebraTools import ParVec_petsc4py
from .Profiling import logEvent,memory
from . import Domain
from . import Comm
from subprocess import check_call, check_output
class Node(object):
    """A numbered point in 3D Euclidean space.

    :ivar N: node number
    :ivar p: Euclidean coordinates

    Ordering, equality, and hashing are all derived from the (x, y, z)
    coordinate 3-tuple, so Node objects (and tuples of them) may be used
    as dictionary keys -- but the coordinates must not be mutated while
    a node is in use as a key.

    >>> n0 = Node(nodeNumber=0,x=0.0,y=0.0,z=0.0)
    >>> n1 = Node(nodeNumber=1,x=1.0,y=1.0,z=1.0)
    >>> n1 >= n0
    True
    """
    xUnitVector = EVec(1.0, 0.0, 0.0)
    yUnitVector = EVec(0.0, 1.0, 0.0)
    zUnitVector = EVec(0.0, 0.0, 1.0)

    def __init__(self, nodeNumber=0, x=0.0, y=0.0, z=0.0):
        self.N = nodeNumber
        self.p = EVec(x, y, z)
        self.basis = [Node.xUnitVector,
                      Node.yUnitVector,
                      Node.zUnitVector]
        self.elementBoundaries = []
        # A node is its own (degenerate) geometric entity.
        self.barycenter = self.p
        self.length = 1.0
        self.diameter = self.length
        self.innerDiameter = self.length
        self.hasGeometricInfo = True
        self.unitNormal = Node.xUnitVector
        self.nodes = (self,)

    def computeGeometricInfo(self):
        # Nothing to compute for a point.
        pass

    def _key(self):
        # Canonical comparison/hash key: the coordinate 3-tuple.
        return (self.p[X], self.p[Y], self.p[Z])

    def __str__(self):
        return str(self.N) + ":" + str(self.p)

    def __hash__(self):
        return hash(self._key())

    def __lt__(self, other):
        return self._key() < other._key()

    def __le__(self, other):
        return self._key() <= other._key()

    def __eq__(self, other):
        return self._key() == other._key()

    def __ne__(self, other):
        return self._key() != other._key()

    def __gt__(self, other):
        return self._key() > other._key()

    def __ge__(self, other):
        return self._key() >= other._key()
class Element(object):
    """A numbered polytope in R^n.

    :ivar N: element number
    :ivar nodes: sorted tuple of nodes defining the polytope

    The nodes data member can be used as a dictionary key for the polytope as
    long as the nodes aren't later modified.
    """
    def __init__(self, elementNumber=0, nodes=None):
        """
        Args:
            elementNumber: integer id of the element.
            nodes: iterable of nodes; stored as a lexicographically sorted
                tuple.  Defaults to no nodes.
        """
        self.N = elementNumber
        # Bug fix: the original used a mutable default argument (nodes=[])
        # and sorted the caller's list in place.  Sorting a copy avoids
        # both hazards while producing the same sorted tuple.
        self.nodes = tuple(sorted(nodes)) if nodes is not None else ()
        self.elementBoundaries = []
class Edge(Element):
    """
    1D Element--a line connecting two Nodes.

    The nodes are stored as a lexicographically sorted node list.
    """
    # Fix: this docstring used to appear *after* the class attributes
    # below, so it was a discarded string literal, not Edge.__doc__.
    xUnitVector = EVec(1.0, 1.0, 0.0)
    yUnitVector = EVec(0.0, 1.0, 0.0)
    zUnitVector = EVec(0.0, 0.0, 1.0)

    def __init__(self, edgeNumber=0, nodes=None):
        """
        Args:
            edgeNumber: integer id of the edge.
            nodes: the two end Nodes of the edge.
        """
        # Inline of Element.__init__, fixed to avoid the mutable default
        # argument and the in-place sort of the caller's list.
        self.N = edgeNumber
        self.nodes = tuple(sorted(nodes)) if nodes is not None else ()
        # Boundaries of a 1D element are its two end nodes.
        self.elementBoundaries = [self.nodes[1], self.nodes[0]]
        self.hasGeometricInfo = False

    def computeGeometricInfo(self):
        """Compute length, barycenter, and normals (computed once; idempotent)."""
        if not self.hasGeometricInfo:
            self.basis = [self.nodes[1].p - self.nodes[0].p,
                          Edge.yUnitVector,
                          Edge.zUnitVector]
            self.barycenter = old_div((self.nodes[0].p + self.nodes[1].p), 2.0)
            self.length = enorm(self.basis[0])
            # 2D normal: rotate the edge vector 90 degrees in the x-y plane.
            self.normal = EVec(-self.basis[0][Y], self.basis[0][X], 0.0)
            norm = enorm(self.normal)
            if norm:
                self.unitNormal = old_div(self.normal, norm)
            else:
                # in 3D edge normals don't make sense in general so above
                # may divide by zero if edge has zero projection onto x-y plane
                self.normal = EVec(0.0, -self.basis[0][Z], self.basis[0][Y])
                self.unitNormal = old_div(self.normal, enorm(self.normal))
            self.diameter = self.length
            self.innerDiameter = self.length
            self.hasGeometricInfo = True
            # Outward unit normals at the two end nodes.
            self.nodeUnitNormalList = []
            self.nodeUnitNormalList.append(old_div(-self.basis[0], self.length))
            self.nodeUnitNormalList.append(old_div(self.basis[0], self.length))
            self.elementBoundaryUnitNormalList = self.nodeUnitNormalList
            self.elementBoundaryJacobianList = [Edge.xUnitVector, Edge.xUnitVector]
def getNodesFromEdges(edges):
    """Extract the subset of nodes from a list of edges.

    Returns the distinct nodes in first-seen order.
    """
    unique = {node: node for edge in edges for node in edge.nodes}
    return list(unique.values())
class Polygon(Element):
    """An abstract 2D element--a closed set of Edges connecting a set of Nodes.

    The nodes and edges are stored as lexicographically sorted lists.
    """
    def __init__(self, polygonNumber=0, nodes=None):
        # Bug fix: avoid the shared mutable default argument (nodes=[]).
        Element.__init__(self, polygonNumber, [] if nodes is None else nodes)
        # The edges have to be set up by the specific polygon subclass.
        self.edges = []
def getEdgesFromPolygons(polygons):
    """Extract the subset of edges from a list of polygons.

    Edges are deduplicated by their node tuple; the last edge seen for a
    given node tuple wins, and first-seen order is preserved.
    """
    unique = {edge.nodes: edge for poly in polygons for edge in poly.edges}
    return list(unique.values())
class Triangle(Polygon):
    """A 2D triangular element"""
    # Maps a (sorted) pair of local node indices to the local index of the
    # edge joining them; edge i is opposite local node i.
    edgeMap = {(1,2):0,(0,2):1,(0,1):2}
    zUnitVector = EVec(0.0,0.0,1.0)
    def __init__(self,triangleNumber=0,nodes=[],edgeDict=None):
        """Build a triangle from three nodes.

        If edgeDict is given, edges are looked up there by node tuple
        (sharing edges with neighboring elements); otherwise new Edge
        objects are created.
        """
        #Polygon.__init__(self,triangleNumber,nodes)
        #inline
        self.edges=[]
        #Element.__init__
        #inline
        # NOTE(review): `nodes` is a mutable default and is sorted in
        # place, mutating the caller's list -- kept as-is here.
        self.N = triangleNumber
        nodeList = nodes
        nodeList.sort()
        self.nodes = tuple(nodeList)
        #self.nodes=nodes[:]
        #self.nodes.sort()
        #
        # Edge i connects the two nodes other than node i (see edgeMap).
        edgeNodeList = [(self.nodes[1],self.nodes[2]),
                        (self.nodes[0],self.nodes[2]),
                        (self.nodes[0],self.nodes[1])]
        if edgeDict is None:
            self.edges = [Edge(eN,list(edgeNodes)) for \
                          eN,edgeNodes in enumerate(edgeNodeList)]
        else:
            self.edges = [edgeDict[edgeNodes] for edgeNodes in edgeNodeList]
        self.hasGeometricInfo=False
        self.elementBoundaries=self.edges
    def computeGeometricInfo(self):
        """Compute barycenter, area, diameters, and edge normals (idempotent)."""
        if not self.hasGeometricInfo:
            self.barycenter = old_div((self.nodes[0].p +
                                       self.nodes[1].p +
                                       self.nodes[2].p),3.0)
            # Affine basis: the two edge vectors from node 0, plus z.
            self.basis = [ n.p - self.nodes[0].p for n in self.nodes[1:]]
            self.basis.append(Triangle.zUnitVector)
            self.linearMap = ETen(self.basis[0],self.basis[1],self.basis[2])
            self.normal = ecross(self.basis[0],self.basis[1])
            normNormal = enorm(self.normal)
            self.unitNormal = old_div(self.normal,normNormal)
            self.area = 0.5*normNormal
            for e in self.edges: e.computeGeometricInfo()
            self.diameter = max([e.length for e in self.edges])
            # Inner diameter of a triangle: 4*area/perimeter.
            self.innerDiameter = 4.0*self.area/sum(
                [e.length for e in self.edges])
            # Orient each edge normal outward: it must point away from the
            # opposite node (node eN is opposite edge eN by construction).
            self.edgeUnitNormalList=[]
            for nNt,eN in Triangle.edgeMap.items():
                unitNormal = self.edges[eN].unitNormal
                if edot(unitNormal,self.nodes[nNt[0]].p - self.nodes[eN].p) < 0:
                    unitNormal*=-1.0
                self.edgeUnitNormalList.append(unitNormal)
            self.elementBoundaryUnitNormalList = self.edgeUnitNormalList
            self.hasGeometricInfo=True
class Quadrilateral(Polygon):
    """A 2D quadrilateral element"""
    def __init__(self, quadrilateralNumber=0, edges=None, simple=True):
        """
        Args:
            quadrilateralNumber: integer id of the element.
            edges: the four Edge objects bounding the quadrilateral.
            simple: whether the quadrilateral is simple (e.g. a rectangle).
                Certain features are more difficult to implement when it
                is not.
        """
        Polygon.__init__(self, quadrilateralNumber)
        # Avoid a shared mutable default argument.
        self.edges = [] if edges is None else edges
        nodeList = getNodesFromEdges(self.edges)
        nodeList = self.sortNodes(nodeList)
        self.nodes = tuple(nodeList)
        self.hasGeometricInfo = False
        self.elementBoundaries = self.edges
        # Bug fix: this flag used to be hard-coded to True, silently
        # ignoring the caller's `simple` argument.
        self.simple = simple
    def sortNodes(self, nodeList):
        """Return the four nodes ordered cyclically around the quadrilateral:
        (min,min), (min,max), (max,max), (max,min) in the two non-degenerate
        coordinates."""
        newList = [None] * 4
        coordinate_list = [1, 1, 1]
        # initialize coordinate mins and maxs
        xMin = nodeList[0].p[X]
        xMax = nodeList[0].p[X]
        yMin = nodeList[0].p[Y]
        yMax = nodeList[0].p[Y]
        zMin = nodeList[0].p[Z]
        zMax = nodeList[0].p[Z]
        for node in nodeList:
            if xMin > node.p[X]:
                xMin = node.p[X]
            if xMax < node.p[X]:
                xMax = node.p[X]
            if yMin > node.p[Y]:
                yMin = node.p[Y]
            if yMax < node.p[Y]:
                yMax = node.p[Y]
            if zMin > node.p[Z]:
                zMin = node.p[Z]
            if zMax < node.p[Z]:
                zMax = node.p[Z]
        # identify the degenerate coordinate direction.
        # NOTE - this is not entirely accurate, but assumes
        # 2D quadrilateral objects are orthogonal to one of
        # the canonical coordinate axes
        if xMin == xMax:
            coordinate_list[0] = 0
        if yMin == yMax:
            coordinate_list[1] = 0
        if zMin == zMax:
            coordinate_list[2] = 0
        if sum(coordinate_list) != 2:
            assert 0, 'Invalid 2D quadrilateral object'
        for i, t in enumerate(coordinate_list):
            if t == 0:
                case = i
        # x is degenerate variable
        if case == 0:
            var1 = 1  # y marked as first node
            var2 = 2  # z marked as second
            var1_min = yMin
            var1_max = yMax
            var2_min = zMin
            var2_max = zMax
        # y is degenerate variable
        elif case == 1:
            var1 = 0  # x marked as first node
            var2 = 2  # z marked as second
            var1_min = xMin
            var1_max = xMax
            var2_min = zMin
            var2_max = zMax
        # z is degenerate variable
        elif case == 2:
            var1 = 0  # x marked as first node
            var2 = 1  # y marked as second
            var1_min = xMin
            var1_max = xMax
            var2_min = yMin
            var2_max = yMax
        else:
            assert 0, 'Invalid Quadrilateral Mesh Case'
        # Place each node by its corner in the (var1, var2) plane.
        for node in nodeList:
            if node.p[var1] == var1_min and node.p[var2] == var2_min:
                newList[0] = node
            elif node.p[var1] == var1_min and node.p[var2] == var2_max:
                newList[1] = node
            elif node.p[var1] == var1_max and node.p[var2] == var2_max:
                newList[2] = node
            elif node.p[var1] == var1_max and node.p[var2] == var2_min:
                newList[3] = node
        for i, item in enumerate(newList):
            if not newList[i]:
                assert 0, 'Quadrilateral Mesh Generation Error ' + str(newList) + " i = " + str(i)
        return newList
    def computeGeometricInfo(self):
        """Compute area, diameters, and (for simple quads) the bounding-box
        midpoint (idempotent)."""
        if not self.hasGeometricInfo:
            for e in self.edges: e.computeGeometricInfo()
            # the nodes must lie in a plane
            # use triangles to compute area
            # grab one triangle
            t0 = Triangle(0, list(self.nodes[0:3]))
            t0.computeGeometricInfo()
            # find the nodes that lie on the new edge, diagonal0:
            # the triangle edge that is not one of the quad's own edges.
            for et in t0.edges:
                edgeIsNew = True
                for e in self.edges:
                    if e.nodes == et.nodes:
                        edgeIsNew = False
                if edgeIsNew:
                    break
            diagonal0 = et
            t1 = Triangle(0, [self.nodes[3],
                              diagonal0.nodes[0],
                              diagonal0.nodes[1]])
            t1.computeGeometricInfo()
            # get normal from one of the triangles
            self.unitNormal = t0.unitNormal
            self.area = t0.area + t1.area
            # find the long diagonal: the node of t0 not on diagonal0,
            # joined to the fourth node.
            diagonalNode = 0
            for n in self.nodes[0:3]:
                if n != diagonal0.nodes[0] and n != diagonal0.nodes[1]:
                    diagonalNode = n
                    break
            # Bug fix: previously used the leaked loop variable `n` here;
            # use the node actually found by the search.
            diagonal1 = Edge(0, [diagonalNode, self.nodes[3]])
            diagonal1.computeGeometricInfo()
            self.diameter = max(diagonal1.length, diagonal0.length)
            self.innerDiameter = 4.0 * self.area / sum(
                [e.length for e in self.edges])
            # Calculate the coordinate of a simple quad
            if self.simple:
                self.xmin = self.nodes[0].p[X]
                self.ymin = self.nodes[0].p[Y]
                self.xmax = self.nodes[0].p[X]
                self.ymax = self.nodes[0].p[Y]
                for node in self.nodes:
                    if node.p[X] < self.xmin:
                        self.xmin = node.p[X]
                    elif node.p[X] > self.xmax:
                        self.xmax = node.p[X]
                    else:
                        pass
                    if node.p[Y] < self.ymin:
                        self.ymin = node.p[Y]
                    elif node.p[Y] > self.ymax:
                        self.ymax = node.p[Y]
                    else:
                        pass
                self.xmid = old_div((self.xmin + self.xmax), 2.)
                self.ymid = old_div((self.ymin + self.ymax), 2.)
                self.zmid = 0.
class Polyhedron(Element):
    """
    An abstract 3D Element--a closed set of Polygons connecting a set
    of Edges.

    The nodes and edges are stored as lexicographically sorted lists.
    """
    def __init__(self, polyhedronNumber=0, nodes=None):
        # Bug fix: avoid the shared mutable default argument (nodes=[]).
        Element.__init__(self, polyhedronNumber, [] if nodes is None else nodes)
        self.edges = []
        self.polygons = []
    def __cmp__(self, other):
        # NOTE(review): __cmp__ is ignored by Python 3 and only served the
        # legacy Python 2 code path; `compareNodes` is defined elsewhere
        # in the project.
        return compareNodes(self.nodes, other.nodes)
class Tetrahedron(Polyhedron):
"""A 3D tetrahedral element"""
triangleMap = {(1,2,3):0,(0,2,3):1,(0,1,3):2,(0,1,2):3}
edgeMap = {(0,1): 0,
(0,2): 1,
(0,3): 2,
(1,2): 3,
(1,3): 4,
(2,3): 5}
def __init__(self,tetrahedronNumber,nodes,edgeDict=None,triangleDict=None):
#Polyhedron.__init__(self,tetrahedronNumber,nodes)
#inline
#Element.__init__
#inline
self.N = tetrahedronNumber
nodeList = nodes
nodeList.sort()
self.nodes = tuple(nodeList)
#self.nodes=nodes[:]
#self.nodes.sort()
#
triangleNodeList = [(self.nodes[1],
self.nodes[2],
self.nodes[3]),
(self.nodes[0],
self.nodes[2],
self.nodes[3]),
(self.nodes[0],
self.nodes[1],
self.nodes[3]),
(self.nodes[0],
self.nodes[1],
self.nodes[2])]
| |
# <gh_stars>0
"""
Gradients for truncated and untruncated latent variable models.
"""
import torch as ch
from torch import Tensor
from torch import sigmoid as sig
from torch.distributions import Gumbel, MultivariateNormal, Bernoulli
import math
from .utils.helpers import logistic, censored_sample_nll
class CensoredMultivariateNormalNLL(ch.autograd.Function):
    """
    Computes the truncated negative population log likelihood for censored multivariate normal distribution.
    Function calculates the truncated negative log likelihood in the forward method and then calculates the
    gradients with respect to mu and cov in the backward method. When sampling from the conditional distribution,
    we sample batch_size * num_samples samples, we then filter out the samples that remain in the truncation set,
    and retain up to batch_size of the filtered samples. If fewer than batch_size samples remain,
    we provide a vector of zeros and calculate the untruncated log likelihood.
    """
    @staticmethod
    def forward(ctx, v, T, S, S_grad, phi, num_samples=10, eps=1e-5):
        """
        Args:
            v (torch.Tensor): reparameterized mean estimate (cov^(-1) * mu)
            T (torch.Tensor): square reparameterized (cov^(-1)) covariance matrix with dim d
            S (torch.Tensor): batch_size * dims, sample batch
            S_grad (torch.Tensor): batch_size * (dims + dim * dims) gradient for batch
            phi (delphi.oracle): oracle for censored distribution
            num_samples (int): number of samples to sample for each sample in batch
        """
        # Recover (mu, sigma) from the natural parameterization (v, T).
        sigma = T.inverse()
        mu = (sigma@v).flatten()
        # reparameterize distribution
        M = MultivariateNormal(mu, sigma)
        # sample num_samples * batch size samples from distribution
        s = M.sample([num_samples * S.size(0)])
        # Keep only the samples the oracle accepts (inside the truncation set).
        filtered = phi(s).nonzero(as_tuple=True)
        """
        TODO: see if there is a better way to do this
        """
        # z is a tensor of size batch size zeros, then fill with up to batch size num samples
        z = ch.zeros(S.size())
        elts = s[filtered][:S.size(0)]
        z[:elts.size(0)] = elts
        # standard negative log likelihood
        nll = .5 * ch.bmm((S@T).view(S.size(0), 1, S.size(1)), S.view(S.size(0), S.size(1), 1)).squeeze(-1) - S@v[None,...].T
        # normalizing constant for nll, estimated from the accepted samples z
        norm_const = -.5 * ch.bmm((z@T).view(z.size(0), 1, z.size(1)), z.view(z.size(0), z.size(1), 1)).squeeze(-1) + z@v[None,...].T
        ctx.save_for_backward(S_grad, z)
        return (nll + norm_const).mean(0)
    @staticmethod
    def backward(ctx, grad_output):
        """Return gradients for (v, T); remaining inputs get None."""
        S_grad, z = ctx.saved_tensors
        # calculate gradient
        grad = -S_grad + censored_sample_nll(z)
        # First dims columns of `grad` hold the covariance part, the rest the
        # mean part (see the S_grad layout documented in forward).
        return grad[:,z.size(1) ** 2:] / z.size(0), (grad[:,:z.size(1) ** 2] / z.size(0)).view(-1, z.size(1), z.size(1)), None, None, None, None, None
class TruncatedMultivariateNormalNLL(ch.autograd.Function):
    """
    Computes the negative population log likelihood for truncated multivariate normal distribution with unknown truncation.
    Calculates the population log-likelihood for the current batch in the forward step, and
    then calculates its gradient in the backwards step.
    """
    @staticmethod
    def forward(ctx, u, B, x, pdf, loc_grad, cov_grad, phi, exp_h):
        """
        Args:
            u (torch.Tensor): size (dims,) - current reparameterized mean estimate
            B (torch.Tensor): size (dims, dims) - current reparameterized covariance matrix estimate
            x (torch.Tensor): size (batch_size, dims) - batch of dataset samples
            pdf (torch.Tensor): size (batch_size, 1) - batch of pdf for dataset samples
            loc_grad (torch.Tensor): (batch_size, dims) - precomputed gradient for mean for batch
            cov_grad (torch.Tensor): (batch_size, dims * dims) - precomputed gradient for covariance matrix for batch
            phi (oracle.UnknownGaussian): oracle object for learning truncation set
            exp_h (Exp_h): helper class object for calculating exponential in the gradient
        """
        # Per-sample loss: exponential term * pdf * membership weight psi_k.
        exp = exp_h(u, B, x)
        psi = phi.psi_k(x)
        loss = exp * pdf * psi
        ctx.save_for_backward(loss, loc_grad, cov_grad)
        # Average over the batch.
        return loss / x.size(0)
    @staticmethod
    def backward(ctx, grad_output):
        """Return gradients for (u, B); remaining inputs get None."""
        loss, loc_grad, cov_grad = ctx.saved_tensors
        # Weight the precomputed per-sample gradients by the per-sample loss
        # and reshape the flat covariance gradient back to (dims, dims).
        return (loc_grad * loss) / loc_grad.size(0), ((cov_grad.flatten(1) * loss).unflatten(1, ch.Size([loc_grad.size(1), loc_grad.size(1)]))) / loc_grad.size(0), None, None, None, None, None, None
class TruncatedMSE(ch.autograd.Function):
    """
    Computes the gradient of the negative population log likelihood for censored regression
    with known noise variance.
    """
    @staticmethod
    def forward(ctx, pred, targ, phi, noise_var, num_samples=10, eps=1e-5):
        """
        Args:
            pred (torch.Tensor): size (batch_size, 1) matrix for regression model predictions
            targ (torch.Tensor): size (batch_size, 1) matrix for regression target predictions
            phi (oracle.oracle): dependent variable membership oracle
            noise_var (float): noise distribution variance parameter
            num_samples (int): number of samples to generate per sample in batch in rejection sampling procedure
            eps (float): denominator error constant to avoid divide by zero errors
        """
        # make num_samples copies of pred, N x B x 1
        stacked = pred[None, ...].repeat(num_samples, 1, 1)
        # add random noise to each copy
        noised = stacked + math.sqrt(noise_var) * ch.randn(stacked.size())
        # filter out copies where pred is in bounds
        filtered = phi(noised)
        # average across truncated indices (eps guards empty acceptance sets)
        z = (filtered * noised).sum(dim=0) / (filtered.sum(dim=0) + eps)
        out = ((-.5 * noised.pow(2) + noised * pred) * filtered).sum(dim=0) / (filtered.sum(dim=0) + eps)
        ctx.save_for_backward(pred, targ, z)
        return (-.5 * targ.pow(2) + targ * pred - out).mean(0)
    @staticmethod
    def backward(ctx, grad_output):
        """Return gradients for (pred, targ); remaining inputs get None."""
        pred, targ, z = ctx.saved_tensors
        # Gradient w.r.t. pred: difference between the conditional-mean
        # estimate z and the target, averaged over the batch.
        return (z - targ) / pred.size(0), targ / pred.size(0), None, None, None, None
class TruncatedUnknownVarianceMSE(ch.autograd.Function):
    """
    Computes the gradient of negative population log likelihood for truncated linear regression
    with unknown noise variance.
    """
    @staticmethod
    def forward(ctx, pred, targ, lambda_, phi, num_samples=10, eps=1e-5):
        """
        Args:
            pred (torch.Tensor): size (batch_size, 1) matrix for regression model predictions
            targ (torch.Tensor): size (batch_size, 1) matrix for regression target predictions
            lambda_ (float): current reparameterized variance estimate for noise distribution
            phi (oracle.oracle): dependent variable membership oracle
            num_samples (int): number of samples to generate per sample in batch in rejection sampling procedure
            eps (float): denominator error constant to avoid divide by zero errors
        """
        # calculate std deviation of noise distribution estimate
        sigma = ch.sqrt(lambda_.inverse())
        # make num_samples copies of pred, B x N x 1
        stacked = pred[..., None].repeat(1, num_samples, 1)
        # add noise to regression predictions
        noised = stacked + sigma * ch.randn(stacked.size())
        # filter out copies that fall outside of truncation set
        filtered = phi(noised)
        out = noised * filtered
        # conditional first moment estimate (eps guards empty acceptance sets)
        z = out.sum(dim=1) / (filtered.sum(dim=1) + eps)
        # z_2: squared value of the "best" (first accepted, per the
        # descending sort of the filter mask) noised sample per batch row.
        z_2 = Tensor([])
        for i in range(filtered.size(0)):
            z_2 = ch.cat([z_2, noised[i][filtered[i].squeeze(-1).sort(descending=True).indices[0]].pow(2)[None,...]])
        # conditional second moment estimate (computed but unused here;
        # saved for backward).
        z_2_ = out.pow(2).sum(dim=1) / (filtered.sum(dim=1) + eps)
        nll = -0.5 * lambda_ * targ.pow(2) + lambda_ * targ * pred
        const = -0.5 * lambda_ * z_2 + z * pred * lambda_
        ctx.save_for_backward(pred, targ, lambda_, z, z_2, z_2_)
        return nll - const
    @staticmethod
    def backward(ctx, grad_output):
        """Return gradients for (pred, targ, lambda_); remaining inputs get None."""
        pred, targ, lambda_, z, z_2, z_2_ = ctx.saved_tensors
        """
        multiply the v gradient by lambda, because autograd computes
        v_grad*x*variance, thus need v_grad*(1/variance) to cancel variance
        factor
        """
        lambda_grad = .5 * (targ.pow(2) - z_2)
        return lambda_ * (z - targ) / pred.size(0), targ / pred.size(0), lambda_grad / pred.size(0), None, None, None
class TruncatedBCE(ch.autograd.Function):
    """
    Truncated binary cross entropy gradient for truncated binary classification tasks.
    """
    @staticmethod
    def forward(ctx, pred, targ, phi, num_samples=10, eps=1e-5):
        """
        Args:
            pred (torch.Tensor): size (batch_size, 1) matrix for regression model predictions
            targ (torch.Tensor): size (batch_size, 1) matrix for regression target predictions
            phi (oracle.oracle): dependent variable membership oracle
            num_samples (int): number of samples to generate per sample in batch in rejection sampling procedure
            eps (float): denominator error constant to avoid divide by zero errors
        """
        # make num_samples copies of pred: N x B x 1
        stacked = pred[None, ...].repeat(num_samples, 1, 1)
        rand_noise = logistic.sample(stacked.size())
        # add logistic noise to each copy
        noised = stacked + rand_noise
        # implied labels of the noised logits
        noised_labs = noised >= 0
        # filter out copies that fall outside of the truncation set
        filtered = phi(noised)
        # mask of noised samples whose implied label matches the target
        mask = (noised_labs).eq(targ)
        nll = (filtered * mask * logistic.log_prob(rand_noise)).sum(0) / ((filtered * mask).sum(0) + eps)
        const = (filtered * logistic.log_prob(rand_noise)).sum(0) / (filtered.sum(0) + eps)
        # Bug fix: an earlier argument-less ctx.save_for_backward() call was
        # removed; only this call (with the tensors backward needs) remains.
        ctx.save_for_backward(mask, filtered, rand_noise)
        ctx.eps = eps
        return -(nll - const) / pred.size(0)
    @staticmethod
    def backward(ctx, grad_output):
        """Return the gradient for pred; remaining inputs get None."""
        mask, filtered, rand_noise = ctx.saved_tensors
        avg = 2*(sig(rand_noise) * mask * filtered).sum(0) / ((mask * filtered).sum(0) + ctx.eps)
        norm_const = (2 * sig(rand_noise) * filtered).sum(0) / (filtered.sum(0) + ctx.eps)
        return -(avg - norm_const) / rand_noise.size(1), None, None, None, None
class TruncatedProbitMLE(ch.autograd.Function):
@staticmethod
def forward(ctx, pred, targ, phi, num_samples=10, eps=1e-5):
"""
Args:
pred (torch.Tensor): size (batch_size, 1) matrix for regression model predictions
targ (torch.Tensor): size (batch_size, 1) matrix for regression target predictions
phi (oracle.oracle): dependent variable membership oracle
num_samples (int): number of sampels to generate per sample in batch in rejection sampling procedure
eps (float): denominator error constant to avoid divide by zero errors
"""
M = MultivariateNormal(ch.zeros(1,), ch.eye(1, 1))
stacked = pred[None,...].repeat(num_samples, 1, 1)
rand_noise = ch.randn(stacked.size())
noised = stacked + rand_noise
noised_labs = noised >= 0
mask = noised_labs.eq(targ)
filtered = | |
if module_name in v2_modules:
# already v2
return
v2_modules.add(module_name)
if module_name in set_up:
set_up.remove(module_name)
if module_name in done:
# some submodules already loaded as v1 => reload
imports.extend(done[module_name].values())
del done[module_name]
if module_name in ast_by_top_level_mod:
del ast_by_top_level_mod[module_name]
def load_module_v2_requirements(module_like: ModuleLike) -> None:
"""
Loads all v2 modules explicitly required by the supplied module like instance, installing them if install=True. If
any of these requirements have already been loaded as v1, queues them for reload.
"""
for requirement in module_like.get_module_v2_requirements():
# load module
self.get_module(
requirement.key,
allow_v1=False,
install_v2=install,
bypass_module_cache=bypass_module_cache,
)
# queue AST reload
require_v2(requirement.key)
def setup_module(module: Module) -> None:
"""
Sets up a top level module, making sure all its v2 requirements are loaded correctly. V2 modules do not support
import-based installation because of security reasons (it would mean we implicitly trust any `inmanta-module-x`
package for the module we're trying to load). As a result we need to make sure all required v2 modules are present
in a set up stage.
"""
if module.name in set_up:
# already set up
return
load_module_v2_requirements(module)
set_up.add(module.name)
def load_sub_module(module: Module, submod: str) -> None:
"""
Loads a submodule's AST and processes its imports. Enforces dependency generation directionality (v1 can depend on
v2 but not the other way around). If any modules have already been loaded with an incompatible generation, queues
them for reload.
Does not install any v2 modules.
"""
parts: List[str] = submod.split("::")
for i in range(1, len(parts) + 1):
subs = "::".join(parts[0:i])
if subs in done[module_name]:
continue
(nstmt, nb) = module.get_ast(subs)
done[module_name][subs] = imp
ast_by_top_level_mod[module_name].append((subs, nstmt, nb))
# get imports and add to list
subs_imports: List[DefineImport] = module.get_imports(subs)
imports.extend(subs_imports)
if isinstance(module, ModuleV2):
# A V2 module can only depend on V2 modules. Ensure that all dependencies
# of this module will be loaded as a V2 module.
for dep_module_name in (subs_imp.name.split("::")[0] for subs_imp in subs_imports):
require_v2(dep_module_name)
# load this project's v2 requirements
load_module_v2_requirements(self)
# Loop over imports. For each import:
# 1. Load the top level module. For v1, install if install=True, for v2 import-based installation is disabled for
# security reasons. v2 modules installation is done in step 2.
# 2. Set up top level module if it has not been set up yet, loading v2 requirements and installing them if install=True.
# 3. Load AST for imported submodule and its parent modules, queueing any transient imports.
while len(imports) > 0:
imp: DefineImport = imports.pop()
ns: str = imp.name
module_name: str = ns.split("::")[0]
if ns in done[module_name]:
continue
try:
# get module
module: Module = self.get_module(
module_name,
allow_v1=module_name not in v2_modules,
install_v1=install,
install_v2=False,
bypass_module_cache=bypass_module_cache,
)
setup_module(module)
load_sub_module(module, ns)
except (InvalidModuleException, ModuleNotFoundException) as e:
raise ModuleLoadingException(ns, imp, e)
return list(chain.from_iterable(ast_by_top_level_mod.values()))
def load_module(
self,
module_name: str,
*,
allow_v1: bool = False,
install_v1: bool = False,
install_v2: bool = False,
) -> "Module":
"""
Get a module instance for a given module name. The install parameters allow to install the module if it has not been
installed yet. If both install parameters are False, the module is expected to be preinstalled.
:param module_name: The name of the module.
:param allow_v1: Allow this module to be loaded as v1.
:param install_v1: Allow installing this module as v1 if it has not yet been installed. This option is ignored if
allow_v1=False.
:param install_v2: Allow installing this module as v2 if it has not yet been installed, implicitly trusting any Python
package with the corresponding name.
"""
if not self.is_using_virtual_env():
self.use_virtual_env()
reqs: Mapping[str, List[InmantaModuleRequirement]] = self.collect_requirements()
module_reqs: List[InmantaModuleRequirement] = (
list(reqs[module_name]) if module_name in reqs else [InmantaModuleRequirement.parse(module_name)]
)
module: Optional[Union[ModuleV1, ModuleV2]]
try:
module = self.module_source.get_module(self, module_reqs, install=install_v2)
if module is not None and self.module_source_v1.path_for(module_name) is not None:
LOGGER.warning("Module %s is installed as a V1 module and a V2 module: V1 will be ignored.", module_name)
if module is None and allow_v1:
module = self.module_source_v1.get_module(self, module_reqs, install=install_v1)
except InvalidModuleException:
raise
except Exception as e:
raise InvalidModuleException(f"Could not load module {module_name}") from e
if module is None:
raise ModuleNotFoundException(
f"Could not find module {module_name}. Please make sure to add any module v2 requirements with"
" `inmanta module add --v2` and to install all the project's dependencies with `inmanta project install`."
)
self.modules[module_name] = module
return module
def load_plugins(self) -> None:
"""
Load all plug-ins
"""
if not self.loaded:
LOGGER.warning("loading plugins on project that has not been loaded completely")
# ensure the loader is properly configured
loader.PluginModuleFinder.configure_module_finder(self.modulepath)
for module in self.modules.values():
module.load_plugins()
def verify(self) -> None:
"""
Verifies the integrity of the loaded project, with respect to both inter-module requirements and the Python environment.
"""
self.verify_modules_cache()
self.verify_module_version_compatibility()
self.verify_python_requires()
def verify_python_environment(self) -> None:
"""
Verifies the integrity of the loaded project with respect to the Python environment, over which the project has no
direct control.
"""
self.verify_modules_cache()
self.verify_python_requires()
def verify_modules_cache(self) -> None:
if not self._modules_cache_is_valid():
raise CompilerException(
"Not all modules were loaded correctly as a result of transient dependencies. A recompile should load them"
" correctly."
)
def verify_module_version_compatibility(self) -> None:
if not self._module_versions_compatible():
raise CompilerException("Not all module dependencies have been met. Run `inmanta modules update` to resolve this.")
def verify_python_requires(self) -> None:
"""
Verifies no incompatibilities exist within the Python environment with respect to installed module v2 requirements.
"""
if not env.ActiveEnv.check(in_scope=re.compile(f"{ModuleV2.PKG_NAME_PREFIX}.*")):
raise CompilerException(
"Not all installed modules are compatible: requirements conflicts were found. Please resolve any conflicts"
" before attempting another compile. Run `pip check` to check for any incompatibilities."
)
def _modules_cache_is_valid(self) -> bool:
"""
Verify the modules cache after changes have been made to the Python environment. Returns False if any modules
somehow got installed as another generation or with another version as the one that has been loaded into the AST.
When this situation occurs, the compiler state is invalid and the compile needs to either abort or attempt recovery.
The modules cache, from which the AST was loaded, is out of date, therefore at least a partial AST regeneration
would be required to recover.
Scenario's that could trigger this state:
1.
- latest v2 mod a is installed
- some v1 mod depends on v2 mod b, which depends on a<2
- during loading, after a has been loaded, mod b is installed
- Python downgrades transient dependency a to a<2
2.
- latest v2 mod a is installed
- some v1 (or even v2 when in install mode) mod depends on a<2
- after loading, during plugin requirements install, `pip install a<2` is run
- Python downgrades direct dependency a to a<2
In both cases, a<2 might be a valid version, but since it was installed transiently after the compiler has loaded module
a, steps would need to be taken to take this change into account.
"""
result: bool = True
for name, module in self.modules.items():
installed: Optional[ModuleV2] = self.module_source.get_installed_module(self, name)
if installed is None:
if module.GENERATION == ModuleGeneration.V1:
# Loaded module as V1 and no installed V2 module found: no issues with this module
continue
raise CompilerException(
f"Invalid state: compiler has loaded module {name} as v2 but it is nowhere to be found."
)
else:
if module.GENERATION == ModuleGeneration.V1:
LOGGER.warning(
"Compiler has loaded module %s as v1 but it has later been installed as v2 as a side effect.", name
)
result = False
elif installed.version != module.version:
LOGGER.warning(
"Compiler has loaded module %s==%s but %s==%s has later been installed as a side effect.",
name,
module.version,
name,
installed.version,
)
result = False
return result
def _module_versions_compatible(self) -> bool:
"""
Check if all the required modules for this module have been loaded. Assumes the modules cache is valid and up to date.
"""
LOGGER.info("verifying project")
imports = set([x.name for x in self.get_complete_ast()[0] if isinstance(x, DefineImport)])
good = True
requirements: Dict[str, List[InmantaModuleRequirement]] = self.collect_requirements()
for name, spec in requirements.items():
if name not in imports:
continue
module = self.modules[name]
version = parse_version(str(module.version))
for r in spec:
if version not | |
<reponame>stippingerm/mypylib<filename>multivariate_distns/copula.py
"""General copula."""
# Author: <NAME>
# Inspired by and based on sci-kit learn mixture models
# License: BSD 3 clause
# See: Berkes, Wood & Pillow (2009). Characterizing neural dependencies with copula models. NIPS 21, 129–136.
# Retrieved from http://papers.nips.cc/paper/3593-characterizing-neural-dependencies-with-copula-models.pdf
# See also: https://www.vosesoftware.com/riskwiki/Archimedeancopulas-theClaytonFrankandGumbel.php
# Two-variate implementation of Archimedean copulas is available in copulalib but it is far from complete.
# The continuation of copulalib is https://pypi.org/project/ambhas/ still bivariate copulas only.
# Sampling from cdf is difficult but feasible for Archimedean copulas: https://stackoverflow.com/a/50981679
# http://pluto.huji.ac.il/~galelidan/Copulas%20and%20Machine%20Learning.pdf
import numpy as np
from scipy import stats
from scipy.stats import rv_continuous, norm, gaussian_kde
from scipy.stats._multivariate import multivariate_normal_gen, multi_rv_generic
from scipy.stats._distn_infrastructure import argsreduce
from sklearn.base import TransformerMixin
from scipy.misc import derivative
from scipy.special import gamma, gammaln
from abc import abstractmethod
from scipy.optimize import brentq as solve, minimize
def _exist(*unused):
"""Check if the argument exists: in fact the check happens
when calling this function and this function has nothing
to do with it. It's merely a way to enforce the check.
Parameters
----------
unused : any_type
list all variables as argument to be checked
Notes
-----
To pass code inspection the function has to deal with its input
therefore it deletes its own reference provided to the top level
variables but this should not imply any side effects.
"""
del unused
pass
def _broadcast_shapes(a, b, align_as_numpy=True):
a, b = np.atleast_1d(a, b)
if a.ndim > 1 or b.ndim > 1:
raise ValueError('broadcasting of single shapes is supported only')
result_shape = np.maximum(len(a), len(b))
padded_a = np.ones(result_shape, dtype=int)
padded_b = np.ones(result_shape, dtype=int)
if align_as_numpy:
if len(a):
padded_a[-len(a):] = a
if len(b):
padded_b[-len(b):] = b
else:
if len(a):
padded_a[:len(a)] = a
if len(b):
padded_b[:len(b)] = b
to_change_a = (padded_a != padded_b) & (padded_b != 1)
if np.any(padded_a[to_change_a] != 1):
raise ValueError('shapes %s and %s could not be broadcast together' % (a, b))
padded_a[to_change_a] = padded_b[to_change_a]
return padded_a
# ------------------------
# Parameters of a copula
# ------------------------
#
# Training data alignment (like scipy.stats.multivariate_normal):
# n_observation x n_comp
# optionally n_obs_ax1 x ... x n_obs_ax_N x n_comp
#
# Marginals:
# n_comp x n_stat_param
# TODO: allow shape 1 x n_stat_param if iid_marginals
#
# Joint is expected in the following form:
# n_comp x n_joint_param_per_comp
# e.g. for a gaussian without compression: n_comp x (n_comp + 1)
# But we cannot rely on this therefore it is stored as a simple list.
#
# However parameters have a well-defined shape they are not suitable for
# scipy.stat.rv_continuous because it tries to broadcast parameters with
# data.
# To circumvent this there are two options:
# 1) Store parameters in a way less efficient than np.array,
# e.g., hide details using a n_comp long list
# 2) Use multi_rv_generic which does not try to validate data.
# This way one loses the automatic shortcuts that infer functions not
# provided explicitly, e.g., rv_continuous.sf = 1 - rv_continuous.isf
# To some point this is reasonable because the quadratures that are
# designed for one dimension do not work in two or higher dimensions.
#
# ------------------
# How copula works
# ------------------
#
# The copula is a multivariate distribution with support of $[0,1]^n$.
# To achieve this one uses an arbitrary joint distribution $F_n$ and its
# marginal version $F_1$ (they have the support $I^n$ and $I$ respectively)
# and combines them to obtain the distribution of the copula:
#
# $$ C(u_1, ..., u_n) = F_nn(F_1^{-1}(u_1}, ..., F_n^{-1}(u_n}) $$
#
# here $F_i^{-1}$ denoting the inverse function of $F_i$ and $u_i \in [0,1]$.
# $\phi(u_i) = F_i^{-1}(u_i)$ is termed the generator of the copula. Its
# derivative is $d \phi(u_i) / d u_i = 1 / f_i(F_i^{-1}(u_i))$ therefore
# the pdf of the copula is as follows:
#
# $$ c(u_1, ..., u_n) = f_nn(F_1^{-1}(u_1), ..., F_n^{-1}(u_n} / (
# f_1(F_1^{-1}(u_1)) ... f_n(F_n^{-1}(u_n}) ) $$
#
# with $f_nn$ denoting the function $F_nn$ derived once in each of its args.
#
# Since the definition was given through the cdf no normalization is needed.
# In our implementation, Archimedean copula generators are implemented as
# `ppf` and the inverse of the generator is a `cdf` satisfying usual domain
# of definition. Where possible the nth derivative of the cdf is given too.
#
# The copula distribution accounts for the correlations between variables
# but it has to be mapped to the desired marginals $G_1, ..., G_n$ by the
# histogram normalization method:
#
# $$ u_i = G_i(y_i) $$
#
# Obviously this is a measure change $ d u_i / d y_i = g_i(y_i) $.
# $$ \Prod g_i(y_i) d y_i = c(u_1, ..., u_n) \Prod d u_i $$
#
def _process_parameters(dim, marginal, joint, dtype_marginal=float, dtype_joint=float):
"""
Infer dimensionality from marginal or covariance matrix, ensure that
marginal and covariance are full vector resp. matrix.
Parameters
----------
dim : None, int
number of dimensions of the multivariate distribution
marginal : numpy.ndarray, shape (n_comp, ...)
joint: numpy.ndarray, shape (n_comp, ...)
dtype_marginal: Union[Type, numpy.dtype]
dtype_joint: Union[Type, numpy.dtype]
"""
# Adapted form scipy.stats._multivariate._process_parameters
# Try to infer dimensionality
if dim is None:
if marginal is None:
if joint is None:
dim = 1
else:
joint = np.asarray(joint, dtype=dtype_joint)
if joint.ndim < 2:
dim = 1
else:
dim = joint.shape[0]
import warnings
warnings.warn("It is not safe to infer dim from 'joint'")
else:
marginal = np.asarray(marginal, dtype=dtype_marginal)
dim = marginal.shape[0]
else:
if not np.isscalar(dim):
raise ValueError("Dimension of random variable must be a scalar.")
# Check input sizes and return full arrays for marginal and joint if necessary
if marginal is None:
marginal = np.zeros(dim)
marginal = np.asarray(marginal, dtype=dtype_marginal)
if joint is None:
joint = 1.0
if dtype_joint is not None:
joint = np.asarray(joint, dtype=dtype_joint)
if marginal.shape[0] != dim:
raise ValueError("Array 'marginal' must be of length %d." % dim)
# if joint.shape[0] != dim:
# raise ValueError("Array 'joint' must be of length %d." % dim)
if marginal.ndim > 2:
raise ValueError("Array 'marginal' must be at most two-dimensional,"
" but marginal.ndim = %d" % marginal.ndim)
# if joint.ndim > 2:
# raise ValueError("Array 'joint' must be at most two-dimensional,"
# " but joint.ndim = %d" % joint.ndim)
return dim, marginal, joint
def _process_quantiles(x, dim):
"""
Adjust quantiles array so that last axis labels the components of
each data point.
"""
# Adapted form scipy.stats._multivariate._process_quantiles
x = np.asarray(x, dtype=float)
if x.ndim == 0:
x = x[np.newaxis]
elif x.ndim == 1:
if dim == 1:
x = x[:, np.newaxis]
else:
x = x[np.newaxis, :]
return x
def _align_vars(stat, data, params):
params = np.asarray(params)
data = np.asarray(data)
stat = np.asarray(stat)
n_sample = data.shape[-1]
stat = np.broadcast_to(stat, _broadcast_shapes(stat.shape, (n_sample,)))
params = np.broadcast_to(params.T, _broadcast_shapes(params.T.shape, (n_sample,))).T
return stat, data, params
def _fit_individual_marginals(stat, data):
"""Estimate the parameters for marginal distributions.
Parameters
----------
stat : scipy.stats.rv_continuous or an array of them, shape (n_comp,)
data : ndarray, shape (..., n_comp)
The components are listed in rows while realisations in columns
Returns
-------
params: list, shape (n_comp,) of tuples"""
stat, data, _ = _align_vars(stat, data, 0)
ret = [s.fit(comp) for s, comp in zip(stat, data.T)]
return np.asarray(ret)
def _range_check(stat, data, params=None):
"""
Check the range of input data against the support of the distribution
Parameters
----------
data : ndarray, shape (..., n_comp)
Data samples, with the last axis of `data` denoting the components.
"""
if stat is None:
return True
# FIXME: this is wrong because it does not account for loc and scale
if params is None:
lo, up = stat.a, stat.b
ret = (np.all(lo <= comp) & np.all(comp <= up) for comp in data.T)
else:
try:
limits = ((stat(p).a, stat(p).b) for p in params)
ret = (np.all(lo <= comp) & np.all(comp <= up) for (comp, (lo, up)) in zip(data.T, limits))
except AttributeError as e:
raise ValueError("Could not verify bounds.") from e
if not all(ret):
raise ValueError("Data point out of the support of marginal distribution")
def _no_range_check(*args, **kwargs):
del args, kwargs
pass
def _unit_interval_check(stat, data, params=None):
"""
Check the range of input data against the support of the distribution
Parameters
----------
data : ndarray, shape (..., n_comp)
Data samples, with the last axis of `data` denoting the components.
"""
del stat, params
lower, upper = 0, 1
ret = (np.all(lower <= comp) & np.all(comp <= upper) for comp in data.T)
if not all(ret):
raise ValueError("Data point out of the support [0,1] of marginal distribution")
def _repeat_params(params, dim):
"""Broadcast params | |
size is a multiple of this value.
length_bucket_width: The width of the length buckets to select batch
candidates from. ``None`` to not constrain batch formation.
length_fn: A function or list of functions (in case of a parallel dataset)
that take features as argument and return the associated sequence length.
padded_shapes: The padded shapes for this dataset. If ``None``, the shapes
are automatically inferred from the dataset output shapes.
Returns:
A ``tf.data.Dataset`` transformation.
Raises:
ValueError: if :obj:`batch_type` is not one of "examples" or "tokens".
ValueError: if :obj:`batch_type` is "tokens" but :obj:`length_bucket_width`
is not set.
ValueError: if the number of length functions in :obj:`length_fn` does not
match the number of parallel elements.
See Also:
:func:`opennmt.data.batch_dataset`
"""
batch_size = batch_size * batch_multiplier
def _get_bucket_id(features, length_fn):
default_id = tf.constant(0, dtype=tf.int32)
if length_fn is None:
return default_id
lengths = length_fn(features)
if lengths is None:
return default_id
if not isinstance(lengths, list):
lengths = [lengths] # Fallback to the general case of parallel inputs.
lengths = [length // length_bucket_width for length in lengths]
return tf.reduce_max(lengths)
def _key_func(*args):
length_fns = length_fn
if length_fns is None:
length_fns = [None for _ in args]
elif not isinstance(length_fns, (list, tuple)):
length_fns = [length_fns]
if len(length_fns) != len(args):
raise ValueError(
"%d length functions were passed but this dataset contains "
"%d parallel elements" % (len(length_fns), len(args))
)
# Take the highest bucket id.
bucket_id = tf.reduce_max(
[
_get_bucket_id(features, length_fn)
for features, length_fn in zip(args, length_fns)
]
)
return tf.cast(bucket_id, tf.int64)
def _reduce_func(unused_key, dataset):
return dataset.apply(batch_dataset(batch_size, padded_shapes=padded_shapes))
def _window_size_func(key):
if length_bucket_width > 1:
key += 1 # For length_bucket_width == 1, key 0 is unassigned.
size = batch_size // (key * length_bucket_width)
required_multiple = batch_multiplier * batch_size_multiple
if required_multiple > 1:
size = size + required_multiple - size % required_multiple
return tf.cast(tf.maximum(size, required_multiple), tf.int64)
def _group_by_window(*args, **kwargs):
# TODO: clean this API when TensorFlow requirement is updated to >=2.6.
if compat.tf_supports("data.Dataset.group_by_window"):
return lambda dataset: dataset.group_by_window(*args, **kwargs)
else:
return tf.data.experimental.group_by_window(*args, **kwargs)
if length_bucket_width is None:
if batch_type == "tokens":
raise ValueError(
"Batch type 'tokens' requires length bucketing (the parameter "
"length_bucket_width should be non null)"
)
return batch_dataset(batch_size, padded_shapes=padded_shapes)
if batch_type == "examples":
return _group_by_window(_key_func, _reduce_func, window_size=batch_size)
elif batch_type == "tokens":
return _group_by_window(
_key_func, _reduce_func, window_size_func=_window_size_func
)
else:
raise ValueError(
"Invalid batch type: '{}'; should be 'examples' or 'tokens'".format(
batch_type
)
)
def training_pipeline(
batch_size,
batch_type="examples",
batch_multiplier=1,
batch_size_multiple=1,
process_fn=None,
transform_fns=None,
length_bucket_width=None,
features_length_fn=None,
labels_length_fn=None,
maximum_features_length=None,
maximum_labels_length=None,
single_pass=False,
num_shards=1,
shard_index=0,
num_threads=None,
dataset_size=None,
shuffle_buffer_size=None,
prefetch_buffer_size=None,
cardinality_multiple=1,
):
"""Transformation that applies most of the dataset operations commonly used
for training on sequence data:
* sharding
* shuffling
* processing
* filtering
* bucketization
* batching
* prefetching
Example:
>>> dataset = dataset.apply(opennmt.data.training_pipeline(...))
Args:
batch_size: The batch size to use.
batch_type: The training batching strategy to use: can be "examples" or
"tokens".
batch_multiplier: The batch size multiplier.
batch_size_multiple: When :obj:`batch_type` is "tokens", ensure that the
resulting batch size is a multiple of this value.
process_fn: The processing function to apply on each element.
transform_fns: List of dataset transformation functions (applied after
:obj:`process_fn` if defined).
length_bucket_width: The width of the length buckets to select batch
candidates from. ``None`` to not constrain batch formation.
features_length_fn: A function mapping features to a sequence length.
labels_length_fn: A function mapping labels to a sequence length.
maximum_features_length: The maximum length or list of maximum lengths of
the features sequence(s). ``None`` to not constrain the length.
maximum_labels_length: The maximum length of the labels sequence.
``None`` to not constrain the length.
single_pass: If ``True``, makes a single pass over the training data.
num_shards: The number of data shards (usually the number of workers in a
distributed setting).
shard_index: The shard index this data pipeline should read from.
num_threads: The number of elements processed in parallel.
dataset_size: If the dataset size is already known, it can be passed here to
avoid a slower generic computation of the dataset size later.
shuffle_buffer_size: The number of elements from which to sample.
prefetch_buffer_size: The number of batches to prefetch asynchronously. If
``None``, use an automatically tuned value.
cardinality_multiple: Ensure that the dataset cardinality is a multiple of
this value when :obj:`single_pass` is ``True``.
Returns:
A ``tf.data.Dataset`` transformation.
See Also:
- :func:`opennmt.data.batch_sequence_dataset`
- :func:`opennmt.data.make_cardinality_multiple_of`
- :func:`opennmt.data.filter_examples_by_length`
- :func:`opennmt.data.filter_irregular_batches`
- :func:`opennmt.data.shuffle_dataset`
"""
if dataset_size is not None and num_shards > 1:
# Update dataset_size based on the shard size.
if isinstance(dataset_size, list):
dataset_size = [size // num_shards for size in dataset_size]
else:
dataset_size //= num_shards
def _make_weighted_dataset(datasets, weights):
if single_pass:
raise ValueError(
"single_pass parameter is not compatible with weighted datasets"
)
if not datasets:
raise ValueError("At least one dataset is required")
if weights is not None and len(weights) != len(datasets):
raise ValueError(
"%d dataset weights were provided, but %d were expected to match the "
"number of data files" % (len(weights), len(datasets))
)
if num_shards > 1:
datasets = [dataset.shard(num_shards, shard_index) for dataset in datasets]
weights = normalize_weights(datasets, weights=weights, sizes=dataset_size)
datasets = [dataset.repeat() for dataset in datasets]
dataset = tf.data.experimental.sample_from_datasets(datasets, weights=weights)
if shuffle_buffer_size is not None and shuffle_buffer_size != 0:
if shuffle_buffer_size < 0:
raise ValueError(
"shuffle_buffer_size < 0 is not compatible with weighted datasets"
)
dataset = dataset.shuffle(shuffle_buffer_size)
return dataset
def _make_single_dataset(dataset):
if num_shards > 1:
dataset = dataset.shard(num_shards, shard_index)
if shuffle_buffer_size is not None and shuffle_buffer_size != 0:
dataset = dataset.apply(
shuffle_dataset(shuffle_buffer_size, dataset_size=dataset_size)
)
return dataset
def _pipeline(dataset):
if isinstance(dataset, tuple):
dataset, weights = dataset
else:
weights = None
is_weighted_dataset = isinstance(dataset, list)
if is_weighted_dataset:
dataset = _make_weighted_dataset(dataset, weights)
else:
dataset = _make_single_dataset(dataset)
if process_fn is not None:
dataset = dataset.map(process_fn, num_parallel_calls=num_threads or 4)
if transform_fns is not None:
for transform_fn in transform_fns:
dataset = dataset.apply(transform_fn)
dataset = dataset.apply(
filter_examples_by_length(
maximum_features_length=maximum_features_length,
maximum_labels_length=maximum_labels_length,
features_length_fn=features_length_fn,
labels_length_fn=labels_length_fn,
)
)
dataset = dataset.apply(
batch_sequence_dataset(
batch_size,
batch_type=batch_type,
batch_multiplier=batch_multiplier,
batch_size_multiple=batch_size_multiple,
length_bucket_width=length_bucket_width,
length_fn=[features_length_fn, labels_length_fn],
)
)
dataset = dataset.apply(filter_irregular_batches(batch_multiplier))
if not single_pass:
if not is_weighted_dataset: # Weighted dataset is repeated before sampling.
dataset = dataset.repeat()
else:
dataset = dataset.apply(make_cardinality_multiple_of(cardinality_multiple))
dataset = dataset.prefetch(prefetch_buffer_size)
return dataset
return _pipeline
def inference_pipeline(
    batch_size,
    batch_type="examples",
    process_fn=None,
    transform_fns=None,
    length_bucket_width=None,
    length_fn=None,
    num_threads=None,
    prefetch_buffer_size=None,
):
    """Transformation that applies dataset operations for inference.

    Example:

      >>> dataset = dataset.apply(opennmt.data.inference_pipeline(...))

    Args:
      batch_size: The batch size to use.
      batch_type: The batching strategy to use: can be "examples" or "tokens".
      process_fn: The processing function to apply on each element.
      transform_fns: List of dataset transformation functions (applied after
        :obj:`process_fn` if defined).
      length_bucket_width: The width of the length buckets to select batch
        candidates from. If set, this means the inference pipeline will be
        reordered based on the examples length, the application is then
        responsible to restore the predictions in order. An "index" key will be
        inserted in the examples dictionary.
      length_fn: A function mapping features to a sequence length.
      num_threads: The number of elements processed in parallel.
      prefetch_buffer_size: The number of batches to prefetch asynchronously. If
        ``None``, use an automatically tuned value.

    Returns:
      A ``tf.data.Dataset`` transformation.

    Raises:
      ValueError: if :obj:`length_bucket_width` is set but not :obj:`length_fn`.
      ValueError: if :obj:`length_bucket_width` is set but the dataset does not
        output a dictionary structure.
    """

    def _inject_index(index, x):
        # Mapped over an enumerated dataset, so each element arrives as
        # (index, element).  The "index" key lets the caller restore the
        # original example order after length-based reordering.
        if isinstance(x, tuple):
            features = x[0]
        else:
            features = x
        # Mutates the features dict in place; x still references it, so
        # returning x preserves the injected key.
        features["index"] = index
        return x

    def _pipeline(dataset):
        if process_fn is not None:
            dataset = dataset.map(process_fn, num_parallel_calls=num_threads)
        if transform_fns is not None:
            for transform_fn in transform_fns:
                dataset = dataset.apply(transform_fn)
        if length_bucket_width is not None and length_bucket_width > 0:
            # Length-based reordering: bucket examples of similar length
            # together to reduce padding during batched inference.
            if length_fn is None:
                raise ValueError("length_fn is required when reordering by length")
            output_shapes = _get_output_shapes(dataset)
            if isinstance(output_shapes, tuple):
                # Parallel datasets need one length function per feature set.
                num_length_fn = (
                    len(length_fn) if isinstance(length_fn, (list, tuple)) else 1
                )
                if len(output_shapes) != num_length_fn:
                    raise ValueError(
                        "The dataset outputs %d parallel features, but got %d "
                        "length functions" % (len(output_shapes), num_length_fn)
                    )
                output_shapes = output_shapes[0]
            if not isinstance(output_shapes, dict):
                raise ValueError(
                    "Reordering by length expects dataset elements to be Python dicts"
                )
            dataset = dataset.enumerate()
            dataset = dataset.map(_inject_index)
            dataset = dataset.apply(
                batch_sequence_dataset(
                    batch_size,
                    batch_type=batch_type,
                    length_bucket_width=length_bucket_width,
                    length_fn=length_fn,
                )
            )
        else:
            dataset = dataset.apply(batch_dataset(batch_size))
        dataset = dataset.prefetch(prefetch_buffer_size)
        return dataset
return | |
the node must have all of the
properties defined in the frame.
:param state: the current framing state.
:param subject: the subject to check.
:param frame: the frame to check.
:param flags: the frame flags.
:return: True if the subject matches, False if not.
"""
# check ducktype
wildcard = True
matches_some = False
for k, v in sorted(frame.items()):
match_this = False
node_values = JsonLdProcessor.get_values(subject, k)
is_empty = len(v) == 0
if k == '@id':
# if @id is not a wildcard and is not empty, then match
# or not on specific value
if len(frame['@id']) == 0 or _is_empty_object(frame['@id'][0]):
match_this = True
elif frame['@id']:
match_this = node_values[0] in frame['@id']
if not flags['requireAll']:
return match_this
elif k == '@type':
wildcard = False
if is_empty:
if len(node_values) > 0:
# don't match on no @type
return False
match_this = True
elif len(frame['@type']) == 1 and _is_empty_object(frame['@type'][0]):
match_this = len(node_values) > 0
else:
# match on a specific @type
for tv in frame['@type']:
if _is_object(tv) and '@default' in tv:
# match on default object
match_this = True
elif not match_this:
match_this = tv in node_values
if not flags['requireAll']:
return match_this
elif _is_keyword(k):
continue
else:
# force a copy of this frame entry so it can be manipulated
this_frame = JsonLdProcessor.get_values(frame, k)
this_frame = this_frame[0] if this_frame else None
has_default = False
if this_frame:
self._validate_frame([this_frame])
has_default = '@default' in this_frame
# no longer a wildcard pattern if frame has any non-keyword
# properties
wildcard = False
# skip, but allow match if node has no value for property, and
# frame has a default value
if not node_values and has_default:
continue
# if frame value is empty, don't match if subject has any value
if node_values and is_empty:
return False
if this_frame is None:
# node does not match if values is not empty and the value of
# property in frame is match none.
if node_values:
return False
match_this = True
else:
if _is_list(this_frame):
list_value = this_frame['@list'][0] if this_frame['@list'] else None
if _is_list(node_values[0] if node_values else None):
node_list_values = node_values[0]['@list']
if _is_value(list_value):
match_this = any(
self._value_match(list_value, lv) for lv in node_list_values)
elif _is_subject(list_value) or _is_subject_reference(list_value):
match_this = any(
self._node_match(state, list_value, lv, flags) for lv in node_list_values)
elif _is_value(this_frame):
# match on any matching value
match_this = any(self._value_match(this_frame, nv) for nv in node_values)
elif _is_subject_reference(this_frame):
match_this = any(
self._node_match(state, this_frame, nv, flags) for nv in node_values)
elif _is_object(this_frame):
match_this = len(node_values) > 0
else:
match_this = False
# all non-defaulted values must match if requireAll is set
if not match_this and flags['requireAll']:
return False
matches_some = matches_some or match_this
# return true if wildcard or subject matches some properties
return wildcard or matches_some
    def _remove_embed(self, state, id_):
        """
        Removes an existing embed, replacing the embedded node with a plain
        node reference ``{'@id': ...}`` and pruning any embeds that dangle
        as a result.

        :param state: the current framing state.
        :param id_: the @id of the embed to remove.
        """
        # get existing embed
        embeds = state['uniqueEmbeds'][state['graph']]
        embed = embeds[id_]
        # NOTE: 'property' shadows the builtin; kept for API/readability parity
        # with the rest of this processor.
        property = embed['property']
        # create reference to replace embed
        subject = {'@id': id_}
        # remove existing embed
        if _is_array(embed['parent']):
            # parent is a list of nodes: replace the matching entry in place
            # with the bare reference
            for i, parent in enumerate(embed['parent']):
                if JsonLdProcessor.compare_values(parent, subject):
                    embed['parent'][i] = subject
                    break
        else:
            # parent is a node object: remove the embedded value under
            # 'property' and re-add the bare reference, preserving whether the
            # property held an array
            use_array = _is_array(embed['parent'][property])
            JsonLdProcessor.remove_value(
                embed['parent'], property, subject,
                {'propertyIsArray': use_array})
            JsonLdProcessor.add_value(
                embed['parent'], property, subject,
                {'propertyIsArray': use_array})
        # recursively remove dependent dangling embeds
        def remove_dependents(id_):
            # get embed keys as a separate array to enable deleting keys
            # in map (iterkeys is the Python 2 spelling; keys() the Python 3 one)
            try:
                ids = list(embeds.iterkeys())
            except AttributeError:
                ids = list(embeds.keys())
            # NOTE: loop variable 'next' shadows the builtin.
            for next in ids:
                if (next in embeds and
                        _is_object(embeds[next]['parent']) and
                        '@id' in embeds[next]['parent'] and  # could be @list
                        embeds[next]['parent']['@id'] == id_):
                    del embeds[next]
                    remove_dependents(next)
        remove_dependents(id_)
def _add_frame_output(self, parent, property, output):
"""
Adds framing output to the given parent.
:param parent: the parent to add to.
:param property: the parent property.
:param output: the output to add.
"""
if _is_object(parent):
JsonLdProcessor.add_value(
parent, property, output, {'propertyIsArray': True})
else:
parent.append(output)
def _node_match(self, state, pattern, value, flags):
"""
Node matches if it is a node, and matches the pattern as a frame.
:param state: the current framing state.
:param pattern: used to match value.
:param value: to check.
:param flags: the frame flags.
"""
if '@id' not in value:
return False
node_object = state['subjects'][value['@id']]
return node_object and self._filter_subject(state, node_object, pattern, flags)
def _value_match(self, pattern, value):
"""
Value matches if it is a value and matches the value pattern
- `pattern` is empty
- @values are the same, or `pattern[@value]` is a wildcard,
- @types are the same or `value[@type]` is not None
and `pattern[@type]` is `{}` or `value[@type]` is None
and `pattern[@type]` is None or `[]`, and
- @languages are the same or `value[@language]` is not None
and `pattern[@language]` is `{}`, or `value[@language]` is None
and `pattern[@language]` is None or `[]`
:param pattern: used to match value.
:param value: to check.
"""
v1, t1, l1 = value.get('@value'), value.get('@type'), value.get('@language')
v2 = JsonLdProcessor.get_values(pattern, '@value')
t2 = JsonLdProcessor.get_values(pattern, '@type')
l2 = JsonLdProcessor.get_values(pattern, '@language')
if not v2 and not t2 and not l2:
return True
if not (v1 in v2 or _is_empty_object(v2[0])):
return False
if not ((not t1 and not t2) or (t1 in t2) or (t1 and t2 and _is_empty_object(t2[0]))):
return False
if not ((not l1 and not l2) or (l1 in l2) or (l1 and l2 and _is_empty_object(l2[0]))):
return False
return True
def _cleanup_preserve(self, input_, options):
"""
Removes the @preserve keywords as the last step of the framing
algorithm.
:param input_: the framed, compacted output.
:param options: the compaction options used.
:return: the resulting output.
"""
# recurse through arrays
if _is_array(input_):
output = []
for e in input_:
result = self._cleanup_preserve(e, options)
# drop Nones from arrays
# XXX needed?
if result is not None:
output.append(result)
return output
elif _is_object(input_):
# remove @preserve
if '@preserve' in input_:
#if input_['@preserve'] == '@null':
# return None
return input_['@preserve'][0]
# skip @values
if _is_value(input_):
return input_
# recurse through @lists
if _is_list(input_):
input_['@list'] = self._cleanup_preserve(
input_['@list'], options)
return input_
# handle in-memory linked nodes
if '@id' in input_:
id_ = input_['@id']
if id_ in options['link']:
try:
idx = options['link'][id_].index(input_)
# already visited
return options['link'][id_][idx]
except:
# prevent circular visitation
options['link'][id_].append(input_)
else:
# prevent circular visitation
options['link'][id_] = [input_]
# potentially remove the id, if it is an unreferenced bnode
if input_.get('@id') in options['bnodesToClear']:
input_.pop('@id')
# recurse through properties
for prop, v in input_.items():
input_[prop] = self._cleanup_preserve(v, options)
return input_
def _cleanup_null(self, input_, options):
"""
Replace '@null' with None, removing it from arrays.
:param input_: the framed, compacted output.
:param options: the compaction options used.
:return: the resulting output.
"""
# recurse through arrays
if _is_array(input_):
no_nulls = [self._cleanup_null(v, options) for v in input_]
return [v for v in no_nulls if v is not None]
if input_ == '@null':
return None
if _is_object(input_):
# handle in-memory linked nodes
if '@id' in input_:
id_ = input_['@id']
if id_ in options['link']:
try:
idx = options['link'][id_].index(input_)
# already visited
return options['link'][id_][idx]
except:
# prevent circular visitation
options['link'][id_].append(input_)
else:
# prevent circular visitation
options['link'][id_] = [input_]
for prop, v in input_.items():
input_[prop] = self._cleanup_null(v, options)
return input_
def _select_term(
self, active_ctx, iri, value, containers,
type_or_language, type_or_language_value):
"""
Picks the preferred compaction term from the inverse context entry.
:param active_ctx: the active context.
:param iri: the IRI to pick the term for.
:param value: the value to pick the term for.
:param containers: the preferred containers.
:param type_or_language: either '@type' or '@language'.
:param type_or_language_value: the preferred value for '@type' or
'@language'
:return: the preferred term.
"""
if type_or_language_value is None:
type_or_language_value = '@null'
# preferred options for the value of @type or language
prefs = []
# determine prefs for @id based on whether value compacts to term
if ((type_or_language_value == '@id' or
type_or_language_value == '@reverse') and
_is_object(value) and '@id' in value):
# prefer @reverse first
if type_or_language_value == '@reverse':
prefs.append('@reverse')
# try to compact value to a | |
from cgitb import small
import pygame
import button
import random
import webbrowser
import display_functions # this will store dark/light mode, music manipulation, etc.
from pygame import mixer
from time import time
import math
from backend_functions import *
from checkDouble import *
# initialize pygame and fonts
pygame.init()
pygame.font.init()
# setting up the window
width = 450
height = 600
screen = pygame.display.set_mode([width, height])
pygame.display.set_caption('VORDLE')
pygame.mouse.set_visible(True)
pointerImg = pygame.image.load('vishy_pointerImg.png')
pointerImg = pygame.transform.scale(pointerImg, (25,35))
pointerImg_rect = pointerImg.get_rect()
pointerImg_rect.size = (25,35)
# game variables
guess = ''        # letters typed so far in the current turn
turn = 0          # current board row (0-5)
guess_list = []   # previously submitted guesses
result_list = []  # per-guess score lists; codes colored by show_result (0/1/2)
result = []
# word lists
word_list = create_wordpick_array()
all_words = create_wordcheck_array(word_list)
# pick word from list
word = pick_random_word(word_list, 0.2)
# background music: pick one random track and loop it forever (-1)
pygame.mixer.init()
L = ['fairytale.mp3', 'island.mp3', 'ittybitty.mp3', 'kawai.mp3', 'monkeys.mp3','sunshine.mp3', 'vacation.mp3', 'waltz.mp3', 'weasel.mp3']
track = random.choice(L)
pygame.mixer.music.load(track)
mixer.music.play(-1)
# window colors
black = (0,0,0)
white = (255,255,255)
gray = (150,150,150)
title_green = (60,186,84)
title_yellow = (244,194,13)
red = (219,50,54)
blue = (72,133,237)
box_green =(106,172,100)
box_yellow = (204, 180, 84)
background = (18,18,19)
dark_gray = (58,58,60)
off_white = (200,200,200)
# Vishnu confetti: random starting positions for the win animation sprites
Vishies = []
for q in range(100):
    x = random.randrange(0, width)
    y = random.randrange(0, height)
    Vishies.append([x, y])
vishnu_img = pygame.image.load("vishy_pointerImg.png")
vishnu_img = pygame.transform.scale(vishnu_img, (25, 25))
vishnu_confetti = vishnu_img.get_rect()
vishnu_confetti.size = (10, 10)
# load button images
stats_img = pygame.image.load('stats.png').convert_alpha()
stats_button = button.Button(416, 8, stats_img, 0.38)
settings_img = pygame.image.load('settings.png').convert_alpha()
settings_button = button.Button(442, 8, settings_img, 0.42)
# initialize game board boxes (6 rows x 5 letters)
turn = 0
boxes = [[" ", " ", " ", " ", " "],
         [" ", " ", " ", " ", " "],
         [" ", " ", " ", " ", " "],
         [" ", " ", " ", " ", " "],
         [" ", " ", " ", " ", " "],
         [" ", " ", " ", " ", " "]]
# keyboard rows; NOTE(review): keys2/keys3 hold single '<KEY>' placeholders
# here although draw_keys2/draw_keys3 index 9 and 7 entries — presumably the
# real lists contain the asdfghjkl / zxcvbnm rows; confirm.
keys1 = ['q', 'w', 'e', 'r', 't', 'y', 'u', 'i', 'o', 'p']
keys2 = ['<KEY>']
keys3 = ['<KEY>']
# frame rate
fps = 60
timer = pygame.time.Clock()
# initialize font, create title (Google themed because Uno got a job at Google!)
game_font = pygame.font.Font('freesansbold.ttf', 15)
title_font = pygame.font.Font('freesansbold.ttf', 30)
tiny_font = pygame.font.Font('freesansbold.ttf', 12)
# NOTE(review): 'pg' is not imported in this file; presumably it comes from
# the star import of backend_functions — confirm.
guess_font = pg.font.Font('freesansbold.ttf', 34)
correct_font =pg.font.Font('freesansbold.ttf', 30)
title1 = title_font.render('V', True, blue)
titleRect1 = title1.get_rect()
titleRect1.center = (167, 23)
title2 = title_font.render('O', True, red)
titleRect2 = title2.get_rect()
titleRect2.center = (189, 23)
title3 = title_font.render('R', True, title_yellow)
titleRect3 = title3.get_rect()
titleRect3.center = (214, 23)
title4 = title_font.render('D', True, blue)
titleRect4 = title4.get_rect()
titleRect4.center = (236, 23)
title5 = title_font.render('L', True, title_green)
titleRect5 = title5.get_rect()
titleRect5.center = (257, 23)
title6 = title_font.render('E', True, red)
titleRect6 = title6.get_rect()
titleRect6.center = (278, 23)
# games functions
def draw_boxes():
    '''
    Draws the empty 5x6 letter-box grid (2px white outlines) to the screen.

    Returns: None
    '''
    # The previous "global turn" / "global boxes" declarations were unused
    # (nothing here reads or assigns them) and have been removed.
    for col in range(5):
        for row in range(6):
            pygame.draw.rect(screen, white, [col * 75 + 49, row * 75 + 45, 53, 53], 2)
def draw_boxes_row(line):
    '''
    Draws a single row of boxes to the screen.

    Args: line: a list of the form range(line to draw, line to draw + 1)
    Returns: None
    '''
    # Outline every cell of each requested row (cells do not overlap, so
    # draw order is irrelevant).
    for row in line:
        for col in range(5):
            cell = [col * 75 + 49, row * 75 + 45, 53, 53]
            pygame.draw.rect(screen, white, cell, 2)
# draw keyboard line by line
def draw_keys1():
    '''
    Draw the first (top) line of the on-screen keyboard.

    Returns: None
    '''
    # Unused "global turn" and needless "global keys1" (read-only access
    # requires no declaration) removed.
    for col in range(10):
        pygame.draw.rect(screen, gray, [col * 30 + 75, 488, 23, 30], border_radius=7)
        keys_text = game_font.render(keys1[col], True, white)
        screen.blit(keys_text, (col * 30 + 83, 497))
def draw_keys2():
    '''
    Draw the second (middle) line of the on-screen keyboard.

    Returns: None
    '''
    # Unused "global turn" and needless "global keys2" removed.
    # NOTE(review): this indexes 9 entries of keys2, which above holds a
    # single '<KEY>' placeholder — confirm the real key list has 9 letters.
    for col in range(9):
        pygame.draw.rect(screen, gray, [col * 30 + 90, 525, 23, 30], border_radius=7)
        keys_text = game_font.render(keys2[col], True, white)
        screen.blit(keys_text, (col * 30 + 99, 533))
def draw_keys3():
    '''
    Draw the third (bottom) line of the on-screen keyboard.

    Returns: None
    '''
    # Unused "global turn" and needless "global keys3" removed.
    # NOTE(review): indexes 7 entries of keys3 (a '<KEY>' placeholder above) —
    # confirm the real key list has 7 letters.
    for col in range(7):
        pygame.draw.rect(screen, gray, [col * 30 + 120, 563, 23, 30], border_radius=7)
        keys_text = game_font.render(keys3[col], True, white)
        screen.blit(keys_text, (col * 30 + 128, 573))
def change_box_color(color, box):
    '''
    Change the color of a single box on the screen.

    Args: color: a tuple
          box: a tuple (row, col), index location of the box to be changed
    Returns: None
    '''
    row, col = box
    rect = [col * 75 + 49, row * 75 + 45, 53, 53]
    pygame.draw.rect(screen, color, rect)
def change_key_color(color, letter):
    '''
    Changes the color of a single key on the keyboard.

    Args: color: a tuple
          letter: a string, the letter on the key to be changed
    Returns: None
    '''
    # The three near-identical per-row blocks are collapsed into one loop
    # over a layout table: (keys, key x-offset, key y, text x-offset, text y).
    # Offsets match draw_keys1/draw_keys2/draw_keys3 exactly.
    rows = [
        (keys1, 75, 488, 83, 497),
        (keys2, 90, 525, 99, 533),
        (keys3, 120, 563, 128, 573),
    ]
    for keys, key_x, key_y, text_x, text_y in rows:
        if letter not in keys:
            continue
        for i, key in enumerate(keys):
            if key == letter:
                pygame.draw.rect(screen, color, [i * 30 + key_x, key_y, 23, 30], border_radius=7)
                keys_text = game_font.render(key, True, white)
                screen.blit(keys_text, (i * 30 + text_x, text_y))
def print_guess(guess, turn):
    '''
    Display the user's guess on the screen in the correct row.

    Args: guess: a string
          turn: an int. Determines which row to print the guess to
    Returns: None
    '''
    row_y = turn * 75 + 55
    for pos, ch in enumerate(guess):
        rendered = guess_font.render(ch.upper(), True, white)
        screen.blit(rendered, (pos * 75 + 63, row_y, 53, 53))
    pygame.display.flip()
def show_result(result, turn, guess):
    '''
    Change the box and key colors to the correct color when the user submits their guess.
    Re-print guess after colors are changed.

    Args: result: a list. Determines the color to change to (0, 1 or 2 per letter).
          turn: an int. Determines the row.
          guess: a string. Determines which keyboard letter to change the color of.
    Returns: None
    '''
    # Fixes: the mixed pg/pygame aliases are unified to pygame, and the
    # chain of independent ifs over the mutually-exclusive codes 0/1/2 is
    # now if/elif.  Per-branch call order is preserved.
    for i in range(len(result)):
        if result[i] == 0:
            change_box_color(dark_gray, (turn, i))
            change_key_color(dark_gray, guess[i])
            pygame.display.flip()
        elif result[i] == 1:
            change_box_color(box_yellow, (turn, i))
            pygame.display.flip()
            change_key_color(box_yellow, guess[i])
        elif result[i] == 2:
            change_box_color(box_green, (turn, i))
            pygame.display.flip()
            change_key_color(box_green, guess[i])
    print_guess(guess, turn)
def screen_fill():
    '''
    Initializes the screen with background, game board, keyboard, and previous guesses/results.
    '''
    # Unused "global" declarations (read-only access) and the turn_list
    # indirection (turn_list[i] was always i) removed.
    screen.fill(background)
    # draw game board
    draw_boxes()
    draw_keys1()
    draw_keys2()
    draw_keys3()
    # display previous guesses and fill boxes
    for i in range(len(guess_list)):
        show_result(result_list[i], i, guess_list[i])
    # show title text
    screen.blit(title1, titleRect1)
    screen.blit(title2, titleRect2)
    screen.blit(title3, titleRect3)
    screen.blit(title4, titleRect4)
    screen.blit(title5, titleRect5)
    screen.blit(title6, titleRect6)
# stats button
def stats():
'''
Initializes the stats screen when the stats button is pressed.
'''
# initiate screen
width = 375
height = 300
screen2 = pygame.display.set_mode([width, height])
pygame.display.set_caption('STATISTICS')
# statistics title
stats_font = pygame.font.Font('freesansbold.ttf', 15)
stats_title = stats_font.render('STATISTICS', True, white, background)
statsRect1 = stats_title.get_rect()
statsRect1.center = (width // 2, height - 255)
# number of games played
display_font = pygame.font.Font('freesansbold.ttf', 30)
small_font = pygame.font.Font('freesansbold.ttf', 9)
stats_played = small_font.render('Played', True, white, background)
statsRect2 = stats_played.get_rect()
statsRect2.center = (width // 2 - 100, height - 195)
games_played = display_font.render(str(get_number_of_games()), True, white, background)
games_playedRect = games_played.get_rect()
games_playedRect.center = (width // 2 - 100, height - 220)
# percentage of player's wins
stats_wins = small_font.render('Win %', True, white, background)
statsRect3 = stats_wins.get_rect()
statsRect3.center = (width // 2 - 50, height - 195)
percent = display_font.render(str(get_win_percentage()), True, white, background)
percentRect = percent.get_rect()
percentRect.center = (width // 2 -50, height - 220)
# current streak
stats_current = small_font.render('Current', True, white, background)
statsRect4 = stats_current.get_rect()
statsRect4.center = (width // 2, height - 195)
stats_current2 = small_font.render('Streak', True, white, background)
statsRect5 = stats_current2.get_rect()
statsRect5.center = (width // 2, height - 186)
current_streak = display_font.render(str(get_current_streak()), True, white, background)
current_streakRect = current_streak.get_rect()
current_streakRect.center = (width // 2, height - 220)
# max streak
stats_max = small_font.render('Max', True, white, background)
statsRect6 = stats_max.get_rect()
statsRect6.center = (width // 2 + 50, height - 195)
stats_max2 = small_font.render('Streak', True, white, background)
statsRect7 = stats_max2.get_rect()
statsRect7.center = (width // 2 + 50, height - 186)
max_streak = display_font.render(str(get_longest_streak()), True, white, background)
max_streakRect = max_streak.get_rect()
max_streakRect.center = (width // 2 + 50, height - 220)
# best/fastest time
time_font = pygame.font.Font('freesansbold.ttf', 17)
stats_time = small_font.render('Fastest', True, white, background)
statsRect8 = stats_time.get_rect()
statsRect8.center = (width // 2 + 100, height - 195)
stats_time2 = small_font.render('Time', True, white, background)
statsRect9 = stats_time2.get_rect()
statsRect9.center = (width // 2 + 100, height - 186)
fastest = time_font.render(str(get_fastest_time()), True, white, background)
fastestRect = fastest.get_rect()
fastestRect.center = (width // 2 + 100, height - 220)
# | |
"== 0 and input is sparse. Provide a dense "
"array instead."
)
else:
self.statistics_ = self._sparse_fit(
X, self.strategy, self.missing_values, fill_value
)
else:
self.statistics_ = self._dense_fit(
X, self.strategy, self.missing_values, fill_value
)
return self
    def _sparse_fit(self, X, strategy, missing_values, fill_value):
        """Fit the transformer on sparse data.

        Computes one statistic per column of the CSC matrix `X` according to
        `strategy`, treating both explicit and implicit (stored) zeros as
        zero values.
        """
        missing_mask = _get_mask(X, missing_values)
        mask_data = missing_mask.data
        # zeros not stored in the sparse structure, per column
        n_implicit_zeros = X.shape[0] - np.diff(X.indptr)
        statistics = np.empty(X.shape[1])
        if strategy == "constant":
            # for constant strategy, self.statistics_ is used to store
            # fill_value in each column
            statistics.fill(fill_value)
        else:
            for i in range(X.shape[1]):
                # stored values and their missing-mask for column i
                column = X.data[X.indptr[i] : X.indptr[i + 1]]
                mask_column = mask_data[X.indptr[i] : X.indptr[i + 1]]
                column = column[~mask_column]
                # combine explicit and implicit zeros
                mask_zeros = _get_mask(column, 0)
                column = column[~mask_zeros]
                n_explicit_zeros = mask_zeros.sum()
                n_zeros = n_implicit_zeros[i] + n_explicit_zeros
                if strategy == "mean":
                    s = column.size + n_zeros
                    # all-missing column -> NaN (dropped later in transform)
                    statistics[i] = np.nan if s == 0 else column.sum() / s
                elif strategy == "median":
                    statistics[i] = _get_median(column, n_zeros)
                elif strategy == "most_frequent":
                    statistics[i] = _most_frequent(column, 0, n_zeros)
        super()._fit_indicator(missing_mask)
        return statistics
    def _dense_fit(self, X, strategy, missing_values, fill_value):
        """Fit the transformer on dense data.

        Returns one statistic per column according to `strategy`; columns
        with no observed values yield NaN (except for "constant").
        """
        missing_mask = _get_mask(X, missing_values)
        masked_X = ma.masked_array(X, mask=missing_mask)
        super()._fit_indicator(missing_mask)
        # Mean
        if strategy == "mean":
            mean_masked = np.ma.mean(masked_X, axis=0)
            # Avoid the warning "Warning: converting a masked element to nan."
            mean = np.ma.getdata(mean_masked)
            mean[np.ma.getmask(mean_masked)] = np.nan
            return mean
        # Median
        elif strategy == "median":
            median_masked = np.ma.median(masked_X, axis=0)
            # Avoid the warning "Warning: converting a masked element to nan."
            median = np.ma.getdata(median_masked)
            median[np.ma.getmaskarray(median_masked)] = np.nan
            return median
        # Most frequent
        elif strategy == "most_frequent":
            # Avoid use of scipy.stats.mstats.mode due to the required
            # additional overhead and slow benchmarking performance.
            # See Issue 14325 and PR 14399 for full discussion.
            # To be able access the elements by columns
            X = X.transpose()
            mask = missing_mask.transpose()
            if X.dtype.kind == "O":
                most_frequent = np.empty(X.shape[0], dtype=object)
            else:
                most_frequent = np.empty(X.shape[0])
            for i, (row, row_mask) in enumerate(zip(X[:], mask[:])):
                # keep only the observed entries of this column
                row_mask = np.logical_not(row_mask).astype(bool)
                row = row[row_mask]
                most_frequent[i] = _most_frequent(row, np.nan, 0)
            return most_frequent
        # Constant
        elif strategy == "constant":
            # for constant strategy, self.statistics_ is used to store
            # fill_value in each column
            return np.full(X.shape[1], fill_value, dtype=X.dtype)
    def transform(self, X):
        """Impute all missing values in X.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            The input data to complete.

        Returns
        -------
        X_imputed : {ndarray, sparse matrix} of shape \
                (n_samples, n_features_out)
            `X` with imputed values.
        """
        check_is_fitted(self)
        X = self._validate_input(X, in_fit=False)
        statistics = self.statistics_
        if X.shape[1] != statistics.shape[0]:
            raise ValueError(
                "X has %d features per sample, expected %d"
                % (X.shape[1], self.statistics_.shape[0])
            )
        # compute mask before eliminating invalid features
        missing_mask = _get_mask(X, self.missing_values)
        # Delete the invalid columns if strategy is not constant
        # (a NaN statistic marks a column with no observed values at fit time)
        if self.strategy == "constant":
            valid_statistics = statistics
            valid_statistics_indexes = None
        else:
            # same as np.isnan but also works for object dtypes
            invalid_mask = _get_mask(statistics, np.nan)
            valid_mask = np.logical_not(invalid_mask)
            valid_statistics = statistics[valid_mask]
            valid_statistics_indexes = np.flatnonzero(valid_mask)
            if invalid_mask.any():
                missing = np.arange(X.shape[1])[invalid_mask]
                if self.verbose:
                    warnings.warn(
                        "Deleting features without observed values: %s" % missing
                    )
                X = X[:, valid_statistics_indexes]
        # Do actual imputation
        if sp.issparse(X):
            if self.missing_values == 0:
                raise ValueError(
                    "Imputation not possible when missing_values "
                    "== 0 and input is sparse. Provide a dense "
                    "array instead."
                )
            else:
                # if no invalid statistics are found, use the mask computed
                # before, else recompute mask
                if valid_statistics_indexes is None:
                    mask = missing_mask.data
                else:
                    mask = _get_mask(X.data, self.missing_values)
                # column index of each stored entry, filtered to missing ones
                indexes = np.repeat(
                    np.arange(len(X.indptr) - 1, dtype=int), np.diff(X.indptr)
                )[mask]
                X.data[mask] = valid_statistics[indexes].astype(X.dtype, copy=False)
        else:
            # use mask computed before eliminating invalid mask
            if valid_statistics_indexes is None:
                mask_valid_features = missing_mask
            else:
                mask_valid_features = missing_mask[:, valid_statistics_indexes]
            # expand each column statistic to all of its missing positions
            n_missing = np.sum(mask_valid_features, axis=0)
            values = np.repeat(valid_statistics, n_missing)
            coordinates = np.where(mask_valid_features.transpose())[::-1]
            X[coordinates] = values
        X_indicator = super()._transform_indicator(missing_mask)
        return super()._concatenate_indicator(X, X_indicator)
    def inverse_transform(self, X):
        """Convert the data back to the original representation.

        Inverts the `transform` operation performed on an array.
        This operation can only be performed after :class:`SimpleImputer` is
        instantiated with `add_indicator=True`.

        Note that ``inverse_transform`` can only invert the transform in
        features that have binary indicators for missing values. If a feature
        has no missing values at ``fit`` time, the feature won't have a binary
        indicator, and the imputation done at ``transform`` time won't be
        inverted.

        .. versionadded:: 0.24

        Parameters
        ----------
        X : array-like of shape \
                (n_samples, n_features + n_features_missing_indicator)
            The imputed data to be reverted to original data. It has to be
            an augmented array of imputed data and the missing indicator mask.

        Returns
        -------
        X_original : ndarray of shape (n_samples, n_features)
            The original X with missing values as it was prior
            to imputation.
        """
        check_is_fitted(self)
        if not self.add_indicator:
            raise ValueError(
                "'inverse_transform' works only when "
                "'SimpleImputer' is instantiated with "
                "'add_indicator=True'. "
                f"Got 'add_indicator={self.add_indicator}' "
                "instead."
            )
        # split X back into the imputed-data part and the indicator part
        n_features_missing = len(self.indicator_.features_)
        non_empty_feature_count = X.shape[1] - n_features_missing
        array_imputed = X[:, :non_empty_feature_count].copy()
        missing_mask = X[:, non_empty_feature_count:].astype(bool)
        n_features_original = len(self.statistics_)
        shape_original = (X.shape[0], n_features_original)
        X_original = np.zeros(shape_original)
        # scatter the indicator columns back to their original positions
        X_original[:, self.indicator_.features_] = missing_mask
        full_mask = X_original.astype(bool)
        # walk the original feature axis, copying each imputed column into the
        # next original slot that is not fully masked
        imputed_idx, original_idx = 0, 0
        while imputed_idx < len(array_imputed.T):
            if not np.all(X_original[:, original_idx]):
                X_original[:, original_idx] = array_imputed.T[imputed_idx]
                imputed_idx += 1
                original_idx += 1
            else:
                original_idx += 1
        # re-insert the missing-value placeholder where the mask says so
        X_original[full_mask] = self.missing_values
        return X_original
class MissingIndicator(TransformerMixin, BaseEstimator):
"""Binary indicators for missing values.
Note that this component typically should not be used in a vanilla
:class:`Pipeline` consisting of transformers and a classifier, but rather
could be added using a :class:`FeatureUnion` or :class:`ColumnTransformer`.
Read more in the :ref:`User Guide <impute>`.
.. versionadded:: 0.20
Parameters
----------
missing_values : int, float, string, np.nan or None, default=np.nan
The placeholder for the missing values. All occurrences of
`missing_values` will be imputed. For pandas' dataframes with
nullable integer dtypes with missing values, `missing_values`
should be set to `np.nan`, since `pd.NA` will be converted to `np.nan`.
features : {'missing-only', 'all'}, default='missing-only'
Whether the imputer mask should represent all or a subset of
features.
- If 'missing-only' (default), the imputer mask will only represent
features containing missing values during fit time.
- If 'all', the imputer mask will represent all features.
sparse : bool or 'auto', default='auto'
Whether the imputer mask format should be sparse or dense.
- If 'auto' (default), the imputer mask will be of same type as
input.
- If True, the imputer mask will be a sparse matrix.
- If False, the imputer mask will be a numpy array.
error_on_new : bool, default=True
If True, transform will raise an error when there are features with
missing values in transform that have no missing values in fit. This is
applicable only when `features='missing-only'`.
Attributes
----------
features_ : ndarray, shape (n_missing_features,) or (n_features,)
The features indices which will be returned when calling ``transform``.
They are computed during ``fit``. For ``features='all'``, it is
to ``range(n_features)``.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
Examples
--------
>>> import numpy as np
>>> from sklearn.impute import MissingIndicator
>>> X1 = np.array([[np.nan, 1, 3],
... [4, 0, np.nan],
... [8, 1, 0]])
>>> X2 = np.array([[5, 1, np.nan],
... [np.nan, 2, 3],
... [2, 4, 0]])
>>> indicator = MissingIndicator()
>>> indicator.fit(X1)
MissingIndicator()
>>> X2_tr = indicator.transform(X2)
>>> X2_tr
array([[False, True],
[ True, False],
[False, False]])
"""
    def __init__(
        self,
        *,
        missing_values=np.nan,
        features="missing-only",
        sparse="auto",
        error_on_new=True,
    ):
        # scikit-learn estimator convention: __init__ only stores the
        # constructor parameters verbatim (no validation, no computation).
        self.missing_values = missing_values
        self.features = features
        self.sparse = sparse
        self.error_on_new = error_on_new
def _get_missing_features_info(self, X):
"""Compute the imputer mask and the indices of the features
containing missing values.
Parameters
----------
X : {ndarray or sparse matrix}, shape (n_samples, n_features)
The input data with missing values. Note that ``X`` has been
checked in ``fit`` and ``transform`` before to call this function.
Returns
-------
imputer_mask : {ndarray or sparse matrix}, shape \
(n_samples, n_features)
The imputer mask of the original data.
features_with_missing : ndarray, shape (n_features_with_missing)
The features containing missing values.
"""
if not self._precomputed:
imputer_mask | |
# python imports
import os
import os.path
import sys
import string
import re
from Tkinter import *
from tkFileDialog import *
# dependency imports
sys.path.append('./Dependencies')
import vtk
#import vtkTkImageViewerWidget
import Pmw
# internal imports
import MolecularSystem
import MolecularViewer
import GUICards
sys.path.append(os.getcwd())
import SystemMemory
sys.path.append('./Tools/DatabaseManager')
sys.path.append('./Tools/ConservationTools')
import ConservationTools
import DatabaseDomainViewer
import DatabaseHandlers
#sys.path.append('./Applications/Modification')
#import ModificationViewer
sys.path.append('./Applications/AlignmentEditor')
import TreeSystem
#sys.path.append('./Applications/Dynamics')
#import DynamicsViewer
# Launch an application page inside an existing system/database window.
#   parent            : the calling widget (not used by the loaders themselves)
#   app               : application directory name ('Modification',
#                       'AlignmentEditor', 'Dynamics')
#   selected_title    : title of the notebook page the app is embedded into
#   active_window     : the window object owning application_pages/viewer
#   active_window_key : key into the module-global `systems` dict
#   chain             : protein chain name (AlignmentEditor only)
def load_application(parent, app, selected_title, active_window, active_window_key, chain=None):
    if app == 'Modification':
        # NOTE(review): the ModificationViewer import is commented out above,
        # so this reload() would raise NameError if this branch is ever taken
        # -- confirm whether the Modification app is intentionally disabled.
        reload(ModificationViewer)
        #modification_system = MolecularSystem.ModificationSystem(self.item)
        pass
        #CM_viewer = ModificationViewer.CMViewer(systems[active_window_key],
        #                                        active_window.application_pages[selected_title],
        #                                        active_window.viewer)
    elif app == 'AlignmentEditor':
        print 'loading Alignment Editor for chain %s'%(chain)
        reload(TreeSystem)
        window = active_window.application_pages[selected_title]
        viewer = active_window.viewer
        # `systems` is a module-global mapping of window keys to systems
        alignment_viewer = TreeSystem.TreeSystem(window, systems[active_window_key], None, 'editor', chain, viewer)
        alignment_viewer.pack(expand=YES, fill=BOTH)
    elif app == 'Dynamics':
        # rebalance the panes before embedding the dynamics viewer
        active_window.pw.configurepane('info', size=0.4)
        active_window.pw.configurepane('viewer', size=0.6)
        # NOTE(review): the DynamicsViewer import is commented out above as
        # well -- this branch would also raise NameError; verify before use.
        reload(DynamicsViewer)
        window = active_window.application_pages[selected_title]
        viewer = active_window.viewer
        active_system = systems[active_window_key]
        dynamics_viewer = DynamicsViewer.MDViewer(active_system, window, viewer)
class SPADEGUI(Frame):
    """Top-level SPADE UI frame.

    Builds the menu bar, application listbox, object box and a floating
    command window.  Relies on the module-global Tk root `spade`.
    """
    def __init__(self, parent=None):
        Frame.__init__(self, parent)
        self.master.title('SPADE')
        #self.run_mode = 'demo' # just a thought
        self.run_mode = 'work'
        self.menu = MenuSystem(self)
        self.pack(expand=NO, fill=X)
        # NOTE(review): children are parented to the global `spade` root, not
        # to this frame -- confirm that is intentional.
        self.applicationbox = ApplicationBox(spade)
        self.objectbox = ObjectBox(self.run_mode)
        self.openCommandWindow()
        # the Window menu is added only after the rest of the UI is built
        self.menu.fill_menu()
        self.system_windows = {}    # open system windows, keyed by title
        self.database_windows = {}  # open database windows, keyed by title
    def openCommandWindow(self):
        """Open the floating 'Command Window' toplevel and embed a CodeBox."""
        self.toplevel = Toplevel(spade)
        self.toplevel.title('Command Window')
        border=8
        # size/position the window as a strip along the bottom of the screen
        self.toplevel.top_width=int(0.8*spade.winfo_screenwidth()-2*border)
        self.toplevel.top_height=int(0.182*spade.winfo_screenheight())
        geometry_string = "%dx%d%+d%+d"%(self.toplevel.top_width,
                                         self.toplevel.top_height,
                                         int(0.2*spade.winfo_screenwidth()+border),
                                         int(0.8*0.91*spade.winfo_screenheight())) # width,height,x-offset,y-offset
        self.toplevel.geometry(geometry_string)
        self.codebox = CodeBox(self.toplevel)
class MenuSystem(Frame):
def __init__(self, parent):
""" create the main menu """
Frame.__init__(self, parent)
self.pack(side=TOP, fill=X, expand=NO)
self.parent = parent
# File operations
file_button=Menubutton(self, text='File', underline=0)
file_button.pack(side=LEFT, fill=X, expand=NO)
file=Menu(file_button)
file.add_command(label='Open a PDB', command=self.openNewPDB, underline=0)
file.add_command(label='Open a System', command=self.openNewSystem, underline=0)
file_button.config(menu=file)
c_lambda = lambda: self.openDatabase("user_defined")
file.add_command(label='Open a Database', command=c_lambda, underline=0)
c_lambda = lambda: self.openDatabase("SCOP")
file.add_command(label='Open the SCOP Database', command=c_lambda, underline=0)
c_lambda = lambda: self.openDatabase("new")
file.add_command(label='Create a Database', command=c_lambda, underline=0)
c_lambda = lambda: self.openDatabase("transform")
file.add_command(label='Transform a Database', command=c_lambda, underline=0)
def fill_menu(self):
# window controls
file_button=Menubutton(self, text='Window', underline=0)
file_button.pack(side=LEFT, fill=X, expand=NO)
file=Menu(file_button)
file.add_command(label='Command Window', command=self.parent.openCommandWindow, underline=0)
file_button.config(menu=file)
#file.add_command(label='Object Window', command=self.parent.openObjectWindow, underline=0)
#file_button.config(menu=file)
# script controls
#file_button=Menubutton(self, text='Scripts', underline=0)
#file_button.pack(side=LEFT, fill=X, expand=NO)
#file=Menu(file_button)
#file.add_command(label='Execute', command=self.parent.codebox.executeCurrentScript, underline=0)
#file_button.config(menu=file)
#file.add_command(label='New Script', command=self.parent.codebox.addNewScript, underline=0)
#file_button.config(menu=file)
#file.add_command(label='Close Current Script', command=self.parent.codebox.closeCurrentScript, underline=0)
#file_button.config(menu=file)
#file.add_command(label='Save Current Script', command=self.parent.codebox.saveCurrentScript, underline=0)
#file_button.config(menu=file)
def openDatabase(self, type):
if type == "new" or type == "transform":
# a database transformation draws from one source and supplies a user-defined database.
# Create a window for choosing a target database name and a database source.
tnsf_win = Toplevel(spade)
geometry_string = "%dx%d%+d%+d" %(250,100,400,200) # width,height,x-offset,y-offset
tnsf_win.geometry(geometry_string)
# add an entry form for the target database name
tnsf_win.target_entry = Pmw.EntryField(tnsf_win, labelpos = 'w', label_text = 'Target Database:', validate = None, value='default_database')
tnsf_win.target_entry.pack(side=TOP,fill='x', expand=1, padx=10, pady=5)
# add a button box for the different sources
tnsf_win.source_buttonbox = Pmw.ButtonBox(tnsf_win, labelpos = 'w',label_text='Data Sources:',frame_borderwidth=2, orient='vertical', frame_relief = 'groove')
c_lambda = lambda: DatabaseHandlers.download_from_rcsb(tnsf_win.target_entry.getvalue())
tnsf_win.source_buttonbox.add('PDBs from the RCSB', command = c_lambda)
c_lambda = lambda: DatabaseHandlers.transform_from_pdbs(tnsf_win.target_entry.getvalue())
tnsf_win.source_buttonbox.add('From local PDB files', command = c_lambda)
tnsf_win.source_buttonbox.pack(side=TOP,fill='x', expand=1, padx=10, pady=5)
elif type == "SCOP":
self.parent.objectbox.database_listbox.insert('end',"SCOP")
# reset the selection
for i in range(self.parent.objectbox.database_listbox.size()):
self.parent.objectbox.database_listbox.select_clear(i)
self.parent.objectbox.database_listbox.select_set(len(databases)-1)
elif type == "user_defined":
""" load a new database """
root_path = os.path.normpath("./Databases")
db_path = os.path.normpath(askdirectory(initialdir=root_path, title="Select the Database Directory", mustexist=1))
if len(db_path) > 0:
soln = re.search('.*SPADE.*',db_path)
if not soln:
print "The directory must be within SPADE' Databases subdirectory\n"
self.openNewDatabase() # if it fails, recurse through this function
return
# now remove everything up to 'Databases'. This assumption allows short and meaningful names.
db_token = os.path.normpath(os.path.abspath("./"))
rep_db_path = string.split(db_path, db_token, 1)
databases.append(rep_db_path[1]) # slice out the slash that gets left behind
self.parent.objectbox.database_listbox.insert('end',"%s"%(rep_db_path[1]))
# reset the selection
for i in range(self.parent.objectbox.database_listbox.size()):
self.parent.objectbox.database_listbox.select_clear(i)
self.parent.objectbox.database_listbox.select_set(len(databases)-1)
def openNewSystem(self):
# these two functions should be condensed and create a new system directory from an arbitrary pdb or sps file
pass
def openNewPDB(self):
pass
def closeSystem(self, which_window):
""" destroy the which_window'th system window and delete it from the list
-- superseded by objectbox.close_window()"""
spade.ui.system_windows[which_window].destroy()
if len(spade.ui.system_windows) > which_window+1:
spade.ui.system_windows = spade.ui.system_windows[:which_window] + spade.ui.system_windows[which_windows+1:]
else:
spade.ui.system_windows = spade.ui.system_windows[:which_window]
class ApplicationBox:
def __init__(self, parent):
""" SPADE' ApplicationBox is a simple construct for launching
applications into active windows
"""
self.parent = parent
self.toplevel = spade
screenwidth=spade.top_width
screenheight=spade.top_height
self.applications_listbox = Pmw.ScrolledListBox(self.toplevel,
listbox_exportselection=0,
labelpos='nw',
label_text='applications',
usehullsize=1,
selectioncommand=self.select_target_system)
self.applications_listbox.pack(side=TOP,expand=1,fill='both', anchor=N)
self.applications_available = os.listdir('./Applications')
for application in self.applications_available:
if not application.startswith("README"):
self.applications_listbox.insert('end', '%s'%(application))
self.active_window_key = None
def select_target_system(self, event=None):
# if more than one system_window is open, ask which one to apply to
# can't use a buttonbox here because it disallows use of '_' keys in titles
active_window = None
if len(spade.ui.system_windows.keys()) + len(spade.ui.database_windows.keys()) > 1:
buttonlist = ['Ok']
self.dialog = Pmw.Dialog(self.parent,
buttons = buttonlist,
buttonbox_orient = 'vertical',
defaultbutton = buttonlist[0],
title = 'Select a System Window',
command = self.return_active_window)
self.dialog.withdraw()
# Add some contents to the dialog.
w = Label(self.dialog.interior(),text = 'Select a Window')
w.pack(expand = 1, fill = 'both', padx = 4, pady = 4)
syss = spade.ui.system_windows.keys()
dbs = spade.ui.database_windows.keys()
self.dialog.listbox = Listbox(self.dialog.interior(), exportselection=0)
for sys in syss:
self.dialog.listbox.insert(END, sys)
for db in dbs:
self.dialog.listbox.insert(END, db)
self.dialog.listbox.pack(expand=1, fill='both')
self.dialog.show()
elif len(spade.ui.system_windows.keys()) == 1 or len(spade.ui.database_windows.keys()) == 1:
buttonlist = ['Ok', 'Cancel']
self.launch_decision = 0
self.dialog = Pmw.Dialog(self.parent,
buttons = buttonlist,
buttonbox_orient = 'horizontal',
defaultbutton = buttonlist[0],
title = 'Confirm Application Launch',
command = self.decide_on_launch)
self.dialog.withdraw()
# Add some contents to the dialog.
if len(spade.ui.system_windows.keys()) == 1:
w = Label(self.dialog.interior(),text = 'Launching an application on %s'%(spade.ui.system_windows.keys()[0]))
else:
w = Label(self.dialog.interior(),text = 'Launching an application on %s'%(spade.ui.database_windows.keys()[0]))
w.pack(expand = 1, fill = 'both', padx = 4, pady = 4)
self.dialog.show()
else:
print 'open a system or database first'
def decide_on_launch(self, result):
if result == 'Ok':
if len(spade.ui.system_windows.keys()) == 1:
self.active_window_key = spade.ui.system_windows.keys()[0]
elif len(spade.ui.database_windows.keys()) == 1:
self.active_window_key = spade.ui.database_windows.keys()[0]
self.launchApplication()
self.dialog.destroy()
else:
self.dialog.destroy()
def return_active_window(self, result):
i = self.dialog.listbox.curselection()
if len(i) > 0:
idx = int(i[0])
if idx <= len(spade.system_windows.keys()):
self.active_window_key = spade.ui.system_windows.keys()[idx]
else: # the listbox curselection content order is system keys first database keys second, so
self.active_window_key = spade.ui.database_windows.keys()[idx-len(spade.ui.system_windows.keys())]
self.dialog.destroy()
self.launchApplication()
def launchApplication(self):
""" for now, write code here or write a function and access it from here. Keep It Simple """
if self.active_window_key == None or len(self.applications_listbox.curselection())==0:
return
x = self.applications_listbox.curselection()[0]
y = self.applications_listbox.get(int(x))
selected_title = string.split(y)[0]
cntr = 2
keys = string.split(selected_title, ' ')
if keys[0] == 'AlignmentEditor':
active_window = spade.ui.system_windows[self.active_window_key]
for pchain in systems[self.active_window_key].ProteinList:
pchain_seq = pchain.get_sequence()
if len(pchain_seq) < 40:
print "skipping load application on chain %s, len %s %s"%(pchain.chain_name, len(pchain_seq), pchain_seq)
continue
selected_title = 'AlignmentEditor %s'%(pchain.chain_name)
active_window.application_pages[selected_title] = active_window.application_notebook.add(selected_title)
active_window.pw.configurepane('info', size=0.5) # default sizes
active_window.pw.configurepane('viewer', size=0.5)
active_window.application_notebook.selectpage(selected_title)
# open a new notebook page
load_application(self, keys[0], selected_title, active_window, self.active_window_key, pchain.chain_name)
active_window.pw.setnaturalsize()
active_window.viewer.display('volume', 1)
elif keys[0] == 'Procast':
active_window = spade.ui.database_windows[self.active_window_key]
# preprocess here by performing the alignment from a selected database.
# This will eventually be replaced with a new keys option in this function, that will opt
# that Procast is called to interact with a previously opened, interactive (or at least
# re-runnable) multiply positioned alignment Application
# collect all systems from the database
selected_index = self.parent.ui.objectbox.database_listbox.curselection()
if len(selected_index) == 0:
print "none selected"
return
selected_title = string.strip(self.parent.ui.objectbox.database_listbox.get(selected_index)[1:])
print 'title %s'%(selected_title)
db_dir = './Databases/' + selected_title
# open a new System window
self.parent.ui.objectbox.launchWindow('system', 'empty')
# call the Procast UI construct to open the superimposed systems
elif self.active_window_key in spade.ui.system_windows.keys(): # all other System Window Applications
active_window = spade.ui.system_windows[self.active_window_key]
while selected_title in active_window.application_pages.keys():
if cntr == 2:
selected_title = '%s %s'%(selected_title, '2')
else:
selected_title = '%s %s'%(selected_title[:-2], '%s'%(cntr))
cntr += 1
active_window.application_pages[selected_title] = active_window.application_notebook.add(selected_title)
active_window.pw.configurepane('info', size=0.5) # default sizes
active_window.pw.configurepane('viewer', size=0.5)
active_window.application_notebook.selectpage(selected_title)
# open a new notebook page
load_application(self, keys[0], selected_title, active_window, self.active_window_key)
active_window.pw.setnaturalsize()
class ObjectBox:
def __init__(self, run_mode):
"""SPADE' ObjectBox has three panels. A list of open system objects and databases are on the top two,
and the bottom contains a MolecularViewer. Click the 'O' to open a viewer. Click 'X' to close
the selected item and its viewer.
"""
self.run_mode = run_mode
if run_mode == 'demo':
self.systems_directory = 'Examples'
elif run_mode == 'work':
self.systems_directory = 'Systems'
self.databases_directory = 'Databases'
self.toplevel = spade
# First create the pane widget
self.pw = Pmw.PanedWidget(self.toplevel,orient='vertical')
self.object_pane = self.pw.add('systems', min=.1,max=.9,size=.35)
self.database_pane = self.pw.add('databases', min=.1,max=.9,size=.3)
self.viewer_pane = self.pw.add('Viewer', min=.1,max=.9)
self.pw.pack(side=TOP,expand=1,fill='both')
# make two scrolled listboxes, | |
# Runge-Kutta Driver for quaternions-described object orientation
# Import other packages used
import numpy as np
# Import constants and parameters used
from Parameters_and_Constants import pi, moi
# Dual Matrix to a vector
# Inputs:
# - 3D array ( vec[ 3 ] )
# Outputs:
# - 3x3 matrix ( dvec ) which is its dual for vector product ( vec x a = dvec . a )
def dual_matrix( vec ):
    """Return the 3x3 skew-symmetric matrix dual to the 3-vector *vec*.

    The result ``D`` satisfies ``vec x a == D . a`` for any vector ``a``.
    """
    x, y, z = vec[ 0 ], vec[ 1 ], vec[ 2 ]
    return [ [ 0.0 ,  -z ,   y ] ,
             [   z , 0.0 ,  -x ] ,
             [  -y ,   x , 0.0 ] ]
# Dual Matrix to a quaternion
# Inputs:
# - 4D array for the quaternion ( qu[ 4 ] )
# Outputs:
# - 4x3 matrix ( dquat ) which is its dual for quaternion left multiplication with a vector
def q_mat( qu ):
    """Return the 4x3 matrix dual to quaternion *qu* for left multiplication
    with a 3-vector (used in the quaternion kinematics equation)."""
    w, x, y, z = qu[ 0 ], qu[ 1 ], qu[ 2 ], qu[ 3 ]
    return [ [ -x , -y , -z ] ,
             [  w , -z ,  y ] ,
             [  z ,  w , -x ] ,
             [ -y ,  x ,  w ] ]
# Directive Cosine Matrix (DCM) from a Quaternion
# Inputs:
# - 4D array for the unit quaternion ( qu[ 4 ] )
# Outputs:
# - Directive Cosine Matrix (DCM) which is a 3x3 SO(3) rep dcm[ 3 ][ 3 ]
# NOTE: The quaternion is renormed for each case (to make it unit even if it's not)
def dcm_from_q( qu ):
    """Return the 3x3 Directive Cosine Matrix (DCM) for quaternion *qu*.

    *qu* is renormalized **in place** before use, so the caller's list/array
    is modified.  A quaternion of the wrong length yields the identity DCM
    (with a printed warning), matching the original error behaviour.
    """
    if len( qu ) != 4:
        # wrong-length input: warn and fall back to the identity rotation
        print( "Wrong quaternion length, len( qu ) == 4 is required!" )
        print( "Returning unit DCM = diag( 1 , 1 , 1 )" )
        return [ [ 1.0 , 0.0 , 0.0 ] ,
                 [ 0.0 , 1.0 , 0.0 ] ,
                 [ 0.0 , 0.0 , 1.0 ] ]
    # renormalize in place so non-unit quaternions are handled gracefully
    q_norm = np.sqrt( np.dot( qu , qu ) )
    qu[ : ] = [ q / q_norm for q in qu ]
    w, x, y, z = qu[ 0 ], qu[ 1 ], qu[ 2 ], qu[ 3 ]
    ws, xs, ys, zs = w*w, x*x, y*y, z*z
    return [ [ ws + xs - ys - zs , 2.0*( x*y + w*z ) , 2.0*( x*z - w*y ) ] ,
             [ 2.0*( y*x - w*z ) , ws - xs + ys - zs , 2.0*( y*z + w*x ) ] ,
             [ 2.0*( z*x + w*y ) , 2.0*( z*y - w*x ) , ws - xs - ys - zs ] ]
# Torque function
# Inputs:
# - time from some arbitrary epoch time [sec]
# Outputs:
# - torque vector as a 3D array [N*m]
# NOTE: CURRENTLY RETURNING 0 VECTOR -> NO EXTERNAL TORQUE CONSIDERED
def torque_func( time ):
    """Return the external torque vector [N*m] at *time* [sec].

    Currently a placeholder: no external torque is modelled, so the zero
    vector is returned for every time.
    """
    return [ 0.0 ] * 3
# Right-Hand-Side (RHS) of the body attitude state
# Inputs:
# - Attitude State ( state[ 2 ] = [ qu[ 4 ] , om[ 3 ] ] ) consisting of:
# -- quaternion attitude ( qu[ 4 ] ) [-]
# -- angular velocity ( om[ 3 ] ) [rad/s] in Body Frame
# - External Torques ( torque[ 3 ] ) [N*m] in Inertial Frame
# Outputs:
# - Right-Hand-Side of state's derivative ( rhs_state[ 2 ] = [ rhs_qu[ 4 ] , rhs_om[ 3 ] ] ) consisting of:
# -- quaternion RHS ( rhs_qu[ 4 ] ) [1/s]
# -- angular velocity RHS ( rhs_om[ 3 ] ) [rad/s^2]
def rhs_quaternion( state , torque ):
    """Right-hand side of the attitude ODE.

    state  : [ qu[4] , om[3] ] -- attitude quaternion and body-frame
             angular velocity [rad/s]
    torque : external torque [N*m]
    Returns [ rhs_qu[4] , rhs_om[3] ]: the time derivatives of quaternion
    and angular velocity.
    """
    qu , om = state[ 0 ] , state[ 1 ]
    # Euler's equation: I*dom/dt = -om x (I*om) + torque
    ang_mom = np.matmul( moi , om )
    rhs_om = - np.matmul( dual_matrix( om ) , ang_mom ) + np.array( torque )
    rhs_om = np.matmul( np.linalg.inv( moi ) , rhs_om )
    # quaternion kinematics: dq/dt = 0.5 * Q(q) * om
    rhs_qu = 0.5*np.matmul( q_mat( qu ) , om )
    return [ rhs_qu , rhs_om ]
# Perform a Runge-Kutta Step of the attitude state
# Inputs:
# - Starting Attitude State ( state_in[ 2 ] = [ qu[ 4 ] , om[ 3 ] ] ) consisting of:
# -- quaternion attitude ( qu[ 4 ] ) [-]
# -- angular velocity ( om[ 3 ] ) [rad/s] in Body Frame
# - Initial Time [sec] (measured from some initial epoch)
# - Time Step (between input and output attitude states) [sec]
# Outputs:
# - Resulting Attitude State ( state_out[ 2 ] = [ qu[ 4 ] , om[ 3 ] ] ) consisting of:
# -- quaternion attitude ( qu[ 4 ] ) [-]
# -- angular velocity ( om[ 3 ] ) [rad/s] in Body Frame
def rk_step( state_in , t_i , dt ):
# Initialize Runge-Kutta coefficients arrays for the quaternion and angular velocity components
rk_qu = np.zeros( ( 4 , 4 ) ) # Quaternion RK coefficients 4 x <dimension>
rk_om = np.zeros( ( 4 , 3 ) ) # Angular rate RK coefficients 4 x <dimension>
# Initialize intermediate state to be populated for intermediate computations
inter_state = [ np.zeros( 4 ) , np.zeros( 3 ) ]
# Find external torque at initial step
torque = torque_func( t_i )
# Populate RK constants values at the first step ( k1 )
rhs_state = rhs_quaternion( state_in , torque )
rk_qu[ 0 ] = rhs_state[ 0 ]*dt
rk_om[ 0 ] = rhs_state[ 1 ]*dt
# Find intermediate state ( t + dt/2 , x + k1/2 ) and corresponding torque in preparation for next step
inter_state[ 0 ] = state_in[ 0 ] + rk_qu[ 0 ]/2.0
inter_state[ 1 ] = state_in[ 1 ] + rk_om[ 0 ]/2.0
torque = torque_func( t_i + dt/2.0 )
# Populate RK constants values at the second step ( k2 )
rhs_state = rhs_quaternion( inter_state , torque )
rk_qu[ 1 ] = rhs_state[ 0 ]*dt
rk_om[ 1 ] = rhs_state[ 1 ]*dt
# Find intermediate state ( t + dt/2 , x + k2/2 ), corresponding torque is the same (same time)
inter_state[ 0 ] = state_in[ 0 ] + rk_qu[ 1 ]/2.0
inter_state[ 1 ] = state_in[ 1 ] + rk_om[ 1 ]/2.0
# Populate RK constants values at the third step ( k3 )
rhs_state = rhs_quaternion( inter_state , torque )
rk_qu[ 2 ] = rhs_state[ 0 ]*dt
rk_om[ 2 ] = rhs_state[ 1 ]*dt
# Find intermediate state ( t + dt , x + k3 ) and corresponding torque in preparation for the last step
inter_state[ 0 ] = state_in[ 0 ] + rk_qu[ 2 ]
inter_state[ 1 ] = state_in[ 1 ] + rk_om[ 2 ]
torque = torque_func( t_i + dt )
# Populate RK constants values at the last (forth) step ( k4 )
rhs_state = rhs_quaternion( inter_state , torque )
rk_qu[ 3 ] = rhs_state[ 0 ]*dt
rk_om[ 3 ] = rhs_state[ 1 ]*dt
# Compute the state at t_i + dt based on the RK values computed - populate this in inter_state
inter_state[ 0 ] | |
# -*- coding: utf-8 -*-
"""
@author: <NAME> <<EMAIL>>
Date: Aug 2017
"""
import numpy as np
import pandas as pd
from ..simenv import SimEnv
from .. import precomp_funs as _pf
class MixingValve(SimEnv):
"""
type: MixingValve class.
The MixingValve **mixes or separates** a flow. The flow on the 2-end-side
    is mixed/separated by the factors n1 and n2, with **n1 + n2 = 1** and
**n1 >= 0** and **n2 >= 0**.
When mixing the temperatures and mass flows of the respective streams are
mixed by the rule of *dm_out = dm_in1 + dm_in2*.
When separating one stream is separated into two streams with the
same temperature and the massflows *dm_in = n1*dm_out1 + n2*dm_out2*.
The resulting flow of mixing/separating is calculated after each timestep
and intermediate step depending on the given control algorithm and the
measured values in the specified measuring port.
The MixingValve class does not contain a differential method as it only
passes the values of the part connected to its 'in'-port(s) to its
'out'-port(s) and the values of the part connected to its 'out'-port(s) to
its 'in'-port(s) and only applying the mixing/separating. Thus it is
not involved in solving the equations using the specified solver algorithm.
Parameters:
-----------
name: string
Name of the part.
mix_or_sep: string, default: 'mix'
        Specifies if the MixingValve is supposed to mix or separate streams.
It can be set to 'mix' for mixing or 'sep' for separating. When 'mix'
        is set, there are two inlet ports 'in1' and 'in2' and one outlet port
'out' which have to be connected. When 'sep' is set there is one inlet
port 'in1' two outlet ports 'out1' and 'out2' which have to be
connected.
"""
def __init__(self, name, master_cls, mix_or_split='mix', **kwargs):
self._models = master_cls
self.constr_type = 'Valve_3w' # define construction type
base_err = ( # define leading base error message
'While adding {0} `{1}` to the simulation '
'environment, the following error occurred:\n'
).format(self.constr_type, str(name))
arg_err = ( # define leading error for missing/incorrect argument
'Missing argument or incorrect type/value: {0}\n\n'
)
self._base_err = base_err # save to self to access it in methods
self._arg_err = arg_err # save to self to access it in methods
self.name = name
self._unit = '[%]' # unit of the actuator
self.part_id = self._models.num_parts - 1
self.kind = mix_or_split
# save smallest possible float number for avoiding 0-division:
self._tiny = self._models._tiny
# even though this part is not using numeric solving, number of
# gridpoints are specified anyways:
self.num_gp = 3
# preallocate grids:
self.T = np.zeros(3, dtype=np.float64)
self._T_init = np.zeros_like(self.T) # init temp for resetting env.
# preallocate T ports array (here only used for dimension checking)
self._T_port = np.zeros_like(self.T)
self.dm = np.zeros(3)
# self.U = np.zeros(3)
# preallocate grids for port connection parameters
# cross section area of wall of connected pipe, fluid cross section
# area of, gridspacing and lambda of wall of connected pipe
self._A_wll_conn_p = np.zeros_like(self._T_port)
self._A_fld_conn_p = np.zeros_like(self._T_port)
self._port_gsp = np.full_like(self._T_port, self._tiny)
self._lam_wll_conn_p = np.full_like(self._T_port, self._tiny)
self._lam_port_fld = np.full_like(self._T_port, self._tiny)
# port_definition (first, second and last array element):
self.port_num = 3
# Index to own value array to get values of own ports, meaning if I
# index a FLATTENED self.T.flat with self._port_own_idx, I need to
# get values accoring to the order given in self.port_names.
# That is, this must yield the value of the cell in self.T, which is
# belonging to the port 'in':
# self.T.flat[self._port_own_idx[self.port_names.index('in')]]
self._port_own_idx = np.array(
(0, 1, self.T.shape[0] - 1), dtype=np.int32
)
self._port_own_idx_2D = self._port_own_idx # save for compatibility
"""port_array"""
self.port_ids = np.array((), dtype=np.int32)
# set to read-only to avoid manipulation, same for port_name by using
# tuple:
# self._port_own_idx.flags.writeable = False
# preallocate port values to avoid allocating in loop:
self._port_vals = np.zeros(self.port_num)
# preallocate list to mark ports which have already been solved in
# topology (to enable creating subnets)
self._solved_ports = list()
# port setup depending on mixer or separator valve
# mixing or separating factors for each port are saved in the dict
# port_factors, with the factor 1 being a tuple (can't be changed!):
if mix_or_split == 'mix':
self.port_names = tuple(('A', 'B', 'AB'))
# set massflow characteristics for ports: in means that an
# inflowing massflow has a positive sign, out means that an
# outflowing massflow is pos.
self.dm_char = tuple(('in', 'in', 'out'))
self.pf_arr = np.array(
[0.5, 0.5, 1], dtype=np.float64 # port in1 # port in2
) # port out
elif mix_or_split == 'split':
self.port_names = tuple(('A', 'B', 'AB'))
# set massflow characteristics for ports: in means that an
# inflowing massflow has a positive sign, out means that an
# outflowing massflow is pos.
self.dm_char = tuple(('out', 'out', 'in'))
self.pf_arr = np.array(
[0.5, 0.5, 1], dtype=np.float64 # port out1 # port out2
) # port in
else:
err_str = 'mix_or_split has to be set to \'mix\' or\'split\'!'
raise ValueError(err_str)
# make dict for easy lookup of portfactors with memory views:
self.port_factors = dict(
{
'A': self.pf_arr[0:1],
'B': self.pf_arr[1:2],
'AB': self.pf_arr[2:3],
}
)
# construct partname+portname to get fast access to own ports:
dummy_var = list(self.port_names)
for i in range(self.port_num):
dummy_var[i] = self.name + ';' + dummy_var[i]
self._own_ports = tuple(dummy_var)
# preallocate result grids with one row. An estimate of total rows will
# be preallocated before simulation start in initialize_sim. massflow
# grid is preallocated in set_initial_cond:
self.res = np.zeros((1, self.port_num))
self.res_dm = np.zeros((2, self.port_num))
# set if type has to be solved numeric:
self.solve_numeric = False
# if port arrays shall be collapsed to amount of ports to improve speed
self.collapse_arrays = False
self._collapsed = False # bool checker if already collapsed
# determine if part is treated as hydraulic compensator
self.hydr_comp = False
# if part can be a parent part of a primary flow net:
self._flow_net_parent = False
# add each flow channel of part to hydr_comps (will be removed once its
# massflow solving method is completely integrated in flow_net.
# remaining parts except real hydr comps will be used to generate an
# error):
self._models._hydr_comps.add(self.name)
# if the topology construction method has to stop when it reaches the
# part to solve more ports from other sides before completely solving
# the massflow of it. This will be set to false as soon as only one
# port to solve is remaining:
self.break_topology = False
# count how many ports are still open to be solved by topology. If
# break topology is True, this is used to set it to False if 1 is
# reached.
self._cnt_open_prts = self.port_num # not required here
self._port_heatcond = True # if heatcond. over ports is enabled
# determine if part has the capability to affect massflow (dm) by
# diverting flow through ports or adding flow through ports:
self.affect_dm = True
# if the massflow (dm) has the same value in all cells of the part
# (respectively in each flow channel for parts with multiple flows):
self.dm_invariant = False
# if the part has multiple separated flow channels which do NOT mix
# (like a heat exchanger for exampe):
self.multiple_flows = False
# bool checker if flows were updated in update_flownet to avoid
# processing flows in get_diff each time (array for referencing):
self._process_flows = np.array([True])
# if the part CAN BE controlled by the control algorithm:
self.is_actuator = True
self._actuator_CV = self.pf_arr[:] # set array to be controlled
self._actuator_CV_name = 'port_opening'
# if the part HAS TO BE controlled by the control algorithm:
self.control_req = True
# if the part needs a special control algorithm (for parts with 2 or
# more controllable inlets/outlets/...):
self.actuator_special = True
# initialize bool if control specified:
self.ctrl_defined = False
# if the parts get_diff method is solved with memory views entirely and
# thus has arrays which are extended by +2 (+1 at each end):
self.enlarged_memview = False
| |
A boolean flag denoting whether the symbol is a
next proc function. The argument is mutually exclusive with arguments:
'parametric_values' and 'is_implicit_token'.
Returns:
The "mangled" symbol string.
"""
# Type validation for optional inputs.
if parametric_values and type(parametric_values) != "tuple":
fail("Argument 'parametric_values' must be of tuple type.")
if is_implicit_token and type(is_implicit_token) != type(True):
fail("Argument 'is_implicit_token' must be of boolean type.")
if is_proc_next and type(is_proc_next) != type(True):
fail("Argument 'is_proc_next' must be of boolean type.")
# Presence validation for optional inputs.
if is_proc_next and (parametric_values or is_implicit_token):
fail("Argument 'is_proc_next' is mutually exclusive with arguments: " +
"'parametric_values' and 'is_implicit_token'.")
prefix_str = ""
if is_implicit_token:
prefix_str = "itok__"
suffix = ""
if parametric_values:
suffix = "__" + "_".join(
[
str(v)
for v in parametric_values
],
)
mangled_name = "__{}{}__{}{}".format(
prefix_str,
module_name,
function_name,
suffix,
)
if is_proc_next:
mangled_name = mangled_name.replace(":", "_")
mangled_name = mangled_name.replace("->", "__")
mangled_name = mangled_name + "next"
return mangled_name
# Attribute shared by rules that accept a (mangled) IR entry-point name.
xls_ir_top_attrs = {
    "top": attr.string(
        doc = "The (*mangled*) name of the entry point. See " +
              "get_mangled_ir_symbol. Defines the 'top' argument of the " +
              "IR tool/application.",
    ),
}
# Attribute shared by rules that consume a single '.ir' source file.
xls_ir_common_attrs = {
    "src": attr.label(
        doc = "The IR source file for the rule. A single source file must be " +
              "provided. The file must have a '.ir' extension.",
        mandatory = True,
        allow_single_file = [_IR_FILE_EXTENSION],
    ),
}
# Attributes of the xls_dslx_ir rule: the DSLX-library input attributes plus
# IR-conversion-specific settings (entry point, converter args, output file).
xls_dslx_ir_attrs = dicts.add(
    xls_dslx_library_as_input_attrs,
    {
        "dslx_top": attr.string(
            doc = "Defines the 'entry' argument of the" +
                  "//xls/dslx/ir_converter_main.cc application.",
            mandatory = True,
        ),
        # TODO(b/220380384) 2022-02-19 When bypass_dslx_top is no longer needed,
        # remove attribute below.
        "bypass_dslx_top": attr.bool(
            doc = "DO NOT USE. Bypasses the dsl_top requirement.",
            default = False,
        ),
        "ir_conv_args": attr.string_dict(
            doc = "Arguments of the IR conversion tool. For details on the " +
                  "arguments, refer to the ir_converter_main application at " +
                  "//xls/dslx/ir_converter_main.cc. Note the " +
                  "'entry' argument is not assigned using this attribute.",
        ),
        "ir_file": attr.output(
            doc = "Filename of the generated IR. If not specified, the " +
                  "target name of the bazel rule followed by an " +
                  _IR_FILE_EXTENSION + " extension is used.",
        ),
    },
)
def xls_dslx_ir_impl(ctx):
    """The implementation of the 'xls_dslx_ir' rule.

    Converts a DSLX source file to an IR file.

    Args:
      ctx: The current rule's context object.

    Returns:
      DslxModuleInfo provider
      ConvIRInfo provider
      DefaultInfo provider
    """

    # Removed dead code: 'src', 'dep_src_list', 'srcs' and 'deps' were
    # previously pre-assigned (from ctx.files.srcs / ctx.attr.deps) and then
    # immediately overwritten or never used; the helper below is the single
    # source of truth for the inputs.
    srcs, dep_src_list = get_files_from_dslx_library_as_input(ctx)

    # Fix: the old guard ('if srcs and len(srcs) != 1') let an *empty* srcs
    # list fall through and crash on the index below instead of producing the
    # intended error message. Fail for any count other than exactly one.
    if len(srcs) != 1:
        fail("A single source file must be specified.")
    src = srcs[0]

    ir_file = _convert_to_ir(ctx, src, dep_src_list)
    dslx_module_info = DslxModuleInfo(
        dslx_source_files = dep_src_list,
        dslx_source_module_file = src,
    )
    return [
        dslx_module_info,
        ConvIRInfo(
            conv_ir_file = ir_file,
        ),
        DefaultInfo(files = depset([ir_file])),
    ]
# Public build rule: converts one DSLX source file (plus library deps) into an
# XLS IR file via xls_dslx_ir_impl. Attrs combine the conversion attributes,
# the configured output attributes and the toolchain attribute.
xls_dslx_ir = rule(
    doc = """
A build rule that converts a DSLX source file to an IR file.
Examples:
1. A simple IR conversion.
```
# Assume a xls_dslx_library target bc_dslx is present.
xls_dslx_ir(
name = "d_ir",
srcs = ["d.x"],
deps = [":bc_dslx"],
)
```
1. An IR conversion with an entry defined.
```
# Assume a xls_dslx_library target bc_dslx is present.
xls_dslx_ir(
name = "d_ir",
srcs = ["d.x"],
deps = [":bc_dslx"],
dslx_top = "d",
)
```
""",
    implementation = xls_dslx_ir_impl,
    attrs = dicts.add(
        xls_dslx_ir_attrs,
        CONFIG["xls_outs_attrs"],
        xls_toolchain_attr,
    ),
)
def xls_ir_opt_ir_impl(ctx, src):
    """Implementation of the 'xls_ir_opt_ir' rule.

    Runs the IR optimizer over 'src' and exposes the result.

    Args:
      ctx: The current rule's context object.
      src: The source file.

    Returns:
      OptIRInfo provider
      DefaultInfo provider
    """
    optimized_ir = _optimize_ir(ctx, src)
    providers = [
        OptIRInfo(
            input_ir_file = src,
            opt_ir_file = optimized_ir,
            opt_ir_args = ctx.attr.opt_ir_args,
        ),
    ]
    providers.append(DefaultInfo(files = depset([optimized_ir])))
    return providers
# Attributes for the 'xls_ir_opt_ir' rule, layered on the shared 'top' attr.
xls_ir_opt_ir_attrs = dicts.add(
    xls_ir_top_attrs,
    {
        "opt_ir_args": attr.string_dict(
            # Fix: the concatenated doc previously rendered as
            # "...on thearguments..." and "...at//xls/tools/..." (missing
            # separator spaces between the string fragments).
            doc = "Arguments of the IR optimizer tool. For details on the " +
                  "arguments, refer to the opt_main application at " +
                  "//xls/tools/opt_main.cc. The 'entry' " +
                  "argument is not assigned using this attribute.",
        ),
        "opt_ir_file": attr.output(
            doc = "Filename of the generated optimized IR. If not specified, " +
                  "the target name of the bazel rule followed by an " +
                  _OPT_IR_FILE_EXTENSION + " extension is used.",
        ),
    },
)
def _xls_ir_opt_ir_impl_wrapper(ctx):
    """Adapter used as the 'xls_ir_opt_ir' rule implementation.

    Resolves the rule's single 'src' file and delegates to
    xls_ir_opt_ir_impl. See: xls_ir_opt_ir_impl.

    Args:
      ctx: The current rule's context object.

    Returns:
      See: xls_ir_opt_ir_impl.
    """
    src = ctx.file.src
    return xls_ir_opt_ir_impl(ctx, src)
# Public build rule: optimizes a single '.ir' file (source may be a file or a
# target providing one) via _xls_ir_opt_ir_impl_wrapper.
xls_ir_opt_ir = rule(
    doc = """A build rule that optimizes an IR file.
Examples:
1. Optimizing an IR file with an entry defined.
```
xls_ir_opt_ir(
name = "a_opt_ir",
src = "a.ir",
opt_ir_args = {
"entry" : "a",
},
)
```
1. A target as the source.
```
xls_dslx_ir(
name = "a_ir",
srcs = ["a.x"],
)
xls_ir_opt_ir(
name = "a_opt_ir",
src = ":a_ir",
)
```
""",
    implementation = _xls_ir_opt_ir_impl_wrapper,
    attrs = dicts.add(
        xls_ir_common_attrs,
        xls_ir_opt_ir_attrs,
        CONFIG["xls_outs_attrs"],
        xls_toolchain_attr,
    ),
)
def _xls_ir_equivalence_test_impl(ctx):
    """Implementation of the 'xls_ir_equivalence_test' rule.

    Generates a shell script that runs the equivalence tool on the two IR
    input files.

    Args:
      ctx: The current rule's context object.

    Returns:
      DefaultInfo provider
    """
    first_ir = ctx.file.src_0
    second_ir = ctx.file.src_1
    runfiles, cmd = get_ir_equivalence_test_cmd(ctx, first_ir, second_ir)

    # Emit the test driver script; 'set -e' makes any tool failure fail the
    # test, 'exit 0' marks success otherwise.
    script = ctx.actions.declare_file(ctx.label.name + ".sh")
    script_lines = [
        "#!/bin/bash",
        "set -e",
        cmd,
        "exit 0",
    ]
    ctx.actions.write(
        output = script,
        content = "\n".join(script_lines),
        is_executable = True,
    )

    default_info = DefaultInfo(
        runfiles = ctx.runfiles(files = runfiles),
        files = depset([script]),
        executable = script,
    )
    return [default_info]
# Attributes for rules that compare a pair of '.ir' files (e.g. the
# equivalence test): two mandatory single-file inputs.
_two_ir_files_attrs = {
    "src_0": attr.label(
        doc = "An IR source file for the rule. A single source file must be " +
              "provided. The file must have a '.ir' extension.",
        mandatory = True,
        allow_single_file = [_IR_FILE_EXTENSION],
    ),
    "src_1": attr.label(
        doc = "An IR source file for the rule. A single source file must be " +
              "provided. The file must have a '.ir' extension.",
        mandatory = True,
        allow_single_file = [_IR_FILE_EXTENSION],
    ),
}
# Extra attributes specific to the 'xls_ir_equivalence_test' rule.
xls_ir_equivalence_test_attrs = {
    "ir_equivalence_args": attr.string_dict(
        doc = "Arguments of the IR equivalence tool. For details on the " +
              "arguments, refer to the check_ir_equivalence_main application " +
              "at //xls/tools/check_ir_equivalence_main.cc. " +
              "The 'function' argument is not assigned using this attribute.",
    ),
}
# Test rule: proves two IR files equivalent by running the equivalence tool;
# marked 'test = True' so it is runnable with 'bazel test'.
xls_ir_equivalence_test = rule(
    doc = """Executes the equivalence tool on two IR files.
Examples:
1. A file as the source.
```
xls_ir_equivalence_test(
name = "ab_ir_equivalence_test",
src_0 = "a.ir",
src_1 = "b.ir",
)
```
1. A target as the source.
```
xls_dslx_ir(
name = "b_ir",
srcs = ["b.x"],
)
xls_ir_equivalence_test(
name = "ab_ir_equivalence_test",
src_0 = "a.ir",
src_1 = ":b_ir",
)
```
""",
    implementation = _xls_ir_equivalence_test_impl,
    attrs = dicts.add(
        _two_ir_files_attrs,
        xls_ir_equivalence_test_attrs,
        xls_ir_top_attrs,
        xls_toolchain_attr,
    ),
    test = True,
)
def _xls_eval_ir_test_impl(ctx):
    """Implementation of the 'xls_eval_ir_test' rule.

    Generates a shell script that runs the IR interpreter on the IR input
    file.

    Args:
      ctx: The current rule's context object.

    Returns:
      DefaultInfo provider
    """

    # The two validator mechanisms are mutually exclusive.
    if ctx.attr.input_validator and ctx.attr.input_validator_expr:
        fail(msg = "Only one of \"input_validator\" or \"input_validator_expr\" " +
                   "may be specified for a single \"xls_eval_ir_test\" rule.")

    ir_src = ctx.file.src
    runfiles, cmd = get_eval_ir_test_cmd(ctx, ir_src)

    # Emit the test driver script; 'set -e' makes any tool failure fail the
    # test, 'exit 0' marks success otherwise.
    script = ctx.actions.declare_file(ctx.label.name + ".sh")
    ctx.actions.write(
        output = script,
        content = "\n".join(["#!/bin/bash", "set -e", cmd, "exit 0"]),
        is_executable = True,
    )

    return [
        DefaultInfo(
            runfiles = ctx.runfiles(files = runfiles),
            files = depset([script]),
            executable = script,
        ),
    ]
# Extra attributes specific to the 'xls_eval_ir_test' rule.
xls_eval_ir_test_attrs = {
    "input_validator": attr.label(
        doc = "The DSLX library defining the input validator for this test. " +
              "Mutually exclusive with \"input_validator_expr\".",
        providers = [DslxInfo],
        allow_files = True,
    ),
    "input_validator_expr": attr.string(
        doc = "The expression to validate an input for the test function. " +
              "Mutually exclusive with \"input_validator\".",
    ),
    "ir_eval_args": attr.string_dict(
        # Fix: the concatenated doc previously rendered as
        # "...eval_ir_main.cc.The 'entry'..." (missing separator space).
        doc = "Arguments of the IR interpreter. For details on the " +
              "arguments, refer to the eval_ir_main application at " +
              "//xls/tools/eval_ir_main.cc. " +
              "The 'entry' argument is not assigned using this attribute.",
        default = _DEFAULT_IR_EVAL_TEST_ARGS,
    ),
}
# Test rule: runs the IR interpreter over a single '.ir' input; marked
# 'test = True' so it is runnable with 'bazel test'.
xls_eval_ir_test = rule(
    doc = """Executes the IR interpreter on an IR file.
Examples:
1. A file as the source.
```
xls_eval_ir_test(
name = "a_eval_ir_test",
src = "a.ir",
)
```
1. An xls_ir_opt_ir target as the source.
```
xls_ir_opt_ir(
name = "a_opt_ir",
src = "a.ir",
)
xls_eval_ir_test(
name = "a_eval_ir_test",
src = ":a_opt_ir",
)
```
""",
    implementation = _xls_eval_ir_test_impl,
    attrs = dicts.add(
        xls_ir_common_attrs,
        xls_eval_ir_test_attrs,
        xls_ir_top_attrs,
        xls_toolchain_attr,
    ),
    test = True,
)
def _xls_benchmark_ir_impl(ctx):
"""The implementation of the 'xls_benchmark_ir' rule.
Executes the benchmark tool on an IR file.
Args:
ctx: The current rule's context object.
Returns:
DefaultInfo provider
"""
src = ctx.file.src
runfiles, cmd = get_benchmark_ir_cmd(ctx, src)
executable_file = ctx.actions.declare_file(ctx.label.name + ".sh")
ctx.actions.write(
| |
"clusters": [
{
"id": "clqr4b0ox",
"name": "KPS Jobs",
"type": "Kubernetes Processing Site",
"region": "Private",
"cloud": "aws",
"teamId": "teo6raui0",
"isDefault": False,
"dtCreated": "2019-07-22T14:50:10.170Z",
"dtModified": "2020-02-28T20:58:26.134Z",
"clusterId": 92,
"isPrivate": True,
"modelName": "team",
"modelId": 1170
}
]
},
{
"showDisabled": False,
"isAvailable": True,
"isPreemptible": False,
"vmTypeId": 38,
"capacityId": 23,
"vmType": {
"label": "p2.xlarge",
"kind": "aws-gpu",
"cpus": 4,
"ram": "65498251264",
"gpuModelId": 11,
"gpuCount": 1,
"internalDescription": None,
"dtCreated": "2019-07-22T15:20:22.108Z",
"dtModified": "2019-08-28T17:40:50.779Z",
"isPreemptible": False,
"deploymentType": "gpu",
"deploymentSize": "small",
"id": 38,
"gpuModel": {
"model": "passthrough",
"label": "Tesla K80",
"gpuGroupId": 8,
"memInBytes": "12884901888",
"memInMb": 12288,
"memInGb": "12",
"id": 11
},
"availableTemplatesWithOperatingSystems": [],
"availableRegions": [],
"permissions": [],
"defaultUsageRates": [
{
"vmTypeId": 38,
"type": "hourly",
"usageRateId": 10,
"dtCreated": "2019-07-22T15:33:05.626Z",
"isPreemptible": False,
"id": 134,
"usageRate": {
"description": "Employee",
"rate": "0.00",
"type": "monthly",
"gpuModelId": 1,
"rateHourly": "0.00",
"rateMonthly": "0.00",
"label": "Employee",
"period": "monthly",
"kind": "air",
"isEarlyAccess": False,
"isEmployeeOnly": True,
"numCpus": 2,
"ramInBytes": "4294967296",
"id": 10
}
}
],
"defaultUsageRateOverrides": []
},
"clusters": [
{
"id": "clqr4b0ox",
"name": "KPS Jobs",
"type": "Kubernetes Processing Site",
"region": "Private",
"cloud": "aws",
"teamId": "teo6raui0",
"isDefault": False,
"dtCreated": "2019-07-22T14:50:10.170Z",
"dtModified": "2020-02-28T20:58:26.134Z",
"clusterId": 92,
"isPrivate": True,
"modelName": "team",
"modelId": 1170
}
]
},
{
"showDisabled": False,
"isAvailable": True,
"isPreemptible": False,
"vmTypeId": 39,
"capacityId": 24,
"vmType": {
"label": "p3.2xlarge",
"kind": "aws-gpu",
"cpus": 8,
"ram": "65498251264",
"gpuModelId": 10,
"gpuCount": 1,
"internalDescription": None,
"dtCreated": "2019-07-22T15:20:22.108Z",
"dtModified": "2019-08-28T17:40:50.779Z",
"isPreemptible": False,
"deploymentType": "gpu",
"deploymentSize": "medium",
"id": 39,
"gpuModel": {
"model": "passthrough",
"label": "Tesla V100",
"gpuGroupId": 7,
"memInBytes": "17179869184",
"memInMb": 16384,
"memInGb": "16",
"id": 10
},
"availableTemplatesWithOperatingSystems": [],
"availableRegions": [],
"permissions": [],
"defaultUsageRates": [
{
"vmTypeId": 39,
"type": "hourly",
"usageRateId": 10,
"dtCreated": "2019-07-22T15:33:05.626Z",
"isPreemptible": False,
"id": 135,
"usageRate": {
"description": "Employee",
"rate": "0.00",
"type": "monthly",
"gpuModelId": 1,
"rateHourly": "0.00",
"rateMonthly": "0.00",
"label": "Employee",
"period": "monthly",
"kind": "air",
"isEarlyAccess": False,
"isEmployeeOnly": True,
"numCpus": 2,
"ramInBytes": "4294967296",
"id": 10
}
}
],
"defaultUsageRateOverrides": []
},
"clusters": [
{
"id": "clqr4b0ox",
"name": "KPS Jobs",
"type": "Kubernetes Processing Site",
"region": "Private",
"cloud": "aws",
"teamId": "teo6raui0",
"isDefault": False,
"dtCreated": "2019-07-22T14:50:10.170Z",
"dtModified": "2020-02-28T20:58:26.134Z",
"clusterId": 92,
"isPrivate": True,
"modelName": "team",
"modelId": 1170
}
]
},
{
"showDisabled": False,
"isAvailable": True,
"isPreemptible": False,
"vmTypeId": 40,
"capacityId": 25,
"vmType": {
"label": "p3.16xlarge",
"kind": "aws-gpu",
"cpus": 64,
"ram": "523986010112",
"gpuModelId": 10,
"gpuCount": 8,
"internalDescription": None,
"dtCreated": "2019-07-22T15:20:22.108Z",
"dtModified": "2019-08-28T17:40:50.779Z",
"isPreemptible": False,
"deploymentType": "gpu",
"deploymentSize": "large",
"id": 40,
"gpuModel": {
"model": "passthrough",
"label": "Tesla V100",
"gpuGroupId": 7,
"memInBytes": "17179869184",
"memInMb": 16384,
"memInGb": "16",
"id": 10
},
"availableTemplatesWithOperatingSystems": [],
"availableRegions": [],
"permissions": [],
"defaultUsageRates": [
{
"vmTypeId": 40,
"type": "hourly",
"usageRateId": 10,
"dtCreated": "2019-07-22T15:33:14.282Z",
"isPreemptible": False,
"id": 136,
"usageRate": {
"description": "Employee",
"rate": "0.00",
"type": "monthly",
"gpuModelId": 1,
"rateHourly": "0.00",
"rateMonthly": "0.00",
"label": "Employee",
"period": "monthly",
"kind": "air",
"isEarlyAccess": False,
"isEmployeeOnly": True,
"numCpus": 2,
"ramInBytes": "4294967296",
"id": 10
}
}
],
"defaultUsageRateOverrides": []
},
"clusters": [
{
"id": "clqr4b0ox",
"name": "KPS Jobs",
"type": "Kubernetes Processing Site",
"region": "Private",
"cloud": "aws",
"teamId": "teo6raui0",
"isDefault": False,
"dtCreated": "2019-07-22T14:50:10.170Z",
"dtModified": "2020-02-28T20:58:26.134Z",
"clusterId": 92,
"isPrivate": True,
"modelName": "team",
"modelId": 1170
}
]
}
],
"102": [
{
"showDisabled": False,
"isAvailable": True,
"isPreemptible": False,
"vmTypeId": 65,
"capacityId": 32,
"vmType": {
"label": "Wolfpass-CPU",
"kind": "cpu",
"cpus": 24,
"ram": "34359738368",
"gpuModelId": 6,
"gpuCount": 1,
"internalDescription": None,
"dtCreated": "2019-11-11T19:53:21.298Z",
"dtModified": "2019-11-19T19:09:03.288Z",
"isPreemptible": False,
"deploymentType": "cpu",
"deploymentSize": "small",
"id": 65,
"gpuModel": {
"model": "None",
"label": "None",
"gpuGroupId": 3,
"memInBytes": "0",
"memInMb": 0,
"memInGb": "0",
"id": 6
},
"availableTemplatesWithOperatingSystems": [],
"availableRegions": [],
"permissions": [],
"defaultUsageRates": [
{
"vmTypeId": 65,
"type": "hourly",
"usageRateId": 18,
"dtCreated": "2019-11-11T19:57:04.870Z",
"isPreemptible": False,
"id": 150,
"usageRate": {
"description": "VIP",
"rate": "0.00",
"type": "monthly",
"gpuModelId": 1,
"rateHourly": "0.00",
"rateMonthly": "0.00",
"label": "VIP",
"period": "monthly",
"kind": "air",
"isEarlyAccess": False,
"isEmployeeOnly": False,
"numCpus": 2,
"ramInBytes": "4294967296",
"id": 18
}
}
],
"defaultUsageRateOverrides": []
},
"clusters": [
{
"id": "clfe0kr2p",
"name": "<NAME>",
"type": "Kubernetes Processing Site",
"region": "Private",
"cloud": "intelwolfpass",
"teamId": "teo6raui0",
"isDefault": False,
"dtCreated": "2019-11-11T16:34:29.495Z",
"dtModified": "2020-02-28T20:58:26.134Z",
"clusterId": 102,
"isPrivate": True,
"modelName": "team",
"modelId": 1170
}
]
}
],
"103": [
{
"showDisabled": False,
"isAvailable": True,
"isPreemptible": False,
"vmTypeId": 35,
"capacityId": 33,
"vmType": {
"label": "c5.xlarge",
"kind": "aws-cpu",
"cpus": 4,
"ram": "8589934592",
"gpuModelId": 6,
"gpuCount": 1,
"internalDescription": None,
"dtCreated": "2019-07-22T15:12:41.506Z",
"dtModified": "2019-08-28T17:40:50.773Z",
"isPreemptible": False,
"deploymentType": "cpu",
"deploymentSize": "small",
"id": 35,
"gpuModel": {
"model": "None",
"label": "None",
"gpuGroupId": 3,
"memInBytes": "0",
"memInMb": 0,
"memInGb": "0",
"id": 6
},
"availableTemplatesWithOperatingSystems": [],
"availableRegions": [],
"permissions": [],
"defaultUsageRates": [
{
"vmTypeId": 35,
"type": "hourly",
"usageRateId": 10,
"dtCreated": "2019-07-22T15:32:41.602Z",
"isPreemptible": False,
"id": 131,
"usageRate": {
"description": "Employee",
"rate": "0.00",
"type": "monthly",
"gpuModelId": 1,
"rateHourly": "0.00",
"rateMonthly": "0.00",
"label": "Employee",
"period": "monthly",
"kind": "air",
"isEarlyAccess": False,
"isEmployeeOnly": True,
"numCpus": 2,
"ramInBytes": "4294967296",
"id": 10
}
}
],
"defaultUsageRateOverrides": []
},
"clusters": [
{
"id": "cluwffvkb",
"name": "EKS testing",
"type": "Kubernetes Processing Site",
"region": "Private",
"cloud": "aws",
"teamId": "teo6raui0",
"isDefault": False,
"dtCreated": "2019-11-21T07:27:37.010Z",
"dtModified": "2020-02-28T20:58:26.134Z",
"clusterId": 103,
"isPrivate": True,
"modelName": "team",
"modelId": 1170
}
]
},
{
"showDisabled": False,
"isAvailable": True,
"isPreemptible": False,
"vmTypeId": 36,
"capacityId": 34,
"vmType": {
"label": "c5.4xlarge",
"kind": "aws-cpu",
"cpus": 16,
"ram": "34359738368",
"gpuModelId": 6,
"gpuCount": 1,
"internalDescription": None,
"dtCreated": "2019-07-22T15:14:06.425Z",
"dtModified": "2019-08-28T17:40:50.773Z",
"isPreemptible": False,
"deploymentType": "cpu",
"deploymentSize": "medium",
"id": 36,
"gpuModel": {
"model": "None",
"label": "None",
"gpuGroupId": 3,
"memInBytes": "0",
"memInMb": 0,
"memInGb": "0",
"id": 6
},
"availableTemplatesWithOperatingSystems": [],
"availableRegions": [],
"permissions": [],
"defaultUsageRates": [
{
"vmTypeId": 36,
"type": "hourly",
"usageRateId": 10,
"dtCreated": "2019-07-22T15:32:41.602Z",
"isPreemptible": False,
"id": 132,
"usageRate": {
"description": "Employee",
"rate": "0.00",
"type": "monthly",
"gpuModelId": 1,
"rateHourly": "0.00",
"rateMonthly": "0.00",
"label": "Employee",
"period": "monthly",
"kind": "air",
"isEarlyAccess": False,
"isEmployeeOnly": True,
"numCpus": 2,
"ramInBytes": "4294967296",
"id": 10
}
}
],
"defaultUsageRateOverrides": []
},
"clusters": [
{
"id": "cluwffvkb",
"name": "EKS testing",
"type": "Kubernetes Processing Site",
"region": "Private",
"cloud": "aws",
"teamId": "teo6raui0",
"isDefault": False,
"dtCreated": "2019-11-21T07:27:37.010Z",
"dtModified": "2020-02-28T20:58:26.134Z",
"clusterId": 103,
"isPrivate": True,
"modelName": "team",
"modelId": 1170
}
]
},
{
"showDisabled": False,
"isAvailable": True,
"isPreemptible": False,
"vmTypeId": 37,
"capacityId": 35,
"vmType": {
"label": "c5.24xlarge",
"kind": "aws-cpu",
"cpus": 94,
"ram": "206158430208",
"gpuModelId": 6,
"gpuCount": 1,
"internalDescription": None,
"dtCreated": "2019-07-22T15:14:06.425Z",
"dtModified": "2019-08-28T17:40:50.773Z",
"isPreemptible": False,
"deploymentType": "cpu",
"deploymentSize": "large",
"id": 37,
"gpuModel": {
"model": "None",
"label": "None",
"gpuGroupId": 3,
"memInBytes": "0",
"memInMb": 0,
"memInGb": "0",
"id": 6
},
"availableTemplatesWithOperatingSystems": [],
"availableRegions": [],
"permissions": [],
"defaultUsageRates": [
{
"vmTypeId": 37,
"type": "hourly",
"usageRateId": 10,
"dtCreated": "2019-07-22T15:33:05.626Z",
"isPreemptible": False,
"id": 133,
"usageRate": {
"description": "Employee",
"rate": "0.00",
"type": "monthly",
"gpuModelId": 1,
"rateHourly": "0.00",
"rateMonthly": "0.00",
"label": "Employee",
"period": "monthly",
"kind": "air",
"isEarlyAccess": False,
"isEmployeeOnly": True,
"numCpus": 2,
"ramInBytes": "4294967296",
"id": 10
}
}
],
"defaultUsageRateOverrides": []
},
"clusters": [
{
"id": "cluwffvkb",
"name": "EKS testing",
"type": "Kubernetes Processing Site",
"region": "Private",
"cloud": "aws",
"teamId": "teo6raui0",
"isDefault": False,
"dtCreated": "2019-11-21T07:27:37.010Z",
"dtModified": "2020-02-28T20:58:26.134Z",
"clusterId": 103,
"isPrivate": True,
"modelName": "team",
"modelId": 1170
}
]
},
{
"showDisabled": False,
"isAvailable": True,
"isPreemptible": False,
"vmTypeId": 39,
"capacityId": 36,
"vmType": {
"label": "p3.2xlarge",
"kind": "aws-gpu",
"cpus": 8,
"ram": "65498251264",
"gpuModelId": 10,
"gpuCount": 1,
"internalDescription": None,
"dtCreated": "2019-07-22T15:20:22.108Z",
"dtModified": "2019-08-28T17:40:50.779Z",
"isPreemptible": False,
"deploymentType": "gpu",
"deploymentSize": "medium",
"id": 39,
"gpuModel": {
"model": "passthrough",
"label": "Tesla V100",
"gpuGroupId": 7,
"memInBytes": "17179869184",
"memInMb": 16384,
"memInGb": "16",
"id": 10
},
"availableTemplatesWithOperatingSystems": [],
"availableRegions": [],
"permissions": [],
"defaultUsageRates": [
{
"vmTypeId": 39,
"type": "hourly",
"usageRateId": 10,
"dtCreated": "2019-07-22T15:33:05.626Z",
"isPreemptible": False,
"id": 135,
"usageRate": {
"description": "Employee",
"rate": "0.00",
"type": "monthly",
"gpuModelId": 1,
"rateHourly": "0.00",
"rateMonthly": "0.00",
"label": "Employee",
"period": "monthly",
"kind": "air",
"isEarlyAccess": False,
"isEmployeeOnly": True,
"numCpus": 2,
"ramInBytes": "4294967296",
"id": 10
}
}
],
"defaultUsageRateOverrides": []
},
"clusters": [
{
"id": "cluwffvkb",
"name": "EKS testing",
"type": "Kubernetes Processing Site",
"region": "Private",
"cloud": "aws",
"teamId": "teo6raui0",
"isDefault": False,
"dtCreated": "2019-11-21T07:27:37.010Z",
"dtModified": "2020-02-28T20:58:26.134Z",
"clusterId": 103,
"isPrivate": True,
"modelName": "team",
"modelId": 1170
}
]
},
{
"showDisabled": False,
"isAvailable": True,
"isPreemptible": False,
"vmTypeId": 40,
"capacityId": 37,
"vmType": {
"label": "p3.16xlarge",
"kind": "aws-gpu",
"cpus": 64,
"ram": "523986010112",
"gpuModelId": 10,
"gpuCount": 8,
"internalDescription": None,
"dtCreated": "2019-07-22T15:20:22.108Z",
"dtModified": "2019-08-28T17:40:50.779Z",
"isPreemptible": False,
"deploymentType": "gpu",
"deploymentSize": "large",
"id": 40,
"gpuModel": {
"model": "passthrough",
"label": "Tesla V100",
"gpuGroupId": 7,
"memInBytes": "17179869184",
"memInMb": 16384,
"memInGb": |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.