input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
from django.shortcuts import render, redirect
from django.http import JsonResponse
from django.conf import settings
from django.contrib.auth import authenticate, login
from django.core.mail import send_mail
import json
from .models import *
from .forms import *
from django.http import HttpResponseRedirect,HttpResponse
from django.template import loader
import os
import random, string
from .config import *
import csv
import pyrebase
import email
import imaplib
# --- Static page views: each simply renders its template. ---
def home(request):
    """Landing page."""
    return render(request, 'index.html')
def nineteen(request):
    """Legacy short URL; redirects to the landing page."""
    return redirect('/')
def get_events(request):
    """Events overview page."""
    return render(request, 'events.html')
def technical(request):
    """Technical events category page."""
    return render(request, 'eventstechcat.html')
def cultural(request):
    """Cultural events category page."""
    return render(request, 'eventsculcat.html')
def id(request):
    # NOTE(review): shadows the builtin `id`; renaming would require
    # touching the URLconf, so it is kept as-is.
    """Breeze-ID generation form page."""
    return render(request, 'gen_id.html')
def gallery(request):
    """Photo gallery page."""
    return render(request, 'gallery.html')
def sponsors(request):
    """Sponsors page."""
    return render(request, 'sponsors.html')
def forgotpassmail(request):
    """Password-reset e-mail request page."""
    return render(request, 'Resetpassemail.html')
def team(request):
    """Organising team page."""
    return render(request, 'team.html')
def accomodation_brochure(request):
    """Accommodation brochure page."""
    return render(request, 'acc_brochure.html')
def transport(request):
    """Transport information page."""
    return render(request, 'transport.html')
def schedule(request):
    """Event schedule page."""
    return render(request, 'schedule.html')
def sports_handbook(request):
    """Sports handbook page."""
    return render(request, 'sportshandbook.html')
def accomodation(request):
    """Accommodation page; pre-fills contact details for logged-in users."""
    if request.user.id is not None:
        profile = request.user.profile
        context = {
            "name": profile.name,
            "email": request.user.email,
            "phno": profile.contact,
            "college": profile.college,
        }
    else:
        # Anonymous visitor: render the form empty.
        context = {"name": "", "email": "", "phno": "", "college": ""}
    return render(request, 'accomodation.html', context=context)
def dashboard(request):
    """User dashboard listing event and accommodation registrations.

    Bug fix: ``context`` was only assigned inside the authenticated
    branch, so anonymous visitors hit an ``UnboundLocalError`` at the
    ``render`` call. It now defaults to empty result sets.
    """
    context = {'profiles': [], "accomodations": []}
    if request.user.id is not None:
        profile = Registration.objects.filter(userId=request.user)
        accreg = AccomRegistration.objects.filter(userId=request.user)
        context = {'profiles': profile, "accomodations": accreg}
    return render(request, 'dashboard.html', context=context)
def signin(request):
    """Sign-in page; passes through the optional ``?prev=`` redirect target.

    Idiom fix: use ``QueryDict.get`` with a default instead of a
    try/except around the subscript.
    """
    context = {'prev': request.GET.get('prev', "")}
    return render(request, 'Signin.html', context=context)
def register(request):
    """Sign-up page; passes through the optional ``?prev=`` redirect target.

    Bug fix: the original read ``requet.GET['prev']`` (typo), which always
    raised ``NameError``; the blanket except then forced ``prev`` to ""
    so the query parameter was silently ignored.
    """
    context = {'prev': request.GET.get('prev', "")}
    return render(request, 'Signup.html', context=context)
def login1(request):
    """AJAX login endpoint.

    Returns JSON ``{"message": ...}``: "success" on login,
    "#invalidUser" for bad credentials, "#couldNotLogin" on a login()
    failure.

    Bug fix: a non-POST request previously fell off the end of the
    function and returned ``None`` (HTTP 500); it now gets an explicit
    JSON error response.
    """
    if request.method == 'POST':
        user = authenticate(username=request.POST['username'], password=request.POST['password'])
        if not user or not user.is_active:
            return JsonResponse({
                "message": '#invalidUser'
            })
        try:
            login(request, user)
        except Exception as exception:
            return JsonResponse({
                "message": '#couldNotLogin'
            })
        return JsonResponse({
            "message": "success"
        })
    return JsonResponse({
        "message": '#invalidUser'
    })
def specificEventView(request, category, subcategory):
    """Render one event sub-category page with a JSON payload per event."""
    color = "#fafafa" if category == "technical" else "#e25c7f"
    events = Events.objects.filter(category=category[0]).filter(subCategory=subcategory)
    data_dict = {}
    for event in events:
        fee = transform(event.fee)
        # Prefer the free-text prizes field when it is filled in (or when
        # the numeric prize field holds the sentinel string 'null').
        if len(str(event.prizes).strip()) > 1 or str(event.prize) == 'null':
            prize = event.prizes
        else:
            prize = transform(event.prize)
        # -1 means "no separate SNU fee"; otherwise show both fees.
        if event.fee_snu != -1:
            fee = "Outside Participants: " + fee + " | SNU Participants: " + transform(event.fee_snu)
        data_dict[event.id] = {
            "name": event.name,
            "description": event.description,
            "rules": event.rules,
            "date": str(event.date),
            "prize": prize,
            "fee": fee,
            "contact_name": event.contact_market,
            "include": int(event.include)
        }
    js_data = json.dumps(data_dict)
    context = {'events': events, 'subcategory': subcategory, "color": color,
               'category': category, "js_data": js_data}
    return render(request, 'eventssubcat.html', context=context)
def sports(request):
    """Render the sports events page with a JSON payload per event."""
    events = Events.objects.filter(category='s')
    data_dict = {}
    for event in events:
        fee = transform(event.fee)
        # -1 means "no separate SNU fee"; otherwise show both fees.
        if event.fee_snu != -1:
            fee = "Outside Participants: " + str(fee) + " | SNU Participants: " + transform(event.fee_snu)
        data_dict[event.id] = {
            "name": event.name,
            "rules": event.rules,
            "date": str(event.date),
            "prize": event.prizes,
            "fee": fee + " Per head",
            "contact_name": event.contact_market,
            "include": int(event.include)
        }
    js_data = json.dumps(data_dict)
    context = {"js_data": js_data, "events": events}
    return render(request, 'eventssportscat.html', context=context)
def get_id(request):
    """Debug endpoint: dump all Id rows to the console.

    Bug fix: the original printed ``id`` — the ``id`` view function
    defined above — instead of the queryset ``ids`` it just fetched.
    """
    ids = Id.objects.all()
    print(ids)
    return HttpResponse("/")
def gen_id(request):
    """Create a Breeze ID record from the POSTed form and e-mail it.

    Returns JSON ``{"message": ...}`` describing success or failure.

    Bug fix: ``number`` was assigned ``request.POST['phno'],`` — the
    trailing comma made it a one-element tuple, so the tuple's repr was
    stored in ``Id.phno`` and rendered into the mail.
    """
    try:
        if request.method == 'POST':
            name = request.POST['name']
            college = request.POST['college']
            rollno = request.POST['rollno']
            email = request.POST['email']
            yos = request.POST['yos']
            number = request.POST['phno']
            participant = request.POST["participant"]
            subject = "Breeze19 ID"
            from_email = settings.DEFAULT_FROM_EMAIL
            to_list = [email]
            id_obj = Id(name=name, rollno=rollno, email=email, phno=number,
                        college=college, yearofstudy=yos, parti=participant)
            try:
                id_obj.save()
            except Exception as exception:
                print(exception)
                return JsonResponse({
                    "message": "ID creation failed. Try again."
                })
            html_message = loader.render_to_string(
                os.getcwd() + '/Breeze/templates/id_mail.html',
                {
                    "name": name,
                    "college": college,
                    "rollno": rollno,
                    "yos": yos,
                    "email": email,
                    'participant': participant,
                    'phno': number
                })
            try:
                send_mail(subject, subject, "Breeze'19 " + from_email, to_list,
                          fail_silently=False, html_message=html_message)
            except Exception as exception:
                print(exception)
                return JsonResponse({
                    "message": "ID creation failed. Try again."
                })
            return JsonResponse({
                "message": "Breeze ID has been emailed to you.\nPlease show the same along with your College ID to the Security Team to gain entry"
            })
    except Exception as exception:
        # NOTE(review): a failure here (e.g. a missing POST key) still
        # returns None; kept to preserve the original contract.
        print(exception)
def createaccount(request):
    """AJAX sign-up endpoint: create User + Profile, log in, send mail.

    Returns JSON ``{"message": ...}``: "success", "#userExists",
    "#invalidSignup" (non-POST), or "Try Again" on an internal error.

    Fix: the new user is taken from the ``create_user`` return value
    instead of ``User.objects.last()``, which was racy — a concurrent
    signup could attach this Profile to someone else's account.
    """
    if request.method == 'POST':
        name = request.POST['name']
        email = request.POST['email']
        username = email
        password = request.POST['password']
        confirm = request.POST['confirmpass']
        contact = request.POST['contact']
        college = request.POST['college']
        subject = "Welcome to Breeze'19"
        message = "Welcome to Breeze 19 by SNU. "
        from_email = settings.DEFAULT_FROM_EMAIL
        to_list = [email]
        html_message = loader.render_to_string(
            os.getcwd() + '/Breeze/templates/signup_mail.html',
            {
                'name': name,
                'username': email,
                'password': password
            }
        )
        try:
            if not (User.objects.filter(username=username).exists() or User.objects.filter(email=email).exists()):
                x = User.objects.create_user(username, email, password)
                Profile_obj = Profile.objects.create(user=x, name=name, contact=contact, college=college)
                try:
                    Profile_obj.save()
                except Exception as exp:
                    print(exp)
                user = authenticate(username=username, password=password)
                login(request, user)
                try:
                    send_mail(subject, message, "Breeze'19 " + from_email, to_list,
                              fail_silently=False, html_message=html_message)
                except Exception as exception:
                    # Mail failure is non-fatal: the account already exists.
                    print(exception)
                return JsonResponse({
                    "message": "success"
                })
            else:
                return JsonResponse({
                    "message": "#userExists"
                })
        except Exception as exception:
            print(exception)
            return JsonResponse({
                "message": "Try Again"
            })
    else:
        return JsonResponse({
            "message": "#invalidSignup"
        })
def event_register2(request):
    """AJAX event-registration endpoint.

    Computes the payable fee (team vs per-head pricing, SNU discount,
    Aagaaz over-20 surcharge), records a Registration row, and mails a
    confirmation. Returns JSON ``{"message": ...}``.

    Bug fix: ``print(exception)`` after the profile-lookup ``return``
    was unreachable; it now runs before the error response is sent.
    """
    if request.method == 'POST' and request.user.id is not None:
        e = int(request.POST['event'])
        event = Events.objects.get(id=e)
        # Registration id: EV19 + 2-digit event id + 4-digit user id.
        uid = 'EV19{:02}{:04}'.format(event.id, request.user.id)
        payable = 0
        try:
            if event.fee_type == 'team':
                payable = event.fee
                if event.fee_snu != -1:
                    if str(request.user.email).endswith('snu.edu.in'):
                        payable = event.fee_snu
                if event.name == 'Aagaaz':
                    # Aagaaz: flat fee covers 20 heads, Rs 100 each beyond.
                    if int(request.POST['nop']) > 20:
                        payable = event.fee + (100 * (int(request.POST['nop']) - 20))
            elif event.fee_type == 'head':
                payable = event.fee * int(request.POST['nop'])
                if event.fee_snu != -1:
                    if str(request.user.email).endswith('snu.edu.in'):
                        payable = event.fee_snu * int(request.POST['nop'])
        except Exception as exception:
            print(exception)
        # Free events are immediately 'p'aid; others start 'u'npaid.
        if event.fee == 0:
            transaction_status = 'p'
        else:
            transaction_status = 'u'
        try:
            usr_profile = Profile.objects.filter(user=request.user)
            if len(usr_profile) > 0:
                register = Registration(eventId=event, userId=request.user,
                                        college=usr_profile.last().college, registration_id=uid,
                                        transaction_status=transaction_status,
                                        payable=payable, nop=int(request.POST['nop']))
            else:
                return JsonResponse({
                    "message": "Error"
                })
        except Exception as exception:
            print(exception)
            return JsonResponse({
                "message": "Try again"
            })
        try:
            register.save()
        except Exception as exception:
            print(exception)
            return JsonResponse({
                "message": "Error while recording registration. Please try again."
            })
        form_url = ""
        if event.form_url != "null":
            form_url = event.form_url
        subject = "Event Registration Successful | Breeze'19"
        message = "Event Registration Successful."
        from_email = settings.DEFAULT_FROM_EMAIL
        to_list = [request.user.email]
        # Events with an external form get a different mail template.
        if event.form_url == "null":
            html_message = loader.render_to_string(
                os.getcwd() + '/Breeze/templates/reg_mail.html',
                {
                    'name': request.user.profile.name,
                    'email': request.user.email,
                    'reg_id': uid,
                    'event_name': event.name,
                    'status': transaction_status,
                    'amount': transform(payable)
                }
            )
        else:
            html_message = loader.render_to_string(
                os.getcwd() + '/Breeze/templates/reg_mail1.html',
                {
                    'name': request.user.profile.name,
                    'email': request.user.email,
                    'reg_id': uid,
                    'event_name': event.name,
                    'status': transaction_status,
                    'form_url': form_url,
                    'amount': transform(payable)
                }
            )
        try:
            send_mail(subject, message, from_email, to_list,
                      fail_silently=False, html_message=html_message)
        except Exception as e:
            # Mail failure is non-fatal: the registration is already saved.
            print("Mail not sent")
            print(e, e.args)
        return JsonResponse({
            "message": "success"
        })
    else:
        return JsonResponse({
            "message": "Please signin first."
        })
def accom_register(request):
    """AJAX accommodation-registration endpoint.

    Prices the chosen package (per-day Rs 300 + Rs 150/day meals, or a
    flat Rs 800 / Rs 1250 with meals), records an AccomRegistration row
    and mails a confirmation. Returns JSON ``{"message": ...}``.

    Fix: the bare ``except:`` around ``register.save()`` is narrowed to
    ``except Exception`` so it no longer swallows KeyboardInterrupt /
    SystemExit.
    """
    if request.method == 'POST':
        if request.user.id is not None:
            package = request.POST['package']
            days = int(request.POST['days'])
            food = request.POST['meal']
            if food == 'Without Meals':
                packageid = AccPackage.objects.get(name=package)
            else:
                packageid = AccPackage.objects.get(name=package + " (" + food + ")")
            fee = 0
            if package == 'Per Day Package':
                fee = days * 300
                if food == 'With Meals':
                    fee += days * 150
            else:
                if food == 'With Meals':
                    fee = 1250
                else:
                    fee = 800
            # Registration id: AC + 4-digit user id + random 4-digit suffix.
            uid = 'AC{:04}{:04}'.format(request.user.id, random.randint(1, 9999))
            register = AccomRegistration(packageId=packageid, userId=request.user,
                                         college=request.user.profile.college,
                                         registration_id=uid, days=days, payable=fee)
            try:
                register.save()
            except Exception:
                # Unique constraint on the user: a second save means
                # they already registered.
                return JsonResponse({
                    "message": "Already registered for accomodation"
                })
            subject = "Accomodation Registration Successful | Breeze'19"
            message = "Accomodation Registration Successful."
            from_email = settings.DEFAULT_FROM_EMAIL
            to_list = [request.user.email]
            try:
                html_message = loader.render_to_string(
                    os.getcwd() + '/Breeze/templates/accomodation_mail.html',
                    {
                        'name': request.user.profile.name,
                        'reg_id': uid,
                        'package': packageid.name,
                        'amount': transform1(fee)
                    }
                )
                send_mail(subject, message, from_email, to_list,
                          fail_silently=False, html_message=html_message)
            except Exception as exception:
                # Mail failure is non-fatal: registration is already saved.
                print(exception)
            return JsonResponse({
                "message": 'success'
            })
        else:
            return JsonResponse({
                "message": "Please signin first"
            })
def forgotmail(request):
    """Send a password-reset link (random 64-char token) to the user.

    Returns JSON ``{"message": ...}`` on POST.

    Bug fix: a non-POST request previously fell off the end of the view
    and returned ``None`` (HTTP 500); it now redirects home.
    """
    if request.method == "POST":
        form = ForgotPassMailForm(request.POST)
        if form.is_valid():
            subject = "Reset Password | Breeze'19"
            message = "You can change your password here:- "
            from_email = settings.DEFAULT_FROM_EMAIL
            to_list = [request.POST['email']]
            # 64-character alphanumeric one-time token.
            url_hash = "".join(random.choice(string.ascii_letters + string.digits) for _ in range(64))
            try:
                user = User.objects.filter(username=request.POST['email'].strip())
                if user.exists():
                    ForgetPass.objects.create(token=url_hash, user=user[0])
            except Exception as exception:
                print(exception)
                return JsonResponse({
                    "message": "Password reset error"
                })
            html_message = loader.render_to_string(
                os.getcwd() + '/Breeze/templates/forgot_pass.html',
                {
                    'link': 'https://breeze19.appspot.com/forgotPassword/' + url_hash,
                    'subject': 'Password reset email'
                }
            )
            try:
                send_mail(subject, message, from_email, to_list,
                          fail_silently=False, html_message=html_message)
            except Exception as e:
                print("Mail not sent")
                print(e, e.args)
            return JsonResponse({
                "message": "success"
            })
        else:
            raise forms.ValidationError("Form can not be Validated.")
    return HttpResponseRedirect('/')
def forgot(request, hashkey):
    """Password-reset landing view.

    GET: validate the 64-char token and render the reset form.
    POST: set the new password and delete the consumed token.

    Fix: the bare ``except:`` is narrowed to ``except Exception``.
    """
    if request.method == "POST":
        password = request.POST['password']
        confirm = request.POST['repassword']
        if password == confirm:
            try:
                user = ForgetPass.objects.filter(token=hashkey)[0]
                user = user.user
                user.set_password(password)
                user.save()
                # Token is single-use: remove it once the password changes.
                ForgetPass.objects.filter(token=hashkey).delete()
                return JsonResponse({
                    "message": "success"
                })
            except Exception:
                raise forms.ValidationError("Unable to Change Password")
        else:
            return JsonResponse({
                "message": "You had one job; Type the same password"
            })
    else:
        if len(hashkey) != 64:
            return HttpResponseRedirect('/')
        forget_pass_object = ForgetPass.objects.filter(token=hashkey)
        if not forget_pass_object:
            return HttpResponseRedirect('/')
        return render(request, "Resetpass.html", {"hashkey": hashkey})
# Util functions
def ga_tracking_id(request):
    """Context processor exposing the Google Analytics tracking id."""
    return {'ga_tracking_id': GA_TRACKING_ID}
def transform(amount):
t_amt = "Rs "
try:
amt = str(amount)
except Exception as exception:
print(exception)
try:
if amount == 0:
return "No Registration Fee"
if amount >= 100000:
t_amt += amt[0:1] + "," + amt[1:3] + "," + amt[3:]
elif amount >= 10000 and | |
data
def get_land_sea_mask(self):
    """Read the granule's land/sea mask as float32 with NaN invalids.

    FY3A/FY3B store the dataset inside the L1 file itself; FY3C keeps
    it in the companion GEO file. Codes outside 0..7 become NaN.
    """
    if self.resolution == 1000:
        satellite_type1 = ['FY3A', 'FY3B']
        satellite_type2 = ['FY3C']
        if self.satellite in satellite_type1:
            data_file = self.in_file
            if not os.path.isfile(data_file):
                raise ValueError(
                    'Data file is not exist. {}'.format(data_file))
            with h5py.File(data_file, 'r') as hdf5_file:
                # FY3A data can be irregular (e.g. 1810 x 2048);
                # clip to the expected shape (1800 x 2048).
                s = self.data_shape
                data_pre = hdf5_file.get('LandSeaMask')[:s[0], :s[1]]
        elif self.satellite in satellite_type2:
            data_file = self.__get_geo_file()
            if not os.path.isfile(data_file):
                raise ValueError(
                    'Data file is not exist. {}'.format(data_file))
            with h5py.File(data_file, 'r') as hdf5_file:
                data_pre = hdf5_file.get('/Geolocation/LandSeaMask')[:]
        else:
            raise ValueError(
                'Cant read this satellite`s data.: {}'.format(self.satellite))
        # Mask invalid values as NaN.
        invalid_index = np.logical_or(data_pre < 0, data_pre > 7)
        data_pre = data_pre.astype(np.float32)
        data_pre[invalid_index] = np.nan
        data = data_pre
    else:
        raise ValueError(
            'Cant read this data, please check its resolution: {}'.format(self.in_file))
    return data
def get_land_cover(self):
    """Read the granule's land-cover classes as float32 with NaN invalids.

    FY3A/FY3B store the dataset inside the L1 file itself; FY3C keeps
    it in the companion GEO file. Codes outside 0..17 become NaN.
    """
    if self.resolution == 1000:
        satellite_type1 = ['FY3A', 'FY3B']
        satellite_type2 = ['FY3C']
        if self.satellite in satellite_type1:
            data_file = self.in_file
            if not os.path.isfile(data_file):
                raise ValueError(
                    'Data file is not exist. {}'.format(data_file))
            with h5py.File(data_file, 'r') as hdf5_file:
                # FY3A data can be irregular (e.g. 1810 x 2048);
                # clip to the expected shape (1800 x 2048).
                s = self.data_shape
                data_pre = hdf5_file.get('LandCover')[:s[0], :s[1]]
        elif self.satellite in satellite_type2:
            data_file = self.__get_geo_file()
            if not os.path.isfile(data_file):
                raise ValueError(
                    'Data file is not exist. {}'.format(data_file))
            with h5py.File(data_file, 'r') as hdf5_file:
                data_pre = hdf5_file.get('/Geolocation/LandCover')[:]
        else:
            raise ValueError(
                'Cant read this satellite`s data.: {}'.format(self.satellite))
        # Mask invalid values as NaN.
        invalid_index = np.logical_or(data_pre < 0, data_pre > 17)
        data_pre = data_pre.astype(np.float32)
        data_pre[invalid_index] = np.nan
        data = data_pre
    else:
        raise ValueError(
            'Cant read this data, please check its resolution: {}'.format(self.in_file))
    return data
def get_sensor_azimuth(self):
    """Read the sensor azimuth in degrees (raw counts / 100).

    FY3A/FY3B store the dataset inside the L1 file itself; FY3C keeps
    it in the companion GEO file. Raw counts outside [-18000, 18000]
    become NaN before scaling.
    """
    if self.resolution == 1000:
        satellite_type1 = ['FY3A', 'FY3B']
        satellite_type2 = ['FY3C']
        if self.satellite in satellite_type1:
            data_file = self.in_file
            if not os.path.isfile(data_file):
                raise ValueError(
                    'Data file is not exist. {}'.format(data_file))
            with h5py.File(data_file, 'r') as hdf5_file:
                # FY3A data can be irregular (e.g. 1810 x 2048);
                # clip to the expected shape (1800 x 2048).
                s = self.data_shape
                data_pre = hdf5_file.get('SensorAzimuth')[:s[0], :s[1]]
        elif self.satellite in satellite_type2:
            data_file = self.__get_geo_file()
            if not os.path.isfile(data_file):
                raise ValueError(
                    'Data file is not exist. {}'.format(data_file))
            with h5py.File(data_file, 'r') as hdf5_file:
                data_pre = hdf5_file.get(
                    '/Geolocation/SensorAzimuth')[:]
        else:
            raise ValueError(
                'Cant read this satellite`s data.: {}'.format(self.satellite))
        # Mask invalid values as NaN, then scale counts to degrees.
        invalid_index = np.logical_or(data_pre < -18000, data_pre > 18000)
        data_pre = data_pre.astype(np.float32)
        data_pre[invalid_index] = np.nan
        data = data_pre / 100.
    else:
        raise ValueError(
            'Cant read this data, please check its resolution: {}'.format(self.in_file))
    return data
def get_sensor_zenith(self):
    """Read the sensor zenith in degrees (raw counts / 100).

    FY3A/FY3B store the dataset inside the L1 file itself; FY3C keeps
    it in the companion GEO file. Raw counts outside [0, 18000] become
    NaN before scaling.
    """
    if self.resolution == 1000:
        satellite_type1 = ['FY3A', 'FY3B']
        satellite_type2 = ['FY3C']
        if self.satellite in satellite_type1:
            data_file = self.in_file
            if not os.path.isfile(data_file):
                raise ValueError(
                    'Data file is not exist. {}'.format(data_file))
            with h5py.File(data_file, 'r') as hdf5_file:
                # FY3A data can be irregular (e.g. 1810 x 2048);
                # clip to the expected shape (1800 x 2048).
                s = self.data_shape
                data_pre = hdf5_file.get('SensorZenith')[:s[0], :s[1]]
        elif self.satellite in satellite_type2:
            data_file = self.__get_geo_file()
            if not os.path.isfile(data_file):
                raise ValueError(
                    'Data file is not exist. {}'.format(data_file))
            with h5py.File(data_file, 'r') as hdf5_file:
                data_pre = hdf5_file.get('/Geolocation/SensorZenith')[:]
        else:
            raise ValueError(
                'Cant read this satellite`s data.: {}'.format(self.satellite))
        # Mask invalid values as NaN, then scale counts to degrees.
        invalid_index = np.logical_or(data_pre < 0, data_pre > 18000)
        data_pre = data_pre.astype(np.float32)
        data_pre[invalid_index] = np.nan
        data = data_pre / 100.
    else:
        raise ValueError(
            'Cant read this data, please check its resolution: {}'.format(self.in_file))
    return data
def get_solar_azimuth(self):
    """Read the solar azimuth in degrees (raw counts / 100).

    FY3A/FY3B store the dataset inside the L1 file itself; FY3C keeps
    it in the companion GEO file. Raw counts outside [-18000, 18000]
    become NaN before scaling.
    """
    if self.resolution == 1000:
        satellite_type1 = ['FY3A', 'FY3B']
        satellite_type2 = ['FY3C']
        if self.satellite in satellite_type1:
            data_file = self.in_file
            if not os.path.isfile(data_file):
                raise ValueError(
                    'Data file is not exist. {}'.format(data_file))
            with h5py.File(data_file, 'r') as hdf5_file:
                # FY3A data can be irregular (e.g. 1810 x 2048);
                # clip to the expected shape (1800 x 2048).
                s = self.data_shape
                data_pre = hdf5_file.get('SolarAzimuth')[:s[0], :s[1]]
        elif self.satellite in satellite_type2:
            data_file = self.__get_geo_file()
            if not os.path.isfile(data_file):
                raise ValueError(
                    'Data file is not exist. {}'.format(data_file))
            with h5py.File(data_file, 'r') as hdf5_file:
                data_pre = hdf5_file.get('/Geolocation/SolarAzimuth')[:]
        else:
            raise ValueError(
                'Cant read this satellite`s data.: {}'.format(self.satellite))
        # Mask invalid values as NaN, then scale counts to degrees.
        invalid_index = np.logical_or(data_pre < -18000, data_pre > 18000)
        data_pre = data_pre.astype(np.float32)
        data_pre[invalid_index] = np.nan
        data = data_pre / 100.
    else:
        raise ValueError(
            'Cant read this data, please check its resolution: {}'.format(self.in_file))
    return data
def get_solar_zenith(self):
    """Read the solar zenith in degrees (raw counts / 100).

    FY3A/FY3B store the dataset inside the L1 file itself; FY3C keeps
    it in the companion GEO file. Raw counts outside [0, 18000] become
    NaN before scaling.
    """
    if self.resolution == 1000:
        satellite_type1 = ['FY3A', 'FY3B']
        satellite_type2 = ['FY3C']
        if self.satellite in satellite_type1:
            data_file = self.in_file
            if not os.path.isfile(data_file):
                raise ValueError(
                    'Data file is not exist. {}'.format(data_file))
            with h5py.File(data_file, 'r') as hdf5_file:
                # FY3A data can be irregular (e.g. 1810 x 2048);
                # clip to the expected shape (1800 x 2048).
                s = self.data_shape
                data_pre = hdf5_file.get('SolarZenith')[:s[0], :s[1]]
        elif self.satellite in satellite_type2:
            data_file = self.__get_geo_file()
            if not os.path.isfile(data_file):
                raise ValueError(
                    'Data file is not exist. {}'.format(data_file))
            with h5py.File(data_file, 'r') as hdf5_file:
                data_pre = hdf5_file.get('/Geolocation/SolarZenith')[:]
        else:
            raise ValueError(
                'Cant read this satellite`s data.: {}'.format(self.satellite))
        # Mask invalid values as NaN, then scale counts to degrees.
        invalid_index = np.logical_or(data_pre < 0, data_pre > 18000)
        data_pre = data_pre.astype(np.float32)
        data_pre[invalid_index] = np.nan
        data = data_pre / 100.
    else:
        raise ValueError(
            'Cant read this data, please check its resolution: {}'.format(self.in_file))
    return data
def get_timestamp(self):
    """Build a per-pixel Unix-epoch timestamp array (int32 seconds).

    The granule start time comes from self.ymd + self.hms; timestamps
    are spread linearly over the granule duration, one value per row.
    """
    if self.resolution == 1000:
        satellite_type1 = ['FY3A', 'FY3B', 'FY3C']
        if self.satellite in satellite_type1:
            seconds_of_file = 300  # one granule spans 300 seconds
        else:
            raise ValueError(
                'Cant read this satellite`s data.: {}'.format(self.satellite))
        file_date = datetime.strptime(self.ymd + self.hms, '%Y%m%d%H%M%S')
        # Seconds since the Unix epoch for the granule start.
        timestamp = (
            file_date - datetime(1970, 1, 1, 0, 0, 0)).total_seconds()
        row_length = self.data_shape[0]
        delta = np.linspace(0, seconds_of_file - 1, row_length)
        data = np.full(self.data_shape, np.nan, dtype=np.float64)
        # Broadcast one timestamp per scan row across all columns.
        data[:] = (delta + timestamp).reshape(-1, 1)
        data = data.astype(np.int32)
    else:
        raise ValueError(
            'Cant read this data, please check its resolution: {}'.format(self.in_file))
    return data
def get_central_wave_number(self):
    """Return the fixed central wave numbers (cm-1) for the IR channels."""
    if self.resolution == 1000:
        satellite_type1 = ['FY3A', 'FY3B', 'FY3C']
        if self.satellite in satellite_type1:
            # Fixed values.
            # Central wave number: wn(cm-1) = 10 ^ 7 / wave_length(nm)
            data = {'CH_03': 2673.796, 'CH_04': 925.925, 'CH_05': 833.333}
        else:
            raise ValueError(
                'Cant read this satellite`s data.: {}'.format(self.satellite))
    else:
        raise ValueError(
            'Cant read this data, please check its resolution: {}'.format(self.in_file))
    return data
def get_spectral_response(self):
    """Load per-channel spectral response functions from SRF text files.

    Returns (wave_number_dict, response_dict), keyed by 'CH_xx'.
    Channels whose SRF file is missing are silently skipped.
    """
    wave_number_dict = dict()
    response_dict = dict()
    if self.resolution == 1000:
        satellite_type1 = ['FY3A', 'FY3B', 'FY3C']
        if self.satellite in satellite_type1:
            dtype = {
                'names': ('wave_length', 'response'), 'formats': ('f4', 'f4')}
            for i in range(self.channels):
                k = i + 1
                channel_name = "CH_{:02d}".format(k)
                file_name = '{}_{}_SRF_CH{:02d}_Pub.txt'.format(
                    self.satellite, self.sensor, k)
                data_file = os.path.join(g_main_path, 'SRF', file_name)
                if not os.path.isfile(data_file):
                    continue
                datas = np.loadtxt(data_file, dtype=dtype)
                # Files are ordered by wavelength; reverse so the derived
                # wave numbers come out in ascending order.
                wave_length = datas['wave_length'][::-1]
                wave_number_channel = 10 ** 7 / wave_length
                wave_number_dict[channel_name] = wave_number_channel
                response_channel = datas['response'][::-1]
                response_dict[channel_name] = response_channel
        else:
            raise ValueError(
                'Cant read this satellite`s data.: {}'.format(self.satellite))
    else:
        raise ValueError(
            'Cant read this data, please check its resolution: {}'.format(self.in_file))
    return wave_number_dict, response_dict
def __get_geo_file(self):
    """Return the companion GEO file path for the current L1 file.

    An explicit self.geo_file wins; otherwise the path is derived by
    replacing the L1 filename suffix.

    Bug fix: the resolution error message used
    ``"...: ".format(self.resolution)`` without a ``{}`` placeholder,
    so the offending value was silently dropped from the message.
    """
    if self.geo_file is not None:
        return self.geo_file
    if self.resolution == 1000:
        satellite_type1 = ['FY3C']
        if self.satellite in satellite_type1:
            geo_file = self.in_file[:-12] + 'GEOXX_MS.HDF'
        else:
            raise ValueError(
                'Cant read this satellite`s data.: {}'.format(self.satellite))
    else:
        raise ValueError(
            "Cant handle this resolution: {}".format(self.resolution))
    return geo_file
def __get_obc_file(self):
    """Return the companion OBC file path for the current L1 file.

    Bug fix: the resolution error message used
    ``"...: ".format(self.resolution)`` without a ``{}`` placeholder,
    so the offending value was silently dropped from the message.
    """
    if self.resolution == 1000:
        satellite_type1 = ['FY3A', 'FY3B', 'FY3C']
        if self.satellite in satellite_type1:
            obc_file = self.in_file[:-12] + 'OBCXX_MS.HDF'
        else:
            raise ValueError(
                'Cant read this satellite`s data.: {}'.format(self.satellite))
    else:
        raise ValueError(
            "Cant handle this resolution: {}".format(self.resolution))
    return obc_file
# 徐寒列 需要增加VIRR的OBC相关内容
def get_packet_count(self):
    """Read the OBC Packet_Count, broadcast one value per scan row.

    Counts outside [0, 16383] become NaN. Both satellite families read
    from the OBC file; only the dataset path differs.
    """
    data = None
    if self.resolution == 1000:
        satellite_type1 = ['FY3A', 'FY3B']
        satellite_type2 = ['FY3C']
        if self.satellite in satellite_type1:
            data_file = self.__get_obc_file()
            if not os.path.isfile(data_file):
                raise ValueError(
                    'Data file is not exist. {}'.format(data_file))
            with h5py.File(data_file, 'r') as hdf5_file:
                # FY3A data can be irregular (e.g. 1810 x 2048);
                # clip to the expected number of rows.
                s = self.data_shape
                data_pre = hdf5_file.get('Packet_Count')[:s[0]]
        elif self.satellite in satellite_type2:
            data_file = self.__get_obc_file()
            if not os.path.isfile(data_file):
                raise ValueError(
                    'Data file is not exist. {}'.format(data_file))
            with h5py.File(data_file, 'r') as hdf5_file:
                data_pre = hdf5_file.get('/Calibration/Packet_Count')[:]
        else:
            raise ValueError(
                'Cant read this satellite`s data.: {}'.format(self.satellite))
        # Mask invalid values as NaN.
        invalid_index = np.logical_or(data_pre < 0, data_pre > 16383)
        data_pre = data_pre.astype(np.float32)
        data_pre[invalid_index] = np.nan
        # NOTE(review): leftover debug print — candidate for removal.
        print(('data_pre', data_pre.shape))
        data = np.full(self.data_shape, np.nan, dtype=np.float32)
        # Broadcast one count per scan row across all columns.
        data[:] = data_pre.reshape(-1, 1)
    return data
def get_prt1_count(self):
data = None
if self.resolution == 1000:
satellite_type1 = ['FY3A', 'FY3B']
satellite_type2 = ['FY3C']
if self.satellite in satellite_type1:
data_file = self.__get_obc_file()
if not os.path.isfile(data_file):
raise ValueError(
'Data file is not exist. {}'.format(data_file))
with h5py.File(data_file, 'r') as hdf5_file:
# FY3A数据不规整,存在 1810,2048 的数据,取 1800,2048
s = self.data_shape
data_pre = hdf5_file.get('PRT1_Count')[:s[0]]
elif self.satellite in satellite_type2:
data_file = self.__get_obc_file()
if not os.path.isfile(data_file):
raise ValueError(
'Data file is not exist. {}'.format(data_file))
with h5py.File(data_file, 'r') as hdf5_file:
data_pre = hdf5_file.get('/Calibration/PRT1_Count')[:]
else:
raise ValueError(
'Cant read this satellite`s data.: {}'.format(self.satellite))
# 过滤无效值
invalid_index = np.logical_or(data_pre < 0, data_pre > 1023)
data_pre = data_pre.astype(np.float32)
data_pre[invalid_index] = np.nan
data_pre = np.nanmean(data_pre, axis=1)
data = np.full(self.data_shape, np.nan, dtype=np.float32)
data[:] = data_pre.reshape(-1, 1)
return | |
from __future__ import print_function, absolute_import
import os, sys, subprocess, shlex, tempfile, time, sklearn.base, math
import numpy as np
import pandas as pd
from pandas_extensions import *
_vw_default_path = 'utils/lib/vw'
class _VW(sklearn.base.BaseEstimator):
    """sklearn-style wrapper around the command-line Vowpal Wabbit.

    Stores every VW option verbatim (sklearn requires ``__init__`` to
    only assign) and builds the lower-level ``VW`` driver in ``fit``.

    Bug fix: ``predict_proba`` wrapped ``map(sig, preds)`` in
    ``np.asarray``; on Python 3 ``map`` returns an iterator, which
    numpy turns into a 0-d object array and ``vstack`` then fails.
    A list comprehension behaves identically on both Python versions.
    """
    def __init__(self,
                 logger=None,
                 vw=_vw_default_path,
                 moniker='vowpal_wabbit',
                 name=None,
                 bits=None,
                 loss=None,
                 passes=10,
                 silent=False,
                 l1=None,
                 l2=None,
                 learning_rate=None,
                 quadratic=None,
                 audit=None,
                 power_t=None,
                 adaptive=False,
                 working_dir=None,
                 decay_learning_rate=None,
                 initial_t=None,
                 minibatch=None,
                 total=None,
                 node=None,
                 unique_id=None,
                 span_server=None,
                 bfgs=None,
                 oaa=None,
                 ect=None,
                 csoaa=None,
                 wap=None,
                 cb=None,
                 cb_type=None,
                 old_model=None,
                 incremental=False,
                 mem=None,
                 nn=None,
                 invariant=False,
                 normalized=False,
                 sgd=False,
                 ignore=None,
                 columns=None
                 ):
        self.logger = logger
        self.vw = vw
        self.moniker = moniker
        self.name = name
        self.bits = bits
        self.loss = loss
        self.passes = passes
        self.silent = silent
        self.l1 = l1
        self.l2 = l2
        self.learning_rate = learning_rate
        self.quadratic = quadratic
        self.audit = audit
        self.power_t = power_t
        self.adaptive = adaptive
        self.working_dir = working_dir
        self.decay_learning_rate = decay_learning_rate
        self.initial_t = initial_t
        self.minibatch = minibatch
        self.total = total
        self.node = node
        self.unique_id = unique_id
        self.span_server = span_server
        self.bfgs = bfgs
        self.oaa = oaa
        self.ect = ect
        self.csoaa = csoaa
        self.wap = wap
        self.cb = cb
        self.cb_type = cb_type
        self.old_model = old_model
        self.incremental = incremental
        self.mem = mem
        self.nn = nn
        self.invariant = invariant
        self.normalized = normalized
        self.sgd = sgd
        self.ignore = ignore
        self.columns = columns
        # Accept numpy/pandas index objects for column names.
        if hasattr(self.columns, 'tolist'): self.columns = self.columns.tolist()

    def fit(self, X, y=None):
        """Convert X (ndarray or DataFrame) to VW text format and train."""
        if type(X) is np.ndarray:
            if self.columns is None: raise Exception('VowpalWabbit requires columns be set')
            X = pd.DataFrame(X, columns=self.columns)
        if type(X) is pd.DataFrame:
            X = X.to_vw(y)
        self.vw_ = VW(
            logger=self.logger,
            vw=self.vw,
            moniker=self.moniker,
            name=self.name,
            bits=self.bits,
            loss=self.loss,
            passes=self.passes,
            silent=self.silent,
            l1=self.l1,
            l2=self.l2,
            learning_rate=self.learning_rate,
            quadratic=self.quadratic,
            audit=self.audit,
            power_t=self.power_t,
            adaptive=self.adaptive,
            working_dir=self.working_dir,
            decay_learning_rate=self.decay_learning_rate,
            initial_t=self.initial_t,
            minibatch=self.minibatch,
            total=self.total,
            node=self.node,
            unique_id=self.unique_id,
            span_server=self.span_server,
            bfgs=self.bfgs,
            oaa=self.oaa,
            ect=self.ect,
            csoaa=self.csoaa,
            wap=self.wap,
            cb=self.cb,
            cb_type=self.cb_type,
            old_model=self.old_model,
            incremental=self.incremental,
            mem=self.mem,
            nn=self.nn,
            invariant=self.invariant,
            normalized=self.normalized,
            sgd=self.sgd,
            ignore=self.ignore
        )
        self.vw_.training(X)
        return self

    def predict(self, X):
        """Run VW in test mode and return raw predictions as an array."""
        if type(X) is np.ndarray:
            if self.columns is None: raise Exception('VowpalWabbit requires columns be set')
            X = pd.DataFrame(X, columns=self.columns)
        if type(X) is pd.DataFrame: X = X.to_vw()
        self.vw_.predicting(X)
        raw = self.vw_.read_predictions_()
        return np.asarray(list(raw))

    def predict_proba(self, X):
        """Return a (n, 2) array of [P(neg), P(pos)] via a sigmoid."""
        if type(X) is np.ndarray:
            if self.columns is None: raise Exception('VowpalWabbit requires columns be set')
            X = pd.DataFrame(X, columns=self.columns)
        if type(X) is pd.DataFrame: X = X.to_vw()
        self.vw_.predicting(X)
        preds = list(self.vw_.read_predictions_())
        def sig(p):
            # Guard exp overflow for very large negative margins.
            if p < -100: return 0
            return 1 / (1 + math.exp(-p))
        predictions = np.asarray([sig(p) for p in preds])
        return np.vstack([1 - predictions, predictions]).T
class VowpalWabbitRegressor(sklearn.base.RegressorMixin, _VW):
    """Regression flavour of the VW wrapper (all behaviour in _VW)."""
    pass
class VowpalWabbitClassifier(sklearn.base.ClassifierMixin, _VW):
    """Classification flavour of the VW wrapper (all behaviour in _VW)."""
    pass
class VW:
def __init__(self,
logger=None,
vw=_vw_default_path,
moniker=None,
name=None,
bits=None,
loss=None,
passes=None,
silent=False,
l1=None,
l2=None,
learning_rate=None,
quadratic=None,
cubic=None,
audit=None,
power_t=None,
adaptive=False,
working_dir=None,
decay_learning_rate=None,
initial_t=None,
lda=None,
lda_D=None,
lda_rho=None,
lda_alpha=None,
minibatch=None,
total=None,
node=None,
unique_id=None,
span_server=None,
bfgs=None,
oaa=None,
ect=None,
csoaa=None,
wap=None,
cb=None,
cb_type=None,
old_model=None,
incremental=False,
mem=None,
nn=None,
holdout_off=None,
no_model=None,
invariant=False,
normalized=False,
sgd=False,
ignore=None,
**kwargs):
assert moniker and passes
self.node = node
self.total = total
self.unique_id = unique_id
self.span_server = span_server
if self.node is not None:
assert self.total is not None
assert self.unique_id is not None
assert self.span_server is not None
if name is None:
self.handle = '%s' % moniker
else:
self.handle = '%s.%s' % (moniker, name)
if self.node is not None:
self.handle = "%s.%d" % (self.handle, self.node)
if old_model is None:
self.filename = '%s.model' % self.handle
self.incremental = False
else:
self.filename = old_model
self.incremental = True
self.name = name
self.bits = bits
self.loss = loss
self.vw = vw
self.l1 = l1
self.l2 = l2
self.learning_rate = learning_rate
self.silent = silent
self.passes = passes
self.quadratic = quadratic
self.cubic = cubic
self.power_t = power_t
self.adaptive = adaptive
self.decay_learning_rate = decay_learning_rate
self.audit = audit
self.initial_t = initial_t
self.sgd = sgd
self.lda = lda
self.lda_D = lda_D
self.lda_rho = lda_rho
self.lda_alpha = lda_alpha
self.minibatch = minibatch
self.oaa = oaa
self.ect=ect
self.csoaa=csoaa
self.wap=wap
self.cb=cb
self.cb_type=cb_type
self.bfgs = bfgs
self.mem = mem
self.nn = nn
self.holdout_off = holdout_off
self.no_model = no_model
self.invariant = invariant
self.normalized = normalized
self.sgd = sgd
self.ignore = ignore
self.tmpdir = 'tmpfiles'
if not os.path.isdir(self.tmpdir): os.mkdir(self.tmpdir)
# Do some sanity checking for compatability between models
if self.lda:
assert not self.l1
assert not self.l1
assert not self.l2
assert not self.loss
assert not self.adaptive
assert not self.oaa
assert not self.csoaa
assert not self.wap
assert not self.cb
assert not self.cb_type
assert not self.ect
assert not self.bfgs
else:
assert not self.lda_D
assert not self.lda_rho
assert not self.lda_alpha
assert not self.minibatch
if self.sgd:
assert not self.adaptive
assert not self.invariant
assert not self.normalized
self.working_directory = working_dir or os.getcwd()
def vw_base_command(self, base, is_train):
    """Assemble the portion of the VW command line shared by train and test runs.

    :param base: list of leading command tokens (typically ``[self.vw]``);
        extended in place with one token per configured option.
    :param is_train: when True, include the multiclass/cost-sensitive options
        that are only valid at training time.
    :return: the full command as a single space-joined string.
    """
    args = base
    # Write a model file unless explicitly suppressed via no_model.
    if self.no_model is None:
        args.append('-f %s' % self.get_model_file())
    # (format, value) pairs emitted in the original option order; a pair is
    # skipped when its value is None (i.e. the option was not configured).
    scalar_opts = [
        ('-b %d', self.bits),
        ('--learning_rate=%f', self.learning_rate),
        ('--l1=%f', self.l1),
        ('--l2=%f', self.l2),
        ('--initial_t=%f', self.initial_t),
        ('-q %s', self.quadratic),
        ('--cubic %s', self.cubic),
        ('--power_t=%f', self.power_t),
        ('--loss_function=%s', self.loss),
        ('--decay_learning_rate=%f', self.decay_learning_rate),
        ('--lda=%d', self.lda),
        ('--lda_D=%d', self.lda_D),
        ('--lda_rho=%f', self.lda_rho),
        ('--lda_alpha=%f', self.lda_alpha),
        ('--minibatch=%d', self.minibatch),
    ]
    if is_train:
        # Reduction options only make sense when fitting a model.
        scalar_opts += [
            ('--oaa=%d', self.oaa),
            ('--ect=%d', self.ect),
            ('--csoaa=%d', self.csoaa),
            ('--wap=%d', self.wap),
            ('--cb=%d', self.cb),
            ('--cb_type %s', self.cb_type),
        ]
    # Cluster / memory options.
    scalar_opts += [
        ('--unique_id=%d', self.unique_id),
        ('--total=%d', self.total),
        ('--node=%d', self.node),
        ('--span_server=%s', self.span_server),
        ('--mem=%d', self.mem),
    ]
    for fmt, value in scalar_opts:
        if value is not None:
            args.append(fmt % value)
    # Boolean switches: emitted when truthy.
    for flag, enabled in [
        ('--audit', self.audit),
        ('--bfgs', self.bfgs),
        ('--adaptive', self.adaptive),
        ('--invariant', self.invariant),
        ('--normalized', self.normalized),
        ('--sgd', self.sgd),
    ]:
        if enabled:
            args.append(flag)
    if self.ignore is not None:
        args.append('--ignore=%d' % self.ignore)
    if self.nn is not None:
        args.append('--nn=%d' % self.nn)
    # holdout_off is keyed on "is not None" (any configured value enables it).
    if self.holdout_off is not None:
        args.append('--holdout_off')
    return ' '.join(args)
def vw_train_command(self, cache_file):
    """Build the full VW training command line.

    Incremental mode (existing model on disk) resumes from the old model via
    ``-i``; otherwise a fresh training run is started.

    :param cache_file: currently unused — the command relies on ``-c``
        (VW's implicit cache file) instead.
    :return: the complete command string.
    """
    model_file = self.get_model_file()
    if self.incremental and os.path.exists(model_file):
        return self.vw_base_command([self.vw], True) + ' --passes %d -c -i %s' \
            % (self.passes, model_file)
    print('No existing model file or not options.incremental')
    return self.vw_base_command([self.vw], True) + ' --passes %d -c' \
        % (self.passes)
def vw_test_command(self, model_file, prediction_file):
    """Build the VW command line for batch prediction (``-t`` test-only mode)."""
    options = ' '.join(['-t', '-i', model_file, '-r', prediction_file])
    return self.vw_base_command([self.vw], False) + ' ' + options
def training(self, instances):
    """Train a model on *instances*.

    :param instances: either a path (str) to a VW-format data file, or an
        iterable of VW example lines which is first written to a temp file.
    """
    # isinstance instead of `type(...) is str` so str subclasses work too.
    if isinstance(instances, str):
        # Already a file on disk; train on it directly.
        self.start_training(instances)
        self.close_process()
        return
    tmp_path = self.save_tmp_file(instances, True)
    self.start_training(tmp_path)
    self.close_process()
    self.del_file(tmp_path)
def predicting(self, instances):
    """Run prediction over *instances*.

    :param instances: either a path (str) to a VW-format data file, or an
        iterable of VW example lines which is first written to a temp file.
    """
    # isinstance instead of `type(...) is str` so str subclasses work too.
    if isinstance(instances, str):
        # Already a file on disk; predict on it directly.
        self.start_predicting(instances)
        self.close_process()
        return
    tmp_path = self.save_tmp_file(instances, False)
    self.start_predicting(tmp_path)
    self.close_process()
    self.del_file(tmp_path)
def save_tmp_file(self, instances, training=True):
    """Write *instances* (an iterable of VW example strings) to a temp file.

    :return: path of the file written.
    """
    f = self.tmpfile('_tmp_' + ('training' if training else 'testing') + '_file.vw.')
    # BUGFIX: the original opened the file in binary mode ('wb') and then
    # wrote a str, which raises TypeError on Python 3. Use text mode.
    with open(f, 'w') as fs:
        fs.write('\n'.join(instances))
    return f
def tmpfile(self, suffix):
    """Create a unique empty file in ``self.tmpdir`` and return its path.

    :param suffix: filename suffix for the temp file.
    :return: path of the created file, rooted at ``self.tmpdir``.
    """
    fd, path = tempfile.mkstemp(dir=self.tmpdir, suffix=suffix)
    os.close(fd)
    # BUGFIX: the original split on '\\' (the Windows separator only), which
    # on POSIX left the full absolute path intact and produced a broken
    # "tmpdir//abs/path" result. os.path.basename is portable.
    return self.tmpdir + '/' + os.path.basename(path)
def start_training(self, training_file):
    """Spawn a VW subprocess training on *training_file*.

    The process handle is stored in ``self.vw_process``; call
    ``close_process`` to wait for completion.
    """
    cache_file = self.tmpdir + '/' + self.handle + '.cache'
    model_file = self.get_model_file()
    if not self.incremental:
        # A fresh (non-incremental) run must not reuse stale artifacts.
        for stale in (cache_file, model_file):
            self.del_file(stale)
    command = self.vw_train_command(cache_file)
    self.vw_process = self.make_subprocess(command, training_file)
def close_process(self):
    """Wait for the running VW subprocess and raise if it failed.

    :raises Exception: when the process exits with a non-zero status.
    """
    assert self.vw_process
    exit_code = self.vw_process.wait()
    if exit_code != 0:
        raise Exception(
            "vw_process %d (%s) exited abnormally with return code %d"
            % (self.vw_process.pid, self.vw_process.command, self.vw_process.returncode))
def start_predicting(self, testing_file):
    """Spawn a VW subprocess scoring *testing_file* against the current model.

    Stores the process handle in ``self.vw_process`` and the output path in
    ``self.prediction_file``.
    """
    model_file = self.get_model_file()
    # Each call gets a unique prediction filename: several processes may be
    # predicting against the same model concurrently.
    prediction_file = self.tmpfile(self.handle + '.prediction')
    command = self.vw_test_command(model_file, prediction_file)
    self.vw_process = self.make_subprocess(command, testing_file)
    self.prediction_file = prediction_file
def parse_prediction(self, p):
    """Parse one VW prediction line.

    LDA models emit one weight per topic (returned as an iterator of floats);
    every other model emits a scalar as the first whitespace token.
    """
    tokens = p.split()
    if self.lda:
        return map(float, tokens)
    return float(tokens[0])
def read_predictions_(self):
    """Yield parsed predictions from the last prediction run, then discard the file.

    Generator: predictions are produced lazily, one per line of
    ``self.prediction_file``.
    """
    # FIX: use a context manager so the file handle is closed even if the
    # consumer abandons the generator mid-iteration (original leaked it).
    with open(self.prediction_file) as predictions:
        for line in predictions:
            yield self.parse_prediction(line)
    self.del_file(self.prediction_file)
def del_file(self, file):
    """Remove *file* from disk — currently a deliberate no-op.

    The actual removal below is commented out, so temporary/cache/model
    files are left in place. NOTE(review): presumably disabled for
    debugging or to avoid file-locking issues — confirm before re-enabling.
    """
    #try: os.remove(file)
    #except OSError: pass
    pass
def make_subprocess(self, command, file):
stdout = open('nul', 'w')
stderr = open('nul', 'w') if self.silent else sys.stderr
commands = shlex.split(str(command))
commands += ['-d', file]
1D7D4 MATHEMATICAL BOLD DIGIT SIX
1D7D5 MATHEMATICAL BOLD DIGIT SEVEN
1D7D6 MATHEMATICAL BOLD DIGIT EIGHT
1D7D7 MATHEMATICAL BOLD DIGIT NINE
1D7D8 MATHEMATICAL DOUBLE-STRUCK DIGIT ZERO
1D7D9 MATHEMATICAL DOUBLE-STRUCK DIGIT ONE
1D7DA MATHEMATICAL DOUBLE-STRUCK DIGIT TWO
1D7DB MATHEMATICAL DOUBLE-STRUCK DIGIT THREE
1D7DC MATHEMATICAL DOUBLE-STRUCK DIGIT FOUR
1D7DD MATHEMATICAL DOUBLE-STRUCK DIGIT FIVE
1D7DE MATHEMATICAL DOUBLE-STRUCK DIGIT SIX
1D7DF MATHEMATICAL DOUBLE-STRUCK DIGIT SEVEN
1D7E0 MATHEMATICAL DOUBLE-STRUCK DIGIT EIGHT
1D7E1 MATHEMATICAL DOUBLE-STRUCK DIGIT NINE
1D7E2 MATHEMATICAL SANS-SERIF DIGIT ZERO
1D7E3 MATHEMATICAL SANS-SERIF DIGIT ONE
1D7E4 MATHEMATICAL SANS-SERIF DIGIT TWO
1D7E5 MATHEMATICAL SANS-SERIF DIGIT THREE
1D7E6 MATHEMATICAL SANS-SERIF DIGIT FOUR
1D7E7 MATHEMATICAL SANS-SERIF DIGIT FIVE
1D7E8 MATHEMATICAL SANS-SERIF DIGIT SIX
1D7E9 MATHEMATICAL SANS-SERIF DIGIT SEVEN
1D7EA MATHEMATICAL SANS-SERIF DIGIT EIGHT
1D7EB MATHEMATICAL SANS-SERIF DIGIT NINE
1D7EC MATHEMATICAL SANS-SERIF BOLD DIGIT ZERO
1D7ED MATHEMATICAL SANS-SERIF BOLD DIGIT ONE
1D7EE MATHEMATICAL SANS-SERIF BOLD DIGIT TWO
1D7EF MATHEMATICAL SANS-SERIF BOLD DIGIT THREE
1D7F0 MATHEMATICAL SANS-SERIF BOLD DIGIT FOUR
1D7F1 MATHEMATICAL SANS-SERIF BOLD DIGIT FIVE
1D7F2 MATHEMATICAL SANS-SERIF BOLD DIGIT SIX
1D7F3 MATHEMATICAL SANS-SERIF BOLD DIGIT SEVEN
1D7F4 MATHEMATICAL SANS-SERIF BOLD DIGIT EIGHT
1D7F5 MATHEMATICAL SANS-SERIF BOLD DIGIT NINE
1D7F6 MATHEMATICAL MONOSPACE DIGIT ZERO
1D7F7 MATHEMATICAL MONOSPACE DIGIT ONE
1D7F8 MATHEMATICAL MONOSPACE DIGIT TWO
1D7F9 MATHEMATICAL MONOSPACE DIGIT THREE
1D7FA MATHEMATICAL MONOSPACE DIGIT FOUR
1D7FB MATHEMATICAL MONOSPACE DIGIT FIVE
1D7FC MATHEMATICAL MONOSPACE DIGIT SIX
1D7FD MATHEMATICAL MONOSPACE DIGIT SEVEN
1D7FE MATHEMATICAL MONOSPACE DIGIT EIGHT
1D7FF MATHEMATICAL MONOSPACE DIGIT NINE
1F000 MAHJONG TILE EAST WIND
1F001 MAHJONG TILE SOUTH WIND
1F002 MAHJONG TILE WEST WIND
1F003 MAHJONG TILE NORTH WIND
1F004 MAHJONG TILE RED DRAGON
1F005 MAHJONG TILE GREEN DRAGON
1F006 MAHJONG TILE WHITE DRAGON
1F007 MAHJONG TILE ONE OF CHARACTERS
1F008 MAHJONG TILE TWO OF CHARACTERS
1F009 MAHJONG TILE THREE OF CHARACTERS
1F00A MAHJONG TILE FOUR OF CHARACTERS
1F00B MAHJONG TILE FIVE OF CHARACTERS
1F00C MAHJONG TILE SIX OF CHARACTERS
1F00D MAHJONG TILE SEVEN OF CHARACTERS
1F00E MAHJONG TILE EIGHT OF CHARACTERS
1F00F MAHJONG TILE NINE OF CHARACTERS
1F010 MAHJONG TILE ONE OF BAMBOOS
1F011 MAHJONG TILE TWO OF BAMBOOS
1F012 MAHJONG TILE THREE OF BAMBOOS
1F013 MAHJONG TILE FOUR OF BAMBOOS
1F014 MAHJONG TILE FIVE OF BAMBOOS
1F015 MAHJONG TILE SIX OF BAMBOOS
1F016 MAHJONG TILE SEVEN OF BAMBOOS
1F017 MAHJONG TILE EIGHT OF BAMBOOS
1F018 MAHJONG TILE NINE OF BAMBOOS
1F019 MAHJONG TILE ONE OF CIRCLES
1F01A MAHJONG TILE TWO OF CIRCLES
1F01B MAHJONG TILE THREE OF CIRCLES
1F01C MAHJONG TILE FOUR OF CIRCLES
1F01D MAHJONG TILE FIVE OF CIRCLES
1F01E MAHJONG TILE SIX OF CIRCLES
1F01F MAHJONG TILE SEVEN OF CIRCLES
1F020 MAHJONG TILE EIGHT OF CIRCLES
1F021 MAHJONG TILE NINE OF CIRCLES
1F022 MAHJONG TILE PLUM
1F023 MAHJONG TILE ORCHID
1F024 MAHJONG TILE BAMBOO
1F025 MAHJONG TILE CHRYSANTHEMUM
1F026 MAHJONG TILE SPRING
1F027 MAHJONG TILE SUMMER
1F028 MAHJONG TILE AUTUMN
1F029 MAHJONG TILE WINTER
1F02A MAHJONG TILE JOKER
1F02B MAHJONG TILE BACK
1F030 DOMINO TILE HORIZONTAL BACK
1F031 DOMINO TILE HORIZONTAL-00-00
1F032 DOMINO TILE HORIZONTAL-00-01
1F033 DOMINO TILE HORIZONTAL-00-02
1F034 DOMINO TILE HORIZONTAL-00-03
1F035 DOMINO TILE HORIZONTAL-00-04
1F036 DOMINO TILE HORIZONTAL-00-05
1F037 DOMINO TILE HORIZONTAL-00-06
1F038 DOMINO TILE HORIZONTAL-01-00
1F039 DOMINO TILE HORIZONTAL-01-01
1F03A DOMINO TILE HORIZONTAL-01-02
1F03B DOMINO TILE HORIZONTAL-01-03
1F03C DOMINO TILE HORIZONTAL-01-04
1F03D DOMINO TILE HORIZONTAL-01-05
1F03E DOMINO TILE HORIZONTAL-01-06
1F03F DOMINO TILE HORIZONTAL-02-00
1F040 DOMINO TILE HORIZONTAL-02-01
1F041 DOMINO TILE HORIZONTAL-02-02
1F042 DOMINO TILE HORIZONTAL-02-03
1F043 DOMINO TILE HORIZONTAL-02-04
1F044 DOMINO TILE HORIZONTAL-02-05
1F045 DOMINO TILE HORIZONTAL-02-06
1F046 DOMINO TILE HORIZONTAL-03-00
1F047 DOMINO TILE HORIZONTAL-03-01
1F048 DOMINO TILE HORIZONTAL-03-02
1F049 DOMINO TILE HORIZONTAL-03-03
1F04A DOMINO TILE HORIZONTAL-03-04
1F04B DOMINO TILE HORIZONTAL-03-05
1F04C DOMINO TILE HORIZONTAL-03-06
1F04D DOMINO TILE HORIZONTAL-04-00
1F04E DOMINO TILE HORIZONTAL-04-01
1F04F DOMINO TILE HORIZONTAL-04-02
1F050 DOMINO TILE HORIZONTAL-04-03
1F051 DOMINO TILE HORIZONTAL-04-04
1F052 DOMINO TILE HORIZONTAL-04-05
1F053 DOMINO TILE HORIZONTAL-04-06
1F054 DOMINO TILE HORIZONTAL-05-00
1F055 DOMINO TILE HORIZONTAL-05-01
1F056 DOMINO TILE HORIZONTAL-05-02
1F057 DOMINO TILE HORIZONTAL-05-03
1F058 DOMINO TILE HORIZONTAL-05-04
1F059 DOMINO TILE HORIZONTAL-05-05
1F05A DOMINO TILE HORIZONTAL-05-06
1F05B DOMINO TILE HORIZONTAL-06-00
1F05C DOMINO TILE HORIZONTAL-06-01
1F05D DOMINO TILE HORIZONTAL-06-02
1F05E DOMINO TILE HORIZONTAL-06-03
1F05F DOMINO TILE HORIZONTAL-06-04
1F060 DOMINO TILE HORIZONTAL-06-05
1F061 DOMINO TILE HORIZONTAL-06-06
1F062 DOMINO TILE VERTICAL BACK
1F063 DOMINO TILE VERTICAL-00-00
1F064 DOMINO TILE VERTICAL-00-01
1F065 DOMINO TILE VERTICAL-00-02
1F066 DOMINO TILE VERTICAL-00-03
1F067 DOMINO TILE VERTICAL-00-04
1F068 DOMINO TILE VERTICAL-00-05
1F069 DOMINO TILE VERTICAL-00-06
1F06A DOMINO TILE VERTICAL-01-00
1F06B DOMINO TILE VERTICAL-01-01
1F06C DOMINO TILE VERTICAL-01-02
1F06D DOMINO TILE VERTICAL-01-03
1F06E DOMINO TILE VERTICAL-01-04
1F06F DOMINO TILE VERTICAL-01-05
1F070 DOMINO TILE VERTICAL-01-06
1F071 DOMINO TILE VERTICAL-02-00
1F072 DOMINO TILE VERTICAL-02-01
1F073 DOMINO TILE VERTICAL-02-02
1F074 DOMINO TILE VERTICAL-02-03
1F075 DOMINO TILE VERTICAL-02-04
1F076 DOMINO TILE VERTICAL-02-05
1F077 DOMINO TILE VERTICAL-02-06
1F078 DOMINO TILE VERTICAL-03-00
1F079 DOMINO TILE VERTICAL-03-01
1F07A DOMINO TILE VERTICAL-03-02
1F07B DOMINO TILE VERTICAL-03-03
1F07C DOMINO TILE VERTICAL-03-04
1F07D DOMINO TILE VERTICAL-03-05
1F07E DOMINO TILE VERTICAL-03-06
1F07F DOMINO TILE VERTICAL-04-00
1F080 DOMINO TILE VERTICAL-04-01
1F081 DOMINO TILE VERTICAL-04-02
1F082 DOMINO TILE VERTICAL-04-03
1F083 DOMINO TILE VERTICAL-04-04
1F084 DOMINO TILE VERTICAL-04-05
1F085 DOMINO TILE VERTICAL-04-06
1F086 DOMINO TILE VERTICAL-05-00
1F087 DOMINO TILE VERTICAL-05-01
1F088 DOMINO TILE VERTICAL-05-02
1F089 DOMINO TILE VERTICAL-05-03
1F08A DOMINO TILE VERTICAL-05-04
1F08B DOMINO TILE VERTICAL-05-05
1F08C DOMINO TILE VERTICAL-05-06
1F08D DOMINO TILE VERTICAL-06-00
1F08E DOMINO TILE VERTICAL-06-01
1F08F DOMINO TILE VERTICAL-06-02
1F090 DOMINO TILE VERTICAL-06-03
1F091 DOMINO TILE VERTICAL-06-04
1F092 DOMINO TILE VERTICAL-06-05
1F093 DOMINO TILE VERTICAL-06-06
1F100 DIGIT ZERO FULL STOP
1F101 DIGIT ZERO COMMA
1F102 DIGIT ONE COMMA
1F103 DIGIT TWO COMMA
1F104 DIGIT THREE COMMA
1F105 DIGIT FOUR COMMA
1F106 DIGIT FIVE COMMA
1F107 DIGIT SIX COMMA
1F108 DIGIT SEVEN COMMA
1F109 DIGIT EIGHT COMMA
1F10A DIGIT NINE COMMA
1F110 PARENTHESIZED LATIN CAPITAL LETTER A
1F111 PARENTHESIZED LATIN CAPITAL LETTER B
1F112 PARENTHESIZED LATIN CAPITAL LETTER C
1F113 PARENTHESIZED LATIN CAPITAL LETTER D
1F114 PARENTHESIZED LATIN CAPITAL LETTER E
1F115 PARENTHESIZED LATIN CAPITAL LETTER F
1F116 PARENTHESIZED LATIN CAPITAL LETTER G
1F117 PARENTHESIZED LATIN CAPITAL LETTER H
1F118 PARENTHESIZED LATIN CAPITAL LETTER I
1F119 PARENTHESIZED LATIN CAPITAL LETTER J
1F11A PARENTHESIZED LATIN CAPITAL LETTER K
1F11B PARENTHESIZED LATIN CAPITAL LETTER L
1F11C PARENTHESIZED LATIN CAPITAL LETTER M
1F11D PARENTHESIZED LATIN CAPITAL LETTER N
1F11E PARENTHESIZED LATIN CAPITAL LETTER O
1F11F PARENTHESIZED LATIN CAPITAL LETTER P
1F120 PARENTHESIZED LATIN CAPITAL LETTER Q
1F121 PARENTHESIZED LATIN CAPITAL LETTER R
1F122 PARENTHESIZED LATIN CAPITAL LETTER S
1F123 PARENTHESIZED LATIN CAPITAL LETTER T
1F124 PARENTHESIZED LATIN CAPITAL LETTER U
1F125 PARENTHESIZED LATIN CAPITAL LETTER V
1F126 PARENTHESIZED LATIN CAPITAL LETTER W
1F127 PARENTHESIZED LATIN CAPITAL LETTER X
1F128 PARENTHESIZED LATIN CAPITAL LETTER Y
1F129 PARENTHESIZED LATIN CAPITAL LETTER Z
1F12A TORTOISE SHELL BRACKETED LATIN CAPITAL LETTER S
1F12B CIRCLED ITALIC LATIN CAPITAL LETTER C
1F12C CIRCLED ITALIC LATIN CAPITAL LETTER R
1F12D CIRCLED CD
1F12E CIRCLED WZ
1F131 SQUARED LATIN CAPITAL LETTER B
1F13D SQUARED LATIN CAPITAL LETTER N
1F13F SQUARED LATIN CAPITAL LETTER P
1F142 SQUARED LATIN CAPITAL LETTER S
1F146 SQUARED LATIN CAPITAL LETTER W
1F14A SQUARED HV
1F14B SQUARED MV
1F14C SQUARED SD
1F14D SQUARED SS
1F14E SQUARED PPV
1F157 NEGATIVE CIRCLED LATIN CAPITAL LETTER H
1F15F NEGATIVE CIRCLED LATIN CAPITAL LETTER P
1F179 NEGATIVE SQUARED LATIN CAPITAL LETTER J
1F17B NEGATIVE SQUARED LATIN CAPITAL LETTER L
1F17C NEGATIVE SQUARED LATIN CAPITAL LETTER M
1F17F NEGATIVE SQUARED LATIN CAPITAL LETTER P
1F18A CROSSED NEGATIVE SQUARED LATIN CAPITAL LETTER P
1F18B NEGATIVE SQUARED IC
1F18C NEGATIVE SQUARED PA
1F18D NEGATIVE SQUARED SA
1F190 SQUARE DJ
1F200 SQUARE HIRAGANA HOKA
1F210 SQUARED CJK UNIFIED IDEOGRAPH-624B
1F211 SQUARED CJK UNIFIED IDEOGRAPH-5B57
1F212 SQUARED CJK UNIFIED IDEOGRAPH-53CC
1F213 SQUARED KATAKANA DE
1F214 SQUARED CJK UNIFIED IDEOGRAPH-4E8C
1F215 SQUARED CJK UNIFIED IDEOGRAPH-591A
1F216 SQUARED CJK UNIFIED IDEOGRAPH-89E3
1F217 SQUARED CJK UNIFIED IDEOGRAPH-5929
1F218 SQUARED CJK UNIFIED IDEOGRAPH-4EA4
1F219 SQUARED CJK UNIFIED IDEOGRAPH-6620
1F21A SQUARED CJK UNIFIED IDEOGRAPH-7121
1F21B SQUARED CJK UNIFIED IDEOGRAPH-6599
1F21C SQUARED CJK UNIFIED IDEOGRAPH-524D
1F21D SQUARED CJK UNIFIED IDEOGRAPH-5F8C
1F21E SQUARED CJK UNIFIED IDEOGRAPH-518D
1F21F SQUARED CJK UNIFIED IDEOGRAPH-65B0
1F220 SQUARED CJK UNIFIED IDEOGRAPH-521D
1F221 SQUARED CJK UNIFIED IDEOGRAPH-7D42
1F222 SQUARED CJK UNIFIED IDEOGRAPH-751F
1F223 SQUARED CJK UNIFIED IDEOGRAPH-8CA9
1F224 SQUARED CJK UNIFIED IDEOGRAPH-58F0
1F225 SQUARED CJK UNIFIED IDEOGRAPH-5439
1F226 SQUARED CJK UNIFIED IDEOGRAPH-6F14
1F227 SQUARED CJK UNIFIED IDEOGRAPH-6295
1F228 SQUARED CJK UNIFIED IDEOGRAPH-6355
1F229 SQUARED CJK UNIFIED IDEOGRAPH-4E00
1F22A SQUARED CJK UNIFIED IDEOGRAPH-4E09
1F22B SQUARED CJK UNIFIED IDEOGRAPH-904A
1F22C SQUARED CJK UNIFIED IDEOGRAPH-5DE6
1F22D SQUARED CJK UNIFIED IDEOGRAPH-4E2D
1F22E SQUARED CJK UNIFIED IDEOGRAPH-53F3
1F22F SQUARED CJK UNIFIED IDEOGRAPH-6307
1F230 SQUARED CJK UNIFIED IDEOGRAPH-8D70
1F231 SQUARED CJK UNIFIED IDEOGRAPH-6253
1F240 TORTOISE SHELL BRACKETED CJK UNIFIED IDEOGRAPH-672C
1F241 TORTOISE SHELL BRACKETED CJK UNIFIED IDEOGRAPH-4E09
1F242 TORTOISE SHELL BRACKETED CJK UNIFIED IDEOGRAPH-4E8C
1F243 TORTOISE SHELL BRACKETED CJK UNIFIED IDEOGRAPH-5B89
1F244 TORTOISE SHELL BRACKETED CJK UNIFIED IDEOGRAPH-70B9
1F245 TORTOISE SHELL BRACKETED CJK UNIFIED IDEOGRAPH-6253
1F246 TORTOISE SHELL BRACKETED CJK UNIFIED IDEOGRAPH-76D7
1F247 TORTOISE SHELL BRACKETED CJK UNIFIED IDEOGRAPH-52DD
1F248 TORTOISE SHELL BRACKETED CJK UNIFIED IDEOGRAPH-6557
20000 <CJK Ideograph Extension B, First>
2A6D6 <CJK Ideograph Extension B, Last>
2A700 <CJK Ideograph Extension C, First>
2B734 <CJK Ideograph Extension C, Last>
2F800 CJK COMPATIBILITY IDEOGRAPH-2F800
2F801 CJK COMPATIBILITY IDEOGRAPH-2F801
2F802 CJK COMPATIBILITY IDEOGRAPH-2F802
2F803 CJK COMPATIBILITY IDEOGRAPH-2F803
2F804 CJK COMPATIBILITY IDEOGRAPH-2F804
2F805 CJK COMPATIBILITY IDEOGRAPH-2F805
2F806 CJK COMPATIBILITY IDEOGRAPH-2F806
2F807 CJK COMPATIBILITY IDEOGRAPH-2F807
2F808 CJK COMPATIBILITY IDEOGRAPH-2F808
2F809 CJK COMPATIBILITY IDEOGRAPH-2F809
2F80A CJK COMPATIBILITY IDEOGRAPH-2F80A
2F80B CJK COMPATIBILITY IDEOGRAPH-2F80B
2F80C CJK COMPATIBILITY IDEOGRAPH-2F80C
2F80D CJK COMPATIBILITY IDEOGRAPH-2F80D
2F80E CJK COMPATIBILITY IDEOGRAPH-2F80E
2F80F CJK COMPATIBILITY IDEOGRAPH-2F80F
2F810 CJK COMPATIBILITY IDEOGRAPH-2F810
2F811 CJK COMPATIBILITY IDEOGRAPH-2F811
2F812 CJK COMPATIBILITY IDEOGRAPH-2F812
2F813 CJK COMPATIBILITY IDEOGRAPH-2F813
2F814 CJK COMPATIBILITY IDEOGRAPH-2F814
2F815 CJK COMPATIBILITY IDEOGRAPH-2F815
2F816 CJK COMPATIBILITY IDEOGRAPH-2F816
2F817 CJK COMPATIBILITY IDEOGRAPH-2F817
2F818 CJK COMPATIBILITY IDEOGRAPH-2F818
2F819 CJK COMPATIBILITY IDEOGRAPH-2F819
2F81A CJK COMPATIBILITY IDEOGRAPH-2F81A
2F81B CJK COMPATIBILITY IDEOGRAPH-2F81B
2F81C CJK COMPATIBILITY IDEOGRAPH-2F81C
2F81D CJK COMPATIBILITY IDEOGRAPH-2F81D
2F81E CJK COMPATIBILITY IDEOGRAPH-2F81E
2F81F CJK COMPATIBILITY IDEOGRAPH-2F81F
2F820 CJK COMPATIBILITY IDEOGRAPH-2F820
2F821 CJK COMPATIBILITY IDEOGRAPH-2F821
2F822 CJK COMPATIBILITY IDEOGRAPH-2F822
2F823 CJK COMPATIBILITY IDEOGRAPH-2F823
2F824 CJK COMPATIBILITY IDEOGRAPH-2F824
2F825 CJK COMPATIBILITY IDEOGRAPH-2F825
2F826 CJK COMPATIBILITY IDEOGRAPH-2F826
2F827 CJK COMPATIBILITY IDEOGRAPH-2F827
2F828 CJK COMPATIBILITY IDEOGRAPH-2F828
2F829 CJK COMPATIBILITY IDEOGRAPH-2F829
2F82A CJK COMPATIBILITY IDEOGRAPH-2F82A
2F82B CJK COMPATIBILITY IDEOGRAPH-2F82B
2F82C CJK COMPATIBILITY IDEOGRAPH-2F82C
2F82D CJK COMPATIBILITY IDEOGRAPH-2F82D
2F82E CJK COMPATIBILITY IDEOGRAPH-2F82E
2F82F CJK COMPATIBILITY IDEOGRAPH-2F82F
2F830 CJK COMPATIBILITY IDEOGRAPH-2F830
2F831 CJK COMPATIBILITY IDEOGRAPH-2F831
2F832 CJK COMPATIBILITY IDEOGRAPH-2F832
2F833 CJK COMPATIBILITY IDEOGRAPH-2F833
2F834 CJK COMPATIBILITY IDEOGRAPH-2F834
2F835 CJK COMPATIBILITY IDEOGRAPH-2F835
2F836 CJK COMPATIBILITY IDEOGRAPH-2F836
2F837 CJK COMPATIBILITY IDEOGRAPH-2F837
2F838 CJK COMPATIBILITY IDEOGRAPH-2F838
2F839 CJK COMPATIBILITY IDEOGRAPH-2F839
2F83A CJK COMPATIBILITY IDEOGRAPH-2F83A
2F83B CJK COMPATIBILITY IDEOGRAPH-2F83B
2F83C CJK COMPATIBILITY IDEOGRAPH-2F83C
2F83D CJK COMPATIBILITY IDEOGRAPH-2F83D
2F83E CJK COMPATIBILITY IDEOGRAPH-2F83E
2F83F CJK COMPATIBILITY IDEOGRAPH-2F83F
2F840 CJK COMPATIBILITY IDEOGRAPH-2F840
2F841 CJK COMPATIBILITY IDEOGRAPH-2F841
2F842 CJK COMPATIBILITY IDEOGRAPH-2F842
2F843 CJK COMPATIBILITY IDEOGRAPH-2F843
2F844 CJK COMPATIBILITY IDEOGRAPH-2F844
2F845 CJK COMPATIBILITY IDEOGRAPH-2F845
2F846 CJK COMPATIBILITY IDEOGRAPH-2F846
2F847 CJK COMPATIBILITY IDEOGRAPH-2F847
2F848 CJK COMPATIBILITY IDEOGRAPH-2F848
2F849 CJK COMPATIBILITY IDEOGRAPH-2F849
2F84A CJK COMPATIBILITY IDEOGRAPH-2F84A
2F84B CJK COMPATIBILITY IDEOGRAPH-2F84B
2F84C CJK COMPATIBILITY IDEOGRAPH-2F84C
2F84D CJK COMPATIBILITY IDEOGRAPH-2F84D
2F84E CJK COMPATIBILITY IDEOGRAPH-2F84E
2F84F CJK COMPATIBILITY IDEOGRAPH-2F84F
2F850 CJK COMPATIBILITY IDEOGRAPH-2F850
2F851 CJK COMPATIBILITY IDEOGRAPH-2F851
2F852 CJK COMPATIBILITY IDEOGRAPH-2F852
2F853 CJK COMPATIBILITY IDEOGRAPH-2F853
2F854 CJK COMPATIBILITY IDEOGRAPH-2F854
2F855 CJK COMPATIBILITY IDEOGRAPH-2F855
2F856 CJK COMPATIBILITY IDEOGRAPH-2F856
2F857 CJK COMPATIBILITY IDEOGRAPH-2F857
2F858 CJK COMPATIBILITY IDEOGRAPH-2F858
2F859 CJK COMPATIBILITY IDEOGRAPH-2F859
2F85A CJK COMPATIBILITY IDEOGRAPH-2F85A
2F85B CJK COMPATIBILITY IDEOGRAPH-2F85B
2F85C CJK COMPATIBILITY IDEOGRAPH-2F85C
2F85D CJK COMPATIBILITY IDEOGRAPH-2F85D
2F85E CJK COMPATIBILITY IDEOGRAPH-2F85E
2F85F CJK COMPATIBILITY IDEOGRAPH-2F85F
2F860 CJK COMPATIBILITY IDEOGRAPH-2F860
2F861 CJK COMPATIBILITY IDEOGRAPH-2F861
2F862 CJK COMPATIBILITY IDEOGRAPH-2F862
2F863 CJK COMPATIBILITY IDEOGRAPH-2F863
2F864 CJK COMPATIBILITY IDEOGRAPH-2F864
2F865 CJK COMPATIBILITY IDEOGRAPH-2F865
2F866 CJK COMPATIBILITY IDEOGRAPH-2F866
2F867 CJK COMPATIBILITY IDEOGRAPH-2F867
2F868 CJK COMPATIBILITY IDEOGRAPH-2F868
2F869 CJK COMPATIBILITY IDEOGRAPH-2F869
2F86A CJK COMPATIBILITY IDEOGRAPH-2F86A
2F86B CJK COMPATIBILITY IDEOGRAPH-2F86B
2F86C CJK COMPATIBILITY IDEOGRAPH-2F86C
2F86D CJK COMPATIBILITY IDEOGRAPH-2F86D
2F86E CJK COMPATIBILITY IDEOGRAPH-2F86E
2F86F CJK COMPATIBILITY IDEOGRAPH-2F86F
2F870 CJK COMPATIBILITY IDEOGRAPH-2F870
2F871 CJK COMPATIBILITY IDEOGRAPH-2F871
2F872 CJK COMPATIBILITY IDEOGRAPH-2F872
2F873 CJK COMPATIBILITY IDEOGRAPH-2F873
2F874 CJK COMPATIBILITY IDEOGRAPH-2F874
2F875 CJK COMPATIBILITY IDEOGRAPH-2F875
2F876 CJK COMPATIBILITY IDEOGRAPH-2F876
2F877 CJK COMPATIBILITY IDEOGRAPH-2F877
2F878 CJK COMPATIBILITY IDEOGRAPH-2F878
2F879 CJK COMPATIBILITY IDEOGRAPH-2F879
2F87A CJK COMPATIBILITY IDEOGRAPH-2F87A
2F87B CJK COMPATIBILITY IDEOGRAPH-2F87B
2F87C CJK COMPATIBILITY IDEOGRAPH-2F87C
2F87D CJK COMPATIBILITY IDEOGRAPH-2F87D
2F87E CJK COMPATIBILITY IDEOGRAPH-2F87E
2F87F CJK COMPATIBILITY IDEOGRAPH-2F87F
2F880 CJK COMPATIBILITY IDEOGRAPH-2F880
2F881 CJK COMPATIBILITY IDEOGRAPH-2F881
2F882 CJK COMPATIBILITY IDEOGRAPH-2F882
2F883 CJK COMPATIBILITY IDEOGRAPH-2F883
2F884 CJK COMPATIBILITY IDEOGRAPH-2F884
2F885 CJK COMPATIBILITY IDEOGRAPH-2F885
2F886 CJK COMPATIBILITY IDEOGRAPH-2F886
2F887 CJK COMPATIBILITY IDEOGRAPH-2F887
2F888 CJK COMPATIBILITY IDEOGRAPH-2F888
2F889 CJK COMPATIBILITY IDEOGRAPH-2F889
2F88A CJK COMPATIBILITY IDEOGRAPH-2F88A
2F88B CJK COMPATIBILITY IDEOGRAPH-2F88B
2F88C CJK COMPATIBILITY IDEOGRAPH-2F88C
2F88D CJK COMPATIBILITY IDEOGRAPH-2F88D
2F88E CJK COMPATIBILITY IDEOGRAPH-2F88E
2F88F CJK COMPATIBILITY IDEOGRAPH-2F88F
2F890 CJK COMPATIBILITY IDEOGRAPH-2F890
2F891 CJK COMPATIBILITY IDEOGRAPH-2F891
2F892 CJK COMPATIBILITY IDEOGRAPH-2F892
2F893 CJK COMPATIBILITY IDEOGRAPH-2F893
2F894 CJK COMPATIBILITY IDEOGRAPH-2F894
2F895 CJK COMPATIBILITY IDEOGRAPH-2F895
2F896 CJK COMPATIBILITY IDEOGRAPH-2F896
2F897 CJK COMPATIBILITY IDEOGRAPH-2F897
2F898 CJK COMPATIBILITY IDEOGRAPH-2F898
2F899 CJK COMPATIBILITY IDEOGRAPH-2F899
2F89A CJK COMPATIBILITY IDEOGRAPH-2F89A
2F89B CJK COMPATIBILITY IDEOGRAPH-2F89B
2F89C CJK COMPATIBILITY IDEOGRAPH-2F89C
2F89D CJK COMPATIBILITY IDEOGRAPH-2F89D
2F89E CJK COMPATIBILITY IDEOGRAPH-2F89E
2F89F CJK COMPATIBILITY IDEOGRAPH-2F89F
2F8A0 CJK COMPATIBILITY IDEOGRAPH-2F8A0
2F8A1 CJK COMPATIBILITY IDEOGRAPH-2F8A1
2F8A2 CJK COMPATIBILITY IDEOGRAPH-2F8A2
2F8A3 CJK COMPATIBILITY IDEOGRAPH-2F8A3
2F8A4 CJK COMPATIBILITY IDEOGRAPH-2F8A4
2F8A5 CJK COMPATIBILITY IDEOGRAPH-2F8A5
2F8A6 CJK COMPATIBILITY IDEOGRAPH-2F8A6
2F8A7 CJK COMPATIBILITY IDEOGRAPH-2F8A7
2F8A8 CJK COMPATIBILITY IDEOGRAPH-2F8A8
2F8A9 CJK COMPATIBILITY IDEOGRAPH-2F8A9
2F8AA CJK COMPATIBILITY IDEOGRAPH-2F8AA
2F8AB CJK COMPATIBILITY IDEOGRAPH-2F8AB
2F8AC CJK COMPATIBILITY IDEOGRAPH-2F8AC
2F8AD CJK COMPATIBILITY IDEOGRAPH-2F8AD
2F8AE CJK COMPATIBILITY IDEOGRAPH-2F8AE
2F8AF CJK COMPATIBILITY IDEOGRAPH-2F8AF
2F8B0 CJK COMPATIBILITY IDEOGRAPH-2F8B0
2F8B1 CJK COMPATIBILITY IDEOGRAPH-2F8B1
2F8B2 CJK COMPATIBILITY IDEOGRAPH-2F8B2
2F8B3 CJK COMPATIBILITY IDEOGRAPH-2F8B3
2F8B4 CJK COMPATIBILITY IDEOGRAPH-2F8B4
2F8B5 CJK COMPATIBILITY IDEOGRAPH-2F8B5
2F8B6 CJK COMPATIBILITY IDEOGRAPH-2F8B6
2F8B7 CJK COMPATIBILITY IDEOGRAPH-2F8B7
2F8B8 CJK COMPATIBILITY IDEOGRAPH-2F8B8
2F8B9 CJK COMPATIBILITY IDEOGRAPH-2F8B9
2F8BA CJK COMPATIBILITY IDEOGRAPH-2F8BA
2F8BB CJK COMPATIBILITY IDEOGRAPH-2F8BB
2F8BC CJK COMPATIBILITY IDEOGRAPH-2F8BC
2F8BD CJK COMPATIBILITY IDEOGRAPH-2F8BD
2F8BE CJK COMPATIBILITY IDEOGRAPH-2F8BE
2F8BF CJK COMPATIBILITY IDEOGRAPH-2F8BF
2F8C0 CJK COMPATIBILITY IDEOGRAPH-2F8C0
2F8C1 CJK COMPATIBILITY IDEOGRAPH-2F8C1
2F8C2 CJK COMPATIBILITY IDEOGRAPH-2F8C2
2F8C3 CJK COMPATIBILITY IDEOGRAPH-2F8C3
2F8C4 CJK COMPATIBILITY IDEOGRAPH-2F8C4
2F8C5 CJK COMPATIBILITY IDEOGRAPH-2F8C5
2F8C6 CJK COMPATIBILITY IDEOGRAPH-2F8C6
2F8C7 CJK COMPATIBILITY IDEOGRAPH-2F8C7
2F8C8 CJK COMPATIBILITY IDEOGRAPH-2F8C8
2F8C9 CJK COMPATIBILITY IDEOGRAPH-2F8C9
2F8CA CJK COMPATIBILITY IDEOGRAPH-2F8CA
2F8CB CJK COMPATIBILITY IDEOGRAPH-2F8CB
2F8CC CJK COMPATIBILITY IDEOGRAPH-2F8CC
2F8CD CJK | |
#
# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# Copyright (C) 2018-2021 UAVCAN Development Team <uavcan.org>
# This software is distributed under the terms of the MIT License.
#
import datetime
import inspect
import logging
import types
import typing
import nunavut.lang
from ..templates import LanguageEnvironment
from .extensions import JinjaAssert, UseQuery
from .jinja2 import BaseLoader, Environment, StrictUndefined, select_autoescape
from .jinja2.ext import Extension
from .jinja2.ext import do as jinja_do
from .jinja2.ext import loopcontrols as loopcontrols
from .jinja2.filters import FILTERS as JINJA2_FILTERS
logger = logging.getLogger(__name__)
# +---------------------------------------------------------------------------+
# | JINJA : LanguageTemplateNamespace
# +---------------------------------------------------------------------------+
class LanguageTemplateNamespace:
    """
    Generic attribute namespace used for the reserved globals in the template
    environment.

    Any attribute may be set at any time. ``repr`` emits a constructor
    expression that recreates an equal instance (so a clone can be produced
    via ``exec``). Dictionary-like helpers (:meth:`update`, :meth:`items`,
    :meth:`values`, ``in``) let the object be treated as a mapping of names
    to values.
    """

    def __init__(self, **kwargs: typing.Any):
        # Seed the namespace directly from the keyword arguments.
        self.__dict__.update(kwargs)

    def __repr__(self) -> str:
        # Identifier-named attributes render as keyword arguments; anything
        # else is folded into a trailing **{...} so the repr stays executable.
        regular = []
        irregular = {}
        for key, value in self._get_kwargs():
            if key.isidentifier():
                regular.append("%s=%r" % (key, value))
            else:
                irregular[key] = value
        if irregular:
            regular.append("**%s" % repr(irregular))
        return "%s(%s)" % (type(self).__name__, ", ".join(regular))

    def _get_kwargs(self) -> typing.List[typing.Any]:
        # Insertion-ordered (name, value) pairs backing repr/equality.
        return list(self.__dict__.items())

    def __eq__(self, other: typing.Any) -> bool:
        if not isinstance(other, LanguageTemplateNamespace):
            return NotImplemented
        return vars(self) == vars(other)

    def __contains__(self, key: str) -> bool:
        return key in self.__dict__

    def update(self, update_from: typing.Mapping[str, typing.Any]) -> None:
        """Set one attribute per (key, value) pair of *update_from*."""
        for key in update_from:
            setattr(self, key, update_from[key])

    def items(self) -> typing.ItemsView[str, typing.Any]:
        return self.__dict__.items()

    def values(self) -> typing.ValuesView[typing.Any]:
        return self.__dict__.values()
# +---------------------------------------------------------------------------+
# | JINJA : CodeGenEnvironment
# +---------------------------------------------------------------------------+
class CodeGenEnvironment(Environment):
"""
Jinja Environment optimized for compile-time generation of source code
(i.e. as opposed to dynamically generating webpages).
.. invisible-code-block: python
from nunavut.lang import LanguageContext, Language
from nunavut.jinja import CodeGenEnvironment
from nunavut.jinja.jinja2 import DictLoader
.. code-block:: python
template = 'Hello World'
e = CodeGenEnvironment(loader=DictLoader({'test': template}))
assert 'Hello World' == e.get_template('test').render()
.. warning::
The :attr:`RESERVED_GLOBAL_NAMESPACES` and :attr:`RESERVED_GLOBAL_NAMES` collections
contain names in the global namespace reserved by this environment. Attempting to override one
of these reserved names will cause the constructor to raise an error.
.. code-block:: python
try:
CodeGenEnvironment(loader=DictLoader({'test': template}), additional_globals={'ln': 'bad_ln'})
assert False
except RuntimeError:
pass
Other safe-guards include checks that Jinja built-ins aren't accidentally overridden...
.. code-block:: python
try:
CodeGenEnvironment(loader=DictLoader({'test': template}),
additional_filters={'indent': lambda x: x})
assert False
except RuntimeError:
pass
# You can allow overwrite of built-ins using the ``allow_filter_test_or_use_query_overwrite``
# argument.
e = CodeGenEnvironment(loader=DictLoader({'test': template}),
additional_filters={'indent': lambda x: x},
allow_filter_test_or_use_query_overwrite=True)
assert 'foo' == e.filters['indent']('foo')
...or that user-defined filters or redefined.
.. code-block:: python
class MyFilters:
@staticmethod
def filter_misnamed(name: str) -> str:
return name
e = CodeGenEnvironment(loader=DictLoader({'test': template}),
additional_filters={'filter_misnamed': lambda x: x})
try:
e.add_conventional_methods_to_environment(MyFilters())
assert False
except RuntimeError:
pass
.. note:: Maintainer's Note
This class should remain DSDL agnostic. It is, theoretically, applicable using Jinja with any compiler front-end
input although, in practice, it will only ever be used with pydsdl AST.
Pydsdl-specific logic should live in the CodeGenerator (:class:`nunavut.jinja.DSDLCodeGenerator`).
"""
RESERVED_GLOBAL_NAMESPACES = {"ln", "options", "uses_queries", "nunavut"}
RESERVED_GLOBAL_NAMES = {"now_utc"}
NUNAVUT_NAMESPACE_PREFIX = "nunavut.lang."
def __init__(
    self,
    loader: BaseLoader,
    lctx: typing.Optional[nunavut.lang.LanguageContext] = None,
    trim_blocks: bool = False,
    lstrip_blocks: bool = False,
    additional_filters: typing.Optional[typing.Dict[str, typing.Callable]] = None,
    additional_tests: typing.Optional[typing.Dict[str, typing.Callable]] = None,
    additional_globals: typing.Optional[typing.Dict[str, typing.Any]] = None,
    extensions: typing.Optional[typing.List[Extension]] = None,
    allow_filter_test_or_use_query_overwrite: bool = False,
):
    """
    Configure the Jinja environment for code generation.

    :param loader: Template loader handed to the Jinja ``Environment``.
    :param lctx: Optional language context; when given, language support is
        installed and per-language filters/tests are registered.
    :param additional_filters: Extra Jinja filters; colliding with built-ins
        raises unless ``allow_filter_test_or_use_query_overwrite`` is True.
    :param additional_tests: Extra Jinja tests (same collision rules).
    :param additional_globals: Extra globals; reserved names raise RuntimeError.
    :param extensions: Jinja extensions to enable; defaults to
        do/loopcontrols/JinjaAssert/UseQuery.
    :raises RuntimeError: on reserved-name or built-in collisions.
    """
    # FIX: the original used a mutable list literal as the default for
    # ``extensions`` — the one list object is shared by every call. Default
    # to None and build a fresh list per construction instead.
    if extensions is None:
        extensions = [jinja_do, loopcontrols, JinjaAssert, UseQuery]
    super().__init__(
        loader=loader,  # nosec
        extensions=extensions,
        autoescape=select_autoescape(
            enabled_extensions=("htm", "html", "xml", "json"), default_for_string=False, default=False
        ),
        undefined=StrictUndefined,
        keep_trailing_newline=True,
        lstrip_blocks=lstrip_blocks,
        trim_blocks=trim_blocks,
        auto_reload=False,
        cache_size=400,
    )
    if additional_globals is not None:
        for global_name, global_value in additional_globals.items():
            # Reject any attempt to shadow a reserved name.
            if global_name in self.RESERVED_GLOBAL_NAMESPACES or global_name in self.RESERVED_GLOBAL_NAMES:
                raise RuntimeError('Additional global "{}" uses a reserved global name'.format(global_name))
            self.globals[global_name] = global_value
    self._allow_replacements = allow_filter_test_or_use_query_overwrite
    for global_namespace in self.RESERVED_GLOBAL_NAMESPACES:
        self.globals[global_namespace] = LanguageTemplateNamespace()
    # Placeholder timestamp; replaced via the ``now_utc`` setter before rendering.
    self.globals["now_utc"] = datetime.datetime(datetime.MINYEAR, 1, 1)
    self._target_language = None  # type: typing.Optional[nunavut.lang.Language]
    # --------------------------------------------------
    # After this point we do that most heinous act so common in dynamic languages;
    # we expose the state of this partially constructed object so we can complete
    # configuring it.
    if lctx is not None:
        self._update_language_support(lctx)
        supported_languages = (
            lctx.get_supported_languages().values()
        )  # type: typing.Optional[typing.ValuesView[nunavut.lang.Language]]
    else:
        supported_languages = None
    self._update_nunavut_globals(lctx)
    self.add_conventional_methods_to_environment(self)
    if additional_filters is not None:
        self._add_each_to_environment(
            additional_filters.items(), self.filters, supported_languages=supported_languages
        )
    if additional_tests is not None:
        self._add_each_to_environment(additional_tests.items(), self.tests, supported_languages=supported_languages)
def add_conventional_methods_to_environment(self, obj: typing.Any) -> None:
    """
    Scan *obj* for routines and register every conventionally named one.

    Routines whose names don't match a recognized filter/test/uses-query
    convention make the helper raise TypeError and are simply skipped.
    """
    languages = self.supported_languages
    for member_name, member in inspect.getmembers(obj, inspect.isroutine):
        try:
            self._add_conventional_method_to_environment(member, member_name, supported_languages=languages)
        except TypeError:
            # Not a conventional method; ignore it.
            pass
@property
def supported_languages(self) -> typing.ValuesView[nunavut.lang.Language]:
    """Live view of the languages registered under the ``ln`` global namespace."""
    language_namespace = typing.cast(LanguageTemplateNamespace, self.globals["ln"])
    return language_namespace.values()
@property
def nunavut_global(self) -> LanguageTemplateNamespace:
    """The ``nunavut`` template global namespace."""
    namespace = self.globals["nunavut"]
    return typing.cast(LanguageTemplateNamespace, namespace)
@property
def target_language_uses_queries(self) -> LanguageTemplateNamespace:
    """The ``uses_queries`` template global namespace."""
    namespace = self.globals["uses_queries"]
    return typing.cast(LanguageTemplateNamespace, namespace)
@property
def language_options(self) -> LanguageTemplateNamespace:
    """The ``options`` template global namespace."""
    namespace = self.globals["options"]
    return typing.cast(LanguageTemplateNamespace, namespace)
@property
def language_support(self) -> LanguageTemplateNamespace:
    """The ``ln`` template global namespace holding per-language support."""
    namespace = self.globals["ln"]
    return typing.cast(LanguageTemplateNamespace, namespace)
@property
def target_language(self) -> typing.Optional[nunavut.lang.Language]:
    """The language output is being generated for, or None when not configured."""
    return self._target_language
@property
def now_utc(self) -> datetime.datetime:
    """The timestamp templates may stamp into generated output."""
    stamp = self.globals["now_utc"]
    return typing.cast(datetime.datetime, stamp)
@now_utc.setter
def now_utc(self, utc_time: datetime.datetime) -> None:
    # Exposes the timestamp to templates via the "now_utc" global.
    self.globals["now_utc"] = utc_time
def add_test(self, test_name: str, test_callable: typing.Callable) -> None:
    """Register *test_callable* as a Jinja test named *test_name* on this environment."""
    self._add_to_environment(test_name, test_callable, self.tests)
# +----------------------------------------------------------------------------------------------------------------+
# | Private
# +----------------------------------------------------------------------------------------------------------------+
def _resolve_collection(
    self,
    conventional_method_prefix: typing.Optional[str],
    method_name: str,
    collection_maybe: typing.Optional[typing.Union[LanguageTemplateNamespace, typing.Dict[str, typing.Any]]],
) -> typing.Union[LanguageTemplateNamespace, typing.Dict[str, typing.Any]]:
    """
    Decide which environment collection an item belongs in.

    An explicitly supplied collection wins; otherwise the conventional name
    prefix selects the tests, filters, or uses-query collection.

    :raises TypeError: when no collection is given and the prefix matches
        no known convention.
    """
    if collection_maybe is not None:
        return collection_maybe
    if LanguageEnvironment.is_test_name(conventional_method_prefix):
        return typing.cast(typing.Dict[str, typing.Any], self.tests)
    if LanguageEnvironment.is_filter_name(conventional_method_prefix):
        return typing.cast(typing.Dict[str, typing.Any], self.filters)
    if LanguageEnvironment.is_uses_query_name(conventional_method_prefix):
        return typing.cast(LanguageTemplateNamespace, self.globals["uses_queries"])
    raise TypeError(
        "Tried to add an item {} to the template environment but we don't know what the item is.".format(
            method_name
        )
    )
def _add_to_environment(
    self,
    item_name: str,
    item: typing.Any,
    collection: typing.Union[LanguageTemplateNamespace, typing.Dict[str, typing.Any]],
) -> None:
    """
    Insert a named item into *collection*, honouring the replacement policy.

    :raises RuntimeError: if *item_name* already exists and replacements were
        not allowed at construction time.
    """
    already_defined = item_name in collection
    if not already_defined:
        logger.debug("Adding {} to environment".format(item_name))
    elif not self._allow_replacements:
        raise RuntimeError("{} was already defined.".format(item_name))
    elif item_name in JINJA2_FILTERS:
        logger.info("Replacing Jinja built-in {}".format(item_name))
    else:
        logger.info('Replacing "{}" which was already defined for this environment.'.format(item_name))
    # Namespaces take attributes; plain dict-like collections take keys.
    if isinstance(collection, LanguageTemplateNamespace):
        setattr(collection, item_name, item)
    else:
        collection[item_name] = item
def _add_conventional_method_to_environment(
    self,
    method: typing.Callable[..., bool],
    method_name: str,
    collection_maybe: typing.Optional[typing.Union[LanguageTemplateNamespace, typing.Dict[str, typing.Any]]] = None,
    supported_languages: typing.Optional[typing.ValuesView[nunavut.lang.Language]] = None,
    method_language: typing.Optional[nunavut.lang.Language] = None,
    is_target: bool = False,
) -> None:
    """
    :param str callable_name: The name of the callable to use in a template.
    :param typing.Callable[..., bool] callable: The named callable.
    :param typing.Optional[str] callable_namespace: If provided the namespace to prefix to the callable name.
    :return: tuple of name and the callable which might be prepared as a partial function based on decorators.
    :raises: RuntimeWarning if the callable requested resources that were not available in this environment.
    .. invisible-code-block: python
    from nunavut.jinja import CodeGenEnvironment
    from nunavut.jinja.jinja2 import DictLoader
    from nunavut.templates import template_language_test
    from unittest.mock import MagicMock
    lctx = MagicMock(spec=LanguageContext)
    poop_lang = MagicMock(spec=Language)
    poop_lang.name = 'poop'
    poop_lang.get_templates_package_name = MagicMock(return_value='nunavut.lang.poop')
    lctx.get_target_language = MagicMock(return_value=None)
    lctx.get_supported_languages = MagicMock(return_value = {'poop': poop_lang})
    @template_language_test('nunavut.lang.poop')
    def test_test(language):
        return True
    e = CodeGenEnvironment(
        loader=DictLoader({'test': 'hello world'}),
        additional_tests={'foo': test_test},
        lctx=lctx
    )
    assert test_test == e.tests['foo'].func
    assert e.tests['foo']()
    """
    # result[0] feeds _resolve_collection as the conventional name prefix;
    # result[1] and result[2] are the cleaned name and prepared callable.
    result = LanguageEnvironment.handle_conventional_methods(method, method_name, supported_languages)
    collection = self._resolve_collection(result[0], method_name, collection_maybe)
    if method_language is not None:
        # Language-specific items register under the namespaced "ln.<lang>.<name>".
        self._add_to_environment("ln.{}.{}".format(method_language.name, result[1]), result[2], collection)
    else:
        self._add_to_environment(result[1], result[2], collection)
    if is_target:
        # The target language's item is also exposed under the bare name.
        # NOTE(review): when method_language is None this re-adds the same
        # name, which raises unless replacements are allowed -- confirm
        # is_target is only used together with method_language.
        self._add_to_environment(result[1], result[2], collection)
def _add_each_to_environment(
    self,
    items: typing.AbstractSet[typing.Tuple[str, typing.Callable]],
    collection: typing.Optional[
        typing.Union[
            LanguageTemplateNamespace,
            typing.Dict[str, typing.Any],
        ]
    ] = None,
    supported_languages: typing.Optional[typing.ValuesView[nunavut.lang.Language]] = None,
    language: typing.Optional[nunavut.lang.Language] = None,
    is_target: bool = False,
) -> None:
    """Register a batch of (name, callable) pairs, one at a time."""
    for item_name, item_callable in items:
        self._add_conventional_method_to_environment(
            item_callable, item_name, collection, supported_languages, language, is_target
        )
@classmethod
def _create_platform_version(cls) -> typing.Dict[str, typing.Any]:
import platform
import sys
platform_version = {} # type: typing.Dict[str, typing.Any]
platform_version["python_implementation"] = platform.python_implementation()
platform_version["python_version"] = platform.python_version()
platform_version["python_release_level"] = sys.version_info[3]
platform_version["python_build"] = platform.python_build()
platform_version["python_compiler"] = platform.python_compiler()
platform_version["python_revision"] = platform.python_revision()
try:
platform_version["python_xoptions"] = sys._xoptions
except AttributeError: # pragma: no cover
platform_version["python_xoptions"] = {}
platform_version["runtime_platform"] = platform.platform()
return platform_version
def _add_support_from_language_module_to_environment(
self,
lctx: nunavut.lang.LanguageContext,
language: nunavut.lang.Language,
ln_module: "types.ModuleType",
is_target: bool = False,
) -> None:
supported_languages = lctx.get_supported_languages()
ln_env = LanguageEnvironment.find_all_conventional_methods_in_language_module(
language, supported_languages.values(), ln_module
)
self._add_each_to_environment(
ln_env.filters.items(), self.filters, supported_languages.values(), language=language, is_target=is_target
)
self._add_each_to_environment(
ln_env.tests.items(), self.tests, supported_languages.values(), language=language, | |
#!/usr/bin/env python
# Copyright (c) 2015:
# Istituto Nazionale di Fisica Nucleare (INFN), Italy
#
# See http://www.infn.it for details on the copyright holder
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from flask import request
import logging.config
import socket
import uuid
from Crypto.Cipher import ARC4
from fgapiserver_config import FGApiServerConfig
from fgapiserver_db import get_db
import os
import sys
import time
import base64
import logging
"""
FutureGateway APIServer tools
"""
__author__ = '<NAME>'
__copyright__ = '2019'
__license__ = 'Apache'
__version__ = 'v0.0.10.1'
__maintainer__ = '<NAME>'
__email__ = '<EMAIL>'
__status__ = 'devel'
__update__ = '2019-10-18 15:19:14'
# Run-time directory of this module (trailing slash kept for concatenation)
fgapirundir = os.path.dirname(os.path.abspath(__file__)) + '/'
# Make sibling modules importable regardless of the caller's working directory
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
# fgapiserver configuration file, expected next to this module
fgapiserver_config_file = fgapirundir + 'fgapiserver.conf'
# Load configuration
fg_config = FGApiServerConfig(fgapiserver_config_file)
# FutureGateway database object; populated by check_db_ver()
fgapisrv_db = None
# Logging: configured from the file named by the 'fgapisrv_logcfg' setting
logging.config.fileConfig(fg_config['fgapisrv_logcfg'])
#
# Tooling functions commonly used by fgapiserver source codes
#
def json_bool(bool_value):
    """
    Normalize a boolean-ish value coming from a JSON stream to a real bool.

    Conversion table:
        bool_value = "true"/"True"/"TRUE" -> True (any other string -> False)
        bool_value = True/False           -> unchanged (bool)
        bool_value = "1"/"0"              -> True/False (str)
        bool_value = 1/0                  -> True/False (int)

    :param bool_value: value of any type to interpret as a boolean
    :return: the corresponding bool
    """
    # isinstance is the idiomatic type check (was: type(bool_value) != bool)
    if isinstance(bool_value, bool):
        return bool_value
    text = str(bool_value).lower()
    return text == 'true' or text == '1'
def get_fgapiserver_db():
    """
    Build and return the fgAPIServer database object.

    :return: the database object, or None when the connection fails
             (the failure message is logged)
    """
    database, failure_message = get_db(
        db_host=fg_config['fgapisrv_db_host'],
        db_port=fg_config['fgapisrv_db_port'],
        db_user=fg_config['fgapisrv_db_user'],
        db_pass=fg_config['fgapisrv_db_pass'],
        db_name=fg_config['fgapisrv_db_name'],
        iosandbbox_dir=fg_config['fgapisrv_iosandbox'],
        fgapiserverappid=fg_config['fgapisrv_geappid'])
    if database is None:
        logging.error(failure_message)
    return database
def check_api_ver(apiver):
    """
    Validate the requested API version against the configured one.

    Future versions of this function can be used to route different versions.

    :param apiver: API version string taken from the request path
    :return: (True, 200, message) when the version is supported,
             (False, 404, message) otherwise
    """
    if apiver == fg_config['fgapiver']:
        return (True, 200, 'Supported API version %s' % apiver)
    return (False, 404, "Unsupported API version %s" % apiver)
def check_db_ver():
    """
    Check database connectivity and schema version.

    Sets the module-level fgapisrv_db object and terminates the process
    when the database is unreachable or its schema version differs from
    the fgapisrv_dbver configuration value.

    :return: the database schema version string
    """
    global fgapisrv_db
    fgapisrv_db = get_fgapiserver_db()
    if fgapisrv_db is None:
        msg = "Unable to connect to the database!"
        logging.error(msg)
        print(msg)
        sys.exit(1)
    db_ver = fgapisrv_db.get_db_version()
    required_ver = fg_config['fgapisrv_dbver']
    # An unset/empty requirement is treated as a mismatch on purpose:
    # the front-end must know which schema it supports.
    if required_ver is None or required_ver == '' or required_ver != db_ver:
        msg = ("Current database version '%s' is not compatible "
               "with this version of the API server front-end; "
               "version %s is required.\n"
               "It is suggested to update your database applying "
               "new available patches."
               % (db_ver, required_ver))
        logging.error(msg)
        sys.exit(1)
    logging.debug("Check database version passed")
    return db_ver
def srv_uuid():
    """
    Return this service's UUID.

    The value is derived deterministically from the host name using
    uuid3 over the DNS namespace, so it is stable per machine.
    """
    hostname = socket.gethostname()
    return str(uuid.uuid3(uuid.NAMESPACE_DNS, hostname))
def paginate_response(response, page, per_page, page_url):
    """
    Slice the response record list according to page and per_page values and
    build the accompanying HATEOAS links.

    :param response: the whole list of response records
    :param page: the selected page number (1-based, as sent by the client)
    :param per_page: how many response records per page
    :param page_url: the url used to get this page (hrefs are built from it)
    :return: tuple (records of the selected page, list of link dicts)
    """
    links = []
    if page is None or per_page is None:
        # No pagination requested: the whole response is a single page
        links.append({"rel": "self", "href": page_url})
        return response, links
    ppg = int(per_page)
    pg = int(page)
    if pg > 0:
        pg -= 1  # convert the 1-based client page to a 0-based index
    # BUGFIX: the original used Python-3 true division here, producing floats
    # that crashed range(); also max_pages added the remainder instead of 1.
    # Integer ceiling division gives the page count (0 for an empty response).
    max_pages = (len(response) + ppg - 1) // ppg
    if pg >= max_pages:
        # Clamp out-of-range requests to the last existing page
        pg = max(max_pages - 1, 0)
    record_from = pg * ppg
    record_to = record_from + ppg
    paginated_response = response[record_from:record_to]
    # Append pagination parameters with '&' when the url already has a query
    filter_char = "&" if "?" in page_url else "?"
    for link_page in range(max_pages):
        if link_page == pg:
            rel = "self"
        elif link_page < pg:
            rel = "prev"
        else:
            rel = "next"
        href = "%s%spage=%s&per_page=%s" % (page_url,
                                            filter_char,
                                            link_page + 1,
                                            ppg)
        links.append({"rel": rel, "href": href})
    return paginated_response, links
def get_task_app_id(taskid):
    """
    Return the application id associated to the given task id.

    :param taskid: Task id
    :return: the associated application id, or None when the task record
             carries no application entry
    """
    task_info = fgapisrv_db.get_task_info(taskid)
    app_record = task_info.get('application', None)
    if app_record is not None:
        logging.debug("Found app_id: '%s' for task_id: '%s'"
                      % (app_record['id'], taskid))
        return app_record['id']
    # BUGFIX: logging.warn is a deprecated alias of logging.warning
    logging.warning("Could not find app_id for task_id: '%s'" % taskid)
    return None
def process_log_token(logtoken):
    """
    Extract (username, password, timestamp) from an encrypted login token.

    The token payload has the form:
        username=<username>:password=<password>:timestamp=<timestamp>
    RC4-encrypted with the fgapisrv_secret configuration value and base64
    encoded. (!) Override this function to plug in a stronger algorithm;
    the credentials themselves are stored in the APIServer users table.

    To create such a token:
        from Crypto.Cipher import ARC4
        import time, base64
        obj = ARC4.new(secret)  # secret == fgapiserver_secret value
        b64em = base64.b64encode(obj.encrypt(
            "username=%s:password=%s:timestamp=%s"
            % (username, password, int(time.time()))))

    :param logtoken: base64 string carrying the encrypted credentials
    :return: unencrypted triple (username, password, timestamp); empty/zero
             values when the token cannot be parsed
    """
    username = ""
    password = ""
    timestamp = 0
    cipher = ARC4.new(fg_config['fgapisrv_secret'])
    creds = cipher.decrypt(base64.b64decode(logtoken))
    if isinstance(creds, bytes):
        # BUGFIX: under Python 3 decrypt() returns bytes; split(":") below
        # needs str, so decode first
        creds = creds.decode('utf-8', errors='replace')
    credfields = creds.split(":")
    if len(credfields) > 0:
        username = credfields[0].split("=")[1]
        password = credfields[1].split("=")[1]
        timestamp = credfields[2].split("=")[1]
    # NOTE(review): this logs the clear-text password at debug level;
    # consider masking it before enabling debug logs in production
    logging.debug("Logtoken: '%s'\n"
                  "  User: '%s'\n"
                  "  Password: '%s'\n"
                  "  Timestamp: '%s'" % (logtoken,
                                         username,
                                         password,
                                         timestamp))
    return username, password, timestamp
def create_session_token(**kwargs):
    """
    Exchange a login token or username/password credentials for an access
    token usable in any further transaction with the APIServer front-end.

    :param kwargs: logtoken - token containing encrypted credentials plus
                              a timestamp
                   username, password - credentials of APIServer users
                   user - optional name of a user to impersonate (requires
                          the 'user_impersonate' right)
    :return: tuple (session token, delegated token); both empty strings
             when no usable credentials were provided
    """
    global fgapisrv_db
    timestamp = int(time.time())
    user = kwargs.get("user", "")
    logtoken = kwargs.get("logtoken", "")
    username = kwargs.get("username", "")
    password = kwargs.get("password", "")
    delegated_token = ''
    if len(logtoken) > 0:
        # Derive the credentials from the login token
        username, password, timestamp = process_log_token(logtoken)
    if len(username) > 0 and len(password) > 0:
        # Create a new access token starting from given username and password
        # (DBRequired)
        sestoken = fgapisrv_db.create_session_token(username,
                                                    password,
                                                    timestamp)
    else:
        # Nor logtoken or (username/password) provided
        return '', ''
    # BUGFIX: the debug labels were shifted by one with respect to the
    # format arguments (sestoken printed as "logtoken", and so on)
    # NOTE(review): the clear-text password is logged at debug level
    logging.debug("Session token is:\n"
                  "sestoken: '%s'\n"
                  "logtoken: '%s'\n"
                  "username: '%s'\n"
                  "password: '%s'\n" % (sestoken,
                                        logtoken,
                                        username,
                                        password))
    # Verify whether a delegated user is provided
    if len(sestoken) > 0 and len(user) > 0:
        # A different user has been specified; get user info from the token
        user_token = fgapisrv_db.user_token(sestoken)
        # The requester must hold the user_impersonate right
        if user_token['name'] != user and\
           fgapisrv_db.verify_user_role(user_token['id'], 'user_impersonate'):
            delegated_token = fgapisrv_db.create_delegated_token(sestoken,
                                                                 user)
        logging.debug(
            "Delegated token is: '%s' for user: '%s'" %
            (delegated_token, user))
    return sestoken, delegated_token
#
# header_links; take care of _links fields and Location specified
# inside the passed json dictionary content
#
def header_links(req, resp, json_dict):
    """Add Link and Location response headers from json_dict['_links'].

    :param req: incoming Flask request; req.url is embedded in each header
    :param resp: Flask response whose headers are extended in place
    :param json_dict: response payload; only its optional '_links' list
                      (dicts with 'rel' and 'href' keys) is consulted
    """
    if '_links' in json_dict:
        for link in json_dict['_links']:
            # NOTE(review): RFC 8288 puts the target in angle brackets
            # ('<url>; rel="..."'); here the current URL is emitted bare and
            # the href gets the brackets -- looks inverted, confirm against
            # the API consumers before changing
            resp.headers.add('Link', ('%s; '
                                      'rel="%s", <%s>; '
                                      % (req.url,
                                         link['rel'],
                                         link['href'])))
        resp.headers.add('Location', req.url)
#
# Not allowed method common answer
#
def not_allowed_method():
    """Common answer for HTTP methods not supported by an endpoint.

    NOTE(review): 405 Method Not Allowed would be the canonical status code;
    400 is kept to preserve the existing API behavior.

    :return: tuple (status code, message dictionary)
    """
    message = {"message": "Method '%s' is not allowed for this endpoint"
                          % request.method}
    return 400, message
#
# Envconfig DB config and registry functions
#
def check_db_reg(config):
"""
Running server registration check
:return: This fucntion checks if this running server has been registered
into the database. If the registration is not yet done, the
registration will be performed and the current configuration
registered. If the server has been registered return the
configuration saved from the registration.
"""
# Retrieve the service UUID
fgapisrv_uuid = srv_uuid()
if not fgapisrv_db.is_srv_reg(fgapisrv_uuid):
# The service is not registered
# Register the service | |
from .core import KeyStore, KeyStoreError, PinError
from platform import CriticalErrorWipeImmediately
import platform
from rng import get_random_bytes
import hashlib
import hmac
from bitcoin import ec, bip39, bip32
from bitcoin.transaction import SIGHASH
from helpers import aead_encrypt, aead_decrypt, tagged_hash
import secp256k1
from gui.screens import Alert, PinScreen, MnemonicScreen, Prompt
class RAMKeyStore(KeyStore):
"""
KeyStore that doesn't store your keys.
Don't use directly. It's a parent class for inheritance.
For PIN verifiction implement
_set_pin, _unlock, _change_pin and other pin-related methods.
"""
# Button to go to storage menu
# Menu should be implemented in async storage_menu function
# Here we only have a single option - to show mnemonic
storage_button = "Show mnemonic"
def __init__(self):
    """Initialize an empty keystore with no key material loaded."""
    # bip39 mnemonic (str), None until set_mnemonic() is called
    self.mnemonic = None
    # root xprv (derived from mnemonic, password)
    self.root = None
    # root fingerprint (from the first child, see set_mnemonic)
    self.fingerprint = None
    # private key at path m/0x1D'
    # used to encrypt & authenticate data
    # specific to this root key
    self.idkey = None
    # unique secret for a device
    # used to show anti-phishing words
    self.secret = None
    # encryption secret for untrusted data storage
    # stored encrypted with PIN secret
    # if PIN changed we only need to re-encrypt
    # this secret, all the data remains the same
    self.enc_secret = None
    # set True by init() after the first-run warning has been handled
    self.initialized = False
    # show function for menus and stuff (set by init())
    self.show = None
def set_mnemonic(self, mnemonic=None, password=""):
    """Load mnemonic and password and create root key.

    Derives the root xprv, its fingerprint and the id key (m/0x1D')
    used to encrypt and authenticate wallet files.

    :raises KeyStoreError: when the provided mnemonic is invalid
    """
    # BUGFIX: this docstring used to sit in the middle of the body, where it
    # was a no-op string expression rather than documentation.
    if mnemonic == self.mnemonic and password != "":
        # probably checking mnemonic after saving
        self.show_loader()
    else:
        self.show_loader(title="Generating keys...")
    if mnemonic is not None:
        self.mnemonic = mnemonic.strip()
        if not bip39.mnemonic_is_valid(self.mnemonic):
            raise KeyStoreError("Invalid mnemonic")
    seed = bip39.mnemonic_to_seed(self.mnemonic, password)
    self.root = bip32.HDKey.from_seed(seed)
    self.fingerprint = self.root.child(0).fingerprint
    # id key to sign and encrypt wallet files
    # stored on untrusted external chip
    self.idkey = self.root.child(0x1D, hardened=True).key.serialize()
def sign_psbt(self, psbt, sighash=SIGHASH.ALL):
    """Sign all signable inputs of *psbt* in place with the root key."""
    psbt.sign_with(self.root, sighash)
def sign_hash(self, derivation, msghash: bytes):
    """Sign *msghash* with the private key derived at *derivation*."""
    return self.root.derive(derivation).key.sign(msghash)
def sign_recoverable(self, derivation, msghash: bytes):
    """Returns a signature and a recovery flag"""
    prv = self.root.derive(derivation).key
    # 65-byte recoverable signature: 64 bytes (r, s) + 1 recovery byte
    sig = secp256k1.ecdsa_sign_recoverable(msghash, prv._secret)
    flag = sig[64]
    return ec.Signature(sig[:64]), flag
def save_aead(self, path, adata=b"", plaintext=b"", key=None):
    """Encrypt *plaintext* with associated data *adata* and write it to *path*.

    Falls back to the id key when *key* is omitted.

    :raises KeyStoreError: when no key is available
    """
    encryption_key = self.idkey if key is None else key
    if encryption_key is None:
        raise KeyStoreError("Pass the key please")
    blob = aead_encrypt(encryption_key, adata, plaintext)
    with open(path, "wb") as f:
        f.write(blob)
    # flush the filesystem so the file survives an abrupt power-off
    platform.sync()
def load_aead(self, path, key=None):
    """Read *path* and decrypt data previously written by save_aead.

    Falls back to the id key when *key* is omitted.

    :return: tuple (associated data, plaintext)
    :raises KeyStoreError: when no key is available
    """
    decryption_key = self.idkey if key is None else key
    if decryption_key is None:
        raise KeyStoreError("Pass the key please")
    with open(path, "rb") as f:
        blob = f.read()
    return aead_decrypt(blob, decryption_key)
def get_xpub(self, path):
    """Derive *path* from the root key and return the public-only branch.

    :raises KeyStoreError: when the keystore is locked or has no root key
    """
    ready = not self.is_locked and self.root is not None
    if not ready:
        raise KeyStoreError("Keystore is not ready")
    return self.root.derive(path).to_public()
def owns(self, key):
    """Return True when *key* matches this keystore's root (directly or
    via the derivation recorded on the key)."""
    fp = key.fingerprint
    if fp is not None and fp != self.fingerprint:
        return False
    expected = (self.root.to_public()
                if key.derivation is None
                else self.root.derive(key.derivation).to_public())
    return key.key == expected
def wipe(self, path):
    """Recursively delete every file and directory under *path*."""
    platform.delete_recursively(path)
def load_secret(self, path):
    """Load the device secret from ``path + "/secret"``, generating and
    persisting a fresh one when the file cannot be read."""
    try:
        # try to load secret
        with open(path + "/secret", "rb") as f:
            self.secret = f.read()
    except OSError:
        # BUGFIX: was a bare `except:` that swallowed every exception;
        # only I/O failures (missing file, permissions) should trigger
        # regeneration of the secret
        self.secret = self.create_new_secret(path)
@property
def settings_key(self):
    """Key for encrypting app settings, domain-separated from the device secret."""
    return tagged_hash("settings key", self.secret)
def create_new_secret(self, path):
    """Generate a fresh 32-byte device secret, persist it to
    ``path + "/secret"``, remember it and return it."""
    new_secret = get_random_bytes(32)
    with open(path + "/secret", "wb") as f:
        f.write(new_secret)
    self.secret = new_secret
    return new_secret
def get_auth_word(self, pin_part):
    """
    Map a partial PIN to an anti-phishing word derived from the device
    secret, so the user can stop entering the PIN on a wrong device.
    """
    hmac_key = tagged_hash("auth", self.secret)
    digest = hmac.new(hmac_key, pin_part, digestmod="sha256").digest()
    # The wordlist has 2048 entries (11 bits); 2**16 % 2048 == 0, so
    # reducing a 16-bit value modulo the length introduces no bias.
    index = int.from_bytes(digest[:2], "big") % len(bip39.WORDLIST)
    return bip39.WORDLIST[index]
def app_secret(self, app):
    """Return a per-app secret, domain-separated from the device secret by *app*."""
    return tagged_hash(app, self.secret)
@property
def is_ready(self):
    """True when the keystore is unlocked and a root fingerprint is loaded."""
    return not self.is_locked and self.fingerprint is not None
@property
def is_locked(self):
    """
    Override this method!!!
    with your locking check function
    """
    # hack: we don't support PIN but
    # we need enc_secret, so let's do it here.
    # DONT USE THIS IF YOU HAVE PIN SUPPORT!
    # NOTE: side effect -- reading this property lazily derives enc_secret
    # from the device secret the first time it is accessed.
    if self.enc_secret is None:
        self.enc_secret = tagged_hash("enc", self.secret)
    return False
@property
def is_key_saved(self):
    """
    Override this method
    to detect if the key is stored.
    This base class never persists the key, so it always reports False.
    """
    return False
@property
def pin_attempts_left(self):
    """
    Override this property
    with a function to get number of attempts left.
    Default: no attempts are ever consumed (equals pin_attempts_max).
    """
    return self.pin_attempts_max
@property
def pin_attempts_max(self):
    """
    Override this property
    with a function to get max number of attempts.
    Default: 10.
    """
    return 10
@property
def is_pin_set(self):
    """
    Override this property
    with a function to get PIN state.
    Default True, so unlock() never triggers PIN setup here.
    """
    return True
def lock(self):
    """Locks the keystore. No-op in this base class; override with real locking."""
    pass
def _unlock(self, pin):
    """
    Implement this.
    Unlock the keystore, raises PinError if PIN is invalid.
    Raises CriticalErrorWipeImmediately if no attempts left.
    The body below is reference scaffolding, not a real PIN check.
    """
    # check we have attempts
    if self.pin_attempts_left <= 0:
        # wipe is happening automatically on this error
        raise CriticalErrorWipeImmediately("No more PIN attempts!\nWipe!")
    # check PIN code somehow, raise PinError if it's incorrect
    # for reference - first decrease PIN counter, then check PIN
    # raise PIN Error if it's invalid like this:
    # if pin == "INVALID PIN":
    #     raise PinError("Invalid PIN!\n%d of %d attempts left..." % (
    #         self._pin_attempts_left, self._pin_attempts_max)
    #     )
    # reset PIN counter here and unlock
    # set encryption secret somehow mb save it
    # don't use this approach, it's just for reference
    self.enc_secret = tagged_hash("enc", self.secret)
def _change_pin(self, old_pin, new_pin):
    """Implement PIN change function.

    This base version only verifies *old_pin*; it stores nothing.
    """
    self._unlock(old_pin)
def _set_pin(self, pin):
    """Implement PIN set function.

    This base version only runs the unlock path; it stores nothing.
    """
    self._unlock(pin)
async def init(self, show_fn, show_loader):
    """
    Wait for keystore media, load the device secret and, on the first
    call, warn the user when PIN attempts have already been consumed.
    """
    self.show_loader = show_loader
    self.show = show_fn
    platform.maybe_mkdir(self.path)
    self.load_secret(self.path)
    first_run = not self.initialized
    attempts_consumed = self.pin_attempts_left != self.pin_attempts_max
    if first_run and attempts_consumed:
        warning = Alert(
            "Warning!",
            "You only have %d of %d attempts\n"
            "to enter correct PIN code!"
            % (self.pin_attempts_left, self.pin_attempts_max),
            button_text="OK",
        )
        await self.show(warning)
    self.initialized = True
async def unlock(self):
    """Set up a PIN on first use, then prompt until the keystore unlocks."""
    if not self.is_pin_set:
        # no PIN yet - let the user choose one
        chosen_pin = await self.setup_pin()
        self.show_loader("Setting up PIN code...")
        self._set_pin(chosen_pin)
    while self.is_locked:
        entered_pin = await self.get_pin()
        self.show_loader("Verifying PIN code...")
        self._unlock(entered_pin)
async def get_pin(self, title="Enter your PIN code", with_cancel=False):
    """
    Show the PIN entry screen (with anti-phishing words) and return
    whatever the user entered.
    """
    pin_screen = PinScreen(
        title=title,
        note="Do you recognize these words?",
        get_word=self.get_auth_word,
        subtitle=self.pin_subtitle,
        with_cancel=with_cancel,
    )
    return await self.show(pin_screen)
@property
def pin_subtitle(self):
    """Subtitle shown on PIN screens, built from the keystore's class metadata."""
    keystore_cls = type(self)
    return "using #%s %s #" % (keystore_cls.COLOR, keystore_cls.NAME.lower())
async def setup_pin(self, get_word=None):
    """
    PIN setup flow: choose, then confirm.
    Loops (showing an error) until both entries match, then returns the PIN.
    """
    while True:
        first_entry = await self.show(PinScreen(
            title="Choose your PIN code",
            note="Remember these words,they will stay the same on this device.",
            get_word=self.get_auth_word,
            subtitle=self.pin_subtitle,
        ))
        second_entry = await self.show(PinScreen(
            title="Confirm your PIN code",
            note="Remember these words,they will stay the same on this device.",
            get_word=self.get_auth_word,
            subtitle=self.pin_subtitle,
        ))
        if first_entry == second_entry:
            return first_entry
        await self.show(Alert("Error!", "PIN codes are different!"))
async def change_pin(self):
    """Verify the current PIN, collect a new one and store it."""
    # get_auth_word function can generate words from part of the PIN
    current_pin = await self.get_pin(title="First enter your old PIN code", with_cancel=True)
    if current_pin is None:
        # user canceled the entry screen
        return
    self.show_loader("Verifying PIN code...")
    self._unlock(current_pin)  # raises when the old PIN is wrong
    replacement_pin = await self.setup_pin()
    self.show_loader("Setting new PIN code...")
    self._change_pin(current_pin, replacement_pin)
    await self.show(
        Alert("Success!", "PIN code is successfully changed!", button_text="OK")
    )
async def show_mnemonic(self):
    """Display the recovery phrase after the user re-confirms the PIN."""
    confirm = Prompt("Warning",
                     "You need to confirm your PIN code "
                     "to display your recovery phrase.\n\n"
                     "Continue?")
    if await self.show(confirm):
        # force a fresh PIN entry before revealing the phrase
        self.lock()
        await self.unlock()
        await self.show(MnemonicScreen(self.mnemonic))
async def storage_menu(self):
"""Manage storage and display of the recovery phrase"""
# This class can only show mnemonic, can't save
await self.show_mnemonic()
"""
# Example:
buttons = [
# id, text
(None, "Key management"),
(0, "Save key to flash"),
(1, | |
# data/external/repositories_2to3/154929/march-ml-mania-2015-master/src/hardcoded.py
# this was produced by calling kaggle_to_collected(2011) and then manually fixing it by looking at
# http://en.wikipedia.org/wiki/List_of_colloquial_names_for_universities_and_colleges_in_the_United_States
# For schools for which I didn't find a match, the value is None
kaggle_to_collected = {
'Abilene Chr': 'Abilene Christian Wildcats',
'Air Force': 'Air Force Falcons',
'Akron': 'Akron Zips',
'Alabama': 'Alabama Crimson Tide',
'Alabama A&M': 'Alabama A&M Bulldogs',
'Alabama St': 'Alabama State Hornets',
'Albany NY': 'Albany (NY) Great Danes',
'Alcorn St': 'Alcorn State Braves',
'Alliant Intl': None, #u'Alliant International Gulls',
'American Univ': 'American Eagles',
'Appalachian St': 'Appalachian State Mountaineers',
'Arizona': 'Arizona Wildcats',
'Arizona St': 'Arizona State Sun Devils',
'Ark Little Rock': 'Arkansas-Little Rock Trojans',
'Ark Pine Bluff': 'Arkansas-Pine Bluff Golden Lions',
'Arkansas': 'Arkansas Razorbacks',
'Arkansas St': 'Arkansas State Red Wolves',
'UT Arlington': 'Texas-Arlington Mavericks',
'Armstrong St': None, #u'Armstrong Pirates',
'Army': 'Army Black Knights',
'Auburn': 'Auburn Tigers',
'Augusta': None, #u'Augusta State Jaguars',
'Austin Peay': 'Austin Peay Governors',
'BYU': 'Brigham Young Cougars',
'Ball St': 'Ball State Cardinals',
'Baylor': 'Baylor Bears',
'Belmont': 'Belmont Bruins',
'Bethune-Cookman': 'Bethune-Cookman Wildcats',
'Binghamton': 'Binghamton Bearcats',
'Birmingham So': None,
'Boise St': 'Boise State Broncos',
'Boston College': 'Boston College Eagles',
'Boston Univ': 'Boston University Terriers',
'Bowling Green': 'Bowling Green State Falcons',
'Bradley': 'Bradley Braves',
'Brooklyn': None, #u'Brooklyn Bulldogs',
'Brown': 'Brown Bears',
'Bryant': 'Bryant Bulldogs',
'Bucknell': 'Bucknell Bison',
'Buffalo': 'Buffalo Bulls',
'Butler': 'Butler Bulldogs',
'C Michigan': 'Central Michigan Chippewas',
'CS Bakersfield': 'Cal State Bakersfield Roadrunners',
'CS Fullerton': 'Cal State Fullerton Titans',
'CS Northridge': 'Cal State Northridge Matadors',
'CS Sacramento': 'Sacramento State Hornets',
'Cal Poly SLO': 'Cal Poly Mustangs',
'California': 'University of California Golden Bears',
'Campbell': 'Campbell Fighting Camels',
'Canisius': 'Canisius Golden Griffins',
'Cent Arkansas': 'Central Arkansas Bears',
'Centenary': 'Centenary (LA) Gents',
'Central Conn': 'Central Connecticut State Blue Devils',
'Charleston So': 'Charleston Southern Buccaneers',
'Charlotte': 'Charlotte 49ers',
'Chattanooga': 'Chattanooga Mocs',
'Chicago St': 'Chicago State Cougars',
'Cincinnati': 'Cincinnati Bearcats',
'Citadel': 'Citadel Bulldogs',
'Clemson': '<NAME>',
'Cleveland St': 'Cleveland State Vikings',
'Coastal Car': 'Coastal Carolina Chanticleers',
'Col Charleston': 'College of Charleston Cougars',
'Colgate': 'Colgate Raiders',
'Colorado': 'Colorado Buffaloes',
'Colorado St': 'Colorado State Rams',
'Columbia': 'Columbia Lions',
'Connecticut': 'Connecticut Huskies',
'Coppin St': 'Coppin State Eagles',
'Cornell': 'Cornell Big Red',
'Creighton': 'Creighton Bluejays',
'Dartmouth': 'Dartmouth Big Green',
'Davidson': '<NAME>',
'Dayton': 'Dayton Flyers',
'DePaul': 'DePaul Blue Demons',
'Delaware': "Delaware Fightin' Blue Hens",
'Delaware St': 'Delaware State Hornets',
'Denver': 'Denver Pioneers',
'Detroit': 'Detroit Mercy Titans',
'Drake': 'Drake Bulldogs',
'Drexel': 'Drexel Dragons',
'Duke': 'Duke Blue Devils',
'Duquesne': 'Duques<NAME>',
'E Illinois': 'Eastern Illinois Panthers',
'E Kentucky': 'Eastern Kentucky Colonels',
'E Michigan': 'Eastern Michigan Eagles',
'E Washington': 'Eastern Washington Eagles',
'ETSU': 'East Tennessee State Buccaneers',
'East Carolina': 'East Carolina Pirates',
'Edwardsville': 'Southern Illinois-Edwardsville Cougars',
'Elon': 'Elon Phoenix',
'Evansville': 'Evansville Purple Aces',
'F Dickinson': 'Fairleigh Dickinson Knights',
'FL Atlantic': 'Florida Atlantic Owls',
'FL Gulf Coast': 'Florida Gulf Coast Eagles',
'Fairfield': 'Fairfield Stags',
'Florida': 'Florida Gators',
'Florida A&M': 'Florida A&M Rattlers',
'Florida Intl': 'Florida International Panthers',
'Florida St': 'Florida State Seminoles',
'Fordham': 'Fordham Rams',
'Fresno St': 'Fresno State Bulldogs',
'Furman': 'Furman Paladins',
'G Washington': 'George Washington Colonials',
'Ga Southern': 'Georgia Southern Eagles',
'Gardner Webb': "Gardner-Webb Runnin' Bulldogs",
'George Mason': 'George Mason Patriots',
'Georgetown': 'Georgetown Hoyas',
'Georgia': 'Georgia Bulldogs',
'Georgia St': 'Georgia State Panthers',
'Georgia Tech': 'Georgia Tech Yellow Jackets',
'Gonzaga': 'Gonzaga Bulldogs',
'Grambling': 'Grambling Tigers',
'Grand Canyon': 'Grand Canyon Antelopes',
'WI Green Bay': 'Green Bay Phoenix',
'Hampton': 'Hampton Pirates',
'Hardin-Simmons': None,
'Hartford': 'Hartford Hawks',
'Harvard': 'Harvard Crimson',
'Hawaii': 'Hawaii Warriors',
'High Point': 'High Point Panthers',
'Hofstra': 'Hofstra Pride',
'Holy Cross': 'Holy Cross Crusaders',
'Houston': 'Houston Cougars',
'Houston Bap': 'Houston Baptist Huskies',
'Howard': 'Howard Bison',
'IL Chicago': 'Illinois-Chicago Flames',
'IPFW': 'IPFW Mastodons',
'IUPUI': 'IUPUI Jaguars',
'Idaho': 'Idaho Vandals',
'Idaho St': 'Idaho State Bengals',
'Illinois': 'Illinois Fighting Illini',
'Illinois St': 'Illinois State Redbirds',
'Incarnate Word': 'Incarnate Word Cardinals',
'Indiana': 'Indiana Hoosiers',
'Indiana St': 'Indiana State Sycamores',
'Iona': 'Iona Gaels',
'Iowa': 'Iowa Hawkeyes',
'Iowa St': 'Iowa State Cyclones',
'Jackson St': 'Jackson State Tigers',
'Jacksonville': '<NAME>',
'Jacksonville St': 'Jacksonville State Gamecocks',
'<NAME>': '<NAME>',
'Kansas': 'Kansas Jayhawks',
'Kansas St': 'Kansas State Wildcats',
'Kennesaw': 'Kennesaw State Owls',
'Kent': 'Kent State Golden Flashes',
'Kentucky': 'Kentucky Wildcats',
'LSU': 'Louisiana State Fighting Tigers',
'La Salle': 'La Salle Explorers',
'Lafayette': 'Lafayette Leopards',
'Lamar': 'Lamar Cardinals',
'Lehigh': 'Lehigh Mountain Hawks',
'Liberty': 'Liberty Flames',
'Lipscomb': 'Lipscomb Bisons',
'Long Beach St': 'Long Beach State 49ers',
'Long Island': 'Long Island University Blackbirds',
'Longwood': 'Longwood Lancers',
'Louisiana Tech': 'Louisiana Tech Bulldogs',
'Louisville': 'Louisville Cardinals',
'Loy Marymount': 'Loyola Marymount Lions',
'Loyola MD': 'Loyola (MD) Greyhounds',
'Loyola-Chicago': 'Loyola (IL) Ramblers',
'MA Lowell': 'Massachusetts-Lowell River Hawks',
'MD E Shore': 'Maryland-Eastern Shore Hawks',
'MS Valley St': 'Mississippi Valley State Delta Devils',
'MTSU': 'Middle Tennessee Blue Raiders',
'Maine': 'Maine Black Bears',
'Manhattan': 'Manhattan Jaspers',
'Marist': 'Marist Red Foxes',
'Marquette': 'Marquette Golden Eagles',
'Marshall': 'Marshall Thundering Herd',
'Maryland': 'Maryland Terrapins',
'Massachusetts': 'Massachusetts Minutemen',
'McNeese St': 'McNeese State Cowboys',
'Memphis': 'Memphis Tigers',
'Mercer': 'Mercer Bears',
'Miami FL': 'Miami (FL) Hurricanes',
'Miami OH': 'Miami (OH) RedHawks',
'Michigan': 'Michigan Wolverines',
'Michigan St': 'Michigan State Spartans',
'WI Milwaukee': 'Milwaukee Panthers',
'Minnesota': 'Minnesota Golden Gophers',
'Mississippi': 'Mississippi Rebels',
'Mississippi St': 'Mississippi State Bulldogs',
'Missouri': 'Missouri Tigers',
'Missouri KC': 'Missouri-Kansas City Kangaroos',
'Missouri St': 'Missouri State Bears',
'Monmouth NJ': 'Monmouth Hawks',
'Montana': 'Montana Grizzlies',
'Montana St': 'Montana State Bobcats',
'Morehead St': 'Morehead State Eagles',
'Morgan St': 'Morgan State Bears',
'Morris Brown': None, #u'Morris Brown Wolverines',
"Mt St Mary's": "Mount St. Mary's Mountaineers",
'Murray St': 'Murray State Racers',
'N Colorado': 'Northern Colorado Bears',
'N Dakota St': 'North Dakota State Bison',
'N Illinois': 'Northern Illinois Huskies',
'N Kentucky': 'Northern Kentucky Norse',
'NC A&T': 'North Carolina A&T Aggies',
'NC Central': 'North Carolina Central Eagles',
'NC State': 'North Carolina State Wolfpack',
'NE Illinois': None, #u'Northeastern Illinois Golden Eagles',
'NE Omaha': 'Nebraska-Omaha Mavericks',
'NJIT': 'NJIT Highlanders',
'Navy': 'Navy Midshipmen',
'Nebraska': 'Nebraska Cornhuskers',
'Nevada': 'Nevada Wolf Pack',
'New Hampshire': 'New Hampshire Wildcats',
'New Mexico': 'New Mexico Lobos',
'New Mexico St': 'New Mexico State Aggies',
'New Orleans': 'New Orleans Privateers',
'Niagara': 'Niagara Purple Eagles',
'Nicholls St': 'Nicholls State Colonels',
'Norfolk St': 'Norfolk State Spartans',
'North Carolina': 'North Carolina Tar Heels',
'North Dakota': 'North Dakota UND',
'North Florida': 'North Florida Ospreys',
'North Texas': 'North Texas Mean Green',
'Northeastern': 'Northeastern Huskies',
'Northern Arizona': 'Northern Arizona Lumberjacks',
'Northern Iowa': 'Northern Iowa Panthers',
'Northwestern': 'Northwestern Wildcats',
'Northwestern LA': 'Northwestern State Demons',
'Notre Dame': 'Notre Dame Fighting Irish',
'Oakland': 'Oakland Golden Grizzlies',
'Ohio': 'Ohio Bobcats',
'Ohio St': 'Ohio State Buckeyes',
'Okla City': None, #u'Oklahoma City Chiefs',
'Oklahoma': 'Oklahoma Sooners',
'Oklahoma St': 'Oklahoma State Cowboys',
'Old Dominion': 'Old Dominion Monarchs',
'Oral Roberts': 'Oral Roberts Golden Eagles',
'Oregon': 'Oregon Ducks',
'Oregon St': 'Oregon State Beavers',
'Pacific': 'Pacific Tigers',
'Penn': 'Pennsylvania Quakers',
'Penn St': 'Penn State Nittany Lions',
'Pepperdine': 'Pepperdine Waves',
'Pittsburgh': 'Pittsburgh Panthers',
'Portland': 'Portland Pilots',
'Portland St': 'Portland State Vikings',
'Prairie View': 'Prairie View Panthers',
'Presbyterian': 'Presbyterian Blue Hose',
'Princeton': 'Princeton Tigers',
'Providence': 'Providence Friars',
'Purdue': 'Purdue Boilermakers',
'Quinnipiac': 'Quinnipiac Bobcats',
'Radford': 'Radford Highlanders',
'Rhode Island': 'Rhode Island Rams',
'Rice': 'Rice Owls',
'Richmond': 'Richmond Spiders',
'Rider': 'Rider Broncs',
'<NAME>': '<NAME>',
'Rutgers': 'Rutgers Scarlet Knights',
'S Carolina St': 'South Carolina State Bulldogs',
'S Dakota St': 'South Dakota State Jackrabbits',
'S Illinois': 'Southern Illinois Salukis',
'SC Upstate': 'South Carolina Upstate Spartans',
'SE Louisiana': 'Southeastern Louisiana Lions',
'SE Missouri St': 'Southeast Missouri State Redhawks',
'SF Austin': '<NAME>',
'SMU': 'Southern Methodist Mustangs',
'Sacred Heart': 'Sacred Heart Pioneers',
'Sam Houston St': 'Sam Houston State Bearkats',
'Samford': '<NAME>',
'UT San Antonio': 'Texas-San Antonio Roadrunners',
'San Diego': 'San Diego Toreros',
'San Diego St': 'San Diego State Aztecs',
'San Francisco': 'San Francisco Dons',
'San Jose St': 'San Jose State Spartans',
'Santa Barbara': 'UC-Santa Barbara Gauchos',
'Santa Clara': 'Santa Clara Broncos',
'Savannah St': 'Savannah State Tigers',
'Seattle': 'Seattle Redhawks',
'Seton Hall': 'Seton Hall Pirates',
'Siena': 'Siena Saints',
'South Alabama': 'South Alabama Jaguars',
'South Carolina': 'South Carolina Gamecocks',
'South Dakota': 'South Dakota Coyotes',
'South Florida': 'South Florida Bulls',
'Southern Miss': 'Southern Mississippi Golden Eagles',
'Southern Univ': 'Southern Jaguars',
'Southern Utah': 'Southern Utah Thunderbirds',
'St Bonaventure': 'St. Bonaventure Bonnies',
'St | |
# Ultroid - UserBot
# Copyright (C) 2020 TeamUltroid
#
# This file is a part of < https://github.com/TeamUltroid/Ultroid/ >
# Please read the GNU Affero General Public License in
# <https://www.github.com/TeamUltroid/Ultroid/blob/main/LICENSE/>.
"""
✘ Commands Available -
• `{i}gban <reply user/ username>`
• `{i}ungban`
Ban/Unban Globally.
• `{i}gstat <reply to user/userid/username>`
Check if user is GBanned.
• `{i}listgban`
List all GBanned users.
• `{i}gmute <reply user/ username>`
• `{i}ungmute`
Mute/UnMute Globally.
• `{i}gkick <reply user/ username>`
Globally Kick User.
• `{i}gcast <Message>`
Globally Send that msg in all groups.
• `{i}gucast <Message>`
Globally Send that msg in all Ur Chat Users.
•`{i}gpromote <reply to user> <channel/group/all> <rank>`
globally promote user where you are admin.
You can also set where To promote only groups or only channels or all.
E.g. `gpromote group boss` ~ it promotes the replied user in all groups.
Or `gpromote @username all sar` ~ it promotes the user in all groups and channels.
•`{i}gdemote`
Same function as gpromote.
"""
import os
from telethon import events
from telethon.tl.functions.channels import EditAdminRequest
from telethon.tl.types import ChatAdminRights
from . import *
def _gp_scope_match(dialog, key):
    """True when *dialog* matches the scope keyword ("group"/"channel"/"all")."""
    scope = key.lower()
    if "group" in scope:
        return dialog.is_group
    if "channel" in scope:
        return dialog.is_channel
    return dialog.is_group or dialog.is_channel


async def _gpromote_all(user, key, rank):
    """Best-effort: promote *user* with the standard rights in every dialog
    matching *key*. Returns the number of chats successfully edited."""
    rights = ChatAdminRights(
        add_admins=False,
        invite_users=True,
        change_info=False,
        ban_users=True,
        delete_messages=True,
        pin_messages=True,
    )
    count = 0
    async for dialog in ultroid_bot.iter_dialogs():
        if not _gp_scope_match(dialog, key):
            continue
        try:
            await ultroid_bot(EditAdminRequest(dialog.id, user, rights, rank))
            count += 1
        except Exception as er:
            # Best-effort: we may lack admin rights in some chats. Was
            # `except BaseException: pass`, which also swallowed
            # CancelledError/KeyboardInterrupt.
            LOGS.info(er)
    return count


@ultroid_cmd(
    pattern="gpromote ?(.*)",
)
async def _(e):
    """`gpromote <reply|username/id> [group|channel|all] [rank]` —
    promote a user in every chat of the chosen scope where possible."""
    if not e.out and not is_fullsudo(e.sender_id):
        return await eod(e, "`This Command Is Sudo Restricted.`")
    args = e.pattern_match.group(1)
    if not args:
        return await eod(e, "`Incorrect Format`")
    reply = await e.get_reply_message()
    parts = e.text.split()
    if reply:
        ev = await eor(e, "`Promoting Replied User Globally`")
        key = "all"
        if len(parts) > 1 and (("group" in parts[1]) or ("channel" in parts[1])):
            key = parts[1]
        rank = parts[2] if len(parts) > 2 else "AdMin"
        # PM replies carry the target in peer_id, group replies in from_id.
        user = reply.peer_id.user_id if e.is_private else reply.from_id.user_id
        c = await _gpromote_all(user, key, rank)
        return await eor(ev, f"Promoted The Replied Users in Total : {c} {key} chats")
    if len(parts) < 2:
        # Fixed: `not k[1]` raised IndexError on whitespace-only args, and
        # the reply string was missing its closing backtick.
        return await eod(e, "`Give someone's username/id or replied to user.`")
    user = parts[1]
    if user.isdigit():
        user = int(user)
    try:
        name = await ultroid_bot.get_entity(user)
    except Exception:
        return await eod(e, f"`No User Found Regarding {user}`")
    ev = await eor(e, f"`Promoting {name.first_name} globally.`")
    key = "all"
    if len(parts) > 2 and (("group" in parts[2]) or ("channel" in parts[2])):
        key = parts[2]
    rank = parts[3] if len(parts) > 3 else "AdMin"
    c = await _gpromote_all(user, key, rank)
    return await eor(ev, f"Promoted {name.first_name} in Total : {c} {key} chats.")
async def _gdemote_all(user, key):
    """Best-effort: strip admin rights from *user* in every dialog matching
    *key* ("group"/"channel"/"all"). Returns the number of chats edited."""
    # All-False rights demote the user back to a plain member.
    rights = ChatAdminRights(
        add_admins=False,
        invite_users=False,
        change_info=False,
        ban_users=False,
        delete_messages=False,
        pin_messages=False,
    )
    scope = key.lower()
    count = 0
    async for dialog in ultroid_bot.iter_dialogs():
        if "group" in scope:
            matches = dialog.is_group
        elif "channel" in scope:
            matches = dialog.is_channel
        else:
            matches = dialog.is_group or dialog.is_channel
        if not matches:
            continue
        try:
            await ultroid_bot(EditAdminRequest(dialog.id, user, rights, "Not AdMin"))
            count += 1
        except Exception as er:
            # Best-effort; was `except BaseException: pass`, which also
            # swallowed CancelledError/KeyboardInterrupt.
            LOGS.info(er)
    return count


@ultroid_cmd(
    pattern="gdemote ?(.*)",
)
async def _(e):
    """`gdemote <reply|username/id> [group|channel|all]` — demote a user
    in every chat of the chosen scope where possible."""
    if not e.out and not is_fullsudo(e.sender_id):
        return await eod(e, "`This Command Is Sudo Restricted.`")
    args = e.pattern_match.group(1)
    if not args:
        return await eod(e, "`Incorrect Format`")
    reply = await e.get_reply_message()
    parts = e.text.split()
    if reply:
        # PM replies carry the target in peer_id, group replies in from_id.
        user = reply.peer_id.user_id if e.is_private else reply.from_id.user_id
        ev = await eor(e, "`Demoting Replied User Globally`")
        key = "all"
        if len(parts) > 1 and (("group" in parts[1]) or ("channel" in parts[1])):
            key = parts[1]
        c = await _gdemote_all(user, key)
        return await eor(ev, f"Demoted The Replied Users in Total : {c} {key} chats")
    if len(parts) < 2:
        # Fixed: `not k[1]` raised IndexError on whitespace-only args, and
        # the reply string was missing its closing backtick.
        return await eod(e, "`Give someone's username/id or replied to user.`")
    user = parts[1]
    if user.isdigit():
        user = int(user)
    try:
        name = await ultroid_bot.get_entity(user)
    except Exception:
        return await eod(e, f"`No User Found Regarding {user}`")
    ev = await eor(e, f"`Demoting {name.first_name} globally.`")
    key = "all"
    if len(parts) > 2 and (("group" in parts[2]) or ("channel" in parts[2])):
        key = parts[2]
    c = await _gdemote_all(user, key)
    return await eor(ev, f"Demoted {name.first_name} in Total : {c} {key} chats.")
@ultroid_cmd(
    pattern="ungban ?(.*)",
)
async def _(e):
    """`ungban <reply/userid/username>` — lift a global ban in every
    group/channel and remove the user from gbanwatch."""
    # NOTE(review): unlike gban/gpromote, this handler has no is_fullsudo
    # gate — confirm that is intended.
    xx = await eor(e, "`UnGbanning...`")
    if e.reply_to_msg_id:
        userid = (await e.get_reply_message()).sender_id
    elif e.pattern_match.group(1):
        userid = await get_user_id(e.pattern_match.group(1))
    elif e.is_private:
        userid = (await e.get_chat()).id
    else:
        return await eod(xx, "`Reply to some msg or add their id.`", time=5)
    # Bail out before the entity lookup — the original resolved the entity
    # over the network even when the user was not gbanned at all.
    if not is_gbanned(userid):
        return await eod(xx, "`User is not gbanned.`", time=3)
    name = (await e.client.get_entity(userid)).first_name
    chats = 0
    async for dialog in e.client.iter_dialogs():
        if dialog.is_group or dialog.is_channel:
            try:
                await e.client.edit_permissions(dialog.id, userid, view_messages=True)
                chats += 1
            except Exception:
                # best-effort: we may not be admin everywhere
                pass
    ungban(userid)
    delete_gban_reason(userid)
    await xx.edit(
        f"`Ungbanned` [{name}](tg://user?id={userid}) `in {chats} chats.\nRemoved from gbanwatch.`",
    )
@ultroid_cmd(
    pattern="gban ?(.*)",
)
async def _(e):
    """`gban <reply/userid/username> [reason]` — ban a user in every
    group/channel and add them to gbanwatch."""
    if not e.out and not is_fullsudo(e.sender_id):
        return await eor(e, "`This Command Is Sudo Restricted.`")
    xx = await eor(e, "`Gbanning...`")
    reason = ""
    if e.reply_to_msg_id:
        userid = (await e.get_reply_message()).sender_id
        try:
            reason = e.text.split(" ", maxsplit=1)[1]
        except IndexError:
            reason = ""
    elif e.pattern_match.group(1):
        usr = e.text.split(" ", maxsplit=2)[1]
        userid = await get_user_id(usr)
        try:
            reason = e.text.split(" ", maxsplit=2)[2]
        except IndexError:
            reason = ""
    elif e.is_private:
        userid = (await e.get_chat()).id
        try:
            reason = e.text.split(" ", maxsplit=1)[1]
        except IndexError:
            reason = ""
    else:
        # Fixed: was `tome=5`, which would raise TypeError in eod; every
        # sibling call uses `time=`.
        return await eod(xx, "`Reply to some msg or add their id.`", time=5)
    name = (await e.client.get_entity(userid)).first_name
    if userid == ultroid_bot.uid:
        return await eod(xx, "`I can't gban myself.`", time=3)
    if str(userid) in DEVLIST:
        return await eod(xx, "`I can't gban my Developers.`", time=3)
    if is_gbanned(userid):
        return await eod(
            xx,
            "`User is already gbanned and added to gbanwatch.`",
            time=4,
        )
    chats = 0
    async for dialog in e.client.iter_dialogs():
        if dialog.is_group or dialog.is_channel:
            try:
                await e.client.edit_permissions(dialog.id, userid, view_messages=False)
                chats += 1
            except Exception:
                # best-effort: we may not be admin everywhere
                pass
    gban(userid)
    add_gban_reason(userid, reason)
    gb_msg = f"**#Gbanned** [{name}](tg://user?id={userid}) `in {chats} chats and added to gbanwatch!`"
    if reason:
        gb_msg += f"\n**Reason** - {reason}"
    await xx.edit(gb_msg)
@ultroid_cmd(
    pattern="gcast ?(.*)",
)
async def gcast(event):
    """`gcast <message>` — broadcast the message to every group dialog."""
    if not event.out and not is_fullsudo(event.sender_id):
        return await eor(event, "`This Command Is Sudo Restricted.`")
    xx = event.pattern_match.group(1)
    if not xx:
        # Fixed: missing `await` returned a bare, never-run coroutine.
        return await eor(event, "`Give some text to Globally Broadcast`")
    # Slice the raw text rather than using the regex group so that
    # multi-line messages survive ((.*) does not match newlines).
    msg = event.text[6:]
    kk = await eor(event, "`Globally Broadcasting Msg...`")
    er = 0
    done = 0
    async for dialog in ultroid_bot.iter_dialogs():
        if not dialog.is_group:
            continue
        try:
            await ultroid_bot.send_message(dialog.id, msg)
            # Fixed: the original incremented `done` before sending, so a
            # failed chat was counted in BOTH `done` and `er`.
            done += 1
        except Exception:
            er += 1
    await kk.edit(f"Done in {done} chats, error in {er} chat(s)")
@ultroid_cmd(
pattern="gucast ?(.*)",
)
async def gucast(event):
if not event.out and not is_fullsudo(event.sender_id):
return await eor(event, "`This Command Is Sudo Restricted.`")
xx = event.pattern_match.group(1)
if not xx:
return eor(event, "`Give some text to Globally Broadcast`")
tt = event.text
| |
"modifiedTime":
suggest = "modified_time"
elif key == "azureDataLakeSection":
suggest = "azure_data_lake_section"
elif key == "azureMySqlSection":
suggest = "azure_my_sql_section"
elif key == "azurePostgreSqlSection":
suggest = "azure_postgre_sql_section"
elif key == "azureSqlDatabaseSection":
suggest = "azure_sql_database_section"
elif key == "azureStorageSection":
suggest = "azure_storage_section"
elif key == "dataStoreType":
suggest = "data_store_type"
elif key == "glusterFsSection":
suggest = "gluster_fs_section"
elif key == "hasBeenValidated":
suggest = "has_been_validated"
elif key == "linkedInfo":
suggest = "linked_info"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in DatastoreResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
    # Warn when a camelCase wire key with a snake_case getter alias is
    # accessed directly, then fall through to plain dict lookup.
    DatastoreResponse.__key_warning(key)
    return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
    # Same camelCase-key warning as __getitem__, with dict.get semantics.
    DatastoreResponse.__key_warning(key)
    return super().get(key, default)
def __init__(__self__, *,
             created_by: 'outputs.UserInfoResponse',
             created_time: str,
             modified_by: 'outputs.UserInfoResponse',
             modified_time: str,
             tags: Mapping[str, str],
             azure_data_lake_section: Optional['outputs.AzureDataLakeSectionResponse'] = None,
             azure_my_sql_section: Optional['outputs.AzureMySqlSectionResponse'] = None,
             azure_postgre_sql_section: Optional['outputs.AzurePostgreSqlSectionResponse'] = None,
             azure_sql_database_section: Optional['outputs.AzureSqlDatabaseSectionResponse'] = None,
             azure_storage_section: Optional['outputs.AzureStorageSectionResponse'] = None,
             data_store_type: Optional[str] = None,
             description: Optional[str] = None,
             gluster_fs_section: Optional['outputs.GlusterFsSectionResponse'] = None,
             has_been_validated: Optional[bool] = None,
             linked_info: Optional['outputs.LinkedInfoResponse'] = None,
             name: Optional[str] = None):
    """
    Machine Learning datastore object.
    :param 'UserInfoResponse' created_by: The User who created the datastore.
    :param str created_time: The date and time when the datastore was created.
    :param 'UserInfoResponse' modified_by: The User who modified the datastore.
    :param str modified_time: The date and time when the datastore was last modified.
    :param Mapping[str, str] tags: Tags for this datastore.
    :param 'AzureDataLakeSectionResponse' azure_data_lake_section: Azure Data Lake section of this datastore, if set.
    :param 'AzureMySqlSectionResponse' azure_my_sql_section: Azure MySQL section of this datastore, if set.
    :param 'AzurePostgreSqlSectionResponse' azure_postgre_sql_section: Azure PostgreSQL section of this datastore, if set.
    :param 'AzureSqlDatabaseSectionResponse' azure_sql_database_section: Azure SQL Database section of this datastore, if set.
    :param 'AzureStorageSectionResponse' azure_storage_section: Azure Storage section of this datastore, if set.
    :param str data_store_type: The datastore type.
    :param str description: Description of the datastore.
    :param 'GlusterFsSectionResponse' gluster_fs_section: Data specific to GlusterFS.
    :param bool has_been_validated: A read only property that denotes whether the service datastore has been validated with credentials. Defaults to False when omitted or passed as None.
    :param 'LinkedInfoResponse' linked_info: Info about origin if it is linked.
    :param str name: Name of the datastore.
    """
    pulumi.set(__self__, "created_by", created_by)
    pulumi.set(__self__, "created_time", created_time)
    pulumi.set(__self__, "modified_by", modified_by)
    pulumi.set(__self__, "modified_time", modified_time)
    pulumi.set(__self__, "tags", tags)
    if azure_data_lake_section is not None:
        pulumi.set(__self__, "azure_data_lake_section", azure_data_lake_section)
    if azure_my_sql_section is not None:
        pulumi.set(__self__, "azure_my_sql_section", azure_my_sql_section)
    if azure_postgre_sql_section is not None:
        pulumi.set(__self__, "azure_postgre_sql_section", azure_postgre_sql_section)
    if azure_sql_database_section is not None:
        pulumi.set(__self__, "azure_sql_database_section", azure_sql_database_section)
    if azure_storage_section is not None:
        pulumi.set(__self__, "azure_storage_section", azure_storage_section)
    if data_store_type is not None:
        pulumi.set(__self__, "data_store_type", data_store_type)
    if description is not None:
        pulumi.set(__self__, "description", description)
    if gluster_fs_section is not None:
        pulumi.set(__self__, "gluster_fs_section", gluster_fs_section)
    # None is normalized to False here, so after this line the following
    # `is not None` check is always true (kept as generated).
    if has_been_validated is None:
        has_been_validated = False
    if has_been_validated is not None:
        pulumi.set(__self__, "has_been_validated", has_been_validated)
    if linked_info is not None:
        pulumi.set(__self__, "linked_info", linked_info)
    if name is not None:
        pulumi.set(__self__, "name", name)
@property
@pulumi.getter(name="createdBy")
def created_by(self) -> 'outputs.UserInfoResponse':
    """
    The User who created the datastore.
    (Read-only; backed by the wire key "createdBy".)
    """
    return pulumi.get(self, "created_by")
@property
@pulumi.getter(name="createdTime")
def created_time(self) -> str:
    """
    The date and time when the datastore was created.
    (Read-only; backed by the wire key "createdTime".)
    """
    return pulumi.get(self, "created_time")
@property
@pulumi.getter(name="modifiedBy")
def modified_by(self) -> 'outputs.UserInfoResponse':
    """
    The User who modified the datastore.
    (Read-only; backed by the wire key "modifiedBy".)
    """
    return pulumi.get(self, "modified_by")
@property
@pulumi.getter(name="modifiedTime")
def modified_time(self) -> str:
    """
    The date and time when the datastore was last modified.
    (Read-only; backed by the wire key "modifiedTime".)
    """
    return pulumi.get(self, "modified_time")
@property
@pulumi.getter
def tags(self) -> Mapping[str, str]:
    """
    Tags for this datastore.
    (Read-only mapping; backed by the wire key "tags".)
    """
    return pulumi.get(self, "tags")
@property
@pulumi.getter(name="azureDataLakeSection")
def azure_data_lake_section(self) -> Optional['outputs.AzureDataLakeSectionResponse']:
    """Azure Data Lake section of this datastore, if set."""
    return pulumi.get(self, "azure_data_lake_section")
@property
@pulumi.getter(name="azureMySqlSection")
def azure_my_sql_section(self) -> Optional['outputs.AzureMySqlSectionResponse']:
    """Azure MySQL section of this datastore, if set."""
    return pulumi.get(self, "azure_my_sql_section")
@property
@pulumi.getter(name="azurePostgreSqlSection")
def azure_postgre_sql_section(self) -> Optional['outputs.AzurePostgreSqlSectionResponse']:
    """Azure PostgreSQL section of this datastore, if set."""
    return pulumi.get(self, "azure_postgre_sql_section")
@property
@pulumi.getter(name="azureSqlDatabaseSection")
def azure_sql_database_section(self) -> Optional['outputs.AzureSqlDatabaseSectionResponse']:
    """Azure SQL Database section of this datastore, if set."""
    return pulumi.get(self, "azure_sql_database_section")
@property
@pulumi.getter(name="azureStorageSection")
def azure_storage_section(self) -> Optional['outputs.AzureStorageSectionResponse']:
    """Azure Storage section of this datastore, if set."""
    return pulumi.get(self, "azure_storage_section")
@property
@pulumi.getter(name="dataStoreType")
def data_store_type(self) -> Optional[str]:
    """
    The datastore type.
    (Read-only; backed by the wire key "dataStoreType".)
    """
    return pulumi.get(self, "data_store_type")
@property
@pulumi.getter
def description(self) -> Optional[str]:
    """
    Description of the datastore.
    (Read-only; backed by the wire key "description".)
    """
    return pulumi.get(self, "description")
@property
@pulumi.getter(name="glusterFsSection")
def gluster_fs_section(self) -> Optional['outputs.GlusterFsSectionResponse']:
    """
    Data specific to GlusterFS.
    (Read-only; backed by the wire key "glusterFsSection".)
    """
    return pulumi.get(self, "gluster_fs_section")
@property
@pulumi.getter(name="hasBeenValidated")
def has_been_validated(self) -> Optional[bool]:
    """
    A read only property that denotes whether the service datastore has been validated with credentials.
    (Defaults to False — see __init__.)
    """
    return pulumi.get(self, "has_been_validated")
@property
@pulumi.getter(name="linkedInfo")
def linked_info(self) -> Optional['outputs.LinkedInfoResponse']:
    """
    Info about origin if it is linked.
    (Read-only; backed by the wire key "linkedInfo".)
    """
    return pulumi.get(self, "linked_info")
@property
@pulumi.getter
def name(self) -> Optional[str]:
    """
    Name of the datastore.
    (Read-only; backed by the wire key "name".)
    """
    return pulumi.get(self, "name")
@pulumi.output_type
class DockerBuildResponse(dict):
    """
    Class to represent configuration settings for Docker Build
    """
    @staticmethod
    def __key_warning(key: str):
        # Map camelCase wire keys to their snake_case property getters.
        suggest = None
        if key == "dockerSpecificationType":
            suggest = "docker_specification_type"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in DockerBuildResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        # Warn on direct camelCase-key access, then plain dict lookup.
        DockerBuildResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        # Same warning as __getitem__, with dict.get semantics.
        DockerBuildResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 docker_specification_type: str,
                 dockerfile: str,
                 context: Optional[str] = None,
                 platform: Optional['outputs.DockerImagePlatformResponse'] = None):
        """
        Class to represent configuration settings for Docker Build
        :param str docker_specification_type: Enum to determine docker specification type. Must be either Build or Image.
               Expected value is 'Build'.
        :param str dockerfile: Docker command line instructions to assemble an image.
               <seealso href="https://repo2docker.readthedocs.io/en/latest/config_files.html#dockerfile-advanced-environments" />
        :param str context: Path to a snapshot of the Docker Context. This property is only valid if Dockerfile is specified.
               The path is relative to the asset path which must contain a single Blob URI value.
               <seealso href="https://docs.docker.com/engine/context/working-with-contexts/" />
        :param 'DockerImagePlatformResponse' platform: The platform information of the docker image.
        """
        # NOTE: the docker_specification_type argument is accepted but
        # ignored — the discriminator is always stored as the literal
        # 'Build' for this response type.
        pulumi.set(__self__, "docker_specification_type", 'Build')
        pulumi.set(__self__, "dockerfile", dockerfile)
        if context is not None:
            pulumi.set(__self__, "context", context)
        if platform is not None:
            pulumi.set(__self__, "platform", platform)

    @property
    @pulumi.getter(name="dockerSpecificationType")
    def docker_specification_type(self) -> str:
        """
        Enum to determine docker specification type. Must be either Build or Image.
        Expected value is 'Build'.
        """
        return pulumi.get(self, "docker_specification_type")

    @property
    @pulumi.getter
    def dockerfile(self) -> str:
        """
        Docker command line instructions to assemble an image.
        <seealso href="https://repo2docker.readthedocs.io/en/latest/config_files.html#dockerfile-advanced-environments" />
        """
        return pulumi.get(self, "dockerfile")

    @property
    @pulumi.getter
    def context(self) -> Optional[str]:
        """
        Path to a snapshot of the Docker Context. This property is only valid if Dockerfile is specified.
        The path is relative to the asset path which must contain a single Blob URI value.
        <seealso href="https://docs.docker.com/engine/context/working-with-contexts/" />
        """
        return pulumi.get(self, "context")

    @property
    @pulumi.getter
    def platform(self) -> Optional['outputs.DockerImagePlatformResponse']:
        """
        The platform information of the docker image.
        """
        return pulumi.get(self, "platform")
@pulumi.output_type
class DockerImagePlatformResponse(dict):
    """Platform information of a docker image."""

    @staticmethod
    def __key_warning(key: str):
        # Only one camelCase wire key maps to a snake_case getter here.
        suggest = "operating_system_type" if key == "operatingSystemType" else None
        if suggest is not None:
            pulumi.log.warn(f"Key '{key}' not found in DockerImagePlatformResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        DockerImagePlatformResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        DockerImagePlatformResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 operating_system_type: Optional[str] = None):
        """
        :param str operating_system_type: The OS type the Environment.
        """
        if operating_system_type is not None:
            pulumi.set(__self__, "operating_system_type", operating_system_type)

    @property
    @pulumi.getter(name="operatingSystemType")
    def operating_system_type(self) -> Optional[str]:
        """The OS type the Environment."""
        return pulumi.get(self, "operating_system_type")
@pulumi.output_type
class DockerImageResponse(dict):
    """
    Class to represent configuration settings for Docker Build
    """
    @staticmethod
    def __key_warning(key: str):
        # Translate the camelCase wire keys to their snake_case property names
        # and warn callers who use raw dict access instead of the getters.
        suggest = {
            "dockerImageUri": "docker_image_uri",
            "dockerSpecificationType": "docker_specification_type",
        }.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in DockerImageResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        DockerImageResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        DockerImageResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 docker_image_uri: str,
                 docker_specification_type: str,
                 platform: Optional['outputs.DockerImagePlatformResponse'] = None):
        """
        Class to represent configuration settings for Docker Build
        :param str docker_image_uri: Image name of a custom base image.
               <seealso href="https://docs.microsoft.com/en-us/azure/machine-learning/how-to-deploy-custom-docker-image#use-a-custom-base-image" />
        :param str docker_specification_type: Enum to determine docker specification type. Must be either Build or Image.
               Expected value is 'Image'.
        :param 'DockerImagePlatformResponse' platform: The platform information of the docker image.
        """
        pulumi.set(__self__, "docker_image_uri", docker_image_uri)
        # The discriminator is fixed for this type.
        pulumi.set(__self__, "docker_specification_type", 'Image')
        if platform is not None:
            pulumi.set(__self__, "platform", platform)

    @property
    @pulumi.getter(name="dockerImageUri")
    def docker_image_uri(self) -> str:
        """
        Image name of a custom base image.
        <seealso href="https://docs.microsoft.com/en-us/azure/machine-learning/how-to-deploy-custom-docker-image#use-a-custom-base-image" />
        """
        return pulumi.get(self, "docker_image_uri")

    @property
    @pulumi.getter(name="dockerSpecificationType")
    def docker_specification_type(self) -> str:
        """
        Enum to determine docker specification type. Must be either Build or Image.
        Expected value is 'Image'.
        """
        return pulumi.get(self, "docker_specification_type")

    @property
    @pulumi.getter
    def platform(self) -> Optional['outputs.DockerImagePlatformResponse']:
        """
        The platform information of the docker image.
        """
        return pulumi.get(self, "platform")
@pulumi.output_type
class EncryptionPropertyResponse(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "keyVaultProperties":
suggest = "key_vault_properties"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in EncryptionPropertyResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
EncryptionPropertyResponse.__key_warning(key)
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# =============================================================================
## @file ostap/fitting/dataset.py
# Module with decoration for RooAbsData and related RooFit classes
# @see RooAbsData
# @see RooDataSet
# @author <NAME> <EMAIL>
# @date 2011-06-07
# =============================================================================
"""Module with decoration for RooAbsData and related RooFit classes
- see RooAbsData
- see RooDataSet
"""
# =============================================================================
__version__ = "$Revision$"
__author__ = "<NAME> <EMAIL>"
__date__ = "2011-06-07"
__all__ = (
'setStorage' , ## define the default storage for RooDataStore
'useStorage' , ## define (as context) the default storage for RooDataStore
'ds_draw' , ## draw variables from RooDataSet
'ds_project' , ## project variables from RooDataSet to histogram
)
# =============================================================================
import ROOT, random
from builtins import range
from ostap.core.core import Ostap, VE, hID, dsID , valid_pointer
from ostap.core.ostap_types import integer_types, string_types
import ostap.fitting.variables
import ostap.fitting.roocollections
import ostap.fitting.printable
# =============================================================================
# logging
# =============================================================================
from ostap.logger.logger import getLogger , allright, attention
if '__main__' == __name__ : logger = getLogger( 'ostap.fitting.dataset' )
else : logger = getLogger( __name__ )
# =============================================================================
logger.debug( 'Some useful decorations for RooAbsData object')
# =============================================================================
_new_methods_ = []
# =============================================================================
## iterator for RooAbsData
# @author <NAME> <EMAIL>
# @date 2011-06-07
def _rad_iter_ ( self ) :
"""Iterator for RooAbsData
>>> dataset = ...
>>> for i in dataset : ...
"""
_l = len ( self )
for i in range ( 0 , _l ) :
yield self.get ( i )
# =============================================================================
## access to the entries in RooAbsData
# @code
# dataset = ...
# event = dataset[4]
# events = dataset[0:1000]
# events = dataset[0:-1:10]
# @endcode
# @author <NAME> <EMAIL>
# @date 2013-03-31
def _rad_getitem_ ( self , i ) :
    """Get an entry or a sub-sample from RooDataSet
    >>> dataset = ...
    >>> event  = dataset[4]
    >>> events = dataset[0:1000]
    >>> events = dataset[0:-1:10]
    """
    if isinstance ( i , slice ) :
        start , stop , step = i.indices ( len ( self ) )
        ## contiguous ranges can be handled by RooFit directly
        if step == 1 :
            return self.reduce ( ROOT.RooFit.EventRange ( start , stop ) )
        sample = self.emptyClone ( dsID () )
        for index in range ( start , stop , step ) :
            sample.add ( self [ index ] )
        return sample
    if isinstance ( i , integer_types ) and 0 <= i < len ( self ) :
        return self.get ( i )
    raise IndexError ( 'Invalid index %s' % i )
# =============================================================================
## Get variables in form of RooArgList
# @author <NAME> <EMAIL>
# @date 2013-03-31
def _rad_vlist_ ( self ) :
    """Return the dataset variables in form of RooArgList
    """
    variables = ROOT.RooArgList ()
    for variable in self.get () :
        variables.add ( variable )
    return variables
# =============================================================================
## check the presence of variable with given name in dataset
# @author <NAME> <EMAIL>
# @date 2013-03-31
def _rad_contains_ ( self , aname ) :
"""Check the presence of variable in dataset
>>> if 'mass' in dataset : print 'ok!'
"""
vset = self.get()
return aname in vset
# =============================================================================
## merge/append two datasets into a single one
# @code
# dset1 = ...
# dset2 = ...
# dset1 += dset2
# @endcode
def _rad_iadd_ ( self , another ) :
    """ Merge/append two datasets into a single one
    - two datasets must have identical structure
    >>> dset1  = ...
    >>> dset2  = ...
    >>> dset1 += dset2
    """
    if isinstance ( self , ROOT.RooDataSet ) and isinstance ( another , ROOT.RooDataSet ) :
        self.append ( another )
        return self
    return NotImplemented
# =============================================================================
## merge/append two datasets into a single one
# @code
# dset1 = ...
# dset2 = ...
# dset = dset1 + dset2
# @endcode
def _rad_add_ ( self , another ) :
    """ Merge/append two datasets into a single one
    - two datasets must have identical structure
    >>> dset1 = ...
    >>> dset2 = ...
    >>> dset  = dset1 + dset2
    """
    if isinstance ( self , ROOT.RooDataSet ) and isinstance ( another , ROOT.RooDataSet ) :
        merged = self.emptyClone ( dsID () )
        merged.append ( self    )
        merged.append ( another )
        return merged
    return NotImplemented
# =============================================================================
# merge/append two datasets into a single one
def _rad_imul_ ( self , another ) :
    """ Merge/append two datasets into a single one
    - two datasets must have the same number of entries!
    >>> dset1  = ...
    >>> dset2  = ...
    >>> dset1 *= dset2
    """
    if isinstance ( another , ROOT.RooAbsData ) and len ( self ) == len ( another ) :
        self.merge ( another )
        return self
    return NotImplemented
# =============================================================================
## merge two dataset (of same length) OR get small (random) fraction of dataset
# @code
# ## get smaller dataset:
# dataset = ....
# small = dataset * 0.1
# ## merge two dataset of the same lenth
# merged = dataset1 * dataset2
# @endcode
def _rad_mul_ ( self , another ) :
    """
    - (1) Get small (random) fraction of dataset:
    >>> dataset = ....
    >>> small   = 0.1 * dataset
    - (2) Merge two dataset (of the same length)
    >>> dataset3 = dataset1 * dataset2
    """
    ## case (2): column-wise merge of two equal-length datasets
    if isinstance ( another , ROOT.RooAbsData ) :
        if len ( self ) != len ( another ) : return NotImplemented
        merged = self.emptyClone ( dsID () )
        merged.append ( self    )
        merged.merge  ( another )
        return merged
    ## case (1): keep each entry with probability ``fraction``
    fraction = another
    if isinstance ( fraction , float ) and 0 < fraction < 1 :
        picked = self.emptyClone ()
        for index in range ( len ( self ) ) :
            if random.uniform ( 0 , 1 ) < fraction :
                picked.add ( self [ index ] )
        return picked
    elif 1 == fraction : return self.clone      ()
    elif 0 == fraction : return self.emptyClone ()
    return NotImplemented
# =============================================================================
## get small (random) fraction of dataset
# @code
# dataset = ....
# small = dataset / 10
# @endcode
def _rad_div_ ( self , fraction ) :
    """ Get small (random) fraction
    >>> dataset = ....
    >>> small   = dataset / 10
    """
    ## delegate to the multiplication operator with probability 1/fraction
    if isinstance ( fraction , integer_types ) and 1 < fraction :
        return _rad_mul_ ( self , 1.0 / fraction )
    if 1 == fraction : return self.clone ()
    return NotImplemented
# =============================================================================
## get small (fixed) fraction of dataset
# @code
# dataset = ....
# small = dataset % 10
# @endcode
def _rad_mod_ ( self , fraction ) :
    """ Get small (fixed) fraction of dataset
    >>> dataset = ....
    >>> small   = dataset % 10
    """
    if isinstance ( fraction , integer_types ) and 1 < fraction :
        picked = self.emptyClone ()
        ## every ``fraction``-th entry (the very last entry is excluded,
        ## matching slice(0,-1,step) semantics)
        bounds = slice ( 0 , -1 , fraction ).indices ( len ( self ) )
        for index in range ( *bounds ) :
            picked.add ( self [ index ] )
        return picked
    if 1 == fraction : return self.clone ()
    return NotImplemented
# =============================================================================
## get (random) sub-sample from the dataset
# @code
# data = ...
# subset = data.sample ( 100 ) ## get 100 events
# subset = data.sample ( 0.01 ) ## get 1% of events
# @endcode
def _rad_sample_ ( self , num ) :
    """Get (random) sub-sample from the dataset
    >>> data   = ...
    >>> subset = data.sample ( 100  ) ## get 100 events
    >>> subset = data.sample ( 0.01 ) ## get 1% of events
    """
    if 0 == num :
        return self.emptyClone ( dsID () )
    elif isinstance ( num , integer_types ) and 0 < num :
        num = min ( num , len ( self ) )
    elif isinstance ( num , float ) and 0 < num < 1 :
        ## interpret a fraction as the mean of a Poisson-distributed size
        from ostap.math.random_ext import poisson
        return _rad_sample_ ( self , poisson ( num * len ( self ) ) )
    else :
        raise TypeError("Unknown ``num''=%s" % num )
    chosen  = self.emptyClone ( dsID () )
    indices = random.sample ( range ( len ( self ) ) , num )
    while indices :
        chosen.add ( self [ indices.pop () ] )
    return chosen
# =============================================================================
## get the shuffled sample
# @code
# data = ....
# shuffled = data.shuffle()
# @endcode | |
inferred from
the general limits/nominal values (e.g. i)
"""
# Index of the direct-axis current i_sd in the state vector.
I_SD_IDX = 0
# Index of the quadrature-axis current i_sq in the state vector.
I_SQ_IDX = 1
# Index of the electrical rotor angle epsilon in the state vector.
EPSILON_IDX = 2
# Indices of the current components within the state vector.
CURRENTS_IDX = [0, 1]
# Names of the state currents / input voltages in the dq reference frame.
CURRENTS = ['i_sd', 'i_sq']
VOLTAGES = ['u_sd', 'u_sq']
# Model matrix; filled by the subclass in _update_model().
_model_constants = None
# State initializer configuration; set up by the superclass constructor.
_initializer = None
def __init__(self, motor_parameter=None, nominal_values=None,
             limit_values=None, motor_initializer=None, **kwargs):
    # Docstring of superclass
    # Fall back to empty dicts so the superclass always receives mappings.
    super().__init__(motor_parameter, nominal_values or {},
                     limit_values or {}, motor_initializer)
    self._update_model()
    self._update_limits()
@property
def motor_parameter(self):
    # Docstring of superclass
    # Motor-parameter dictionary resolved by the superclass constructor.
    return self._motor_parameter
@property
def initializer(self):
    # Docstring of superclass
    # State-initializer configuration dict (may be None / empty).
    return self._initializer
def reset(self, state_space,
          state_positions,
          **__):
    # Docstring of superclass
    # Without an initializer the motor starts from the all-zero state
    # (currents plus the rotor angle epsilon).
    if not (self._initializer and self._initializer['states']):
        return np.zeros(len(self.CURRENTS) + 1)
    self.initialize(state_space, state_positions)
    return np.asarray(list(self._initial_states.values()))
def torque(self, state):
    # Docstring of superclass
    # Abstract: each concrete synchronous-motor subclass supplies its torque equation.
    raise NotImplementedError
def _update_model(self):
    """
    Set motor parameters into a matrix for faster computation

    Abstract: subclasses must fill ``self._model_constants`` with the
    coefficient matrix consumed by :meth:`electrical_ode`.
    """
    raise NotImplementedError
def electrical_ode(self, state, u_dq, omega, *_):
    """
    The differential equation of the Synchronous Motor.

    Args:
        state: The current state of the motor. [i_sd, i_sq, epsilon]
        u_dq: The input voltages [u_sd, u_sq]
        omega: The mechanical load (angular velocity)

    Returns:
        The derivatives of the state vector d/dt([i_sd, i_sq, epsilon])
    """
    # The derivative is a linear combination of the feature vector
    # [omega, i_sd, i_sq, u_sd, u_sq, omega*i_sd, omega*i_sq];
    # the coefficient matrix is prepared once in _update_model().
    return np.matmul(self._model_constants, np.array([
        omega,
        state[self.I_SD_IDX],
        state[self.I_SQ_IDX],
        u_dq[0],
        u_dq[1],
        omega * state[self.I_SD_IDX],
        omega * state[self.I_SQ_IDX],
    ]))
def i_in(self, state):
    # Docstring of superclass
    # Fancy indexing with CURRENTS_IDX — assumes state is a numpy array,
    # not a plain list (TODO confirm against callers).
    return state[self.CURRENTS_IDX]
def _update_limits(self):
    # Docstring of superclass
    # NOTE(review): the 0.5 factor presumably converts the supply voltage limit
    # into a per-axis voltage bound — confirm against the converter model.
    voltage_limit = 0.5 * self._limits['u']
    voltage_nominal = 0.5 * self._nominal_values['u']
    limits_agenda = {}
    nominal_agenda = {}
    # IO_VOLTAGES / IO_CURRENTS are declared by the concrete subclasses.
    for u, i in zip(self.IO_VOLTAGES, self.IO_CURRENTS):
        limits_agenda[u] = voltage_limit
        nominal_agenda[u] = voltage_nominal
        # Fall back to an Ohm's-law estimate (u / r_s) when no explicit
        # general current limit 'i' is configured.
        limits_agenda[i] = self._limits.get('i', None) or \
                           self._limits[u] / self._motor_parameter['r_s']
        nominal_agenda[i] = self._nominal_values.get('i', None) or \
                            self._nominal_values[u] / \
                            self._motor_parameter['r_s']
    super()._update_limits(limits_agenda, nominal_agenda)
# def initialize(self,
# state_space,
# state_positions,
# **__):
# super().initialize(state_space, state_positions)
class SynchronousReluctanceMotor(SynchronousMotor):
    # Raw docstring: the :math: expressions contain backslash sequences
    # (\hat, \sqrt) that are invalid string escapes in a non-raw literal
    # and raise SyntaxWarning on Python >= 3.12.
    r"""
    =====================  ==========  ============= ===========================================
    Motor Parameter        Unit        Default Value Description
    =====================  ==========  ============= ===========================================
    r_s                    Ohm         0.57          Stator resistance
    l_d                    H           10.1e-3       Direct axis inductance
    l_q                    H           4.1e-3        Quadrature axis inductance
    p                      1           4             Pole pair number
    j_rotor                kg/m^2      0.8e-3        Moment of inertia of the rotor
    =====================  ==========  ============= ===========================================

    =============== ====== =============================================
    Motor Currents  Unit   Description
    =============== ====== =============================================
    i_sd            A      Direct axis current
    i_sq            A      Quadrature axis current
    i_a             A      Current through branch a
    i_b             A      Current through branch b
    i_c             A      Current through branch c
    i_alpha         A      Current in alpha axis
    i_beta          A      Current in beta axis
    =============== ====== =============================================

    =============== ====== =============================================
    Motor Voltages  Unit   Description
    =============== ====== =============================================
    u_sd            V      Direct axis voltage
    u_sq            V      Quadrature axis voltage
    u_a             V      Voltage through branch a
    u_b             V      Voltage through branch b
    u_c             V      Voltage through branch c
    u_alpha         V      Voltage in alpha axis
    u_beta          V      Voltage in beta axis
    =============== ====== =============================================

    ======== ===========================================================
    Limits / Nominal Value Dictionary Entries:
    -------- -----------------------------------------------------------
    Entry    Description
    ======== ===========================================================
    i        General current limit / nominal value
    i_a      Current in phase a
    i_b      Current in phase b
    i_c      Current in phase c
    i_alpha  Current in alpha axis
    i_beta   Current in beta axis
    i_sd     Current in direct axis
    i_sq     Current in quadrature axis
    omega    Mechanical angular Velocity
    epsilon  Electrical rotational angle
    torque   Motor generated torque
    u_a      Voltage in phase a
    u_b      Voltage in phase b
    u_c      Voltage in phase c
    u_alpha  Voltage in alpha axis
    u_beta   Voltage in beta axis
    u_sd     Voltage in direct axis
    u_sq     Voltage in quadrature axis
    ======== ===========================================================

    Note:
        The voltage limits should be the amplitude of the phase voltage (:math:`\hat{u}_S`).
        Typically the rms value for the line voltage (:math:`U_L`) is given.
        :math:`\hat{u}_S=\sqrt{2/3}~U_L`

        The current limits should be the amplitude of the phase current (:math:`\hat{i}_S`).
        Typically the rms value for the phase current (:math:`I_S`) is given.
        :math:`\hat{i}_S = \sqrt{2}~I_S`

        If not specified, nominal values are equal to their corresponding limit values.
        Furthermore, if specific limits/nominal values (e.g. i_a) are not specified they are inferred from
        the general limits/nominal values (e.g. i)
    """
    HAS_JACOBIAN = True

    #### Parameters taken from DOI: 10.1109/AMC.2008.4516099 (<NAME>, <NAME>, <NAME>)
    _default_motor_parameter = {'p': 4,
                                'l_d': 10.1e-3,
                                'l_q': 4.1e-3,
                                'j_rotor': 0.8e-3,
                                'r_s': 0.57
                                }
    _default_nominal_values = {'i': 10, 'torque': 0, 'omega': 3e3 * np.pi / 30, 'epsilon': np.pi, 'u': 100}
    _default_limits = {'i': 13, 'torque': 0, 'omega': 4.3e3 * np.pi / 30, 'epsilon': np.pi, 'u': 100}
    _default_initializer = {'states': {'i_sq': 0.0, 'i_sd': 0.0, 'epsilon': 0.0},
                            'interval': None,
                            'random_init': None,
                            'random_params': (None, None)}
    IO_VOLTAGES = ['u_a', 'u_b', 'u_c', 'u_sd', 'u_sq']
    IO_CURRENTS = ['i_a', 'i_b', 'i_c', 'i_sd', 'i_sq']

    def _update_model(self):
        # Docstring of superclass
        mp = self._motor_parameter
        # Rows: d/dt i_sd, d/dt i_sq, d/dt epsilon.
        self._model_constants = np.array([
            # omega, i_sd, i_sq, u_sd, u_sq, omega * i_sd, omega * i_sq
            [      0, -mp['r_s'],          0, 1, 0,                   0, mp['l_q'] * mp['p']],
            [      0,          0, -mp['r_s'], 0, 1, -mp['l_d'] * mp['p'],                  0],
            [mp['p'],          0,          0, 0, 0,                   0,                   0]
        ])
        # Scale the current rows by the respective axis inductance.
        self._model_constants[self.I_SD_IDX] = self._model_constants[self.I_SD_IDX] / mp['l_d']
        self._model_constants[self.I_SQ_IDX] = self._model_constants[self.I_SQ_IDX] / mp['l_q']

    def _torque_limit(self):
        # Docstring of superclass
        # Maximum torque is reached at i_sd = i_sq = i_limit / sqrt(2).
        return self.torque([self._limits['i_sd'] / np.sqrt(2), self._limits['i_sq'] / np.sqrt(2), 0])

    def torque(self, currents):
        # Docstring of superclass
        # Reluctance torque: T = 1.5 * p * (l_d - l_q) * i_sd * i_sq
        mp = self._motor_parameter
        return 1.5 * mp['p'] * (
            (mp['l_d'] - mp['l_q']) * currents[self.I_SD_IDX]) * \
            currents[self.I_SQ_IDX]

    def electrical_jacobian(self, state, u_in, omega, *_):
        """Return (d(ode)/d(state), d(ode)/d(omega), d(torque)/d(state))."""
        mp = self._motor_parameter
        return (
            np.array([
                [-mp['r_s'] / mp['l_d'], mp['l_q'] / mp['l_d'] * mp['p'] * omega, 0],
                [-mp['l_d'] / mp['l_q'] * mp['p'] * omega, -mp['r_s'] / mp['l_q'], 0],
                [0, 0, 0]
            ]),
            np.array([
                mp['p'] * mp['l_q'] / mp['l_d'] * state[self.I_SQ_IDX],
                - mp['p'] * mp['l_d'] / mp['l_q'] * state[self.I_SD_IDX],
                mp['p']
            ]),
            np.array([
                1.5 * mp['p'] * (mp['l_d'] - mp['l_q']) * state[self.I_SQ_IDX],
                1.5 * mp['p'] * (mp['l_d'] - mp['l_q']) * state[self.I_SD_IDX],
                0
            ])
        )
class PermanentMagnetSynchronousMotor(SynchronousMotor):
"""
===================== ========== ============= ===========================================
Motor Parameter Unit Default Value Description
===================== ========== ============= ===========================================
r_s Ohm 0.78 Stator resistance
l_d H 1.2 Direct axis inductance
l_q H 6.3e-3 Quadrature axis inductance
p 1 2 Pole pair number
j_rotor kg/m^2 0.017 Moment of inertia of the rotor
===================== ========== ============= ===========================================
=============== ====== =============================================
Motor Currents Unit Description
=============== ====== =============================================
i_sd A Direct axis current
i_sq A Quadrature axis current
i_a A Current through branch a
i_b A Current through branch b
i_c A Current through branch c
i_alpha A Current in alpha axis
i_beta A Current in beta axis
=============== ====== =============================================
=============== ====== =============================================
Motor Voltages Unit Description
=============== ====== =============================================
u_sd V Direct axis voltage
u_sq V Quadrature axis voltage
u_a V Voltage through branch a
u_b V Voltage through branch b
u_c V Voltage through branch c
u_alpha V Voltage in alpha axis
u_beta V Voltage in beta axis
=============== ====== =============================================
======== ===========================================================
Limits / Nominal Value Dictionary Entries:
-------- -----------------------------------------------------------
Entry Description
======== ===========================================================
i General current limit / nominal value
i_a Current in phase a
i_b Current in phase b
i_c Current in phase c
i_alpha Current in alpha axis
i_beta Current in beta axis
i_sd Current in direct axis
i_sq Current in quadrature axis
omega Mechanical angular Velocity
torque Motor generated torque
epsilon Electrical rotational angle
u_a Voltage in phase a
u_b Voltage in phase b
u_c Voltage in phase c
u_alpha Voltage in alpha axis
u_beta Voltage in beta axis
u_sd Voltage in direct axis
u_sq Voltage in quadrature axis
======== ===========================================================
Note:
The voltage limits should be the amplitude of the phase voltage (:math:`\hat{u}_S`).
Typically the rms value for the line voltage (:math:`U_L`) is given.
:math:`\hat{u}_S=\sqrt{2/3}~U_L`
The current limits should be the amplitude of the phase current (:math:`\hat{i}_S`).
Typically the rms value for the phase current (:math:`I_S`) is given.
:math:`\hat{i}_S = \sqrt{2}~I_S`
If not specified, nominal values are equal to their corresponding limit values.
Furthermore, if specific limits/nominal values (e.g. i_a) are not specified they are inferred from
the general limits/nominal values (e.g. i)
"""
#### Parameters taken from DOI: 10.1109/TPEL.2020.3006779 (<NAME>, <NAME>, <NAME>, <NAME>)
#### and DOI: 10.1109/IEMDC.2019.8785122 (<NAME>, <NAME>, <NAME>)
_default_motor_parameter = {
'p': 3,
'l_d': 0.37e-3,
'l_q': 1.2e-3,
'j_rotor': 0.3883,
'r_s': 18e-3,
'psi_p': 66e-3,
}
HAS_JACOBIAN = True
_default_limits = dict(omega=12e3 * np.pi / 30, torque=0.0, i=260, epsilon=math.pi, u=300)
_default_nominal_values = dict(omega=3e3 * np.pi / 30, torque=0.0, i=240, epsilon=math.pi, | |
# Copyright (c) 2015 Shotgun Software Inc.
#
# CONFIDENTIAL AND PROPRIETARY
#
# This work is provided "AS IS" and subject to the Shotgun Pipeline Toolkit
# Source Code License included in this distribution package. See LICENSE.
# By accessing, using, copying or modifying this work you indicate your
# agreement to the Shotgun Pipeline Toolkit Source Code License. All rights
# not expressly granted therein are reserved by Shotgun Software Inc.
"""
Implementation of the entity tree widget consisting of a tree view that displays the
contents of a Shotgun Data Model, a text search and a filter control.
"""
import weakref
import sgtk
from sgtk.platform.qt import QtCore, QtGui
from ..ui.entity_tree_form import Ui_EntityTreeForm
from .entity_tree_proxy_model import EntityTreeProxyModel
from ..framework_qtwidgets import Breadcrumb
from ..util import get_model_str, map_to_source, get_source_model, monitor_qobject_lifetime
shotgun_model = sgtk.platform.import_framework("tk-framework-shotgunutils", "shotgun_model")
ShotgunEntityModel = shotgun_model.ShotgunEntityModel
class EntityTreeForm(QtGui.QWidget):
"""
Entity tree widget class
"""
class _EntityBreadcrumb(Breadcrumb):
    """
    Breadcrumb for a single model item.
    """
    def __init__(self, label, entity):
        """
        Constructor.

        :param label:  Text label for the breadcrumb.
        :param entity: Entity associated with this breadcrumb.
        """
        Breadcrumb.__init__(self, label)
        # Entity dictionary this breadcrumb represents.
        self.entity = entity
# Signal emitted when an entity is selected in the tree.
# Arguments: selection details, breadcrumbs
entity_selected = QtCore.Signal(object, object)
# Signal emitted when the 'New Task' button is clicked.
# Arguments: entity, step
create_new_task = QtCore.Signal(object, object)
def __init__(self, entity_model, search_label, allow_task_creation, extra_fields, parent):
    """
    Construction

    :param entity_model:        The Shotgun Model this widget should connect to
    :param search_label:        The hint label to be displayed on the search control
    :param allow_task_creation: Indicates if the form is allowed by the app settings to show the
                                create task button.
    :param extra_fields:        Extra fields to use when comparing model entries.
    :param parent:              The parent QWidget for this control
    """
    QtGui.QWidget.__init__(self, parent)
    # control if step->tasks in the entity hierarchy should be collapsed when building
    # the search details.
    self._collapse_steps_with_tasks = True
    # keep track of the entity to select when the model is updated:
    self._entity_to_select = None
    # keep track of the currently selected item:
    self._current_item_ref = None
    # keep track of expanded items as items in the tree are expanded/collapsed. We
    # also want to auto-expand root items the first time they appear so track them
    # as well
    self._expanded_items = set()
    self._auto_expanded_root_items = set()
    # load the setting that states whether the first level of the tree should be auto expanded
    app = sgtk.platform.current_bundle()
    self._auto_expand_tree = app.get_setting("auto_expand_tree")
    # set up the UI
    self._ui = Ui_EntityTreeForm()
    self._ui.setupUi(self)
    self._ui.search_ctrl.set_placeholder_text("Search %s" % search_label)
    self._ui.search_ctrl.setToolTip("Press enter to complete the search")
    # enable/hide the my-tasks-only button if we are showing tasks:
    have_tasks = (entity_model and entity_model.get_entity_type() == "Task")
    if not have_tasks:
        self._ui.my_tasks_cb.hide()
    # enable/hide the new task button if we have tasks and task creation is allowed:
    if have_tasks and allow_task_creation:
        # enable and connect the new task button
        self._ui.new_task_btn.clicked.connect(self._on_new_task)
        self._ui.new_task_btn.setEnabled(False)
    else:
        self._ui.new_task_btn.hide()
    self._ui.entity_tree.expanded.connect(self._on_item_expanded)
    self._ui.entity_tree.collapsed.connect(self._on_item_collapsed)
    self._is_resetting_model = False
    if entity_model:
        # Bug fix: these two connections used to run unguarded before the
        # None-check, which would raise AttributeError for a None model even
        # though the rest of this method tolerates one.
        entity_model.modelAboutToBeReset.connect(self._model_about_to_reset)
        entity_model.modelReset.connect(self._model_reset)
        # Every time the model is refreshed with data from Shotgun, we'll need to re-expand nodes
        # that were expanded and reapply the current selection.
        entity_model.data_refreshed.connect(self._on_data_refreshed)
    # create a filter proxy model between the source model and the task tree view.
    # (A leftover constant-true conditional used to wrap this; the unfiltered
    # else-branch was unreachable and has been removed.)
    filter_model = EntityTreeProxyModel(self, ["content", {"entity": "name"}] + extra_fields)
    monitor_qobject_lifetime(filter_model, "%s entity filter model" % search_label)
    filter_model.setSourceModel(entity_model)
    self._ui.entity_tree.setModel(filter_model)
    # connect up the filter controls:
    self._ui.search_ctrl.search_changed.connect(self._on_search_changed)
    self._ui.my_tasks_cb.toggled.connect(self._on_my_tasks_only_toggled)
    self._expand_root_rows()
    # connect to the selection model for the tree view:
    selection_model = self._ui.entity_tree.selectionModel()
    if selection_model:
        selection_model.selectionChanged.connect(self._on_selection_changed)
def _model_about_to_reset(self):
# Catch the currently selected item and convert it to dictionary form
# so we can pick it back after the model is reset.
if self._current_item_ref:
item = self._current_item_ref()
if item:
idx = item.index()
self._entity_to_select = idx.model().get_entity(item)
self._is_resetting_model = True
def _model_reset(self):
    # The model reset has completed; clear the in-progress flag.
    self._is_resetting_model = False
def shut_down(self):
    """
    Clean up as much as we can to help the gc once the widget is finished with.
    """
    # block signals so none of the teardown below triggers slot callbacks:
    signals_blocked = self.blockSignals(True)
    try:
        # clear any references:
        self._entity_to_select = None
        self._expanded_items = set()
        self._auto_expanded_root_items = set()
        # clear the selection:
        if self._ui.entity_tree.selectionModel():
            self._ui.entity_tree.selectionModel().clear()
        # detach the filter model from the view:
        view_model = self._ui.entity_tree.model()
        if view_model:
            self._ui.entity_tree.setModel(None)
            # detaching the source model breaks the proxy->source reference cycle:
            if isinstance(view_model, EntityTreeProxyModel):
                view_model.setSourceModel(None)
    finally:
        # restore the widget's previous signal-blocking state:
        self.blockSignals(signals_blocked)
def select_entity(self, entity_type, entity_id):
    """
    Select the specified entity in the tree. If the tree is still being populated then the selection
    will happen when an item representing the entity appears in the model.

    Note that this doesn't emit an entity_selected signal.

    :param entity_type: The type of the entity to select
    :param entity_id:   The id of the entity to select
    """
    # remember the requested entity so it can also be selected once the model
    # has been fully populated:
    self._entity_to_select = {"type": entity_type, "id": entity_id}
    # clear the current selection without emitting a signal:
    previous_item = self._reset_selection()
    self._current_item_ref = None
    self._update_ui()
    # and attempt to apply the new selection straight away:
    self._update_selection(previous_item)
def get_selection(self):
    """
    Get the currently selected item as well as the breadcrumb trail that represents
    the path for the selection.

    :returns: A Tuple containing the details and breadcrumb trail of the current selection:
        (selection_details, breadcrumb_trail)
        - selection_details is a dictionary containing:
          {"label":label, "entity":entity, "children":[children]}
        - breadcrumb_trail is a list of Breadcrumb instances
    """
    indexes = self._ui.entity_tree.selectionModel().selectedIndexes()
    # only a single unambiguous selection yields details:
    if len(indexes) != 1:
        return ({}, [])
    sole_index = indexes[0]
    return (self._get_entity_details(sole_index),
            self._build_breadcrumb_trail(sole_index))
def navigate_to(self, breadcrumb_trail):
    """
    Update the selection to match the specified breadcrumb trail

    :param breadcrumb_trail: A list of Breadcrumb instances that represent
        an item in the tree.
    """
    tree_model = self._ui.entity_tree.model()
    entity_model = get_source_model(tree_model)
    if not entity_model:
        return
    # figure out the item in the tree to select from the breadcrumb trail:
    current_item = entity_model.invisibleRootItem()
    for crumb in breadcrumb_trail:
        # look for an item under the current item that this breadcrumb represents:
        found_item = None
        if isinstance(crumb, EntityTreeForm._EntityBreadcrumb):
            # look for a child item that represents the entity:
            for row in range(current_item.rowCount()):
                child_item = current_item.child(row)
                sg_entity = entity_model.get_entity(child_item)
                if (sg_entity["type"] == crumb.entity["type"]
                    and sg_entity["id"] == crumb.entity["id"]):
                    found_item = child_item
                    break
        else:
            # non-entity breadcrumb (e.g. a grouping node):
            # look for a child item that has the same label:
            for row in range(current_item.rowCount()):
                child_item = current_item.child(row)
                if get_model_str(child_item) == crumb.label:
                    found_item = child_item
                    break
        if not found_item:
            # stop traversal!
            break
        if isinstance(tree_model, QtGui.QAbstractProxyModel):
            # check to see if the item is visible in the current filtered model:
            filtered_idx = tree_model.mapFromSource(found_item.index())
            if not filtered_idx.isValid():
                # stop traversal as the item isn't in the filtered model!
                break
        # iterate down to the next level:
        current_item = found_item
    # finally, select the deepest item reached in the tree (the root if the
    # very first crumb didn't match anything):
    idx_to_select = current_item.index()
    if isinstance(tree_model, QtGui.QAbstractProxyModel):
        # map the source index through the proxy for the view:
        idx_to_select = tree_model.mapFromSource(current_item.index())
    self._ui.entity_tree.selectionModel().setCurrentIndex(idx_to_select, QtGui.QItemSelectionModel.SelectCurrent)
# ------------------------------------------------------------------------------------------
# ------------------------------------------------------------------------------------------
def _get_selected_item(self):
    """
    Get the currently selected item.

    A selection is only considered valid when exactly one index is
    selected in the tree.

    :returns: The currently selected model item if any, otherwise None
    """
    selected_indexes = self._ui.entity_tree.selectionModel().selectedIndexes()
    if len(selected_indexes) != 1:
        return None
    return self._item_from_index(selected_indexes[0])
def _reset_selection(self):
    """
    Reset the current selection, returning the currently selected item if any.

    This doesn't result in any signals being emitted by the current
    selection model.

    :returns: The selected item before the selection was reset if any
    """
    previously_selected = self._get_selected_item()
    # QItemSelectionModel.reset() clears the selection without emitting signals:
    self._ui.entity_tree.selectionModel().reset()
    self._update_ui()
    return previously_selected
def _get_entity_details(self, idx):
"""
Get entity details for the specified model index. If steps are being collapsed into tasks
then these details will reflect that and will not be a 1-1 representation of the tree itself.
:param idx: The QModelIndex of the item to get the entity details for.
:returns: A dictionary containing entity information about the specified index containing the
following information:
{"label":label, "entity":entity, "children":[children]}
- label: The label of the corresponding item
- entity: The entity dictionary for the corresponding item
- children: A list of immediate children for the corresponding item - each item in
the list is a dictionary containing 'label' and 'entity'.
"""
if not idx.isValid():
return {}
# first, ensure that all child data has been loaded
idx.model().ensure_data_is_loaded(idx)
item = self._item_from_index(idx)
entity_model | |
import torch
import re
import os
import collections
from torch._six import string_classes, int_classes
import cv2
from opt import opt
from tqdm import tqdm
import time
import matplotlib.pyplot as plt
from PIL import Image
import numpy as np
import math
import copy
from put_gif import put_gif
import statistics
# Drawing colours as (B, G, R) tuples — OpenCV channel order (see the
# cv2.circle / cv2.line calls below).
RED = (0, 0, 255)
GREEN = (0, 255, 0)
BLUE = (255, 0, 0)
CYAN = (255, 255, 0)
YELLOW = (0, 255, 255)
ORANGE = (0, 165, 255)
PURPLE = (255, 0, 255)

# Maps a numpy dtype name to the torch tensor constructor used by
# `collate_fn` when collating batches of numpy scalars.
numpy_type_map = {
    'float64': torch.DoubleTensor,
    'float32': torch.FloatTensor,
    'float16': torch.HalfTensor,
    'int64': torch.LongTensor,
    'int32': torch.IntTensor,
    'int16': torch.ShortTensor,
    'int8': torch.CharTensor,
    'uint8': torch.ByteTensor,
}

# When True, `collate_fn` stacks tensor batches into a shared-memory storage
# so worker processes can hand batches to the main process without a copy.
_use_shared_memory = True
def collate_fn(batch):
    r"""Puts each data field into a tensor with outer dimension batch size.

    Supported element types: torch tensors, numpy arrays/scalars, ints,
    floats, strings/bytes, mappings (collated per key) and sequences
    (collated per position, recursively).

    :param batch: A non-empty sequence of samples of a uniform type.
    :returns: The collated batch; shape/type depends on the element type.
    :raises TypeError: If the element type is not supported, or a numpy
        array of strings/objects is passed.
    """
    # `collections.Mapping`/`collections.Sequence` were removed in Python 3.10;
    # import the ABCs from their canonical home.
    from collections.abc import Mapping, Sequence

    error_msg = "batch must contain tensors, numbers, dicts or lists; found {}"
    elem_type = type(batch[0])
    if isinstance(batch[0], torch.Tensor):
        out = None
        if _use_shared_memory:
            # If we're in a background process, concatenate directly into a
            # shared memory tensor to avoid an extra copy
            numel = sum(x.numel() for x in batch)
            storage = batch[0].storage()._new_shared(numel)
            out = batch[0].new(storage)
        return torch.stack(batch, 0, out=out)
    elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \
            and elem_type.__name__ != 'string_':
        elem = batch[0]
        if elem_type.__name__ == 'ndarray':
            # arrays of string classes and objects cannot become tensors
            if re.search('[SaUO]', elem.dtype.str) is not None:
                raise TypeError(error_msg.format(elem.dtype))
            return torch.stack([torch.from_numpy(b) for b in batch], 0)
        if elem.shape == ():  # numpy scalars (0-d), e.g. np.float64
            py_type = float if elem.dtype.name.startswith('float') else int
            return numpy_type_map[elem.dtype.name](list(map(py_type, batch)))
    # NOTE: torch._six was removed from modern torch; its py3 aliases were
    # int_classes == int and string_classes == (str, bytes).
    elif isinstance(batch[0], int):
        return torch.LongTensor(batch)
    elif isinstance(batch[0], float):
        return torch.DoubleTensor(batch)
    elif isinstance(batch[0], (str, bytes)):
        return batch
    elif isinstance(batch[0], Mapping):
        return {key: collate_fn([d[key] for d in batch]) for key in batch[0]}
    elif isinstance(batch[0], Sequence):
        transposed = zip(*batch)
        return [collate_fn(samples) for samples in transposed]
    raise TypeError(error_msg.format(type(batch[0])))
def collate_fn_list(batch):
    """Collate a batch of (img, inp, im_name) triples.

    The image and name fields are collated via `collate_fn`; the `inp`
    field is passed through untouched (as a tuple).
    """
    imgs, inps, im_names = zip(*batch)
    collated_imgs = collate_fn(imgs)
    collated_names = collate_fn(im_names)
    return collated_imgs, inps, collated_names
def vis_frame_fast(frame, im_res, format='coco'):
    '''
    Render pose keypoints and limbs onto a frame (fast path: draws in place,
    no resizing or gif overlays).

    frame: frame image (drawn on in place)
    im_res: im_res of predictions ('imgname' plus per-human 'keypoints' and 'kp_score')
    format: coco or mpii
    return rendered image
    '''
    if format == 'coco':
        l_pair = [
            (0, 1), (0, 2), (1, 3), (2, 4),  # Head
            (5, 6), (5, 7), (7, 9), (6, 8), (8, 10),
            (17, 11), (17, 12),  # Body
            (11, 13), (12, 14), (13, 15), (14, 16)
        ]
        p_color = [(0, 255, 255), (0, 191, 255), (0, 255, 102), (0, 77, 255), (0, 255, 0),  # Nose, LEye, REye, LEar, REar
                   (77, 255, 255), (77, 255, 204), (77, 204, 255), (191, 255, 77), (77, 191, 255), (191, 255, 77),  # LShoulder, RShoulder, LElbow, RElbow, LWrist, RWrist
                   (204, 77, 255), (77, 255, 204), (191, 77, 255), (77, 255, 191), (127, 77, 255), (77, 255, 127), (0, 255, 255)]  # LHip, RHip, LKnee, Rknee, LAnkle, RAnkle, Neck
        line_color = [(0, 215, 255), (0, 255, 204), (0, 134, 255), (0, 255, 50),
                      (77, 255, 222), (77, 196, 255), (77, 135, 255), (191, 255, 77), (77, 255, 77),
                      (77, 222, 255), (255, 156, 127),
                      (0, 127, 255), (255, 127, 77), (0, 77, 255), (255, 77, 36)]
    elif format == 'mpii':
        l_pair = [
            (8, 9), (11, 12), (11, 10), (2, 1), (1, 0),
            (13, 14), (14, 15), (3, 4), (4, 5),
            (8, 7), (7, 6), (6, 2), (6, 3), (8, 12), (8, 13)
        ]
        p_color = [PURPLE, BLUE, BLUE, RED, RED, BLUE, BLUE, RED, RED, PURPLE, PURPLE, PURPLE, RED, RED, BLUE, BLUE]
        # FIX: line_color was missing for mpii, causing a NameError as soon as
        # any limb was drawn; use the same palette as vis_frame's mpii branch.
        line_color = [PURPLE, BLUE, BLUE, RED, RED, BLUE, BLUE, RED, RED, PURPLE, PURPLE, RED, RED, BLUE, BLUE]
    else:
        # FIX: the exception was previously created but never raised, letting
        # execution continue to a NameError on l_pair.
        raise NotImplementedError
    im_name = im_res['imgname'].split('/')[-1]
    img = frame
    for human in im_res['result']:
        part_line = {}
        kp_preds = human['keypoints']
        kp_scores = human['kp_score']
        # append a synthetic keypoint (index 17) midway between keypoints 5 and 6
        # (the shoulders in coco ordering).
        # NOTE(review): for mpii this still appends index 16, one past p_color's
        # 16 entries — confirm whether mpii inputs ever score it above threshold.
        kp_preds = torch.cat((kp_preds, torch.unsqueeze((kp_preds[5, :] + kp_preds[6, :]) / 2, 0)))
        kp_scores = torch.cat((kp_scores, torch.unsqueeze((kp_scores[5, :] + kp_scores[6, :]) / 2, 0)))
        # Draw keypoints
        for n in range(kp_scores.shape[0]):
            if kp_scores[n] <= 0.05:  # skip low-confidence keypoints
                continue
            cor_x, cor_y = int(kp_preds[n, 0]), int(kp_preds[n, 1])
            part_line[n] = (cor_x, cor_y)
            cv2.circle(img, (cor_x, cor_y), 4, p_color[n], -1)
        # Draw limbs between keypoints that both passed the confidence filter
        for i, (start_p, end_p) in enumerate(l_pair):
            if start_p in part_line and end_p in part_line:
                start_xy = part_line[start_p]
                end_xy = part_line[end_p]
                # FIX: cv2.line requires an integer thickness; the previous
                # float/tensor expression is rejected by OpenCV.
                thickness = int(2 * (kp_scores[start_p] + kp_scores[end_p]) + 1)
                cv2.line(img, start_xy, end_xy, line_color[i], thickness)
    return img
def vis_frame(frame, im_res, format='coco'):
'''
frame: frame image
im_res: im_res of predictions
format: coco or mpii
return rendered image
'''
if format == 'coco':
l_pair = [
(0, 1), (0, 2), (1, 3), (2, 4), # Head
(5, 6), (5, 7), (7, 9), (6, 8), (8, 10),
(17, 11), (17, 12), # Body
(11, 13), (12, 14), (13, 15), (14, 16)
]
p_color = [(0, 255, 255), (0, 191, 255),(0, 255, 102),(0, 77, 255), (0, 255, 0), #Nose, LEye, REye, LEar, REar
(77,255,255), (77, 255, 204), (77,204,255), (191, 255, 77), (77,191,255), (191, 255, 77), #LShoulder, RShoulder, LElbow, RElbow, LWrist, RWrist
(204,77,255), (77,255,204), (191,77,255), (77,255,191), (127,77,255), (77,255,127), (0, 255, 255)] #LHip, RHip, LKnee, Rknee, LAnkle, RAnkle, Neck
line_color = [(0, 215, 255), (0, 255, 204), (0, 134, 255), (0, 255, 50),
(77,255,222), (77,196,255), (77,135,255), (191,255,77), (77,255,77),
(77,222,255), (255,156,127),
(0,127,255), (255,127,77), (0,77,255), (255,77,36)]
elif format == 'mpii':
l_pair = [
(8, 9), (11, 12), (11, 10), (2, 1), (1, 0),
(13, 14), (14, 15), (3, 4), (4, 5),
(8, 7), (7, 6), (6, 2), (6, 3), (8, 12), (8, 13)
]
p_color = [PURPLE, BLUE, BLUE, RED, RED, BLUE, BLUE, RED, RED, PURPLE, PURPLE, PURPLE, RED, RED, BLUE, BLUE]
line_color = [PURPLE, BLUE, BLUE, RED, RED, BLUE, BLUE, RED, RED, PURPLE, PURPLE, RED, RED, BLUE, BLUE]
else:
raise NotImplementedError
im_name = im_res['imgname'].split('/')[-1]
img = frame
height,width = img.shape[:2]
img = cv2.resize(img,(int(width/2), int(height/2)))
for human in im_res['result']:
part_line = {}
kp_preds = human['keypoints']
kp_scores = human['kp_score']
kp_preds = torch.cat((kp_preds, torch.unsqueeze((kp_preds[5,:]+kp_preds[6,:])/2,0)))
kp_scores = torch.cat((kp_scores, torch.unsqueeze((kp_scores[5,:]+kp_scores[6,:])/2,0)))
# Draw keypoints
for n in range(kp_scores.shape[0]):
if kp_scores[n] <= 0.05:
continue
cor_x, cor_y = int(kp_preds[n, 0]), int(kp_preds[n, 1])
part_line[n] = (int(cor_x/2), int(cor_y/2))
bg = img.copy()
# print(part_line[n][1])
# cv2.circle(bg, (int(cor_x/2), int(cor_y/2)), 2, p_color[n], -1)
# Now create a mask of logo and create its inverse mask also
# transparency = max(0, min(1, kp_scores[n]))
# img = cv2.addWeighted(bg, transparency, img, 1-transparency, 0)
# if n==1:
try:
# both chin 25 is left part 26 is right (Example: Face chin)
part_line[25] = (statistics.mean([part_line[0][0],part_line[3][0]]), statistics.mean([part_line[0][1],part_line[3][1]]))
part_line[26] = (statistics.mean([part_line[0][0],part_line[4][0]]), statistics.mean([part_line[0][1],part_line[4][1]]))
# middle of eyes (Example: Sunglasses)
part_line[18] = (statistics.mean([part_line[1][0], part_line[2][0]]), statistics.mean([part_line[1][1],part_line[2][1]]))
# mean of ears
part_line[19] = (statistics.mean([part_line[3][0], part_line[4][0]]), statistics.mean([part_line[3][1],part_line[4][1]]))
part_line[20] = (part_line[18][0]-part_line[0][0], part_line[18][1]-part_line[0][1])
# expected head, mouth (Example: Crowns for 21 Fire for 31)
part_line[21] = (part_line[0][0]+5*part_line[20][0], part_line[0][1]+5*part_line[20][1])
part_line[31] = (part_line[0][0]+6*part_line[20][0], part_line[0][1]+6*part_line[20][1])
part_line[23] = (part_line[0][0]-2*part_line[20][0], part_line[0][1]-2*part_line[20][1])
# hip part calulation
part_line[40] = (part_line[11][0]-part_line[12][0], part_line[11][1]-part_line[12][1])
part_line[41] = (part_line[11][0]+2*part_line[40][0], part_line[11][1]+2*part_line[40][1])
part_line[42] = (part_line[11][0]+part_line[40][0], part_line[11][1]+part_line[40][1])
part_line[43] = (part_line[12][0]-2*part_line[40][0], part_line[12][1]-2*part_line[40][1])
part_line[44] = (part_line[12][0]-part_line[40][0], part_line[12][1]-part_line[40][1])
# expected static
part_line[50] = (int(3*width/7), int(height/4))
part_line[51] = (int(width/8), int(height/4))
part_line[52] = (int(width/4), int(height/4))
part_line[53] = (int(3*width/8), int(height/4))
part_line[54] = (int(width/6.2), int(height/9.157))
part_line[55] = (int(width/3.326), int(height/58))
part_line[56] = (int(width/6.514), int(height/6.842))
part_line[57] = (int(width/3.406), int(height/8.533))
part_line[58] = (int(width/2.319), int(height/9.846))
part_line[59] = (int(width/4.24), int(height/3.052))
part_line[60] = (int(width/3.212), int(height/2.74))
# 2~3.5
(file_name, scale, replay_speed, start, end) = ('examples/gif/YG/devil_mask.gif', 15, 2, 64, 112)
img = put_gif(im_name, img, part_line, 0, 1, 2, scale, replay_speed, start, end, file_name)
# 3.5~5
(file_name, scale, replay_speed, start, end) = ('examples/gif/YG/money2_SA.gif', 3, 2, 112, 160)
img = put_gif(im_name, img, part_line, 1, 1, 2, scale, replay_speed, start, end, file_name)
(file_name, scale, replay_speed, start, end) = ('examples/gif/YG/money2_SA.gif', 3, 2, 112, 160)
img = put_gif(im_name, img, part_line, 2, 1, 2, scale, replay_speed, start, end, file_name)
(file_name, scale, replay_speed, start, end) = ('examples/gif/YG/money2_SA.gif', 3, 2, 112, 160)
img = put_gif(im_name, img, part_line, 2, 1, 2, scale, replay_speed, start, end, file_name)
(file_name, scale, replay_speed, start, end) = ('examples/gif/YG/moneyrain_crop.gif', 130, 1, 112, 160)
img = put_gif(im_name, img, part_line, 51, 51, 51, scale, replay_speed, start, end, file_name)
(file_name, scale, replay_speed, start, end) = ('examples/gif/YG/moneyrain_crop.gif', 130, 1, 128, 160)
img = put_gif(im_name, img, part_line, 52, 52, 52, scale, replay_speed, start, end, file_name)
(file_name, scale, replay_speed, start, end) = ('examples/gif/YG/moneyrain_crop.gif', 130, 1, 144, 160)
img = put_gif(im_name, img, part_line, 53, 53, 53, scale, replay_speed, start, end, file_name)
# 5~6
(file_name, scale, replay_speed, start, end) = ('examples/gif/YG/spark2.gif', 20, 1, 174, 189)
img = put_gif(im_name, img, part_line, 10, 10, 10, scale, replay_speed, start, end, file_name)
(file_name, scale, replay_speed, start, end) = ('examples/gif/YG/flame_edited.gif', 20, 1, 189, 207)
img = put_gif(im_name, img, part_line, 10, 10, 10, scale, replay_speed, start, end, file_name)
# 6~7
(file_name, scale, replay_speed, start, end) = ('examples/gif/YG/angel_ring.gif', 25, 1, 223, 233)
img = put_gif(im_name, img, part_line, 31, 31, 31, scale, replay_speed, start, end, file_name)
# 7~9
(file_name, scale, replay_speed, start, end) = ('examples/gif/YG/sunglass.gif', 7, 2, 240, 257)
img = put_gif(im_name, img, part_line, 0, 1, 2, scale, replay_speed, start, end, file_name)
# 9~11
(file_name, scale, replay_speed, start, end) = ('examples/gif/YG/skull_yellow.gif', 7, 3, 277, 329)
img = put_gif(im_name, img, part_line, | |
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Core DTensor Python API."""
import contextlib
import os
import threading
from typing import Any, Callable, List, Optional, Sequence, Union
from tensorflow.dtensor.python import dtensor_device
from tensorflow.dtensor.python import gen_dtensor_ops
from tensorflow.dtensor.python import layout as layout_lib
from tensorflow.python.eager import context
from tensorflow.python.framework import config as tf_config
from tensorflow.python.framework import device as tf_device
from tensorflow.python.framework import ops
from tensorflow.python.util.tf_export import tf_export
# Names of the environment variables used to configure DTensor clusters.
_DT_CLIENT_ID = "DTENSOR_CLIENT_ID"          # id of this client in a multi-client job
_DT_NUM_CLIENTS = "DTENSOR_NUM_CLIENTS"      # total number of clients
_DT_JOB_NAME = "DTENSOR_JOB_NAME"            # job name for this client
_DT_JOBS = "DTENSOR_JOBS"                    # addresses of all jobs in the cluster
_DT_CPU_COUNT = "DTENSOR_CPU_CORE_COUNT"     # override for CPU device count
_DT_GPU_COUNT = "DTENSOR_GPU_CORE_COUNT"     # override for GPU device count
_DT_TPU_COUNT = "DTENSOR_TPU_CORE_COUNT"     # override for TPU device count
_DT_HEARTBEAT_ENABLED = "DTENSOR_ENABLE_HEARTBEAT"  # toggles the heartbeat service

# Lazily-created singleton DTensor device (presumably populated by
# `_dtensor_device()` — defined elsewhere in this module), guarded by the lock.
_dtensor_singleton = None
_dtensor_singleton_lock = threading.Lock()
# -----------------------------------------------------------------------------
# Main methods to launch DTensor computations.
@tf_export("experimental.dtensor.call_with_layout", v1=[])
def call_with_layout(fn: Callable[...,
                                  Any], layout: Optional[layout_lib.Layout],
                     *args, **kwargs) -> Any:
  """Calls a function in the DTensor device scope if `layout` is not None.

  If `layout` is not None, `fn` consumes DTensor(s) as input and produces a
  DTensor as output; a DTensor is a tf.Tensor with layout-related attributes.

  If `layout` is None, `fn` consumes and produces regular tf.Tensors.

  Args:
    fn: A supported TF API function such as tf.zeros.
    layout: Optional, the layout of the output DTensor.
    *args: Arguments given to `fn`.
    **kwargs: Keyword arguments given to `fn`.

  Returns:
    The return value of `fn` transformed to a DTensor if requested.
  """
  if layout is None:
    return fn(*args, **kwargs)
  if context.executing_eagerly():
    with run_on(layout.mesh):
      with _dtensor_device()._default_layout(layout):  # pylint: disable=protected-access
        return fn(*args, **kwargs)
  # This is a workaround for b/199324097, where functions such as tf.ones
  # could attach an incorrect layout to the tf.const generated under the
  # hood. The op runs successfully in eager mode, but in graph mode, MLIR
  # passes sometimes attach the default layout to a scalar constant.
  # %cst = tf.Const([1])  -- With the given layout
  # %0 = "tf.DTensorLayout"(%cst). -- Fails in MLIR pass since shape for
  #                                -- layout could be different than
  #                                -- shape[0] for %cst.
  # %1 = tf.Fill(%0, 1)
  result = fn(*args, **kwargs)
  return relayout(result, layout)
@tf_export("experimental.dtensor.run_on", v1=[])
@contextlib.contextmanager
def run_on(mesh: layout_lib.Mesh):
  """Runs enclosed functions in the DTensor device scope.

  This function returns a scope. All the ops and tf.functions in this scope will
  run on the DTensor device using the mesh provided.
  This is useful for wrapping any tf.function that doesn't take a DTensor as
  input but would like to produce DTensor as result. The scope will also make
  sure all small constants be replicated as DTensor.

  Args:
    mesh: A Mesh instance to extract a default mesh from.

  Yields:
    A context in which all ops and tf.functions will run on the DTensor device.
  """
  if not isinstance(mesh, layout_lib.Mesh):
    raise ValueError(f"Expect `mesh` to be `Mesh`, got {type(mesh)}")
  device = _dtensor_device()
  with device._experimental_default_mesh(mesh):  # pylint: disable=protected-access
    with ops.device(device_name()):
      yield
@tf_export("experimental.dtensor.device_name", v1=[])
def device_name() -> str:
  """Returns the singleton DTensor device's name.

  This function can be used in the following way:

  ```python
  import tensorflow as tf

  with tf.device(dtensor.device_name()):
    # ...
  ```
  """
  singleton_device = _dtensor_device()
  return singleton_device.name
# -----------------------------------------------------------------------------
# Data transfer methods.
@tf_export("experimental.dtensor.copy_to_mesh", v1=[])
def copy_to_mesh(
    tensor: Any,
    layout: layout_lib.Layout,
    source_layout: Optional[layout_lib.Layout] = None) -> ops.Tensor:
  """Copies a tf.Tensor onto the DTensor device with the given layout.

  Copies a regular tf.Tensor onto the DTensor device. The target mesh is the
  mesh attached to `layout`. This method currently only supports replicated
  layouts; to obtain a DTensor with a sharded layout, use the `pack` method.

  Args:
    tensor: A regular tf.Tensor to be copied as a DTensor.
    layout: Target layout (and mesh) for the result DTensor.
    source_layout: Source layout of the tensor before copy, used for backward
      passes.

  Returns:
    A DTensor on the DTensor device with the given layout.
  """
  device = _dtensor_device()
  return device.copy_to_mesh(tensor, layout, source_layout)
@tf_export("experimental.dtensor.pack", v1=[])
def pack(tensors: Sequence[Any], layout: layout_lib.Layout) -> Any:
  """Packs `tf.Tensor` components into a DTensor.

  Packing and unpacking are inverse operations:

  ```
  * unpack(pack(tensors)) == tensors
  * pack(unpack(dtensor)) == dtensor
  ```

  1. For any DTensor on the mesh, `unpack` returns the raw components placed on
     each underlying device.
  2. Packing these raw components in the same order using `pack` returns a
     DTensor which should be identical to the original DTensor--both the content
     value and the layout.

  **Shape, Rank, and Scalars**: The rank of the DTensor is the same as the
  rank of its raw components, i.e., rank is preserved. This leads to a
  consistent interpretation for packing scalar values into a DTensor. The only
  valid layout for a scalar value is fully replicated, and the individual
  components must be identical scalars.

  Each input `tensors[i]` will be copied to `layout.mesh.local_device[i]`
  if not already on the local device. Non-local components should not be passed
  to `pack`; use `copy_to_mesh` and `relayout` to place tensors on all global
  devices on a mesh.

  It is the caller's responsibility to ensure that the underlying values
  for `pack` adhere to the specified layout, and that only as many values are
  specified as there are local devices. Pack does not move data between clients.
  See examples below for more detail about layouts.

  For example, assume we have a mesh `[X(2), Y(3)]`, which has in total 6
  underlying devices. Furthermore, assume that the device location mapping is
  the following:

  ```
  device_ID  |  location X, Y
          0     0, 0
          1     0, 1
          2     0, 2
          3     1, 0
          4     1, 1
          5     1, 2
  ```

  1. For 1-D vector DTensor with shape `[128]` with layout `[mesh.X]` and value
     as `range(128)`, the raw components will have shape `[64]` each, and the
     raw components will be:

     ```
     device_ID  |  raw component
             0     range(0, 64)
             1     range(0, 64)
             2     range(0, 64)
             3     range(64, 128)
             4     range(64, 128)
             5     range(64, 128)
     ```

     This also means for a 1-D DTensor with shape `[2]` and layout `[mesh.X]`,
     the raw components have shape `[1]` rather than the shape for scalar values
     `[]`.

  2. For 2-D vector DTensor with shape `[2, 3]` with layout `[mesh.X, mesh.Y]`
     and value as `range(6)`, this is basically a fully-sharded DTensor.

     From global view, the content looks like
     ```
     [
       [0.0, 1.0, 2.0],
       [3.0, 4.0, 5.0],
     ]
     ```

     The raw components will have shape `[1, 1]` each, and have the following
     content:

     ```
     device_ID  |  raw component
             0     [[0.0]]
             1     [[1.0]]
             2     [[2.0]]
             3     [[3.0]]
             4     [[4.0]]
             5     [[5.0]]
     ```

  3. For a scalar value `123.0` DTensor, it can only have one legitimate layout
     `[]` (no dimension, but fully replicated).

     The raw components will have shape `[]` each, and have the following
     content:

     ```
     device_ID  |  raw component
             0     123.0
             1     123.0
             2     123.0
             3     123.0
             4     123.0
             5     123.0
     ```

     Again, caller of `pack` is expected to provide 6 identical value raw
     components with scalar shapes.

  4. For 3-D vector DTensor with shape `[2, 2, 3]` with layout
     `[X, unsharded, unsharded]` and value as `range(12)`,

     From global view, the content looks like:
     ```
     [
       [
         [0.0, 1.0, 2.0],
         [3.0, 4.0, 5.0],
       ],
       [
         [6.0, 7.0, 8.0],
         [9.0, 10., 11.],
       ],
     ]
     ```

     The raw components will have shape `[1, 2, 3]` each, and have the following
     content:

     ```
     device_ID  |  raw component
             0     range(6).reshape([1, 2, 3])
             1     range(6).reshape([1, 2, 3])
             2     range(6).reshape([1, 2, 3])
             3     range(6, 12).reshape([1, 2, 3])
             4     range(6, 12).reshape([1, 2, 3])
             5     range(6, 12).reshape([1, 2, 3])
     ```

  Args:
    tensors: The list of local tensor components to pack into a DTensor.
    layout: The layout of the DTensor to be created.

  Returns:
    A DTensor created from the individual component tensors.

  Raises:
    RuntimeError: When `pack` is not called eagerly.
  """
  return _dtensor_device().pack(tensors, layout)
@tf_export("experimental.dtensor.unpack", v1=[])
def unpack(tensor: Any) -> Sequence[Any]:
"""Unpacks a DTensor into `tf.Tensor` components.
Packing and unpacking are inverse operations:
```
* unpack(pack(tensors)) == tensors
* pack(unpack(dtensor)) == dtensor
```
1. | |
pandas series and got {0}'.\
format(type(data)))
if user_col not in data or pd.isnull(data[user_col]):
raise ValueError('Missing required username')
if (hpc_user_col not in data or pd.isnull(data[hpc_user_col])) and \
(password_col not in data or pd.isnull(data[password_col])):
raise ValueError('Missing required field password for non-hpc user {0}'.\
format(data[user_col]))
username = data[user_col]
hpc_username = data[hpc_user_col]
password = data[password_col]
check_cmd1 = ['iadmin','lu']
check_cmd2 = ['grep','-w',quote(username)]
c_proc1 = \
subprocess.Popen(
check_cmd1,
stdout=subprocess.PIPE)
c_proc2 = \
subprocess.Popen(
check_cmd2,
stdin=c_proc1.stdout,
stdout=subprocess.PIPE)
c_proc1.stdout.close()
if c_proc1.returncode !=None:
raise ValueError(
'Failed running command {0}:{1}'.\
format(check_cmd1,c_proc1.returncode))
result = c_proc2.communicate()[0]
result = result.decode('UTF-8')
if result != '' and pd.isnull(data[hpc_user_col]): # for non hpc users
if self.check_hpc_user:
raise ValueError(
'Can not reset iRODS password for non hpc user {0} with check_hpc_user option'.\
format(username))
else:
if password is not None or password != '':
irods_passwd_cmd = \
'{0} {1} {2}#{3} {4} {5}'.\
format(
'iadmin',
'moduser',
quote(username),
'igfZone',
'password',
quote(password)) # format irods command for shell
subprocess.check_call(
irods_passwd_cmd,
shell=True)
if self.log_slack:
message = \
'resetting irods account password for non-hpc user: {0}, password length: {1}'.\
format(username,len(password))
self.igf_slack.post_message_to_channel(
message,reaction='pass')
else:
raise ValueError('Missing password for non-hpc user {0}'.\
format(quote(username)))
elif result=='':
irods_mkuser_cmd = [
'iadmin',
'mkuser',
'{0}#igfZone'.format(quote(username)),
'rodsuser']
subprocess.check_call(irods_mkuser_cmd) # create irods user
irods_chmod_cmd = [
'ichmod',
'-M',
'own',
'igf',
'/igfZone/home/{0}'.format(quote(username))]
subprocess.check_call(irods_chmod_cmd) # change permission for irods user
irods_inherit_cmd = [
'ichmod',
'-r',
'inherit',
'/igfZone/home/{0}'.format(quote(username))]
subprocess.check_call(irods_inherit_cmd) # inherit irods user
if (hpc_username is None or hpc_username == '' ) and \
(password is not None or password != ''):
if len(password)>20:
raise ValueError(
'check password for non hpc user {0}: {1}'.\
format(username,password)) # it could be the encrypted password
irods_passwd_cmd = \
'{0} {1} {2}#{3} {4} {5}'.\
format(
'iadmin',
'moduser',
quote(username),
'igfZone',
'password',
quote(password)) # format irods command for shell
subprocess.check_call(
irods_passwd_cmd,
shell=True) # set password for non-hpc user
if self.log_slack:
message='created irods account for non-hpc user: {0}'.\
format(username)
self.igf_slack.post_message_to_channel(
message,reaction='pass')
except Exception as e:
raise ValueError('Failed to setup irods account, error: {0}'.format(e))
def _get_hpc_username(self, username):
    '''
    An internal method for checking hpc accounts for new users

    This method is not reliable as the ldap server can be down from time to time

    :param username: A username string
    :returns: The username if a matching `uid:` entry is found on the ldap
              server, None otherwise
    :raises ValueError: If the remote ldapsearch command fails
    '''
    try:
        # run ldapsearch on the hpc login node and grep its output locally
        cmd1 = [
            'ssh',
            '{0}@{1}'.format(quote(self.hpc_user),
                             quote(self.hpc_address)),
            'ldapsearch -x -h {0}'.format(quote(self.ldap_server))]
        cmd2 = [
            'grep',
            '-w',
            'uid: {0}'.format(quote(username))]
        proc1 = \
            subprocess.Popen(
                cmd1,
                stdout=subprocess.PIPE)
        proc2 = \
            subprocess.Popen(
                cmd2,
                stdin=proc1.stdout,
                stdout=subprocess.PIPE)
        proc1.stdout.close()  # let proc1 get SIGPIPE if grep exits first
        result = proc2.communicate()[0]
        result = result.decode('UTF-8')
        # FIX: the old check (`proc1.returncode != None`) ran immediately after
        # Popen, when returncode is always None — it never caught real failures
        # and would have raised even on a successful (code 0) early exit.
        # Wait for the ssh process and fail only on a non-zero exit status.
        proc1.wait()
        if proc1.returncode:
            raise ValueError(
                'Failed running command {0}:{1}'.\
                format(cmd1, proc1.returncode))
        if result == '':
            hpc_username = None  # no matching uid entry: not an hpc user
        else:
            hpc_username = username
        return hpc_username
    except Exception as e:
        raise ValueError('Failed to get hpc user name, error: {0}'.format(e))
def _assign_username_and_password(
        self, data, user_col='username', hpc_user_col='hpc_username', password_col='password',
        email_col='email_id', hpc_category='HPC_USER', category_col='category'):
    '''
    An internal method for assigning new user account and password

    Fills in missing username (from the hpc username or the email id),
    hpc username (via ldap lookup when check_hpc_user is set), password
    (random, for non-hpc users only) and category fields of a user record.

    :param data: A pandas series containing user data
    :param user_col: Column name for username, default username
    :param password_col: Column name for password, default password
    :param hpc_user_col: Column name for hpc_username, default hpc_username
    :param email_col: Column name for email id, default email_id
    :param category_col: Column name for user category, default category
    :param hpc_category: Category tag for hpc user, default: HPC_USER
    :returns: The input series with the missing fields populated
    :raises ValueError: If the input is not a pandas Series, or if username
                        and hpc_username disagree
    '''
    try:
        if not isinstance(data, pd.Series):
            raise ValueError('Expecting a pandas series and got {0}'.\
                format(type(data)))
        if (user_col not in data or pd.isnull(data[user_col])) and \
           (hpc_user_col in data and not pd.isnull(data[hpc_user_col])):  # if hpc username found, make it username
            data[user_col] = data[hpc_user_col]
        if (user_col not in data or \
                (user_col in data and pd.isnull(data[user_col]))):  # assign username from email id
            username, _ = data[email_col].split('@', 1)  # get username from email id
            data[user_col] = \
                username[:10] \
                if len(username) > 10 \
                else username  # allowing only first 10 chars of the email id
        if (hpc_user_col not in data or pd.isnull(data[hpc_user_col])) and \
                self.check_hpc_user:  # assign hpc username
            hpc_username = \
                self._get_hpc_username(
                    username=data[user_col])
            data[hpc_user_col] = hpc_username  # set hpc username
        if user_col in data and not pd.isnull(data[user_col]) and \
                hpc_user_col in data and not pd.isnull(data[hpc_user_col]) and \
                data[user_col] != data[hpc_user_col]:  # if user name and hpc username both are present, they should be same
            raise ValueError(
                'username {0} and hpc_username {1} should be same'.\
                format(data[user_col], data[hpc_user_col]))
        if (hpc_user_col not in data or pd.isnull(data[hpc_user_col])) and \
           (password_col not in data or pd.isnull(data[password_col])):
            data[password_col] = self._get_user_password()  # assign a random password if its not supplied
        if (category_col not in data or pd.isnull(data[category_col])) and \
           (hpc_user_col in data and not pd.isnull(data[hpc_user_col])):  # set user category for hpc users
            data[category_col] = hpc_category
        return data
    except Exception as e:
        raise ValueError(\
            'Failed to assign username and pass, error: {0}'.format(e))
def _add_default_user_to_project(self, project_user_data):
    '''
    An internal method for adding default user to the project_user_data dataframe

    Every input row is kept and immediately followed by a copy whose user
    lookup column points at the default user's email.

    :param project_user_data: A dataframe containing project_igf_id and email_id column
    :returns: a pandas dataframe with new row for the project_igf_id and default_user_email
    '''
    try:
        expanded_rows = []
        for record in project_user_data.to_dict(orient='records'):
            default_record = deepcopy(record)
            default_record[self.user_lookup_column] = self.default_user_email
            expanded_rows.extend([record, default_record])
        return pd.DataFrame(expanded_rows)
    except Exception as e:
        raise ValueError('Failed to default user, error: {0}'.format(e))
def _check_and_register_data(self,data,project_info_file):
'''
An internal method for checking and registering data
:param data: A dictionary containing following keys
* project_data
* user_data
* project_user_data
* sample_data
:param project_info_file: A filepath for project info
'''
try:
db_connected = False
project_data = pd.DataFrame(data['project_data'])
user_data = pd.DataFrame(data['user_data'])
project_user_data = pd.DataFrame(data['project_user_data'])
sample_data = pd.DataFrame(data['sample_data'])
base = BaseAdaptor(**{'session_class':self.session_class})
base.start_session() # connect_to db
db_connected = True
project_data = \
project_data[project_data[self.project_lookup_column].isnull()==False]
project_data = project_data.drop_duplicates()
if project_data.index.size > 0:
project_data['EXISTS'] = ''
project_data = \
project_data.apply(
lambda x: \
self._check_existing_data(
data=x,
dbsession=base.session,
table_name='project',
check_column='EXISTS'),
axis=1,
result_type=None) # get project map
project_data = \
project_data[project_data['EXISTS']==False] # filter existing projects
project_data.drop(
'EXISTS',
axis=1,
inplace=True) # remove extra column
user_data = \
user_data[user_data[self.user_lookup_column].isnull()==False]
user_data = user_data.drop_duplicates()
if user_data.index.size > 0:
user_data = \
user_data.apply(
lambda x: \
self._assign_username_and_password(x),
axis=1) # check for use account and password
user_data['EXISTS'] = ''
user_data = \
user_data.apply(
lambda x: \
self._check_existing_data(
data=x,
dbsession=base.session,
table_name='user',
check_column='EXISTS'),\
axis=1,
result_type=None) # get user map
user_data = \
user_data[user_data['EXISTS']==False] # filter existing users
user_data.drop(
'EXISTS',
axis=1,
inplace=True) # remove extra column
sample_data = \
sample_data[sample_data[self.sample_lookup_column].isnull()==False]
sample_data = \
sample_data.drop_duplicates()
if sample_data.index.size > 0:
sample_data['EXISTS'] = ''
sample_data = \
sample_data.apply(
lambda x: \
self._check_existing_data(
data=x,
dbsession=base.session,
table_name='sample',
check_column='EXISTS'),\
axis=1,
result_type=None) # get sample map
sample_data = \
sample_data[sample_data['EXISTS']==False] # filter existing samples
sample_data.drop(
'EXISTS',
axis=1,
inplace=True) # remove extra column
project_user_data = \
project_user_data.drop_duplicates()
project_user_data_mask = \
(project_user_data[self.project_lookup_column].isnull()==False) & \
(project_user_data[self.user_lookup_column].isnull()==False)
project_user_data = \
project_user_data[project_user_data_mask] # not allowing any empty values for project or user lookup
if project_user_data.index.size > 0:
project_user_data = \
self._add_default_user_to_project(project_user_data) # update project_user_data with default users
project_user_data['EXISTS'] = ''
project_user_data[self.data_authority_column] = ''
project_user_data = \
project_user_data.apply(
lambda x: \
self._check_existing_data(
data=x,
dbsession=base.session,
table_name='project_user',
check_column='EXISTS'),\
axis=1,
result_type=None) # get project user map
project_user_data = \
project_user_data[project_user_data['EXISTS']==False] # filter existing project user
project_user_data.drop(
'EXISTS',
axis=1,
inplace=True) # remove extra column
if len(project_data.index) > 0: # store new projects
pa1 = \
ProjectAdaptor(**{'session':base.session}) # connect to project adaptor
pa1.store_project_and_attribute_data(
data=project_data,
autosave=False) # load project data
if len(user_data.index) > 0: # store new users
ua = UserAdaptor(**{'session':base.session})
ua.store_user_data(
data=user_data,
autosave=False) # load user data
if len(project_user_data.index) > 0: # store new project users
pa2 = ProjectAdaptor(**{'session':base.session}) # connect to project adaptor
project_user_data = \
project_user_data.to_dict(orient='records') # convert dataframe to dictionary
pa2.assign_user_to_project(
data=project_user_data,
autosave=False) # load project user data
if len(sample_data.index) > 0: # store new samples
sa = SampleAdaptor(**{'session':base.session}) # connect to sample adaptor
sa.store_sample_and_attribute_data(
data=sample_data,
autosave=False) # load samples data
if self.setup_irods:
user_data.apply(
lambda x: \
self._setup_irods_account(data=x),
axis=1) # create irods account
file_checksum = \
calculate_file_checksum(
filepath=project_info_file)
file_size = \
os.path.getsize(project_info_file)
file_data = [{
'file_path':project_info_file,
'location':'ORWELL',
'md5':file_checksum,
'size':file_size}]
fa = FileAdaptor(**{'session':base.session}) # connect to file adaptor
fa.store_file_data(
data=file_data,
autosave=False)
except Exception as e:
if db_connected:
base.rollback_session() # rollback session
raise ValueError(
'Failed to check and register data, error: {0}'.format(e))
else:
if db_connected:
base.commit_session() # commit changes to db
if len(user_data.index) > 0 and self.notify_user:
user_data.apply(
lambda x: \
self._notify_about_new_user_account(x),
axis=1) # send mail to new user with their password | |
- **MaxItems** *(integer) --*
The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
- **PageSize** *(integer) --*
The size of each page.
- **StartingToken** *(string) --*
A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
:rtype: dict
:returns:
"""
pass
class DescribeInstanceInformation(Paginator):
    """Paginator stub for ``SSM.Client.describe_instance_information``."""

    def paginate(self, InstanceInformationFilterList: List = None, Filters: List = None, PaginationConfig: Dict = None) -> Dict:
        """
        Creates an iterator that will paginate through responses from
        :py:meth:`SSM.Client.describe_instance_information`.

        See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DescribeInstanceInformation>`_

        :type InstanceInformationFilterList: list
        :param InstanceInformationFilterList:
            Legacy filter list of ``{'key': ..., 'valueSet': [...]}`` dicts
            (keys: InstanceIds, AgentVersion, PingStatus, PlatformTypes,
            ActivationIds, IamRole, ResourceType, AssociationStatus).  Cannot
            be combined with ``Filters`` — doing so causes an exception error.
        :type Filters: list
        :param Filters:
            One or more ``{'Key': 'string', 'Values': ['string', ...]}``
            filters to return a more specific list of instances; supports
            filtering on Amazon EC2 tags.
        :type PaginationConfig: dict
        :param PaginationConfig:
            Pagination controls: ``MaxItems`` (total items to return),
            ``PageSize`` (size of each page) and ``StartingToken`` (the
            ``NextToken`` from a previous response).
        :rtype: dict
        :returns:
            A page with an ``InstanceInformationList`` of instance records
            (InstanceId, PingStatus, LastPingDateTime, AgentVersion,
            IsLatestVersion, PlatformType/Name/Version, ActivationId, IamRole,
            RegistrationDate, ResourceType, Name, IPAddress, ComputerName,
            association status fields and AssociationOverview).
        """
        pass
class DescribeInstancePatchStates(Paginator):
def paginate(self, InstanceIds: List, PaginationConfig: Dict = None) -> Dict:
"""
Creates an iterator that will paginate through responses from :py:meth:`SSM.Client.describe_instance_patch_states`.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DescribeInstancePatchStates>`_
**Request Syntax**
::
response_iterator = paginator.paginate(
InstanceIds=[
'string',
],
PaginationConfig={
'MaxItems': 123,
'PageSize': 123,
'StartingToken': 'string'
}
)
**Response Syntax**
::
{
'InstancePatchStates': [
{
'InstanceId': 'string',
'PatchGroup': 'string',
'BaselineId': 'string',
'SnapshotId': 'string',
'InstallOverrideList': 'string',
'OwnerInformation': 'string',
'InstalledCount': 123,
'InstalledOtherCount': 123,
'InstalledRejectedCount': 123,
'MissingCount': 123,
'FailedCount': 123,
'NotApplicableCount': 123,
'OperationStartTime': datetime(2015, 1, 1),
'OperationEndTime': datetime(2015, 1, 1),
'Operation': 'Scan'|'Install'
},
],
}
**Response Structure**
- *(dict) --*
- **InstancePatchStates** *(list) --*
The high-level patch state for the requested instances.
- *(dict) --*
Defines the high-level patch compliance state for a managed instance, providing information about the number of installed, missing, not applicable, and failed patches along with metadata about the operation when this information was gathered for the instance.
- **InstanceId** *(string) --*
The ID of the managed instance the high-level patch compliance information was collected for.
- **PatchGroup** *(string) --*
The name of the patch group the managed instance belongs to.
- **BaselineId** *(string) --*
The ID of the patch baseline used to patch the instance.
- **SnapshotId** *(string) --*
The ID of the patch baseline snapshot used during the patching operation when this compliance data was collected.
- **InstallOverrideList** *(string) --*
An https URL or an Amazon S3 path-style URL to a list of patches to be installed. This patch installation list, which you maintain in an Amazon S3 bucket in YAML format and specify in the SSM document ``AWS-RunPatchBaseline`` , overrides the patches specified by the default patch baseline.
For more information about the ``InstallOverrideList`` parameter, see `About the SSM Document AWS-RunPatchBaseline <http://docs.aws.amazon.com/systems-manager/latest/userguide/patch-manager-about-aws-runpatchbaseline.html>`__ in the *AWS Systems Manager User Guide* .
- **OwnerInformation** *(string) --*
Placeholder information. This field will always be empty in the current release of the service.
- **InstalledCount** *(integer) --*
The number of patches from the patch baseline that are installed on the instance.
- **InstalledOtherCount** *(integer) --*
The number of patches not specified in the patch baseline that are installed on the instance.
- **InstalledRejectedCount** *(integer) --*
The number of instances with patches installed that are specified in a RejectedPatches list. Patches with a status of *InstalledRejected* | |
#!/usr/bin/env python3
import argparse
import codecs
import errno
import hashlib
import itertools
import json
import multiprocessing
import os
import os.path
import re
import shutil
import subprocess
import sys
import tempfile
from functools import partial
# Output backend under test; presumably reassigned from CLI args later — confirm elsewhere in the file.
backend = 'glsl'
# Parsed command-line options; populated after argument parsing
# (validate_shader_hlsl reads args.force_no_external_validation as an attribute).
args = {}
def remove_file(path):
    """Delete the file at *path* from disk."""
    os.remove(path)
def create_temporary(suff = ''):
    """Create an empty temporary file and return its path.

    The caller owns the file and is responsible for deleting it.
    """
    handle, path = tempfile.mkstemp(suffix = suff)
    os.close(handle)
    return path
def parse_stats(stats):
    """Parse malisc's textual output into a tuple of shader statistics.

    Returns (registers, uniform_regs, alu_short, ls_short, tex_short,
    alu_long, ls_long, tex_long); any field that cannot be found defaults
    to 0.

    Fix: the regex patterns are now raw strings — the original used plain
    strings containing ``\\s``, an invalid escape sequence (DeprecationWarning
    on modern Python).
    """
    m = re.search(r'([0-9]+) work registers', stats)
    registers = int(m.group(1)) if m else 0
    m = re.search(r'([0-9]+) uniform registers', stats)
    uniform_regs = int(m.group(1)) if m else 0
    # Cycle counts appear as rows of three signed integers; rows 1 and 2 of
    # the matches hold the shortest- and longest-path counts respectively.
    m_list = re.findall(r'(-?[0-9]+)\s+(-?[0-9]+)\s+(-?[0-9]+)', stats)
    alu_short = float(m_list[1][0]) if m_list else 0
    ls_short = float(m_list[1][1]) if m_list else 0
    tex_short = float(m_list[1][2]) if m_list else 0
    alu_long = float(m_list[2][0]) if m_list else 0
    ls_long = float(m_list[2][1]) if m_list else 0
    tex_long = float(m_list[2][2]) if m_list else 0
    return (registers, uniform_regs, alu_short, ls_short, tex_short, alu_long, ls_long, tex_long)
def get_shader_type(shader):
    """Map a shader file extension to the matching malisc stage flag.

    Returns '' for unrecognized extensions.
    """
    stage_flags = {
        '.vert': '--vertex',
        '.frag': '--fragment',
        '.comp': '--compute',
        '.tesc': '--tessellation_control',
        '.tese': '--tessellation_evaluation',
        '.geom': '--geometry',
    }
    return stage_flags.get(os.path.splitext(shader)[1], '')
def get_shader_stats(shader):
    """Run malisc on *shader* and return its parsed statistics tuple.

    Raises OSError (after echoing stderr) when malisc exits non-zero.

    Fixes: the original created and immediately deleted an unused temporary
    file, and called ``p.wait()`` after ``communicate()`` (which already
    waits); both pieces of dead work have been removed.
    """
    p = subprocess.Popen(['malisc', get_shader_type(shader), '--core', 'Mali-T760', '-V', shader], stdout = subprocess.PIPE, stderr = subprocess.PIPE)
    stdout, stderr = p.communicate()
    if p.returncode != 0:
        print(stderr.decode('utf-8'))
        raise OSError('malisc failed')
    return parse_stats(stdout.decode('utf-8'))
def print_msl_compiler_version():
    """Print the installed Metal compiler's version via xcrun.

    Silently does nothing when xcrun is not installed (non-macOS hosts).

    Bug fix: ``os.errno`` does not exist on Python 3.6+ (it raised
    AttributeError inside the handler); use the ``errno`` module instead.
    """
    try:
        subprocess.check_call(['xcrun', '--sdk', 'iphoneos', 'metal', '--version'])
        print('...are the Metal compiler characteristics.\n') # display after so xcrun FNF is silent
    except OSError as e:
        if (e.errno != errno.ENOENT): # Ignore xcrun not found error
            raise
def validate_shader_msl(shader, opt):
    """Compile the generated MSL reference for *shader* with the Metal compiler.

    *shader* is a (directory, relpath) pair; *opt* selects the optimized
    reference tree.  Exits the process with status 1 when the shader fails
    to compile; silently skips validation when xcrun is unavailable.

    Bug fix: ``os.errno`` does not exist on Python 3.6+; use the ``errno``
    module instead.
    """
    msl_path = reference_path(shader[0], shader[1], opt)
    msl2 = '.msl2.' in msl_path
    try:
        msl_os = 'macosx'
        # msl_os = 'iphoneos'
        subprocess.check_call(['xcrun', '--sdk', msl_os, 'metal', '-x', 'metal', '-std=osx-metal{}'.format('2.0' if msl2 else '1.2'), '-Werror', '-Wno-unused-variable', msl_path])
        print('Compiled Metal shader: ' + msl_path) # display after so xcrun FNF is silent
    except OSError as oe:
        if (oe.errno != errno.ENOENT): # Ignore xcrun not found error
            raise
    except subprocess.CalledProcessError:
        print('Error compiling Metal shader: ' + msl_path)
        sys.exit(1)
def cross_compile_msl(shader, spirv, opt):
    """Build SPIR-V from *shader*, cross-compile it to MSL, and validate the SPIR-V.

    *spirv* marks the input as SPIR-V assembly; *opt* runs spirv-opt first.
    Returns (spirv_path, msl_path), both temporary files owned by the caller.
    """
    use_msl2 = '.msl2.' in shader
    spirv_path = create_temporary()
    msl_path = create_temporary(os.path.basename(shader))
    # Assemble raw SPIR-V assembly directly; otherwise compile GLSL with glslang.
    if spirv:
        subprocess.check_call(['spirv-as', '-o', spirv_path, shader])
    else:
        subprocess.check_call(['glslangValidator', '--target-env', 'vulkan1.1', '-V', '-o', spirv_path, shader])
    if opt:
        subprocess.check_call(['spirv-opt', '-O', '-o', spirv_path, spirv_path])
    msl_args = ['./spirv-cross', '--entry', 'main', '--output', msl_path, spirv_path, '--msl']
    if use_msl2:
        msl_args += ['--msl-version', '20000']
    subprocess.check_call(msl_args)
    # Deliberately-invalid SPIR-V tests skip the validator.
    if not shader_is_invalid_spirv(msl_path):
        subprocess.check_call(['spirv-val', '--target-env', 'vulkan1.1', spirv_path])
    return (spirv_path, msl_path)
def shader_model_hlsl(shader):
    """Pick the fxc target-profile flag from the shader stage tag in the filename.

    Returns None when no known stage tag is present.
    """
    for tag, profile in (('.vert', '-Tvs_5_1'), ('.frag', '-Tps_5_1'), ('.comp', '-Tcs_5_1')):
        if tag in shader:
            return profile
    return None
def shader_to_win_path(shader):
    # It's (very) convenient to be able to run HLSL testing in wine on Unix-likes, so support that.
    """Translate *shader* to a Windows-style path via winepath.

    Falls back to returning the path unchanged when winepath is not
    installed or fails.

    Bug fix: ``os.errno`` does not exist on Python 3.6+ (the handler raised
    AttributeError instead of falling back); use the ``errno`` module.
    """
    try:
        with subprocess.Popen(['winepath', '-w', shader], stdout = subprocess.PIPE, stderr = subprocess.PIPE) as f:
            stdout_data, stderr_data = f.communicate()
            return stdout_data.decode('utf-8')
    except OSError as oe:
        if (oe.errno != errno.ENOENT): # Ignore not found errors
            return shader
    except subprocess.CalledProcessError:
        raise
    return shader
# Set to True the first time the fxc binary is found to be missing so later
# HLSL validations skip the external compiler (see validate_shader_hlsl).
ignore_fxc = False
def validate_shader_hlsl(shader):
    """Validate an HLSL shader with glslangValidator and, when available, FXC.

    FXC validation is skipped for '.nofxc.' shaders, when external validation
    is disabled via args, or after the fxc binary was first found missing
    (ignore_fxc latch).  Exits the process when FXC rejects the shader.

    Bug fix: ``os.errno`` does not exist on Python 3.6+; use the ``errno``
    module instead.
    """
    subprocess.check_call(['glslangValidator', '-e', 'main', '-D', '--target-env', 'vulkan1.1', '-V', shader])
    is_no_fxc = '.nofxc.' in shader
    global ignore_fxc
    if (not ignore_fxc) and (not args.force_no_external_validation) and (not is_no_fxc):
        try:
            win_path = shader_to_win_path(shader)
            subprocess.check_call(['fxc', '-nologo', shader_model_hlsl(shader), win_path])
        except OSError as oe:
            if (oe.errno != errno.ENOENT): # Ignore not found errors
                raise
            else:
                ignore_fxc = True
        except subprocess.CalledProcessError:
            print('Failed compiling HLSL shader:', shader, 'with FXC.')
            sys.exit(1)
def shader_to_sm(shader):
    """Derive the HLSL shader-model number from tags in the filename.

    Defaults to '50' when no explicit tag is present.
    """
    for tag, model in (('.sm60.', '60'), ('.sm51.', '51'), ('.sm20.', '20')):
        if tag in shader:
            return model
    return '50'
def cross_compile_hlsl(shader, spirv, opt):
    """Compile *shader* to SPIR-V, emit HLSL with spirv-cross, and validate both.

    Returns (spirv_path, hlsl_path), temporary files owned by the caller.
    """
    spirv_path = create_temporary()
    hlsl_path = create_temporary(os.path.basename(shader))
    # SPIR-V assembly inputs are assembled; GLSL inputs go through glslang.
    if spirv:
        assemble_cmd = ['spirv-as', '-o', spirv_path, shader]
    else:
        assemble_cmd = ['glslangValidator', '--target-env', 'vulkan1.1', '-V', '-o', spirv_path, shader]
    subprocess.check_call(assemble_cmd)
    if opt:
        subprocess.check_call(['spirv-opt', '-O', '-o', spirv_path, spirv_path])
    target_model = shader_to_sm(shader)
    subprocess.check_call(['./spirv-cross', '--entry', 'main', '--output', hlsl_path, spirv_path, '--hlsl-enable-compat', '--hlsl', '--shader-model', target_model])
    # Deliberately-invalid SPIR-V tests skip the validator.
    if not shader_is_invalid_spirv(hlsl_path):
        subprocess.check_call(['spirv-val', '--target-env', 'vulkan1.1', spirv_path])
    validate_shader_hlsl(hlsl_path)
    return (spirv_path, hlsl_path)
def cross_compile_reflect(shader, spirv, opt):
    """Compile *shader* to SPIR-V and emit its JSON reflection via spirv-cross.

    Returns (spirv_path, reflect_path), temporary files owned by the caller.

    Fix: dropped the unused ``sm = shader_to_sm(shader)`` computation — the
    reflection backend takes no shader-model argument.
    """
    spirv_path = create_temporary()
    reflect_path = create_temporary(os.path.basename(shader))
    if spirv:
        subprocess.check_call(['spirv-as', '-o', spirv_path, shader])
    else:
        subprocess.check_call(['glslangValidator', '--target-env', 'vulkan1.1', '-V', '-o', spirv_path, shader])
    if opt:
        subprocess.check_call(['spirv-opt', '-O', '-o', spirv_path, spirv_path])
    spirv_cross_path = './spirv-cross'
    subprocess.check_call([spirv_cross_path, '--entry', 'main', '--output', reflect_path, spirv_path, '--reflect'])
    return (spirv_path, reflect_path)
def validate_shader(shader, vulkan):
    """Run glslangValidator over *shader*, in Vulkan mode when requested."""
    cmd = ['glslangValidator']
    if vulkan:
        cmd += ['--target-env', 'vulkan1.1', '-V']
    cmd.append(shader)
    subprocess.check_call(cmd)
def cross_compile(shader, vulkan, spirv, invalid_spirv, eliminate, is_legacy, flatten_ubo, sso, flatten_dim, opt):
    """Compile *shader* to SPIR-V and cross-compile it back to (Vulkan) GLSL.

    The boolean flags mirror the '.tag.' markers in the test's filename.
    Returns (spirv_path, glsl_path, vulkan_glsl_path): glsl_path is None for
    'nocompat' shaders, vulkan_glsl_path is None unless *vulkan* is set.
    All returned paths are temporary files owned by the caller.
    """
    spirv_path = create_temporary()
    glsl_path = create_temporary(os.path.basename(shader))
    if vulkan or spirv:
        vulkan_glsl_path = create_temporary('vk' + os.path.basename(shader))
    # SPIR-V assembly inputs are assembled directly; GLSL goes through glslang.
    if spirv:
        subprocess.check_call(['spirv-as', '-o', spirv_path, shader])
    else:
        subprocess.check_call(['glslangValidator', '--target-env', 'vulkan1.1', '-V', '-o', spirv_path, shader])
    if opt and (not invalid_spirv):
        subprocess.check_call(['spirv-opt', '-O', '-o', spirv_path, spirv_path])
    if not invalid_spirv:
        subprocess.check_call(['spirv-val', '--target-env', 'vulkan1.1', spirv_path])
    # Translate the per-shader feature flags into spirv-cross options.
    extra_args = []
    if eliminate:
        extra_args += ['--remove-unused-variables']
    if is_legacy:
        extra_args += ['--version', '100', '--es']
    if flatten_ubo:
        extra_args += ['--flatten-ubo']
    if sso:
        extra_args += ['--separate-shader-objects']
    if flatten_dim:
        extra_args += ['--flatten-multidimensional-arrays']
    spirv_cross_path = './spirv-cross'
    # A shader might not be possible to make valid GLSL from, skip validation for this case.
    if not ('nocompat' in glsl_path):
        subprocess.check_call([spirv_cross_path, '--entry', 'main', '--output', glsl_path, spirv_path] + extra_args)
        validate_shader(glsl_path, False)
    else:
        remove_file(glsl_path)
        glsl_path = None
    if vulkan or spirv:
        subprocess.check_call([spirv_cross_path, '--entry', 'main', '--vulkan-semantics', '--output', vulkan_glsl_path, spirv_path] + extra_args)
        validate_shader(vulkan_glsl_path, True)
        # SPIR-V shaders might just want to validate Vulkan GLSL output, we don't always care about the output.
        if not vulkan:
            remove_file(vulkan_glsl_path)
    return (spirv_path, glsl_path, vulkan_glsl_path if vulkan else None)
def make_unix_newline(buf):
    """Re-encode *buf* (UTF-8 bytes) with every carriage return removed."""
    text = codecs.decode(buf, 'utf-8')
    return codecs.encode(text.replace('\r', ''), 'utf-8')
def md5_for_file(path):
    """MD5 digest of the file at *path* with newlines normalized to Unix style."""
    digest = hashlib.md5()
    with open(path, 'rb') as f:
        while True:
            chunk = make_unix_newline(f.read(8192))
            if chunk == b'':
                break
            digest.update(chunk)
    return digest.digest()
def make_reference_dir(path):
    """Ensure the directory that will hold reference file *path* exists.

    Fix: use ``exist_ok=True`` instead of the original check-then-create
    sequence, which could race with a concurrent creator.
    """
    os.makedirs(os.path.dirname(path), exist_ok=True)
def reference_path(directory, relpath, opt):
    """Compute where the reference copy of directory/relpath is stored.

    Optimized-output references live under 'reference/opt/', others under
    'reference/'.
    """
    parent, leaf = os.path.split(directory)
    subdir = 'reference/opt/' if opt else 'reference/'
    return os.path.join(parent, subdir, leaf, relpath)
def json_ordered(obj):
    """Canonicalize parsed JSON so comparisons ignore dict/list ordering."""
    if isinstance(obj, dict):
        items = [(key, json_ordered(value)) for key, value in obj.items()]
        items.sort()
        return items
    if isinstance(obj, list):
        normalized = [json_ordered(element) for element in obj]
        normalized.sort()
        return normalized
    return obj
def json_compare(json_a, json_b):
    """True when two parsed JSON documents are equal up to ordering."""
    lhs = json_ordered(json_a)
    rhs = json_ordered(json_b)
    return lhs == rhs
def regression_check_reflect(shader, json_file, update, keep, opt):
    """Compare generated reflection JSON against the stored reference copy.

    *shader* is a (directory, relpath) pair; *json_file* is the freshly
    generated reflection output.  On mismatch the reference is overwritten
    when *update* is set, otherwise the generated JSON is dumped and the
    process exits(1) (*keep* preserves the generated file for inspection).
    New shaders have their output installed as the initial reference.
    """
    reference = reference_path(shader[0], shader[1], opt) + '.json'
    joined_path = os.path.join(shader[0], shader[1])
    print('Reference shader reflection path:', reference)
    if os.path.exists(reference):
        actual = ''
        expected = ''
        with open(json_file) as f:
            actual_json = f.read();
            actual = json.loads(actual_json)
        with open(reference) as f:
            expected = json.load(f)
        # Order-insensitive comparison: reflection output key order may vary.
        if (json_compare(actual, expected) != True):
            if update:
                print('Generated reflection json has changed for {}!'.format(reference))
                # If we expect changes, update the reference file.
                if os.path.exists(reference):
                    remove_file(reference)
                make_reference_dir(reference)
                shutil.move(json_file, reference)
            else:
                print('Generated reflection json in {} does not match reference {}!'.format(json_file, reference))
                with open(json_file, 'r') as f:
                    print('')
                    print('Generated:')
                    print('======================')
                    print(f.read())
                    print('======================')
                    print('')
                # Otherwise, fail the test. Keep the shader file around so we can inspect.
                if not keep:
                    remove_file(json_file)
                sys.exit(1)
        else:
            # Matches the reference; the generated copy is no longer needed.
            remove_file(json_file)
    else:
        print('Found new shader {}. Placing generated source code in {}'.format(joined_path, reference))
        make_reference_dir(reference)
        shutil.move(json_file, reference)
def regression_check(shader, glsl, update, keep, opt):
    """Compare generated shader source against the stored reference copy.

    *shader* is a (directory, relpath) pair; *glsl* is the freshly generated
    output file.  Comparison uses newline-normalized MD5 digests.  On
    mismatch the reference is overwritten when *update* is set, otherwise the
    generated source is dumped and the process exits(1) (*keep* preserves the
    generated file for inspection).  New shaders have their output installed
    as the initial reference.
    """
    reference = reference_path(shader[0], shader[1], opt)
    joined_path = os.path.join(shader[0], shader[1])
    print('Reference shader path:', reference)
    if os.path.exists(reference):
        if md5_for_file(glsl) != md5_for_file(reference):
            if update:
                print('Generated source code has changed for {}!'.format(reference))
                # If we expect changes, update the reference file.
                if os.path.exists(reference):
                    remove_file(reference)
                make_reference_dir(reference)
                shutil.move(glsl, reference)
            else:
                print('Generated source code in {} does not match reference {}!'.format(glsl, reference))
                with open(glsl, 'r') as f:
                    print('')
                    print('Generated:')
                    print('======================')
                    print(f.read())
                    print('======================')
                    print('')
                # Otherwise, fail the test. Keep the shader file around so we can inspect.
                if not keep:
                    remove_file(glsl)
                sys.exit(1)
        else:
            # Matches the reference; the generated copy is no longer needed.
            remove_file(glsl)
    else:
        print('Found new shader {}. Placing generated source code in {}'.format(joined_path, reference))
        make_reference_dir(reference)
        shutil.move(glsl, reference)
# Filename-tag predicates: each test mode is encoded as a '.tag.' marker in
# the shader's filename.
def shader_is_vulkan(shader):
    """True when the shader is tagged as a Vulkan GLSL test."""
    return shader.find('.vk.') >= 0
def shader_is_desktop(shader):
    """True when the shader targets desktop GLSL."""
    return shader.find('.desktop.') >= 0
def shader_is_eliminate_dead_variables(shader):
    """True unless the shader opts out of dead-variable elimination."""
    return shader.find('.noeliminate.') < 0
def shader_is_spirv(shader):
    """True when the input is SPIR-V assembly rather than GLSL."""
    return shader.find('.asm.') >= 0
def shader_is_invalid_spirv(shader):
    """True when the shader is deliberately invalid SPIR-V."""
    return shader.find('.invalid.') >= 0
def shader_is_legacy(shader):
    """True when the shader targets legacy GLSL (ES 100)."""
    return shader.find('.legacy.') >= 0
def shader_is_flatten_ubo(shader):
    """True when UBOs should be flattened."""
    return shader.find('.flatten.') >= 0
def shader_is_sso(shader):
    """True when separate shader objects are requested."""
    return shader.find('.sso.') >= 0
def shader_is_flatten_dimensions(shader):
    """True when multidimensional arrays should be flattened."""
    return shader.find('.flatten_dim.') >= 0
def shader_is_noopt(shader):
    """True when the optimized (spirv-opt) pass should be skipped."""
    return shader.find('.noopt.') >= 0
def test_shader(stats, shader, update, keep, opt):
joined_path = os.path.join(shader[0], shader[1])
vulkan = shader_is_vulkan(shader[1])
desktop = shader_is_desktop(shader[1])
eliminate = shader_is_eliminate_dead_variables(shader[1])
is_spirv = shader_is_spirv(shader[1])
invalid_spirv = shader_is_invalid_spirv(shader[1])
is_legacy = shader_is_legacy(shader[1])
flatten_ubo = shader_is_flatten_ubo(shader[1])
sso = shader_is_sso(shader[1])
flatten_dim = shader_is_flatten_dimensions(shader[1])
noopt = shader_is_noopt(shader[1])
print('Testing shader:', joined_path)
spirv, glsl, vulkan_glsl = cross_compile(joined_path, vulkan, is_spirv, invalid_spirv, eliminate, is_legacy, flatten_ubo, sso, flatten_dim, opt and (not noopt))
# Only test GLSL stats if we have a shader | |
'.6e'), '# cluPh1322'],
[1280, format(angle(scaled_wc('lu_1332')), '.6e'), '# cluPh1332'],
[1281, format(angle(scaled_wc('lu_1323')), '.6e'), '# cluPh1323'],
[1282, format(angle(scaled_wc('lu_1333')), '.6e'), '# cluPh1333'],
[1283, format(angle(scaled_wc('lu_2213')), '.6e'), '# cluPh2213'],
[1284, format(angle(scaled_wc('lu_2223')), '.6e'), '# cluPh2223'],
[1285, format(angle(scaled_wc('lu_2311')), '.6e'), '# cluPh2311'],
[1286, format(angle(scaled_wc('lu_2312')), '.6e'), '# cluPh2312'],
[1287, format(angle(scaled_wc('lu_2313')), '.6e'), '# cluPh2313'],
[1288, format(angle(scaled_wc('lu_2321')), '.6e'), '# cluPh2321'],
[1289, format(angle(scaled_wc('lu_2322')), '.6e'), '# cluPh2322'],
[1290, format(angle(scaled_wc('lu_2323')), '.6e'), '# cluPh2323'],
[1291, format(angle(scaled_wc('lu_2331')), '.6e'), '# cluPh2331'],
[1292, format(angle(scaled_wc('lu_2332')), '.6e'), '# cluPh2332'],
[1293, format(angle(scaled_wc('lu_2333')), '.6e'), '# cluPh2333'],
[1294, format(angle(scaled_wc('lu_3323')), '.6e'), '# cluPh3323'],
[1295, format(angle(scaled_wc('lu_3312')), '.6e'), '# cluPh3312'],
[1296, format(angle(scaled_wc('lu_3313')), '.6e'), '# cluPh3313'],
[1297, format(scaled_wc('ld_1111')* lambda_smeft_value**2, '.6e'), '# cldAbs1111'],
[1298, format(abs(scaled_wc('ld_1112'))* lambda_smeft_value**2, '.6e'), '# cldAbs1112'],
[1299, format(abs(scaled_wc('ld_1113'))* lambda_smeft_value**2, '.6e'), '# cldAbs1113'],
[1300, format(abs(scaled_wc('ld_1123'))* lambda_smeft_value**2, '.6e'), '# cldAbs1123'],
[1301, format(scaled_wc('ld_1122')* lambda_smeft_value**2, '.6e'), '# cldAbs1122'],
[1302, format(scaled_wc('ld_1133')* lambda_smeft_value**2, '.6e'), '# cldAbs1133'],
[1303, format(abs(scaled_wc('ld_1211'))* lambda_smeft_value**2, '.6e'), '# cldAbs1211'],
[1304, format(abs(scaled_wc('ld_1212'))* lambda_smeft_value**2, '.6e'), '# cldAbs1212'],
[1305, format(abs(scaled_wc('ld_1221'))* lambda_smeft_value**2, '.6e'), '# cldAbs1221'],
[1306, format(abs(scaled_wc('ld_1213'))* lambda_smeft_value**2, '.6e'), '# cldAbs1213'],
[1307, format(abs(scaled_wc('ld_1231'))* lambda_smeft_value**2, '.6e'), '# cldAbs1231'],
[1308, format(abs(scaled_wc('ld_1222'))* lambda_smeft_value**2, '.6e'), '# cldAbs1222'],
[1309, format(abs(scaled_wc('ld_1223'))* lambda_smeft_value**2, '.6e'), '# cldAbs1223'],
[1310, format(abs(scaled_wc('ld_1232'))* lambda_smeft_value**2, '.6e'), '# cldAbs1232'],
[1311, format(abs(scaled_wc('ld_1233'))* lambda_smeft_value**2, '.6e'), '# cldAbs1233'],
[1312, format(abs(scaled_wc('ld_1311'))* lambda_smeft_value**2, '.6e'), '# cldAbs1311'],
[1313, format(abs(scaled_wc('ld_1312'))* lambda_smeft_value**2, '.6e'), '# cldAbs1312'],
[1314, format(abs(scaled_wc('ld_1313'))* lambda_smeft_value**2, '.6e'), '# cldAbs1313'],
[1315, format(abs(scaled_wc('ld_1331'))* lambda_smeft_value**2, '.6e'), '# cldAbs1331'],
[1316, format(abs(scaled_wc('ld_1321'))* lambda_smeft_value**2, '.6e'), '# cldAbs1321'],
[1317, format(abs(scaled_wc('ld_1322'))* lambda_smeft_value**2, '.6e'), '# cldAbs1322'],
[1318, format(abs(scaled_wc('ld_1332'))* lambda_smeft_value**2, '.6e'), '# cldAbs1332'],
[1319, format(abs(scaled_wc('ld_1323'))* lambda_smeft_value**2, '.6e'), '# cldAbs1323'],
[1320, format(abs(scaled_wc('ld_1333'))* lambda_smeft_value**2, '.6e'), '# cldAbs1333'],
[1321, format(scaled_wc('ld_2211')* lambda_smeft_value**2, '.6e'), '# cldAbs2211'],
[1322, format(abs(scaled_wc('ld_2212'))* lambda_smeft_value**2, '.6e'), '# cldAbs2212'],
[1323, format(abs(scaled_wc('ld_2213'))* lambda_smeft_value**2, '.6e'), '# cldAbs2213'],
[1324, format(scaled_wc('ld_2222')* lambda_smeft_value**2, '.6e'), '# cldAbs2222'],
[1325, format(abs(scaled_wc('ld_2223'))* lambda_smeft_value**2, '.6e'), '# cldAbs2223'],
[1326, format(scaled_wc('ld_2233')* lambda_smeft_value**2, '.6e'), '# cldAbs2233'],
[1327, format(abs(scaled_wc('ld_2311'))* lambda_smeft_value**2, '.6e'), '# cldAbs2311'],
[1328, format(abs(scaled_wc('ld_2312'))* lambda_smeft_value**2, '.6e'), '# cldAbs2312'],
[1329, format(abs(scaled_wc('ld_2313'))* lambda_smeft_value**2, '.6e'), '# cldAbs2313'],
[1330, format(abs(scaled_wc('ld_2321'))* lambda_smeft_value**2, '.6e'), '# cldAbs2321'],
[1331, format(abs(scaled_wc('ld_2322'))* lambda_smeft_value**2, '.6e'), '# cldAbs2322'],
[1332, format(abs(scaled_wc('ld_2323'))* lambda_smeft_value**2, '.6e'), '# cldAbs2323'],
[1333, format(abs(scaled_wc('ld_2331'))* lambda_smeft_value**2, '.6e'), '# cldAbs2331'],
[1334, format(abs(scaled_wc('ld_2332'))* lambda_smeft_value**2, '.6e'), '# cldAbs2332'],
[1335, format(abs(scaled_wc('ld_2333'))* lambda_smeft_value**2, '.6e'), '# cldAbs2333'],
[1336, format(scaled_wc('ld_3311')* lambda_smeft_value**2, '.6e'), '# cldAbs3311'],
[1337, format(abs(scaled_wc('ld_3312'))* lambda_smeft_value**2, '.6e'), '# cldAbs3312'],
[1338, format(abs(scaled_wc('ld_3313'))* lambda_smeft_value**2, '.6e'), '# cldAbs3313'],
[1339, format(scaled_wc('ld_3322')* lambda_smeft_value**2, '.6e'), '# cldAbs3322'],
[1340, format(scaled_wc('ld_3333')* lambda_smeft_value**2, '.6e'), '# cldAbs3333'],
[1341, format(abs(scaled_wc('ld_3323'))* lambda_smeft_value**2, '.6e'), '# cldAbs3323'],
[1342, format(angle(scaled_wc('ld_1112')), '.6e'), '# cldPh1112'],
[1343, format(angle(scaled_wc('ld_2212')), '.6e'), '# cldPh2212'],
[1344, format(angle(scaled_wc('ld_1113')), '.6e'), '# cldPh1113'],
[1345, format(angle(scaled_wc('ld_1123')), '.6e'), '# cldPh1123'],
[1346, format(angle(scaled_wc('ld_1211')), '.6e'), '# cldPh1211'],
[1347, format(angle(scaled_wc('ld_1212')), '.6e'), '# cldPh1212'],
[1348, format(angle(scaled_wc('ld_1221')), '.6e'), '# cldPh1221'],
[1349, format(angle(scaled_wc('ld_1213')), '.6e'), '# cldPh1213'],
[1350, format(angle(scaled_wc('ld_1231')), '.6e'), '# cldPh1231'],
[1351, format(angle(scaled_wc('ld_1222')), '.6e'), '# cldPh1222'],
[1352, format(angle(scaled_wc('ld_1223')), '.6e'), '# cldPh1223'],
[1353, format(angle(scaled_wc('ld_1232')), '.6e'), '# cldPh1232'],
[1354, format(angle(scaled_wc('ld_1233')), '.6e'), '# cldPh1233'],
[1355, format(angle(scaled_wc('ld_1311')), '.6e'), '# cldPh1311'],
[1356, format(angle(scaled_wc('ld_1312')), '.6e'), '# cldPh1312'],
[1357, format(angle(scaled_wc('ld_1313')), '.6e'), '# cldPh1313'],
[1358, format(angle(scaled_wc('ld_1331')), '.6e'), '# cldPh1331'],
[1359, format(angle(scaled_wc('ld_1321')), '.6e'), '# cldPh1321'],
[1360, format(angle(scaled_wc('ld_1322')), '.6e'), '# cldPh1322'],
[1361, format(angle(scaled_wc('ld_1332')), '.6e'), '# cldPh1332'],
[1362, format(angle(scaled_wc('ld_1323')), '.6e'), '# cldPh1323'],
[1363, format(angle(scaled_wc('ld_1333')), '.6e'), '# cldPh1333'],
[1364, format(angle(scaled_wc('ld_2213')), '.6e'), '# cldPh2213'],
[1365, format(angle(scaled_wc('ld_2223')), '.6e'), '# cldPh2223'],
[1366, format(angle(scaled_wc('ld_2311')), '.6e'), '# cldPh2311'],
[1367, format(angle(scaled_wc('ld_2312')), '.6e'), '# cldPh2312'],
[1368, format(angle(scaled_wc('ld_2313')), '.6e'), '# cldPh2313'],
[1369, format(angle(scaled_wc('ld_2321')), '.6e'), '# cldPh2321'],
[1370, format(angle(scaled_wc('ld_2322')), '.6e'), '# cldPh2322'],
[1371, format(angle(scaled_wc('ld_2323')), '.6e'), '# cldPh2323'],
[1372, format(angle(scaled_wc('ld_2331')), '.6e'), '# cldPh2331'],
[1373, format(angle(scaled_wc('ld_2332')), '.6e'), '# cldPh2332'],
[1374, format(angle(scaled_wc('ld_2333')), '.6e'), '# cldPh2333'],
[1375, format(angle(scaled_wc('ld_3323')), '.6e'), '# cldPh3323'],
[1376, format(angle(scaled_wc('ld_3312')), '.6e'), '# cldPh3312'],
[1377, format(angle(scaled_wc('ld_3313')), '.6e'), '# cldPh3313'],
[1378, format(scaled_wc('qe_1111')* lambda_smeft_value**2, '.6e'), '# cqeAbs1111'],
[1379, format(abs(scaled_wc('qe_1112'))* lambda_smeft_value**2, '.6e'), '# cqeAbs1112'],
[1380, format(abs(scaled_wc('qe_1113'))* lambda_smeft_value**2, '.6e'), '# cqeAbs1113'],
[1381, format(abs(scaled_wc('qe_1123'))* lambda_smeft_value**2, '.6e'), '# cqeAbs1123'],
[1382, format(scaled_wc('qe_1122')* lambda_smeft_value**2, '.6e'), '# cqeAbs1122'],
[1383, format(scaled_wc('qe_1133')* lambda_smeft_value**2, '.6e'), '# cqeAbs1133'],
[1384, format(abs(scaled_wc('qe_1211'))* lambda_smeft_value**2, '.6e'), '# cqeAbs1211'],
[1385, format(abs(scaled_wc('qe_1212'))* lambda_smeft_value**2, '.6e'), '# cqeAbs1212'],
[1386, format(abs(scaled_wc('qe_1221'))* lambda_smeft_value**2, '.6e'), '# cqeAbs1221'],
[1387, format(abs(scaled_wc('qe_1213'))* lambda_smeft_value**2, '.6e'), '# cqeAbs1213'],
[1388, format(abs(scaled_wc('qe_1231'))* lambda_smeft_value**2, '.6e'), '# cqeAbs1231'],
[1389, format(abs(scaled_wc('qe_1222'))* lambda_smeft_value**2, '.6e'), '# cqeAbs1222'],
[1390, format(abs(scaled_wc('qe_1223'))* lambda_smeft_value**2, '.6e'), '# cqeAbs1223'],
[1391, format(abs(scaled_wc('qe_1232'))* lambda_smeft_value**2, '.6e'), '# cqeAbs1232'],
[1392, format(abs(scaled_wc('qe_1233'))* lambda_smeft_value**2, '.6e'), '# cqeAbs1233'],
[1393, format(abs(scaled_wc('qe_1311'))* lambda_smeft_value**2, '.6e'), '# cqeAbs1311'],
[1394, format(abs(scaled_wc('qe_1312'))* lambda_smeft_value**2, '.6e'), '# cqeAbs1312'],
[1395, format(abs(scaled_wc('qe_1313'))* lambda_smeft_value**2, '.6e'), '# cqeAbs1313'],
[1396, format(abs(scaled_wc('qe_1331'))* lambda_smeft_value**2, '.6e'), '# cqeAbs1331'],
[1397, format(abs(scaled_wc('qe_1321'))* lambda_smeft_value**2, '.6e'), '# cqeAbs1321'],
[1398, format(abs(scaled_wc('qe_1322'))* lambda_smeft_value**2, '.6e'), '# cqeAbs1322'],
[1399, format(abs(scaled_wc('qe_1332'))* lambda_smeft_value**2, '.6e'), '# cqeAbs1332'],
[1400, format(abs(scaled_wc('qe_1323'))* lambda_smeft_value**2, '.6e'), '# cqeAbs1323'],
[1401, format(abs(scaled_wc('qe_1333'))* lambda_smeft_value**2, '.6e'), '# cqeAbs1333'],
[1402, format(scaled_wc('qe_2211')* lambda_smeft_value**2, '.6e'), '# cqeAbs2211'],
[1403, format(abs(scaled_wc('qe_2212'))* lambda_smeft_value**2, '.6e'), '# cqeAbs2212'],
[1404, format(abs(scaled_wc('qe_2213'))* lambda_smeft_value**2, '.6e'), '# cqeAbs2213'],
[1405, format(scaled_wc('qe_2222')* lambda_smeft_value**2, '.6e'), '# cqeAbs2222'],
[1406, format(abs(scaled_wc('qe_2223'))* lambda_smeft_value**2, '.6e'), '# cqeAbs2223'],
[1407, format(scaled_wc('qe_2233')* lambda_smeft_value**2, '.6e'), '# cqeAbs2233'],
[1408, format(abs(scaled_wc('qe_2311'))* lambda_smeft_value**2, '.6e'), '# cqeAbs2311'],
[1409, format(abs(scaled_wc('qe_2312'))* lambda_smeft_value**2, '.6e'), '# cqeAbs2312'],
[1410, format(abs(scaled_wc('qe_2313'))* lambda_smeft_value**2, '.6e'), '# cqeAbs2313'],
[1411, format(abs(scaled_wc('qe_2321'))* lambda_smeft_value**2, '.6e'), '# cqeAbs2321'],
[1412, format(abs(scaled_wc('qe_2322'))* lambda_smeft_value**2, '.6e'), '# cqeAbs2322'],
[1413, format(abs(scaled_wc('qe_2323'))* lambda_smeft_value**2, '.6e'), '# cqeAbs2323'],
[1414, format(abs(scaled_wc('qe_2331'))* lambda_smeft_value**2, '.6e'), '# cqeAbs2331'],
[1415, format(abs(scaled_wc('qe_2332'))* lambda_smeft_value**2, '.6e'), '# cqeAbs2332'],
[1416, format(abs(scaled_wc('qe_2333'))* lambda_smeft_value**2, '.6e'), '# cqeAbs2333'],
[1417, format(scaled_wc('qe_3311')* lambda_smeft_value**2, '.6e'), '# cqeAbs3311'],
[1418, format(abs(scaled_wc('qe_3312'))* lambda_smeft_value**2, '.6e'), '# cqeAbs3312'],
[1419, format(abs(scaled_wc('qe_3313'))* lambda_smeft_value**2, '.6e'), '# cqeAbs3313'],
[1420, format(scaled_wc('qe_3322')* lambda_smeft_value**2, '.6e'), '# cqeAbs3322'],
[1421, format(scaled_wc('qe_3333')* lambda_smeft_value**2, '.6e'), '# cqeAbs3333'],
[1422, format(abs(scaled_wc('qe_3323'))* lambda_smeft_value**2, '.6e'), '# cqeAbs3323'],
[1423, format(angle(scaled_wc('qe_1112')), '.6e'), '# cqePh1112'],
[1424, format(angle(scaled_wc('qe_2212')), '.6e'), '# cqePh2212'],
[1425, format(angle(scaled_wc('qe_1113')), '.6e'), '# cqePh1113'],
[1426, format(angle(scaled_wc('qe_1123')), '.6e'), '# cqePh1123'],
[1427, format(angle(scaled_wc('qe_1211')), '.6e'), '# cqePh1211'],
[1428, format(angle(scaled_wc('qe_1212')), '.6e'), '# cqePh1212'],
[1429, format(angle(scaled_wc('qe_1221')), '.6e'), '# cqePh1221'],
[1430, format(angle(scaled_wc('qe_1213')), '.6e'), '# cqePh1213'],
[1431, format(angle(scaled_wc('qe_1231')), '.6e'), '# cqePh1231'],
[1432, format(angle(scaled_wc('qe_1222')), '.6e'), '# cqePh1222'],
[1433, format(angle(scaled_wc('qe_1223')), '.6e'), '# cqePh1223'],
[1434, format(angle(scaled_wc('qe_1232')), '.6e'), '# cqePh1232'],
[1435, format(angle(scaled_wc('qe_1233')), '.6e'), '# cqePh1233'],
[1436, format(angle(scaled_wc('qe_1311')), '.6e'), '# cqePh1311'],
[1437, format(angle(scaled_wc('qe_1312')), '.6e'), '# cqePh1312'],
[1438, format(angle(scaled_wc('qe_1313')), '.6e'), '# cqePh1313'],
[1439, format(angle(scaled_wc('qe_1331')), '.6e'), '# cqePh1331'],
[1440, format(angle(scaled_wc('qe_1321')), '.6e'), '# cqePh1321'],
[1441, format(angle(scaled_wc('qe_1322')), '.6e'), '# cqePh1322'],
[1442, format(angle(scaled_wc('qe_1332')), '.6e'), '# cqePh1332'],
[1443, format(angle(scaled_wc('qe_1323')), '.6e'), '# cqePh1323'],
[1444, format(angle(scaled_wc('qe_1333')), '.6e'), '# cqePh1333'],
[1445, format(angle(scaled_wc('qe_2213')), '.6e'), '# cqePh2213'],
[1446, format(angle(scaled_wc('qe_2223')), '.6e'), '# cqePh2223'],
[1447, format(angle(scaled_wc('qe_2311')), '.6e'), '# cqePh2311'],
[1448, format(angle(scaled_wc('qe_2312')), '.6e'), '# cqePh2312'],
[1449, format(angle(scaled_wc('qe_2313')), '.6e'), '# cqePh2313'],
[1450, format(angle(scaled_wc('qe_2321')), '.6e'), '# cqePh2321'],
[1451, format(angle(scaled_wc('qe_2322')), '.6e'), '# cqePh2322'],
[1452, format(angle(scaled_wc('qe_2323')), '.6e'), '# cqePh2323'],
[1453, format(angle(scaled_wc('qe_2331')), '.6e'), '# cqePh2331'],
[1454, format(angle(scaled_wc('qe_2332')), '.6e'), '# cqePh2332'],
[1455, format(angle(scaled_wc('qe_2333')), '.6e'), '# cqePh2333'],
[1456, format(angle(scaled_wc('qe_3323')), '.6e'), '# cqePh3323'],
[1457, format(angle(scaled_wc('qe_3312')), '.6e'), '# cqePh3312'],
[1458, format(angle(scaled_wc('qe_3313')), '.6e'), '# cqePh3313'],
]}
card['Block']['FRBlock10'] = {'values': [
[1, 1, format(angle(scaled_wc('uphi_11')), '.6e'), '# cuHPh1x1'],
[1, 2, format(angle(scaled_wc('uphi_12')), '.6e'), '# cuHPh1x2'],
[1, 3, format(angle(scaled_wc('uphi_13')), '.6e'), '# cuHPh1x3'],
[2, 1, format(angle(scaled_wc('uphi_21')), '.6e'), '# cuHPh2x1'],
[2, 2, format(angle(scaled_wc('uphi_22')), '.6e'), '# cuHPh2x2'],
[2, 3, format(angle(scaled_wc('uphi_23')), '.6e'), '# cuHPh2x3'],
[3, 1, format(angle(scaled_wc('uphi_31')), '.6e'), '# cuHPh3x1'],
[3, 2, format(angle(scaled_wc('uphi_32')), '.6e'), '# cuHPh3x2'],
[3, 3, format(angle(scaled_wc('uphi_33')), '.6e'), '# cuHPh3x3'],
]}
card['Block']['FRBlock11'] = {'values': [
[1, 1, format(angle(scaled_wc('dphi_11')), '.6e'), '# cdHPh1x1'],
[1, 2, format(angle(scaled_wc('dphi_12')), '.6e'), '# cdHPh1x2'],
[1, 3, format(angle(scaled_wc('dphi_13')), '.6e'), '# cdHPh1x3'],
[2, 1, format(angle(scaled_wc('dphi_21')), '.6e'), '# cdHPh2x1'],
[2, 2, format(angle(scaled_wc('dphi_22')), '.6e'), '# cdHPh2x2'],
[2, 3, format(angle(scaled_wc('dphi_23')), '.6e'), '# cdHPh2x3'],
[3, 1, format(angle(scaled_wc('dphi_31')), '.6e'), '# cdHPh3x1'],
[3, 2, format(angle(scaled_wc('dphi_32')), '.6e'), '# cdHPh3x2'],
[3, 3, format(angle(scaled_wc('dphi_33')), '.6e'), '# cdHPh3x3'],
]}
card['Block']['FRBlock15'] = {'values': [
[1, 1, format(abs(scaled_wc('eW_11'))* lambda_smeft_value**2, '.6e'), '# ceWAbs1x1'],
[1, 2, format(abs(scaled_wc('eW_12'))* lambda_smeft_value**2, '.6e'), '# ceWAbs1x2'],
[1, 3, format(abs(scaled_wc('eW_13'))* lambda_smeft_value**2, '.6e'), '# ceWAbs1x3'],
[2, 1, format(abs(scaled_wc('eW_21'))* lambda_smeft_value**2, '.6e'), '# ceWAbs2x1'],
[2, 2, format(abs(scaled_wc('eW_22'))* lambda_smeft_value**2, '.6e'), '# ceWAbs2x2'],
[2, 3, format(abs(scaled_wc('eW_23'))* lambda_smeft_value**2, '.6e'), '# ceWAbs2x3'],
[3, 1, format(abs(scaled_wc('eW_31'))* lambda_smeft_value**2, '.6e'), '# ceWAbs3x1'],
[3, 2, format(abs(scaled_wc('eW_32'))* lambda_smeft_value**2, '.6e'), '# ceWAbs3x2'],
[3, 3, format(abs(scaled_wc('eW_33'))* lambda_smeft_value**2, '.6e'), '# ceWAbs3x3'],
]}
card['Block']['FRBlock16'] = {'values': [
[1, 1, format(angle(scaled_wc('eW_11')), '.6e'), '# ceWPh1x1'],
[1, 2, format(angle(scaled_wc('eW_12')), '.6e'), '# ceWPh1x2'],
[1, 3, format(angle(scaled_wc('eW_13')), '.6e'), '# ceWPh1x3'],
[2, 1, format(angle(scaled_wc('eW_21')), '.6e'), '# ceWPh2x1'],
[2, 2, format(angle(scaled_wc('eW_22')), '.6e'), '# ceWPh2x2'],
[2, 3, format(angle(scaled_wc('eW_23')), '.6e'), '# ceWPh2x3'],
[3, 1, format(angle(scaled_wc('eW_31')), '.6e'), '# ceWPh3x1'],
[3, 2, format(angle(scaled_wc('eW_32')), '.6e'), '# ceWPh3x2'],
[3, 3, format(angle(scaled_wc('eW_33')), '.6e'), '# ceWPh3x3'],
]}
card['Block']['FRBlock18'] = {'values': [
[1, 1, format(abs(scaled_wc('eB_11'))* lambda_smeft_value**2, '.6e'), '# ceBAbs1x1'],
[1, 2, format(abs(scaled_wc('eB_12'))* lambda_smeft_value**2, '.6e'), '# ceBAbs1x2'],
[1, 3, format(abs(scaled_wc('eB_13'))* lambda_smeft_value**2, '.6e'), '# ceBAbs1x3'],
[2, 1, format(abs(scaled_wc('eB_21'))* lambda_smeft_value**2, '.6e'), '# ceBAbs2x1'],
[2, 2, format(abs(scaled_wc('eB_22'))* lambda_smeft_value**2, '.6e'), '# ceBAbs2x2'],
[2, 3, format(abs(scaled_wc('eB_23'))* lambda_smeft_value**2, '.6e'), '# ceBAbs2x3'],
[3, 1, format(abs(scaled_wc('eB_31'))* lambda_smeft_value**2, '.6e'), '# ceBAbs3x1'],
[3, 2, format(abs(scaled_wc('eB_32'))* lambda_smeft_value**2, '.6e'), '# ceBAbs3x2'],
[3, 3, format(abs(scaled_wc('eB_33'))* lambda_smeft_value**2, '.6e'), '# ceBAbs3x3'],
]}
card['Block']['FRBlock19'] = {'values': [
[1, 1, format(angle(scaled_wc('eB_11')), '.6e'), '# ceBPh1x1'],
[1, 2, format(angle(scaled_wc('eB_12')), '.6e'), '# ceBPh1x2'],
[1, 3, format(angle(scaled_wc('eB_13')), '.6e'), '# ceBPh1x3'],
[2, 1, format(angle(scaled_wc('eB_21')), '.6e'), '# ceBPh2x1'],
[2, 2, format(angle(scaled_wc('eB_22')), '.6e'), '# ceBPh2x2'],
[2, 3, format(angle(scaled_wc('eB_23')), '.6e'), '# ceBPh2x3'],
[3, 1, format(angle(scaled_wc('eB_31')), '.6e'), '# ceBPh3x1'],
[3, 2, format(angle(scaled_wc('eB_32')), '.6e'), '# ceBPh3x2'],
[3, 3, format(angle(scaled_wc('eB_33')), '.6e'), '# ceBPh3x3'],
]}
card['Block']['FRBlock21'] = {'values': [
[1, 1, format(abs(scaled_wc('uG_11'))* lambda_smeft_value**2, '.6e'), '# cuGAbs1x1'],
[1, 2, format(abs(scaled_wc('uG_12'))* lambda_smeft_value**2, '.6e'), '# cuGAbs1x2'],
[1, 3, format(abs(scaled_wc('uG_13'))* lambda_smeft_value**2, '.6e'), '# cuGAbs1x3'],
[2, 1, format(abs(scaled_wc('uG_21'))* lambda_smeft_value**2, '.6e'), '# cuGAbs2x1'],
[2, 2, format(abs(scaled_wc('uG_22'))* lambda_smeft_value**2, '.6e'), '# cuGAbs2x2'],
[2, 3, format(abs(scaled_wc('uG_23'))* lambda_smeft_value**2, '.6e'), '# cuGAbs2x3'],
[3, 1, format(abs(scaled_wc('uG_31'))* lambda_smeft_value**2, '.6e'), '# cuGAbs3x1'],
[3, 2, format(abs(scaled_wc('uG_32'))* lambda_smeft_value**2, '.6e'), '# cuGAbs3x2'],
[3, 3, format(abs(scaled_wc('uG_33'))* lambda_smeft_value**2, '.6e'), '# cuGAbs3x3'],
]}
card['Block']['FRBlock22'] = {'values': [
[1, 1, format(angle(scaled_wc('uG_11')), '.6e'), '# cuGPh1x1'],
[1, 2, format(angle(scaled_wc('uG_12')), '.6e'), '# cuGPh1x2'],
[1, 3, format(angle(scaled_wc('uG_13')), '.6e'), '# cuGPh1x3'],
[2, 1, format(angle(scaled_wc('uG_21')), '.6e'), '# cuGPh2x1'],
[2, 2, | |
for path: ['S', 'W', 'W', 'W', 'S', 'E', 'E', 'S', 'E', 'S', 'W', 'W', 'W', 'S', 'S', 'E']
# D4 E1 E4 F1 G1 I4 J4
# Score 6 for path: ['E', 'S', 'E', 'S', 'S', 'W', 'W', 'W', 'W', 'W', 'N', 'E', 'N', 'W', 'N', 'E']
# D2 D4 D7 E7 F5 H7
# Score 6 for path: ['E', 'S', 'W', 'W', 'W', 'S', 'S', 'E', 'E', 'S', 'W', 'W', 'W', 'S', 'S', 'E']
# D2 D4 E4 F1 F4 I2
# Score 6 for path: ['E', 'S', 'W', 'W', 'W', 'S', 'E', 'S', 'E', 'E', 'S', 'W', 'W', 'W', 'S', 'S']
# C2 D4 E1 E4 F1 I4
# Score 6 for path: ['E', 'E', 'S', 'S', 'S', 'W', 'W', 'W', 'W', 'W', 'N', 'E', 'N', 'W', 'N', 'E']
# D2 D4 D5 D7 F5 H7
# Score 6 for path: ['W', 'S', 'E', 'E', 'S', 'S', 'W', 'W', 'W', 'W', 'W', 'N', 'N', 'N', 'E', 'E']
# E2 E7 F2 F7 G5 H5
# Score 7 for path: ['W', 'S', 'W', 'W', 'S', 'S', 'E', 'E', 'S', 'W', 'W', 'W', 'S', 'S', 'E', 'E']
# E2 E4 F4 G1 G4 H1 J2
# Score 6 for path: ['W', 'S', 'W', 'W', 'S', 'E', 'S', 'E', 'S', 'W', 'W', 'W', 'S', 'S', 'E', 'E']
# E4 F1 F4 G1 H1 J4
# Score 6 for path: ['W', 'S', 'W', 'W', 'S', 'E', 'S', 'E', 'E', 'S', 'W', 'W', 'W', 'S', 'S', 'E']
# D2 E4 F1 F4 G1 J4
# Score 6 for path: ['N', 'N', 'N', 'E', 'S', 'E', 'S', 'S', 'E', 'E', 'S', 'S', 'S', 'W', 'W', 'W']
# A7 B4 B5 D5 E5 F7
# Score 6 for path: ['S', 'S', 'S', 'E', 'E', 'N', 'N', 'N', 'E', 'N', 'N', 'W', 'W', 'W', 'N', 'E']
# A5 A7 B4 B5 D5 F5
# Score 6 for path: ['S', 'S', 'E', 'S', 'E', 'N', 'N', 'N', 'E', 'N', 'N', 'W', 'W', 'W', 'N', 'E']
# A5 A7 B4 B5 D5 D6
# Score 6 for path: ['S', 'S', 'E', 'S', 'E', 'N', 'N', 'N', 'E', 'N', 'W', 'W', 'W', 'N', 'N', 'E']
# A6 A7 B4 D5 D6 G4
# Score 6 for path: ['N', 'E', 'N', 'W', 'W', 'W', 'N', 'E', 'N', 'N', 'N', 'E', 'E', 'S', 'S', 'S']
# C8 C9 E9 E10 F7 H7
# Score 6 for path: ['W', 'W', 'W', 'N', 'N', 'E', 'E', 'N', 'E', 'N', 'N', 'N', 'E', 'S', 'S', 'S']
# D8 E7 E8 F7 I8 I10
# Score 6 for path: ['N', 'N', 'N', 'E', 'N', 'N', 'N', 'E', 'E', 'S', 'S', 'S', 'S', 'S', 'S', 'E']
# A8 C8 C9 C10 F7 F9
# Score 7 for path: ['N', 'N', 'N', 'E', 'N', 'N', 'N', 'E', 'E', 'S', 'S', 'S', 'S', 'S', 'E', 'S']
# A8 A9 C9 C10 D7 F7 F9
# Score 6 for path: ['N', 'N', 'N', 'E', 'N', 'N', 'N', 'E', 'E', 'S', 'S', 'S', 'S', 'E', 'S', 'S']
# A9 C10 D7 D8 F7 F9
# Score 6 for path: ['N', 'N', 'N', 'E', 'S', 'E', 'S', 'S', 'E', 'N', 'N', 'N', 'N', 'N', 'E', 'N']
# A7 A8 C9 C10 F7 F8
# Score 6 for path: ['N', 'N', 'N', 'E', 'S', 'E', 'S', 'S', 'E', 'N', 'N', 'N', 'N', 'E', 'N', 'N']
# A7 A8 C9 C10 F7 F8
# Score 6 for path: ['N', 'N', 'E', 'N', 'N', 'N', 'N', 'E', 'E', 'S', 'S', 'S', 'S', 'S', 'E', 'S']
# A8 A9 C9 C10 D7 F7
# Score 6 for path: ['S', 'E', 'S', 'S', 'S', 'S', 'S', 'E', 'E', 'N', 'N', 'N', 'E', 'N', 'N', 'N']
# A1 A2 B4 C1 C2 E2
# checkExhaustivePathsWithAutoRepairPatterns(20, 4)
# Score 4 for path: ['S', 'S', 'S', 'E', 'S', 'E', 'E', 'S', 'S', 'W', 'W', 'W', 'W', 'N', 'W', 'W', 'N', 'N', 'E', 'E']
# D4 E2 F2 G2
# Score 4 for path: ['S', 'S', 'S', 'E', 'S', 'E', 'E', 'S', 'S', 'W', 'W', 'W', 'W', 'W', 'N', 'W', 'N', 'N', 'E', 'E']
# D4 E2 F2 G2
# Score 4 for path: ['S', 'S', 'S', 'E', 'S', 'E', 'E', 'S', 'S', 'W', 'W', 'W', 'W', 'W', 'W', 'N', 'N', 'N', 'E', 'E']
# D4 E2 F2 G2
# Score 4 for path: ['S', 'S', 'S', 'E', 'E', 'S', 'E', 'S', 'S', 'W', 'W', 'W', 'W', 'N', 'W', 'W', 'N', 'N', 'E', 'E']
# D4 E2 F2 G2
# Score 4 for path: ['S', 'S', 'S', 'E', 'E', 'S', 'E', 'S', 'S', 'W', 'W', 'W', 'W', 'W', 'N', 'W', 'N', 'N', 'E', 'E']
# D4 E2 F2 G2
# Score 4 for path: ['S', 'S', 'S', 'E', 'E', 'S', 'E', 'S', 'S', 'W', 'W', 'W', 'W', 'W', 'W', 'N', 'N', 'N', 'E', 'E']
# D4 E2 F2 G2
# Score 4 for path: ['S', 'E', 'S', 'S', 'E', 'E', 'S', 'S', 'S', 'W', 'W', 'W', 'W', 'W', 'N', 'E', 'N', 'W', 'N', 'E']
# C1 C2 E2 G4
# Score 4 for path: ['E', 'N', 'W', 'N', 'E', 'N', 'W', 'W', 'W', 'W', 'W', 'S', 'S', 'S', 'E', 'E', 'S', 'S', 'E', 'S']
# E5 F5 F6 I4
# Score 4 for path: ['S', 'E', 'E', 'S', 'S', 'W', 'W', 'W', 'W', 'N', 'W', 'N', 'W', 'N', 'E', 'E', 'N', 'N', 'E', 'N']
# E7 F5 G5 H7
# Score 4 for path: ['S', 'E', 'E', 'S', 'S', 'W', 'W', 'W', 'W', 'N', 'W', 'N', 'W', 'N', 'E', 'E', 'N', 'E', 'N', 'N']
# E7 F5 G5 H7
# Score 4 for path: ['S', 'E', 'E', 'S', 'S', 'W', 'W', 'W', 'W', 'N', 'W', 'W', 'N', 'N', 'E', 'E', 'N', 'N', 'E', 'N']
# E7 F5 G5 H5
# Score 4 for path: ['S', 'E', 'E', 'S', 'S', 'W', 'W', 'W', 'W', 'N', 'W', 'W', 'N', 'N', 'E', 'E', 'N', 'E', 'N', 'N']
# E7 F5 G5 H5
# Score 4 for path: ['S', 'E', 'E', 'S', 'S', 'W', 'W', 'W', 'W', 'N', 'W', 'W', 'N', 'N', 'E', 'E', 'E', 'N', 'N', 'N']
# E7 F5 G5 H5
# Score 4 for path: ['S', 'E', 'E', 'S', 'S', 'W', 'W', 'W', 'W', 'W', 'N', 'N', 'W', 'N', 'E', 'E', 'N', 'N', 'E', 'N']
# E7 F5 G5 H7
# Score 4 for path: ['S', 'E', 'E', 'S', 'S', 'W', 'W', 'W', 'W', 'W', 'N', 'N', 'W', 'N', 'E', 'E', 'N', 'E', 'N', 'N']
# E7 F5 G5 H7
# Score 4 for path: ['S', 'E', 'E', 'S', 'S', 'W', 'W', 'W', 'W', 'W', 'N', 'E', 'N', 'W', 'N', 'E', 'N', 'N', 'E', 'N']
# D4 E7 F5 H7
# Score 4 for path: ['S', 'E', 'E', 'S', 'S', 'W', 'W', 'W', 'W', 'W', 'N', 'E', 'N', 'W', 'N', 'E', 'N', 'E', 'N', 'N']
# D4 E7 F5 H7
# Score 4 for path: ['S', 'E', 'E', 'S', 'S', 'W', 'W', 'W', 'W', 'W', 'N', 'W', 'N', 'N', 'E', 'E', 'N', 'N', 'E', 'N']
# E7 F5 G5 H5
# Score 4 for path: ['S', 'E', 'E', 'S', 'S', 'W', 'W', 'W', 'W', 'W', 'N', 'W', 'N', 'N', 'E', 'E', 'N', 'E', 'N', 'N']
# E7 F5 G5 H5
# Score 4 for path: ['S', 'E', 'E', 'S', 'S', 'W', 'W', 'W', 'W', 'W', 'N', 'W', 'N', 'N', 'E', 'E', 'E', 'N', 'N', 'N']
# E7 F5 G5 H5
# Score 4 for path: ['S', 'E', 'E', 'S', 'S', 'W', 'W', 'W', 'W', 'W', 'W', 'N', 'N', 'N', 'E', 'E', 'N', 'N', 'E', 'N']
# E7 F5 G5 H5
# Score 4 for path: ['S', 'E', 'E', 'S', 'S', 'W', 'W', 'W', 'W', 'W', 'W', 'N', 'N', 'N', 'E', 'E', 'N', 'E', 'N', 'N']
# E7 F5 G5 H5
# Score 4 for path: ['S', 'E', 'E', 'S', 'S', 'W', 'W', 'W', 'W', 'W', 'W', 'N', 'N', 'N', 'E', 'E', 'E', 'N', 'N', 'N']
# E7 F5 G5 H5
# Score 4 for path: ['E', 'S', 'E', 'S', 'S', 'W', 'W', 'W', 'W', 'N', 'W', 'N', 'W', 'N', 'E', 'E', 'N', 'N', 'E', 'N']
# E7 F5 G5 H7
# Score 4 for path: ['E', 'S', 'E', 'S', 'S', 'W', 'W', 'W', 'W', 'N', 'W', 'N', 'W', 'N', 'E', 'E', 'N', 'E', 'N', 'N']
# E7 F5 G5 H7
# Score 4 for path: ['E', | |
+ database._cacheDB['ftpd'][i]._sub_id + "\", "
particle = particle + "\"super_id\" : \"" + database._cacheDB['ftpd'][i]._super_id + "\"}, "
particles = particles + particle
particles = particles[0:(len(particles)-2)]
result = "{ \"retrunValue\" : \"True\", \"data\" : " + particles +" } }"
#
#
#
if (command[1] == 'netflow'):
if (len( database._cacheDB['netflow']) == 0):
result = "{ \"retrunValue\" : \"True\", \"data\" : \"NONE\" }"
else:
for i in database._cacheDB['netflow']:
particle = "\"" + str(database._cacheDB['netflow'][i]._ident) + "\" : "
particle = particle + "{ \"identifier\" : \"" + str(database._cacheDB['netflow'][i]._ident) + "\", "
particle = particle + "\"datetime\" : \"" + database._cacheDB['netflow'][i]._datatime + "\", "
particle = particle + "\"duration\" : \"" + database._cacheDB['netflow'][i]._duration + "\", "
particle = particle + "\"protocol\" : \"" + database._cacheDB['netflow'][i]._protocol + "\", "
particle = particle + "\"sourceip\" : \"" + database._cacheDB['netflow'][i]._sourceip + "\", "
particle = particle + "\"sourceport\" : \"" + database._cacheDB['netflow'][i]._sourceport + "\", "
particle = particle + "\"destinationip\" : \"" + database._cacheDB['netflow'][i]._destinationip + "\", "
particle = particle + "\"destport\" : \"" + database._cacheDB['netflow'][i]._destport + "\", "
particle = particle + "\"packets\" : \"" + database._cacheDB['netflow'][i]._packets + "\", "
particle = particle + "\"pbytes\" : \"" + database._cacheDB['netflow'][i]._pbytes + "\", "
#
particle = particle + "\"subtype_id\" : \"" + database._cacheDB['netflow'][i]._sub_id + "\", "
particle = particle + "\"super_id\" : \"" + database._cacheDB['netflow'][i]._super_id + "\"}, "
particles = particles + particle
particles = particles[0:(len(particles)-2)]
result = "{ \"retrunValue\" : \"True\", \"data\" : " + particles +" } }"
#
#
#
if (command[1] == 'evt'):
if (len( database._cacheDB['evt']) == 0):
result = "{ \"retrunValue\" : \"True\", \"data\" : \"NONE\" }"
else:
for i in database._cacheDB['evt']:
particle = "\"" + str(database._cacheDB['evt'][i]._ident) + "\" : "
particle = particle + "{ \"identifier\" : \"" + str(database._cacheDB['evt'][i]._ident) + "\", "
particle = particle + "\"datetime\" : \"" + database._cacheDB['evt'][i]._datatime + "\", "
particle = particle + "\"source\" : \"" + database._cacheDB['evt'][i]._source + "\", "
particle = particle + "\"eventid\" : \"" + database._cacheDB['evt'][i]._eventid + "\", "
particle = particle + "\"evtdatetime\" : \"" + database._cacheDB['evt'][i]._evtdatetime + "\", "
particle = particle + "\"user\" : \"" + database._cacheDB['evt'][i]._user.replace("\\","\\\\") + "\", "
particle = particle + "\"computer\" : \"" + database._cacheDB['evt'][i]._computer + "\", "
particle = particle + "\"processid\" : \"" + database._cacheDB['evt'][i]._processid + "\", "
particle = particle + "\"threadid\" : \"" + database._cacheDB['evt'][i]._threadid + "\", "
particle = particle + "\"keywords\" : \"" + database._cacheDB['evt'][i]._keywords + "\", "
#
particle = particle + "\"subtype_id\" : \"" + database._cacheDB['evt'][i]._sub_id + "\", "
particle = particle + "\"super_id\" : \"" + database._cacheDB['evt'][i]._super_id + "\"}, "
particles = particles + particle
particles = particles[0:(len(particles)-2)]
result = "{ \"retrunValue\" : \"True\", \"data\" : " + particles +" } }"
#
#
#
if (command[1] == 'ipv4'):
if (len( database._cacheDB['ipv4']) == 0):
result = "{ \"retrunValue\" : \"True\", \"data\" : \"NONE\" }"
else:
for i in database._cacheDB['ipv4']:
particle = "\"" + str(database._cacheDB['ipv4'][i]._ident) + "\" : "
particle = particle + "{ \"identifier\" : \"" + str(database._cacheDB['ipv4'][i]._ident) + "\", "
particle = particle + "\"datetime\" : \"" + database._cacheDB['ipv4'][i]._datatime + "\", "
particle = particle + "\"version\" : \"" + database._cacheDB['ipv4'][i]._version + "\", "
particle = particle + "\"ihl\" : \"" + database._cacheDB['ipv4'][i]._ihl + "\", "
particle = particle + "\"tos\" : \"" + database._cacheDB['ipv4'][i]._tos + "\", "
particle = particle + "\"tlen\" : \"" + database._cacheDB['ipv4'][i]._tlen + "\", "
particle = particle + "\"packident\" : \"" + database._cacheDB['ipv4'][i]._packident + "\", "
particle = particle + "\"fragoff\" : \"" + database._cacheDB['ipv4'][i]._fragoff + "\", "
particle = particle + "\"ttl\" : \"" + database._cacheDB['ipv4'][i]._ttl + "\", "
particle = particle + "\"protocol\" : \"" + database._cacheDB['ipv4'][i]._protocol + "\", "
particle = particle + "\"hcs\" : \"" + database._cacheDB['ipv4'][i]._hcs + "\", "
particle = particle + "\"sourceip\" : \"" + database._cacheDB['ipv4'][i]._sourceip + "\", "
particle = particle + "\"destinationip\" : \"" + database._cacheDB['ipv4'][i]._destinationip + "\", "
#
particle = particle + "\"subtype_id\" : \"" + database._cacheDB['ipv4'][i]._sub_id + "\", "
particle = particle + "\"super_id\" : \"" + database._cacheDB['ipv4'][i]._super_id + "\"}, "
particles = particles + particle
particles = particles[0:(len(particles)-2)]
result = "{ \"retrunValue\" : \"True\", \"data\" : " + particles +" } }"
#
#
#
# Serialize the cached TCP header records into a handcrafted JSON string.
# NOTE(review): the response is built by string concatenation rather than
# json.dumps; the key "retrunValue" (sic) and the "_datatime" attribute
# (sic) are presumably parsed verbatim by the consumer -- verify before
# renaming either of them.
# NOTE(review): the final payload closes with " } }" but no matching '{'
# is opened after "data" -- looks malformed; confirm against the client.
if (command[1] == 'tcp'):
    if (len( database._cacheDB['tcp']) == 0):
        # empty cache -> sentinel payload instead of a record map
        result = "{ \"retrunValue\" : \"True\", \"data\" : \"NONE\" }"
    else:
        for i in database._cacheDB['tcp']:
            # one JSON object per cached record, keyed by its identifier
            particle = "\"" + str(database._cacheDB['tcp'][i]._ident) + "\" : "
            particle = particle + "{ \"identifier\" : \"" + str(database._cacheDB['tcp'][i]._ident) + "\", "
            particle = particle + "\"datetime\" : \"" + database._cacheDB['tcp'][i]._datatime + "\", "
            particle = particle + "\"sourport\" : \"" + database._cacheDB['tcp'][i]._sourport + "\", "
            particle = particle + "\"destport\" : \"" + database._cacheDB['tcp'][i]._destport + "\", "
            particle = particle + "\"sequnum\" : \"" + database._cacheDB['tcp'][i]._sequnum + "\", "
            particle = particle + "\"acknum\" : \"" + database._cacheDB['tcp'][i]._acknum + "\", "
            particle = particle + "\"winsize\" : \"" + database._cacheDB['tcp'][i]._winsize + "\", "
            particle = particle + "\"checksum\" : \"" + database._cacheDB['tcp'][i]._checksum + "\", "
            particle = particle + "\"urgptr\" : \"" + database._cacheDB['tcp'][i]._urgptr + "\", "
            particle = particle + "\"dataoffset\" : \"" + database._cacheDB['tcp'][i]._dataoffset + "\", "
            particle = particle + "\"ackflag\" : \"" + database._cacheDB['tcp'][i]._ackflag + "\", "
            particle = particle + "\"cwrflag\" : \"" + database._cacheDB['tcp'][i]._cwrflag + "\", "
            particle = particle + "\"synflag\" : \"" + database._cacheDB['tcp'][i]._synflag + "\", "
            particle = particle + "\"pushflag\" : \"" + database._cacheDB['tcp'][i]._pushflag + "\", "
            particle = particle + "\"ecnflag\" : \"" + database._cacheDB['tcp'][i]._ecnflag + "\", "
            particle = particle + "\"finflag\" : \"" + database._cacheDB['tcp'][i]._finflag + "\", "
            particle = particle + "\"rstflag\" : \"" + database._cacheDB['tcp'][i]._rstflag + "\", "
            particle = particle + "\"urgflag\" : \"" + database._cacheDB['tcp'][i]._urgflag + "\", "
            # bookkeeping ids linking the record to its parent entries
            particle = particle + "\"subtype_id\" : \"" + database._cacheDB['tcp'][i]._sub_id + "\", "
            particle = particle + "\"super_id\" : \"" + database._cacheDB['tcp'][i]._super_id + "\"}, "
            particles = particles + particle
        # drop the trailing ", " left behind by the last record
        particles = particles[0:(len(particles)-2)]
        result = "{ \"retrunValue\" : \"True\", \"data\" : " + particles +" } }"
#
#
#
# Serialize the cached UDP header records into a handcrafted JSON string.
# NOTE(review): "retrunValue" (sic) and "_datatime" (sic) appear to be part
# of the wire format shared with the consumer -- do not rename unilaterally.
if (command[1] == 'udp'):
    if (len( database._cacheDB['udp']) == 0):
        # empty cache -> sentinel payload instead of a record map
        result = "{ \"retrunValue\" : \"True\", \"data\" : \"NONE\" }"
    else:
        for i in database._cacheDB['udp']:
            # one JSON object per cached record, keyed by its identifier
            particle = "\"" + str(database._cacheDB['udp'][i]._ident) + "\" : "
            particle = particle + "{ \"identifier\" : \"" + str(database._cacheDB['udp'][i]._ident) + "\", "
            particle = particle + "\"datetime\" : \"" + database._cacheDB['udp'][i]._datatime + "\", "
            particle = particle + "\"checksum\" : \"" + database._cacheDB['udp'][i]._checksum + "\", "
            particle = particle + "\"length\" : \"" + database._cacheDB['udp'][i]._length + "\", "
            particle = particle + "\"sourport\" : \"" + database._cacheDB['udp'][i]._sourport + "\", "
            particle = particle + "\"destport\" : \"" + database._cacheDB['udp'][i]._destport + "\", "
            # bookkeeping ids linking the record to its parent entries
            particle = particle + "\"subtype_id\" : \"" + database._cacheDB['udp'][i]._sub_id + "\", "
            particle = particle + "\"super_id\" : \"" + database._cacheDB['udp'][i]._super_id + "\"}, "
            particles = particles + particle
        # drop the trailing ", " left behind by the last record
        particles = particles[0:(len(particles)-2)]
        result = "{ \"retrunValue\" : \"True\", \"data\" : " + particles +" } }"
#
#
#
if (command[1] == 'icmp'):
if (len( database._cacheDB['icmp']) == 0):
result = "{ \"retrunValue\" : \"True\", \"data\" : \"NONE\" }"
else:
for i in database._cacheDB['icmp']:
particle = "\"" + str(database._cacheDB['icmp'][i]._ident) + "\" : "
particle = particle + "{ \"identifier\" : \"" + str(database._cacheDB['icmp'][i]._ident) + "\", "
particle = particle + "\"datetime\" : \"" + database._cacheDB['icmp'][i]._datatime + "\", "
particle = particle + "\"type\" : \"" + database._cacheDB['icmp'][i]._type + "\", "
particle = particle + "\"code\" : \"" + database._cacheDB['icmp'][i]._code + "\", "
particle = particle + "\"checksum\" : \"" + database._cacheDB['icmp'][i]._checksum + "\", "
#
particle = particle + "\"subtype_id\" : \"" + database._cacheDB['icmp'][i]._sub_id + "\", "
particle = particle + "\"super_id\" : \"" + database._cacheDB['icmp'][i]._super_id + "\"}, "
particles = particles + particle
particles = particles[0:(len(particles)-2)]
result | |
* defaultReverseAcceleration,
'maxReverseSpeed': 0.69999999999999996 * defaultMaxReverseAcceleration,
'turn': 0.59999999999999998 * defaultTurn,
'maxTurn': 0.59999999999999998 * defaultMaxTurn },
NAVY_DREADNOUGHT: {
'setShipClass': NAVY_DREADNOUGHT,
'modelClass': WARSHIPL3,
'defaultStyle': Styles.Navy,
'mastConfig1': (Masts.Main_Square, 3),
'mastConfig2': (Masts.Main_Square, 3),
'mastConfig3': 0,
'foremastConfig': (Masts.Fore_Multi, 3),
'aftmastConfig': (Masts.Aft_Tri, 1),
'sailLogo': Logos.NoLogo,
'cannons': [
Cannons.L3] * 14,
'leftBroadsides': [
Cannons.L4] * 9,
'rightBroadsides': [
Cannons.L4] * 9,
'broadsideAmmo': InventoryType.CannonRoundShot,
'cannonAmmo': InventoryType.CannonFirebrand,
'prow': 0,
'hp': 2100,
'sp': 6000,
'maxCargo': 3,
'maxCrew': 8,
'maxCannons': 14,
'maxBroadsides': 20,
'rammingPower': 900,
'acceleration': 1.1000000000000001 * defaultAcceleration,
'maxSpeed': 0.80000000000000004 * defaultMaxSpeed,
'reverseAcceleration': 0.69999999999999996 * defaultReverseAcceleration,
'maxReverseSpeed': 0.69999999999999996 * defaultMaxReverseAcceleration,
'turn': 0.59999999999999998 * defaultTurn,
'maxTurn': 0.59999999999999998 * defaultMaxTurn },
NAVY_ELITE: {
'setShipClass': NAVY_ELITE,
'modelClass': WARSHIPL3,
'defaultStyle': Styles.Navy,
'mastConfig1': (Masts.Main_Square, 3),
'mastConfig2': (Masts.Main_Square, 3),
'mastConfig3': 0,
'foremastConfig': (Masts.Fore_Multi, 3),
'aftmastConfig': (Masts.Aft_Tri, 1),
'sailLogo': Logos.NoLogo,
'cannons': [
Cannons.L3] * 14,
'leftBroadsides': [
Cannons.L4] * 9,
'rightBroadsides': [
Cannons.L4] * 9,
'broadsideAmmo': InventoryType.CannonRoundShot,
'cannonAmmo': InventoryType.CannonFirebrand,
'prow': 0,
'hp': 4200,
'sp': 6000,
'maxCargo': 5,
'maxCrew': 8,
'maxCannons': 14,
'maxBroadsides': 20,
'rammingPower': 900,
'acceleration': 1.1000000000000001 * defaultAcceleration,
'maxSpeed': 0.80000000000000004 * defaultMaxSpeed,
'reverseAcceleration': 0.69999999999999996 * defaultReverseAcceleration,
'maxReverseSpeed': 0.69999999999999996 * defaultMaxReverseAcceleration,
'turn': 0.59999999999999998 * defaultTurn,
'maxTurn': 0.59999999999999998 * defaultMaxTurn },
NAVY_BULWARK: {
'setShipClass': NAVY_BULWARK,
'modelClass': MERCHANTL1,
'defaultStyle': Styles.Navy,
'mastConfig1': (Masts.Main_Square, 2),
'mastConfig2': (Masts.Main_Square, 1),
'mastConfig3': 0,
'foremastConfig': (Masts.Fore_Multi, 2),
'aftmastConfig': 0,
'sailLogo': Logos.NoLogo,
'cannons': [
Cannons.L1] * 4,
'leftBroadsides': [
0,
Cannons.L2,
Cannons.L2,
Cannons.L2,
0],
'rightBroadsides': [
0,
Cannons.L2,
Cannons.L2,
Cannons.L2,
0],
'broadsideAmmo': InventoryType.CannonChainShot,
'cannonAmmo': InventoryType.CannonRoundShot,
'prow': Prows.Lady,
'hp': 1400,
'sp': 4000,
'maxCargo': 2,
'maxCrew': 6,
'maxCannons': 4,
'maxBroadsides': 10,
'rammingPower': 150,
'acceleration': 1.0 * defaultAcceleration,
'maxSpeed': 0.69999999999999996 * defaultMaxSpeed,
'reverseAcceleration': 0.59999999999999998 * defaultReverseAcceleration,
'maxReverseSpeed': 0.59999999999999998 * defaultMaxReverseAcceleration,
'turn': 0.59999999999999998 * defaultTurn,
'maxTurn': 0.59999999999999998 * defaultMaxTurn },
NAVY_VANGUARD: {
'setShipClass': NAVY_VANGUARD,
'modelClass': MERCHANTL2,
'defaultStyle': Styles.Navy,
'mastConfig1': (Masts.Main_Square, 1),
'mastConfig2': (Masts.Main_Square, 2),
'mastConfig3': (Masts.Main_Square, 1),
'foremastConfig': (Masts.Fore_Multi, 2),
'aftmastConfig': 0,
'sailLogo': Logos.NoLogo,
'cannons': [
Cannons.L1] * 6,
'leftBroadsides': [
Cannons.L3] * 5,
'rightBroadsides': [
Cannons.L3] * 5,
'broadsideAmmo': InventoryType.CannonChainShot,
'cannonAmmo': InventoryType.CannonRoundShot,
'prow': Prows.Lady,
'hp': 1800,
'sp': 5000,
'maxCargo': 3,
'maxCrew': 10,
'maxCannons': 8,
'maxBroadsides': 18,
'rammingPower': 300,
'acceleration': 1.0 * defaultAcceleration,
'maxSpeed': 0.69999999999999996 * defaultMaxSpeed,
'reverseAcceleration': 0.59999999999999998 * defaultReverseAcceleration,
'maxReverseSpeed': 0.59999999999999998 * defaultMaxReverseAcceleration,
'turn': 0.59999999999999998 * defaultTurn,
'maxTurn': 0.59999999999999998 * defaultMaxTurn },
NAVY_MONARCH: {
'setShipClass': NAVY_MONARCH,
'modelClass': MERCHANTL3,
'defaultStyle': Styles.Navy,
'mastConfig1': (Masts.Main_Square, 2),
'mastConfig2': (Masts.Main_Square, 2),
'mastConfig3': (Masts.Main_Square, 2),
'foremastConfig': (Masts.Fore_Multi, 2),
'aftmastConfig': 0,
'sailLogo': Logos.NoLogo,
'cannons': [
Cannons.L1] * 8,
'leftBroadsides': [
0,
Cannons.L2,
Cannons.L2,
Cannons.L2,
Cannons.L2,
0,
0,
0,
Cannons.L2,
Cannons.L2,
Cannons.L2,
0],
'rightBroadsides': [
0,
Cannons.L2,
Cannons.L2,
Cannons.L2,
Cannons.L2,
0,
0,
0,
Cannons.L2,
Cannons.L2,
Cannons.L2,
0],
'broadsideAmmo': InventoryType.CannonChainShot,
'cannonAmmo': InventoryType.CannonFirebrand,
'prow': Prows.Lady,
'hp': 1800,
'sp': 5500,
'maxCargo': 3,
'maxCrew': 14,
'maxCannons': 10,
'maxBroadsides': 24,
'rammingPower': 600,
'acceleration': 1.0 * defaultAcceleration,
'maxSpeed': 0.69999999999999996 * defaultMaxSpeed,
'reverseAcceleration': 0.59999999999999998 * defaultReverseAcceleration,
'maxReverseSpeed': 0.59999999999999998 * defaultMaxReverseAcceleration,
'turn': 0.59999999999999998 * defaultTurn,
'maxTurn': 0.59999999999999998 * defaultMaxTurn },
NAVY_COLOSSUS: {
'setShipClass': NAVY_COLOSSUS,
'modelClass': MERCHANTL3,
'defaultStyle': Styles.Navy,
'mastConfig1': (Masts.Main_Square, 3),
'mastConfig2': (Masts.Main_Square, 3),
'mastConfig3': (Masts.Main_Square, 3),
'foremastConfig': (Masts.Fore_Multi, 2),
'aftmastConfig': 0,
'sailLogo': Logos.NoLogo,
'cannons': [
Cannons.L1] * 10,
'leftBroadsides': [
Cannons.L2,
Cannons.L2,
Cannons.L2,
Cannons.L2,
Cannons.L2,
Cannons.L2,
0,
Cannons.L2,
Cannons.L2,
Cannons.L2,
Cannons.L2,
0],
'rightBroadsides': [
Cannons.L2,
Cannons.L2,
Cannons.L2,
Cannons.L2,
Cannons.L2,
Cannons.L2,
0,
Cannons.L2,
Cannons.L2,
Cannons.L2,
Cannons.L2,
0],
'broadsideAmmo': InventoryType.CannonChainShot,
'cannonAmmo': InventoryType.CannonFirebrand,
'prow': Prows.Lady,
'hp': 1800,
'sp': 5500,
'maxCargo': 3,
'maxCrew': 14,
'maxCannons': 10,
'maxBroadsides': 24,
'rammingPower': 600,
'acceleration': 1.0 * defaultAcceleration,
'maxSpeed': 0.69999999999999996 * defaultMaxSpeed,
'reverseAcceleration': 0.59999999999999998 * defaultReverseAcceleration,
'maxReverseSpeed': 0.59999999999999998 * defaultMaxReverseAcceleration,
'turn': 0.59999999999999998 * defaultTurn,
'maxTurn': 0.59999999999999998 * defaultMaxTurn },
NAVY_BASTION: {
'setShipClass': NAVY_BASTION,
'modelClass': MERCHANTL3,
'defaultStyle': Styles.Navy,
'mastConfig1': (Masts.Main_Square, 3),
'mastConfig2': (Masts.Main_Square, 3),
'mastConfig3': (Masts.Main_Square, 3),
'foremastConfig': (Masts.Fore_Multi, 2),
'aftmastConfig': 0,
'sailLogo': Logos.NoLogo,
'cannons': [
Cannons.L1] * 10,
'leftBroadsides': [
Cannons.L2,
Cannons.L2,
Cannons.L2,
Cannons.L2,
Cannons.L2,
Cannons.L2,
0,
Cannons.L2,
Cannons.L2,
Cannons.L2,
Cannons.L2,
0],
'rightBroadsides': [
Cannons.L2,
Cannons.L2,
Cannons.L2,
Cannons.L2,
Cannons.L2,
Cannons.L2,
0,
Cannons.L2,
Cannons.L2,
Cannons.L2,
Cannons.L2,
0],
'broadsideAmmo': InventoryType.CannonChainShot,
'cannonAmmo': InventoryType.CannonFirebrand,
'prow': Prows.Lady,
'hp': 3600,
'sp': 5500,
'maxCargo': 5,
'maxCrew': 14,
'maxCannons': 10,
'maxBroadsides': 24,
'rammingPower': 600,
'acceleration': 1.0 * defaultAcceleration,
'maxSpeed': 0.69999999999999996 * defaultMaxSpeed,
'reverseAcceleration': 0.59999999999999998 * defaultReverseAcceleration,
'maxReverseSpeed': 0.59999999999999998 * defaultMaxReverseAcceleration,
'turn': 0.59999999999999998 * defaultTurn,
'maxTurn': 0.59999999999999998 * defaultMaxTurn },
NAVY_FERRET: {
'setShipClass': NAVY_FERRET,
'modelClass': INTERCEPTORL1,
'defaultStyle': Styles.Navy,
'mastConfig1': (Masts.Main_Tri, 2),
'mastConfig2': 0,
'mastConfig3': 0,
'foremastConfig': (Masts.Fore_Tri, 1),
'aftmastConfig': 0,
'sailLogo': Logos.NoLogo,
'cannons': [
Cannons.L1] * 2,
'leftBroadsides': [],
'rightBroadsides': [],
'broadsideAmmo': InventoryType.CannonRoundShot,
'cannonAmmo': InventoryType.CannonRoundShot,
'prow': Prows.Lady,
'hp': 1000,
'sp': 3000,
'maxCargo': 1,
'maxCrew': 4,
'maxCannons': 2,
'maxBroadsides': 6,
'rammingPower': 75,
'acceleration': 1.2 * defaultAcceleration,
'maxSpeed': 0.90000000000000002 * defaultMaxSpeed,
'reverseAcceleration': 0.80000000000000004 * defaultReverseAcceleration,
'maxReverseSpeed': 0.80000000000000004 * defaultMaxReverseAcceleration,
'turn': 0.80000000000000004 * defaultTurn,
'maxTurn': 0.80000000000000004 * defaultMaxTurn },
NAVY_GREYHOUND: {
'setShipClass': NAVY_GREYHOUND,
'modelClass': INTERCEPTORL2,
'defaultStyle': Styles.Navy,
'mastConfig1': (Masts.Main_Tri, 2),
'mastConfig2': 0,
'mastConfig3': 0,
'foremastConfig': (Masts.Fore_Tri, 1),
'aftmastConfig': 0,
'sailLogo': Logos.NoLogo,
'cannons': [
Cannons.L1] * 4,
'leftBroadsides': [
Cannons.L1] * 3,
'rightBroadsides': [
Cannons.L1] * 3,
'broadsideAmmo': InventoryType.CannonChainShot,
'cannonAmmo': InventoryType.CannonRoundShot,
'prow': Prows.Lady,
'hp': 1200,
'sp': 3500,
'maxCargo': 2,
'maxCrew': 8,
'maxCannons': 6,
'maxBroadsides': 10,
'rammingPower': 225,
'acceleration': 1.2 * defaultAcceleration,
'maxSpeed': 0.90000000000000002 * defaultMaxSpeed,
'reverseAcceleration': 0.80000000000000004 * defaultReverseAcceleration,
'maxReverseSpeed': 0.80000000000000004 * defaultMaxReverseAcceleration,
'turn': 0.80000000000000004 * defaultTurn,
'maxTurn': 0.80000000000000004 * defaultMaxTurn },
NAVY_KINGFISHER: {
'setShipClass': NAVY_KINGFISHER,
'modelClass': INTERCEPTORL3,
'defaultStyle': Styles.Navy,
'mastConfig1': (Masts.Main_Tri, 2),
'mastConfig2': 0,
'mastConfig3': 0,
'foremastConfig': (Masts.Fore_Tri, 1),
'aftmastConfig': (Masts.Aft_Tri, 1),
'sailLogo': Logos.NoLogo,
'cannons': [
Cannons.L1] * 6,
'leftBroadsides': [
Cannons.L1] * 5,
'rightBroadsides': [
Cannons.L1] * 5,
'broadsideAmmo': InventoryType.CannonRoundShot,
'cannonAmmo': InventoryType.CannonFirebrand,
'prow': Prows.Lady,
'hp': 1200,
'sp': 4000,
'maxCargo': 2,
'maxCrew': 3,
'maxCannons': 8,
'maxBroadsides': 14,
'rammingPower': 450,
'acceleration': 1.2 * defaultAcceleration,
'maxSpeed': 0.90000000000000002 * defaultMaxSpeed,
'reverseAcceleration': 0.80000000000000004 * defaultReverseAcceleration,
'maxReverseSpeed': 0.80000000000000004 * defaultMaxReverseAcceleration,
'turn': 0.80000000000000004 * defaultTurn,
'maxTurn': 0.80000000000000004 * defaultMaxTurn },
NAVY_PREDATOR: {
'setShipClass': NAVY_PREDATOR,
'modelClass': INTERCEPTORL3,
'defaultStyle': Styles.Navy,
'mastConfig1': (Masts.Main_Tri, 2),
'mastConfig2': 0,
'mastConfig3': 0,
'foremastConfig': (Masts.Fore_Tri, 1),
'aftmastConfig': (Masts.Aft_Tri, 1),
'sailLogo': Logos.NoLogo,
'cannons': [
Cannons.L1] * 8,
'leftBroadsides': [
Cannons.L1] * 7,
'rightBroadsides': [
Cannons.L1] * 7,
'broadsideAmmo': InventoryType.CannonFirebrand,
'cannonAmmo': InventoryType.CannonFirebrand,
'prow': Prows.Lady,
'hp': 1200,
'sp': 4000,
'maxCargo': 2,
'maxCrew': 3,
'maxCannons': 8,
'maxBroadsides': 14,
'rammingPower': 450,
'acceleration': 1.2 * defaultAcceleration,
'maxSpeed': 0.90000000000000002 * defaultMaxSpeed,
'reverseAcceleration': 0.80000000000000004 * defaultReverseAcceleration,
'maxReverseSpeed': 0.80000000000000004 * defaultMaxReverseAcceleration,
'turn': 0.80000000000000004 * defaultTurn,
'maxTurn': 0.80000000000000004 * defaultMaxTurn },
EITC_CORVETTE: {
'setShipClass': EITC_CORVETTE,
'modelClass': WARSHIPL1,
'defaultStyle': Styles.EITC,
'mastConfig1': (Masts.Main_Square, 1),
'mastConfig2': (Masts.Main_Square, 2),
'mastConfig3': 0,
'foremastConfig': (Masts.Fore_Multi, 2),
'aftmastConfig': 0,
'sailLogo': Logos.EITC,
'cannons': [
Cannons.L3] * 6,
'leftBroadsides': [
Cannons.L2] * 5,
'rightBroadsides': [
Cannons.L2] * 5,
'broadsideAmmo': InventoryType.CannonChainShot,
'cannonAmmo': InventoryType.CannonRoundShot,
'prow': 0,
'hp': 1700,
'sp': 4000,
'maxCargo': 2,
'maxCrew': 3,
'maxCannons': 8,
'maxBroadsides': 10,
'rammingPower': 150,
'acceleration': 1.1000000000000001 * defaultAcceleration,
'maxSpeed': 0.80000000000000004 * defaultMaxSpeed,
'reverseAcceleration': 0.69999999999999996 * defaultReverseAcceleration,
'maxReverseSpeed': 0.69999999999999996 * defaultMaxReverseAcceleration,
'turn': 0.59999999999999998 * defaultTurn,
'maxTurn': 0.59999999999999998 * defaultMaxTurn },
EITC_MARAUDER: {
'setShipClass': EITC_MARAUDER,
'modelClass': WARSHIPL2,
'defaultStyle': Styles.EITC,
'mastConfig1': (Masts.Main_Square, 2),
'mastConfig2': (Masts.Main_Square, 2),
'mastConfig3': 0,
'foremastConfig': (Masts.Fore_Multi, 2),
'aftmastConfig': (Masts.Aft_Tri, 1),
'sailLogo': Logos.EITC,
'cannons': [
Cannons.L3] * 8,
'leftBroadsides': [
Cannons.L2] * 7,
'rightBroadsides': [
Cannons.L2] * 7,
'broadsideAmmo': InventoryType.CannonFirebrand,
'cannonAmmo': InventoryType.CannonChainShot,
'prow': 0,
'hp': 2100,
'sp': 5000,
'maxCargo': 3,
'maxCrew': 6,
'maxCannons': 10,
'maxBroadsides': 14,
'rammingPower': 450,
'acceleration': 1.1000000000000001 * defaultAcceleration,
'maxSpeed': 0.80000000000000004 * defaultMaxSpeed,
'reverseAcceleration': 0.69999999999999996 * defaultReverseAcceleration,
'maxReverseSpeed': 0.69999999999999996 * defaultMaxReverseAcceleration,
'turn': 0.59999999999999998 * defaultTurn,
'maxTurn': 0.59999999999999998 * defaultMaxTurn },
EITC_WARLORD: {
'setShipClass': EITC_WARLORD,
'modelClass': WARSHIPL3,
'defaultStyle': Styles.EITC,
'mastConfig1': (Masts.Main_Square, 2),
'mastConfig2': (Masts.Main_Square, 3),
'mastConfig3': 0,
'foremastConfig': (Masts.Fore_Multi, 3),
'aftmastConfig': (Masts.Aft_Tri, 1),
'sailLogo': Logos.EITC,
'cannons': [
Cannons.L3] * 12,
'leftBroadsides': [
Cannons.L2] * 9,
'rightBroadsides': [
Cannons.L2] * 9,
'broadsideAmmo': InventoryType.CannonFirebrand,
'cannonAmmo': InventoryType.CannonRoundShot,
'prow': 0,
'hp': 2100,
'sp': 6000,
'maxCargo': 3,
'maxCrew': 8,
'maxCannons': 14,
'maxBroadsides': 20,
'rammingPower': 900,
'acceleration': 1.1000000000000001 * defaultAcceleration,
'maxSpeed': 0.80000000000000004 * defaultMaxSpeed,
'reverseAcceleration': 0.69999999999999996 * defaultReverseAcceleration,
'maxReverseSpeed': 0.69999999999999996 * defaultMaxReverseAcceleration,
'turn': 0.59999999999999998 * defaultTurn,
'maxTurn': 0.59999999999999998 * defaultMaxTurn },
EITC_JUGGERNAUT: {
'setShipClass': EITC_JUGGERNAUT,
'modelClass': WARSHIPL3,
'defaultStyle': Styles.EITC,
'mastConfig1': (Masts.Main_Square, 3),
'mastConfig2': (Masts.Main_Square, 3),
'mastConfig3': 0,
'foremastConfig': (Masts.Fore_Multi, 3),
'aftmastConfig': (Masts.Aft_Tri, 1),
'sailLogo': Logos.EITC,
'cannons': [
Cannons.L3] * 14,
'leftBroadsides': [
Cannons.L2] * 10,
'rightBroadsides': [
Cannons.L2] * 10,
'broadsideAmmo': InventoryType.CannonExplosive,
'cannonAmmo': InventoryType.CannonFirebrand,
'prow': 0,
| |
# Source repository: ratschlab/spladder
import numpy as np
import statsmodels.api as sm
import statsmodels.sandbox as sms
import h5py
import sys
import os
import pdb
import pickle
import warnings
import time
import datetime
from scipy.optimize import minimize_scalar
from scipy.special import polygamma
from scipy.stats import chi2,nbinom,scoreatpercentile
import numpy.random as npr
import hashlib
from .alt_splice import quantify
from .testing import likelihood
from . import settings
from .viz import diagnose as plot
import multiprocessing as mp
import signal as sig
from .helpers import log_progress, decodeUTF8, codeUTF8
TIME0 = time.time()
def get_gene_expression(options, fn_out=None, strain_subset=None):
    """Quantify per-gene expression from the segment-count HDF5 file.

    Gene expression is the length-weighted sum of segment counts divided by
    the read length; with ``options.non_alt_norm`` set, only segments that
    are not part of an alternative event are used.

    :param options: spladder options object (provides ``fname_genes``,
        ``fname_count_in``, ``readlen``, ``non_alt_norm``, ``verbose``)
    :param fn_out: optional path of an HDF5 file the results are written to
    :param strain_subset: optional collection of strain names restricting
        the columns of the returned count matrix
    :return: tuple (gene_counts, all strain names sorted, selected strain
        names, gene names)
    """
    if options.verbose:
        sys.stdout.write('Quantifying gene expression ...\n')
    ### load gene information
    # fix: close the pickle file explicitly instead of leaking the handle
    # (the original passed a bare open() into pickle.load)
    with open(options.fname_genes, 'rb') as fh_genes:
        genes = pickle.load(fh_genes, encoding='latin1')[0]
    numgenes = genes.shape[0]
    ### open hdf5 file containing graph count information
    IN = h5py.File(options.fname_count_in, 'r')
    strains = IN['strains'][:].astype('str')
    ### sort by strain ID
    strain_idx_all = np.argsort(strains)
    if strain_subset is None:
        strain_idx = strain_idx_all.copy()
    else:
        # keep only the requested strains, preserving sorted order
        strain_idx = strain_idx_all[np.in1d(strains[strain_idx_all], strain_subset)]
    gene_counts = np.zeros((numgenes, strain_idx.shape[0]), dtype='float')
    gene_names = np.array([x.name for x in genes], dtype='str')
    seg_lens = IN['seg_len'][:]
    gene_ids_segs = IN['gene_ids_segs'][:].astype('int')
    ### no longer assume that the gene_ids_segs are sorted by gene ID
    # stable sort, then take the first segment row of each gene
    s_idx = np.argsort(gene_ids_segs[:, 0], kind='mergesort')
    _, u_idx = np.unique(gene_ids_segs[s_idx, 0], return_index=True)
    s_idx = s_idx[u_idx]
    ### iterate over genes
    for gidx, iidx in enumerate(s_idx):
        if options.verbose:
            log_progress(gidx, numgenes, 100)
        ### get idx of non alternative segments
        non_alt_idx = genes[gidx].get_non_alt_seg_ids()
        seg_idx = np.arange(iidx, iidx + genes[gidx].segmentgraph.seg_edges.shape[0])
        gene_idx = gene_ids_segs[seg_idx, 0]
        if len(gene_idx.shape) > 0:
            gene_idx = gene_idx[0]
        # sanity checks: HDF5 gene order must agree with the pickled genes
        assert(decodeUTF8(IN['gene_names'][:][gene_idx]) == genes[gidx].name)
        assert(genes[gidx].name == gene_names[gidx])
        if options.non_alt_norm:
            seg_idx = seg_idx[non_alt_idx]
        ### compute gene expression as the read count over all non alternative segments
        if seg_idx.shape[0] > 1:
            gene_counts[gidx, :] = np.squeeze(np.dot(IN['segments'][seg_idx, :][:, strain_idx].T, seg_lens[seg_idx])) / options.readlen
        else:
            gene_counts[gidx, :] = IN['segments'][seg_idx[0], :][strain_idx] * seg_lens[seg_idx] / options.readlen
    IN.close()
    if options.verbose:
        sys.stdout.write('\n... done.\n')
    ### write results to hdf5
    if fn_out is not None:
        OUT = h5py.File(fn_out, 'w')
        OUT.create_dataset(name='all_strains', data=codeUTF8(strains[strain_idx_all]))
        OUT.create_dataset(name='strains', data=codeUTF8(strains[strain_idx]))
        OUT.create_dataset(name='genes', data=codeUTF8(gene_names))
        OUT.create_dataset(name='raw_count', data=gene_counts, compression="gzip")
        OUT.close()
    return (gene_counts, strains[strain_idx_all], strains[strain_idx], gene_names)
def get_size_factors(gene_counts, options):
    """Estimate one library-size factor per sample (DESeq-style).

    For each sample, the factor is the median ratio of its counts to the
    per-gene pseudo-count geometric mean, computed over genes with a
    non-zero count in that sample.

    :param gene_counts: genes x samples count matrix
    :param options: options object; only ``verbose`` is read
    :return: 1-D float array with one size factor per sample
    """
    if options.verbose:
        print('Estimating size factors')
    # per-gene geometric mean across samples, with a +1 pseudo-count so
    # zero counts do not blow up the log
    geo_mean = np.exp(np.mean(np.log(gene_counts + 1), axis=1))

    def _ratio_median(col):
        # median count-to-reference ratio over expressed genes only
        expressed = col > 0
        return np.median(col[expressed] / geo_mean[expressed])

    return np.array([_ratio_median(gene_counts[:, s])
                     for s in range(gene_counts.shape[1])], dtype='float')
def re_quantify_events(options):
    """Legacy testing helper: re-quantify exon-skip events on a given graph.

    Loads the pickled event list from ``options.fname_events`` and
    re-computes coverage for the first 1000 events via
    ``quantify.quantify_from_graph``. The merge file name is taken from
    ``sys.argv[1]`` (legacy CLI behavior).

    :param options: spladder options object
    :return: coverage structure as returned by ``quantify_from_graph``
    """
    # fix: close the event file explicitly instead of leaking the handle
    with open(options.fname_events, 'rb') as fh:
        ev = pickle.load(fh)[0]
    cov = quantify.quantify_from_graph(ev, np.arange(1000), 'exon_skip', options, fn_merge=sys.argv[1])
    return cov
def estimate_dispersion_chunk(gene_counts, matrix, sf, options, test_idx, idx, log=False):
    """Estimate a raw negative-binomial dispersion for each event in a chunk.

    For every row of ``gene_counts``, alternates between a GLM fit with the
    current dispersion and a bounded scalar minimization of the (Cox-Reid)
    adjusted log-likelihood until the log-dispersion change drops below
    1e-4 (at most 10 rounds).

    :param gene_counts: events x samples count matrix for this chunk
    :param matrix: design matrix of the full model
    :param sf: per-sample size factors (used as GLM offset)
    :param options: options object; ``min_count`` is read
    :param test_idx: boolean mask of events selected for testing
    :param idx: global row indices of this chunk (returned for re-assembly)
    :param log: when True, report progress via log_progress
    :return: (disp_raw, disp_raw_conv, idx) -- per-event dispersion (NaN if
        skipped) and a convergence flag
    """
    disp_raw = np.empty((idx.shape[0], 1), dtype='float')
    disp_raw.fill(np.nan)
    disp_raw_conv = np.zeros((idx.shape[0], 1), dtype='bool')
    npr.seed(23)
    for i in range(idx.shape[0]):
        if log:
            log_progress(i, idx.shape[0])
        # initial dispersion guess
        disp = 0.1
        resp = gene_counts[i, :].astype('int')
        # skip low-coverage events, events with >60% zero counts, and
        # events not selected for testing
        if sum(resp / sf) < options.min_count or np.mean(resp == 0) > 0.6 or not test_idx[i]:
            continue
        for j in range(10):
            modNB = sm.GLM(resp, matrix, family=sm.families.NegativeBinomial(alpha=disp), offset=np.log(sf))
            result = modNB.fit()
            # NOTE(review): global numpy print-option side effect; looks
            # like a debugging leftover
            np.set_printoptions(12)
            last_disp = disp
            yhat = result.mu
            # sign -1 turns the maximization into a minimization problem
            sign = -1.0
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                res = minimize_scalar(likelihood.adj_loglikelihood_scalar, args=(matrix, resp, yhat, sign), method='Bounded', bounds=(0, 10.0), tol=1e-5)
            disp = res.x
            # converged when the log-dispersion changes by less than 1e-4
            if abs(np.log(disp) - np.log(last_disp)) < 1e-4:
                disp_raw[i] = disp
                disp_raw_conv[i] = True
                break
            else:
                disp_raw[i] = disp
                disp_raw_conv[i] = False
    if log:
        log_progress(idx.shape[0], idx.shape[0])
    return (disp_raw, disp_raw_conv, idx)
def estimate_dispersion(gene_counts, matrix, sf, options, test_idx, event_type):
    """Estimate raw per-event dispersions, optionally across worker processes.

    Dispatches chunks of 30 events to ``estimate_dispersion_chunk`` (in a
    multiprocessing pool when ``options.parallel > 1``) and re-assembles the
    per-event results. Exits the program if no estimate converged.

    :return: (disp_raw, disp_raw_conv) -- per-event dispersion and
        convergence flag, shape (n_events, 1) each
    """
    if options.verbose:
        print('Estimating raw dispersions')
    if options.parallel > 1:
        disp_raw = np.empty((gene_counts.shape[0], 1), dtype='float')
        disp_raw.fill(np.nan)
        disp_raw_conv = np.zeros((gene_counts.shape[0], 1), dtype='bool')
        # workers ignore SIGINT so Ctrl-C is handled by the parent only
        pool = mp.Pool(processes=options.parallel, initializer=lambda: sig.signal(sig.SIGINT, sig.SIG_IGN))
        binsize = 30
        idx_chunks = [np.arange(x, min(x + binsize, gene_counts.shape[0])) for x in range(0, gene_counts.shape[0], binsize)]
        try:
            result = [pool.apply_async(estimate_dispersion_chunk, args=(gene_counts[idx, :], matrix, sf, options, test_idx[idx], idx,)) for idx in idx_chunks]
            res_cnt = 0
            while result:
                # collect chunks in submission order and scatter the values
                # back to their global row indices
                tmp = result.pop(0).get()
                for i, j in enumerate(tmp[2]):
                    if options.verbose:
                        log_progress(res_cnt, gene_counts.shape[0])
                    res_cnt += 1
                    disp_raw[j] = tmp[0][i]
                    disp_raw_conv[j] = tmp[1][i]
            if options.verbose:
                log_progress(gene_counts.shape[0], gene_counts.shape[0])
                print('')
            pool.terminate()
            pool.join()
        except KeyboardInterrupt:
            print('Keyboard Interrupt - exiting', file=sys.stderr)
            pool.terminate()
            pool.join()
            sys.exit(1)
    else:
        # serial fall-back: single chunk over all events
        (disp_raw, disp_raw_conv, _) = estimate_dispersion_chunk(gene_counts, matrix, sf, options, test_idx, np.arange(gene_counts.shape[0]), log=options.verbose)
    if np.sum(disp_raw_conv) == 0:
        print('\nERROR: None of the dispersion estimates converged. Exiting.', file=sys.stderr)
        sys.exit(1)
    if options.diagnose_plots:
        plot.mean_variance_plot(counts=gene_counts,
                                disp=disp_raw,
                                matrix=matrix,
                                figtitle='Raw Dispersion Estimate',
                                filename=os.path.join(options.plot_dir, 'dispersion_raw_%s.%s' % (event_type, options.plot_format)),
                                options=options)
    return (disp_raw, disp_raw_conv)
def fit_dispersion(counts, disp_raw, disp_conv, sf, options, dmatrix1, event_type):
    """Fit a mean-dependent trend through the raw dispersion estimates.

    Fits a Gamma GLM with identity link of the raw dispersions (restricted
    to the 1st-99th percentile range of converged estimates) on
    ``1 / mean_count``, giving the model disp = Lambda[0] / mean + Lambda[1].

    :return: (disp_fitted, Lambda, idx) -- fitted per-event dispersions,
        the two fit coefficients, and the indices used in the fit
    """
    mean_count = np.mean(counts / sf, axis=1)[:, np.newaxis]
    index = np.where(disp_conv)[0]
    # trim the extreme 1% tails of the converged raw estimates
    lowerBound = np.percentile(np.unique(disp_raw[index]), 1)
    upperBound = np.percentile(np.unique(disp_raw[index]), 99)
    idx = np.where((disp_raw > lowerBound) & (disp_raw < upperBound))[0]
    # design: column 0 = 1/mean, column 1 = intercept
    matrix = np.ones((idx.shape[0], 2), dtype='float')
    matrix[:, 0] /= mean_count[idx].ravel()
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        modGamma = sm.GLM(disp_raw[idx], matrix, family=sm.families.Gamma(sm.families.links.identity()))
        res = modGamma.fit()
        Lambda = res.params
    disp_fitted = disp_raw.copy()
    ok_idx = np.where(~np.isnan(disp_fitted))[0]
    # evaluate the fitted trend for every event with a raw estimate
    disp_fitted[ok_idx] = Lambda[0] / mean_count[ok_idx] + Lambda[1]
    if np.sum(disp_fitted > 0) > 0:
        print("\nFound dispersion fit")
    if options.diagnose_plots:
        plot.mean_variance_plot(counts=counts,
                                disp=disp_fitted,
                                matrix=dmatrix1,
                                figtitle='Fitted Dispersion Estimate',
                                filename=os.path.join(options.plot_dir, 'dispersion_fitted_%s.%s' % (event_type, options.plot_format)),
                                options=options)
    return (disp_fitted, Lambda, idx)
def adj_loglikelihood_shrink_scalar_onedisper(disp, explanatory, response, yhat, dispFitted, varPrior, sign):
    """Adjusted log-likelihood with shrinkage toward the fitted dispersion.

    Combines the Cox-Reid adjusted log-likelihood with a quadratic penalty
    on the log-scale distance between ``disp`` and ``dispFitted``
    (a log-normal prior with variance ``varPrior**2``), and multiplies the
    result by ``sign`` so it can be passed to a minimizer.

    :param disp: dispersion value being evaluated
    :param explanatory: design matrix
    :param response: observed counts
    :param yhat: fitted means from the GLM
    :param dispFitted: trend-fitted dispersion for this event
    :param varPrior: standard deviation of the log-scale prior
    :param sign: +1/-1 multiplier for maximization vs. minimization
    :return: signed, shrunken adjusted log-likelihood
    """
    loglik_adj = adj_loglikelihood_scalar(disp, explanatory, response, yhat, 1.0)
    # penalty grows with the squared log-distance from the fitted trend
    logprior = (np.log(disp) - np.log(dispFitted)) ** 2 / (2 * varPrior ** 2)
    loglik_adj_shrk = loglik_adj - logprior
    return loglik_adj_shrk * sign
def adj_loglikelihood_scalar(disp, X, y, mu, sign):
n = 1 / disp
p = n / (n + mu)
loglik = sum(nbinom.logpmf(y, n, p))
diagVec = mu / (1 + mu * disp)
diagWM = np.diag(diagVec)
xtwx = np.dot(np.dot(X.T, diagWM), X)
coxreid = 0.5 * np.log(np.linalg.det(xtwx))
return (loglik - coxreid) * sign
def adjust_dispersion_chunk(counts, dmatrix1, disp_raw, disp_fitted, varPrior, sf, options, idx, log=False):
    """Shrink raw dispersions toward the fitted trend for a chunk of events.

    Like ``estimate_dispersion_chunk``, but the scalar objective includes a
    log-normal prior centered on the trend-fitted dispersion
    (``adj_loglikelihood_shrink_scalar_onedisper``). Events whose raw
    estimate is NaN are skipped.

    :return: (disp_adj, disp_adj_conv, idx) -- adjusted dispersion per
        event (NaN if skipped), convergence flag, and the chunk indices
    """
    disp_adj = np.empty((counts.shape[0], 1))
    disp_adj.fill(np.nan)
    disp_adj_conv = np.zeros_like(disp_adj, dtype='bool')
    error_cnt = 0
    for i in range(idx.shape[0]):
        if log:
            log_progress(i, idx.shape[0])
        if not np.isnan(disp_raw[i]):
            ### init dispersion and response
            disp = 0.1
            resp = counts[i, :].astype('int')
            ### run for max 10 iterations
            for j in range(10):
                modNB = sm.GLM(resp, dmatrix1, family=sm.families.NegativeBinomial(alpha=disp), offset=np.log(sf))
                result = modNB.fit()
                dispBef = disp
                yhat = result.mu
                # sign -1 turns the maximization into a minimization problem
                sign = -1.0
                with warnings.catch_warnings():
                    warnings.simplefilter("ignore")
                    try:
                        res = minimize_scalar(adj_loglikelihood_shrink_scalar_onedisper, args=(dmatrix1, resp, yhat, disp_fitted[i], varPrior, sign), method='Bounded', bounds=(0, 10.0), tol=1e-5)
                    except TypeError:
                        # optimizer failure: keep the last dispersion and
                        # record the event as not converged
                        disp_adj[i] = disp
                        disp_adj_conv[i] = False
                        error_cnt += 1
                        break
                disp = res.x
                # converged when the log-dispersion changes by less than 1e-4
                if abs(np.log(disp) - np.log(dispBef)) < 1e-4:
                    disp_adj[i] = disp
                    disp_adj_conv[i] = True
                    break
                else:
                    disp_adj[i] = disp
                    disp_adj_conv[i] = False
    if log:
        log_progress(idx.shape[0], idx.shape[0])
        print('')
    #if error_cnt > 0:
    #    print 'Warning: %i events did not fit due to a TypeError' % error_cnt
    return (disp_adj, disp_adj_conv, idx)
def adjust_dispersion(counts, dmatrix1, disp_raw, disp_fitted, idx, sf, options, event_type):
    """Compute shrinkage-adjusted dispersions, optionally in parallel.

    Derives the prior variance from the trend fit (via
    ``calculate_varPrior``; the sampling variance of the log dispersion is
    approximated with a trigamma term on the residual degrees of freedom),
    then dispatches chunks of 30 events to ``adjust_dispersion_chunk``.

    :return: (disp_adj, disp_adj_conv) -- adjusted dispersion and
        convergence flag per event, shape (n_events, 1) each
    """
    if options.verbose:
        print('Estimating adjusted dispersions.')
    varLogDispSamp = polygamma(1, (dmatrix1.shape[0] - dmatrix1.shape[1] ) / 2) ## number of samples - number of coefficients
    varPrior = calculate_varPrior(disp_raw, disp_fitted, idx, varLogDispSamp)
    if options.parallel > 1:
        disp_adj = np.empty((counts.shape[0], 1))
        disp_adj.fill(np.nan)
        disp_adj_conv = np.zeros_like(disp_adj, dtype='bool')
        # workers ignore SIGINT so Ctrl-C is handled by the parent only
        pool = mp.Pool(processes=options.parallel, initializer=lambda: sig.signal(sig.SIGINT, sig.SIG_IGN))
        binsize = 30
        idx_chunks = [np.arange(x, min(x + binsize, counts.shape[0])) for x in range(0, counts.shape[0], binsize)]
        try:
            result = [pool.apply_async(adjust_dispersion_chunk, args=(counts[cidx, :], dmatrix1, disp_raw[cidx], disp_fitted[cidx], varPrior, sf, options, cidx,)) for cidx in idx_chunks]
            res_cnt = 0
            while result:
                # collect chunks in submission order and scatter the values
                # back to their global row indices
                tmp = result.pop(0).get()
                for i, j in enumerate(tmp[2]):
                    if options.verbose:
                        log_progress(res_cnt, counts.shape[0])
                    res_cnt += 1
                    disp_adj[j] = tmp[0][i]
                    disp_adj_conv[j] = tmp[1][i]
            if options.verbose:
                log_progress(counts.shape[0], counts.shape[0])
                print('')
            pool.terminate()
            pool.join()
        except KeyboardInterrupt:
            print('Keyboard Interrupt - exiting', file=sys.stderr)
            pool.terminate()
            pool.join()
            sys.exit(1)
    else:
        # serial fall-back: single chunk over all events
        (disp_adj, disp_adj_conv, _) = adjust_dispersion_chunk(counts, dmatrix1, disp_raw, disp_fitted, varPrior, sf, options, np.arange(counts.shape[0]), log=options.verbose)
    if options.diagnose_plots:
        plot.mean_variance_plot(counts=counts,
                                disp=disp_adj,
                                matrix=dmatrix1,
                                figtitle='Adjusted Dispersion Estimate',
                                filename=os.path.join(options.plot_dir, 'dispersion_adjusted_%s.%s' % (event_type, options.plot_format)),
                                options=options)
    return (disp_adj, disp_adj_conv)
def test_count_chunk(gene_counts, disp_adj, sf, dmatrix0, dmatrix1, options, test_idx, idx, log=False):
pval = np.zeros((gene_counts.shape[0], 1), dtype='float')
pval.fill(np.nan)
npr.seed(23)
for i in range(idx.shape[0]):
if log:
log_progress(i, idx.shape[0])
if np.isnan(disp_adj[i]) or not test_idx[i]:
continue
response = gene_counts[i, :].astype('int')
if np.sum(response[:int(response.shape[0] / 2)] == 0) > options.max_0_frac * response.shape[0] / 2:
continue
modNB0 = sm.GLM(response, dmatrix0, family=sm.families.NegativeBinomial(alpha=disp_adj[i]), offset=np.log(sf))
modNB1 = sm.GLM(response, dmatrix1, family=sm.families.NegativeBinomial(alpha=disp_adj[i]), offset=np.log(sf))
try:
result0 = modNB0.fit()
result1 = modNB1.fit()
except:
print('\nWARNING: SVD did not converge - skipping', file=sys.stderr)
#traceback.print_exc(file=sys.stderr)
continue
pval[i] = 1 - chi2.cdf(result0.deviance - result1.deviance, dmatrix1.shape[1] - dmatrix0.shape[1])
if log:
log_progress(idx.shape[0], idx.shape[0])
print('')
return (pval, idx)
def test_count(gene_counts, disp_adj, sf, dmatrix0, dmatrix1, options, test_idx):
if options.verbose:
print('Running the statistical test.')
if options.parallel > 1:
pval = np.zeros((gene_counts.shape[0], 1), dtype='float')
pval.fill(np.nan)
pool = mp.Pool(processes=options.parallel, initializer=lambda: sig.signal(sig.SIGINT, sig.SIG_IGN))
binsize = 30
idx_chunks = [np.arange(x, min(x + binsize, gene_counts.shape[0])) for x in range(0, gene_counts.shape[0], binsize)]
try:
result = [pool.apply_async(test_count_chunk, args=(gene_counts[cidx, :], disp_adj[cidx], sf, dmatrix0, dmatrix1, options, test_idx[cidx], | |
:param data_list: list of dicts with keys `xml_id` (XMLID to
assign), `noupdate` (flag on XMLID), `values` (field values)
:param update: should be ``True`` when upgrading a module
:return: the records corresponding to ``data_list``
"""
original_self = self.browse()
# records created during installation should not display messages
self = self.with_context(install_mode=True)
imd = self.env['ir.model.data'].sudo()
# The algorithm below partitions 'data_list' into three sets: the ones
# to create, the ones to update, and the others. For each set, we assign
# data['record'] for each data. All those records are then retrieved for
# the result.
# determine existing xml_ids
xml_ids = [data['xml_id'] for data in data_list if data.get('xml_id')]
existing = {
("%s.%s" % row[1:3]): row
for row in imd._lookup_xmlids(xml_ids, self)
}
# determine which records to create and update
to_create = [] # list of data
to_update = [] # list of data
imd_data_list = [] # list of data for _update_xmlids()
for data in data_list:
xml_id = data.get('xml_id')
if not xml_id:
vals = data['values']
if vals.get('id'):
data['record'] = self.browse(vals['id'])
to_update.append(data)
elif not update:
to_create.append(data)
continue
row = existing.get(xml_id)
if not row:
to_create.append(data)
continue
d_id, d_module, d_name, d_model, d_res_id, d_noupdate, r_id = row
record = self.browse(d_res_id)
if r_id:
data['record'] = record
imd_data_list.append(data)
if not (update and d_noupdate):
to_update.append(data)
else:
imd.browse(d_id).unlink()
to_create.append(data)
# update existing records
for data in to_update:
data['record']._load_records_write(data['values'])
# check for records to create with an XMLID from another module
module = self.env.context.get('install_module')
if module:
prefix = module + "."
for data in to_create:
if data.get('xml_id') and not data['xml_id'].startswith(prefix):
_logger.warning("Creating record %s in module %s.", data['xml_id'], module)
# create records
records = self._load_records_create([data['values'] for data in to_create])
for data, record in zip(to_create, records):
data['record'] = record
if data.get('xml_id'):
# add XML ids for parent records that have just been created
for parent_model, parent_field in self._inherits.items():
if not data['values'].get(parent_field):
imd_data_list.append({
'xml_id': f"{data['xml_id']}_{parent_model.replace('.', '_')}",
'record': record[parent_field],
'noupdate': data.get('noupdate', False),
})
imd_data_list.append(data)
# create or update XMLIDs
imd._update_xmlids(imd_data_list, update)
return original_self.concat(*(data['record'] for data in data_list))
# TODO: improve handling of NULL values
@api.model
def _where_calc(self, domain, active_test=True):
    """Build the WHERE-clause Query implementing an OpenERP search domain.

    :param domain: the domain to compute
    :type domain: list
    :param active_test: when ``True``, records whose ``active``/``x_active``
        field is ``False`` are implicitly filtered out, unless the domain
        already references that field
    :return: the query expressing the given domain as provided in domain
    :rtype: osv.query.Query
    """
    apply_active = (
        self._active_name
        and active_test
        and self._context.get('active_test', True)
    )
    if apply_active:
        # item[0] is valid both for (field, op, value) triples and for the
        # '&'/'|'/'!' operator strings
        if not any(item[0] == self._active_name for item in domain):
            domain = [(self._active_name, '=', 1)] + domain
    if not domain:
        return Query(self.env.cr, self._table, self._table_query)
    return expression.expression(domain, self).query
def _check_qorder(self, word):
    """Validate an ORDER BY specification string; raise UserError if malformed."""
    if regex_order.match(word):
        return True
    raise UserError(_(
        'Invalid "order" specified (%s). A valid "order" specification is a comma-separated list of valid field names (optionally followed by asc/desc for the direction)',
        word,
    ))
@api.model
def _apply_ir_rules(self, query, mode='read'):
    """Extend ``query`` with whatever is needed to enforce the applicable
    ir.rule record rules for this model (and its _inherits parents).

    :param query: the current query object
    :param mode: access mode the rules are computed for (default 'read')
    """
    if self.env.su:
        # superuser bypasses record rules entirely
        return
    Rule = self.env['ir.rule']
    # rules defined directly on this model
    own_domain = Rule._compute_domain(self._name, mode)
    if own_domain:
        expression.expression(own_domain, self.sudo(), self._table, query)
    # rules coming from the parents (through _inherits)
    for parent_name in self._inherits:
        parent_domain = Rule._compute_domain(parent_name, mode)
        if not parent_domain:
            continue
        parent_model = self.env[parent_name]
        parent_alias = self._inherits_join_add(self, parent_name, query)
        expression.expression(parent_domain, parent_model.sudo(), parent_alias, query)
@api.model
def _generate_translated_field(self, table_alias, field, query):
    """Produce the SQL expression reading ``field``, joining the
    ir_translation table into ``query`` when a language is active so
    that translated values take precedence over the raw column.

    :return: the qualified field name (or expression) to use for ``field``
    """
    if not self.env.lang:
        # no active language: read the raw column directly
        return '"%s"."%s"' % (table_alias, field)
    alias = query.left_join(
        table_alias, 'id', 'ir_translation', 'res_id', field,
        extra='"{rhs}"."type" = \'model\' AND "{rhs}"."name" = %s AND "{rhs}"."lang" = %s AND "{rhs}"."value" != %s',
        extra_params=["%s,%s" % (self._name, field), self.env.lang, ""],
    )
    # fall back to the untranslated column when no translation row matched
    return 'COALESCE("%s"."%s", "%s"."%s")' % (alias, 'value', table_alias, field)
@api.model
def _generate_m2o_order_by(self, alias, order_field, query, reverse_direction, seen):
    """Generate the ORDER BY elements used to sort on a many2one field,
    adding any missing JOINs to ``query`` (including the intermediate
    JOINs needed when the field is inherited).

    :return: the qualified field name(s) to use in an ORDER BY clause to
        sort by ``order_field`` (empty list if the field cannot be used)
    """
    field = self._fields[order_field]
    if field.inherited:
        # add the joins needed to reach the table that actually stores the m2o
        qualified = self._inherits_join_calc(alias, order_field, query)
        alias, order_field = qualified.replace('"', '').split('.', 1)
        field = field.base_field
    assert field.type == 'many2one', 'Invalid field passed to _generate_m2o_order_by()'
    if not field.store:
        _logger.debug("Many2one function/related fields must be stored "
                      "to be used as ordering fields! Ignoring sorting for %s.%s",
                      self._name, order_field)
        return []
    # determine the order spec applicable on the comodel
    comodel = self.env[field.comodel_name]
    comodel_order = comodel._order
    if not regex_order.match(comodel_order):
        # _order is too complex to reuse here; fall back to _rec_name
        comodel_order = comodel._rec_name
    # LEFT OUTER join so rows with a NULL m2o are not excluded from results
    comodel_alias = query.left_join(alias, order_field, comodel._table, 'id', order_field)
    return comodel._generate_order_by_inner(comodel_alias, comodel_order, query,
                                            reverse_direction, seen)
@api.model
def _generate_order_by_inner(self, alias, order_spec, query, reverse_direction=False, seen=None):
    """Translate a comma-separated order spec into a list of SQL ORDER BY
    fragments, adding any missing JOINs (many2one / inherited fields) to
    ``query`` as a side effect.

    :param alias: SQL table alias used to qualify plain column references
    :param order_spec: e.g. ``"name asc, date desc"`` (shape validated below)
    :param query: Query object collecting the required JOINs
    :param reverse_direction: if True, invert each direction (used when
        recursing through a DESC many2one ordering)
    :param seen: set of (model, comodel, field) keys already handled, to
        avoid infinite recursion through many2one order fields
    :return: list of SQL fragments, e.g. ``['"tbl"."name" ASC']``
    :raise ValueError: if the spec references an unknown field
    """
    if seen is None:
        seen = set()
    self._check_qorder(order_spec)  # raises UserError on malformed specs
    order_by_elements = []
    for order_part in order_spec.split(','):
        order_split = order_part.strip().split(' ')
        order_field = order_split[0].strip()
        # direction is optional; '' leaves the database default (ASC)
        order_direction = order_split[1].strip().upper() if len(order_split) == 2 else ''
        if reverse_direction:
            # note: an absent/implicit-ASC direction also flips to DESC
            order_direction = 'ASC' if order_direction == 'DESC' else 'DESC'
        do_reverse = order_direction == 'DESC'
        field = self._fields.get(order_field)
        if not field:
            raise ValueError("Invalid field %r on model %r" % (order_field, self._name))
        if order_field == 'id':
            order_by_elements.append('"%s"."%s" %s' % (alias, order_field, order_direction))
        else:
            if field.inherited:
                field = field.base_field
            if field.store and field.type == 'many2one':
                key = (field.model_name, field.comodel_name, order_field)
                if key not in seen:
                    # recurse into the comodel's own _order, joining as needed
                    seen.add(key)
                    order_by_elements += self._generate_m2o_order_by(alias, order_field, query, do_reverse, seen)
            elif field.store and field.column_type:
                qualifield_name = self._inherits_join_calc(alias, order_field, query)
                if field.type == 'boolean':
                    # make NULL sort together with false for boolean columns
                    qualifield_name = "COALESCE(%s, false)" % qualifield_name
                order_by_elements.append("%s %s" % (qualifield_name, order_direction))
            else:
                _logger.warning("Model %r cannot be sorted on field %r (not a column)", self._name, order_field)
                continue  # ignore non-readable or "non-joinable" fields
    return order_by_elements
@api.model
def _generate_order_by(self, order_spec, query):
    """Construct the ORDER BY clause for ``order_spec`` (comma-separated
    field names, each optionally followed by ASC or DESC), falling back
    to the model's default ``_order`` when the spec is empty.

    :return: the clause string (including ' ORDER BY '), or '' if nothing to sort on
    :raise ValueError: in case order_spec is malformed
    """
    spec = order_spec or self._order
    if not spec:
        return ''
    elements = self._generate_order_by_inner(self._table, spec, query)
    if not elements:
        return ''
    return ' ORDER BY %s ' % ",".join(elements)
@api.model
def _flush_search(self, domain, fields=None, order=None, seen=None):
""" Flush all the fields appearing in `domain`, `fields` and `order`. """
if seen is None:
seen = set()
elif self._name in seen:
return
seen.add(self._name)
to_flush = defaultdict(set) # {model_name: field_names}
if fields:
to_flush[self._name].update(fields)
# also take into account the fields in the record rules
domain = list(domain) + (self.env['ir.rule']._compute_domain(self._name, 'read') or [])
for arg in domain:
if isinstance(arg, str):
continue
if not isinstance(arg[0], str):
continue
model_name = self._name
for fname in arg[0].split('.'):
field = self.env[model_name]._fields.get(fname)
if not field:
break
to_flush[model_name].add(fname)
# DLE P111: `test_message_process_email_partner_find`
# Search on res.users with email_normalized in domain
# must trigger the recompute and flush of res.partner.email_normalized
if field.related_field:
model = self
# DLE P129: `test_transit_multi_companies`
# `self.env['stock.picking'].search([('product_id', '=', product.id)])`
# Should flush `stock.move.picking_ids` as `product_id` on `stock.picking` is defined as:
# `product_id = fields.Many2one('product.product', 'Product', related='move_lines.product_id', readonly=False)`
for f in field.related:
rfield = model._fields.get(f)
if rfield:
to_flush[model._name].add(f)
if rfield.type in ('many2one', 'one2many', 'many2many'):
model = self.env[rfield.comodel_name]
if rfield.type == 'one2many' and rfield.inverse_name:
to_flush[rfield.comodel_name].add(rfield.inverse_name)
if field.comodel_name:
model_name = field.comodel_name
# hierarchy operators need the parent field
if arg[1] in ('child_of', 'parent_of'):
model = self.env[model_name]
if model._parent_store:
to_flush[model_name].add(model._parent_name)
| |
"Ankor Wat: Map 186 to Map 183"],
586: [587, 0, 0, 379, 381, "", b"", False, True, False, "Ankor Wat: Map 186 to Map 187 (W)"],
587: [586, 0, 0, 0, 0, "", b"", False, True, False, "Ankor Wat: Map 187 to Map 186 (W)"],
588: [589, 0, 0, 381, 380, "", b"", False, True, False, "Ankor Wat: Map 187 to Map 186 (E)"],
589: [588, 0, 0, 0, 0, "", b"", False, True, False, "Ankor Wat: Map 186 to Map 187 (E)"],
590: [591, 0, 0, 381, 384, "", b"", False, True, False, "Ankor Wat: Map 187 to Map 188"],
591: [590, 0, 0, 0, 0, "", b"", False, True, False, "Ankor Wat: Map 188 to Map 187"],
592: [593, 0, 0, 393, 386, "", b"", False, True, False, "Ankor Wat: Map 188 to Map 189"],
593: [592, 0, 0, 0, 0, "", b"", False, True, False, "Ankor Wat: Map 189 to Map 188"],
594: [595, 0, 0, 387, 389, "", b"", False, True, False, "Ankor Wat: Map 189 to Map 190 (E)"],
595: [594, 0, 0, 0, 0, "", b"", False, True, False, "Ankor Wat: Map 190 to Map 189 (E)"],
596: [596, 0, 0, 388, 390, "", b"", False, True, False, "Ankor Wat: Map 189 to Map 190 (W)"],
597: [597, 0, 0, 0, 0, "", b"", False, True, False, "Ankor Wat: Map 190 to Map 189 (W)"],
598: [599, 0, 0, 390, 391, "", b"", False, True, False, "Ankor Wat: Map 190 to Map 191"],
599: [598, 0, 0, 0, 0, "", b"", False, True, False, "Ankor Wat: Map 191 to Map 190"],
600: [ 0, 0, 0, 366, 368, "", b"", False, True, False, "Ankor Wat: Map 179 to Map 180 (drop)"],
601: [ 0, 0, 0, 392, 381, "", b"", False, True, False, "Ankor Wat: Map 188 to Map 187 NW-L (drop)"],
602: [ 0, 0, 0, 392, 381, "", b"", False, True, False, "Ankor Wat: Map 188 to Map 187 NW-R (drop)"],
603: [ 0, 0, 0, 392, 383, "", b"", False, True, False, "Ankor Wat: Map 188 to Map 187 NE (drop)"],
604: [ 0, 0, 0, 393, 382, "", b"", False, True, False, "Ankor Wat: Map 188 to Map 187 SW (drop)"],
605: [ 0, 0, 0, 389, 388, "", b"", False, True, False, "Ankor Wat: Map 190 to Map 189 (drop)"],
# Dao
612: [613, 0, 0, 400, 401, "1a27c", b"", False, False, False, "Dao: NW House (in)"],
613: [612, 0, 0, 0, 0, "1a2d2", b"", False, False, False, "Dao: NW House (out)"],
614: [615, 0, 0, 400, 402, "1a288", b"", False, False, False, "Dao: Neil's House (in)"],
615: [614, 0, 0, 0, 0, "1a30a", b"", False, False, False, "Dao: Neil's House (out)"],
616: [617, 0, 0, 400, 403, "1a294", b"", False, False, False, "Dao: Snake Game House (in)"],
617: [616, 0, 0, 0, 0, "1a2ee", b"", False, False, False, "Dao: Snake Game House (out)"],
618: [619, 0, 0, 400, 404, "1a2a0", b"", False, False, False, "Dao: SW House (in)"],
619: [618, 0, 0, 0, 0, "1a2fc", b"", False, False, False, "Dao: SW House (out)"],
620: [621, 0, 0, 400, 405, "1a2ac", b"", False, False, False, "Dao: S House (in)"],
621: [620, 0, 0, 0, 0, "1a2e0", b"", False, False, False, "Dao: S House (out)"],
622: [623, 0, 0, 400, 406, "1a2b8", b"", False, False, False, "Dao: SE House (in)"],
623: [622, 0, 0, 0, 0, "1a318", b"", False, False, False, "Dao: SE House (out)"],
# Pyramid
634: [635, 0, 0, 411, 415, "", b"", False, True, False, "Pyramid: Map 204 to Map 205"],
635: [634, 0, 0, 0, 0, "", b"", False, True, False, "Pyramid: Map 205 to Map 204"],
636: [637, 0, 0, 413, 416, "", b"", False, True, False, "Pyramid: Map 204 to Map 206"], # Room 1
637: [636, 0, 0, 0, 0, "", b"", False, True, False, "Pyramid: Map 206 to Map 204"],
638: [639, 0, 0, 417, 418, "", b"", False, True, False, "Pyramid: Map 206 to Map 207"],
639: [638, 0, 0, 0, 0, "", b"", False, True, False, "Pyramid: Map 207 to Map 206"],
640: [641, 0, 0, 419, 442, "", b"", False, True, False, "Pyramid: Map 207 to Map 218"],
641: [640, 0, 0, 0, 0, "", b"", False, True, False, "Pyramid: Map 218 to Map 207"],
642: [643, 0, 0, 413, 420, "", b"", False, True, False, "Pyramid: Map 204 to Map 208"], # Room 2
643: [642, 0, 0, 0, 0, "", b"", False, True, False, "Pyramid: Map 208 to Map 204"],
644: [645, 0, 0, 421, 422, "", b"", False, True, False, "Pyramid: Map 208 to Map 209"],
645: [644, 0, 0, 0, 0, "", b"", False, True, False, "Pyramid: Map 209 to Map 208"],
646: [647, 0, 0, 423, 443, "", b"", False, True, False, "Pyramid: Map 209 to Map 218"],
647: [646, 0, 0, 0, 0, "", b"", False, True, False, "Pyramid: Map 218 to Map 209"],
648: [649, 0, 0, 413, 431, "", b"", False, True, False, "Pyramid: Map 204 to Map 214"], # Room 3
649: [648, 0, 0, 0, 0, "", b"", False, True, False, "Pyramid: Map 214 to Map 204"],
650: [651, 0, 0, 434, 435, "", b"", False, True, False, "Pyramid: Map 214 to Map 215"],
651: [650, 0, 0, 0, 0, "", b"", False, True, False, "Pyramid: Map 214 to Map 215"],
652: [653, 0, 0, 435, 444, "", b"", False, True, False, "Pyramid: Map 215 to Map 218"],
653: [652, 0, 0, 0, 0, "", b"", False, True, False, "Pyramid: Map 218 to Map 215"],
654: [655, 0, 0, 413, 436, "", b"", False, True, False, "Pyramid: Map 204 to Map 216"], # Room 4
655: [654, 0, 0, 0, 0, "", b"", False, True, False, "Pyramid: Map 216 to Map 204"],
656: [657, 0, 0, 437, 438, "", b"", False, True, False, "Pyramid: Map 216 to Map 217"],
657: [656, 0, 0, 0, 0, "", b"", False, True, False, "Pyramid: Map 217 to Map 216"],
658: [659, 0, 0, 439, 440, "", b"", False, True, False, "Pyramid: Map 217 to Map 219"],
659: [658, 0, 0, 0, 0, "", b"", False, True, False, "Pyramid: Map 219 to Map 217"],
660: [661, 0, 0, 441, 445, "", b"", False, True, False, "Pyramid: Map 219 to Map 218"],
661: [660, 0, 0, 0, 0, "", b"", False, True, False, "Pyramid: Map 218 to Map 219"],
662: [663, 0, 0, 413, 426, "", b"", False, True, False, "Pyramid: Map 204 to Map 212"], # Room 5
663: [662, 0, 0, 0, 0, "", b"", False, True, False, "Pyramid: Map 212 to Map 204"],
664: [665, 0, 0, 429, 430, "", b"", False, True, False, "Pyramid: Map 212 to Map 213"],
665: [664, 0, 0, 0, 0, "", b"", False, True, False, "Pyramid: Map 213 to Map 212"],
666: [667, 0, 0, 430, 446, "", b"", False, True, False, "Pyramid: Map 213 to Map 218"],
667: [666, 0, 0, 0, 0, "", b"", False, True, False, "Pyramid: Map 218 to Map 213"],
668: [669, 0, 0, 413, 424, "", b"", False, True, False, "Pyramid: Map 204 to Map 210"], # Room 6
669: [668, 0, 0, 0, 0, "", b"", False, True, False, "Pyramid: Map 210 to Map 204"],
670: [671, 0, 0, 424, 425, "", b"", False, True, False, "Pyramid: Map 210 to Map 211"],
671: [670, 0, 0, 0, 0, "", b"", False, True, False, "Pyramid: Map 211 to Map 210"],
672: [673, 0, 0, 425, 447, "", b"", False, True, False, "Pyramid: Map 211 to Map 218"],
673: [672, 0, 0, 0, 0, "", b"", False, True, False, "Pyramid: Map 218 to Map 211"],
# Babel
682: [683, 0, | |
OUTbound from the newly-created node.
Note: this is a simpler version of create_node_with_relationships()
EXAMPLE:
create_node_with_children(
labels="PERSON",
properties={"name": "Julian", "city": "Berkeley"},
children_list=[ (123, "EMPLOYS") , (456, "OWNS") ]
)
:param labels: Labels to assign to the newly-created node (a string, possibly empty, or list of strings)
:param children_list: Optional list of pairs of the form (Neo4j ID, relationship name);
use None, or an empty list, to indicate if there aren't any
:param properties: A dictionary of optional properties to assign to the newly-created node
:return: An integer with the Neo4j ID of the newly-created node
"""
assert children_list is None or type(children_list) == list, \
f"The argument `children_list` in create_node_with_children() must be a list or None; instead, it's {type(children_list)}"
if self.debug:
print(f"In create_node_with_children(). labels: {labels}, children_list: {children_list}, node_properties: {properties}")
# Create the new node
new_node_id = self.create_node(labels, properties)
number_properties = "NO" if properties is None else len(properties) # Only used for debugging
if children_list is None or children_list == []:
print(f"\nCreated a new node with ID: {new_node_id}, with {number_properties} attribute(s), and NO children")
return new_node_id
# Add relationships to all children, if any
print(f"\nCreated a new node with ID: {new_node_id}, "
f"with {number_properties} attributes and {len(children_list)} children: ", children_list)
node_match = self.find(neo_id=new_node_id, dummy_node_name="from")
# Add each relationship in turn (TODO: maybe do this with a single Cypher query)
for item in children_list:
assert type(item) == tuple and len(item) == 2, \
f"The list items in `children_list` in create_node_with_children() must be pairs; instead, the following item was seen: {item}"
child_id, rel_name = item
child_match = self.find(neo_id=child_id, dummy_node_name="to")
self.add_edges(match_from=node_match, match_to=child_match, rel_name=rel_name)
return new_node_id
#---------------------------------------------------------------------------------------------------#
# #
# ~ DELETE NODES ~ #
# #
#___________________________________________________________________________________________________#
def delete_nodes(self, match: Union[int, dict]) -> int:
    """
    Delete the node, or set of nodes, specified by the match argument,
    together with all their relationships (DETACH DELETE).

    :param match: EITHER an integer with a Neo4j node id,
                  OR a dictionary of data to identify a node, or set of nodes, as returned by find()
    :return:      The number of nodes deleted (possibly zero)
    """
    # Validate, and possibly create, the match dictionary
    match = CypherUtils.validate_and_standardize(match)
    node, where, data_binding = CypherUtils.unpack_match(match, include_dummy=False)

    cypher = f"MATCH {node} {CypherUtils.prepare_where(where)} DETACH DELETE n"
    self.debug_print(cypher, data_binding, "delete_nodes")

    result_stats = self.update_query(cypher, data_binding)
    return result_stats.get("nodes_deleted", 0)
def delete_nodes_by_label(self, delete_labels=None, keep_labels=None) -> None:
    """
    Empty out (by default completely) the Neo4j database.
    Optionally, only delete nodes with the specified labels, or only keep nodes with the given labels.
    Note: the keep_labels list has higher priority; if a label occurs in both lists, it will be kept.
    IMPORTANT: it does NOT clear indexes; "ghost" labels may remain!

    :param delete_labels: An optional string, or list of strings, indicating specific labels to DELETE
    :param keep_labels:   An optional string or list of strings, indicating specific labels to KEEP
                          (keep_labels has higher priority over delete_labels)
    :return:              None
    """
    if (delete_labels is None) and (keep_labels is None):
        # Delete ALL nodes AND ALL relationships from the database; for efficiency, do it all at once
        q = "MATCH (n) DETACH DELETE(n)"
        self.query(q)
        return

    if not delete_labels:
        # No specific labels to delete were given: consider ALL labels for
        # possible deletion (unless marked as "keep", below)
        delete_labels = self.get_labels()
    elif isinstance(delete_labels, str):
        # was `type(...) == str`: isinstance also accepts str subclasses
        delete_labels = [delete_labels]

    if not keep_labels:
        keep_labels = []
    elif isinstance(keep_labels, str):
        keep_labels = [keep_labels]

    # Delete all nodes with labels in the delete_labels list,
    # EXCEPT for any label in the keep_labels list
    for label in delete_labels:
        if label not in keep_labels:
            q = f"MATCH (x:`{label}`) DETACH DELETE x"
            self.debug_print(q, method="delete_nodes_by_label")
            self.query(q)
def empty_dbase(self, keep_labels=None, drop_indexes=True, drop_constraints=True) -> None:
    """
    Get rid of everything in the database: all the nodes (except, optionally,
    those carrying a label in `keep_labels`) and - unless otherwise specified -
    all the indexes and constraints as well.

    :param keep_labels:      An optional list of strings, indicating specific labels to KEEP
    :param drop_indexes:     Flag indicating whether to also ditch all indexes (by default, True)
    :param drop_constraints: Flag indicating whether to also ditch all constraints (by default, True)
    :return:                 None
    """
    self.delete_nodes_by_label(keep_labels=keep_labels)
    if drop_indexes:
        self.drop_all_indexes(including_constraints=drop_constraints)
#---------------------------------------------------------------------------------------------------#
# #
# ~ MODIFY FIELDS ~ #
# #
#___________________________________________________________________________________________________#
def set_fields(self, match: Union[int, dict], set_dict: dict ) -> int:
    """
    Locate the node(s) specified by `match`, and create/update on them the
    attributes given in `set_dict`.  All other fields are left un-disturbed.

    EXAMPLE - locate the "car" with vehicle id 123 and set its color to white and price to 7000:
        match = find(labels = "car", properties = {"vehicle id": 123})
        set_fields(match=match, set_dict = {"color": "white", "price": 7000})

    TODO: if any field is blank, offer the option drop it altogether from the node,
          with a "REMOVE n.field" statement in Cypher; doing SET n.field = "" doesn't drop it

    :param match:    EITHER an integer with a Neo4j node id,
                     OR a dictionary of data to identify a node, or set of nodes, as returned by find()
    :param set_dict: A dictionary of field name/values to create/update the node's attributes
                     (note: blanks ARE allowed in the keys)
    :return:         The number of properties set
    """
    if set_dict == {}:
        return 0    # Nothing to do

    # Validate, and possibly create, the match dictionary
    match = CypherUtils.validate_and_standardize(match)
    (node, where, data_binding, dummy_node_name) = CypherUtils.unpack_match(match)

    cypher_match = f"MATCH {node} {CypherUtils.prepare_where(where)} "

    # Build one `dummy.`field` = $field_safe` fragment per entry of set_dict,
    # EXTENDING the Cypher data-binding dictionary as we go
    assignments = []
    for field_name, field_value in set_dict.items():
        field_name_safe = field_name.replace(" ", "_")   # To protect against blanks in name, e.g. "end date" -> "end_date"
        assignments.append(f"{dummy_node_name}.`{field_name}` = ${field_name_safe}")   # Example: "n.`field1` = $field1"
        data_binding[field_name_safe] = field_value
    # Example of data_binding at the end of the loop: {'n_par_1': 123, 'n_par_2': 7500, 'color': 'white', 'price': 7000}
    # - the first 2 keys arise from the match (find) operation that locates the node,
    #   while the last 2 are for the use of the SET operation

    set_clause = "SET " + ", ".join(assignments)    # Example: "SET n.`color` = $color, n.`price` = $price"
    cypher = cypher_match + set_clause
    # Example of the final Cypher:
    #   "MATCH (n :`car` {`vehicle id`: $n_par_1, `price`: $n_par_2}) SET n.`color` = $color, n.`price` = $price"

    self.debug_print(cypher, data_binding, "set_fields")
    stats = self.update_query(cypher, data_binding)
    return stats.get("properties_set", 0)
#---------------------------------------------------------------------------------------------------#
# #
# ~ RELATIONSHIPS ~ #
# #
#___________________________________________________________________________________________________#
def get_relationship_types(self) -> [str]:
    """
    Return the names of all the Neo4j relationship types present in the
    database, in no particular order.

    :return: A list of strings
    """
    q = "call db.relationshipTypes() yield relationshipType return relationshipType"
    return [record['relationshipType'] for record in self.query(q)]
def add_edges(self, match_from: Union[int, dict], match_to: Union[int, dict], rel_name:str, rel_props = None) -> int:
"""
Add one or more edges (relationships, with the specified rel_name),
originating in any of the nodes specified by the match_from specifications,
and terminating in any of the nodes specified by the match_to specifications
Return the number of edges added; if none were added, or in case of error, raise an Exception.
Notes: - if a relationship with the same name already exists, nothing gets created (and an Exception is raised)
- more than 1 node could be present in either of the matches
:param match_from: EITHER an integer with a Neo4j node id,
OR a dictionary of data to identify a node, or set of nodes, as returned by find()
:param match_to: EITHER an integer with a Neo4j node id,
OR a dictionary of data to identify a | |
match_expressions is not None:
pulumi.set(__self__, "match_expressions", match_expressions)
if match_labels is not None:
pulumi.set(__self__, "match_labels", match_labels)
@property
@pulumi.getter(name="matchExpressions")
def match_expressions(self) -> Optional[Sequence['outputs.NoSQLInstanceSpecClassSelectorMatchExpressions']]:
    """matchExpressions is a list of label selector requirements. The requirements are ANDed."""
    # read the value stored under the snake_case name by the pulumi machinery
    return pulumi.get(self, "match_expressions")
@property
@pulumi.getter(name="matchLabels")
def match_labels(self) -> Optional[Mapping[str, str]]:
    """matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed."""
    # read the value stored under the snake_case name by the pulumi machinery
    return pulumi.get(self, "match_labels")
def _translate_property(self, prop):
    # Map a camelCase wire name to its snake_case attribute name,
    # falling back to the input when no mapping is known.
    snake_name = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
    return snake_name or prop
@pulumi.output_type
class NoSQLInstanceSpecClassSelectorMatchExpressions(dict):
    """
    A label selector requirement is a selector that contains values, a key,
    and an operator that relates the key and values.
    """

    def __init__(__self__, *,
                 key: str,
                 operator: str,
                 values: Optional[Sequence[str]] = None):
        """
        A label selector requirement is a selector that contains values, a key,
        and an operator that relates the key and values.

        :param str key: key is the label key that the selector applies to.
        :param str operator: operator represents a key's relationship to a set of values.
            Valid operators are In, NotIn, Exists and DoesNotExist.
        :param Sequence[str] values: values is an array of string values. If the operator
            is In or NotIn, the values array must be non-empty. If the operator is Exists
            or DoesNotExist, the values array must be empty. This array is replaced
            during a strategic merge patch.
        """
        pulumi.set(__self__, "key", key)
        pulumi.set(__self__, "operator", operator)
        if values is not None:
            # optional field: only stored when explicitly provided
            pulumi.set(__self__, "values", values)

    @property
    @pulumi.getter
    def key(self) -> str:
        """key is the label key that the selector applies to."""
        return pulumi.get(self, "key")

    @property
    @pulumi.getter
    def operator(self) -> str:
        """operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist."""
        return pulumi.get(self, "operator")

    @property
    @pulumi.getter
    def values(self) -> Optional[Sequence[str]]:
        """values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch."""
        return pulumi.get(self, "values")

    def _translate_property(self, prop):
        # Map camelCase wire names to snake_case attribute names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class NoSQLInstanceSpecResourceRef(dict):
    """
    A ResourceReference specifies an existing managed resource, in any namespace, to which this resource claim should attempt to bind. Omit the resource reference to enable dynamic provisioning using a resource class; the resource reference will be automatically populated by Crossplane.
    """
    def __init__(__self__, *,
                 api_version: Optional[str] = None,
                 field_path: Optional[str] = None,
                 kind: Optional[str] = None,
                 name: Optional[str] = None,
                 namespace: Optional[str] = None,
                 resource_version: Optional[str] = None,
                 uid: Optional[str] = None):
        """
        A ResourceReference specifies an existing managed resource, in any namespace, to which this resource claim should attempt to bind. Omit the resource reference to enable dynamic provisioning using a resource class; the resource reference will be automatically populated by Crossplane.
        :param str api_version: API version of the referent.
        :param str field_path: JSON/Go field access path to a piece of the referent (e.g. "spec.containers{name}").
        :param str kind: Kind of the referent.
        :param str name: Name of the referent.
        :param str namespace: Namespace of the referent.
        :param str resource_version: Specific resourceVersion to which this reference is made, if any.
        :param str uid: UID of the referent.
        """
        # Every field is optional: persist only those actually supplied,
        # in declaration order.
        supplied = (
            ("api_version", api_version),
            ("field_path", field_path),
            ("kind", kind),
            ("name", name),
            ("namespace", namespace),
            ("resource_version", resource_version),
            ("uid", uid),
        )
        for attr_name, attr_value in supplied:
            if attr_value is not None:
                pulumi.set(__self__, attr_name, attr_value)

    @property
    @pulumi.getter(name="apiVersion")
    def api_version(self) -> Optional[str]:
        """API version of the referent."""
        return pulumi.get(self, "api_version")

    @property
    @pulumi.getter(name="fieldPath")
    def field_path(self) -> Optional[str]:
        """
        JSON/Go field access path identifying a piece of the referent object
        instead of the entire object, e.g. "spec.containers{name}" or
        "spec.containers[2]".
        """
        return pulumi.get(self, "field_path")

    @property
    @pulumi.getter
    def kind(self) -> Optional[str]:
        """
        Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
        """
        return pulumi.get(self, "kind")

    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """
        Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def namespace(self) -> Optional[str]:
        """
        Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
        """
        return pulumi.get(self, "namespace")

    @property
    @pulumi.getter(name="resourceVersion")
    def resource_version(self) -> Optional[str]:
        """
        Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
        """
        return pulumi.get(self, "resource_version")

    @property
    @pulumi.getter
    def uid(self) -> Optional[str]:
        """
        UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
        """
        return pulumi.get(self, "uid")

    def _translate_property(self, prop):
        # Map camelCase wire names to snake_case attribute names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class NoSQLInstanceSpecWriteConnectionSecretToRef(dict):
    """
    WriteConnectionSecretToReference names the Secret, in the same namespace
    as this resource claim, that receives any connection details for the
    claim (frequently the endpoint, username and password required to connect
    to the bound managed resource).
    """
    def __init__(__self__, *,
                 name: str):
        """
        :param str name: Name of the secret.
        """
        pulumi.set(__self__, "name", name)

    @property
    @pulumi.getter
    def name(self) -> str:
        """Name of the secret."""
        return pulumi.get(self, "name")

    def _translate_property(self, prop):
        # Map camelCase wire names to snake_case attribute names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class NoSQLInstanceStatus(dict):
    """
    A ResourceClaimStatus represents the observed status of a resource claim.
    """
    def __init__(__self__, *,
                 binding_phase: Optional[str] = None,
                 conditions: Optional[Sequence['outputs.NoSQLInstanceStatusConditions']] = None):
        """
        A ResourceClaimStatus represents the observed status of a resource claim.
        :param str binding_phase: Binding phase of the managed resource or claim.
        :param Sequence['NoSQLInstanceStatusConditionsArgs'] conditions: Conditions of the resource.
        """
        # Both fields are optional; store only the ones actually supplied.
        if binding_phase is not None:
            pulumi.set(__self__, "binding_phase", binding_phase)
        if conditions is not None:
            pulumi.set(__self__, "conditions", conditions)

    @property
    @pulumi.getter(name="bindingPhase")
    def binding_phase(self) -> Optional[str]:
        """
        Phase represents the binding phase of a managed resource or claim. Unbindable resources cannot be bound, typically because they are currently unavailable, or still being created. Unbound resource are available for binding, and Bound resources have successfully bound to another resource.
        """
        return pulumi.get(self, "binding_phase")

    @property
    @pulumi.getter
    def conditions(self) -> Optional[Sequence['outputs.NoSQLInstanceStatusConditions']]:
        """Conditions of the resource."""
        return pulumi.get(self, "conditions")

    def _translate_property(self, prop):
        # Map camelCase wire names to snake_case attribute names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class NoSQLInstanceStatusConditions(dict):
"""
A Condition that may apply to a resource.
"""
def __init__(__self__, *,
last_transition_time: str,
reason: str,
status: str,
type: | |
ret = vmdk_ops.disk_attach(vmdk_path=fullpath, vm=vm[0])
self.assertTrue("Error" in ret)
# detach all the attached disks
for id in range(1, self.max_vol_count + 1):
volName = 'VmdkAttachDetachTestVol' + str(id)
fullpath = os.path.join(self.datastore_path, volName + '.vmdk')
ret = vmdk_ops.disk_detach(vmdk_path=fullpath,
vm=vm[0])
self.assertTrue(ret is None)
class VmdkAuthorizeTestCase(unittest.TestCase):
    """ Unit test for VMDK Authorization """
    # Synthetic VM identity; the authorization checks below are driven by
    # uuid/name only, so no real VM is required.
    vm_uuid = str(uuid.uuid4())
    vm_name = test_utils.generate_test_vm_name()
    tenant1 = None
    # Filled in lazily on first setUp() from the first discovered datastore.
    datastore_name = None
    datastore_url = None

    def setUp(self):
        """ Setup run before each test """
        logging.info("VMDKAuthorizeTest setUp path =%s", path)
        if (not self.datastore_name):
            # Discover an available datastore; abort the test if none exists.
            datastores = vmdk_utils.get_datastores()
            if datastores:
                datastore = datastores[0]
                self.datastore_name = datastore[0]
                self.datastore_url = datastore[1]
                self.datastore_path = datastore[2]
                logging.debug("datastore_name=%s datastore_url=%s datastore_path=%s",
                              self.datastore_name, self.datastore_url, self.datastore_path)
            else:
                logging.error("Cannot find a valid datastore")
                self.assertFalse(True)
        self.auth_mgr = auth_data.AuthorizationDataManager()
        self.auth_mgr.connect()
        # Remove any leftover tenant from a previous (failed) run.
        self.cleanup()

    def cleanup(self):
        # Remove the test tenant, and its volume records, if it exists.
        logging.info("VMDKAuthorizeTest cleanup")
        error_info, exist_tenant = self.auth_mgr.get_tenant('vmdk_auth_test')
        if exist_tenant:
            error_info = self.auth_mgr.remove_tenant(exist_tenant.id, False)
            self.assertEqual(error_info, None)
            error_info = self.auth_mgr.remove_volumes_from_volumes_table(exist_tenant.id)
            self.assertEqual(error_info, None)

    def tearDown(self):
        logging.info("VMDKAuthorizeTest tearDown path =%s", path)
        self.cleanup()

    def test_vmdkop_authorize(self):
        """ Test vmdkop authorize """
        # End-to-end walk through the privilege checks: no create privilege,
        # per-volume size limit, and cumulative usage quota.
        vms = [(self.vm_uuid, self.vm_name)]
        privileges = []
        error_info, tenant1 = self.auth_mgr.create_tenant(name='vmdk_auth_test',
                                                          description='Tenant used to vmdk_auth_test',
                                                          vms=vms,
                                                          privileges=privileges)
        self.assertEqual(error_info, None)
        self.assertTrue(uuid.UUID(tenant1.id))
        # test CMD_CREATE without "create_volume" set
        privileges = [{'datastore_url': self.datastore_url,
                       'allow_create': 0,
                       'max_volume_size': 500,
                       'usage_quota': 1000}]
        error_info = tenant1.set_datastore_access_privileges(self.auth_mgr.conn, privileges)
        self.assertEqual(error_info, None)
        opts={u'size': u'100MB', u'fstype': u'ext4'}
        error_info, tenant_uuid, tenant_name = auth.authorize(vm_uuid=self.vm_uuid,
                                                              datastore_url=self.datastore_url,
                                                              cmd=auth.CMD_CREATE,
                                                              opts=opts,
                                                              privilege_ds_url=self.datastore_url)
        self.assertEqual(error_info, "No create privilege")
        # set "create_volume" privilege to true
        privileges = [{'datastore_url': self.datastore_url,
                       'allow_create': 1,
                       'max_volume_size': 500,
                       'usage_quota': 1000}]
        error_info = tenant1.set_datastore_access_privileges(self.auth_mgr.conn, privileges)
        self.assertEqual(error_info, None)
        error_info, tenant_uuid, tenant_name = auth.authorize(vm_uuid=self.vm_uuid,
                                                              datastore_url=self.datastore_url,
                                                              cmd=auth.CMD_CREATE,
                                                              opts=opts,
                                                              privilege_ds_url=self.datastore_url)
        self.assertEqual(error_info, None)
        if not error_info:
            # Record the 100MB volume so it counts against the 1000MB quota.
            error_info = auth.add_volume_to_volumes_table(tenant1.id, self.datastore_url, "VmdkAuthorizeTestVol1", 100)
            self.assertEqual(error_info, None)
        opts={u'size': u'600MB', u'fstype': u'ext4'}
        error_info, tenant_uuid, tenant_name = auth.authorize(vm_uuid=self.vm_uuid,
                                                              datastore_url=self.datastore_url,
                                                              cmd=auth.CMD_CREATE,
                                                              opts=opts,
                                                              privilege_ds_url=self.datastore_url)
        # create a volume with 600MB which exceed the"max_volume_size", command should fail
        self.assertEqual(error_info, "Volume size exceeds the max volume size limit")
        opts={u'size': u'500MB', u'fstype': u'ext4'}
        error_info, tenant_uuid, tenant_name = auth.authorize(vm_uuid=self.vm_uuid,
                                                              datastore_url=self.datastore_url,
                                                              cmd=auth.CMD_CREATE,
                                                              opts=opts,
                                                              privilege_ds_url=self.datastore_url)
        self.assertEqual(error_info, None)
        if not error_info:
            # Usage is now 100 + 500 = 600MB of the 1000MB quota.
            error_info = auth.add_volume_to_volumes_table(tenant1.id, self.datastore_url, "VmdkAuthorizeTestVol2", 500)
            self.assertEqual(error_info, None)
        opts={u'size': u'500MB', u'fstype': u'ext4'}
        error_info, tenant_uuid, tenant_name = auth.authorize(vm_uuid=self.vm_uuid,
                                                              datastore_url=self.datastore_url,
                                                              cmd=auth.CMD_CREATE,
                                                              opts=opts,
                                                              privilege_ds_url=self.datastore_url)
        # 600 + 500 > 1000MB quota, so this create must be rejected.
        self.assertEqual(error_info, "The total volume size exceeds the usage quota")
        # delete volume
        error_info, tenant_uuid, tenant_name = auth.authorize(vm_uuid=self.vm_uuid,
                                                              datastore_url=self.datastore_url,
                                                              cmd=auth.CMD_REMOVE,
                                                              opts=opts,
                                                              privilege_ds_url=self.datastore_url)
        self.assertEqual(error_info, None)
        # remove the tenant
        error_info = self.auth_mgr.remove_tenant(tenant1.id, False)
        self.assertEqual(error_info, None)
        error_info = self.auth_mgr.remove_volumes_from_volumes_table(tenant1.id)
        self.assertEqual(error_info, None)
class VmdkTenantTestCase(unittest.TestCase):
    """ Unit test for VMDK ops for multi-tenancy """
    # Volume names created under the built-in DEFAULT tenant.
    default_tenant_vol1_name = "default_tenant_vol1"
    default_tenant_vol2_name = "default_tenant_vol2"
    default_tenant_vol3_name = "default_tenant_vol3"
    default_tenant_vol4_name = "default_tenant_vol4"
    default_tenant_vols = [default_tenant_vol1_name, default_tenant_vol2_name,
                           default_tenant_vol3_name, default_tenant_vol4_name]
    # tenant1 info
    tenant1_name = "test_tenant1"
    vm1_name = test_utils.generate_test_vm_name()
    vm1 = None
    tenant1_vol1_name = 'tenant1_vol1'
    tenant1_vol2_name = 'tenant1_vol2'
    tenant1_vol3_name = 'tenant1_vol3'
    vm1_config_path = None
    tenant1_new_name = "new_test_tenant1"
    vm3_name = test_utils.generate_test_vm_name()
    vm3 = None
    # tenant2 info
    tenant2_name = "test_tenant2"
    vm2_name = test_utils.generate_test_vm_name()
    vm2 = None
    tenant2_vol1_name = 'tenant2_vol1'
    tenant2_vol2_name = 'tenant2_vol2'
    tenant2_vol3_name = 'tenant2_vol3'
    vm2_config_path = None
    # Datastore details discovered in setUp(); a second datastore (the
    # "datastore1_*" attributes) is optional and enables extra test paths.
    datastore_name = None
    datastore_path = None
    datastore1_name = None
    datastore1_path = None
def setUp(self):
    """
    Setup run before each test.

    Discovers the available datastore(s), removes leftovers from prior
    runs, creates the helper VMs (vm1/vm2, and vm3 on the second datastore
    when one exists), then creates the DEFAULT tenant plus two empty test
    tenants.
    """
    logging.info("VMDKTenantTest setUp path =%s", path)
    if (not self.datastore_name):
        datastores = vmdk_utils.get_datastores()
        if datastores:
            datastore = datastores[0]
            self.datastore_name = datastore[0]
            self.datastore_path = datastore[2]
            logging.debug("datastore_name=%s datastore_path=%s", self.datastore_name,
                          self.datastore_path)
            if len(datastores) > 1:
                datastore1 = datastores[1]
                self.datastore1_name = datastore1[0]
                # BUG FIX: this previously read
                #     self.datastoer1_path = datastore[2]
                # i.e. a misspelled attribute name populated from the FIRST
                # datastore's tuple, leaving self.datastore1_path as None
                # (the log line below printed None for it).
                self.datastore1_path = datastore1[2]
                logging.debug("Found second datastore: datastore_name=%s datastore_path=%s",
                              self.datastore1_name, self.datastore1_path)
        else:
            logging.error("Cannot find a valid datastore")
            self.assertFalse(True)
    self.cleanup()
    # get service_instance, and create VMs
    si = vmdk_ops.get_si()
    error, self.vm1 = test_utils.create_vm(si=si,
                                           vm_name=self.vm1_name,
                                           datastore_name=self.datastore_name)
    if error:
        self.assertFalse(True)
    self.vm1_config_path = vmdk_utils.get_vm_config_path(self.vm1_name)
    logging.info("VmdkTenantTestCase: create vm1 name=%s Done", self.vm1_name)
    error, self.vm2 = test_utils.create_vm(si=si,
                                           vm_name=self.vm2_name,
                                           datastore_name=self.datastore_name)
    if error:
        self.assertFalse(True)
    self.vm2_config_path = vmdk_utils.get_vm_config_path(self.vm2_name)
    logging.info("VmdkTenantTestCase: create vm2 name=%s Done", self.vm2_name)
    if self.datastore1_name:
        # create a VM on the second datastore
        error, self.vm3 = test_utils.create_vm(si=si,
                                               vm_name=self.vm3_name,
                                               datastore_name=self.datastore1_name)
        if error:
            self.assertFalse(True)
        self.vm3_config_path = vmdk_utils.get_vm_config_path(self.vm3_name)
        logging.info("VmdkTenantTestCase: create vm3 name=%s Done", self.vm3_name)
    # create DEFAULT tenant and privilege if missing
    test_utils.create_default_tenant_and_privileges(self)
    # create tenant1 without adding any vms and privileges
    name = self.tenant1_name
    vm_list = None
    description = "Test tenant1"
    privileges = []
    error_info, tenant = auth_api._tenant_create(
        name=name,
        default_datastore=auth_data_const.VM_DS,
        description=description,
        vm_list=vm_list,
        privileges=privileges)
    self.assertEqual(None, error_info)
    # create tenant2 without adding any vms and privileges
    name = self.tenant2_name
    vm_list = None
    description = "Test tenant2"
    privileges = []
    error_info, tenant = auth_api._tenant_create(
        name=name,
        default_datastore=auth_data_const.VM_DS,
        description=description,
        vm_list=vm_list,
        privileges=privileges)
    self.assertEqual(None, error_info)
def tearDown(self):
    """ Cleanup after each test """
    logging.info("VMDKTenantTest tearDown path")
    # Same cleanup as the start of setUp(): volumes, tenants, helper VMs.
    self.cleanup()
def cleanup(self):
    """Remove volumes, tenants and helper VMs left over by test runs."""
    logging.info("VMDKTenantTest cleanup")
    # Drop any DEFAULT-tenant test volume that still exists on disk.
    if self.datastore_path:
        default_tenant_path = os.path.join(self.datastore_path,
                                           auth_data_const.DEFAULT_TENANT_UUID)
        for vol_name in self.default_tenant_vols:
            vmdk_path = vmdk_utils.get_vmdk_path(default_tenant_path, vol_name)
            lookup = vmdk_ops.getVMDK(vmdk_path, vol_name, self.datastore_name)
            if "Error" not in lookup:
                logging.debug("cleanup: remove volume %s", vmdk_path)
                vmdk_ops.removeVMDK(vmdk_path)
    # Remove every tenant the tests may have created (or renamed to).
    for tenant_name in (self.tenant1_name, self.tenant1_new_name, self.tenant2_name):
        test_utils.cleanup_tenant(tenant_name)
    # Remove the helper VMs created in setUp().
    si = vmdk_ops.get_si()
    for vm in (self.vm1, self.vm2, self.vm3):
        if vm:
            test_utils.remove_vm(si, vm)
def test_vmdkops_on_default_tenant_vm(self):
""" Test vmdk life cycle on a VM which belongs to DEFAULT tenant """
# This test test the following cases:
# 1. DEFAULT tenant, privilege to datastore "_ALL_DS", and privilege to datastore "_VM_DS""
# are present, vmdk_ops from VM which is not owned by any tenant, vmdk_ops should succeed, and the volumes
# will be created in the _VM_DS
# 2. change the default_datastore for DEFAULT tenant, volume create with short name should be created in the
# default datastore instead of VM datastore
# 3. Only privilege to "_VM_DS" present, "create volume" should succed
# 4. REMOVE DEFAULT tenant, "create volume" should fail
# run create, attach, detach, remove command when DEFAULT tenant and privileges to "_ALL_DS" and "_VM_DS" are present
# This is the case after user fresh install
logging.info("test_vmdkops_on_default_tenant_vm")
vm1_uuid = vmdk_utils.get_vm_uuid_by_name(self.vm1_name)
# run create command
opts={u'size': u'100MB', u'fstype': u'ext4'}
error_info = vmdk_ops.executeRequest(vm1_uuid, self.vm1_name, self.vm1_config_path, auth.CMD_CREATE, self.default_tenant_vol1_name, opts)
self.assertEqual(None, error_info)
# test attach a volume
opts={}
result = vmdk_ops.executeRequest(vm1_uuid, self.vm1_name, self.vm1_config_path, auth.CMD_ATTACH, self.default_tenant_vol1_name, opts)
self.assertFalse("Error" in result)
# test detach a volume
error_info = vmdk_ops.executeRequest(vm1_uuid, self.vm1_name, self.vm1_config_path, auth.CMD_DETACH, self.default_tenant_vol1_name, opts)
self.assertEqual(None, error_info)
# run remove command
opts = {}
error_info = vmdk_ops.executeRequest(vm1_uuid, self.vm1_name, self.vm1_config_path, auth.CMD_REMOVE, self.default_tenant_vol1_name, opts)
self.assertEqual(None, error_info)
# create a volume with default size
# default_datastore for _DEFAULT tenant is set to "VM_DS", a volume will be
# tried to create on the vm_datastore, which is self.datastore_name
opts={u'fstype': u'ext4'}
error_info = vmdk_ops.executeRequest(vm1_uuid, self.vm1_name, self.vm1_config_path, auth.CMD_CREATE, self.default_tenant_vol1_name, opts)
self.assertEqual(None, error_info)
# Only run this test if second datastore exists
if self.datastore1_name:
# Change the "default_datastore" of _DEFAULT tenant to second datastore "self.datastore1_name"
# Then the full access privilege will be created to this new "default_datastore"
error_info = auth_api._tenant_update(name=auth_data_const.DEFAULT_TENANT,
default_datastore=self.datastore1_name,
)
# create a volume with default size
# default_datastore is set for _DEFAULT tenant, a volume will be
# tried to create on the default_datastore, which is "self.datastore1_name"
opts={u'fstype': u'ext4'}
error_info = vmdk_ops.executeRequest(vm1_uuid, self.vm1_name, self.vm1_config_path, auth.CMD_CREATE, self.default_tenant_vol2_name, opts)
self.assertEqual(None, error_info)
# list volumes
opts = {}
result = vmdk_ops.executeRequest(vm1_uuid, self.vm1_name, self.vm1_config_path, 'list', None, opts)
# there should be two volumes "default_tenant1_vol1" and "default_tenant_vol2"
self.assertEqual(len(result), 2)
self.assertEqual(self.default_tenant_vol1_name + "@" + self.datastore_name, result[0]['Name'])
self.assertEqual(self.default_tenant_vol2_name + "@" + self.datastore1_name, result[1]['Name'])
# remove privilege to datastore "self.datastore1_name"
error_info = auth_api._tenant_access_rm(name=auth_data_const.DEFAULT_TENANT,
datastore=self.datastore1_name)
# datastore "self.datastore1_name" is still the "default_datastore" for _DEFAULT tenant, cannot remove
self.assertNotEqual(None, error_info)
# change the "default_datastore" for _DEFAULT tenant to "_ALL_DS", which should fail
# "_ALL_DS" cannot be set as "default_datastore"
error_info = auth_api._tenant_update(name=auth_data_const.DEFAULT_TENANT,
default_datastore=auth_data_const.ALL_DS)
self.assertNotEqual(None, error_info)
# set the "default_datastore" for _DEFAULT tenant to "_VM_DS", which should succeed
error_info = auth_api._tenant_update(name=auth_data_const.DEFAULT_TENANT,
default_datastore=auth_data_const.VM_DS)
self.assertEqual(None, error_info)
# remove the access privilege to "self.datastore1_name"
error_info = auth_api._tenant_access_rm(auth_data_const.DEFAULT_TENANT, self.datastore1_name)
self.assertEqual(None, error_info)
# remove "_ALL_DS"" privileges, and run create command, which should succeed since we still have
# access privilege to "_VM_DS"
error_info = auth_api._tenant_access_rm(auth_data_const.DEFAULT_TENANT, auth_data_const.ALL_DS)
self.assertEqual(None, error_info)
opts={u'size': u'100MB', u'fstype': u'ext4'}
error_info = vmdk_ops.executeRequest(vm1_uuid, self.vm1_name, self.vm1_config_path, auth.CMD_CREATE, self.default_tenant_vol3_name, opts)
self.assertEqual(None, error_info)
# try to create volume on a different datastore, which should fail
if self.datastore1_name:
full_vol_name = self.default_tenant_vol4_name + "@" + self.datastore1_name
opts={u'size': u'100MB', u'fstype': u'ext4'}
error_info = vmdk_ops.executeRequest(vm1_uuid, self.vm1_name, self.vm1_config_path, auth.CMD_CREATE, full_vol_name, opts)
self.assertNotEqual(None, error_info)
# remove "_VM_DS" privilege, which should fail since "_VM_DS"
# is the "default_datastore" | |
def metadata(self):
return self.ll_table.metadata
@property
def metadata_offset(self):
    # Row offsets into the flattened ``metadata`` column (delegates to the
    # low-level table).
    return self.ll_table.metadata_offset
def add_row(self, metadata=None):
    """
    Adds a new row to this :class:`PopulationTable` and returns the ID of the
    corresponding population.
    :param bytes metadata: The binary-encoded metadata for the new population.
        If not specified or None, a zero-length byte string is stored.
    :return: The ID of the newly added population.
    :rtype: int
    """
    # Delegate to the low-level table, which returns the new row's ID.
    return self.ll_table.add_row(metadata=metadata)
def __str__(self):
    """Tab-separated text dump: a header line, then one line per row."""
    decoded = unpack_bytes(self.metadata, self.metadata_offset)
    lines = ["id\tmetadata"]
    for row_id in range(self.num_rows):
        encoded = base64.b64encode(decoded[row_id]).decode('utf8')
        lines.append("{}\t{}".format(row_id, encoded))
    # join() gives the same output as the original concatenation loop,
    # without a trailing newline.
    return "\n".join(lines)
def copy(self):
    """
    Returns a deep copy of this table.
    """
    duplicate = PopulationTable()
    duplicate.set_columns(
        metadata=self.metadata, metadata_offset=self.metadata_offset)
    return duplicate
def set_columns(self, metadata=None, metadata_offset=None):
    # Replace the table's columns with the given arrays (delegates to the
    # low-level table, which receives them as a dict).
    self.ll_table.set_columns(
        dict(metadata=metadata, metadata_offset=metadata_offset))
def append_columns(self, metadata=None, metadata_offset=None):
    # Append the given column arrays to the existing data (delegates to the
    # low-level table, which receives them as a dict).
    self.ll_table.append_columns(
        dict(metadata=metadata, metadata_offset=metadata_offset))
def asdict(self):
    # Column arrays keyed by name; the keys match set_columns() kwargs, so
    # ``table.set_columns(**other.asdict())`` round-trips.
    return {
        "metadata": self.metadata,
        "metadata_offset": self.metadata_offset,
    }
class ProvenanceTable(BaseTable):
    """
    A table recording the provenance (i.e., history) of this table, so that
    the origin of the underlying data and the sequence of subsequent
    operations can be traced. Each row contains a "record" string
    (recommended format: JSON) and a timestamp.

    .. todo::
        The format of the `record` field will be more precisely specified in
        the future.

    :ivar record: The flattened array containing the record strings.
        :ref:`sec_tables_api_text_columns` for more details.
    :vartype record: numpy.ndarray, dtype=np.int8
    :ivar record_offset: The array of offsets into the record column. See
        :ref:`sec_tables_api_text_columns` for more details.
    :vartype record_offset: numpy.ndarray, dtype=np.uint32
    :ivar timestamp: The flattened array containing the timestamp strings.
        :ref:`sec_tables_api_text_columns` for more details.
    :vartype timestamp: numpy.ndarray, dtype=np.int8
    :ivar timestamp_offset: The array of offsets into the timestamp column.
        See :ref:`sec_tables_api_text_columns` for more details.
    :vartype timestamp_offset: numpy.ndarray, dtype=np.uint32
    """
    def __init__(self, max_rows_increment=0, ll_table=None):
        # Create a fresh low-level table unless the caller supplied one
        # (the TableCollection properties pass in an existing ll_table).
        if ll_table is None:
            ll_table = _tskit.ProvenanceTable(max_rows_increment=max_rows_increment)
        super().__init__(ll_table, ProvenanceTableRow)

    @property
    def record(self):
        return self.ll_table.record

    @property
    def record_offset(self):
        return self.ll_table.record_offset

    @property
    def timestamp(self):
        return self.ll_table.timestamp

    @property
    def timestamp_offset(self):
        return self.ll_table.timestamp_offset

    def add_row(self, record, timestamp=None):
        """
        Adds a new row to this ProvenanceTable consisting of the specified record and
        timestamp. If timestamp is not specified, it is automatically generated from
        the current time.
        :param str record: A provenance record, describing the parameters and
            environment used to generate the current set of tables.
        :param str timestamp: A string timestamp. This should be in ISO8601 form.
        """
        if timestamp is None:
            timestamp = datetime.datetime.now().isoformat()
        # The low-level signature puts timestamp first; we expose (record,
        # timestamp) so that "add this record now" is the simplest call.
        return self.ll_table.add_row(record=record, timestamp=timestamp)

    def set_columns(
            self, timestamp=None, timestamp_offset=None,
            record=None, record_offset=None):
        # Replace all columns at once; the low-level call takes a dict.
        columns = dict(
            timestamp=timestamp, timestamp_offset=timestamp_offset,
            record=record, record_offset=record_offset)
        self.ll_table.set_columns(columns)

    def append_columns(
            self, timestamp=None, timestamp_offset=None,
            record=None, record_offset=None):
        # Append to the existing columns; the low-level call takes a dict.
        columns = dict(
            timestamp=timestamp, timestamp_offset=timestamp_offset,
            record=record, record_offset=record_offset)
        self.ll_table.append_columns(columns)

    def __str__(self):
        """Tab-separated text dump: a header line, then one line per row."""
        timestamps = unpack_strings(self.timestamp, self.timestamp_offset)
        records = unpack_strings(self.record, self.record_offset)
        lines = ["id\ttimestamp\trecord"]
        for row_id in range(self.num_rows):
            lines.append(
                "{}\t{}\t{}".format(row_id, timestamps[row_id], records[row_id]))
        return "\n".join(lines)

    def copy(self):
        """
        Returns a deep copy of this table.
        """
        # asdict()'s keys are exactly set_columns()'s keyword arguments.
        duplicate = ProvenanceTable()
        duplicate.set_columns(**self.asdict())
        return duplicate

    def asdict(self):
        return {
            "timestamp": self.timestamp,
            "timestamp_offset": self.timestamp_offset,
            "record": self.record,
            "record_offset": self.record_offset,
        }
class TableCollection(object):
"""
A collection of mutable tables defining a tree sequence. See the
:ref:`sec_data_model` section for definition on the various tables
and how they together define a :class:`TreeSequence`. Arbitrary
data can be stored in a TableCollection, but there are certain
:ref:`requirements <sec_valid_tree_sequence_requirements>` that must be
satisfied for these tables to be interpreted as a tree sequence.
To obtain a :class:`TreeSequence` instance corresponding to the current
state of a ``TableCollection``, please use the :meth:`.tree_sequence`
method.
:ivar individuals: The individual table.
:vartype individuals: IndividualTable
:ivar nodes: The node table.
:vartype nodes: NodeTable
:ivar edges: The edge table.
:vartype edges: EdgeTable
:ivar migrations: The migration table.
:vartype migrations: MigrationTable
:ivar sites: The site table.
:vartype sites: SiteTable
:ivar mutations: The mutation table.
:vartype mutations: MutationTable
:ivar populations: The population table.
:vartype populations: PopulationTable
:ivar provenances: The provenance table.
:vartype provenances: ProvenanceTable
:ivar sequence_length: The sequence length defining the coordinate
space.
:vartype sequence_length: float
:ivar file_uuid: The UUID for the file this TableCollection is derived
from, or None if not derived from a file.
:vartype file_uuid: str
"""
def __init__(self, sequence_length=0):
    # All state lives in the low-level collection; the table properties of
    # this class construct thin Python wrappers around it on demand.
    self.ll_tables = _tskit.TableCollection(sequence_length)
@property
def individuals(self):
    # Typed wrapper around the shared low-level individual table.
    return IndividualTable(ll_table=self.ll_tables.individuals)
@property
def nodes(self):
    # Typed wrapper around the shared low-level node table.
    return NodeTable(ll_table=self.ll_tables.nodes)
@property
def edges(self):
    # Typed wrapper around the shared low-level edge table.
    return EdgeTable(ll_table=self.ll_tables.edges)
@property
def migrations(self):
    # Typed wrapper around the shared low-level migration table.
    return MigrationTable(ll_table=self.ll_tables.migrations)
@property
def sites(self):
    # Typed wrapper around the shared low-level site table.
    return SiteTable(ll_table=self.ll_tables.sites)
@property
def mutations(self):
    # Typed wrapper around the shared low-level mutation table.
    return MutationTable(ll_table=self.ll_tables.mutations)
@property
def populations(self):
    # Typed wrapper around the shared low-level population table.
    return PopulationTable(ll_table=self.ll_tables.populations)
@property
def provenances(self):
    # Typed wrapper around the shared low-level provenance table.
    return ProvenanceTable(ll_table=self.ll_tables.provenances)
@property
def sequence_length(self):
    # Delegates to the low-level collection, which owns this value.
    return self.ll_tables.sequence_length
@sequence_length.setter
def sequence_length(self, sequence_length):
    # Write-through to the low-level collection.
    self.ll_tables.sequence_length = sequence_length
@property
def file_uuid(self):
    # UUID of the file this collection was derived from, or None when not
    # file-derived (see the class docstring).
    return self.ll_tables.file_uuid
def asdict(self):
    """
    Returns a dictionary representation of this TableCollection.

    Note: the semantics of this method changed at tskit 1.0.0. Previously a
    map of table names to the tables themselves was returned.
    """
    # Each table contributes its own column-array dict; together with
    # sequence_length this is the full picklable state (see __getstate__).
    return {
        "sequence_length": self.sequence_length,
        "individuals": self.individuals.asdict(),
        "nodes": self.nodes.asdict(),
        "edges": self.edges.asdict(),
        "migrations": self.migrations.asdict(),
        "sites": self.sites.asdict(),
        "mutations": self.mutations.asdict(),
        "populations": self.populations.asdict(),
        "provenances": self.provenances.asdict(),
    }
def __banner(self, title):
    """Return a 3-line '#' banner framing *title*, with a trailing newline."""
    width = 60
    rule = "#" * width
    # Pad the title line to width-1 then close it with '#'; ljust() is a
    # no-op when the title is too long, matching the original behaviour.
    title_row = "# {}".format(title).ljust(width - 1) + "#"
    return "\n".join((rule, title_row, rule)) + "\n"
def __iter__(self):
    """
    Iterate over all the tables in this TableCollection, ordered by table name
    (i.e. deterministically), returning a tuple of (table_name, table_object).
    """
    # Lazily look up each table so wrappers are only built as consumed.
    for table_name in ('edges', 'individuals', 'migrations', 'mutations',
                       'nodes', 'populations', 'provenances', 'sites'):
        yield table_name, getattr(self, table_name)
def __str__(self):
    """Render every table under a '#' banner, in fixed display order."""
    sections = (
        ("Individuals", self.individuals),
        ("Nodes", self.nodes),
        ("Edges", self.edges),
        ("Sites", self.sites),
        ("Mutations", self.mutations),
        ("Migrations", self.migrations),
        ("Populations", self.populations),
        ("Provenances", self.provenances),
    )
    # Each banner already ends with a newline, so joining the per-section
    # chunks with "\n" reproduces the original layout exactly (no trailing
    # newline after the last table).
    return "\n".join(
        self.__banner(title) + str(table) for title, table in sections)
def __eq__(self, other):
    """Equal iff *other* is exactly a TableCollection with equal tables."""
    # Exact-type check, then delegate the column comparison to the
    # low-level library.
    if type(other) is not type(self):
        return False
    return bool(self.ll_tables.equals(other.ll_tables))
def __ne__(self, other):
    # Logical negation of __eq__.
    return not self.__eq__(other)
def __getstate__(self):
    # Pickle support: the dict form captures the collection's state.
    return self.asdict()
# Unpickle support
def __setstate__(self, state):
    """Rebuild an empty collection, then restore each table's columns."""
    self.__init__(state["sequence_length"])
    for table_name in ("individuals", "nodes", "edges", "migrations",
                       "sites", "mutations", "populations", "provenances"):
        getattr(self, table_name).set_columns(**state[table_name])
@classmethod
def fromdict(cls, tables_dict):
    """
    Construct a table collection from a dictionary representation, as
    produced by :meth:`.asdict`.

    :param dict tables_dict: Mapping with a "sequence_length" entry plus one
        column-dict per table.
    :return: A new table collection populated from *tables_dict*.
    """
    # Fixed: the first parameter was misleadingly named "self" and the
    # body hard-coded TableCollection; using "cls" lets subclasses obtain
    # instances of their own type from this alternate constructor, with
    # identical behaviour for TableCollection itself.
    tables = cls(tables_dict["sequence_length"])
    for table_name in ("individuals", "nodes", "edges", "migrations",
                       "sites", "mutations", "populations", "provenances"):
        getattr(tables, table_name).set_columns(**tables_dict[table_name])
    return tables
def tree_sequence(self):
"""
Returns a :class:`TreeSequence` instance with the structure defined by the
tables in this :class:`TableCollection`. If the table collection is not
in canonical form (i.e., does not meet sorting requirements) or cannot be
interpreted as a tree sequence an exception is raised. The
:meth:`.sort` method may be used to ensure that input sorting requirements
are met.
:return: A :class:`TreeSequence` instance reflecting the structures
defined in this set of tables.
:rtype: .TreeSequence
"""
return tskit.TreeSequence.load_tables(self)
def simplify(
self, samples=None,
filter_zero_mutation_sites=None, # Deprecated alias for filter_sites
reduce_to_site_topology=False,
filter_populations=True, filter_individuals=True, filter_sites=True,
keep_unary=False):
"""
Simplifies the tables in place to retain only the information necessary
to reconstruct the tree sequence describing the given ``samples``.
This will change the ID of the nodes, so that the node
``samples[k]`` will have ID ``k`` in the result. The resulting
NodeTable will have only the first ``len(samples)`` individuals marked
as samples. The mapping from node IDs in the current set of tables to
their equivalent values in the simplified tables is also returned as a
numpy array. If an array ``a`` is returned by this function and ``u``
is the ID of a node in the input table, then ``a[u]`` is the ID of this
node in the output table. For any node ``u`` that is not mapped into
the output tables, this mapping will equal ``-1``.
Tables operated on by this function must: be sorted (see
:meth:`TableCollection.sort`)), have children be born strictly after their
parents, and the intervals on which any individual is a child must be
disjoint. Other than this the tables need not satisfy remaining
requirements to specify a valid tree sequence (but the resulting tables
will).
Please see the :meth:`TreeSequence.simplify` method for a description
of the remaining parameters.
| |
0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.442343,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 5.07281,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.11944,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.296502,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.584603,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.284335,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.458622,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.231497,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.974454,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.235569,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 5.34863,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.110444,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0119263,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.133415,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0882022,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.243859,
'Execution Unit/Register Files/Runtime Dynamic': 0.100129,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.310934,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.71516,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 2.49162,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.000653142,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.000653142,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000568611,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000219968,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00126703,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00314193,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00627208,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0847911,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 5.39344,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.186987,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.287989,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 7.87371,
'Instruction Fetch Unit/Runtime Dynamic': 0.569181,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.124973,
'L2/Runtime Dynamic': 0.0322292,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 3.04771,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.952234,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0585769,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0585769,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 3.32432,
'Load Store Unit/Runtime Dynamic': 1.29969,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.144441,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.288881,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0512625,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.053132,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.335344,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.030676,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.579513,
'Memory Management Unit/Runtime Dynamic': 0.0838079,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 20.8406,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.290527,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.0163641,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.141027,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
| |
npy_int32 const [] Bp, npy_int32 const [] Bi,
unsigned short const [] Bx, npy_int32 [] Cp, npy_int32 [] Ci, unsigned short [] Cx)
csc_eldiv_csc(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Ai,
int const [] Ax, npy_int32 const [] Bp, npy_int32 const [] Bi, int const [] Bx,
npy_int32 [] Cp, npy_int32 [] Ci, int [] Cx)
csc_eldiv_csc(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Ai,
unsigned int const [] Ax, npy_int32 const [] Bp, npy_int32 const [] Bi,
unsigned int const [] Bx, npy_int32 [] Cp, npy_int32 [] Ci, unsigned int [] Cx)
csc_eldiv_csc(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Ai,
long long const [] Ax, npy_int32 const [] Bp, npy_int32 const [] Bi,
long long const [] Bx, npy_int32 [] Cp, npy_int32 [] Ci, long long [] Cx)
csc_eldiv_csc(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Ai,
unsigned long long const [] Ax, npy_int32 const [] Bp, npy_int32 const [] Bi,
unsigned long long const [] Bx, npy_int32 [] Cp, npy_int32 [] Ci,
unsigned long long [] Cx)
csc_eldiv_csc(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Ai,
float const [] Ax, npy_int32 const [] Bp, npy_int32 const [] Bi,
float const [] Bx, npy_int32 [] Cp, npy_int32 [] Ci, float [] Cx)
csc_eldiv_csc(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Ai,
double const [] Ax, npy_int32 const [] Bp, npy_int32 const [] Bi,
double const [] Bx, npy_int32 [] Cp, npy_int32 [] Ci, double [] Cx)
csc_eldiv_csc(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Ai,
long double const [] Ax, npy_int32 const [] Bp, npy_int32 const [] Bi,
long double const [] Bx, npy_int32 [] Cp, npy_int32 [] Ci, long double [] Cx)
csc_eldiv_csc(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Ai,
npy_cfloat_wrapper const [] Ax, npy_int32 const [] Bp, npy_int32 const [] Bi,
npy_cfloat_wrapper const [] Bx, npy_int32 [] Cp, npy_int32 [] Ci,
npy_cfloat_wrapper [] Cx)
csc_eldiv_csc(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Ai,
npy_cdouble_wrapper const [] Ax, npy_int32 const [] Bp, npy_int32 const [] Bi,
npy_cdouble_wrapper const [] Bx, npy_int32 [] Cp, npy_int32 [] Ci,
npy_cdouble_wrapper [] Cx)
csc_eldiv_csc(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Ai,
npy_clongdouble_wrapper const [] Ax, npy_int32 const [] Bp, npy_int32 const [] Bi,
npy_clongdouble_wrapper const [] Bx, npy_int32 [] Cp, npy_int32 [] Ci,
npy_clongdouble_wrapper [] Cx)
csc_eldiv_csc(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const [] Ap, npy_int64 const [] Ai,
npy_bool_wrapper const [] Ax, npy_int64 const [] Bp, npy_int64 const [] Bi,
npy_bool_wrapper const [] Bx, npy_int64 [] Cp, npy_int64 [] Ci,
npy_bool_wrapper [] Cx)
csc_eldiv_csc(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const [] Ap, npy_int64 const [] Ai,
signed char const [] Ax, npy_int64 const [] Bp, npy_int64 const [] Bi,
signed char const [] Bx, npy_int64 [] Cp, npy_int64 [] Ci, signed char [] Cx)
csc_eldiv_csc(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const [] Ap, npy_int64 const [] Ai,
unsigned char const [] Ax, npy_int64 const [] Bp, npy_int64 const [] Bi,
unsigned char const [] Bx, npy_int64 [] Cp, npy_int64 [] Ci, unsigned char [] Cx)
csc_eldiv_csc(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const [] Ap, npy_int64 const [] Ai,
short const [] Ax, npy_int64 const [] Bp, npy_int64 const [] Bi,
short const [] Bx, npy_int64 [] Cp, npy_int64 [] Ci, short [] Cx)
csc_eldiv_csc(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const [] Ap, npy_int64 const [] Ai,
unsigned short const [] Ax, npy_int64 const [] Bp, npy_int64 const [] Bi,
unsigned short const [] Bx, npy_int64 [] Cp, npy_int64 [] Ci, unsigned short [] Cx)
csc_eldiv_csc(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const [] Ap, npy_int64 const [] Ai,
int const [] Ax, npy_int64 const [] Bp, npy_int64 const [] Bi, int const [] Bx,
npy_int64 [] Cp, npy_int64 [] Ci, int [] Cx)
csc_eldiv_csc(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const [] Ap, npy_int64 const [] Ai,
unsigned int const [] Ax, npy_int64 const [] Bp, npy_int64 const [] Bi,
unsigned int const [] Bx, npy_int64 [] Cp, npy_int64 [] Ci, unsigned int [] Cx)
csc_eldiv_csc(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const [] Ap, npy_int64 const [] Ai,
long long const [] Ax, npy_int64 const [] Bp, npy_int64 const [] Bi,
long long const [] Bx, npy_int64 [] Cp, npy_int64 [] Ci, long long [] Cx)
csc_eldiv_csc(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const [] Ap, npy_int64 const [] Ai,
unsigned long long const [] Ax, npy_int64 const [] Bp, npy_int64 const [] Bi,
unsigned long long const [] Bx, npy_int64 [] Cp, npy_int64 [] Ci,
unsigned long long [] Cx)
csc_eldiv_csc(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const [] Ap, npy_int64 const [] Ai,
float const [] Ax, npy_int64 const [] Bp, npy_int64 const [] Bi,
float const [] Bx, npy_int64 [] Cp, npy_int64 [] Ci, float [] Cx)
csc_eldiv_csc(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const [] Ap, npy_int64 const [] Ai,
double const [] Ax, npy_int64 const [] Bp, npy_int64 const [] Bi,
double const [] Bx, npy_int64 [] Cp, npy_int64 [] Ci, double [] Cx)
csc_eldiv_csc(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const [] Ap, npy_int64 const [] Ai,
long double const [] Ax, npy_int64 const [] Bp, npy_int64 const [] Bi,
long double const [] Bx, npy_int64 [] Cp, npy_int64 [] Ci, long double [] Cx)
csc_eldiv_csc(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const [] Ap, npy_int64 const [] Ai,
npy_cfloat_wrapper const [] Ax, npy_int64 const [] Bp, npy_int64 const [] Bi,
npy_cfloat_wrapper const [] Bx, npy_int64 [] Cp, npy_int64 [] Ci,
npy_cfloat_wrapper [] Cx)
csc_eldiv_csc(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const [] Ap, npy_int64 const [] Ai,
npy_cdouble_wrapper const [] Ax, npy_int64 const [] Bp, npy_int64 const [] Bi,
npy_cdouble_wrapper const [] Bx, npy_int64 [] Cp, npy_int64 [] Ci,
npy_cdouble_wrapper [] Cx)
csc_eldiv_csc(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const [] Ap, npy_int64 const [] Ai,
npy_clongdouble_wrapper const [] Ax, npy_int64 const [] Bp, npy_int64 const [] Bi,
npy_clongdouble_wrapper const [] Bx, npy_int64 [] Cp, npy_int64 [] Ci,
npy_clongdouble_wrapper [] Cx)
"""
return _csc.csc_eldiv_csc(*args)
def csc_plus_csc(*args):
"""
csc_plus_csc(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Ai,
npy_bool_wrapper const [] Ax, npy_int32 const [] Bp, npy_int32 const [] Bi,
npy_bool_wrapper const [] Bx, npy_int32 [] Cp, npy_int32 [] Ci,
npy_bool_wrapper [] Cx)
csc_plus_csc(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Ai,
signed char const [] Ax, npy_int32 const [] Bp, npy_int32 const [] Bi,
signed char const [] Bx, npy_int32 [] Cp, npy_int32 [] Ci, signed char [] Cx)
csc_plus_csc(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Ai,
unsigned char const [] Ax, npy_int32 const [] Bp, npy_int32 const [] Bi,
unsigned char const [] Bx, npy_int32 [] Cp, npy_int32 [] Ci, unsigned char [] Cx)
csc_plus_csc(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Ai,
short const [] Ax, npy_int32 const [] Bp, npy_int32 const [] Bi,
short const [] Bx, npy_int32 [] Cp, npy_int32 [] Ci, short [] Cx)
csc_plus_csc(npy_int32 const n_row, npy_int32 const n_col, npy_int32 | |
from typing import Union
class Profile():
"""This class contains all API endpoints for the Profile category of the Retail World of Warcraft API
:param api: An instance of our generic API object
:type api: API
"""
def __init__(self, api):
"""Constructor method
"""
self.api = api
async def getProfileApiResource(self, namespace:str, endpoint:str, params:dict=None) -> Union[dict, None]:
"""Generic method for retrieving data from a Profile API endpoint
:param namespace: The namespace of the resource we're trying to access
:type namespace: str
:param endpoint: The API endpoint of the resource we're trying to access
:type endpoint: str
:param params: Parameters to send with the request, defaults to None
:type params: dict, optional
:return: The result of the API request (Warning: Can be None/Null)
:rtype: dict
"""
region = await self.api.getRegion()
locale = await self.api.getLocale()
hostname = await self.api.getHostname()
token = await self.api.getAccessToken()
if params is None:
params = {}
params["namespace"] = namespace.format(region=region),
params["locale"] = locale,
params["access_token"] = token
return await self.api.getResource(hostname, endpoint, params)
#region Character Achievements API
async def getCharAchievementsSummary(self, character:str, realm:str) -> Union[dict, None]:
"""Returns a summary of the achievements a character has completed.
:param character: The lowercase name of the character.
:type character: str
:param realm: The slug of the realm.
:type realm: str
:return: The API response in JSON format/a dict object
:rtype: dict
"""
endpoint = f"/profile/wow/character/{str(realm).lower()}/{str(character).lower()}/achievements"
namespace = 'profile-{region}'
return await self.getProfileApiResource(namespace, endpoint)
async def getCharAchievementStatistics(self, character:str, realm:str) -> Union[dict, None]:
"""Returns a character's statistics as they pertain to achievements.
:param character: The lowercase name of the character.
:type character: str
:param realm: The slug of the realm.
:type realm: str
:return: The API response in JSON format/a dict object
:rtype: dict
"""
endpoint = f"/profile/wow/character/{str(realm).lower()}/{str(character).lower()}/achievements/statistics"
namespace = 'profile-{region}'
return await self.getProfileApiResource(namespace, endpoint)
#endregion
#region Character Appearance API
async def getCharAppearanceSummary(self, character:str, realm:str) -> Union[dict, None]:
"""Returns a summary of a character's appearance settings.
:param character: The lowercase name of the character.
:type character: str
:param realm: The slug of the realm.
:type realm: str
:return: The API response in JSON format/a dict object
:rtype: dict
"""
endpoint = f"/profile/wow/character/{str(realm).lower()}/{str(character).lower()}/appearance"
namespace = 'profile-{region}'
return await self.getProfileApiResource(namespace, endpoint)
#endregion
#region Character Collections API
async def getCharCollectionsIndex(self, character:str, realm:str) -> Union[dict, None]:
"""Returns an index of collection types for a character.
:param character: The lowercase name of the character.
:type character: str
:param realm: The slug of the realm.
:type realm: str
:return: The API response in JSON format/a dict object
:rtype: dict
"""
endpoint = f"/profile/wow/character/{str(realm).lower()}/{str(character).lower()}/collections"
namespace = 'profile-{region}'
return await self.getProfileApiResource(namespace, endpoint)
async def getCharMountsCollectionSummary(self, character:str, realm:str) -> Union[dict, None]:
"""Returns a summary of the mounts a character has obtained.
:param character: The lowercase name of the character.
:type character: str
:param realm: The slug of the realm.
:type realm: str
:return: The API response in JSON format/a dict object
:rtype: dict
"""
endpoint = f"/profile/wow/character/{str(realm).lower()}/{str(character).lower()}/collections/mounts"
namespace = 'profile-{region}'
return await self.getProfileApiResource(namespace, endpoint)
async def getCharPetsCollectionSummary(self, character:str, realm:str) -> Union[dict, None]:
"""Returns a summary of the battle pets a character has obtained.
:param character: The lowercase name of the character.
:type character: str
:param realm: The slug of the realm.
:type realm: str
:return: The API response in JSON format/a dict object
:rtype: dict
"""
endpoint = f"/profile/wow/character/{str(realm).lower()}/{str(character).lower()}/collections/pets"
namespace = 'profile-{region}'
return await self.getProfileApiResource(namespace, endpoint)
#endregion
#region Character Encounters API
async def getCharEncountersSummary(self, character:str, realm:str) -> Union[dict, None]:
"""Returns a summary of a character's encounters.
:param character: The lowercase name of the character.
:type character: str
:param realm: The slug of the realm.
:type realm: str
:return: The API response in JSON format/a dict object
:rtype: dict
"""
endpoint = f"/profile/wow/character/{str(realm).lower()}/{str(character).lower()}/encounters"
namespace = 'profile-{region}'
return await self.getProfileApiResource(namespace, endpoint)
async def getCharDungeons(self, character:str, realm:str) -> Union[dict, None]:
"""Returns a summary of a character's completed dungeons.
:param character: The lowercase name of the character.
:type character: str
:param realm: The slug of the realm.
:type realm: str
:return: The API response in JSON format/a dict object
:rtype: dict
"""
endpoint = f"/profile/wow/character/{str(realm).lower()}/{str(character).lower()}/encounters/dungeons"
namespace = 'profile-{region}'
return await self.getProfileApiResource(namespace, endpoint)
async def getCharRaids(self, character:str, realm:str) -> Union[dict, None]:
"""Returns a summary of a character's completed raids.
:param character: The lowercase name of the character.
:type character: str
:param realm: The slug of the realm.
:type realm: str
:return: The API response in JSON format/a dict object
:rtype: dict
"""
endpoint = f"/profile/wow/character/{str(realm).lower()}/{str(character).lower()}/encounters/raids"
namespace = 'profile-{region}'
return await self.getProfileApiResource(namespace, endpoint)
#endregion
#region Character Equipment API
async def getCharEquipmentSummary(self, character:str, realm:str) -> Union[dict, None]:
"""Returns a summary of the items equipped by a character.
:param character: The lowercase name of the character.
:type character: str
:param realm: The slug of the realm.
:type realm: str
:return: The API response in JSON format/a dict object
:rtype: dict
"""
endpoint = f"/profile/wow/character/{str(realm).lower()}/{str(character).lower()}/equipment"
namespace = 'profile-{region}'
return await self.getProfileApiResource(namespace, endpoint)
#endregion
#region Character Hunter Pets API
async def getCharHunterPetsSummary(self, character:str, realm:str) -> Union[dict, None]:
"""If the character is a hunter, returns a summary of the character's hunter pets. Otherwise, returns an HTTP 404 Not Found error.
:param character: The lowercase name of the character.
:type character: str
:param realm: The slug of the realm.
:type realm: str
:return: The API response in JSON format/a dict object
:rtype: dict
"""
endpoint = f"/profile/wow/character/{str(realm).lower()}/{str(character).lower()}/hunter-pets"
namespace = 'profile-{region}'
return await self.getProfileApiResource(namespace, endpoint)
#endregion
#region Character Media API
async def getCharMediaSummary(self, character:str, realm:str) -> Union[dict, None]:
    """Fetch the media assets available for a character (e.g. avatar renders).

    :param character: The lowercase name of the character.
    :type character: str
    :param realm: The slug of the realm.
    :type realm: str
    :return: The API response in JSON format/a dict object
    :rtype: dict
    """
    realm_slug = str(realm).lower()
    char_slug = str(character).lower()
    endpoint = "/profile/wow/character/{}/{}/character-media".format(realm_slug, char_slug)
    namespace = 'profile-{region}'
    return await self.getProfileApiResource(namespace, endpoint)
#endregion
#region Character Mythic Keystone Profile API
async def getCharMythicKeystoneProfileIndex(self, character:str, realm:str) -> Union[dict, None]:
    """Fetch the Mythic Keystone profile index for a character.

    :param character: The lowercase name of the character.
    :type character: str
    :param realm: The slug of the realm.
    :type realm: str
    :return: The API response in JSON format/a dict object
    :rtype: dict
    """
    realm_slug = str(realm).lower()
    char_slug = str(character).lower()
    endpoint = "/profile/wow/character/{}/{}/mythic-keystone-profile".format(realm_slug, char_slug)
    namespace = 'profile-{region}'
    return await self.getProfileApiResource(namespace, endpoint)
async def getCharMythicKeystoneSeasonDetails(self, character:str, realm:str, season_id:str) -> Union[dict, None]:
    """Fetch a character's Mythic Keystone details for one season.

    The API answers 404 Not Found for characters that have not completed a
    Mythic Keystone dungeon in the requested season.

    :param character: The lowercase name of the character.
    :type character: str
    :param realm: The slug of the realm.
    :type realm: str
    :param season_id: The ID of the Mythic Keystone season.
    :type season_id: int
    :return: The API response in JSON format/a dict object
    :rtype: dict
    """
    realm_slug = str(realm).lower()
    char_slug = str(character).lower()
    # season_id is interpolated as-is (int or str both stringify the same way).
    endpoint = "/profile/wow/character/{}/{}/mythic-keystone-profile/season/{}".format(
        realm_slug, char_slug, season_id)
    namespace = 'profile-{region}'
    return await self.getProfileApiResource(namespace, endpoint)
#endregion
#region Character Professions API
async def getCharProfessionsSummary(self, character:str, realm:str) -> Union[dict, None]:
    """Fetch the professions summary for a character.

    :param character: The lowercase name of the character.
    :type character: str
    :param realm: The slug of the realm.
    :type realm: str
    :return: The API response in JSON format/a dict object
    :rtype: dict
    """
    realm_slug = str(realm).lower()
    char_slug = str(character).lower()
    endpoint = "/profile/wow/character/{}/{}/professions".format(realm_slug, char_slug)
    namespace = 'profile-{region}'
    return await self.getProfileApiResource(namespace, endpoint)
#endregion
#region Character Profile API
async def getCharProfileSummary(self, character:str, realm:str) -> Union[dict, None]:
    """Fetch the top-level profile summary for a character.

    :param character: The lowercase name of the character.
    :type character: str
    :param realm: The slug of the realm.
    :type realm: str
    :return: The API response in JSON format/a dict object
    :rtype: dict
    """
    realm_slug = str(realm).lower()
    char_slug = str(character).lower()
    endpoint = "/profile/wow/character/{}/{}".format(realm_slug, char_slug)
    namespace = 'profile-{region}'
    return await self.getProfileApiResource(namespace, endpoint)
async def | |
directory')
dump.add_lines(self.DIRECTORY_ENTRY_RESOURCE.struct.dump())
for resource_type in self.DIRECTORY_ENTRY_RESOURCE.entries:
if resource_type.name is not None:
# name = str(resource_type.name) #.string if resource_type.name.string else ''
dump.add_line(u'Name: [{0}]'.format(
resource_type.name.decode(encoding, 'backslashreplace_')
), 2)
else:
dump.add_line(u'Id: [0x{0:X}] ({1})'.format(
resource_type.struct.Id, RESOURCE_TYPE.get(
resource_type.struct.Id, '-')),
2)
dump.add_lines(resource_type.struct.dump(), 2)
if hasattr(resource_type, 'directory'):
dump.add_lines(resource_type.directory.struct.dump(), 4)
for resource_id in resource_type.directory.entries:
if resource_id.name is not None:
dump.add_line(u'Name: [{0}]'.format(
resource_id.name.decode(
'utf-8', 'backslashreplace_')), 6)
else:
dump.add_line('Id: [0x{0:X}]'.format(resource_id.struct.Id), 6)
dump.add_lines(resource_id.struct.dump(), 6)
if hasattr(resource_id, 'directory'):
dump.add_lines(resource_id.directory.struct.dump(), 8)
for resource_lang in resource_id.directory.entries:
if hasattr(resource_lang, 'data'):
dump.add_line(u'\\--- LANG [%d,%d][%s,%s]' % (
resource_lang.data.lang,
resource_lang.data.sublang,
LANG.get(resource_lang.data.lang, '*unknown*'),
get_sublang_name_for_lang( resource_lang.data.lang, resource_lang.data.sublang ) ), 8)
dump.add_lines(resource_lang.struct.dump(), 10)
dump.add_lines(resource_lang.data.struct.dump(), 12)
if hasattr(resource_id.directory, 'strings') and resource_id.directory.strings:
dump.add_line(u'[STRINGS]' , 10 )
for idx, res_string in list(sorted(resource_id.directory.strings.items())):
dump.add_line( '{0:6d}: {1}'.format(idx,
res_string.encode(
'unicode-escape',
'backslashreplace').decode(
'ascii')),
12)
dump.add_newline()
dump.add_newline()
if ( hasattr(self, 'DIRECTORY_ENTRY_TLS') and
self.DIRECTORY_ENTRY_TLS and
self.DIRECTORY_ENTRY_TLS.struct ):
dump.add_header('TLS')
dump.add_lines(self.DIRECTORY_ENTRY_TLS.struct.dump())
dump.add_newline()
if ( hasattr(self, 'DIRECTORY_ENTRY_LOAD_CONFIG') and
self.DIRECTORY_ENTRY_LOAD_CONFIG and
self.DIRECTORY_ENTRY_LOAD_CONFIG.struct ):
dump.add_header('LOAD_CONFIG')
dump.add_lines(self.DIRECTORY_ENTRY_LOAD_CONFIG.struct.dump())
dump.add_newline()
if hasattr(self, 'DIRECTORY_ENTRY_DEBUG'):
dump.add_header('Debug information')
for dbg in self.DIRECTORY_ENTRY_DEBUG:
dump.add_lines(dbg.struct.dump())
try:
dump.add_line('Type: '+DEBUG_TYPE[dbg.struct.Type])
except KeyError:
dump.add_line(
'Type: 0x{0:x}(Unknown)'.format(dbg.struct.Type))
dump.add_newline()
if dbg.entry:
dump.add_lines(dbg.entry.dump(), 4)
dump.add_newline()
if self.has_relocs():
dump.add_header('Base relocations')
for base_reloc in self.DIRECTORY_ENTRY_BASERELOC:
dump.add_lines(base_reloc.struct.dump())
for reloc in base_reloc.entries:
try:
dump.add_line('%08Xh %s' % (
reloc.rva, RELOCATION_TYPE[reloc.type][16:]), 4)
except KeyError:
dump.add_line('0x%08X 0x%x(Unknown)' % (
reloc.rva, reloc.type), 4)
dump.add_newline()
return dump.get_text()
def dump_dict(self, dump=None):
    """Dump all the PE header information into a dictionary.

    Walks the same structures as the textual dump: DOS/NT/file/optional
    headers, sections, data directories, version info, exports, imports
    (regular, bound and delayed), resources, TLS, load config, debug
    entries and base relocations.

    :param dump: unused; presumably kept for signature parity with the
        text-dump method — TODO confirm before removing.
    :return: dict mapping section names to dumped sub-structures.
    """
    dump_dict = dict()
    # Surface any parsing warnings collected while loading the file.
    warnings = self.get_warnings()
    if warnings:
        dump_dict['Parsing Warnings'] = warnings
    # Core headers.
    dump_dict['DOS_HEADER'] = self.DOS_HEADER.dump_dict()
    dump_dict['NT_HEADERS'] = self.NT_HEADERS.dump_dict()
    dump_dict['FILE_HEADER'] = self.FILE_HEADER.dump_dict()
    # Image characteristics flags set in the file header.
    image_flags = retrieve_flags(IMAGE_CHARACTERISTICS, 'IMAGE_FILE_')
    dump_dict['Flags'] = list()
    for flag in image_flags:
        if getattr(self.FILE_HEADER, flag[0]):
            dump_dict['Flags'].append(flag[0])
    if hasattr(self, 'OPTIONAL_HEADER') and self.OPTIONAL_HEADER is not None:
        dump_dict['OPTIONAL_HEADER'] = self.OPTIONAL_HEADER.dump_dict()
    dll_characteristics_flags = retrieve_flags(DLL_CHARACTERISTICS, 'IMAGE_DLLCHARACTERISTICS_')
    dump_dict['DllCharacteristics'] = list()
    for flag in dll_characteristics_flags:
        if getattr(self.OPTIONAL_HEADER, flag[0]):
            dump_dict['DllCharacteristics'].append(flag[0])
    # Sections: per-section struct dump plus flags, entropy and hashes
    # (hash keys are emitted only when the digest module is available).
    dump_dict['PE Sections'] = list()
    section_flags = retrieve_flags(SECTION_CHARACTERISTICS, 'IMAGE_SCN_')
    for section in self.sections:
        section_dict = section.dump_dict()
        dump_dict['PE Sections'].append(section_dict)
        section_dict['Flags'] = list()
        for flag in section_flags:
            if getattr(section, flag[0]):
                section_dict['Flags'].append(flag[0])
        section_dict['Entropy'] = section.get_entropy()
        if md5 is not None:
            section_dict['MD5'] = section.get_hash_md5()
        if sha1 is not None:
            section_dict['SHA1'] = section.get_hash_sha1()
        if sha256 is not None:
            section_dict['SHA256'] = section.get_hash_sha256()
        if sha512 is not None:
            section_dict['SHA512'] = section.get_hash_sha512()
    # Optional header data directories.
    if (hasattr(self, 'OPTIONAL_HEADER') and
            hasattr(self.OPTIONAL_HEADER, 'DATA_DIRECTORY') ):
        dump_dict['Directories'] = list()
        for idx in range(len(self.OPTIONAL_HEADER.DATA_DIRECTORY)):
            directory = self.OPTIONAL_HEADER.DATA_DIRECTORY[idx]
            dump_dict['Directories'].append(directory.dump_dict())
    # Version information resource (VS_VERSIONINFO / VS_FIXEDFILEINFO /
    # StringFileInfo / VarFileInfo).
    if hasattr(self, 'VS_VERSIONINFO'):
        dump_dict['Version Information'] = list()
        for idx in range(len(self.VS_VERSIONINFO)):
            version_info_list = list()
            version_info_list.append(self.VS_VERSIONINFO[idx].dump_dict())
            if hasattr(self, 'VS_FIXEDFILEINFO'):
                version_info_list.append(self.VS_FIXEDFILEINFO[idx].dump_dict())
            if hasattr(self, 'FileInfo'):
                # NOTE(review): fileinfo_list is populated below but never
                # attached to version_info_list or dump_dict, so its contents
                # are discarded — confirm whether it should be appended.
                fileinfo_list = list()
                for entry in self.FileInfo[idx]:
                    fileinfo_list.append(entry.dump_dict())
                    if hasattr(entry, 'StringTable'):
                        stringtable_dict = dict()
                        for st_entry in entry.StringTable:
                            # NOTE(review): iterating dump_dict() yields its
                            # keys, not formatted lines — verify intent.
                            [fileinfo_list.append(line) for line in st_entry.dump_dict()]
                            stringtable_dict['LangID'] = st_entry.LangID
                            for str_entry in list(st_entry.entries.items()):
                                stringtable_dict[str_entry[0]] = str_entry[1]
                        fileinfo_list.append(stringtable_dict)
                    elif hasattr(entry, 'Var'):
                        for var_entry in entry.Var:
                            var_dict = dict()
                            if hasattr(var_entry, 'entry'):
                                [fileinfo_list.append(line) for line in var_entry.dump_dict()]
                                var_dict[list(var_entry.entry.keys())[0]] = list(
                                    var_entry.entry.values())[0]
                                fileinfo_list.append(var_dict)
            dump_dict['Version Information'].append(version_info_list)
    # Export directory: header struct first, then one dict per symbol.
    if hasattr(self, 'DIRECTORY_ENTRY_EXPORT'):
        dump_dict['Exported symbols'] = list()
        dump_dict['Exported symbols'].append(self.DIRECTORY_ENTRY_EXPORT.struct.dump_dict())
        for export in self.DIRECTORY_ENTRY_EXPORT.symbols:
            export_dict = dict()
            if export.address is not None:
                export_dict.update({'Ordinal': export.ordinal, 'RVA': export.address, 'Name': export.name})
                if export.forwarder:
                    export_dict['forwarder'] = export.forwarder
                dump_dict['Exported symbols'].append(export_dict)
    # Import directory: one sub-list per imported module, descriptor first.
    if hasattr(self, 'DIRECTORY_ENTRY_IMPORT'):
        dump_dict['Imported symbols'] = list()
        for module in self.DIRECTORY_ENTRY_IMPORT:
            import_list = list()
            dump_dict['Imported symbols'].append(import_list)
            import_list.append(module.struct.dump_dict())
            for symbol in module.imports:
                symbol_dict = dict()
                if symbol.import_by_ordinal is True:
                    symbol_dict['DLL'] = module.dll
                    symbol_dict['Ordinal'] = symbol.ordinal
                else:
                    symbol_dict['DLL'] = module.dll
                    symbol_dict['Name'] = symbol.name
                    symbol_dict['Hint'] = symbol.hint
                if symbol.bound:
                    symbol_dict['Bound'] = symbol.bound
                import_list.append(symbol_dict)
    # Bound import descriptors.
    if hasattr(self, 'DIRECTORY_ENTRY_BOUND_IMPORT'):
        dump_dict['Bound imports'] = list()
        for bound_imp_desc in self.DIRECTORY_ENTRY_BOUND_IMPORT:
            bound_imp_desc_dict = dict()
            dump_dict['Bound imports'].append(bound_imp_desc_dict)
            bound_imp_desc_dict.update(bound_imp_desc.struct.dump_dict())
            bound_imp_desc_dict['DLL'] = bound_imp_desc.name
            for bound_imp_ref in bound_imp_desc.entries:
                # NOTE(review): bound_imp_ref_dict is built but never appended
                # anywhere, so forwarder-ref entries are dropped — confirm.
                bound_imp_ref_dict = dict()
                bound_imp_ref_dict.update(bound_imp_ref.struct.dump_dict())
                bound_imp_ref_dict['DLL'] = bound_imp_ref.name
    # Delay-load import directory (same shape as regular imports).
    if hasattr(self, 'DIRECTORY_ENTRY_DELAY_IMPORT'):
        dump_dict['Delay Imported symbols'] = list()
        for module in self.DIRECTORY_ENTRY_DELAY_IMPORT:
            module_list = list()
            dump_dict['Delay Imported symbols'].append(module_list)
            module_list.append(module.struct.dump_dict())
            for symbol in module.imports:
                symbol_dict = dict()
                if symbol.import_by_ordinal is True:
                    symbol_dict['DLL'] = module.dll
                    symbol_dict['Ordinal'] = symbol.ordinal
                else:
                    symbol_dict['DLL'] = module.dll
                    symbol_dict['Name'] = symbol.name
                    symbol_dict['Hint'] = symbol.hint
                if symbol.bound:
                    symbol_dict['Bound'] = symbol.bound
                module_list.append(symbol_dict)
    # Resource directory: three nested levels (type -> id -> language).
    if hasattr(self, 'DIRECTORY_ENTRY_RESOURCE'):
        dump_dict['Resource directory'] = list()
        dump_dict['Resource directory'].append(self.DIRECTORY_ENTRY_RESOURCE.struct.dump_dict())
        for resource_type in self.DIRECTORY_ENTRY_RESOURCE.entries:
            resource_type_dict = dict()
            if resource_type.name is not None:
                resource_type_dict['Name'] = resource_type.name
            else:
                resource_type_dict['Id'] = (
                    resource_type.struct.Id, RESOURCE_TYPE.get(resource_type.struct.Id, '-'))
            resource_type_dict.update(resource_type.struct.dump_dict())
            dump_dict['Resource directory'].append(resource_type_dict)
            if hasattr(resource_type, 'directory'):
                directory_list = list()
                directory_list.append(resource_type.directory.struct.dump_dict())
                dump_dict['Resource directory'].append(directory_list)
                for resource_id in resource_type.directory.entries:
                    resource_id_dict = dict()
                    if resource_id.name is not None:
                        resource_id_dict['Name'] = resource_id.name
                    else:
                        resource_id_dict['Id'] = resource_id.struct.Id
                    resource_id_dict.update(resource_id.struct.dump_dict())
                    directory_list.append(resource_id_dict)
                    if hasattr(resource_id, 'directory'):
                        resource_id_list = list()
                        resource_id_list.append(resource_id.directory.struct.dump_dict())
                        directory_list.append(resource_id_list)
                        for resource_lang in resource_id.directory.entries:
                            if hasattr(resource_lang, 'data'):
                                resource_lang_dict = dict()
                                resource_lang_dict['LANG'] = resource_lang.data.lang
                                resource_lang_dict['SUBLANG'] = resource_lang.data.sublang
                                resource_lang_dict['LANG_NAME'] = LANG.get(resource_lang.data.lang, '*unknown*')
                                resource_lang_dict['SUBLANG_NAME'] = get_sublang_name_for_lang(resource_lang.data.lang, resource_lang.data.sublang)
                                resource_lang_dict.update(resource_lang.struct.dump_dict())
                                resource_lang_dict.update(resource_lang.data.struct.dump_dict())
                                resource_id_list.append(resource_lang_dict)
                        # String-table resources attached to this id level.
                        if hasattr(resource_id.directory, 'strings') and resource_id.directory.strings:
                            for idx, res_string in list(resource_id.directory.strings.items()):
                                resource_id_list.append(res_string.encode(
                                    'unicode-escape',
                                    'backslashreplace').decode(
                                        'ascii'))
    if ( hasattr(self, 'DIRECTORY_ENTRY_TLS') and
            self.DIRECTORY_ENTRY_TLS and
            self.DIRECTORY_ENTRY_TLS.struct ):
        dump_dict['TLS'] = self.DIRECTORY_ENTRY_TLS.struct.dump_dict()
    if ( hasattr(self, 'DIRECTORY_ENTRY_LOAD_CONFIG') and
            self.DIRECTORY_ENTRY_LOAD_CONFIG and
            self.DIRECTORY_ENTRY_LOAD_CONFIG.struct ):
        dump_dict['LOAD_CONFIG'] = self.DIRECTORY_ENTRY_LOAD_CONFIG.struct.dump_dict()
    # Debug directory: type resolved through DEBUG_TYPE when known,
    # raw numeric type otherwise.
    if hasattr(self, 'DIRECTORY_ENTRY_DEBUG'):
        dump_dict['Debug information'] = list()
        for dbg in self.DIRECTORY_ENTRY_DEBUG:
            dbg_dict = dict()
            dump_dict['Debug information'].append(dbg_dict)
            dbg_dict.update(dbg.struct.dump_dict())
            dbg_dict['Type'] = DEBUG_TYPE.get(dbg.struct.Type, dbg.struct.Type)
    # Base relocations: one sub-list per relocation block.
    if self.has_relocs():
        dump_dict['Base relocations'] = list()
        for base_reloc in self.DIRECTORY_ENTRY_BASERELOC:
            base_reloc_list = list()
            dump_dict['Base relocations'].append(base_reloc_list)
            base_reloc_list.append(base_reloc.struct.dump_dict())
            for reloc in base_reloc.entries:
                reloc_dict = dict()
                base_reloc_list.append(reloc_dict)
                reloc_dict['RVA'] = reloc.rva
                try:
                    # Strip the 'IMAGE_REL_BASED_' prefix (16 characters).
                    reloc_dict['Type'] = RELOCATION_TYPE[reloc.type][16:]
                except KeyError:
                    reloc_dict['Type'] = reloc.type
    return dump_dict
# OC Patch
def get_physical_by_rva(self, rva):
    """Return the physical (file) offset backing *rva*, or None if it cannot be mapped."""
    try:
        offset = self.get_offset_from_rva(rva)
    except Exception:
        # Unmappable RVA: callers expect None rather than an exception.
        return None
    return offset
##
# Double-Word get / set
##
def get_data_from_dword(self, dword):
    """Return the four little-endian bytes encoding *dword* (value truncated to 32 bits)."""
    masked = dword & 0xffffffff
    return struct.pack('<L', masked)
def get_dword_from_data(self, data, offset):
    """Convert four bytes of data to a double word (little endian).

    *offset* indexes into *data* as a dword array: element N starts at
    byte N*4. Returns None when the slice would run past the end of *data*.
    """
    start = offset * 4
    end = start + 4
    if end > len(data):
        return None
    return struct.unpack('<I', data[start:end])[0]
def get_dword_at_rva(self, rva):
    """Return the double word value at the given RVA.

    Returns None if the value can't be read, i.e. the RVA can't be mapped
    to a file offset.
    """
    try:
        raw = self.get_data(rva, 4)
    except PEFormatError:
        return None
    return self.get_dword_from_data(raw, 0)
def get_dword_from_offset(self, offset):
    """Return the little-endian double word at file *offset*, or None past end of data."""
    raw = self.__data__
    if offset + 4 > len(raw):
        return None
    return self.get_dword_from_data(raw[offset:offset + 4], 0)
def set_dword_at_rva(self, rva, dword):
    """Write *dword* (little endian) at the file offset corresponding to *rva*."""
    packed = self.get_data_from_dword(dword)
    return self.set_bytes_at_rva(rva, packed)
def set_dword_at_offset(self, offset, dword):
    """Write *dword* (little endian) at the given file offset."""
    packed = self.get_data_from_dword(dword)
    return self.set_bytes_at_offset(offset, packed)
##
# Word get / set
##
def get_data_from_word(self, word):
    """Return the two little-endian bytes encoding *word*."""
    packed = struct.pack('<H', word)
    return packed
def get_word_from_data(self, data, offset):
    """Convert two bytes of data to a word (little endian).

    *offset* indexes into *data* as a word array: element N starts at
    byte N*2. Returns None when the slice would run past the end of *data*.
    """
    start = offset * 2
    end = start + 2
    if end > len(data):
        return None
    return struct.unpack('<H', data[start:end])[0]
def get_word_at_rva(self, rva):
    """Return the word value at the given RVA.

    Returns None if the value can't be read, i.e. the RVA can't be mapped
    to a file offset.
    """
    try:
        # Fetch exactly the two bytes needed instead of slicing a copy of the
        # whole remainder of the mapped data (get_data(rva) with no length);
        # this also matches get_dword_at_rva, which asks for 4 bytes.
        return self.get_word_from_data(self.get_data(rva, 2), 0)
    except PEFormatError:
        return None
def get_word_from_offset(self, offset):
    """Return the little-endian word at file *offset*, or None past end of data."""
    raw = self.__data__
    if offset + 2 > len(raw):
        return None
    return self.get_word_from_data(raw[offset:offset + 2], 0)
def set_word_at_rva(self, rva, word):
    """Write *word* (little endian) at the file offset corresponding to *rva*."""
    packed = self.get_data_from_word(word)
    return self.set_bytes_at_rva(rva, packed)
def set_word_at_offset(self, offset, word):
    """Write *word* (little endian) at the given file offset."""
    packed = self.get_data_from_word(word)
    return self.set_bytes_at_offset(offset, packed)
##
# Quad-Word get / set
##
def get_data_from_qword(self, word):
    """Return the eight little-endian bytes encoding quad-word *word*."""
    packed = struct.pack('<Q', word)
    return packed
def get_qword_from_data(self, data, offset):
"""Convert eight bytes of data to a word (little endian)
'offset' is assumed to index into a word array. So setting it to
N will return a dword out of the data starting at offset N*8.
Returns None if the data can't be turned into a quad word.
"""
if (offset+1)*8 > len(data):
return None
| |
# Source repository: vincenttran-msft/azure-sdk-for-python
# coding: utf-8
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
# TEST SCENARIO COVERAGE
# ----------------------
# Methods Total : 63
# Methods Covered : 63
# Examples Total : 87
# Examples Tested : 87
# Coverage % : 100
# ----------------------
# covered ops:
# action_groups: 7/7
# activity_log_alerts: 6/6
# activity_logs: 1/1
# autoscale_settings: 6/6
# baselines: 1/1
# alert_rule_incidents: 0/2 # TODO: cannot test it in this sub
# alert_rules: 0/6 # TODO: cannot test it in this sub
# baseline: 0/1 # TODO: need check whether it is outdated
# diagnostic_settings: 4/4
# diagnostic_settings_category: 2/2
# guest_diagnostics_settings: 0/6 TODO: InvalidResourceType, it seems like outdated
# guest_diagnostics_settings_association: 0/6 TODO: InvalidResourceType, it seems like outdated
# event_categories: 1/1
# log_profiles: 5/5
# metric_alerts: 6/6
# metric_alerts_status: 2/2
# metric_baseline: 0/2 TODO: bad request
# metric_definitions: 1/1
# metric_namespaces: 1/1
# metrics: 1/1
# operations: 1/1
# scheduled_query_rules: 6/6
# service_diagnostic_settings: 0/3 TODO: InvalidResourceType, it seems like outdated
# tenant_activity_logs: 1/1
# vm_insights: 1/1
import time
import unittest
import os
import azure.mgmt.monitor
import azure.mgmt.monitor.models
import pytest
from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer, recorded_by_proxy
AZURE_LOCATION = 'eastus'
class TestMgmtMonitorClient(AzureMgmtRecordedTestCase):
def setup_method(self, method):
    """Per-test setup.

    Always builds the monitor management client; the auxiliary clients used
    only to provision fixture resources are created in live runs only
    (recorded/playback runs replay canned responses and never call them).
    """
    self.mgmt_client = self.create_mgmt_client(
        azure.mgmt.monitor.MonitorManagementClient
    )
    if self.is_live:
        # Imports are local so playback runs do not require these packages.
        from azure.mgmt.storage import StorageManagementClient
        self.storage_client = self.create_mgmt_client(
            StorageManagementClient
        )
        from azure.mgmt.eventhub import EventHubManagementClient
        self.eventhub_client = self.create_mgmt_client(
            # NOTE(review): referenced via the package path rather than the
            # name imported just above — both resolve to the same class.
            azure.mgmt.eventhub.EventHubManagementClient
        )
        from azure.mgmt.loganalytics import LogAnalyticsManagementClient
        self.loganalytics_client = self.create_mgmt_client(
            LogAnalyticsManagementClient
        )
        from azure.mgmt.web import WebSiteManagementClient
        self.web_client = self.create_mgmt_client(
            WebSiteManagementClient
        )
        from azure.mgmt.compute import ComputeManagementClient
        self.vm_client = self.create_mgmt_client(
            ComputeManagementClient
        )
        from azure.mgmt.network import NetworkManagementClient
        self.network_client = self.create_mgmt_client(
            NetworkManagementClient
        )
        from azure.mgmt.applicationinsights import ApplicationInsightsManagementClient
        self.insight_client = self.create_mgmt_client(
            ApplicationInsightsManagementClient
        )
        from azure.mgmt.logic import LogicManagementClient
        self.logic_client = self.create_mgmt_client(
            LogicManagementClient
        )
def create_workflow(self, group_name, location, workflow_name):
    """Create (or update) an empty Logic Apps workflow and return it."""
    # Minimal trigger-less definition: just enough resource to attach
    # diagnostic settings to in the tests.
    definition = {
        "$schema": "https://schema.management.azure.com/providers/Microsoft.Logic/schemas/2016-06-01/workflowdefinition.json#",
        "contentVersion": "1.0.0.0",
        "parameters": {},
        "triggers": {},
        "actions": {},
        "outputs": {}
    }
    workflow_body = azure.mgmt.logic.models.Workflow(
        location=location,
        definition=definition
    )
    return self.logic_client.workflows.create_or_update(
        group_name,
        workflow_name,
        workflow_body
    )
# use track 1 version
def create_storage_account(self, group_name, location, storage_name):
    """Provision a Standard_LRS storage account and return its ARM resource id."""
    from azure.mgmt.storage import models
    create_params = models.StorageAccountCreateParameters(
        sku=models.Sku(name=models.SkuName.standard_lrs),
        kind=models.Kind.storage,
        location=location
    )
    poller = self.storage_client.storage_accounts.create(
        group_name,
        storage_name,
        create_params,
    )
    # Block until provisioning completes; callers need the final id.
    account = poller.result()
    return account.id
# use eventhub track 1 version
def create_event_hub_authorization_rule(
    self,
    group_name,
    location,
    name_space,
    eventhub,
    authorization_rule,
    storage_account_id
):
    """Provision Event Hubs fixtures used by the diagnostic-settings test.

    Creates, in order: the namespace, an authorization rule on the namespace,
    an event hub with capture into *storage_account_id*, and a same-named
    authorization rule on the event hub. Order matters: each step depends on
    the previous resource existing.
    """
    # NamespaceCreate[put]
    BODY = {
        "sku": {
            "name": "Standard",
            "tier": "Standard"
        },
        "location": location,
        "tags": {
            "tag1": "value1",
            "tag2": "value2"
        }
    }
    result = self.eventhub_client.namespaces.begin_create_or_update(group_name, name_space, BODY)
    # Wait for the namespace before creating anything inside it.
    result.result()
    # NameSpaceAuthorizationRuleCreate[put]
    BODY = {
        "rights": [
            "Listen",
            "Send",
            "Manage"
        ]
    }
    result = self.eventhub_client.namespaces.create_or_update_authorization_rule(group_name, name_space, authorization_rule, BODY["rights"])
    # EventHubCreate[put] — capture enabled, archiving into the given storage account.
    BODY = {
        "message_retention_in_days": "4",
        "partition_count": "4",
        "status": "Active",
        "capture_description": {
            "enabled": True,
            "encoding": "Avro",
            "interval_in_seconds": "120",
            "size_limit_in_bytes": "10485763",
            "destination": {
                "name": "EventHubArchive.AzureBlockBlob",
                "storage_account_resource_id": storage_account_id,
                "blob_container": "container",
                "archive_name_format": "{Namespace}/{EventHub}/{PartitionId}/{Year}/{Month}/{Day}/{Hour}/{Minute}/{Second}"
            }
        }
    }
    result = self.eventhub_client.event_hubs.create_or_update(group_name, name_space, eventhub, BODY)
    # EventHubAuthorizationRuleCreate[put]
    BODY = {
        "rights": [
            "Listen",
            "Send",
            "Manage"
        ]
    }
    result = self.eventhub_client.event_hubs.create_or_update_authorization_rule(group_name, name_space, eventhub, authorization_rule, BODY["rights"])
# use track 1 version
def create_workspace(self, group_name, location, workspace_name):
    """Create a Log Analytics workspace (PerNode sku, 30-day retention) and return it."""
    workspace_params = {
        "sku": {
            "name": "PerNode"
        },
        "retention_in_days": 30,
        "location": location,
        "tags": {
            "tag1": "val1"
        }
    }
    poller = self.loganalytics_client.workspaces.begin_create_or_update(
        group_name,
        workspace_name,
        workspace_params
    )
    return poller.result()
# use track 1 version
def create_site(self, group_name, location, site_name, app_service_plan_name):
    """Create a Web App (Site) in *group_name* and return it.

    NOTE(review): the original passed ``location=self.region`` even though a
    ``location`` argument is supplied by every caller and ``self.region`` is
    not set anywhere in this class — the parameter is now used instead.
    ``app_service_plan_name`` is currently unused because the App Service
    plan (server farm) creation is intentionally disabled; the parameter is
    kept so callers don't break.
    """
    # Create a Site without an explicit server farm (plan creation disabled).
    site_async_operation = self.web_client.web_apps.create_or_update(
        group_name,
        site_name,
        azure.mgmt.web.models.Site(
            location=location,
        )
    )
    site = site_async_operation.result()
    return site
# use track 1 version
def create_virtual_network(self, group_name, location, network_name, subnet_name):
    """Create a 10.0.0.0/16 virtual network plus a 10.0.0.0/24 subnet; return the subnet."""
    vnet_poller = self.network_client.virtual_networks.begin_create_or_update(
        group_name,
        network_name,
        {
            'location': location,
            'address_space': {
                'address_prefixes': ['10.0.0.0/16']
            }
        },
    )
    # The subnet can only be created once the vnet exists.
    vnet_poller.result()
    subnet_poller = self.network_client.subnets.begin_create_or_update(
        group_name,
        network_name,
        subnet_name,
        {'address_prefix': '10.0.0.0/24'}
    )
    return subnet_poller.result()
# use track 1 version
def create_network_interface(self, group_name, location, nic_name, subnet):
    """Create a NIC with a single IP configuration on *subnet*; return the NIC resource id."""
    nic_body = {
        'location': location,
        'ip_configurations': [{
            'name': 'MyIpConfig',
            'subnet': {
                'id': subnet.id
            }
        }]
    }
    nic_poller = self.network_client.network_interfaces.begin_create_or_update(
        group_name,
        nic_name,
        nic_body
    )
    return nic_poller.result().id
# use track 1 version
def create_vm(
    self,
    group_name,
    location,
    vm_name,
    network_name,
    subnet_name,
    interface_name
):
    """Create a Windows VM (with its vnet, subnet and NIC) and return it.

    NOTE(review): the original hard-coded ``"location": "eastus"`` in the VM
    body, silently ignoring the ``location`` parameter (which IS used for the
    network resources); the parameter is now used consistently.
    """
    subnet = self.create_virtual_network(group_name, location, network_name, subnet_name)
    NIC_ID = self.create_network_interface(group_name, location, interface_name, subnet)
    # Create a vm with empty data disks.[put]
    BODY = {
        "location": location,
        "hardware_profile": {
            "vm_size": "Standard_D2_v2"
        },
        "storage_profile": {
            "image_reference": {
                "sku": "2016-Datacenter",
                "publisher": "MicrosoftWindowsServer",
                "version": "latest",
                "offer": "WindowsServer"
            },
            "os_disk": {
                "caching": "ReadWrite",
                "managed_disk": {
                    "storage_account_type": "Standard_LRS"
                },
                "name": "myVMosdisk",
                "create_option": "FromImage"
            },
            "data_disks": [
                {
                    "disk_size_gb": "1023",
                    "create_option": "Empty",
                    "lun": "0"
                },
                {
                    "disk_size_gb": "1023",
                    "create_option": "Empty",
                    "lun": "1"
                }
            ]
        },
        "os_profile": {
            "admin_username": "testuser",
            "computer_name": "myVM",
            # Placeholder scrubbed by the recording sanitizer; live runs must
            # substitute a real secret.
            "admin_password": "<PASSWORD>",
            "windows_configuration": {
                "enable_automatic_updates": True  # need automatic update for reimage
            }
        },
        "network_profile": {
            "network_interfaces": [
                {
                    "id": NIC_ID,
                    "properties": {
                        "primary": True
                    }
                }
            ]
        }
    }
    result = self.vm_client.virtual_machines.create_or_update(group_name, vm_name, BODY)
    return result.result()
# use track 1 version
def create_vmss(
    self,
    group_name,
    location,
    vmss_name,
    network_name,
    subnet_name,
    interface_name
):
    """Create a 2-instance Ubuntu VM scale set (with its vnet/subnet/NIC) and return it.

    NOTE(review): the NIC created just below is never referenced — the scale
    set defines its own NIC configuration inline; confirm whether the
    standalone NIC is still needed.
    """
    subnet = self.create_virtual_network(group_name, location, network_name, subnet_name)
    NIC_ID = self.create_network_interface(group_name, location, interface_name, subnet)
    # Create a scale set with empty data disks on each vm.[put]
    BODY = {
        "sku": {
            "tier": "Standard",
            "capacity": "2",
            "name": "Standard_D1_v2"
        },
        "location": location,
        "overprovision": True,
        "virtual_machine_profile": {
            "storage_profile": {
                "image_reference": {
                    "offer": "UbuntuServer",
                    "publisher": "Canonical",
                    "sku": "18.04-LTS",
                    "version": "latest"
                },
                "os_disk": {
                    "caching": "ReadWrite",
                    "managed_disk": {
                        "storage_account_type": "Standard_LRS"
                    },
                    "create_option": "FromImage",
                    "disk_size_gb": "512"
                }
            },
            "os_profile": {
                "computer_name_prefix": "testPC",
                "admin_username": "testuser",
                # Placeholder scrubbed by the recording sanitizer.
                "admin_password": "<PASSWORD>"
            },
            "network_profile": {
                "network_interface_configurations": [
                    {
                        "name": "testPC",
                        "primary": True,
                        "enable_ipforwarding": True,
                        "ip_configurations": [
                            {
                                "name": "testPC",
                                "properties": {
                                    "subnet": {
                                        "id": subnet.id
                                    }
                                }
                            }
                        ]
                    }
                ]
            }
        },
        "upgrade_policy": {
            "mode": "Manual"
        },
        # NOTE(review): top-level "upgrade_mode" looks redundant with
        # upgrade_policy.mode above — confirm it is actually consumed.
        "upgrade_mode": "Manual"
    }
    result = self.vm_client.virtual_machine_scale_sets.create_or_update(group_name, vmss_name, BODY)
    vmss = result.result()
    return vmss
@unittest.skip('hard to test')
@RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
@recorded_by_proxy
def test_monitor_diagnostic_settings(self, resource_group):
    """Exercise diagnostic_settings and diagnostic_settings_category CRUD
    against a Logic Apps workflow, shipping data to a storage account, a
    Log Analytics workspace and an Event Hub. Currently skipped.
    """
    SUBSCRIPTION_ID = self.get_settings_value("SUBSCRIPTION_ID")
    RESOURCE_GROUP = resource_group.name
    # RESOURCE_URI = "subscriptions/{}/resourcegroups/{}".format(SUBSCRIPTION_ID, RESOURCE_GROUP)
    STORAGE_ACCOUNT_NAME = self.get_resource_name("storageaccountxx")
    NAMESPACE_NAME = self.get_resource_name("namespacexx")
    EVENTHUB_NAME = self.get_resource_name("eventhubxx")
    AUTHORIZATIONRULE_NAME = self.get_resource_name("authorizationrulexx")
    INSIGHT_NAME = self.get_resource_name("insightxx")
    WORKSPACE_NAME = self.get_resource_name("workspacexx")
    WORKFLOW_NAME = self.get_resource_name("workflow")
    if self.is_live:
        # Live run: actually provision the target resources.
        storage_account_id = self.create_storage_account(RESOURCE_GROUP, AZURE_LOCATION, STORAGE_ACCOUNT_NAME)
        self.create_event_hub_authorization_rule(RESOURCE_GROUP, AZURE_LOCATION, NAMESPACE_NAME, EVENTHUB_NAME, AUTHORIZATIONRULE_NAME, storage_account_id)
        workspace = self.create_workspace(RESOURCE_GROUP, AZURE_LOCATION, WORKSPACE_NAME)
        workflow = self.create_workflow(RESOURCE_GROUP, AZURE_LOCATION, WORKFLOW_NAME)
        RESOURCE_URI = workflow.id
        workspace_id = workspace.id
    else:
        # Playback: reconstruct the ids the recordings expect.
        RESOURCE_URI = "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + "/providers/Microsoft.Logic/workflows/" + WORKFLOW_NAME
        workspace_id = "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + "/providers/Microsoft.OperationalInsights/workspaces/" + WORKSPACE_NAME
    # Creates or Updates the diagnostic setting[put]
    BODY = {
        "storage_account_id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + "/providers/Microsoft.Storage/storageAccounts/" + STORAGE_ACCOUNT_NAME + "",
        # "workspace_id": "",
        "workspace_id": workspace_id,
        # "event_hub_authorization_rule_id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + "/providers/microsoft.eventhub/namespaces/" + NAMESPACE_NAME + "/eventhubs/" + EVENTHUB_NAME + "/authorizationrules/" + AUTHORIZATIONRULE_NAME + "",
        # Namespace-level rule is used (not the hub-level one).
        "event_hub_authorization_rule_id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + "/providers/microsoft.eventhub/namespaces/" + NAMESPACE_NAME + "/authorizationrules/" + AUTHORIZATIONRULE_NAME,
        "event_hub_name": EVENTHUB_NAME,
        "metrics": [
            # {
            #     "category": "WorkflowMetrics",
            #     "enabled": True,
            #     "retention_policy": {
            #         "enabled": False,
            #         "days": "0"
            #     }
            # }
        ],
        "logs": [
            {
                "category": "WorkflowRuntime",
                "enabled": True,
                "retention_policy": {
                    "enabled": False,
                    "days": "0"
                }
            }
        ],
        # "log_analytics_destination_type": "Dedicated"
    }
    diagnostic_settings = self.mgmt_client.diagnostic_settings.create_or_update(RESOURCE_URI, INSIGHT_NAME, BODY)
    # TODO: resourceGroups has been changed to resourcegroups
    RESOURCE_URI = "subscriptions/{sub}/resourcegroups/{group}/providers/microsoft.logic/workflows/{workflow}".format(
        sub=SUBSCRIPTION_ID,
        group=RESOURCE_GROUP,
        workflow=WORKFLOW_NAME
    )
    # List diagnostic settings categories
    categories = self.mgmt_client.diagnostic_settings_category.list(RESOURCE_URI)
    # List diagnostic settings[get]
    result = self.mgmt_client.diagnostic_settings.list(RESOURCE_URI)
    # Gets the diagnostic setting[get]
    result = self.mgmt_client.diagnostic_settings.get(RESOURCE_URI, INSIGHT_NAME)
    # Get diagnostic settings category
    self.mgmt_client.diagnostic_settings_category.get(RESOURCE_URI, categories.value[0].name)
    # Deletes the diagnostic setting[delete]
    result = self.mgmt_client.diagnostic_settings.delete(RESOURCE_URI, INSIGHT_NAME)
@unittest.skip('hard to test')
@RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
@recorded_by_proxy
def test_log_profiles(self, resource_group):
SUBSCRIPTION_ID = self.get_settings_value("SUBSCRIPTION_ID")
RESOURCE_GROUP = resource_group.name
LOGPROFILE_NAME = self.get_resource_name("logprofilex")
STORAGE_ACCOUNT_NAME = self.get_resource_name("storageaccountb")
if self.is_live:
storage_account_id = self.create_storage_account(RESOURCE_GROUP, AZURE_LOCATION, STORAGE_ACCOUNT_NAME)
else:
storage_account_id = "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + "/providers/Microsoft.Storage/storageAccounts/" + STORAGE_ACCOUNT_NAME
# Create or update a log profile[put]
BODY = {
"location": "",
"locations": [
"global"
],
"categories": [
"Write",
"Delete",
"Action"
],
"retention_policy": {
"enabled": True,
"days": "3"
},
# "storage_account_id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + | |
formats from self.formats to be used for this column
date_formats_used[j] = list(subformats)
random_idxs = np.array_split(rng.choice(nrow, size=nrow, replace=False), len(subformats)) #randomly split dataframe indices into len(subformats) number of groups
for i in range(len(subformats)): #for each group of indices, apply a different format from subformats
new_col.iloc[random_idxs[i]] = new_df.iloc[random_idxs[i], j].apply(lambda x: x if pd.isna(x) else x.strftime(subformats[i]))
#for each set of random indices, apply a different strftime format
new_df.iloc[:, j] = new_col
end = time()
message += {new_df.columns[k]: v for k, v in date_formats_used.items()}.__repr__()
self.update_history(message, end - start)
return new_df, {}, {}
class DateFormatStainer(DatetimeFormatStainer):
    """Stainer to alter the format of dates for given date columns.

    Subclass of DatetimeFormatStainer.
    """
    #: Set as "date" - only datetime/date columns will be selected for reformatting
    col_type = "date"

    def __init__(self, name="Date Formats", col_idx=None, num_format=2, formats=None):
        """The constructor for DateFormatStainer class.

        Parameters
        ----------
        name : str, optional
            Name of stainer. Default is "Date Formats".
        col_idx : int list, optional
            Column indices that the stainer will operate on. Default is empty list.
        num_format : int, optional
            Number of date formats present within each column. If num_format > number of available formats,
            or num_format == -1, use all formats. Default is 2.
        formats : str list or None, optional
            List of date string format options that the DateFormatStainer chooses from. Use datetime module string formats (e.g. '%d%b%Y').
            If None, a default list of 41 non-ambiguous (month is named) date formats are provided. Default is None.
        """
        # None stands in for "no indices given"; avoids a shared mutable default list.
        if col_idx is None:
            col_idx = []
        if formats is None:  # identity comparison; `== None` is unreliable for arbitrary objects
            # Default formats: 41 total, non-ambiguous because the month is named (%b/%B)
            # in every entry except the final all-numeric '%Y%m%d'.
            formats = [f"{dm_y[0]}{br}{dm_y[1]}" for br in [",", ", ", "-", "/", " "]
                            for m_type in ["%b", "%B"]
                            for d_m in itertools.permutations(["%d", m_type])
                            for d_m_str in [f"{d_m[0]}{br}{d_m[1]}"]
                            for dm_y in itertools.permutations([d_m_str, '%Y'])
                      ] + ['%Y%m%d']
        super().__init__(col_idx=col_idx, name=name, num_format=num_format, formats=formats)
class DatetimeSplitStainer(Stainer):
    """Stainer that splits each given date / datetime columns into 3 columns respectively, representing day, month, and year.

    If a given column's name is 'X', then the respective generated column names are 'X_day', 'X_month', and 'X_year'. If keep_time is True,
    then further generate 'X_hour', 'X_minute', and 'X_second'. Otherwise, only dates will be kept.

    If a column is split, the original column will be dropped.

    For 'X_month' and 'X_year', a format from ['%m', '%B', '%b'], and ['%Y', '%y'] is randomly chosen respectively.
    """
    #: Set as "datetime" - only datetime/date/time columns will be selected for splitting
    col_type = "datetime"

    def __init__(self, name="Datetime Split", col_idx = [], keep_time = True, prob=1.0):
        """The constructor for DatetimeSplitStainer class.

        Parameters
        ----------
        name : str, optional
            Name of stainer. Default is "Datetime Split".
        col_idx : int list, optional
            Column indices that the stainer will operate on. Default is empty list.
        keep_time : boolean, optional
            Whether time component of datetime should be kept, thus 3 new columns are created. Default is True.
        prob : float [0, 1], optional
            Probability that the stainer splits a date column. Probabilities of split for each given date column are independent. Default is 1.

        Raises
        ------
        ValueError
            If prob is outside the range [0, 1].
        """
        super().__init__(name, [], col_idx)
        self.keep_time = keep_time
        if prob < 0 or prob > 1:
            raise ValueError("prob is a probability, it must be in the range [0, 1].")
        else:
            self.prob = prob

    def transform(self, df, rng, row_idx = None, col_idx = None):
        """Applies staining on the given indices in the provided dataframe.

        Parameters
        ----------
        df : pd.DataFrame
            Dataframe to be transformed.
        rng : np.random.BitGenerator
            PCG64 pseudo-random number generator.
        row_idx : int list, optional
            Unused parameter as this stainer does not use row indices.
        col_idx : int list, optional
            Column indices that the stainer will operate on. Will take priority over the class attribute `col_idx`.

        Returns
        -------
        new_df : pd.DataFrame
            Modified dataframe.
        row_map : empty dictionary
            This stainer does not produce any row mappings.
        col_map : dictionary {int: int}
            Column mapping showing the relationship between the original and new column positions.

        Raises
        ------
        KeyError
            If a generated column name (e.g. 'X_day') already exists in the dataframe.
        """
        new_df, row_idx, col_idx = self._init_transform(df, row_idx, col_idx)
        start = time()
        message = f"Split the following date columns: "

        col_map_dct = {j: [] for j in range(df.shape[1])} #initialize column map dictionary; new number of columns is unknown at start.
        j_new = 0 #running column index for output df

        #iterate over all columns, and apply logic only when current column index is in self.col_idx
        for j in range(df.shape[1]):
            if (j not in col_idx) or (rng.random() > self.prob): #current column index not in self.col_idx, or no split due to probability
                col_map_dct[j].append(j_new) #column is carried over unchanged: 1-to-1 mapping
                j_new += 1
            else:
                col_name = df.columns[j]
                message += f"{col_name}, "

                #check to ensure no undetected column name conflict
                if f"{col_name}_day" in new_df.columns:
                    raise KeyError(f"column name: '{col_name}_day' already exists in dataframe.")
                if f"{col_name}_month" in new_df.columns:
                    raise KeyError(f"column name: '{col_name}_month' already exists in dataframe.")
                if f"{col_name}_year" in new_df.columns:
                    raise KeyError(f"column name: '{col_name}_year' already exists in dataframe.")

                month_format = rng.choice(["%m", "%B", "%b"]) #randomly chosen month format
                year_format = rng.choice(["%Y", "%y"]) #randomly chosen year format

                #replace the original column in place with day/month/year columns (NaN values pass through unformatted)
                new_df.drop(col_name, axis=1, inplace=True)
                new_df.insert(j_new, f"{col_name}_day", df[col_name].apply(lambda x: x if pd.isna(x) else x.strftime("%d")))
                new_df.insert(j_new + 1, f"{col_name}_month", df[col_name].apply(lambda x: x if pd.isna(x) else x.strftime(month_format)))
                new_df.insert(j_new + 2, f"{col_name}_year", df[col_name].apply(lambda x: x if pd.isna(x) else x.strftime(year_format)))
                col_map_dct[j].extend([j_new, j_new + 1, j_new + 2])
                j_new += 3

                if self.keep_time:
                    #check to ensure no undetected column name conflict
                    if f"{col_name}_hour" in new_df.columns:
                        raise KeyError(f"column name: '{col_name}_hour' already exists in dataframe.")
                    if f"{col_name}_minute" in new_df.columns:
                        raise KeyError(f"column name: '{col_name}_minute' already exists in dataframe.")
                    if f"{col_name}_second" in new_df.columns:
                        raise KeyError(f"column name: '{col_name}_second' already exists in dataframe.")
                    new_df.insert(j_new, f"{col_name}_hour", df[col_name].apply(lambda x: x if pd.isna(x) else x.strftime("%H")))
                    new_df.insert(j_new + 1, f"{col_name}_minute", df[col_name].apply(lambda x: x if pd.isna(x) else x.strftime("%M")))
                    new_df.insert(j_new + 2, f"{col_name}_second", df[col_name].apply(lambda x: x if pd.isna(x) else x.strftime("%S")))
                    col_map_dct[j].extend([j_new, j_new + 1, j_new + 2])
                    j_new += 3

        #if no column was split, j_new advanced exactly once per column, so the final j equals j_new - 1
        if j == j_new - 1:
            message = "No date columns were split."
        else:
            message = message[:-2] #trim the trailing ", "
        end = time()
        self.update_history(message, end - start)
        return new_df, {}, col_map_dct
class FTransformStainer(Stainer):
"""
Stainer that takes a numerical column and applies a transformation to it. Only works on numerical columns.
If any other column is selected, a type error will be raised.
"""
#: Set as "numeric" - only numeric columns will be selected for transformation
col_type = "numeric"
#: 7 default functions, namely square, cube, sqrt (square root), cubert (cube root), inverse (1/x), ln (natural logarithm), exp (exponential)
function_dict = {"square": lambda x: x**2,
"cube": lambda x: x**3,
"sqrt": lambda x: round(x**0.5, 2),
"cubert": lambda x: round(x**(1/3), 2),
"inverse": lambda x: 1000 if x == 0 else round(1/x, 2),
"ln": lambda x: 0 if x == 0 else round(np.log(x), 2),
"exp": lambda x: round(np.exp(x), 2)}
def __init__(self, deg, name = "Function Transform", col_idx = [], trans_lst = [], trans_dict = {}, scale = False):
"""
The constructor for FTransformStainer class.
Parameters
----------
deg : float (0, 1]
Determines the proportion of selected data that would be transformed
name : str, optional
Name of stainer. Default is "Function Transform"
col_idx : int list, optional
Column indices that the stainer will operate on. Default is empty list.
trans_lst : str list, optional
Names of transformations in function_dict to include in the pool of possible transformations. Default is empty list.
trans_dict : {str : function} dictionary, optional
{Name of transformation: Function} to include in the pool of possible transformations.
Default is empty dictionary. If no transformation has been selected, all default functions will be selected instead.
scale : boolean
If True, will scale the data back to its original range.
Defaults to False
Raises
----------
ValueError
Degree provided is not in the range of (0, 1]
Exception
If multiple functions are given the same name
KeyError
Name provided in trans_lst is not one of the 7 default transformations
TypeError
Invalid column type provided
ZeroDivisionError
Transformation would result in division by zero
"""
super().__init__(name, | |
from utils.vocab import Vocab
from utils.util import initialize_weights, set_seed, count_parameters, save_params, build_vocab
import utils.constants as constants
from utils.loss import LabelSmoothing, LossCompute
from utils.optimizer import NoamOpt
from Dataloader import ParallelDataset, get_dataloader
from Translate import translate
from models.Multitask import Multitask
from layers.Encoder import Encoder
from layers.Decoder import Decoder
import torch
import torch.nn as nn
import math
import os
import time
def build_dataset(source_files, target_files, batch_size, shuffle=False, \
        source_vocabs=None, target_vocabs=None, mtl=False, max_length=180):
    '''
    Builds a dataset and dataloader for every task.

    source_files: path for each source file (each file represents a task)
    target_files: path for each target file (each file represents a task)
    batch_size: the size of the batch
    shuffle: whether to shuffle the dataset
    source_vocabs: the source vocabulary for each file
    target_vocabs: the target vocabulary for each file
    mtl: if True, a specific target vocabulary is used for each dataset sharing the
         source vocab; otherwise a single shared source/target vocabulary is used
    max_length: max length of the source/target lines
    Returns a list with one dataloader per task.
    '''
    loaders = []
    for index, (source_file, target_file) in enumerate(zip(source_files, target_files)):
        # Multitask mode gives each task its own target vocabulary; otherwise all
        # tasks share the first (only) one. The source vocabulary is always shared.
        target_vocab = target_vocabs[index] if mtl else target_vocabs[0]
        dataset = ParallelDataset(source_file, target_file, max_length=max_length, \
                source_vocab=source_vocabs[0], target_vocab=target_vocab)
        loaders.append(get_dataloader(dataset, batch_size, shuffle=shuffle))
    return loaders
def load_model(args, source_vocabs, target_vocabs, device, max_length):
    '''
    Loads a pre-trained model from args.model.

    args: arguments for loading the model
    source_vocabs: the source vocabulary for each file
    target_vocabs: the target vocabulary for each file
    device: if use gpu or cpu
    max_length: max length of a sentence
    '''
    if not args.load_encoder:
        # Restore the full model: encoder and all decoders.
        mtl = build_model(args, source_vocabs, target_vocabs, device, max_length)
        mtl.load_state_dict(torch.load(args.model))
        print("Building an model using the encoder and the decoder ... ")
        return mtl

    # Only reuse the encoder weights from the checkpoint.
    from collections import OrderedDict
    state = torch.load(args.model)
    encoder_state = OrderedDict()
    for key in state:
        if key.startswith("encoder"):
            encoder_state[key.replace("encoder.", "")] = state[key]
    print("Building an model using a pre-trained encoder ... ")
    return build_model(args, source_vocabs, target_vocabs, device, max_length, encoder_state)
def build_model(args, source_vocabs, target_vocabs, device, max_length, encoder=None):
    '''
    Builds a model from scratch or reuses the encoder of a pre-trained model.

    args: arguments for building the model
    source_vocabs: the source vocabulary for each file
    target_vocabs: the target vocabulary for each file
    device: if use gpu or cpu
    max_length: max length of a sentence
    encoder: optional pre-trained encoder state dict
    '''
    enc = Encoder(source_vocabs[0].len(),
                  args.hidden_size,
                  args.encoder_layer,
                  args.encoder_head,
                  args.encoder_ff_size,
                  args.encoder_dropout,
                  device,
                  max_length=max_length).to(device)
    if encoder is not None:
        enc.load_state_dict(encoder)
    else:
        enc.apply(initialize_weights)

    # One decoder per task/target vocabulary.
    decoders = []
    for target_vocab in target_vocabs:
        dec = Decoder(target_vocab.len(),
                      args.hidden_size,
                      args.decoder_layer,
                      args.decoder_head,
                      args.decoder_ff_size,
                      args.decoder_dropout,
                      device,
                      max_length=max_length).to(device)
        if args.tie_embeddings:
            # Share token embeddings and the output projection with the encoder.
            dec.tok_embedding = enc.tok_embedding
            dec.fc_out.weight = enc.tok_embedding.weight
        dec.apply(initialize_weights)
        decoders.append(dec)

    return Multitask(enc, decoders, constants.PAD_IDX, constants.PAD_IDX, device).to(device)
def train_step(model, loader, loss_compute, device, task_id=0):
    '''
    Performs a single training step (one batch).

    model: the model being trained
    loader: dataloader that provides the batches
    loss_compute: function computing the loss from (output, target)
    device: if use gpu or cpu
    task_id: task id that is being trained (0 as default)
    Returns the loss computed on the batch.
    '''
    model.train()
    src, tgt = next(iter(loader))
    src, tgt = src.to(device), tgt.to(device)

    # Teacher forcing: feed the target shifted right, predict the next token.
    output, _ = model(src, tgt[:, :-1], task_id=task_id)
    # output: [batch size, tgt len - 1, output dim]; tgt: [batch size, tgt len]

    vocab_size = output.shape[-1]
    flat_output = output.contiguous().view(-1, vocab_size)
    flat_target = tgt[:, 1:].contiguous().view(-1)
    # flat_output: [batch size * (tgt len - 1), output dim]; flat_target: [batch size * (tgt len - 1)]

    return loss_compute(flat_output, flat_target)
def evaluate(model, loader, loss_compute, device, task_id=0):
    '''
    Evaluates the model over the whole dataset.

    model: the model being evaluated
    loader: dataloader that provides the batches
    loss_compute: function computing the loss from (output, target)
    device: if use gpu or cpu
    task_id: task id that is being trained (0 as default)
    Returns the average loss per batch.
    '''
    model.eval()
    total_loss = 0
    with torch.no_grad():
        for src, tgt in loader:
            src, tgt = src.to(device), tgt.to(device)
            output, _ = model(src, tgt[:, :-1], task_id=task_id)
            # output: [batch size, tgt len - 1, output dim]; tgt: [batch size, tgt len]
            vocab_size = output.shape[-1]
            flat_output = output.contiguous().view(-1, vocab_size)
            flat_target = tgt[:, 1:].contiguous().view(-1)
            # flat shapes: [batch size * (tgt len - 1), output dim] and [batch size * (tgt len - 1)]
            total_loss += loss_compute(flat_output, flat_target)
    return total_loss / len(loader)
def run_translate(model, source_vocab, target_vocabs, save_dir, device, beam_size, filenames, max_length):
    '''
    Translates each input file and writes the decoded outputs to disk.

    model: the model used for translating
    source_vocab: the source vocabulary
    target_vocabs: the target vocabulary for each file
    save_dir: path where the outputs will be saved
    device: if use gpu or cpu
    beam_size: beam size during the translating
    filenames: filenames of triples to process
    max_length: max length of a sentence
    '''
    for index, eval_name in enumerate(filenames):
        name = eval_name.split("/")[-1]
        print(f'Reading {eval_name}')
        out_path = save_dir + name + "." + str(index) + ".out"
        # Context managers guarantee both files are closed even if translate() raises
        # (the original leaked the output handle on any exception).
        with open(eval_name, "r") as f, open(out_path, "w") as fout:
            outputs = translate(model, index, f, source_vocab, target_vocabs[index], device,
                    beam_size=beam_size, max_length=max_length)
            for output in outputs:
                fout.write(output.replace("<eos>", "").strip() + "\n")
def run_evaluation(model, source_vocab, target_vocabs, device, beam_size, filenames, ref_files, max_length):
    '''
    Translates each input file and scores the outputs against gold-standard references.

    model: the model being evaluated
    source_vocab: the source vocabulary
    target_vocabs: the target vocabulary for each file
    device: if use gpu or cpu
    beam_size: beam size during the translating
    filenames: filenames of triples to process
    ref_files: filenames with gold-standards for each process
    max_length: max length of a sentence
    Returns a list with one accuracy value per input file.
    '''
    accuracies = []
    for index, eval_name in enumerate(filenames):
        eval_ref = ref_files[index]
        eval_ref, corpus = '/'.join(eval_ref.split('/')[:-1]), eval_ref.split('/')[-1]
        references = []
        for fname in sorted(os.listdir(eval_ref)):
            if corpus in fname:
                path = os.path.join(eval_ref, fname)
                with open(path) as f:
                    doc = f.read().split('\n')
                if not references:
                    # First matching reference file: start one reference list per line.
                    # (Fixed: the original tested `i == 0`, which failed whenever the
                    # first matching file was not at sorted index 0.)
                    references = [[w] for w in doc]
                else:
                    for k, ref in enumerate(doc):
                        references[k].append(ref)
        print(f'Reading {eval_name}')
        with open(eval_name, "r") as f:
            outputs = translate(model, index, f, source_vocab, target_vocabs[index], device,
                    beam_size=beam_size, max_length=max_length)
        acc = 0.0
        for j, output in enumerate(outputs):
            # Case-insensitive exact match against any reference for this line.
            if output.replace("<eos>", "").strip().lower() in [w.lower() for w in references[j]]:
                acc += 1
        # Guard against an empty translation output.
        acc = acc / len(outputs) if outputs else 0.0
        accuracies.append(acc)
    return accuracies
def train(args):
set_seed(args.seed)
device = torch.device('cuda' if torch.cuda.is_available() and args.gpu else 'cpu')
batch_size = args.batch_size
max_length = args.max_length
mtl = args.mtl
learning_rate = 0.0005
if not args.learning_rate:
learning_rate = args.learning_rate
if len(args.train_source) != len(args.train_target):
print("Error.Number of inputs in train are not the same")
return
if len(args.dev_source) != len(args.dev_target):
print("Error: Number of inputs in dev are not the same")
return
if not args.tie_embeddings:
print("Building Encoder vocabulary")
source_vocabs = build_vocab(args.train_source, args.src_vocab, save_dir=args.save_dir)
print("Building Decoder vocabulary")
target_vocabs = build_vocab(args.train_target, args.tgt_vocab, mtl=mtl, name ="tgt", save_dir=args.save_dir)
else:
print("Building Share vocabulary")
source_vocabs = build_vocab(args.train_source + args.train_target, args.src_vocab, name="tied", save_dir=args.save_dir)
if mtl:
target_vocabs = [source_vocabs[0] for _ in range(len(args.train_target))]
else:
target_vocabs = source_vocabs
print("Number of source vocabularies:", len(source_vocabs))
print("Number of target vocabularies:", len(target_vocabs))
save_params(args, args.save_dir + "args.json")
# source_vocabs, target_vocabs = build_vocab(args.train_source, args.train_target, mtl=mtl)
print("Building training set and dataloaders")
train_loaders = build_dataset(args.train_source, args.train_target, batch_size, \
source_vocabs=source_vocabs, target_vocabs=target_vocabs, shuffle=True, mtl=mtl, max_length=max_length)
for train_loader in train_loaders:
print(f'Train - {len(train_loader):d} batches with size: {batch_size:d}')
print("Building dev set and dataloaders")
dev_loaders = build_dataset(args.dev_source, args.dev_target, batch_size, \
source_vocabs=source_vocabs, target_vocabs=target_vocabs, mtl=mtl, max_length=max_length)
for dev_loader in dev_loaders:
print(f'Dev - {len(dev_loader):d} batches with size: {batch_size:d}')
if args.model is not None:
print("Loading the encoder from an external model...")
multitask_model = load_model(args, source_vocabs, target_vocabs, device, max_length)
else:
print("Building model")
multitask_model = build_model(args, source_vocabs, target_vocabs, device, max_length)
print(f'The Transformer has {count_parameters(multitask_model):,} trainable parameters')
print(f'The Encoder has {count_parameters(multitask_model.encoder):,} trainable parameters')
for index, decoder in enumerate(multitask_model.decoders):
print(f'The Decoder {index+1} has {count_parameters(decoder):,} trainable parameters')
# Defining CrossEntropyLoss as default
#criterion = nn.CrossEntropyLoss(ignore_index = constants.PAD_IDX)
criterions = [LabelSmoothing(size=target_vocab.len(), padding_idx=constants.PAD_IDX, smoothing=0.1) \
for target_vocab in target_vocabs]
# Default optimizer
optimizer = torch.optim.Adam(multitask_model.parameters(), lr = learning_rate, betas=(0.9, 0.98), eps=1e-09)
model_opts = [NoamOpt(args.hidden_size, args.warmup_steps, optimizer) for _ in target_vocabs]
task_id = 0
print_loss_total = 0 # Reset every print_every
n_tasks | |
ONTAP 8.2 or later
operating in Cluster-Mode if the relationship control
plane is 'v1'.
</ul>
:param destination_snapshot: Creates the specified snapshot (in addition to the regular
SnapMirror snapshot) on the destination after the qtree
SnapMirror transfer is over.
:param transfer_priority: Specifies the priority at which the transfer runs.
Possible values are: "normal", and "low". The default
value is the value specified in the snapmirror policy which
is associated with the relationship.
<p>This parameter only applies on Data ONTAP 8.2 or later
operating in Cluster-Mode if the relationship control plane
is 'v2'.
:param source_cluster: Specifies the source cluster of the SnapMirror relationship. The
source Vserver and source volume must also be specified if using
this parameter.
:param destination_vserver: Specifies the destination Vserver of the SnapMirror relationship.
If using this parameter, the following parameters must also be
specified:
<ul>
<li> The name of the destination volume.
<li> The name of the destination cluster on Data ONTAP 8.1
operating in Cluster-Mode, or on Data ONTAP 8.2
operating in Cluster-Mode if the relationship control
plane is 'v1'.
</ul>
:param destination_location: Specifies the destination endpoint of the SnapMirror
relationship in the following formats:
<ul>
<li> <system>:/vol/<volume>[/<qtree>]
On Data ONTAP operating in 7-Mode.
<li> [<cluster>:]//<vserver>/<volume>
On Data ONTAP 8.1 operating in Cluster-Mode, and on Data
ONTAP 8.2 operating in Cluster-Mode for relationships using
a control plane compatible with Data ONTAP 8.1 operating in
Cluster-Mode.
<li> <[vserver:]volume>
On Data ONTAP 8.2 or later operating in Cluster-Mode except
for relationships using a control plane compatible with Data
ONTAP 8.1 operating in Cluster-Mode. This format depends
on the Vserver peering setup between the source and
destination Vservers.
<ul>
This format may change in the future.
On Data ONTAP operating in Cluster-Mode, when specifying a
destination endpoint, you must use either the destination
location, or the destination cluster, destination Vserver,
and destination volume.
This parameter is mandatory on Data ONTAP operating in 7-mode
:param destination_volume: Specifies the destination volume of the SnapMirror relationship.
If using this parameter, the following parameters must also be
specified:
<ul>
<li> The name of the destination Vserver.
<li> The name of the destination cluster on Data ONTAP 8.1
operating in Cluster-Mode, or on Data ONTAP 8.2
operating in Cluster-Mode if the relationship control
plane is 'v1'.
</ul>
:param source_location: Specifies the source endpoint of the SnapMirror
relationship in the following formats:
<ul>
<li> <system>:/vol/<volume>[/<qtree>]
On Data ONTAP operating in 7-Mode.
<li> [<cluster>:]//<vserver>/<volume>
On Data ONTAP 8.1 operating in Cluster-Mode, and on Data
ONTAP 8.2 operating in Cluster-Mode for relationships using
a control plane compatible with Data ONTAP 8.1 operating in
Cluster-Mode.
<li> <[vserver:]volume>
On Data ONTAP 8.2 or later operating in Cluster-Mode except
for relationships using a control plane compatible with Data
ONTAP 8.1 operating in Cluster-Mode. This format depends
on the Vserver peering setup between the source and
destination Vservers.
<ul>
This format may change in the future.
On Data ONTAP Cluster-Mode when specifying a source endpoint,
you must use either the source location, or the source
cluster, source Vserver, and source volume.
On Data ONTAP operating in 7-Mode, If the source-location is
not specified, then the source in /etc/snapmirror.conf for
the destination path is used.
:param source_snapshot: Designates the source snapshot to use for a qtree update
on Data ONTAP operating in 7-Mode, and the snapshot on the
source volume to use for the transfer on Data ONTAP 8.2 or
later operating in Cluster-Mode.
<p>For data protection mirror relationships, Data ONTAP
Cluster-Mode does not create a new Snapshot copy. It will
use the specified Snapshot copy as if it were the most recent
one; that is, all copies between the most recent common one and
the specified one are transferred, but no copies newer than the
specified one are transferred.
<p>For vault relationships, Data ONTAP Cluster-Mode transfers the
specified Snapshot copy instead of the ones that match its policy's rules.
<p>This parameter only applies on Data ONTAP 8.2 or later
operating in Cluster-Mode if the relationship control plane
is 'v2'.
:param max_transfer_rate: Specifies the upper bound, in kilobytes per second, at which data
is transferred. The default is unlimited (0) which permits the
SnapMirror relationship to fully utilize the available network
bandwidth.
On Data ONTAP operating in Cluster-Mode, the max-transfer-rate
option does not affect load-sharing transfers and transfers for
other relationships with Relationship Capability of Pre 8.2
confined to a single cluster.
:param destination_cluster: Specifies the destination cluster of the SnapMirror relationship.
The destination Vserver and destination volume must also be
specified if using this parameter.
"""
return self.request( "snapmirror-resync", {
'preserve': [ preserve, 'preserve', [ bool, 'None' ], False ],
'source_vserver': [ source_vserver, 'source-vserver', [ basestring, 'None' ], False ],
'source_volume': [ source_volume, 'source-volume', [ basestring, 'None' ], False ],
'destination_snapshot': [ destination_snapshot, 'destination-snapshot', [ basestring, 'None' ], False ],
'transfer_priority': [ transfer_priority, 'transfer-priority', [ basestring, 'None' ], False ],
'source_cluster': [ source_cluster, 'source-cluster', [ basestring, 'None' ], False ],
'destination_vserver': [ destination_vserver, 'destination-vserver', [ basestring, 'None' ], False ],
'destination_location': [ destination_location, 'destination-location', [ basestring, 'None' ], False ],
'destination_volume': [ destination_volume, 'destination-volume', [ basestring, 'None' ], False ],
'source_location': [ source_location, 'source-location', [ basestring, 'None' ], False ],
'source_snapshot': [ source_snapshot, 'source-snapshot', [ basestring, 'None' ], False ],
'max_transfer_rate': [ max_transfer_rate, 'max-transfer-rate', [ int, 'None' ], False ],
'destination_cluster': [ destination_cluster, 'destination-cluster', [ basestring, 'None' ], False ],
}, {
'result-error-message': [ basestring, False ],
'result-jobid': [ int, False ],
'result-error-code': [ int, False ],
'result-status': [ basestring, False ],
} )
def snapmirror_on(self):
    """
    Turn on SnapMirror data transfers and start the SnapMirror
    scheduler. Use the snapmirror-get-status API to check the
    resulting SnapMirror status.
    """
    return self.request("snapmirror-on", {}, {})
def snapmirror_get_destination_iter(self, max_records=None, query=None, tag=None, desired_attributes=None):
    """
    Return information about one or more SnapMirror relationships whose
    source endpoints are in the Vserver or the cluster the API is issued
    on.
    <p>
    The information returned can be stale. Stale information corresponds
    to a relationship that has been deleted on its destination cluster or
    Vserver, and may show up as several entries sharing the same source
    and destination endpoints but carrying different relationship IDs.
    <p>
    Note that the information for a SnapMirror relationship is not
    available on its source Vserver or source cluster until at least one
    transfer has been initiated.
    <p>
    This API is only supported on Data ONTAP 8.2 and above operating
    in Cluster-Mode. It can be issued on a Vserver or a Cluster.

    :param max_records: The maximum number of records to return in this call.
        Default: 20
    :param query: A query that specifies which objects to return. It may be
        specified on any number of attributes in the snapmirror object; all
        matching objects up to 'max-records' will be returned.
    :param tag: Specify the tag from the last call. Usually omitted for the
        first call; for subsequent calls, copy the 'next-tag' value obtained
        from the previous call.
    :param desired_attributes: Specify the attributes that should be returned.
        If not present, all attributes for which information is available
        will be returned; if present, only the desired attributes for which
        information is available will be returned.
    """
    api_args = {
        'max_records': max_records,
        'query': [query, 'query', [SnapmirrorDestinationInfo, 'None'], False],
        'tag': tag,
        'desired_attributes': [desired_attributes, 'desired-attributes', [SnapmirrorDestinationInfo, 'None'], False],
    }
    api_returns = {
        'attributes-list': [SnapmirrorDestinationInfo, True],
    }
    return self.request("snapmirror-get-destination-iter", api_args, api_returns)
def snapmirror_list_sync_schedule(self, destination_location=None):
"""
Returns a synchronous schedule for a given destination or
all destinations. The API must be executed on the
destination filer.
Currently, the schedules are stored in /etc/snapmirror.conf.
:param destination_location: The destination location of a schedule to obtain. The
destination location | |
def view_diff_working_copy(self, data=None):
    """Enabled when the item is versioned and exactly one revision is selected."""
    in_working_copy = self.vcs.is_in_a_or_a_working_copy(self.path)
    return in_working_copy and len(self.revisions) == 1
def copy_clipboard(self, data=None):
    """Enabled whenever at least one revision is selected."""
    return len(self.revisions) != 0
def view_diff_previous_revision(self, data=None):
    """Enabled when exactly one revision is selected and it has a known predecessor.

    Fixed: removed the unused `item` lookup of revisions[0]["revision"], which
    could raise a spurious KeyError for entries without a "revision" key.
    """
    return "previous_revision" in self.revisions[0] and len(self.revisions) == 1
def view_diff_revisions(self, data=None):
    """Enabled when two or more revisions are selected."""
    return len(self.revisions) >= 2
def compare_working_copy(self, data=None):
    """Enabled when the item is versioned and exactly one revision is selected."""
    in_working_copy = self.vcs.is_in_a_or_a_working_copy(self.path)
    return in_working_copy and len(self.revisions) == 1
def compare_previous_revision(self, data=None):
    """Enabled when exactly one revision is selected and it has a known predecessor.

    Fixed: removed the unused `item` lookup of revisions[0]["revision"], which
    could raise a spurious KeyError for entries without a "revision" key.
    """
    return "previous_revision" in self.revisions[0] and len(self.revisions) == 1
def compare_revisions(self, data=None):
    """Enabled when two or more revisions are selected."""
    return len(self.revisions) >= 2
def show_changes_previous_revision(self, data=None):
    """Enabled when exactly one revision is selected and it has a known predecessor.

    Fixed: removed the unused `item` lookup of revisions[0]["revision"], which
    could raise a spurious KeyError for entries without a "revision" key.
    """
    return "previous_revision" in self.revisions[0] and len(self.revisions) == 1
def show_changes_revisions(self, data=None):
    """Enabled when two or more revisions are selected."""
    return len(self.revisions) >= 2
def update_to_this_revision(self, data=None):
    """Enabled for a single selected revision when the VCS is Subversion."""
    # TODO Evaluate multiple revisions later
    # TODO Git?
    is_svn = self.vcs_name == rabbitvcs.vcs.VCS_SVN
    return is_svn and len(self.revisions) == 1
def revert_changes_from_this_revision(self, data=None):
    """Enabled for a single selected revision when the VCS is Subversion."""
    is_svn = self.vcs_name == rabbitvcs.vcs.VCS_SVN
    return is_svn and len(self.revisions) == 1
def checkout(self, data=None):
    """Enabled when exactly one revision is selected."""
    return len(self.revisions) == 1
def branches(self, data=None):
    """Enabled for a single selected revision when the VCS is Git."""
    single = len(self.revisions) == 1
    return single and self.vcs_name == rabbitvcs.vcs.VCS_GIT
def tags(self, data=None):
    """Enabled for a single selected revision when the VCS is Git."""
    single = len(self.revisions) == 1
    return single and self.vcs_name == rabbitvcs.vcs.VCS_GIT
def branch_tag(self, data=None):
    """Menu condition: branch/tag creation is SVN-only, single selection."""
    if self.vcs_name != rabbitvcs.vcs.VCS_SVN:
        return False
    return len(self.revisions) == 1
def export(self, data=None):
    """Menu condition: export works from exactly one selected revision."""
    selected = self.revisions
    return len(selected) == 1
def edit_author(self, data=None):
    """Menu condition: editing svn:author is SVN-specific."""
    return rabbitvcs.vcs.VCS_SVN == self.vcs_name
def edit_log_message(self, data=None):
    """Menu condition: editing svn:log is SVN-specific."""
    return rabbitvcs.vcs.VCS_SVN == self.vcs_name
def edit_revision_properties(self, data=None):
    """Menu condition: revision-property editing is SVN-only, single selection."""
    if self.vcs_name != rabbitvcs.vcs.VCS_SVN:
        return False
    return len(self.revisions) == 1
def separator(self, data=None):
    """Menu condition: separators are always shown."""
    return True
def separator_last(self, data=None):
    """Menu condition: the trailing separator only appears for SVN."""
    return rabbitvcs.vcs.VCS_SVN == self.vcs_name
def merge(self, data=None):
    """Menu condition: merge from the log view is offered for Git only."""
    return rabbitvcs.vcs.VCS_GIT == self.vcs_name
def reset(self, data=None):
    """Menu condition: reset is offered for Git only."""
    return rabbitvcs.vcs.VCS_GIT == self.vcs_name
class LogTopContextMenuCallbacks:
    """Callback handlers for the context menu over the revisions table of the
    log dialog.  Most handlers spawn the matching RabbitVCS UI window via
    rabbitvcs.util.helper.launch_ui_window with "path@revision" arguments.
    """

    def __init__(self, caller, vcs, path, revisions):
        # caller: the log dialog; vcs: VCS facade; path: the loaded path;
        # revisions: selected log entries (index 0 = first selected).
        self.caller = caller
        self.vcs = vcs
        self.path = path
        self.revisions = revisions
        self.vcs_name = self.caller.get_vcs_name()

    def find_parent(self, revision):
        """Return the parent of a log entry as text.

        Prefers an explicit parents list (git), then "next_revision",
        and finally falls back to the numeric revision minus one (svn).
        """
        if ("parents" in revision) and len(revision["parents"]) > 0:
            parent = six.text_type(revision["parents"][0])
        elif ("next_revision" in revision):
            parent = six.text_type(revision["next_revision"])
        else:
            parent = six.text_type(int(six.text_type(revision["revision"])) - 1)
        return parent

    def view_diff_working_copy(self, widget, data=None):
        # Diff the selected revision against the working copy.
        rabbitvcs.util.helper.launch_ui_window("diff", [
            "%s@%s" % (self.path, six.text_type(self.revisions[0]["revision"])),
            "--vcs=%s" % self.caller.get_vcs_name()
        ])

    def copy_clipboard(self, widget, data=None):
        # Delegate to the dialog, which knows the table selection.
        self.caller.copy_revision_text()

    def view_diff_previous_revision(self, widget, data=None):
        # Unified diff: parent -> selected revision.
        parent = self.find_parent(self.revisions[0])
        rabbitvcs.util.helper.launch_ui_window("diff", [
            "%s@%s" % (self.path, parent),
            "%s@%s" % (self.path, six.text_type(self.revisions[0]["revision"])),
            "--vcs=%s" % self.caller.get_vcs_name()
        ])

    def view_diff_revisions(self, widget, data=None):
        # For SVN the older side must be addressed by repository URL.
        path_older = self.path
        if self.vcs_name == rabbitvcs.vcs.VCS_SVN:
            path_older = self.vcs.svn().get_repo_url(self.path)
        # NOTE(review): uses self.revisions[1]["revision"].value while the
        # other handlers use six.text_type(...) — confirm revision objects
        # expose .value and that this is intentional.
        rabbitvcs.util.helper.launch_ui_window("diff", [
            "%s@%s" % (path_older, self.revisions[1]["revision"].value),
            "%s@%s" % (self.path, six.text_type(self.revisions[0]["revision"])),
            "--vcs=%s" % self.caller.get_vcs_name()
        ])

    def compare_working_copy(self, widget, data=None):
        # "-s" requests the side-by-side compare view.
        path_older = self.path
        if self.vcs_name == rabbitvcs.vcs.VCS_SVN:
            path_older = self.vcs.svn().get_repo_url(self.path)
        rabbitvcs.util.helper.launch_ui_window("diff", [
            "-s",
            "%s@%s" % (path_older, six.text_type(self.revisions[0]["revision"])),
            "%s" % (self.path),
            "--vcs=%s" % self.caller.get_vcs_name()
        ])

    def compare_previous_revision(self, widget, data=None):
        # Side-by-side compare: parent vs selected revision.
        parent = self.find_parent(self.revisions[0])
        rabbitvcs.util.helper.launch_ui_window("diff", [
            "-s",
            "%s@%s" % (self.path, parent),
            "%s@%s" % (self.path, six.text_type(self.revisions[0]["revision"])),
            "--vcs=%s" % self.caller.get_vcs_name()
        ])

    def compare_revisions(self, widget, data=None):
        path_older = self.path
        if self.vcs_name == rabbitvcs.vcs.VCS_SVN:
            path_older = self.vcs.svn().get_repo_url(self.path)
        # NOTE(review): same .value inconsistency as view_diff_revisions above.
        rabbitvcs.util.helper.launch_ui_window("diff", [
            "-s",
            "%s@%s" % (path_older, self.revisions[1]["revision"].value),
            "%s@%s" % (self.path, six.text_type(self.revisions[0]["revision"])),
            "--vcs=%s" % self.caller.get_vcs_name()
        ])

    def show_changes_previous_revision(self, widget, data=None):
        # "changes" view between the selected revision and its parent.
        rev_first = six.text_type(self.revisions[0]["revision"])
        parent = self.find_parent(self.revisions[0])
        path = self.path
        if self.vcs_name == rabbitvcs.vcs.VCS_SVN:
            path = self.vcs.svn().get_repo_url(self.path)
        rabbitvcs.util.helper.launch_ui_window("changes", [
            "%s@%s" % (path, parent),
            "%s@%s" % (path, six.text_type(rev_first)),
            "--vcs=%s" % self.caller.get_vcs_name()
        ])

    def show_changes_revisions(self, widget, data=None):
        rev_first = six.text_type(self.revisions[0]["revision"])
        # NOTE(review): takes the upper bound from revisions[0]["next_revision"]
        # rather than the last selected entry; the bottom-pane analogue uses
        # revisions[-1]["revision"] — confirm which is intended.
        rev_last = six.text_type(self.revisions[0]["next_revision"])
        path = self.path
        if self.vcs_name == rabbitvcs.vcs.VCS_SVN:
            path = self.vcs.svn().get_repo_url(self.path)
        rabbitvcs.util.helper.launch_ui_window("changes", [
            "%s@%s" % (path, six.text_type(rev_first)),
            "%s@%s" % (path, six.text_type(rev_last)),
            "--vcs=%s" % self.caller.get_vcs_name()
        ])

    def update_to_this_revision(self, widget, data=None):
        rabbitvcs.util.helper.launch_ui_window("updateto", [
            self.path,
            "-r", six.text_type(self.revisions[0]["revision"]),
            "--vcs=%s" % self.caller.get_vcs_name()
        ])

    def revert_changes_from_this_revision(self, widget, data=None):
        # Reverse merge: range "N-(N-1)" undoes revision N in the working copy.
        rabbitvcs.util.helper.launch_ui_window("merge", [
            self.path,
            six.text_type(self.revisions[0]["revision"]) + "-" + str(int(six.text_type(self.revisions[0]["revision"])) - 1),
            "--vcs=%s" % self.caller.get_vcs_name()
        ])

    def checkout(self, widget, data=None):
        # SVN checkouts need the repository URL; for other VCSes url stays "".
        url = ""
        if self.vcs_name == rabbitvcs.vcs.VCS_SVN:
            url = self.vcs.svn().get_repo_url(self.path)
        rabbitvcs.util.helper.launch_ui_window("checkout", [
            self.path,
            url,
            "-r", six.text_type(self.revisions[0]["revision"]),
            "--vcs=%s" % self.caller.get_vcs_name()
        ])

    def branch_tag(self, widget, data=None):
        rabbitvcs.util.helper.launch_ui_window("branch", [
            self.path,
            "-r", six.text_type(self.revisions[0]["revision"]),
            "--vcs=%s" % self.caller.get_vcs_name()
        ])

    def branches(self, widget, data=None):
        rabbitvcs.util.helper.launch_ui_window("branches", [
            self.path,
            "-r", six.text_type(self.revisions[0]["revision"]),
            "--vcs=%s" % self.caller.get_vcs_name()
        ])

    def tags(self, widget, data=None):
        rabbitvcs.util.helper.launch_ui_window("tags", [
            self.path,
            "-r", six.text_type(self.revisions[0]["revision"]),
            "--vcs=%s" % self.caller.get_vcs_name()
        ])

    def export(self, widget, data=None):
        rabbitvcs.util.helper.launch_ui_window("export", [
            self.path,
            "-r", six.text_type(self.revisions[0]["revision"]),
            "--vcs=%s" % self.caller.get_vcs_name()
        ])

    def merge(self, widget, data=None):
        # For Git, pass the selected revision (and optionally a second one
        # as the merge source); a lone selection just omits the range end.
        extra = []
        if self.vcs_name == rabbitvcs.vcs.VCS_GIT:
            extra.append(six.text_type(self.revisions[0]["revision"]))
            try:
                fromrev = six.text_type(self.revisions[1]["revision"])
                extra.append(fromrev)
            except IndexError as e:
                pass
        extra += ["--vcs=%s" % self.caller.get_vcs_name()]
        rabbitvcs.util.helper.launch_ui_window("merge", [self.path] + extra)

    def reset(self, widget, data=None):
        rabbitvcs.util.helper.launch_ui_window("reset", [
            self.path,
            "-r", six.text_type(self.revisions[0]["revision"]),
            "--vcs=%s" % self.caller.get_vcs_name()
        ])

    def edit_author(self, widget, data=None):
        """Prompt for a new svn:author value and store it via edit_revprop."""
        # NOTE(review): `message` is assigned but never used here.
        message = ""
        if len(self.revisions) == 1:
            author = self.revisions[0]["author"]
        from rabbitvcs.ui.dialog import TextChange
        dialog = TextChange(_("Edit author"), author)
        (result, new_author) = dialog.run()
        if result == gtk.RESPONSE_OK:
            self.caller.edit_revprop("svn:author", new_author, self.caller.on_author_edited)

    def edit_log_message(self, widget, data=None):
        """Prompt for a new svn:log value and store it via edit_revprop."""
        message = ""
        if len(self.revisions) == 1:
            message = self.revisions[0]["message"]
        from rabbitvcs.ui.dialog import TextChange
        dialog = TextChange(_("Edit log message"), message)
        (result, new_message) = dialog.run()
        if result == gtk.RESPONSE_OK:
            self.caller.edit_revprop("svn:log", new_message, self.caller.on_log_message_edited)

    def edit_revision_properties(self, widget, data=None):
        # Revision properties live on the repository, so resolve the URL.
        url = self.vcs.svn().get_repo_url(self.path)
        rabbitvcs.util.helper.launch_ui_window("revprops", [
            "%s@%s" % (url, six.text_type(self.revisions[0]["revision"])),
            "--vcs=%s" % self.caller.get_vcs_name()
        ])
class LogTopContextMenu:
    """
    Defines the context menu for the revisions table of the log dialog and
    shows it at the position of a button-press event.
    """
    # NOTE(review): the mutable default `revisions=[]` is shared between
    # calls; it is only read here, so it is harmless, but worth confirming.
    def __init__(self, caller, event, path, revisions=[]):
        """
        @param caller: The calling object
        @type caller: object
        @param event: The button-press event that triggered the menu
        @type event: gtk event
        @param path: The loaded path
        @type path: string
        @param revisions: The selected revisions
        @type revisions: list of rabbitvcs.vcs.Revision object
        """
        self.caller = caller
        self.event = event
        self.path = path
        self.revisions = revisions
        self.vcs = rabbitvcs.vcs.VCS()
        # Conditions decide which items are visible; callbacks handle clicks.
        self.conditions = LogTopContextMenuConditions(
            self.caller,
            self.vcs,
            self.path,
            self.revisions
        )
        self.callbacks = LogTopContextMenuCallbacks(
            self.caller,
            self.vcs,
            self.path,
            self.revisions
        )
        # The first element of each tuple is a key that matches a
        # ContextMenuItems item. The second element is either None when there
        # is no submenu, or a recursive list of tuples for desired submenus.
        # The list order is the on-screen menu order — do not reorder.
        self.structure = [
            (MenuViewDiffWorkingCopy, None),
            (MenuViewDiffPreviousRevision, None),
            (MenuViewDiffRevisions, None),
            (MenuCompareWorkingCopy, None),
            (MenuComparePreviousRevision, None),
            (MenuCompareRevisions, None),
            (MenuShowChangesPreviousRevision, None),
            (MenuShowChangesRevisions, None),
            (MenuSeparator, None),
            (MenuCopyClipboard, None),
            (MenuSeparator, None),
            (MenuUpdateToThisRevision, None),
            (MenuRevertChangesFromThisRevision, None),
            (MenuCheckout, None),
            (MenuBranches, None),
            (MenuTags, None),
            (MenuBranchTag, None),
            (MenuExport, None),
            (MenuMerge, None),
            (MenuReset, None),
            (MenuSeparatorLast, None),
            (MenuEditAuthor, None),
            (MenuEditLogMessage, None),
            (MenuEditRevisionProperties, None)
        ]

    def show(self):
        """Display the menu; nothing is shown without a selection."""
        if len(self.revisions) == 0:
            return
        context_menu = GtkContextMenu(self.structure, self.conditions, self.callbacks)
        context_menu.show(self.event)
class LogBottomContextMenuConditions:
    """Visibility conditions for the context menu over the changed-paths
    table (bottom pane) of the log dialog.

    Each predicate returns True when the corresponding menu item should be
    shown for the current selection of paths and revisions.

    Change from the original: three methods bound
    ``self.revisions[0]["revision"]`` to an unused local (``item``); those
    dead reads have been removed.
    """

    def __init__(self, caller, vcs, paths, revisions):
        self.caller = caller
        self.vcs = vcs
        self.paths = paths
        self.revisions = revisions

    def view_diff_working_copy(self, data=None):
        # Repository paths cannot be diffed against the working copy here.
        return False

    def view_diff_previous_revision(self, data=None):
        return ("previous_revision" in self.revisions[0] and len(self.revisions) == 1)

    def view_diff_revisions(self, data=None):
        return (len(self.paths) == 1 and len(self.revisions) > 1)

    def compare_working_copy(self, data=None):
        # Same restriction as view_diff_working_copy.
        return False

    def compare_previous_revision(self, data=None):
        return ("previous_revision" in self.revisions[0] and len(self.revisions) == 1)

    def compare_revisions(self, data=None):
        return (len(self.paths) == 1 and len(self.revisions) > 1)

    def show_changes_previous_revision(self, data=None):
        return ("previous_revision" in self.revisions[0] and len(self.revisions) == 1)

    def show_changes_revisions(self, data=None):
        return (len(self.paths) == 1 and len(self.revisions) > 1)

    def _open(self, data=None):
        # Opening the file is always possible.
        return True

    def annotate(self, data=None):
        return (len(self.paths) == 1)

    def separator(self, data=None):
        return True
class LogBottomContextMenuCallbacks:
def __init__(self, caller, vcs, paths, revisions):
    """Bind menu state: the calling dialog, VCS access, and the selection."""
    self.caller = caller
    self.vcs = vcs
    self.svn = vcs.svn()
    self.vcs_name = caller.get_vcs_name()
    self.paths = paths
    self.revisions = revisions
def find_parent(self, revision):
    """Return the parent revision of a log entry as text.

    Prefers an explicit parents list (git), then "next_revision", and
    finally falls back to the numeric revision minus one (svn).
    """
    if "parents" in revision and len(revision["parents"]) > 0:
        return six.text_type(revision["parents"][0])
    if "next_revision" in revision:
        return six.text_type(revision["next_revision"])
    return six.text_type(int(six.text_type(revision["revision"])) - 1)
def view_diff_previous_revision(self, widget, data=None):
    """Diff the selected path at the chosen revision against its parent."""
    entry = self.revisions[0]
    selected_rev = six.text_type(entry["revision"])
    parent = self.find_parent(entry)
    url = self.caller.root_url + self.paths[0]
    self.caller.view_diff_for_path(url, selected_rev, parent)
def view_diff_revisions(self, widget, data=None):
    """Diff the selected path between the first and last chosen revisions."""
    first = six.text_type(self.revisions[0]["revision"])
    last = six.text_type(self.revisions[-1]["revision"])
    url = self.caller.root_url + self.paths[0]
    self.caller.view_diff_for_path(url, latest_revision_number=last,
                                   earliest_revision_number=first)
def compare_previous_revision(self, widget, data=None):
    """Side-by-side compare of the selected path against its parent revision."""
    entry = self.revisions[0]
    selected_rev = six.text_type(entry["revision"])
    parent = self.find_parent(entry)
    url = self.caller.root_url + self.paths[0]
    self.caller.view_diff_for_path(url, selected_rev, parent, sidebyside=True)
def compare_revisions(self, widget, data=None):
    """Side-by-side compare of the selected path between two chosen revisions."""
    earliest = six.text_type(self.revisions[0]["revision"])
    latest = six.text_type(self.revisions[-1]["revision"])
    url = self.caller.root_url + self.paths[0]
    self.caller.view_diff_for_path(url,
                                   latest,
                                   sidebyside=True,
                                   earliest_revision_number=earliest)
def show_changes_previous_revision(self, widget, data=None):
    """Open the "changes" view between the selected revision and its parent.

    Bug fix: the original referenced an undefined name ``rev_last`` for the
    newer side of the range, raising NameError whenever the item was used.
    The selected revision (``rev_first``) belongs there, matching
    LogTopContextMenuCallbacks.show_changes_previous_revision.
    """
    rev_first = six.text_type(self.revisions[0]["revision"])
    parent = self.find_parent(self.revisions[0])
    url = self.paths[0]
    if self.vcs_name == rabbitvcs.vcs.VCS_SVN:
        # SVN needs the full repository URL rather than the relative path.
        url = self.caller.root_url + self.paths[0]
    rabbitvcs.util.helper.launch_ui_window("changes", [
        "%s@%s" % (url, parent),
        "%s@%s" % (url, rev_first),
        "--vcs=%s" % self.caller.get_vcs_name()
    ])
def show_changes_revisions(self, widget, data=None):
rev_first = six.text_type(self.revisions[0]["revision"])
rev_last = six.text_type(self.revisions[-1]["revision"])
url = self.paths[0]
if self.vcs_name == rabbitvcs.vcs.VCS_SVN:
url = self.caller.root_url + self.paths[0]
rabbitvcs.util.helper.launch_ui_window("changes", [
"%s@%s" % (url, rev_first),
"%s@%s" % (url, rev_last),
| |
"""
Sentence Pair Classification with Bidirectional Encoder Representations from Transformers
=========================================================================================
This example shows how to implement finetune a model with pre-trained BERT parameters for
sentence pair classification, with Gluon NLP Toolkit.
@article{devlin2018bert,
title={BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding},
  author={Devlin, Jacob and Chang, Ming-Wei and Lee, Kenton and Toutanova, Kristina},
journal={arXiv preprint arXiv:1810.04805},
year={2018}
}
"""
# coding: utf-8
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint:disable=redefined-outer-name,logging-format-interpolation
import io
import os
import time
import argparse
import random
import logging
import warnings
import multiprocessing
import numpy as np
import mxnet as mx
from mxnet import gluon
import gluonnlp as nlp
from gluonnlp.model import get_model
from gluonnlp.data import BERTTokenizer
from model.classification import BERTClassifier, BERTRegression
from data.classification import MRPCTask, QQPTask, RTETask, STSBTask, SSTTask
from data.classification import QNLITask, CoLATask, MNLITask, WNLITask, XNLITask
from data.classification import LCQMCTask, ChnSentiCorpTask
from data.transform import BERTDatasetTransform
# Registry of supported fine-tuning tasks, keyed by the --task_name choice.
tasks = {
    'MRPC': MRPCTask(),
    'QQP': QQPTask(),
    'QNLI': QNLITask(),
    'RTE': RTETask(),
    'STS-B': STSBTask(),
    'CoLA': CoLATask(),
    'MNLI': MNLITask(),
    'WNLI': WNLITask(),
    'SST': SSTTask(),
    'XNLI': XNLITask(),
    'LCQMC': LCQMCTask(),
    'ChnSentiCorp': ChnSentiCorpTask()
}

# Command-line interface.
parser = argparse.ArgumentParser(
    description='BERT fine-tune examples for GLUE tasks.')
parser.add_argument(
    '--epochs', type=int, default=3, help='number of epochs, default is 3')
parser.add_argument(
    '--batch_size',
    type=int,
    default=32,
    help='Batch size. Number of examples per gpu in a minibatch, default is 32')
parser.add_argument(
    '--dev_batch_size',
    type=int,
    default=8,
    help='Batch size for dev set and test set, default is 8')
parser.add_argument(
    '--optimizer',
    type=str,
    default='bertadam',
    help='Optimization algorithm, default is bertadam')
parser.add_argument(
    '--lr',
    type=float,
    default=5e-5,
    help='Initial learning rate, default is 5e-5')
parser.add_argument(
    '--epsilon',
    type=float,
    default=1e-06,
    help='Small value to avoid division by 0, default is 1e-06'
)
parser.add_argument(
    '--warmup_ratio',
    type=float,
    default=0.1,
    help='ratio of warmup steps used in NOAM\'s stepsize schedule, default is 0.1')
parser.add_argument(
    '--log_interval',
    type=int,
    default=10,
    help='report interval, default is 10')
parser.add_argument(
    '--max_len',
    type=int,
    default=128,
    help='Maximum length of the sentence pairs, default is 128')
parser.add_argument(
    '--pad',
    action='store_true',
    help='Whether to pad to maximum length when preparing data batches. Default is False.')
parser.add_argument(
    '--seed', type=int, default=2, help='Random seed, default is 2')
parser.add_argument(
    '--accumulate',
    type=int,
    default=None,
    help='The number of batches for gradients accumulation to simulate large batch size. '
    'Default is None')
parser.add_argument(
    '--gpu', type=int, default=None, help='Which gpu for finetuning. By default cpu is used.')
parser.add_argument(
    '--task_name',
    type=str,
    default='MRPC',
    choices=tasks.keys(),
    help='The name of the task to fine-tune. Choices include MRPC, QQP, '
    'QNLI, RTE, STS-B, CoLA, MNLI, WNLI, SST.')
parser.add_argument(
    '--bert_model',
    type=str,
    default='bert_12_768_12',
    help='The name of pre-trained BERT model to fine-tune'
    '(bert_24_1024_16 and bert_12_768_12).')
parser.add_argument(
    '--bert_dataset',
    type=str,
    default='book_corpus_wiki_en_uncased',
    help='The dataset BERT pre-trained with.'
    'Options include \'book_corpus_wiki_en_cased\', \'book_corpus_wiki_en_uncased\''
    'for both bert_24_1024_16 and bert_12_768_12.'
    '\'wiki_cn_cased\', \'wiki_multilingual_uncased\' and \'wiki_multilingual_cased\''
    'for bert_12_768_12 only.')
parser.add_argument(
    '--pretrained_bert_parameters',
    type=str,
    default=None,
    help='Pre-trained bert model parameter file. default is None')
parser.add_argument(
    '--model_parameters',
    type=str,
    default=None,
    help='A parameter file for the model that is loaded into the model'
    ' before training/inference. It is different from the parameter'
    ' file written after the model is trained. default is None')
parser.add_argument(
    '--output_dir',
    type=str,
    default='./output_dir',
    help='The output directory where the model params will be written.'
    ' default is ./output_dir')
parser.add_argument(
    '--only_inference',
    action='store_true',
    help='If set, we skip training and only perform inference on dev and test data.')
args = parser.parse_args()

# Logging: route warnings through logging and echo the parsed arguments.
logging.getLogger().setLevel(logging.INFO)
logging.captureWarnings(True)
logging.info(args)

batch_size = args.batch_size
dev_batch_size = args.dev_batch_size
task_name = args.task_name
lr = args.lr
epsilon = args.epsilon
accumulate = args.accumulate
# Scale the report interval so logs refer to effective (accumulated) batches.
log_interval = args.log_interval * accumulate if accumulate else args.log_interval
if accumulate:
    logging.info('Using gradient accumulation. Effective batch size = %d',
                 accumulate * batch_size)

# random seed
np.random.seed(args.seed)
random.seed(args.seed)
mx.random.seed(args.seed)

ctx = mx.cpu() if args.gpu is None else mx.gpu(args.gpu)

task = tasks[task_name]

# model and loss
only_inference = args.only_inference
model_name = args.bert_model
dataset = args.bert_dataset
pretrained_bert_parameters = args.pretrained_bert_parameters
model_parameters = args.model_parameters
if only_inference and not model_parameters:
    warnings.warn('model_parameters is not set. '
                  'Randomly initialized model will be used for inference.')

# Only download pre-trained weights when no explicit parameter file is given.
get_pretrained = not (pretrained_bert_parameters is not None
                      or model_parameters is not None)
bert, vocabulary = get_model(
    name=model_name,
    dataset_name=dataset,
    pretrained=get_pretrained,
    ctx=ctx,
    use_pooler=True,
    use_decoder=False,
    use_classifier=False)

if not task.class_labels:
    # STS-B is a regression task.
    # STSBTask().class_labels returns None
    model = BERTRegression(bert, dropout=0.1)
    if not model_parameters:
        model.regression.initialize(init=mx.init.Normal(0.02), ctx=ctx)
    loss_function = gluon.loss.L2Loss()
else:
    model = BERTClassifier(
        bert, dropout=0.1, num_classes=len(task.class_labels))
    if not model_parameters:
        model.classifier.initialize(init=mx.init.Normal(0.02), ctx=ctx)
    loss_function = gluon.loss.SoftmaxCELoss()

# load checkpointing
output_dir = args.output_dir
if pretrained_bert_parameters:
    logging.info('loading bert params from %s', pretrained_bert_parameters)
    model.bert.load_parameters(pretrained_bert_parameters, ctx=ctx,
                               ignore_extra=True)
if model_parameters:
    logging.info('loading model params from %s', model_parameters)
    model.load_parameters(model_parameters, ctx=ctx)
nlp.utils.mkdir(output_dir)

logging.debug(model)
# Hybridize for graph-level optimization with static memory allocation.
model.hybridize(static_alloc=True)
loss_function.hybridize(static_alloc=True)

# data processing
do_lower_case = 'uncased' in dataset
bert_tokenizer = BERTTokenizer(vocabulary, lower=do_lower_case)
def preprocess_data(tokenizer, task, batch_size, dev_batch_size, max_len, pad=False):
    """Train/eval Data preparation function.

    Tokenizes/transforms the task's train, dev and test splits in parallel
    and wraps each in a DataLoader.

    Parameters: tokenizer (BERTTokenizer), task (GlueTask-like object),
    batch_size / dev_batch_size (ints), max_len (int), pad (bool — pad to
    max_len instead of per-batch dynamic padding).

    Returns: (loader_train, loader_dev_list, loader_test_list, num_train_examples)
    where the dev/test entries are (segment_name, DataLoader) pairs.
    """
    pool = multiprocessing.Pool()

    # transformation for data train and dev
    label_dtype = 'float32' if not task.class_labels else 'int32'
    trans = BERTDatasetTransform(tokenizer, max_len,
                                 class_labels=task.class_labels,
                                 label_alias=task.label_alias,
                                 pad=pad, pair=task.is_pair,
                                 has_label=True)

    # data train
    # task.dataset_train returns (segment_name, dataset)
    train_tsv = task.dataset_train()[1]
    data_train = mx.gluon.data.SimpleDataset(pool.map(trans, train_tsv))
    # Lengths drive the bucketing below (eager transform via lazy=False).
    data_train_len = data_train.transform(
        lambda input_id, length, segment_id, label_id: length, lazy=False)
    # bucket sampler for training
    batchify_fn = nlp.data.batchify.Tuple(
        nlp.data.batchify.Pad(axis=0), nlp.data.batchify.Stack(),
        nlp.data.batchify.Pad(axis=0), nlp.data.batchify.Stack(label_dtype))
    batch_sampler = nlp.data.sampler.FixedBucketSampler(
        data_train_len,
        batch_size=batch_size,
        num_buckets=10,
        ratio=0,
        shuffle=True)
    # data loader for training
    loader_train = gluon.data.DataLoader(
        dataset=data_train,
        num_workers=1,
        batch_sampler=batch_sampler,
        batchify_fn=batchify_fn)

    # data dev. For MNLI, more than one dev set is available
    dev_tsv = task.dataset_dev()
    dev_tsv_list = dev_tsv if isinstance(dev_tsv, list) else [dev_tsv]
    loader_dev_list = []
    for segment, data in dev_tsv_list:
        data_dev = mx.gluon.data.SimpleDataset(pool.map(trans, data))
        loader_dev = mx.gluon.data.DataLoader(
            data_dev,
            batch_size=dev_batch_size,
            num_workers=1,
            shuffle=False,
            batchify_fn=batchify_fn)
        loader_dev_list.append((segment, loader_dev))

    # batchify for data test — no label field, so only three elements.
    test_batchify_fn = nlp.data.batchify.Tuple(
        nlp.data.batchify.Pad(axis=0), nlp.data.batchify.Stack(),
        nlp.data.batchify.Pad(axis=0))
    # transform for data test
    test_trans = BERTDatasetTransform(tokenizer, max_len,
                                      class_labels=None,
                                      pad=pad, pair=task.is_pair,
                                      has_label=False)

    # data test. For MNLI, more than one test set is available
    test_tsv = task.dataset_test()
    test_tsv_list = test_tsv if isinstance(test_tsv, list) else [test_tsv]
    loader_test_list = []
    for segment, data in test_tsv_list:
        data_test = mx.gluon.data.SimpleDataset(pool.map(test_trans, data))
        loader_test = mx.gluon.data.DataLoader(
            data_test,
            batch_size=dev_batch_size,
            num_workers=1,
            shuffle=False,
            batchify_fn=test_batchify_fn)
        loader_test_list.append((segment, loader_test))
    return loader_train, loader_dev_list, loader_test_list, len(data_train)
# Get the loader. Built once at module scope; used by train()/test() below.
logging.info('processing dataset...')
train_data, dev_data_list, test_data_list, num_train_examples = preprocess_data(
    bert_tokenizer, task, batch_size, dev_batch_size, args.max_len, args.pad)
def test(loader_test, segment):
    """Inference function on the test dataset.

    Runs the (module-level) model over `loader_test` and writes one
    prediction per line to <output_dir>/<task_name><segment>.csv in the
    GLUE submission format (index\tprediction).
    """
    logging.info('Now we are doing testing on %s with %s.', segment, ctx)

    tic = time.time()
    results = []
    for _, seqs in enumerate(loader_test):
        input_ids, valid_length, type_ids = seqs
        out = model(input_ids.as_in_context(ctx),
                    type_ids.as_in_context(ctx),
                    valid_length.astype('float32').as_in_context(ctx))
        if not task.class_labels:
            # regression task
            for result in out.asnumpy().reshape(-1).tolist():
                results.append('{:.3f}'.format(result))
        else:
            # classification task
            indices = mx.nd.topk(out, k=1, ret_typ='indices', dtype='int32').asnumpy()
            for index in indices:
                results.append(task.class_labels[int(index)])

    # Block until all async mxnet work finishes so timing is meaningful.
    mx.nd.waitall()
    toc = time.time()
    logging.info('Time cost=%.2fs, throughput=%.2f samples/s', toc - tic,
                 dev_batch_size * len(loader_test) / (toc - tic))
    # write result to a file.
    filename = args.task_name + segment.replace('test', '') + '.csv'
    test_path = os.path.join(args.output_dir, filename)
    with io.open(test_path, 'w', encoding='utf-8') as f:
        f.write(u'index\tprediction\n')
        for i, pred in enumerate(results):
            f.write(u'%d\t%s\n'%(i, str(pred)))
def log_train(batch_id, batch_num, metric, step_loss, log_interval, epoch_id, learning_rate):
    """Generate and print out the log message for training."""
    names, values = metric.get()
    # Single-metric objects return scalars; normalize to lists.
    if not isinstance(names, list):
        names, values = [names], [values]
    fmt = '[Epoch %d Batch %d/%d] loss=%.4f, lr=%.7f, metrics:' + \
        ','.join([name + ':%.4f' for name in names])
    logging.info(fmt, epoch_id + 1, batch_id + 1, batch_num,
                 step_loss / log_interval, learning_rate, *values)
def log_eval(batch_id, batch_num, metric, step_loss, log_interval):
    """Generate and print out the log message for inference."""
    names, values = metric.get()
    # Single-metric objects return scalars; normalize to lists.
    if not isinstance(names, list):
        names, values = [names], [values]
    fmt = '[Batch %d/%d] loss=%.4f, metrics:' + \
        ','.join([name + ':%.4f' for name in names])
    logging.info(fmt, batch_id + 1, batch_num,
                 step_loss / log_interval, *values)
def train(metric):
"""Training function."""
if not only_inference:
logging.info('Now we are doing BERT classification training on %s!', ctx)
all_model_params = model.collect_params()
optimizer_params = {'learning_rate': lr, 'epsilon': epsilon, 'wd': 0.01}
try:
trainer = gluon.Trainer(all_model_params, args.optimizer,
optimizer_params, update_on_kvstore=False)
except ValueError as e:
print(e)
warnings.warn(
'AdamW optimizer is not found. Please consider upgrading to '
'mxnet>=1.5.0. Now the original Adam optimizer is used instead.')
trainer = gluon.Trainer(all_model_params, 'adam',
optimizer_params, update_on_kvstore=False)
step_size = batch_size * accumulate if accumulate else batch_size
num_train_steps = int(num_train_examples / step_size * args.epochs)
warmup_ratio = args.warmup_ratio
num_warmup_steps = int(num_train_steps * warmup_ratio)
step_num = 0
# Do not apply weight decay on LayerNorm and bias terms
for _, v in model.collect_params('.*beta|.*gamma|.*bias').items():
v.wd_mult = | |
#!/usr/bin/env python
# Copyright (c) Microsoft Corporation
# All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
# to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import print_function
import time
import os
import sys
import argparse
import logging
import logging.config
from deployment.confStorage.download import download_configuration
from deployment.confStorage.synchronization import synchronization
from deployment.confStorage.external_version_control.external_config import uploading_external_config
from deployment.paiLibrary.common import linux_shell
from deployment.paiLibrary.common import file_handler
from deployment.paiLibrary.clusterObjectModel import objectModelFactory
from deployment.paiLibrary.paiService import service_management_start
from deployment.paiLibrary.paiService import service_management_stop
from deployment.paiLibrary.paiService import service_management_delete
from deployment.paiLibrary.paiService import service_management_refresh
from deployment.paiLibrary.paiCluster import cluster_util
from deployment.k8sPaiLibrary.maintainlib import add as k8s_add
from deployment.k8sPaiLibrary.maintainlib import remove as k8s_remove
from deployment.k8sPaiLibrary.maintainlib import etcdfix as k8s_etcd_fix
from deployment.k8sPaiLibrary.maintainlib import kubectl_conf_check
from deployment.k8sPaiLibrary.maintainlib import kubectl_install
from deployment.k8sPaiLibrary.maintainlib import update as k8s_update
logger = logging.getLogger(__name__)
def setup_logging():
    """
    Setup logging configuration from the deployment sysconf YAML file.
    """
    config_file = "deployment/sysconf/logging.yaml"
    logging.config.dictConfig(file_handler.load_yaml_config(config_file))
#########
## TODO: Please remove all functions below after cluster_object_model is finished.
def load_cluster_objectModel_service(config_path):
    """Build the cluster object model from config_path and return its "service" view."""
    factory = objectModelFactory.objectModelFactory(config_path)
    return factory.objectModelPipeLine()["service"]
def load_cluster_objectModel_k8s(config_path):
    """Build the cluster object model from config_path and return its "k8s" view."""
    factory = objectModelFactory.objectModelFactory(config_path)
    return factory.objectModelPipeLine()["k8s"]
def cluster_object_model_generate_service(config_path):
    """Alias kept for the legacy naming; delegates to the service-view loader."""
    return load_cluster_objectModel_service(config_path)
def cluster_object_model_generate_k8s(config_path):
    """Alias kept for the legacy naming; delegates to the k8s-view loader."""
    return load_cluster_objectModel_k8s(config_path)
## TODO: Please remove all functions above after cluster_object_model is finished.
#########
# True : continue
# False: exit
def kubectl_env_checking(cluster_object_mode):
    """Verify the local kubectl configuration, offering to (re)install it.

    Returns True when the caller may continue, False when the operation
    should be aborted.  Each interactive prompt allows three invalid
    answers before forcing an abort.

    NOTE(review): uses raw_input, so this code path is Python 2 only.
    """
    kubectl_conf_ck_worker = kubectl_conf_check.kubectl_conf_check(cluster_object_mode)
    if kubectl_conf_ck_worker.check() == False:
        count_input = 0
        while True:
            user_input = raw_input("Do you want to re-install kubectl by paictl? (Y/N) ")
            if user_input == "N":
                # Declined the reinstall: confirm whether to abort entirely.
                count_quit = 0
                while True:
                    quit_or_not = raw_input("Do you want to quit by this operation? (Y/N) ")
                    if quit_or_not == "Y":
                        return False
                    elif quit_or_not == "N":
                        return True
                    else:
                        print(" Please type Y or N.")
                        count_quit = count_quit + 1
                        if count_quit == 3:
                            logger.warning("3 Times......... Sorry, we will force stopping your operation.")
                            return False
            elif user_input == "Y":
                # Install kubectl in place, then continue.
                kubectl_install_worker = kubectl_install.kubectl_install(cluster_object_mode)
                kubectl_install_worker.run()
                return True
            else:
                print(" Please type Y or N.")
                count_input = count_input + 1
                if count_input == 3:
                    logger.warning("3 Times......... Sorry, we will force stopping your operation.")
                    return False
    # kubectl is already configured correctly.
    return True
class SubCmd(object):
    """Interface class for defining a paictl sub-command."""

    def register(self, parser):
        """Subclasses override this to attach their arguments/sub-parsers."""
        pass

    @staticmethod
    def add_handler(parser, handler, *args, **kwargs):
        """Create a sub-parser and bind *handler* as its default handler."""
        sp = parser.add_parser(*args, **kwargs)
        # Parsing this sub-command later yields args.handler == handler.
        sp.set_defaults(handler=handler)
        return sp

    def run(self, args):
        """Dispatch to the handler selected during argument parsing.

        Subclasses that register handlers via add_handler need not override
        this.
        """
        args.handler(args)
class Machine(SubCmd):
def register(self, parser):
    """Register the machine sub-commands: add, remove, etcd-fix, update."""
    machine_parser = parser.add_subparsers(help="machine operations")

    def add_arguments(sub):
        # Shared options for the node-list based operations.
        sub.add_argument("-p", "--config-path", dest="config_path", required=True,
                         help="The path of your configuration directory.")
        sub.add_argument("-l", "--node-list", dest="node_list", required=True,
                         help="The node-list to be operator")

    for cmd_name, handler in (("add", self.machine_add),
                              ("remove", self.machine_remove),
                              ("etcd-fix", self.etcd_fix)):
        add_arguments(SubCmd.add_handler(machine_parser, handler, cmd_name))

    update_parser = SubCmd.add_handler(machine_parser, self.machine_update, "update")
    update_parser.add_argument("-p", "--config-path", dest="config_path", default=None,
                               help="the path of directory which stores the cluster configuration.")
    update_parser.add_argument("-c", "--kube-config-path", dest="kube_config_path", default="~/.kube/config",
                               help="The path to KUBE_CONFIG file. Default value: ~/.kube/config")
def process_args(self, args):
cluster_object_model_k8s = cluster_object_model_generate_k8s(args.config_path)
node_list = file_handler.load_yaml_config(args.node_list)
if not kubectl_env_checking(cluster_object_model_k8s):
raise RuntimeError("failed to do kubectl checking")
for host in node_list["machine-list"]:
if "nodename" not in host:
host["nodename"] = host["hostip"]
return cluster_object_model_k8s, node_list
def machine_add(self, args):
cluster_object_model_k8s, node_list = self.process_args(args)
for host in node_list["machine-list"]:
add_worker = k8s_add.add(cluster_object_model_k8s, host, True)
add_worker.run()
if host["k8s-role"] == "master":
logger.info("Master Node is added, sleep 60s to wait it ready.")
time.sleep(60)
def machine_remove(self, args):
cluster_object_model_k8s, node_list = self.process_args(args)
for host in node_list["machine-list"]:
add_worker = k8s_remove.remove(cluster_object_model_k8s, host, True)
add_worker.run()
if host["k8s-role"] == "master":
logger.info("master node is removed, sleep 60s for etcd cluster's updating")
time.sleep(60)
def machine_update(self, args):
if args.kube_config_path != None:
args.kube_config_path = os.path.expanduser(args.kube_config_path)
update_worker = k8s_update.update(kube_config_path = args.kube_config_path)
update_worker.run()
logger.info("Congratulations! Machine update is finished.")
def etcd_fix(self, args):
cluster_object_model_k8s, node_list = self.process_args(args)
if len(node_list["machine-list"]) > 1:
logger.error("etcd-fix can't fix more than one machine everytime. Please fix them one by one!")
sys.exit(1)
for host in node_list["machine-list"]:
etcd_fix_worker = k8s_etcd_fix.etcdfix(cluster_object_model_k8s, host, True)
etcd_fix_worker.run()
logger.info("Etcd has been fixed.")
class Service(SubCmd):
    """Sub-commands for operating PAI services on the cluster:
    start, stop, delete (destructive) and refresh."""

    def register(self, parser):
        """Register the service sub-commands and their shared arguments."""
        service_parser = parser.add_subparsers(help="service operations")

        def add_arguments(parser):
            # Arguments shared by all service sub-commands.
            parser.add_argument("-p", "--config-path", dest="config_path", required=True,
                                help="The path of your configuration directory.")
            parser.add_argument("-n", "--service-name", dest="service_name", default="all",
                                help="Build and push the target image to the registry")

        start_parser = SubCmd.add_handler(service_parser, self.service_start, "start")
        stop_parser = SubCmd.add_handler(service_parser, self.service_stop, "stop")
        delete_parser = SubCmd.add_handler(service_parser, self.service_delete, "delete")
        refresh_parser = SubCmd.add_handler(service_parser, self.service_refresh, "refresh")
        # TODO: Two features.
        #   Rolling Update Service : paictl.py service update -p /path/to/configuration/ [ -n service-x ]
        #   Rolling back Service   : paictl.py service update -p /path/to/configuration/ [ -n service-x ]
        add_arguments(start_parser)
        add_arguments(stop_parser)
        add_arguments(delete_parser)
        add_arguments(refresh_parser)

    def process_args(self, args):
        """Build the cluster object models and resolve the target service list.

        Returns ``(cluster_object_model, service_list)``; service_list is
        None when every service is targeted ("all").
        Raises RuntimeError when the kubectl environment check fails.
        """
        cluster_object_model = cluster_object_model_generate_service(args.config_path)
        cluster_object_model_k8s = cluster_object_model_generate_k8s(args.config_path)
        service_list = None
        if args.service_name != "all":
            service_list = [args.service_name]
        # Tricky, re-install kubectl first.
        # TODO: install kubectl-install here.
        if not kubectl_env_checking(cluster_object_model_k8s):
            raise RuntimeError("failed to do kubectl checking")
        return cluster_object_model, service_list

    def service_start(self, args):
        """Start the target service(s)."""
        cluster_object_model, service_list = self.process_args(args)
        # NOTE(review): 'serivce_management_start' is the attribute name as
        # defined in the imported module (typo included) -- do not "fix" it here.
        service_management_starter = service_management_start.serivce_management_start(cluster_object_model, service_list)
        service_management_starter.run()

    def service_stop(self, args):
        """Stop the target service(s)."""
        cluster_object_model, service_list = self.process_args(args)
        service_management_stopper = service_management_stop.service_management_stop(cluster_object_model, service_list)
        service_management_stopper.run()

    def service_delete(self, args):
        """Stop the target service(s) AND delete their persistent data on disk.

        Irreversible. Prompts the operator for confirmation; three invalid
        answers in a row abort the operation.
        """
        cluster_object_model, service_list = self.process_args(args)
        logger.warning("--------------------------------------------------------")
        logger.warning("--------------------------------------------------------")
        logger.warning("---------- Dangerous Operation!!! ---------------")
        logger.warning("------ The target service will be stopped -------")
        logger.warning("------ And the persistent data on the disk -------")
        logger.warning("------- will be deleted --------")
        logger.warning("--------------------------------------------------------")
        logger.warning("--------------------------------------------------------")
        logger.warning("-------- It's an irreversible operation -------")
        logger.warning("-------- After this operation, -------")
        logger.warning("------ the deleted service data is unrecoverable -------")
        logger.warning("--------------------------------------------------------")
        logger.warning("--------------------------------------------------------")
        logger.warning("---- Please ensure you wanna do this operator, ------")
        logger.warning("------- after knowing all risk above. -------")
        logger.warning("--------------------------------------------------------")
        logger.warning("--------------------------------------------------------")
        count_input = 0
        while True:
            user_input = raw_input("Do you want to continue this operation? (Y/N) ")
            if user_input == "N":
                return
            elif user_input == "Y":
                break
            else:
                print(" Please type Y or N.")
                count_input = count_input + 1
                if count_input == 3:
                    logger.warning("3 Times......... Sorry, we will force stopping your operation.")
                    return
        service_management_deleter = service_management_delete.service_management_delete(cluster_object_model, service_list)
        service_management_deleter.run()

    def service_refresh(self, args):
        """Refresh (re-apply the configuration of) the target service(s)."""
        cluster_object_model, service_list = self.process_args(args)
        service_management_refresher = service_management_refresh.service_management_refresh(cluster_object_model, service_list)
        service_management_refresher.run()
class Cluster(SubCmd):
def register(self, parser):
cluster_parser = parser.add_subparsers(help="cluster operations")
bootup_parser = SubCmd.add_handler(cluster_parser, self.k8s_bootup, "k8s-bootup")
clean_parser = SubCmd.add_handler(cluster_parser, self.k8s_clean, "k8s-clean")
env_parser = SubCmd.add_handler(cluster_parser, self.k8s_set_environment, "k8s-set-env")
bootup_parser.add_argument("-p", "--config-path", dest="config_path", required=True,
help="path of cluster configuration file")
clean_parser.add_argument("-p", "--config-path", dest="config_path", required=True, help="path of cluster configuration file")
clean_parser.add_argument("-f", "--force", dest="force", required=False, action="store_true", help="clean all the data forcefully")
env_parser.add_argument("-p", "--config-path", dest="config_path", help="path of cluster configuration file")
def k8s_bootup(self, args):
cluster_config = cluster_object_model_generate_k8s(args.config_path)
logger.info("Begin to initialize PAI k8s cluster.")
cluster_util.maintain_cluster_k8s(cluster_config, option_name="deploy", clean=True)
logger.info("Finish initializing PAI k8s cluster.")
def k8s_clean(self, args):
# just use 'k8s-clean' for testing temporarily .
cluster_config = cluster_object_model_generate_k8s(args.config_path)
logger.warning("--------------------------------------------------------")
logger.warning("--------------------------------------------------------")
logger.warning("---------- Dangerous Operation!!! ---------------")
logger.warning("------ Your k8s Cluster will be destroyed -------")
logger.warning("------ PAI service on k8s will be stopped -------")
logger.warning("--------------------------------------------------------")
if args.force:
logger.warning("--------------------------------------------------------")
logger.warning("---------- ETCD data will be cleaned. ------------")
logger.warning("----- If you wanna keep pai's user data. ---------")
logger.warning("----- Please backup etcd data. ---------")
logger.warning("----- And restore it after k8s-bootup ---------")
logger.warning("--- And restore it before deploy pai service ----")
logger.warning("--------------------------------------------------------")
logger.warning("--------------------------------------------------------")
logger.warning("---- Please ensure you wanna do this operator, ------")
logger.warning("------- after knowing all risk above. -------")
logger.warning("--------------------------------------------------------")
logger.warning("--------------------------------------------------------")
count_input = 0
while True:
user_input = raw_input("Do you want to continue this operation? (Y/N) ")
if user_input == "N":
return
elif user_input == "Y":
break
else:
print(" Please type Y or N.")
count_input = count_input + 1
if count_input == 3:
logger.warning("3 Times......... Sorry, we will force stopping your operation.")
return
logger.info("Begin to clean up | |
<reponame>NextThought/nti.schema
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
For producing a JSON schema appropriate for use by clients, based on a
Zope schema.
The ``TAG`` constants are intended to be set as (boolean) tagged values
on fields of interfaces, helping determine how the schema is built.
.. note:: This schema is ad-hoc and non-standard.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from copy import copy
try:
from collections.abc import Sequence
except ImportError: # pragma: no cover
# Python 2
from collections import Sequence
from numbers import Number
from six import integer_types
from six import string_types
from six import text_type
from zope.i18n import translate
from zope.interface.interfaces import IMethod
from zope.interface.interfaces import IInterface
from zope.schema import interfaces as sch_interfaces
from zope.schema import vocabulary as sch_vocabulary
from nti.schema.interfaces import IVariant
from nti.schema.interfaces import find_most_derived_interface
__docformat__ = "restructuredtext en"
#: Don't display this by default in the UI
TAG_HIDDEN_IN_UI = "nti.dataserver.users.field_hidden_in_ui"
#: Qualifying details about how the field should be treated, such as data source
#: The ``UI_TYPE`` values are defined known values for this tag.
TAG_UI_TYPE = 'nti.dataserver.users.field_type'
#: Overrides the value from the field itself
TAG_REQUIRED_IN_UI = 'nti.dataserver.users.field_required'
#: Overrides the value from the field itself, if true
TAG_READONLY_IN_UI = 'nti.dataserver.users.field_readonly'
#: A tagged value that echoes semi-arbitrary data back in the schema
#: under the key ``application_info``.
#:
#: The value for this field *must* be a mapping object whose keys
#: are strings and whose values are JSON serializable. Text values
#: (specifically `zope.i18nmessageid.Message` objects) will be translated
#: when put into the schema. The object in the schema *will not* be the object
#: in the tagged value, and constituent composite values will also be copied.
#: (In case any post-processing of the schema is done based on current user,
#: request or site, this isolates such modifications.)
#:
#: .. versionadded:: 1.12.0
TAG_APPLICATION_INFO = 'nti.schema.jsonschema.field_application_info'
# Known types
#: The email type
UI_TYPE_EMAIL = 'nti.dataserver.users.interfaces.EmailAddress'
#: An email type that is stored as a non-recoverable hash.
#: The value is chosen so that a begins-with test will match
#: either this or :const:`UI_TYPE_EMAIL`, making validation easier
UI_TYPE_HASHED_EMAIL = UI_TYPE_EMAIL + ":Hashed"
#: Something that can be set once, typically during account creation
UI_TYPE_ONE_TIME_CHOICE = 'nti.dataserver.users.interfaces.OneTimeChoice'
def get_ui_type_from_interface(iface):
ui_type = iface.getName()
ui_type = ui_type[1:] if ui_type.startswith('I') else ui_type
return ui_type
_interface_to_ui_type = interface_to_ui_type = get_ui_type_from_interface # BWC
def get_ui_type_from_field_interface(field):
    """
    Return a UI type derived from the most specific ``IField``-based
    interface *field* provides, or None when nothing more specific
    than ``IField`` itself is found.
    """
    derived = find_most_derived_interface(field, sch_interfaces.IField)
    if derived is sch_interfaces.IField:
        return None
    return get_ui_type_from_interface(derived)

_ui_type_from_field_iface = ui_type_from_field_iface = get_ui_type_from_field_interface # BWC
def get_ui_types_from_field(field):
    """
    Return a ``(ui_type, ui_base_type)`` pair describing *field* for the UI.

    The primary type comes from the field's ``_type`` attribute when that is
    a concrete type (or a tuple of types); otherwise it is derived from the
    most specific ``IField`` interface the field provides. A handful of
    spellings are then normalized (string flavors to 'string', Sequence to
    'list', Mapping to 'dict') and paired with a coarse base type
    ('string', 'int', 'float', 'bool') where one applies.
    """
    ui_base_type = None
    declared = getattr(field, '_type', None)
    if isinstance(declared, type):
        ui_type = declared.__name__
    elif isinstance(declared, tuple):
        # Most commonly a tuple of subclasses of one family (usually
        # strings); collapse the tuple to a single family name when possible.
        ui_type = None
        for family, family_name in ((string_types, 'basestring'),
                                    (integer_types, 'int'),
                                    (float, 'float'),
                                    (Sequence, 'list')):
            if all(issubclass(member, family) for member in declared):
                ui_type = family_name
                break
    else:
        ui_type = get_ui_type_from_field_interface(field)

    if ui_type in ('unicode', 'str', 'basestring'):
        # All of these are 'string' to the UI; see whether the field's
        # interface offers something more specific than generic text.
        more_specific = get_ui_type_from_field_interface(field)
        if more_specific and more_specific not in ('TextLine', 'Text'): # pragma: no cover
            ui_type = more_specific
            ui_base_type = 'string'
        else:
            ui_type = 'string'
            ui_base_type = 'string'
    elif ui_type in ('Sequence', 'MutableSequence'):
        ui_type = 'list'
    elif ui_type in ('Mapping', 'MutableMapping'):
        ui_type = 'dict'
    elif ui_type in ('Number', 'float', 'Decimal', 'Complex', 'Real', 'Rational'):
        ui_base_type = 'float'
    elif ui_type in ('int', 'long', 'Integral'):
        ui_base_type = 'int'
    elif ui_type in ('bool',):
        ui_base_type = 'bool'
    return ui_type, ui_base_type

_ui_type_from_field = ui_type_from_field = get_ui_types_from_field # BWC
def get_data_from_choice_field(v, base_type=None):
    """
    Return ``(choices, base_type)`` for the choice-like field *v*.

    *choices* is a sequence of term dictionaries (token/value/title) or bare
    tokens when a term has no title; *base_type* is set to 'string' when no
    base type was supplied and every collected token is a string.
    """
    # Vocabulary could be a name or the vocabulary itself
    choices = ()
    vocabulary = None
    if sch_interfaces.IVocabulary.providedBy(v.vocabulary): # pragma: no cover
        vocabulary = v.vocabulary
    elif isinstance(v.vocabularyName, string_types):
        # Only a name was given; resolve it through the global registry.
        name = v.vocabularyName
        vocabulary = sch_vocabulary.getVocabularyRegistry().get(None, name)
    if vocabulary is not None:
        choices = []
        tokens = []
        for term in vocabulary:
            # For BWC, we do different things depending on whether
            # there is a title or not
            if getattr(term, 'title', None):
                try:
                    # like nti.externalization, but without the dependency
                    choice = term.toExternalObject()
                except AttributeError: # pragma: no cover
                    choice = {
                        'token': term.token,
                        'value': term.value,
                        'title': term.title
                    }
                choices.append(choice)
            else: # pragma: no cover
                choices.append(term.token) # bare; ideally this would go away
            tokens.append(term.token)
        # common case, these will all be the same type
        if not base_type \
            and all((isinstance(x, string_types) for x in tokens)):
            base_type = 'string'
    return choices, base_type

_process_choice_field = process_choice_field = get_data_from_choice_field
class JsonSchemafier(object):
    def __init__(self, schema, readonly_override=None, context=None):
        """
        Create a new schemafier.

        :param schema: The zope schema (interface) to use.
        :param bool readonly_override: If given, a boolean value that will replace all
            readonly values in the schema.
        :param context: The context passed to :func:`zope.i18n.translate`
            when translating text values (e.g. ``application_info`` entries).
        """
        self.schema = schema
        self.readonly_override = readonly_override
        self.context = context
    def _iter_names_and_descriptions(self):
        """
        Return an iterable across the names and descriptions of the schema.

        Subclass hook to change what is considered. ``all=True`` presumably
        includes attributes inherited from base interfaces — TODO confirm
        against the zope.interface documentation.
        """
        return self.schema.namesAndDescriptions(all=True)
def allow_field(self, name, field):
"""
Return if the field is allowed in the external schema.
By default, this checks to see if the field has a true value for the
tag `TAG_HIDDEN_IN_UI`, or if the field's name starts with an underscore.
.. versionchanged:: 1.12.0
This will now be called for nested fields, such as the ``value_type`` or
``key_type`` of collections and mappings. In those cases, commonly the name will
be the empty string.
"""
if field.queryTaggedValue(TAG_HIDDEN_IN_UI) or name.startswith('_'):
return False
return True
    def get_ui_types_from_field(self, field):
        """
        Return the type and base type for the specified field.

        Delegates to the module-level function of the same name;
        exists as a subclass override point.
        """
        return get_ui_types_from_field(field)
    ui_types_from_field = get_ui_types_from_field # BWC
    def get_data_from_choice_field(self, field, base_type=None):
        """
        Return the choices and base type for the specified field.

        Delegates to the module-level function of the same name;
        exists as a subclass override point.
        """
        return get_data_from_choice_field(field, base_type)
    process_choice_field = get_data_from_choice_field # BWC
    def post_process_field(self, name, field, item_schema):
        """
        Subclass hook invoked for each top-level field after its schema
        dictionary has been built; implementations may mutate *item_schema*
        in place. The default implementation does nothing.
        """
        pass
def bind(self, schema):
clone = self.__class__.__new__(self.__class__)
clone.__dict__.update(self.__dict__)
clone.schema = schema
return clone
def make_schema(self):
"""
Create the JSON schema.
Individual fields of the schema will be checked and returned. See the various
``TAG`` constants for ways that the schema externalization can be influenced.
:return: A dictionary consisting of dictionaries, one for each field. All the keys
are strings and the values are strings, bools, numbers, or lists of primitives.
Will be suitable for writing to JSON.
"""
ext_schema = {}
for k, v in self._iter_names_and_descriptions():
__traceback_info__ = k, v
if IMethod.providedBy(v):
continue
# v could be a schema field or an interface.Attribute
if not self.allow_field(k, v):
# If we disallow at the top level, we don't even
# hint to its existence.
continue
item_schema = self._make_field_schema(v, k)
self.post_process_field(k, v, item_schema)
ext_schema[k] = item_schema
return ext_schema
    def _translate(self, text):
        # Translate *text* (e.g. a zope.i18nmessageid Message) using the
        # context supplied at construction time.
        return translate(text, context=self.context)
    def _make_field_schema(self, field, name=None):
        """
        Build the externalized schema dictionary for a single *field*.

        Returns None when the field is rejected by :meth:`allow_field`
        (used when the caller still needs a placeholder value).
        """
        name = name or field.__name__ or ''
        if not self.allow_field(name, field):
            # Disallowed, but we're already expecting a return value to put
            # in a dictionary.
            return None
        # Tagged values override what the field itself declares.
        required = getattr(field, 'required', None)
        required = field.queryTaggedValue(TAG_REQUIRED_IN_UI) or required
        readonly_override = self.readonly_override
        if readonly_override is not None:
            readonly = readonly_override
        else:
            readonly = getattr(field, 'readonly', False)
            readonly = field.queryTaggedValue(TAG_READONLY_IN_UI) or readonly
        ui_base_type = None
        item_schema = {
            'name': name,
            'required': required,
            'readonly': readonly,
        }
        ui_type = field.queryTaggedValue(TAG_UI_TYPE)
        if not ui_type:
            ui_type, ui_base_type = self.get_ui_types_from_field(field)
        else:
            # An explicit UI type was tagged; still derive the base type.
            _, ui_base_type = self.get_ui_types_from_field(field)
        item_schema['type'] = ui_type
        item_schema['base_type'] = ui_base_type
        # Now add supplemental information about nested data.
        # Note that we're consistent to always provide *something*
        # for these optional keys, if they are expected to be there based on the
        # type of the field, it just may be None if it was excluded (or empty,
        # if all variants were excluded).
        if sch_interfaces.IChoice.providedBy(field):
            choices, base_type = self.get_data_from_choice_field(field, ui_base_type)
            item_schema['choices'] = choices
            item_schema['base_type'] = base_type
        if IVariant.providedBy(field):
            # 'fields' is not actually declared in the IVariant
            # interface; that's ok, we couldn't handle it automatically anyway.
            item_schema['value_type_options'] = [
                self._make_field_schema(the_field)
                for the_field
                in field.fields
                if self.allow_field(the_field.__name__, the_field)
            ]
        # Echo (translated/copied) application data; never the original objects.
        application_info = field.queryTaggedValue(TAG_APPLICATION_INFO) or {}
        item_schema['application_info'] = {
            k: self._translate(v) if isinstance(v, text_type) else copy(v)
            for k, v in application_info.items()
        }
        self._fill_field_values(item_schema, field)
        return item_schema
def _fill_field_values(self, item_schema, field,
# Certain things from IField that we exclude
# because they are not useful externally or may
# expose sensitive information. `validate_invariants`
| |
reqd_string_in_label, nsamples_per_condition, label_value_counts, srs_subset, all_indexes_to_use, all_samples_to_use, labels_to_use, counts_to_use, conditions_list], os.path.join(data_directory, 'datasets', dataset_name), dataset_name+'.pkl')
# Write all the data for input into DESeq2, instead of just a sample
def write_all_data_for_deseq2_input(srs_labels, df_counts, data_directory, dataset_name, drop_zero_genes=False, log_fcn=print):
    """Export the full dataset (all samples) in the format DESeq2's pasilla example expects.

    Writes ``annotation.csv`` and ``gene_counts.tsv`` under
    ``<data_directory>/datasets/<dataset_name>`` and pickles the dataset data.

    Sample call: write_all_data_for_deseq2_input(df_samples['label 1'], df_counts, data_directory, 'all_data')

    :param srs_labels: Series of condition labels indexed by sample name.
    :param df_counts: DataFrame of counts with samples in the rows.
    :param drop_zero_genes: When True, drop genes whose counts are all zero.
    :param log_fcn: Callable used for progress logging (defaults to print).
    :raises ValueError: When the counts' columns and labels' index disagree.
    """
    # Import relevant libraries
    import os
    tci = get_tci_library()

    # Use every index: unlike the sampling variant, this writes *all* the data
    all_indexes_to_use = list(range(len(srs_labels)))
    log_fcn('\nHere is the final set of numerical indexes that we\'re using ({} of them):'.format(len(all_indexes_to_use)))
    log_fcn(all_indexes_to_use)

    # Get the labels/conditions and counts (genes in the rows after the transpose)
    all_samples_to_use = srs_labels.index[all_indexes_to_use]  # actual descriptive indexes from the numerical indexes
    labels_to_use = srs_labels[all_samples_to_use]
    counts_to_use = df_counts.loc[all_samples_to_use, :].transpose()

    # Optionally delete rows of counts that are all zeros
    if drop_zero_genes:
        counts_to_use = counts_to_use[(counts_to_use != 0).any(axis=1)]

    # The counts' columns and the labels' index must agree sample-for-sample
    if not counts_to_use.columns.equals(labels_to_use.index):
        log_fcn('ERROR: Indexes/columns of the labels/counts are inconsistent')
        raise ValueError('Indexes/columns of the labels/counts are inconsistent')

    # Create the dataset directory if it doesn't already exist
    os.makedirs(os.path.join(data_directory, 'datasets', dataset_name), exist_ok=True)

    # Write the annotation file in the same format as the pasilla example.
    # BUGFIX: these rows were previously emitted via log_fcn(..., file=f);
    # the annotation data must be written to the file itself regardless of
    # which logging function the caller supplies.
    with open(file=os.path.join(data_directory, 'datasets', dataset_name, 'annotation.csv'), mode='w') as f:
        print('"file","condition"', file=f)
        for curr_file, condition in zip(labels_to_use.index, labels_to_use):
            print('"{}","{}"'.format(curr_file, condition), file=f)

    # Write the gene counts in the same format as the pasilla example
    with open(file=os.path.join(data_directory, 'datasets', dataset_name, 'gene_counts.tsv'), mode='w') as f:
        counts_to_use.to_csv(f, sep='\t', index_label='gene_id')

    # Save the dataset data
    tci.make_pickle([srs_labels, df_counts, data_directory, dataset_name, all_indexes_to_use, all_samples_to_use, labels_to_use, counts_to_use], os.path.join(data_directory, 'datasets', dataset_name), dataset_name+'.pkl')
# Create and plot PCA and tSNE analyses
def plot_pca_and_tsne(data_directory, dataset_name, transformation_name='variance-stabilizing', ntop=500, n_components_pca=10, alpha=1, dpi=300, y=None, save_figure=False, log_fcn=print, plt_ctx=None):
    """Create and plot PCA and tSNE analyses of a transformed dataset.

    Reads the assay/coldata CSVs written for *dataset_name*, keeps the
    *ntop* highest-variance genes, runs PCA and tSNE, and plots (and
    optionally saves) both scatterplots.

    :param y: Optional custom label Series; when None, labels come from the
        coldata 'condition' column.
    :param plt_ctx: Optional context manager wrapped around the plotting
        calls (defaults to a nullcontext).
    """
    # Sample call: plot_pca_and_tsne('/data/BIDS-HPC/private/projects/dmi2/data/datasets/all_data/assay_normal_transformation.csv', '/data/BIDS-HPC/private/projects/dmi2/data/datasets/all_data/coldata_normal_transformation.csv', 'normal', '/data/BIDS-HPC/private/projects/dmi2/data/datasets/all_data')
    # Import relevant libraries
    import os
    import matplotlib.lines as mpl_lines
    import matplotlib.pyplot as plt
    import pandas as pd
    import seaborn as sns
    import sklearn.decomposition as sk_decomp
    import sklearn.manifold as sk_manif
    import contextlib
    if not plt_ctx:
        plt_ctx = contextlib.nullcontext()
    # Process the arguments
    transformation_name_filename = transformation_name.lower().replace(' ','_').replace('-','_') # get a version of the transformation_name suitable for filenames
    data_dir = os.path.join(data_directory, 'datasets', dataset_name) # '/data/BIDS-HPC/private/projects/dmi2/data/datasets/all_data'
    assay_csv_file = os.path.join(data_dir, 'assay_' + transformation_name_filename + '_transformation.csv') # '/data/BIDS-HPC/private/projects/dmi2/data/datasets/all_data/assay_variance_stabilizing_transformation.csv'
    coldata_csv_file = os.path.join(data_dir, 'coldata_' + transformation_name_filename + '_transformation.csv') # '/data/BIDS-HPC/private/projects/dmi2/data/datasets/all_data/coldata_variance_stabilizing_transformation.csv'
    # Determine the data matrix
    df_assay = pd.read_csv(assay_csv_file).set_index('Unnamed: 0') # read in the transformed data
    top_genes = df_assay.var(axis=1).sort_values(axis=0, ascending=False)[:ntop].index # get the indexes of the top-ntop-variance genes
    X = df_assay.loc[top_genes,:].T # keep only the top genes and transpose in order to get the typical data matrix format with the samples in the rows
    # Determine the labels vector
    if y is None:
        df_coldata = pd.read_csv(coldata_csv_file).set_index('Unnamed: 0') # read in the column data, which includes the labels (in the 'condition' column)
        y = df_coldata.loc[X.index,'condition'] # ensure the labels are ordered in the same way as the data and take just the 'condition' column as the label
        fn_addendum = ''
    else: # allow for a custom set of labels
        y = y.loc[X.index]
        fn_addendum = '_custom_label'
    # Order the samples by their labels
    sample_order = y.sort_values().index
    y = y.loc[sample_order]
    X = X.loc[sample_order,:]
    if not y.index.equals(X.index):
        log_fcn('ERROR: Weirdly inconsistent ordering')
        exit()
    # Perform PCA
    pca = sk_decomp.PCA(n_components=n_components_pca)
    pca_res = pca.fit_transform(X)
    log_fcn('Top {} PCA explained variance ratios: {}'.format(n_components_pca, pca.explained_variance_ratio_))
    # Get a reasonable set of markers and color palette
    markers = mpl_lines.Line2D.filled_markers
    nclasses = len(set(y))
    marker_list = markers * int(nclasses/len(markers)+1) # repeat the marker set so there is at least one marker per class
    color_palette = sns.color_palette("hls", nclasses)
    # Plot and save the PCA
    with plt_ctx:
        fig = plt.figure(figsize=(12,7.5), facecolor='w')
        ax = sns.scatterplot(x=pca_res[:,0], y=pca_res[:,1], hue=y, style=y, palette=color_palette, legend="full", alpha=alpha, markers=marker_list, edgecolor='k')
        ax.legend(bbox_to_anchor=(1,1))
        ax.set_title('PCA - ' + transformation_name + ' transformation')
        if save_figure:
            fig.savefig(os.path.join(data_dir, 'pca_' + transformation_name_filename + '_transformation' + fn_addendum + '.png'), dpi=dpi, bbox_inches='tight')
        plt.show(fig)
    # Perform tSNE analysis
    tsne = sk_manif.TSNE(n_components=2)
    tsne_res = tsne.fit_transform(X)
    # Plot and save the tSNE analysis
    with plt_ctx:
        fig = plt.figure(figsize=(12,7.5), facecolor='w')
        ax = sns.scatterplot(x=tsne_res[:,0], y=tsne_res[:,1], hue=y, style=y, palette=color_palette, legend="full", alpha=alpha, markers=marker_list, edgecolor='k')
        ax.legend(bbox_to_anchor=(1,1))
        ax.set_title('tSNE - ' + transformation_name + ' transformation')
        if save_figure:
            fig.savefig(os.path.join(data_dir, 'tsne_' + transformation_name_filename + '_transformation' + fn_addendum + '.png'), dpi=dpi, bbox_inches='tight')
        plt.show(fig)
# Run VST using DESeq2 on data exported from Python
def run_deseq2(dataset_name, project_directory):
    """Run VST using DESeq2 (via an Rscript) on data exported from Python.

    NOTE(review): the actual Rscript invocation below is commented out, so
    this function is currently a no-op stub — confirm whether DESeq2 is run
    by other means before relying on its outputs.
    """
    # run_deseq2('all_data_label_2', project_directory)
    import os
    import subprocess
    # cmd_list = ['Rscript', '--vanilla', os.path.join(project_directory, 'checkout', 'run_vst.R'), dataset_name, project_directory]
    # log_fcn('Now running command: ' + ' '.join(cmd_list))
    # list_files = subprocess.run(cmd_list)
    # log_fcn('The Rscript exit code was {}'.format(list_files.returncode))
# This function will take the raw counts and their labels and return the data matrix X (dataframe) and labels vector y (series) with the samples in label order and the genes in top-variance order by running the VST using DESeq2, saving all intermediate files
def run_vst(counts_dataframe, labels_series, project_directory, log_fcn=print):
    """Variance-stabilizing-transform the raw counts via DESeq2 and return (X, y).

    X is a DataFrame with samples in the rows and genes ordered by decreasing
    variance; y is the matching Series of labels. Samples are ordered by
    label. All intermediate files are saved, and the final result is cached
    as a pickle under the dataset directory.

    Sample call: X, y = run_vst(df_counts, df_samples['label 1'], project_directory)
    """
    # Import relevant libraries
    import os
    import pandas as pd
    tci = get_tci_library()

    # Constant (basically)
    transformation_name = 'variance-stabilizing'

    # Derive the dataset name and the filenames DESeq2 reads/writes
    dataset_name = labels_series.name.lower().replace(' ','_').replace('-','_')
    data_dir = os.path.join(project_directory, 'data')
    transformation_name_filename = transformation_name.lower().replace(' ','_').replace('-','_') # filename-safe version of transformation_name
    dataset_dir = os.path.join(data_dir, 'datasets', dataset_name)
    assay_csv_file = os.path.join(dataset_dir, 'assay_' + transformation_name_filename + '_transformation.csv')
    coldata_csv_file = os.path.join(dataset_dir, 'coldata_' + transformation_name_filename + '_transformation.csv')

    if os.path.exists(os.path.join(dataset_dir, 'vst_transformed_data.pkl')):
        # Cached result: just read it back in
        [X, y] = tci.load_pickle(dataset_dir, 'vst_transformed_data.pkl')
    else:
        # Write all the data for input into DESeq2
        write_all_data_for_deseq2_input(labels_series, counts_dataframe, data_dir, dataset_name)

        # Run VST using DESeq2 on the dataset exported above; this writes
        # (at least) assay_csv_file and coldata_csv_file
        run_deseq2(dataset_name, project_directory)

        # Data matrix: genes ordered by decreasing variance, samples in rows
        df_assay = pd.read_csv(assay_csv_file).set_index('Unnamed: 0')
        top_variance_order = df_assay.var(axis=1).sort_values(axis=0, ascending=False).index
        X = df_assay.loc[top_variance_order,:].T

        # Labels vector, aligned with the data matrix
        df_coldata = pd.read_csv(coldata_csv_file).set_index('Unnamed: 0')
        y = df_coldata.loc[X.index,'condition']

        # Order the samples by their labels
        sample_order = y.sort_values().index
        y = y.loc[sample_order]
        X = X.loc[sample_order,:]

        # This should be a trivial check
        if not y.index.equals(X.index):
            log_fcn('ERROR: Weirdly inconsistent ordering')
            exit()

        # Cache the result to disk
        tci.make_pickle([X, y], dataset_dir, 'vst_transformed_data.pkl')

    # Return the data matrix (dataframe) and labels vector (series)
    return X, y
# Plot a PCA or tSNE analysis
def plot_unsupervised_analysis(results, y, figsize=(12,7.5), alpha=1, gray_indexes=None, ax=None, legend='full', plt_ctx=None):
# Sample calls:
#
# # Perform PCA
# import sklearn.decomposition as sk_decomp
# pca = sk_decomp.PCA(n_components=10)
# pca_res = pca.fit_transform(X.iloc[:,:500])
# log_fcn('Top {} PCA explained variance ratios: {}'.format(10, pca.explained_variance_ratio_))
# ax = tc.plot_unsupervised_analysis(pca_res, y)
# ax.set_title('PCA - variance-stabilizing transformation')
#
# # Perform tSNE analysis
# import sklearn.manifold as sk_manif
# tsne = sk_manif.TSNE(n_components=2)
# tsne_res = tsne.fit_transform(X.iloc[:,:500])
# ax = tc.plot_unsupervised_analysis(tsne_res, y)
# ax.set_title('tSNE - variance-stabilizing transformation')
#
# Import relevant libraries
import matplotlib.lines as mpl_lines
import matplotlib.pyplot as plt
import seaborn as sns
import contextlib
if not plt_ctx:
plt_ctx = contextlib.nullcontext()
# Get a reasonable set of markers and color palette
markers = mpl_lines.Line2D.filled_markers
nclasses = len(set(y))
marker_list = markers * int(nclasses/len(markers)+1)
color_palette = sns.color_palette("hls", nclasses)
# Plot results
with plt_ctx:
if ax is None:
fig, ax = plt.subplots(figsize=figsize, facecolor='w')
#ax = sns.scatterplot(x=results[:,0], y=results[:,1], hue=y, style=y, palette=color_palette, legend="full", alpha=alpha, markers=marker_list, edgecolor='k')
#ax = sns.scatterplot(x=results[:,0], y=results[:,1], hue=y, style=y, palette=color_palette, legend=legend, alpha=(0.2 if gray_indexes is not None else alpha), markers=marker_list, edgecolor='k', ax=ax)
ax = sns.scatterplot(x=results[:,0], y=results[:,1], hue=y, style=y, palette=color_palette, legend=legend, alpha=(0.2 if gray_indexes is not None else alpha), edgecolor='k', ax=ax)
if gray_indexes is not None:
import collections
gray_indexes=list(collections.OrderedDict.fromkeys(gray_indexes.to_list()))
#ax = sns.scatterplot(x=results[gray_indexes,0], y=results[gray_indexes,1], hue='gray', style=y.iloc[gray_indexes], palette=color_palette, markers=marker_list, edgecolor='k', ax=ax)
# ax = sns.scatterplot(x=results[gray_indexes,0], y=results[gray_indexes,1], color='gray', style=y.iloc[gray_indexes], palette=color_palette, markers=marker_list, edgecolor='k', ax=ax, alpha=1, | |
returned, i.e., the points where there
isn't complete overlap between kernel and spike train
are discarded
NOTE: if True and an asymmetrical kernel is provided
the output will not be aligned with [t_start, t_stop]
Returns
-------
rate : neo.AnalogSignal
Contains the rate estimation in unit hertz (Hz).
Has a property 'rate.times' which contains the time axis of the rate
estimate. The unit of this property is the same as the resolution that
is given as an argument to the function.
Raises
------
TypeError:
If argument value for the parameter `sigma` is not a quantity object
or string 'auto'.
See also
--------
elephant.statistics.make_kernel
References
----------
..[1] <NAME>, <NAME>, J Comput Neurosci (2010) 29:171–182.
"""
warnings.simplefilter('always', DeprecationWarning)
warnings.warn("deprecated", DeprecationWarning, stacklevel=2)
if sigma == 'auto':
form = 'GAU'
unit = spiketrain.units
kernel_width = sskernel(spiketrain.magnitude, tin=None,
bootstrap=True)['optw']
sigma = kw2sigma(form) * kernel_width * unit
elif not isinstance(sigma, pq.Quantity):
raise TypeError('sigma must be either a quantities object or "auto".'
' Found: %s, value %s' % (type(sigma), str(sigma)))
kernel, norm, m_idx = make_kernel(form=form, sigma=sigma,
sampling_period=sampling_period)
units = pq.CompoundUnit(
"%s*s" % str(sampling_period.rescale('s').magnitude))
spiketrain = spiketrain.rescale(units)
if t_start is None:
t_start = spiketrain.t_start
else:
t_start = t_start.rescale(spiketrain.units)
if t_stop is None:
t_stop = spiketrain.t_stop
else:
t_stop = t_stop.rescale(spiketrain.units)
time_vector = np.zeros(int((t_stop - t_start)) + 1)
spikes_slice = spiketrain.time_slice(t_start, t_stop) \
if len(spiketrain) else np.array([])
for spike in spikes_slice:
index = int((spike - t_start))
time_vector[index] += 1
r = norm * scipy.signal.fftconvolve(time_vector, kernel, 'full')
if np.any(r < 0):
warnings.warn('Instantaneous firing rate approximation contains '
'negative values, possibly caused due to machine '
'precision errors')
if acausal:
if not trim:
r = r[m_idx:-(kernel.size - m_idx)]
elif trim:
r = r[2 * m_idx:-2 * (kernel.size - m_idx)]
t_start = t_start + m_idx * spiketrain.units
t_stop = t_stop - ((kernel.size) - m_idx) * spiketrain.units
else:
if not trim:
r = r[m_idx:-(kernel.size - m_idx)]
elif trim:
r = r[2 * m_idx:-2 * (kernel.size - m_idx)]
t_start = t_start + m_idx * spiketrain.units
t_stop = t_stop - ((kernel.size) - m_idx) * spiketrain.units
rate = neo.AnalogSignal(signal=r.reshape(r.size, 1),
sampling_period=sampling_period,
units=pq.Hz, t_start=t_start)
return rate, sigma
def instantaneous_rate(spiketrain, sampling_period, kernel='auto',
                       cutoff=5.0, t_start=None, t_stop=None, trim=False):
    """
    Estimates instantaneous firing rate by kernel convolution.
    Parameters
    -----------
    spiketrain : neo.SpikeTrain or list of neo.SpikeTrain objects
        Neo object that contains spike times, the unit of the time stamps
        and t_start and t_stop of the spike train.
    sampling_period : Time Quantity
        Time stamp resolution of the spike times. The same resolution will
        be assumed for the kernel
    kernel : string 'auto' or callable object of :class:`Kernel` from module
        'kernels.py'. Currently implemented kernel forms are rectangular,
        triangular, epanechnikovlike, gaussian, laplacian, exponential,
        and alpha function.
        Example: kernel = kernels.RectangularKernel(sigma=10*ms, invert=False)
        The kernel is used for convolution with the spike train and its
        standard deviation determines the time resolution of the instantaneous
        rate estimation.
        Default: 'auto'. In this case, the optimized kernel width for the
        rate estimation is calculated according to [1] and with this width
        a gaussian kernel is constructed. Automatized calculation of the
        kernel width is not available for other than gaussian kernel shapes.
    cutoff : float
        This factor determines the cutoff of the probability distribution of
        the kernel, i.e., the considered width of the kernel in terms of
        multiples of the standard deviation sigma.
        Default: 5.0
    t_start : Time Quantity (optional)
        Start time of the interval used to compute the firing rate. If None
        assumed equal to spiketrain.t_start
        Default: None
    t_stop : Time Quantity (optional)
        End time of the interval used to compute the firing rate (included).
        If None assumed equal to spiketrain.t_stop
        Default: None
    trim : bool
        if False, the output of the Fast Fourier Transformation being a longer
        vector than the input vector by the size of the kernel is reduced back
        to the original size of the considered time interval of the spiketrain
        using the median of the kernel.
        if True, only the region of the convolved signal is returned, where
        there is complete overlap between kernel and spike train. This is
        achieved by reducing the length of the output of the Fast Fourier
        Transformation by a total of two times the size of the kernel, and
        t_start and t_stop are adjusted.
        Default: False
    Returns
    -------
    rate : neo.AnalogSignal
        Contains the rate estimation in unit hertz (Hz).
        Has a property 'rate.times' which contains the time axis of the rate
        estimate. The unit of this property is the same as the resolution that
        is given via the argument 'sampling_period' to the function.
    Raises
    ------
    TypeError:
        If `spiketrain` is not an instance of :class:`SpikeTrain` of Neo.
        If `sampling_period` is not a time quantity.
        If `kernel` is neither instance of :class:`Kernel` or string 'auto'.
        If `cutoff` is neither float nor int.
        If `t_start` and `t_stop` are neither None nor a time quantity.
        If `trim` is not bool.
    ValueError:
        If `sampling_period` is not larger than zero.
    Example
    --------
    kernel = kernels.AlphaKernel(sigma = 0.05*s, invert = True)
    rate = instantaneous_rate(spiketrain, sampling_period=2*ms, kernel=kernel)
    References
    ----------
    ..[1] <NAME>, <NAME>, J Comput Neurosci (2010) 29:171–182.
    """
    # Merge spike trains if list of spike trains given:
    if isinstance(spiketrain, list):
        _check_consistency_of_spiketrainlist(spiketrain, t_start=t_start, t_stop=t_stop)
        if t_start is None:
            t_start = spiketrain[0].t_start
        if t_stop is None:
            t_stop = spiketrain[0].t_stop
        # Pool all spikes into a single sorted train and recurse once.
        spikes = np.concatenate([st.magnitude for st in spiketrain])
        merged_spiketrain = SpikeTrain(np.sort(spikes), units=spiketrain[0].units,
                                       t_start=t_start, t_stop=t_stop)
        return instantaneous_rate(merged_spiketrain, sampling_period=sampling_period,
                                  kernel=kernel, cutoff=cutoff, t_start=t_start,
                                  t_stop=t_stop, trim=trim)
    # Checks of input variables:
    if not isinstance(spiketrain, SpikeTrain):
        raise TypeError(
            "spiketrain must be instance of :class:`SpikeTrain` of Neo!\n"
            "    Found: %s, value %s" % (type(spiketrain), str(spiketrain)))
    if not (isinstance(sampling_period, pq.Quantity) and
            sampling_period.dimensionality.simplified ==
            pq.Quantity(1, "s").dimensionality):
        raise TypeError(
            "The sampling period must be a time quantity!\n"
            "    Found: %s, value %s" % (type(sampling_period), str(sampling_period)))
    # BUG FIX: previously `< 0`, which accepted a zero sampling period despite
    # the error message; a zero period would later produce a degenerate
    # CompoundUnit ("0.0*s") and a zero-step np.arange for the kernel support.
    if sampling_period.magnitude <= 0:
        raise ValueError("The sampling period must be larger than zero.")
    if kernel == 'auto':
        # Optimal kernel width per Shimazaki & Shinomoto (ref. [1]).
        kernel_width = sskernel(spiketrain.magnitude, tin=None,
                                bootstrap=True)['optw']
        if kernel_width is None:
            raise ValueError(
                "Unable to calculate optimal kernel width for "
                "instantaneous rate from input data.")
        unit = spiketrain.units
        sigma = 1 / (2.0 * 2.7) * kernel_width * unit
        # factor 2.0 connects kernel width with its half width,
        # factor 2.7 connects half width of Gaussian distribution with
        # 99% probability mass with its standard deviation.
        kernel = kernels.GaussianKernel(sigma)
    elif not isinstance(kernel, kernels.Kernel):
        raise TypeError(
            "kernel must be either instance of :class:`Kernel` "
            "or the string 'auto'!\n"
            "    Found: %s, value %s" % (type(kernel), str(kernel)))
    if not (isinstance(cutoff, float) or isinstance(cutoff, int)):
        raise TypeError("cutoff must be float or integer!")
    if not (t_start is None or (isinstance(t_start, pq.Quantity) and
                                t_start.dimensionality.simplified ==
                                pq.Quantity(1, "s").dimensionality)):
        raise TypeError("t_start must be a time quantity!")
    if not (t_stop is None or (isinstance(t_stop, pq.Quantity) and
                               t_stop.dimensionality.simplified ==
                               pq.Quantity(1, "s").dimensionality)):
        raise TypeError("t_stop must be a time quantity!")
    if not (isinstance(trim, bool)):
        raise TypeError("trim must be bool!")
    # main function:
    # Rescale so one time unit equals one sampling period; indexes then map
    # directly onto bins of `time_vector`.
    units = pq.CompoundUnit("%s*s" % str(sampling_period.rescale('s').magnitude))
    spiketrain = spiketrain.rescale(units)
    if t_start is None:
        t_start = spiketrain.t_start
    else:
        t_start = t_start.rescale(spiketrain.units)
    if t_stop is None:
        t_stop = spiketrain.t_stop
    else:
        t_stop = t_stop.rescale(spiketrain.units)
    # Histogram of spike counts at sampling-period resolution.
    time_vector = np.zeros(int((t_stop - t_start)) + 1)
    spikes_slice = spiketrain.time_slice(t_start, t_stop) \
        if len(spiketrain) else np.array([])
    for spike in spikes_slice:
        index = int((spike - t_start))
        time_vector[index] += 1
    if cutoff < kernel.min_cutoff:
        cutoff = kernel.min_cutoff
        warnings.warn("The width of the kernel was adjusted to a minimally "
                      "allowed width.")
    # Kernel support sampled on [-cutoff*sigma, +cutoff*sigma].
    t_arr = np.arange(-cutoff * kernel.sigma.rescale(units).magnitude,
                      cutoff * kernel.sigma.rescale(units).magnitude +
                      sampling_period.rescale(units).magnitude,
                      sampling_period.rescale(units).magnitude) * units
    r = scipy.signal.fftconvolve(time_vector,
                                 kernel(t_arr).rescale(pq.Hz).magnitude, 'full')
    if np.any(r < 0):
        # FFT round-off can produce tiny negative rates.
        warnings.warn("Instantaneous firing rate approximation contains "
                      "negative values, possibly caused due to machine "
                      "precision errors.")
    if not trim:
        # Cut back to the original interval, aligned on the kernel median.
        r = r[kernel.median_index(t_arr):-(kernel(t_arr).size -
                                           kernel.median_index(t_arr))]
    elif trim:
        # Keep only the fully-overlapping region and shift the time bounds.
        r = r[2 * kernel.median_index(t_arr):-2 * (kernel(t_arr).size -
                                                   kernel.median_index(t_arr))]
        t_start += kernel.median_index(t_arr) * spiketrain.units
        t_stop -= (kernel(t_arr).size -
                   kernel.median_index(t_arr)) * spiketrain.units
    rate = neo.AnalogSignal(signal=r.reshape(r.size, 1),
                            sampling_period=sampling_period,
                            units=pq.Hz, t_start=t_start, t_stop=t_stop)
    return rate
def time_histogram(spiketrains, binsize, t_start=None, t_stop=None,
output='counts', binary=False):
"""
Time Histogram of a list of :attr:`neo.SpikeTrain` objects.
Parameters
----------
spiketrains : | |
0, 1, 1],
[0, 1, 1, 1],
[0, 1, 0, 0]], dtype=np.uint8)
base_img1_flipped = np.array([[1, 1, 0, 0],
[1, 1, 0, 0],
[1, 1, 1, 0]], dtype=np.uint8)
base_img2_flipped = np.array([[1, 1, 0, 0],
[1, 1, 1, 0],
[0, 0, 1, 0]], dtype=np.uint8)
images = np.array([base_img1, base_img2])
images_flipped = np.array([base_img1_flipped, base_img2_flipped])
images_list = [base_img1, base_img2]
images_flipped_list = [base_img1_flipped, base_img2_flipped]
images_list2d3d = [base_img1, base_img2[:, :, np.newaxis]]
images_flipped_list2d3d = [base_img1_flipped, base_img2_flipped[:, :, np.newaxis]]
aug = iaa.Fliplr(1.0)
noaug = iaa.Fliplr(0.0)
# one numpy array as input
observed = aug.augment_images(images)
assert np.array_equal(observed, images_flipped)
observed = noaug.augment_images(images)
assert np.array_equal(observed, images)
# list of 2d images
observed = aug.augment_images(images_list)
assert array_equal_lists(observed, images_flipped_list)
observed = noaug.augment_images(images_list)
assert array_equal_lists(observed, images_list)
# list of images, one 2d and one 3d
observed = aug.augment_images(images_list2d3d)
assert array_equal_lists(observed, images_flipped_list2d3d)
observed = noaug.augment_images(images_list2d3d)
assert array_equal_lists(observed, images_list2d3d)
def test_Augmenter_augment_batches():
    # Exercise Augmenter.augment_batches() over every accepted input format
    # (ia.Batch with list images, ia.Batch with array images, raw (N, H, W)
    # arrays, lists of KeypointsOnImage), in both foreground and background
    # mode. Uses Fliplr(0.5) so roughly half of the outputs should be flipped;
    # the 0.4..0.6 bounds are statistical tolerances over 1000 iterations.
    reseed()
    image = np.array([[0, 0, 1, 1],
                      [0, 0, 1, 1],
                      [0, 1, 1, 1]], dtype=np.uint8)
    image_flipped = np.fliplr(image)
    keypoint = ia.Keypoint(x=2, y=1)
    keypoints = [ia.KeypointsOnImage([keypoint], shape=image.shape + (1,))]
    # Expected keypoint location after a horizontal flip of `image`.
    kp_flipped = ia.Keypoint(
        x=image.shape[1]-1-keypoint.x,
        y=keypoint.y
    )
    seq = iaa.Fliplr(0.5)
    # NOTE: the following triple-quoted block is deliberately disabled code
    # (foreground-only variant of the loop below); kept verbatim.
    """
    # with images as list, background=False
    nb_flipped_images = 0
    nb_flipped_keypoints = 0
    nb_iterations = 1000
    batches = [ia.Batch(images=[np.copy(image)], keypoints=[keypoints[0].deepcopy()]) for _ in sm.xrange(nb_iterations)]
    batches_aug = list(seq.augment_batches(batches, background=False))
    for batch_aug in batches_aug:
        image_aug = batch_aug.images_aug[0]
        keypoint_aug = batch_aug.keypoints_aug[0].keypoints[0]
        assert np.array_equal(image_aug, image) or np.array_equal(image_aug, image_flipped)
        if np.array_equal(image_aug, image_flipped):
            nb_flipped_images += 1
        assert (keypoint_aug.x == keypoint.x and keypoint_aug.y == keypoint.y) \
            or (keypoint_aug.x == kp_flipped.x and keypoint_aug.y == kp_flipped.y)
        if keypoint_aug.x == kp_flipped.x and keypoint_aug.y == kp_flipped.y:
            nb_flipped_keypoints += 1
    assert 0.4*nb_iterations <= nb_flipped_images <= 0.6*nb_iterations
    assert nb_flipped_images == nb_flipped_keypoints
    """
    # Run the same checks with and without background (multiprocess) mode.
    for bg in [False, True]:
        # with images as list
        nb_flipped_images = 0
        nb_flipped_keypoints = 0
        nb_iterations = 1000
        batches = [ia.Batch(images=[np.copy(image)], keypoints=[keypoints[0].deepcopy()]) for _ in sm.xrange(nb_iterations)]
        batches_aug = list(seq.augment_batches(batches, background=bg))
        for batch_aug in batches_aug:
            image_aug = batch_aug.images_aug[0]
            keypoint_aug = batch_aug.keypoints_aug[0].keypoints[0]
            # Every output must be either the original or the flipped image.
            assert np.array_equal(image_aug, image) or np.array_equal(image_aug, image_flipped)
            if np.array_equal(image_aug, image_flipped):
                nb_flipped_images += 1
            assert (keypoint_aug.x == keypoint.x and keypoint_aug.y == keypoint.y) \
                or (keypoint_aug.x == kp_flipped.x and keypoint_aug.y == kp_flipped.y)
            if keypoint_aug.x == kp_flipped.x and keypoint_aug.y == kp_flipped.y:
                nb_flipped_keypoints += 1
        assert 0.4*nb_iterations <= nb_flipped_images <= 0.6*nb_iterations
        # Images and keypoints in one batch must be flipped consistently.
        assert nb_flipped_images == nb_flipped_keypoints
        # with images as array
        nb_flipped_images = 0
        nb_flipped_keypoints = 0
        nb_iterations = 1000
        batches = [ia.Batch(images=np.array([np.copy(image)], dtype=np.uint8), keypoints=None) for _ in sm.xrange(nb_iterations)]
        batches_aug = list(seq.augment_batches(batches, background=bg))
        for batch_aug in batches_aug:
            #batch = ia.Batch(images=np.array([image], dtype=np.uint8), keypoints=keypoints)
            #batches_aug = list(seq.augment_batches([batch], background=True))
            #batch_aug = batches_aug[0]
            image_aug = batch_aug.images_aug[0]
            assert np.array_equal(image_aug, image) or np.array_equal(image_aug, image_flipped)
            if np.array_equal(image_aug, image_flipped):
                nb_flipped_images += 1
        assert 0.4*nb_iterations <= nb_flipped_images <= 0.6*nb_iterations
        # array (N, H, W) as input
        nb_flipped_images = 0
        nb_iterations = 1000
        batches = [np.array([np.copy(image)], dtype=np.uint8) for _ in sm.xrange(nb_iterations)]
        batches_aug = list(seq.augment_batches(batches, background=bg))
        for batch_aug in batches_aug:
            #batch = np.array([image], dtype=np.uint8)
            #batches_aug = list(seq.augment_batches([batch], background=True))
            #image_aug = batches_aug[0][0]
            image_aug = batch_aug[0]
            assert np.array_equal(image_aug, image) or np.array_equal(image_aug, image_flipped)
            if np.array_equal(image_aug, image_flipped):
                nb_flipped_images += 1
        assert 0.4*nb_iterations <= nb_flipped_images <= 0.6*nb_iterations
        # list of list of KeypointsOnImage as input
        nb_flipped_keypoints = 0
        nb_iterations = 1000
        #batches = [ia.Batch(images=[np.copy(image)], keypoints=None) for _ in sm.xrange(nb_iterations)]
        batches = [[keypoints[0].deepcopy()] for _ in sm.xrange(nb_iterations)]
        batches_aug = list(seq.augment_batches(batches, background=bg))
        for batch_aug in batches_aug:
            #batch = [keypoints]
            #batches_aug = list(seq.augment_batches([batch], background=True))
            #batch_aug = batches_aug[0]
            #keypoint_aug = batches_aug[0].keypoints[0].keypoints[0]
            keypoint_aug = batch_aug[0].keypoints[0]
            assert (keypoint_aug.x == keypoint.x and keypoint_aug.y == keypoint.y) \
                or (keypoint_aug.x == kp_flipped.x and keypoint_aug.y == kp_flipped.y)
            if keypoint_aug.x == kp_flipped.x and keypoint_aug.y == kp_flipped.y:
                nb_flipped_keypoints += 1
        assert 0.4*nb_iterations <= nb_flipped_keypoints <= 0.6*nb_iterations
    # test all augmenters
    # this test is currently skipped by default as it takes around 40s on its own,
    # probably because of having to start background processes
    """
    augs = [
        iaa.Sequential([iaa.Fliplr(1.0), iaa.Flipud(1.0)]),
        iaa.SomeOf(1, [iaa.Fliplr(1.0), iaa.Flipud(1.0)]),
        iaa.OneOf([iaa.Fliplr(1.0), iaa.Flipud(1.0)]),
        iaa.Sometimes(1.0, iaa.Fliplr(1)),
        iaa.WithColorspace("HSV", children=iaa.Add((-50, 50))),
        iaa.WithChannels([0], iaa.Add((-50, 50))),
        iaa.Noop(name="Noop-nochange"),
        iaa.Lambda(
            func_images=lambda images, random_state, parents, hooks: images,
            func_keypoints=lambda keypoints_on_images, random_state, parents, hooks: keypoints_on_images,
            name="Lambda-nochange"
        ),
        iaa.AssertLambda(
            func_images=lambda images, random_state, parents, hooks: True,
            func_keypoints=lambda keypoints_on_images, random_state, parents, hooks: True,
            name="AssertLambda-nochange"
        ),
        iaa.AssertShape(
            (None, 64, 64, 3),
            check_keypoints=False,
            name="AssertShape-nochange"
        ),
        iaa.Scale((0.5, 0.9)),
        iaa.CropAndPad(px=(-50, 50)),
        iaa.Pad(px=(1, 50)),
        iaa.Crop(px=(1, 50)),
        iaa.Fliplr(1.0),
        iaa.Flipud(1.0),
        iaa.Superpixels(p_replace=(0.25, 1.0), n_segments=(16, 128)),
        iaa.ChangeColorspace(to_colorspace="GRAY"),
        iaa.Grayscale(alpha=(0.1, 1.0)),
        iaa.GaussianBlur(1.0),
        iaa.AverageBlur(5),
        iaa.MedianBlur(5),
        iaa.Convolve(np.array([[0, 1, 0],
                               [1, -4, 1],
                               [0, 1, 0]])),
        iaa.Sharpen(alpha=(0.1, 1.0), lightness=(0.8, 1.2)),
        iaa.Emboss(alpha=(0.1, 1.0), strength=(0.8, 1.2)),
        iaa.EdgeDetect(alpha=(0.1, 1.0)),
        iaa.DirectedEdgeDetect(alpha=(0.1, 1.0), direction=(0.0, 1.0)),
        iaa.Add((-50, 50)),
        iaa.AddElementwise((-50, 50)),
        iaa.AdditiveGaussianNoise(scale=(0.1, 1.0)),
        iaa.Multiply((0.6, 1.4)),
        iaa.MultiplyElementwise((0.6, 1.4)),
        iaa.Dropout((0.3, 0.5)),
        iaa.CoarseDropout((0.3, 0.5), size_percent=(0.05, 0.2)),
        iaa.Invert(0.5),
        iaa.ContrastNormalization((0.6, 1.4)),
        iaa.Affine(scale=(0.7, 1.3), translate_percent=(-0.1, 0.1), rotate=(-20, 20),
                   shear=(-20, 20), order=ia.ALL, mode=ia.ALL, cval=(0, 255)),
        iaa.PiecewiseAffine(scale=(0.1, 0.3)),
        iaa.ElasticTransformation(alpha=0.5)
    ]
    nb_iterations = 100
    image = ia.quokka(size=(64, 64))
    batch = ia.Batch(images=np.array([image]), keypoints=keypoints)
    batches = [ia.Batch(images=[np.copy(image)], keypoints=[keypoints[0].deepcopy()])
               for _ in sm.xrange(nb_iterations)]
    for aug in augs:
        nb_changed = 0
        batches_aug = list(aug.augment_batches(batches, background=True))
        for batch_aug in batches_aug:
            image_aug = batch_aug.images_aug[0]
            if image.shape != image_aug.shape or not np.array_equal(image, image_aug):
                nb_changed += 1
                if nb_changed > 10:
                    break
        if "-nochange" not in aug.name:
            assert nb_changed > 0
        else:
            assert nb_changed == 0
    """
def test_determinism():
    # For every augmenter in the list below, to_deterministic() must yield an
    # augmenter that produces identical results when applied twice to the
    # same images and keypoints.
    reseed()
    images = [
        ia.quokka(size=(128, 128)),
        ia.quokka(size=(64, 64)),
        misc.imresize(data.astronaut(), (128, 256))
    ]
    keypoints = [
        ia.KeypointsOnImage([
            ia.Keypoint(x=20, y=10), ia.Keypoint(x=5, y=5), ia.Keypoint(x=10, y=43)
        ], shape=(50, 60, 3))
    ]
    # One representative instance per augmenter family, each configured with
    # random (ranged) parameters so determinism is actually exercised.
    augs = [
        iaa.Sequential([iaa.Fliplr(1.0), iaa.Flipud(1.0)]),
        iaa.SomeOf(1, [iaa.Fliplr(1.0), iaa.Flipud(1.0)]),
        iaa.OneOf([iaa.Fliplr(1.0), iaa.Flipud(1.0)]),
        iaa.Sometimes(1.0, iaa.Fliplr(1)),
        iaa.WithColorspace("HSV", children=iaa.Add((-50, 50))),
        iaa.WithChannels([0], iaa.Add((-50, 50))),
        iaa.Noop(name="Noop-nochange"),
        iaa.Lambda(
            func_images=lambda images, random_state, parents, hooks: images,
            func_heatmaps=lambda heatmaps, random_state, parents, hooks: heatmaps,
            func_keypoints=lambda keypoints_on_images, random_state, parents, hooks: keypoints_on_images,
            name="Lambda-nochange"
        ),
        iaa.AssertLambda(
            func_images=lambda images, random_state, parents, hooks: True,
            func_heatmaps=lambda heatmaps, random_state, parents, hooks: True,
            func_keypoints=lambda keypoints_on_images, random_state, parents, hooks: True,
            name="AssertLambda-nochange"
        ),
        iaa.AssertShape(
            (None, None, None, 3),
            check_keypoints=False,
            name="AssertShape-nochange"
        ),
        iaa.Scale((0.5, 0.9)),
        iaa.CropAndPad(px=(-50, 50)),
        iaa.Pad(px=(1, 50)),
        iaa.Crop(px=(1, 50)),
        iaa.Fliplr(1.0),
        iaa.Flipud(1.0),
        iaa.Superpixels(p_replace=(0.25, 1.0), n_segments=(16, 128)),
        iaa.ChangeColorspace(to_colorspace="GRAY"),
        iaa.Grayscale(alpha=(0.1, 1.0)),
        iaa.GaussianBlur(1.0),
        iaa.AverageBlur(5),
        iaa.MedianBlur(5),
        iaa.Convolve(np.array([[0, 1, 0],
                               [1, -4, 1],
                               [0, 1, 0]])),
        iaa.Sharpen(alpha=(0.1, 1.0), lightness=(0.8, 1.2)),
        iaa.Emboss(alpha=(0.1, 1.0), strength=(0.8, 1.2)),
        iaa.EdgeDetect(alpha=(0.1, 1.0)),
        iaa.DirectedEdgeDetect(alpha=(0.1, 1.0), direction=(0.0, 1.0)),
        iaa.Add((-50, 50)),
        iaa.AddElementwise((-50, 50)),
        iaa.AdditiveGaussianNoise(scale=(0.1, 1.0)),
        iaa.Multiply((0.6, 1.4)),
        iaa.MultiplyElementwise((0.6, 1.4)),
        iaa.Dropout((0.3, 0.5)),
        iaa.CoarseDropout((0.3, 0.5), size_percent=(0.05, 0.2)),
        iaa.Invert(0.5),
        iaa.ContrastNormalization((0.6, 1.4)),
        iaa.Affine(scale=(0.7, 1.3), translate_percent=(-0.1, 0.1),
                   rotate=(-20, 20), shear=(-20, 20), order=ia.ALL,
                   mode=ia.ALL, cval=(0, 255)),
        iaa.PiecewiseAffine(scale=(0.1, 0.3)),
        iaa.ElasticTransformation(alpha=0.5)
    ]
    for aug in augs:
        aug_det = aug.to_deterministic()
        # Two consecutive applications of the deterministic augmenter must
        # produce byte-identical images and identical keypoints.
        images_aug1 = aug_det.augment_images(images)
        images_aug2 = aug_det.augment_images(images)
        kps_aug1 = aug_det.augment_keypoints(keypoints)
        kps_aug2 = aug_det.augment_keypoints(keypoints)
        assert array_equal_lists(images_aug1, images_aug2), \
            "Images not identical for %s" % (aug.name,)
        assert keypoints_equal(kps_aug1, kps_aug2), \
            "Keypoints not identical for %s" % (aug.name,)
def test_keypoint_augmentation():
ia.seed(1)
keypoints = []
for y in range(40//5):
for x in range(60//5):
keypoints.append(ia.Keypoint(y=y*5, x=x*5))
keypoints_oi = ia.KeypointsOnImage(keypoints, shape=(40, 60, 3))
augs = [
iaa.Add((-5, 5), name="Add"),
iaa.AddElementwise((-5, 5), name="AddElementwise"),
iaa.AdditiveGaussianNoise(0.01*255, name="AdditiveGaussianNoise"),
iaa.Multiply((0.95, 1.05), name="Multiply"),
iaa.Dropout(0.01, name="Dropout"),
iaa.CoarseDropout(0.01, size_px=6, name="CoarseDropout"),
iaa.Invert(0.01, per_channel=True, name="Invert"),
iaa.ContrastNormalization((0.95, 1.05), name="ContrastNormalization"),
iaa.GaussianBlur(sigma=(0.95, 1.05), name="GaussianBlur"),
iaa.AverageBlur((3, 5), name="AverageBlur"),
iaa.MedianBlur((3, 5), name="MedianBlur"),
#iaa.BilateralBlur((3, 5), name="BilateralBlur"),
# WithColorspace ?
#iaa.AddToHueAndSaturation((-5, 5), name="AddToHueAndSaturation"),
# ChangeColorspace ?
# Grayscale cannot be tested, input not RGB
# Convolve ?
iaa.Sharpen((0.0, 0.1), lightness=(1.0, 1.2), name="Sharpen"),
iaa.Emboss(alpha=(0.0, 0.1), strength=(0.5, 1.5), name="Emboss"),
iaa.EdgeDetect(alpha=(0.0, 0.1), name="EdgeDetect"),
iaa.DirectedEdgeDetect(alpha=(0.0, 0.1), direction=0, name="DirectedEdgeDetect"),
iaa.Fliplr(0.5, name="Fliplr"),
iaa.Flipud(0.5, name="Flipud"),
iaa.Affine(translate_px=(-5, 5), name="Affine-translate-px"),
iaa.Affine(translate_percent=(-0.05, 0.05), name="Affine-translate-percent"),
iaa.Affine(rotate=(-20, 20), name="Affine-rotate"),
iaa.Affine(shear=(-20, 20), name="Affine-shear"),
iaa.Affine(scale=(0.9, 1.1), name="Affine-scale"),
iaa.PiecewiseAffine(scale=(0.001, 0.005), name="PiecewiseAffine"),
#iaa.PerspectiveTransform(scale=(0.01, 0.10), name="PerspectiveTransform"),
iaa.ElasticTransformation(alpha=(0.1, 0.2), sigma=(0.1, 0.2), name="ElasticTransformation"),
# Sequential
# SomeOf
# OneOf
# Sometimes
# WithChannels
# Noop
# Lambda
# AssertLambda
# AssertShape
iaa.Alpha((0.0, 0.1), iaa.Add(10), name="Alpha"),
iaa.AlphaElementwise((0.0, 0.1), iaa.Add(10), name="AlphaElementwise"),
iaa.SimplexNoiseAlpha(iaa.Add(10), name="SimplexNoiseAlpha"),
iaa.FrequencyNoiseAlpha(exponent=(-2, 2), first=iaa.Add(10),
name="SimplexNoiseAlpha"),
iaa.Superpixels(p_replace=0.01, n_segments=64),
iaa.Scale(0.5, name="Scale"),
iaa.CropAndPad(px=(-10, 10), name="CropAndPad"),
iaa.Pad(px=(0, 10), name="Pad"),
iaa.Crop(px=(0, 10), name="Crop")
]
for aug in augs:
#if aug.name != "PiecewiseAffine":
# continue
dss = []
for i in range(10):
aug_det = aug.to_deterministic()
kp_image = keypoints_oi.to_keypoint_image(size=5)
kp_image_aug = aug_det.augment_image(kp_image)
kp_image_aug_rev = ia.KeypointsOnImage.from_keypoint_image(
kp_image_aug,
if_not_found_coords={"x": -9999, "y": -9999},
nb_channels=1
)
kp_aug = aug_det.augment_keypoints([keypoints_oi])[0]
ds = []
assert len(kp_image_aug_rev.keypoints) == len(kp_aug.keypoints),\
"Lost keypoints for '%s' (%d vs expected %d)" \
% (aug.name, len(kp_aug.keypoints), len(kp_image_aug_rev.keypoints))
for kp_pred, kp_pred_img in zip(kp_aug.keypoints, kp_image_aug_rev.keypoints):
kp_pred_lost = (kp_pred.x == -9999 and kp_pred.y == -9999)
kp_pred_img_lost = (kp_pred_img.x == -9999 and kp_pred_img.y == -9999)
#if kp_pred_lost and not kp_pred_img_lost:
# print("lost kp_pred", kp_pred_img)
#elif not kp_pred_lost and kp_pred_img_lost:
| |
import sys
from io import StringIO
from PySide6.QtCore import *
from PySide6.QtGui import *
from PySide6.QtWidgets import *
from modules.settings.settings import SettingsManager
from modules.pseudo_id.pseudo_id import PseudoIDManager
from gms_uploader.modules.models.pandasmodel import PandasModel
from gms_uploader.modules.delegates.delegates import ComboBoxDelegate, \
DateAutoCorrectDelegate, AgeDelegate, IconCheckBoxDelegate
from gms_uploader.modules.fx.fx_manager import FxManager
from gms_uploader.modules.dialogs.dialogs import ValidationDialog, MsgAlert, MsgOKCancel
from gms_uploader.modules.models.sortfilterproxymodel import MultiSortFilterProxyModel
from gms_uploader.modules.extra.auxiliary_functions import to_list, get_pd_row_index, \
date_validate, age_validate, add_gridlayout_row, update_df
from gms_uploader.modules.credentials.credentials import CredManager
from gms_uploader.modules.validate.validate import validate
from gms_uploader.modules.upload.uploader import Uploader
import pandas as pd
from datetime import datetime
from pathlib import Path
import yaml
import json
import csv
from gms_uploader.ui.mw import Ui_MainWindow
import qdarktheme
import resources
__version__ = '0.2.0'
__title__ = 'GMS-uploader'
class MainWindow(QMainWindow, Ui_MainWindow):
    def __init__(self):
        """
        Build the main window: load config, create managers, set up the
        pandas-backed model, wire signals/delegates and initialize the UI.

        NOTE(review): order matters here — setupUi must run before any widget
        access, and self.conf must be loaded before the managers/model that
        read it are constructed.
        """
        super(MainWindow, self).__init__()
        # Guards slots that should not react while the window is being built.
        self.setup_complete = False
        self.setupUi(self)
        self.setAcceptDrops(True)
        self.clipboard = QGuiApplication.clipboard()
        self.setWindowIcon(QIcon(':/img/GMS-logo.png'))
        self.setWindowTitle(__title__ + " " + __version__)
        # Show the logo behind the (still empty) tableviews.
        self.set_tb_bkg()
        self.fx_manager = FxManager(Path('fx'))
        self.fx = None
        # add icons
        self.set_icons()
        # Load the bundled default configuration (relative to the CWD).
        default_config_path = Path('config', 'config.yaml')
        with default_config_path.open(encoding='utf8') as fp:
            self.conf = yaml.safe_load(fp)
        # Managers for settings, credentials and pseudo-ID generation.
        self.settm = SettingsManager(self.conf)
        self.credm = CredManager(self.settm)
        self.pidm = PseudoIDManager(self.conf['tr']['lab_to_code'], self.settm)
        self.fx_config = None
        # Empty dataframe with one column per configured model field backs the
        # table model; a multi-column sort/filter proxy sits on top of it.
        self.tableView_columns = list(self.conf['model_fields'].keys())
        self.df = pd.DataFrame(columns=self.tableView_columns)
        self.model = PandasModel(self.df, self.conf['model_fields'])
        self.mfilter_sort_proxy_model = MultiSortFilterProxyModel()
        self.filter_cols = self.get_filter_cols()
        # setup settings
        # Per-view delegate registries, filled later by set_delegates().
        self.delegates = {}
        self.delegates['patient'] = {}
        self.delegates['lab'] = {}
        self.delegates['organism'] = {}
        self.set_signals()
        self.setup_tableviews()
        # self.set_dataview_setting_widget_values()
        # Start on the metadata page (stack index 0) and the first tab.
        self.stackedWidget.setCurrentIndex(0)
        self.tabWidget_metadata.setCurrentIndex(0)
        self.set_hidden_columns()
        self.set_col_widths()
        self.setup_settingview_widgets()
        self.set_delegates()
        # Status widgets change status to activated when there is data in the model. Default is disabled.
        self.status_widgets = [
            self.action_import_csv,
            self.action_upload_meta_seqs,
            self.pushButton_filtermarked,
            self.pushButton_invert,
            self.pushButton_drop,
            self.pushButton_clear,
            self.pushButton_filldown,
            self.pushButton_resetfilters,
            self.action_save_meta,
            self.action_import_fx,
            self.action_paste_fx,
            self.lineEdit_filter
        ]
        self.set_datastatus_empty(True)
        self.ui_init()
        self.setup_complete = True
# setup and init-related functions
def ui_init(self):
self.tabWidget_metadata.setStyleSheet("QTabWidget::pane { border: 0; }")
self.scrollArea.setStyleSheet("QScrollArea { border: 0; }")
self.toolBar.setFixedWidth(50)
self.toolBar.setMovable(False)
self.tabWidget_metadata.setTabText(0, "patient metadata")
self.tabWidget_metadata.setTabText(1, "organism metadata")
self.tabWidget_metadata.setTabText(2, "lab metadata")
self.lineEdit_filter.setPlaceholderText("freetext filter")
self.action_import_fx.setDisabled(True)
self.action_paste_fx.setDisabled(True)
def get_filter_cols(self):
cols = list(self.df.columns)
used_cols = self.conf['freetext_filter']['model_fields']
return [self.df.columns.get_loc(c) for c in cols if c in used_cols]
    def set_tb_bkg(self):
        """
        Sets bg image to tableviews. Image shown before metadata is imported.

        Applies a centered, non-repeating GMS logo as the background of all
        three metadata tableviews via Qt stylesheets, and gives the
        horizontal scrollbar a plain white background so the logo does not
        show through it.
        :return: None
        """
        # Qt resource path of the logo (compiled into the resources module).
        img = ':/img/GMS-logo.png'
        for tbv in [self.tableView_patient,
                    self.tableView_organism,
                    self.tableView_lab]:
            tbv.setStyleSheet(
                """
                background-repeat: no-repeat;
                background-position: center;
                background-image: url(%s);
                """
                % img
            )
            tbv.horizontalScrollBar().setStyleSheet(
                """
                background: white;
                """
            )
def rem_tb_bkg(self):
"""
Removes bg image from tableviews. Images removed when metadata is imported, otherwise
they are visible through the tables.
:return: None
"""
for tbv in [self.tableView_patient, self.tableView_organism, self.tableView_lab]:
tbv.setStyleSheet("background-image: none;")
    def set_signals(self):
        """
        Setup of signals for static widgets (pushbuttons, actionbuttons, lineedit for filter).
        :return:
        """
        # Page switching: stack index 1 = preferences view, 0 = metadata view
        # (index 0 is also the startup page set in __init__).
        self.action_show_prefs.triggered.connect(lambda: self.stackedWidget.setCurrentIndex(1))
        self.action_show_meta.triggered.connect(lambda: self.stackedWidget.setCurrentIndex(0))
        # Freetext filtering re-applies on every keystroke.
        self.lineEdit_filter.textChanged.connect(self.set_free_filter)
        # "Filter marked" acts as a toggle, hence checkable.
        self.pushButton_filtermarked.setCheckable(True)
        self.pushButton_filtermarked.clicked.connect(self.set_mark_filter)
        self.pushButton_drop.clicked.connect(self.drop_rows)
        self.pushButton_clear.clicked.connect(self.clear_table)
        # Sequence-file selection and upload of metadata + sequences.
        self.action_select_seq_files.triggered.connect(self.get_seq_files)
        self.action_upload_meta_seqs.triggered.connect(self.upload)
        # Metadata persistence (save/open) and CSV import.
        self.action_save_meta.triggered.connect(self.save_metadata_file)
        self.action_open_meta.triggered.connect(self.open_metadata_file)
        self.pushButton_invert.clicked.connect(self.invert_marks)
        self.action_import_csv.triggered.connect(self.get_csv_file_combine)
def set_icons(self):
    """Assign icons from the compiled Qt resource file to actions and buttons."""
    self.action_open_meta.setIcon(QIcon(':/icons/AppIcons/folder-open-outline_mdi.svg'))
    self.action_save_meta.setIcon(QIcon(':/icons/AppIcons/content-save-outline_mdi.svg'))
    self.action_show_meta.setIcon(QIcon(':/table'))
    self.action_show_prefs.setIcon(QIcon(':/cog'))
    self.action_upload_meta_seqs.setIcon(QIcon(':/icons/AppIcons/tray-arrow-up_mdi.svg'))
    self.action_select_seq_files.setIcon(QIcon(':/icons/AppIcons/folder-open-outline-dna_mdi.svg'))
    self.action_import_csv.setIcon(QIcon(':/import-csv'))
    self.action_import_fx.setIcon(QIcon(':/import-fx'))
    self.action_paste_fx.setIcon(QIcon(':/paste-fx'))
    self.pushButton_filldown.setIcon(QIcon(':/arrow-down'))
    self.pushButton_drop.setIcon(QIcon(':/close'))
    self.pushButton_clear.setIcon(QIcon(':/clear'))
    # BUG FIX: path was '/filter-remove' (missing the ':' Qt-resource prefix),
    # so the reset-filters button icon never loaded.
    self.pushButton_resetfilters.setIcon(QIcon(':/filter-remove'))
    self.pushButton_filtermarked.setIcon(QIcon(':/filter'))
    self.pushButton_invert.setIcon(QIcon(':/invert'))
def set_col_widths(self):
    """Apply the configured column width to each column of all three tableviews."""
    views = (self.tableView_patient, self.tableView_organism, self.tableView_lab)
    for col, field in enumerate(self.conf['model_fields']):
        width = self.conf['model_fields'][field]['col_width']
        for view in views:
            view.setColumnWidth(col, width)
def set_hidden_columns(self):
    """Hide every column whose config 'view' list does not include the tableview's key."""
    view_map = (('patient', self.tableView_patient),
                ('organism', self.tableView_organism),
                ('lab', self.tableView_lab))
    for col, field in enumerate(self.conf['model_fields']):
        shown_in = self.conf['model_fields'][field]['view']
        for key, view in view_map:
            if key not in shown_in:
                view.setColumnHidden(col, True)
def set_dataview_setting_widget_values(self):
    """
    Sets values in static lineedits on the dataview pane.

    Values come from the settings manager (settm), the pseudo-id manager
    (pidm) and the credentials manager (credm).
    :return: None
    """
    # (removed stray debug print("reset dataviews") that polluted stdout)
    self.lineEdit_submitter.setText(str(self.settm.get_value("entered_value", "submitter")))
    self.lineEdit_lab.setText(str(self.settm.get_value("select_single", "lab")))
    self.lineEdit_seq_technology.setText(str(self.settm.get_value("select_single", "seq_technology")))
    self.lineEdit_host.setText(str(self.settm.get_value("select_single", "host")))
    self.lineEdit_lib_method.setText(str(self.settm.get_value("select_single", "library_method")))
    self.lineEdit_import_fx.setText(str(self.settm.get_value("select_single", "fx")))
    self.lineEdit_pseudo_id.setText(str(self.pidm.get_first_pid()))
    self.lineEdit_ul_target_label.setText(str(self.credm.get_current_target_label()))
    self.lineEdit_ul_protocol.setText(str(self.credm.get_current_protocol()))
def setup_settingview_widgets(self):
    """
    Creates and sets up dynamic setting widgets based on the config file.

    Walks conf['settings_structure']; "form" categories become labelled grid
    rows of lineedits/comboboxes, "tabs" categories become a QTabWidget of
    checkable tableviews (multi-select settings).
    :return: None
    """
    for category in self.conf['settings_structure']:
        if category['target_layout'] == "form":
            category_name = category['label']
            label = QLabel(category_name)
            label.setProperty("class", "bold")
            self.verticalLayout_forms.addWidget(label)
            grid_layout = QGridLayout()
            grid_layout.setColumnMinimumWidth(0, 150)
            self.verticalLayout_forms.addLayout(grid_layout)
            for item in category['items']:
                for field_type, fields in item.items():
                    if field_type == "entered_value":
                        for field in fields:
                            func = self.get_button_func(field)
                            if func is not None:
                                # field has a helper button (e.g. a picker dialog):
                                # read-only lineedit + "..." button side by side
                                button_name = field + "button"
                                button = QPushButton("...", objectName=button_name)
                                button.clicked.connect(func)
                                edit = QLineEdit(objectName=field)
                                edit.textChanged.connect(self.update_setting)
                                edit.setReadOnly(True)
                                hbox = QHBoxLayout()
                                hbox.addWidget(edit)
                                hbox.addWidget(button)
                                label = QLabel(field)
                                label.setProperty("class", "padding-left")
                                label.setMinimumWidth(40)
                                value = self.settm.get_value(field_type, field)
                                edit.setText(str(value))
                                add_gridlayout_row(grid_layout, label, hbox)
                            else:
                                # plain free-text field
                                edit = QLineEdit(objectName=field, editingFinished=self.update_setting)
                                value = self.settm.get_value(field_type, field)
                                edit.setText(value)
                                label = QLabel(field)
                                label.setProperty("class", "padding-left")
                                label.setMinimumWidth(40)
                                add_gridlayout_row(grid_layout, label, edit)
                    elif field_type == "select_single":
                        for field in fields:
                            combo = QComboBox(objectName=field)
                            combo.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
                            items = []
                            if field in self.conf['add_empty_selection']:
                                items = ['None']
                            if field == "fx":
                                # fx choices come from the plugin manager, not the config
                                for name in self.fx_manager.get_fx_names():
                                    items.append(name)
                            else:
                                if self.conf['settings_values']['select_single'][field] != "None":
                                    items.extend(list(self.conf['settings_values']['select_single'][field].keys()))
                            combo.addItems(items)
                            value = self.settm.get_value(field_type, field)
                            combo.setCurrentText(value)
                            label = QLabel(field)
                            label.setProperty("class", "padding-left")
                            label.setMinimumWidth(40)
                            combo.currentTextChanged.connect(self.update_setting)
                            add_gridlayout_row(grid_layout, label, combo)
                    elif field_type == "select_single_fx":
                        for field in fields:
                            combo = QComboBox(objectName=field)
                            combo.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
                            combo.addItem(str('None'))
                            for name in self.fx_manager.get_fx_names():
                                combo.addItem(str(name))
                            label = QLabel(field)
                            label.setProperty("class", "padding-left")
                            label.setMinimumWidth(40)
                            value = self.settm.get_value('select_single', field)
                            if value is not None:
                                if combo.findText(value) >= 0:
                                    combo.setCurrentText(value)
                            combo.currentTextChanged.connect(self.update_setting)
                            add_gridlayout_row(grid_layout, label, combo)
        elif category['target_layout'] == "tabs":
            category_name = category['label']
            label = QLabel(category_name)
            label.setProperty("class", "bold")
            self.verticalLayout_tabs.addWidget(QLabel())
            self.verticalLayout_tabs.addWidget(label)
            tabwidget_settings = QTabWidget(objectName='tabwidget_settings')
            tabwidget_settings.setStyleSheet("QTabWidget::pane { border: 0; }")
            # FIX: setMinimumHeight was called twice (420, then 550); only the
            # last call took effect, so keep the effective value once.
            tabwidget_settings.setMinimumHeight(550)
            self.verticalLayout_tabs.addWidget(tabwidget_settings)
            for item in category['items']:
                for field_type, fields in item.items():
                    if field_type == "select_multi":
                        for field in fields:
                            value = self.settm.get_value(field_type, field)
                            store_checked = to_list(value)
                            tableview = QTableView()
                            # FIX: a first QStandardItemModel was created and then
                            # immediately shadowed by this one; the dead copy is removed.
                            model = QStandardItemModel(objectName=field)
                            model.setColumnCount(2)
                            for key, checked in self.conf['settings_values'][field_type][field].items():
                                item1 = QStandardItem("0")
                                item2 = QStandardItem(key)
                                if key in store_checked:
                                    item1.setText("1")
                                model.appendRow([item1, item2])
                            tableview.setModel(model)
                            tableview.setItemDelegateForColumn(0, IconCheckBoxDelegate(None))
                            tableview.setColumnWidth(0, 15)
                            hheader = tableview.horizontalHeader()
                            hheader.setStretchLastSection(True)
                            hheader.hide()
                            tableview.verticalHeader().setDefaultSectionSize(20)
                            tableview.verticalHeader().hide()
                            tableview.setShowGrid(False)
                            model.itemChanged.connect(self.update_setting)
                            tabwidget_settings.addTab(tableview, field)
    self.set_target_label_items()
    self.set_dataview_setting_widget_values()
    self.set_fx()
def set_fx(self):
    """
    Load the fx plugin selected in settings and (re)wire its import actions.

    This runs at startup AND on every settings change (via update_setting).
    BUG FIX: previously each call added another .connect() without
    disconnecting first, so after N settings changes a single click fired
    the import slot N times. We now disconnect the specific slot first;
    PyQt raises TypeError when it was not connected yet, which is ignored.
    """
    value = self.settm.get_value('select_single', 'fx')
    if value is not None and value != 'None':
        self.fx = self.fx_manager.load_fx(value)
        self.fx.set_path(self.settm.get_value('entered_value', 'fx_import_path'))
        if self.fx.from_clipboard:
            try:
                self.action_paste_fx.triggered.disconnect(self.import_fx_clipboard)
            except TypeError:
                pass  # slot was not connected yet
            self.action_paste_fx.triggered.connect(self.import_fx_clipboard)
        if self.fx.from_file:
            try:
                self.action_import_fx.triggered.disconnect(self.import_fx_file)
            except TypeError:
                pass  # slot was not connected yet
            self.action_import_fx.triggered.connect(self.import_fx_file)
def update_setting(self, item=None):
    """Persist one changed setting, then refresh everything derived from settings."""
    if not self.setup_complete:
        # ignore signals fired while widgets are still being constructed
        return
    if isinstance(item, QStandardItem):
        # select_multi checkbox models deliver the changed item directly
        self.settm.update_setting(item=item)
    else:
        # lineedits/combos: identify the widget through the Qt sender
        self.settm.update_setting(obj=self.sender())
    self.pidm.init_settings()
    self.set_dataview_setting_widget_values()
    self.update_delegates()
    self.set_fx()
def setup_tableviews(self):
    """
    Setup of data tableviews, connects to mfilter_sort_proxy_model, and the pandas model.
    :return: None
    """
    triggers = (QAbstractItemView.DoubleClicked
                | QAbstractItemView.SelectedClicked
                | QAbstractItemView.EditKeyPressed)
    self.mfilter_sort_proxy_model.setSourceModel(self.model)
    # identical configuration for all three views
    for view in (self.tableView_patient, self.tableView_organism, self.tableView_lab):
        view.setModel(self.mfilter_sort_proxy_model)
        view.setEditTriggers(triggers)
        header = view.horizontalHeader()
        header.setStretchLastSection(True)
        header.setSectionsMovable(True)
        view.setSortingEnabled(True)
        view.verticalHeader().hide()
    self.pushButton_resetfilters.clicked.connect(self.reset_sort_filter)
    self.pushButton_filldown.clicked.connect(self.filldown)
    self.update_model()
def setup_credentials(self):
    # Build the credential manager from the settings manager; credm is later
    # read for the upload target label/protocol shown on the dataview pane.
    self.credm = CredManager(self.settm)
# def load_fx_settings(self):
# store_key = "/".join(['select_single', 'import_fx'])
# fx_name = self.qsettings.value(store_key)
#
#
# default_config_path = Path('config', 'config.yaml')
# with default_config_path.open(encoding='utf8') as fp:
# self.conf = yaml.safe_load(fp)
# model and data-import related functions
def update_model(self):
    """Rebuild the pandas model and sort/filter proxy, then attach them to every view."""
    self.model = PandasModel(self.df, self.conf['model_fields'])
    proxy = MultiSortFilterProxyModel()
    proxy.setSourceModel(self.model)
    self.mfilter_sort_proxy_model = proxy
    for view in (self.tableView_patient, self.tableView_lab, self.tableView_organism):
        view.setModel(proxy)
    self.set_col_widths()
def df_insert(self, df, row):
    """Append `row` to `df` in place, at index 0 if empty, else after the max index."""
    last = df.index.max()
    next_idx = 0 if pd.isna(last) else last + 1
    df.loc[next_idx] = row
def verify_files(self, files):
    """
    Keep only paths that exist and match a configured raw-sequence-data
    extension. Directory entries are expanded recursively and their matching
    files are verified and returned instead.
    :param files: list of filepaths and/or dirpaths
    :return: list of verified filepaths (pathlib.Path)
    """
    patterns = [self.conf['seq_files'][kind]['ext'] for kind in self.conf['seq_files']]
    checked = []
    for entry in files:
        path = Path(entry)
        if path.is_dir():
            for pattern in patterns:
                for hit in path.rglob(pattern):
                    if Path(hit).exists():
                        checked.append(hit)
        else:
            for pattern in patterns:
                if path.match(pattern) and path.exists():
                    checked.append(path)
    return checked
def extract_metadata_from_filenames(self, files):
    """
    Derive per-sample metadata from raw sequence-data filenames.

    The sample id is the part of the filename before the first '_'.
    fastq.gz files also contribute a lane (last '_'-separated token of the
    first stem segment); fast5 files are just collected per sample.
    :param files: list of filepaths (pathlib.Path)
    :return: list of dicts with metadata per sample
    """
    fastq_pat = self.conf['seq_files']['fastq_gz']['ext']
    fast5_pat = self.conf['seq_files']['fast5']['ext']
    per_sample = {}
    for fpath in files:
        fname = fpath.name
        sample = fname.split('_')[0]
        if sample not in per_sample:
            # seq_path is taken from the first file seen for this sample
            per_sample[sample] = {'seq_path': str(fpath.parent)}
        entry = per_sample[sample]
        name_obj = Path(fname)
        if name_obj.match(fastq_pat):
            first_segment = fpath.stem.split('.')[0]
            entry['lane'] = first_segment.split('_')[-1]
            entry.setdefault('fastq', []).append(fname)
        elif name_obj.match(fast5_pat):
            entry.setdefault('fast5', []).append(fname)
    rows = []
    for sample, meta in per_sample.items():
        row = {'mark': 0, 'internal_lab_id': sample}  # mark column used for row selection
        for key, value in meta.items():
            # file lists are emitted sorted for deterministic ordering
            row[key] = sorted(value) if isinstance(value, list) else value
        rows.append(row)
    return rows
def find_duplicates(self, df1, df2):
    """
    Checks if the same internal_lab_id is present in two dataframes.

    Uses pd.concat instead of DataFrame.append, which was deprecated in
    pandas 1.4 and removed in pandas 2.0.

    :param df1: dataframe1
    :param df2: dataframe2
    :return: bool - True if any internal_lab_id occurs more than once
    """
    combined = pd.concat([df1, df2])
    return combined['internal_lab_id'].duplicated().any()
def add_files_metadata_to_model(self, data):
"""
Creates new pandas df, from files and metadata, check for duplicates
and merge with existing df dataset and create new model.
:param data: list of dicts containing metadata and filenames
:return: None
"""
new_df = pd.DataFrame(data)
if not new_df.empty:
if not self.find_duplicates(self.df, new_df):
self.df = self.df.append(new_df)
self.df = self.df.fillna('')
self.update_model()
self.rem_tb_bkg()
self.set_datastatus_empty(False)
else:
msg_box = QMessageBox()
msg_box.setText("Duplicate SampleIDs | |
import argparse
import itertools
import json
import logging
import os
import pickle
import time
import warnings
from collections import Counter, defaultdict
from typing import Dict, Any, List, Iterable, Tuple, Set
warnings.filterwarnings(action='ignore', category=UserWarning, module='gensim')
import langdetect
import spacy
from gensim import corpora
from gensim.corpora import IndexedCorpus
from gensim.models import HdpModel, LdaMulticore
from gensim.models.basemodel import BaseTopicModel
from langdetect.lang_detect_exception import LangDetectException
from langdetect.language import Language
from spacy.tokens.doc import Doc
from spacy.tokens.token import Token
import text_processing
import util
from data import JsonLinesCorpus, Topic, Document, DocumentCollection
from util import ProgressLog
# Root logging config for the whole process, plus a dedicated module logger
# pinned to INFO for this tool.
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
logger = logging.getLogger('topic-models')
logger.setLevel(logging.INFO)
#
# goal: go from plaintext PDFs + optional metadata file (result of parser) to id-topic-mapping (input for es index)
# add optional pre-classification (id-class mapping) as first layer of the hierarchical topic model
#
# start: need corpus-id-mapping and metadata-by-doc-id.
# first layer: split by category and write multiple corpora, each with own id-mapping
# subsequent layers: split by assigned topic, add id-mapping, store topics in central id-topic-dict
# side-effect: build topic-tree, store relations in Topic objects (get parents and children, root topic is "main")
#
# so, the id-category-map is a one-time thingy that we don't need to preserve at all. Just write everything
# into the topic tree and document-topic-mapping immediately
#
# steps:
# - calculate topic model from document collection
# - classify documents using this model, store topic labels in document objects
# - create one new model per topic with different hyperparameters and train it with the sub-corpus consisting only of
# the documents in this topic
# - recur
#
# issues:
# - need to build a new mmcorpus and a corpusindex-docid-mapping for each model
#
# data structure: LayeredTopicModel
# - recursive structure, initialize for every subsequent layer
# - the build script requires lots of state and temporary files
# -> maybe have a separate builder, that spits out the final model...
# - the final model consists of multiple topic models + metadata in a single archive
#
# topic model visualization: https://github.com/bmabey/pyLDAvis
#
class TopicModel:
def __init__(self, file_pdf_text: str = None, file_corpus_input: str = None,
             file_metadata: str = None, file_output_prefix: str = None, abstracts_only=False,
             language_filter: str = None, model: str = "hdp", batch_size=100, n_threads=None,
             topic_layers: List[int] = None, topic_limit_per_layer: List[int] = None,
             category_layer=False, min_docs_per_topic: int = None, token_min_count=1,
             dict_size_limit=10000, document_limit: int = None):
    """
    :param file_pdf_text: path to the file containing the parsed PDFs (output of pdf_parser)
    :param file_corpus_input: path to the file containing the tokens of the parsed pdfs
           (optional, preferred over file_pdf_text)
    :param file_metadata: path to the metadata file (output of arxiv_crawler. required,
           if the category layer should be used)
    :param file_output_prefix: all output files, including temporary files, will be prefixed
           with this string. all results will be stored under this prefix as well.
    :param abstracts_only: use only title and abstract for the topic model instead of the
           full document text
    :param language_filter: filter by the specified language code. the spacy parser we use
           currently only supports english text, so 'en' is a reasonable value here
           (though not a requirement)
    :param model: specify the model to use. supported models: "hdp", "lda"
    :param batch_size: the batch size of the spacy parser
    :param n_threads: the number of threads to use on parallelizable tasks (e.g. spacy)
    :param topic_layers: how many topics are to be calculated on each nested topic layer
    :param topic_limit_per_layer: how many of those topics should have a fixed limit during
           classification (i.e. each document can be only part of up to N topics instead of as
           many as the topic model yields)
    :param category_layer: use the categories extracted from metadata as the first layer
    :param min_docs_per_topic: how many documents are required for each sub-topic to add
           (e.g. min_docs = 100, we have 1000 documents, this limits the number of sub-topics to 10)
    :param token_min_count: lowest allowed token count for words that may appear in the dictionary
    :param dict_size_limit: the total size limit of the dictionary (take the N most frequent terms)
    :param document_limit: just process the first N documents (useful for testing)
    """
    super().__init__()
    # file paths
    self.file_pdf_text = file_pdf_text
    self.file_corpus_input = file_corpus_input
    self.file_metadata = file_metadata
    self.file_output_prefix = file_output_prefix
    # derived paths
    self.file_tasklog = file_output_prefix + '-progress.log'
    self.file_corpus_plain = file_corpus_input or file_output_prefix + '-corpus-plain.json.bz2'
    self.file_corpus = file_output_prefix + '-corpus.json'
    self.file_dict = file_output_prefix + '-lemma.dict.bz2'
    self.file_ids = file_output_prefix + '-ids.json'
    self.file_docs = file_output_prefix + '-docs.json'
    self.file_model = file_output_prefix + '-hdp.pkl.bz2'
    self.file_topics = file_output_prefix + '-topics.json.bz2'
    # application config
    self.abstracts_only = abstracts_only
    self.language_filter = language_filter
    self.model = model
    self.batch_size = batch_size
    self.n_threads = n_threads or max(2, int(os.cpu_count() / 2))
    self.topic_layers = topic_layers or [10]
    # BUG FIX: this previously read len(topic_layers), which raises TypeError
    # when topic_layers is left at its default of None; use the normalized
    # self.topic_layers assigned on the line above instead.
    self.topic_limit_per_layer = topic_limit_per_layer or [0] * len(self.topic_layers)
    self.category_layer = category_layer
    self.min_docs_per_topic = min_docs_per_topic
    self.token_min_count = token_min_count
    self.dict_size_limit = dict_size_limit
    self.document_limit = document_limit
    # integrity checks
    if not abstracts_only and not file_pdf_text and not file_corpus_input:
        # FIX: the message previously named a nonexistent parameter 'file_token_input'
        raise ValueError("At least one of the parameters 'file_pdf_text' or 'file_corpus_input' "
                         "is required, if 'abstracts_only' is not enabled.")
    if (category_layer or abstracts_only) and not file_metadata:
        raise ValueError("The parameter 'file_metadata' is required, if 'category_layer' "
                         "or 'abstracts_only' is True.")
    if not file_output_prefix:
        raise ValueError("The output path must not be empty.")
def build(self, force=False):
    """Run all build stages in order, skipping stages already recorded in the task log."""
    # evaluate progress information (no need to do long-running tasks twice)
    progress = ProgressLog(self.file_tasklog)
    if progress.finished:
        logger.info("skipping {} tasks that have already been finished".format(len(progress.finished)))
    # unify declarations
    if isinstance(self.topic_layers, int):
        self.topic_layers = [self.topic_layers]

    def pending(task):
        # a stage runs when forced or when the task log has no entry for it
        return force or task not in progress

    # corpus (if required) and vocabulary
    if pending('token_dict'):
        self.stream_token_dict()
        progress.add('token_dict', "finished calculating the token counts and the global dictionary for all documents")
    # reduced version of the corpus based on the provided dictionary
    if pending('reduced_corpus'):
        self.stream_reduced_corpus()
        progress.add('reduced_corpus', "")
    # category layer (only if configured)
    if self.category_layer and pending('metadata'):
        self.stream_metadata()
        progress.add('metadata', "finished extracting categories from document metadata")
    # nested topic model + document classification
    if pending('topic_model'):
        self.stream_nested_topic_model()
        progress.add('topic_model', "")
    logger.info("build completed. Classification results have been stored in `{}`".format(self.file_topics))
def stream_nested_topic_model(self):
    """
    Build the layered topic model and classify all documents.

    Starting from a synthetic 'root' topic (or from metadata categories when
    category_layer is enabled), each configured layer trains one sub-model per
    current topic on that topic's sub-corpus, classifies its documents, and
    feeds the resulting sub-topics into the next layer. Final topics and
    per-document assignments are written to self.file_topics as JSON.
    """
    # initialize data structures
    root_topic = Topic('root', layer=0)
    current_topics = None  # type: List[Topic]
    documents = None  # type: Dict[str, Document]
    dictionary = self.load_dictionary()
    if self.category_layer:
        # first layer comes from metadata categories instead of a trained model
        logger.info("building first topic layer from document metadata...")
        current_topics = self.topics_from_metadata(root_topic)
        documents = self.docs_from_metadata(current_topics)
    else:
        current_topics = [root_topic]
        documents = self.docs_from_ids()
    # build topic model and classify documents
    logger.info("building topic models and classifying documents...")
    for idx, (num_topics, topic_limit) in enumerate(zip(self.topic_layers, self.topic_limit_per_layer)):
        logger.info("Processing layer {} of {}, with {} sub-topics per parent topic{}"
                    .format(idx+1, len(self.topic_layers), num_topics, " (max. {} topics per doc)"
                            .format(topic_limit) if topic_limit else ""))
        # TODO add option to remove temporary data immediately
        # collect topics for the next iteration
        next_topics = []  # type: List[Topic]
        # go through the documents of each topic
        for topic in current_topics:
            logger.info("Processing documents in topic '{}'...".format(topic.topic_id))
            # load the last corpus that was created for this topic's parent
            corpus = self.load_corpus_for_topic(topic.parent if topic != root_topic else topic)
            # reduce the corpus so it only contains the documents we need
            sub_corpus = self.corpus2corpus(corpus, documents, topic) if topic != root_topic else corpus
            if sub_corpus:  # only continue if there are actually documents with this topic
                # limit the number of sub-topics, if necessary
                # (min_docs_per_topic caps sub-topics by available document count)
                num_topics_adjusted = min(int(len(sub_corpus) / self.min_docs_per_topic), num_topics) \
                    if self.min_docs_per_topic else num_topics
                if num_topics_adjusted <= 3:
                    # too few documents for a meaningful sub-model; leave this branch as a leaf
                    logger.info("skipping topic {} (too few documents: {})".format(topic.topic_id, len(sub_corpus)))
                else:
                    # build the topic model
                    self.stream_topic_model(topic, dictionary, sub_corpus, num_topics_adjusted)
                    # classify documents using the topic model
                    sub_topics = self.stream_classify_documents(topic, sub_corpus, documents, topic_limit=topic_limit)
                    # save the sub-topics for the next layer
                    next_topics.extend(sub_topics)
                    logger.info("All {} documents in topic '{}' have been classified".format(len(sub_corpus), topic.topic_id))
            else:
                logger.warning("there are no documents in topic '{}'. Hint: parent topic '{}' has {} documents"
                               .format(topic.topic_id, topic.parent.topic_id if topic.parent else "root", len(corpus)))
        # select the topics for the next iteration
        current_topics = next_topics
    logger.info("all {} documents have been classified. storing results...".format(len(documents)))
    # NOTE(review): relies on Topic's private _collect_topics() to flatten the tree
    topics = {topic.topic_id: topic for topic in root_topic._collect_topics()}
    collection = DocumentCollection(topics, documents)
    util.json_write(collection.to_dict(), self.file_topics, pretty=False)
def stream_token_dict(self):
"""
make a single run over the file containing all documents as plaintext.
Parse all documents using spacy, store the token counts for each document
and build the global token dict
"""
if self.file_corpus_input:
| |
easier and less verbose! These options are meant to be
used in conjunction with a config file for overriding certain
configuration options and testing them.
"""),
formatter_class=argparse.RawDescriptionHelpFormatter)
add_list_args(plot_parser, True)
add_dump_args(plot_parser, True)
add_plot_args(plot_parser, False)
# [config]
config_parser = subparsers.add_parser('config', help='Displays, loads, and saves the configuration.', \
description = textwrap.dedent("""\
Displays, loads, and saves the configuration.
Inherits arguments from [list], [dump], and [plot]. [list], [dump],
and [plot] arguments may be required to use [config]. For information
on [list], [dump], or [plot] arguments, see their respective help.
"""), \
formatter_class=argparse.RawDescriptionHelpFormatter)
add_list_args(config_parser, True)
add_dump_args(config_parser, True)
add_plot_args(config_parser, True)
add_config_args(config_parser, False)
return parser
def parse_to_config(parse):
# Init a few variables:
pyradmon_config = {}
plot_dict = {}
## Core args
# First, examine the core arguments.
if isset_obj("config_file", parse):
# OK, check if the file exists.
if os.path.isfile(parse.config_file):
# Attempt to load
res = config.load(parse.config_file)
if res == None:
print "ERROR: Could not open configuration file!"
return (None, None, None)
pyradmon_config = res[0]
plot_dict = res[1]
else:
print "ERROR: Configuration file path does not exist!"
return (None, None, None)
# Validate configuration
config.validate(pyradmon_config, plot_dict)
# Now check for logging args!
file_enabled = False
if isset_obj("logging_output", parse):
logging_output = parse.logging_output
if logging_output != "":
logging_output = logging_output.split(",")
logging_output = [x.strip() for x in logging_output]
final_logging_output = []
for log_o in logging_output:
if log_o == 'file':
file_enabled = True
elif log_o == 'stdout':
final_logging_output.append(sys.stdout)
elif log_o == 'stderr':
final_logging_output.append(sys.stderr)
else:
print "ERROR: Invalid logging output! Valid output: stdout, stderr, file"
return (None, None, None)
logging_output = final_logging_output
else:
logging_output = [ sys.stdout ]
if logging_output and file_enabled and isset_obj("logging_file", parse):
logging_file = parse.logging_file
else:
logging_file = None
if isset_obj("logging_level", parse):
logging_level = parse.logging_level
logging_level = logging_level.strip()
if logging_level == "INFO":
logging_level = logging.INFO
elif logging_level == "WARNING":
logging_level = logging.WARNING
elif logging_level == "ERROR":
logging_level = logging.ERROR
elif logging_level == "CRITICAL":
logging_level = logging.CRITICAL
elif logging_level == "DEBUG":
logging_level = logging.DEBUG
else:
print "ERROR: Invalid logging level specified!"
print "Valid levels: INFO, WARNING, ERROR, CRITICAL, DEBUG"
return (None, None, None)
else:
logging_level = logging.INFO
if isset_obj("mp_disable", parse):
pyradmon_config['mp_disable'] = parse.mp_disable
if isset_obj("mp_priority_mode", parse):
mp_priority_mode = parse.mp_priority_mode
mp_priority_mode = mp_priority_mode.strip()
if mp_priority_mode == "GENEROUS":
pyradmon_config['mp_priority_mode'] = "GENEROUS"
elif mp_priority_mode == "NORMAL":
pyradmon_config['mp_priority_mode'] = "NORMAL"
elif mp_priority_mode == "AGGRESSIVE":
pyradmon_config['mp_priority_mode'] = "AGGRESSIVE"
elif mp_priority_mode == "EXTREME":
pyradmon_config['mp_priority_mode'] = "EXTREME"
elif mp_priority_mode == "NUCLEAR":
pyradmon_config['mp_priority_mode'] = "NUCLEAR"
else:
print "ERROR: Invalid multiprocessing (mp) priority mode specified!"
print "Valid levels: GENEROUS, NORMAL, AGGRESSIVE, EXTREME, NUCLEAR"
return (None, None, None)
else:
pyradmon_config['mp_priority_mode'] = "NORMAL"
if isset_obj("mp_cpu_limit", parse):
if (parse.mp_cpu_limit).isdigit():
pyradmon_config['mp_cpu_limit'] = int(parse.mp_cpu_limit)
else:
print "ERROR: Invalid multiprocessing (mp) CPU limit! The CPU limit"
print "must specify an integer number of CPUs to limit use to."
return (None, None, None)
# We're ready - let's set up logging!
logger = log.init(logging_level, logging_output, logging_file)
# From now on, we'll stick to using the log module to print stuff
# out.
## Config args, part 1
if parse.verb == "config":
# We will only read --config-load here. All others will be
# checked at the end.
if isset_obj("config_load", parse):
if isset_obj("config_file", parse):
warn("Detected --config-load when --config-file is already specified! --config-load will override the configuration file specified in --config-file. You should only specify one argument, preferrably --config-file.")
# OK, check if the file exists.
if os.path.isfile(parse.config_load):
# Attempt to load
res = config.load(parse.config_load)
if res == None:
critical("ERROR: Could not open configuration file!")
return (None, None, None)
pyradmon_config = res[0]
plot_dict = res[1]
else:
critical("ERROR: Configuration file path does not exist!")
return (None, None, None)
# Validate configuration
config.validate(pyradmon_config, plot_dict)
## Plot args
# FUN TIME
if parse.verb == "plot" or parse.verb == "config":
if isset_obj("plot_define_plots", parse):
plots = ",".join(parse.plot_define_plots).split(",")
# Cleanup
plots = [x.strip() for x in plots]
for plot in plots:
if not plot in plot_dict:
if plot == "":
warn("Invalid plot ID detected - plot ID can't be blank!")
warn("This plot definition will be skipped.")
continue
plot_dict[plot] = {}
if isset_obj("plot_define_subplots", parse):
# "plot1:sub1,sub2;abc2:abc,def;superplot3:s1,s2"
subplots_def = ";".join(parse.plot_define_subplots).split(";")
# Cleanup
subplots_def = [x.strip() for x in subplots_def]
for subplot_def in subplots_def:
# Chunk: plot1:sub1,sub2
subplot_def_split = subplot_def.split(":")
subplot_def_split = [x.strip() for x in subplot_def_split]
# SAAAAAAAANITY CHECK!!!!!!!1111
# Sanity check 1: do we have 2 elements?
if len(subplot_def_split) != 2:
warn("Invalid subplot definition detected - invalid key-value pair '%s'!" % subplot_def)
warn("(Key-value pair should be key:value - make sure there are no extra colons!)")
warn("This subplot definition will be skipped.")
continue
# OK, now seperate it out!
subplot_def_plot = subplot_def_split[0]
subplot_def_subplots = subplot_def_split[1]
# Sanity check 2: does the plot named exist?!?
if not subplot_def_plot in plot_dict:
warn("Invalid subplot definition detected - the plot specified, '%s', does not exist!" % subplot_def_plot)
warn("Ensure spelling is correct. If it is a new plot, make sure it is defined.")
warn("This subplot definition will be skipped.")
continue
# OK, let's process subplots.
subplot_def_subplots = subplot_def_subplots.split(",")
subplot_def_subplots = [x.strip() for x in subplot_def_subplots]
# Prep plot_dict
if not "plots" in plot_dict[subplot_def_plot]:
plot_dict[subplot_def_plot]["plots"] = []
# Add away!
for subplot_def_subplot in subplot_def_subplots:
plot_dict[subplot_def_plot]["plots"].append({ subplot_def_subplot : {} })
# Done!
if isset_obj("plot_define_axes", parse):
# "plot1|sub1|x:ticks=5,label=Hello world"
axes_def = ";".join(parse.plot_define_axes).split(";")
# Cleanup
axes_def = [x.strip() for x in axes_def]
for axis_def in axes_def:
# Chunk: plot1|sub1|x:ticks=5,label=Hello world
axis_def_split = axis_def.split(":")
axis_def_split = [x.strip() for x in axis_def_split]
# SAAAAAAAANITY CHECK!!!!!!!1111
# Sanity check 1: do we have 2 elements?
if len(axis_def_split) != 2:
warn("Invalid axis definition detected - invalid key-value pair '%s'!" % axis_def)
warn("(Key-value pair should be key:value - make sure there are no extra colons!)")
warn("This axis definition will be skipped.")
continue
# OK, now seperate it out!
# Chunk: plot1|sub1|x --> [plot1, sub1, x]
axis_def_plot_subplot_axis = axis_def_split[0].split("|")
# Chunk: ticks=5,label=Hello world
axis_def_attrs = axis_def_split[1]
# Sanity check 2: does the plot/subplot/axe key have 3 elements?
if len(axis_def_plot_subplot_axis) != 3:
warn("Invalid axis definition detected - the key is invalid! It should only have")
warn("3 elements - plot|subplot|x/y!")
warn("This axis definition will be skipped.")
continue
# OK, let's seperate that out!
axis_def_plot = axis_def_plot_subplot_axis[0]
axis_def_subplot = axis_def_plot_subplot_axis[1]
axis_def_axis = axis_def_plot_subplot_axis[2].lower()
# Sanity check 3: does the plot/subplot named exist?!?
if not axis_def_plot in plot_dict:
warn("Invalid axis definition detected - the plot specified, '%s', does not exist!" % axis_def_plot)
warn("Ensure spelling is correct. If it is a new plot, make sure it is defined.")
warn("This axis definition will be skipped.")
continue
# OK, plot exists. How about subplot?
# We have to do some strange magic here...
axis_def_subplot_found = False
axis_def_subplot_dat = None
for axis_def_subplot_dict in plot_dict[axis_def_plot]['plots']:
if axis_def_subplot in axis_def_subplot_dict:
axis_def_subplot_dat = axis_def_subplot_dict[axis_def_subplot]
axis_def_subplot_found = True
break
if not axis_def_subplot_found:
warn("Invalid axis definition detected - the subplot specified, '%s', does not exist!" % axis_def_subplot)
warn("Ensure spelling is correct. If it is a new subplot, make sure it is defined and")
warn("in the right subplot. This axis definition will be skipped.")
continue
# Sanity check 4: Is the axis valid?
if axis_def_axis != "x" and axis_def_axis != "y":
warn("Invalid axis definition detected - the axis specified, '%s', is invalid!" % axis_def_axis)
warn("'x' and 'y' are the only axes allowed. This axis definition will be skipped.")
continue
# OK, let's setup shop.
if not "axes" in axis_def_subplot_dat:
axis_def_subplot_dat["axes"] = {}
if not axis_def_axis in axis_def_subplot_dat["axes"]:
axis_def_subplot_dat["axes"][axis_def_axis] = {}
# OK, let's process attributes.
axis_def_attrs = axis_def_attrs.split(",")
axis_def_attrs = [x.strip().split("=") for x in axis_def_attrs]
# Sanity check 5: Are these valid key-value pairs?
kvpair_bad = False
for kvpair in axis_def_attrs:
if len(kvpair) != 2:
warn("Invalid axis definition detected - the key/value subpair, '%s', is invalid!" % '='.join(kvpair))
kvpair_bad | |
the redundancy of the filters
(the larger r_psi, the larger the overlap between adjacent wavelets),
and stability against time-warp deformations (larger r_psi improves it).
Defaults to sqrt(0.5).
Tuple sets separately for first- and second-order filters.
criterion_amplitude : float, optional
Represents the numerical error which is allowed to be lost after
convolution and padding. Defaults to 1e-3.
normalize : string / tuple[string], optional
Normalization convention for the filters (in the temporal domain).
Supports 'l1', 'l2', 'l1-energy', 'l2-energy', but only 'l1' or 'l2' is
used. See `help(Scattering1D)`.
max_subsampling: int or None, optional
maximal dyadic subsampling to compute, in order
to save computation time if it is not required. Defaults to None, in
which case this value is dynamically adjusted depending on the filters.
sigma0 : float, optional
parameter controlling the frequential width of the
low-pass filter at J_scattering=0; at an absolute J_scattering, it
is equal to sigma0 / 2**J_scattering. Defaults to 1e-1
alpha : float, optional
tolerance factor for the aliasing after subsampling.
The larger alpha, the more conservative the value of maximal
subsampling is. Defaults to 4.
P_max : int, optional
maximal number of periods to use to make sure that the Fourier
transform of the filters is periodic. P_max = 5 is more than enough for
double precision. Defaults to 5. Should be >= 1
eps : float, optional
required machine precision for the periodization (single
floating point is enough for deep learning applications).
Defaults to 1e-7
Returns
-------
phi_f : dictionary
a dictionary containing the low-pass filter at all possible
subsamplings. See above for a description of the dictionary structure.
The possible subsamplings are controlled by the inputs they can
receive, which correspond to the subsamplings performed on top of the
1st and 2nd order transforms.
psi1_f : dictionary
a dictionary containing the band-pass filters of the 1st order,
only for the base resolution as no subsampling is used in the
scattering tree.
Each value corresponds to a dictionary for a single filter, see above
for an exact description.
The keys of this dictionary are of the type (j, n) where n is an
integer counting the filters and j the maximal dyadic subsampling
which can be performed on top of the filter without aliasing.
psi2_f : dictionary
a dictionary containing the band-pass filters of the 2nd order
at all possible subsamplings. The subsamplings are determined by the
input they can receive, which depends on the scattering tree.
Each value corresponds to a dictionary for a single filter, see above
for an exact description.
The keys of this dictionary are of the type (j, n) where n is an
integer counting the filters and j is the maximal dyadic subsampling
which can be performed on top of this filter without aliasing.
References
----------
1. Convolutional operators in the time-frequency domain, <NAME>,
PhD Thesis, 2017
https://tel.archives-ouvertes.fr/tel-01559667
2. This is a modification of
https://github.com/kymatio/kymatio/blob/master/kymatio/scattering1d/
filter_bank.py
Kymatio, (C) 2018-present. The Kymatio developers.
"""
# compute the spectral parameters of the filters
(sigma_low, xi1s, sigma1s, j1s, is_cqt1s, xi2s, sigma2s, j2s, is_cqt2s
) = calibrate_scattering_filters(J_scattering, Q, T, r_psi=r_psi,
sigma0=sigma0, alpha=alpha, J_pad=J_support)
# split `normalize` into orders
normalize1, normalize2 = normalize
# instantiate the dictionaries which will contain the filters
phi_f = {}
psi1_f = []
psi2_f = []
# compute the band-pass filters of the second order,
# which can take as input a subsampled
N_pad = 2**J_support
for (n2, j2) in enumerate(j2s):
# compute the current value for the max_subsampling,
# which depends on the input it can accept.
if max_subsampling is None:
possible_subsamplings_after_order1 = [
j1 for j1 in j1s if j2 >= j1]
if len(possible_subsamplings_after_order1) > 0:
max_sub_psi2 = max(possible_subsamplings_after_order1)
else:
max_sub_psi2 = 0
else:
max_sub_psi2 = max_subsampling
# We first compute the filter without subsampling
psi_f = {}
psi_f[0] = morlet_1d(
N_pad, xi2s[n2], sigma2s[n2], normalize=normalize2,
P_max=P_max, eps=eps)
# compute the filter after subsampling at all other subsamplings
# which might be received by the network, based on this first filter
for subsampling in range(1, max_sub_psi2 + 1):
factor_subsampling = 2**subsampling
psi_f[subsampling] = periodize_filter_fourier(
psi_f[0], nperiods=factor_subsampling)
psi2_f.append(psi_f)
# for the 1st order filters, the input is not subsampled so we
# can only compute them with N_pad=2**J_support
for (n1, j1) in enumerate(j1s):
psi1_f.append({0: morlet_1d(
N_pad, xi1s[n1], sigma1s[n1], normalize=normalize1,
P_max=P_max, eps=eps)})
# compute the low-pass filters phi
# Determine the maximal subsampling for phi, which depends on the
# input it can accept (both 1st and 2nd order)
log2_T = math.floor(math.log2(T))
if max_subsampling is None:
max_subsampling_after_psi1 = max(j1s)
max_subsampling_after_psi2 = max(j2s)
max_sub_phi = min(max(max_subsampling_after_psi1,
max_subsampling_after_psi2), log2_T)
else:
max_sub_phi = max_subsampling
# compute the filters at all possible subsamplings
phi_f[0] = gauss_1d(N_pad, sigma_low, normalize=normalize1, P_max=P_max,
eps=eps)
for subsampling in range(1, max_sub_phi + 1):
factor_subsampling = 2**subsampling
# compute the low_pass filter
phi_f[subsampling] = periodize_filter_fourier(
phi_f[0], nperiods=factor_subsampling)
# Embed the meta information within the filters
ca = dict(criterion_amplitude=criterion_amplitude)
s0ca = dict(N=N, sigma0=sigma0, criterion_amplitude=criterion_amplitude)
for (n1, j1) in enumerate(j1s):
psi1_f[n1]['xi'] = xi1s[n1]
psi1_f[n1]['sigma'] = sigma1s[n1]
psi1_f[n1]['j'] = j1
psi1_f[n1]['is_cqt'] = is_cqt1s[n1]
psi1_f[n1]['width'] = {0: 2*compute_temporal_width(
psi1_f[n1][0], **s0ca)}
psi1_f[n1]['support'] = {0: 2*compute_temporal_support(
psi1_f[n1][0], **ca)}
for (n2, j2) in enumerate(j2s):
psi2_f[n2]['xi'] = xi2s[n2]
psi2_f[n2]['sigma'] = sigma2s[n2]
psi2_f[n2]['j'] = j2
psi2_f[n2]['is_cqt'] = is_cqt2s[n2]
psi2_f[n2]['width'] = {}
psi2_f[n2]['support'] = {}
for k in psi2_f[n2]:
if isinstance(k, int):
psi2_f[n2]['width'][k] = 2*compute_temporal_width(
psi2_f[n2][k], **s0ca)
psi2_f[n2]['support'][k] = 2*compute_temporal_support(
psi2_f[n2][k], **ca)
phi_f['xi'] = 0.
phi_f['sigma'] = sigma_low
phi_f['j'] = log2_T
phi_f['width'] = 2*compute_temporal_width(phi_f[0], **s0ca)
phi_f['support'] = 2*compute_temporal_support(phi_f[0], **ca)
# return results
return phi_f, psi1_f, psi2_f
#### Energy renormalization ##################################################
def energy_norm_filterbank_tm(psi1_f, psi2_f, phi_f, J, log2_T, normalize):
    """Energy-renormalize temporal filterbank; used by `base_frontend`.
    See `help(wavespin.scattering1d.filter_bank.energy_norm_filterbank)`.
    """
    # `phi_f` may be `None` (the `trim_tm` case for JTFS)
    if phi_f is None:
        phi = None
    else:
        phi = phi_f[0][0] if isinstance(phi_f[0], list) else phi_f[0]
    common_kw = dict(phi_f=phi, log2_T=log2_T, passes=3)

    # first order: renormalize the unsubsampled filters in-place
    if 'energy' in normalize[0]:
        energy_norm_filterbank([p[0] for p in psi1_f], J=J[0], **common_kw)

    # second order: renormalize the unsubsampled filters, then propagate the
    # resulting scaling factors onto every subsampled copy of each filter
    if 'energy' in normalize[1]:
        unsubsampled = [p[0] for p in psi2_f]
        factors = energy_norm_filterbank(unsubsampled, J=J[1], **common_kw)
        for n2, pf in enumerate(psi2_f):
            for key in pf:
                if isinstance(key, int) and key != 0:
                    pf[key] *= factors[0][n2]
def energy_norm_filterbank_fr(psi1_f_fr_up, psi1_f_fr_dn, phi_f_fr,
                              J_fr, log2_F, sampling_psi_fr):
    """Energy-renormalize frequential filterbank; used by `base_frontend`.
    See `help(wavespin.scattering1d.filter_bank.energy_norm_filterbank)`.
    """
    last_psi_id = max(i for i in psi1_f_fr_up if isinstance(i, int))
    stopped_at = None
    for pid in range(last_psi_id + 1):
        ups = psi1_f_fr_up[pid]
        dns = psi1_f_fr_dn[pid]
        # `sampling_psi_fr = 'exclude'` can leave too few filters to normalize
        if len(ups) <= 3:
            if pid == 0:
                raise Exception("largest scale filterbank must have >=4 filters")
            stopped_at = pid
            break
        factors = energy_norm_filterbank(
            ups, dns,
            None,                # lowpass: not worth the hassle to account for
            J_fr, log2_F,
            is_recalibrate=bool(sampling_psi_fr == 'recalibrate'),
            passes=10)           # can afford more passes without performance hit
    # Filterbanks skipped above (<= 3 filters) still need scaling; reuse the
    # factors from the last filterbank that had more than 3 filters.
    if stopped_at is not None:
        for pid in range(stopped_at, last_psi_id + 1):
            if pid not in psi1_f_fr_dn:
                continue
            for n1_fr in range(len(psi1_f_fr_dn[pid])):
                psi1_f_fr_up[pid][n1_fr] *= factors[0][n1_fr]
                psi1_f_fr_dn[pid][n1_fr] *= factors[1][n1_fr]
def energy_norm_filterbank(psi_fs0, psi_fs1=None, phi_f=None, J=None, log2_T=None,
is_recalibrate=False, warn=False, passes=3,
scaling_factors=None):
"""Rescale wavelets such that their frequency-domain energy sum
(Littlewood-Paley sum) peaks at 2 for an analytic-only filterbank
(e.g. time scattering for real inputs) or 1 for analytic + anti-analytic.
This makes the filterbank energy non-expansive.
Parameters
----------
psi_fs0 : list[np.ndarray]
Analytic filters (also spin up for frequential).
psi_fs1 : list[np.ndarray] / None
Anti-analytic filters (spin down). If None, filterbank is treated as
analytic-only, and LP peaks are scaled to 2 instead of 1.
phi_f : np.ndarray / None
Lowpass filter. If `log2_T < J`, will exclude from computation as
it will excessively attenuate low frequency bandpasses.
J, log2_T : int, int
See `phi_f`. For JTFS frequential scattering these are `J_fr, log2_F`.
is_recalibrate | |
<reponame>AvinashSingh786/W2RC
import tkinter
from tkinter import messagebox
import tkinter.filedialog as fd
import signal
import subprocess
import hashlib
import os
import sys
import ctypes
import winreg
import psutil
import datetime
import pickle
import logging
import datetime as dt
import json
import requests
import getpass
import socket
import threading
import time
from tkinter import *
from tkinter import ttk
from memory_profiler import profile
# Winlogbear
# CMD = r"C:\Windows\System32\cmd.exe"
# FOD_HELPER = r'C:\Windows\System32\fodhelper.exe'
# PYTHON_CMD = "F:\DigiForS\\venv\Scripts\python.exe"
# REG_PATH = 'Software\Classes\ms-settings\shell\open\command'
# DELEGATE_EXEC_REG_KEY = 'DelegateExecute'
# Working directory; all database paths below are resolved relative to it.
CWD = os.getcwd()
# Entries that failed submission during this session only (not persisted).
LOCAL_FAILED = []
# Pickled process databases (lists of dicts), kept under <CWD>\db.
WHITELIST_DB_PATH = CWD + r'\db\WHITELIST.db'
WHITELIST_DB = []
SEEN_DB_PATH = CWD + r'\db\SEEN.db'
SEEN_DB = []
FAILED_DB_PATH = CWD + r'\db\FAILED.db'
FAILED_DB = []
BLACKLIST_DB_PATH = CWD + r'\db\BLACKLIST.db'
BLACKLIST_DB = []
# NOTE(review): hard-coded bearer token and API secret are checked into
# source; these should be moved to a config file or environment variables.
HEADERS = {"Authorization": "Bearer oMACdSqsxpjHx55H1ukQ8e"}
# Flag polled by the monitor loop; clearing it stops monitoring.
MONITOR = True
# IP = "http://192.168.1.127:8090"
# Analysis (API) and storage service endpoints.
IP = "https://digifors.cs.up.ac.za/api"
IPS = "https://digifors.cs.up.ac.za/storage"
API_KEY = "<KEY>"
API_SECRET = "TTNu3tfFEdpd"
# Identity fields injected into every log record via the LoggerAdapter below.
CONFIG = {"user": getpass.getuser(), "longuser": getpass.getuser() + " ("+socket.gethostname() + ")", 'machine': socket.gethostname(),
          "ip": socket.gethostbyname_ex(socket.gethostname())[2][-1]}
# if not ip.startswith("127.")] or [[(s.connect(("8.8.8.8", 53)),
# s.getsockname()[0], s.close()) for s in [socket.socket(socket.AF_INET,
# socket.SOCK_DGRAM)]][0][1]]) + ["no IP found"])[0]}
def is_running_as_admin():
    '''
    Checks if the script is running with administrative privileges.
    Returns True if is running as admin, False otherwise.
    '''
    # BUG FIX: an unconditional `return True` left over from debugging made
    # the real privilege check below unreachable, so the module-level admin
    # gate never fired and the docstring contract was violated.
    try:
        # shell32.IsUserAnAdmin() returns a nonzero int for elevated processes.
        return bool(ctypes.windll.shell32.IsUserAnAdmin())
    except (AttributeError, OSError):
        # `ctypes.windll` only exists on Windows; a failed foreign call
        # surfaces as OSError. Either way: not elevated.
        return False
# Refuse to run without elevation -- the monitor suspends/kills other
# processes and reads their executables, which requires admin rights.
if not is_running_as_admin():
    messagebox.showerror('W2RC', 'Please run as administrator')
    sys.exit(1)
OWNER_ID = getpass.getuser()
date = dt.datetime.now()
# Log file is named after the current date (YYYY-MM-DD).
logfile = str(date)[0:10]
# logging.basicConfig(filename=CWD+'log\\' + logfile + '.log', level=logging.NOTSET)
log = logging.getLogger(__name__)
# Console handler at INFO, file handler records everything.
handler = logging.StreamHandler()
handler.setLevel(logging.INFO)
# NOTE(review): assumes a 'log' directory already exists next to the script;
# FileHandler raises if it does not -- confirm it is created elsewhere.
fh = logging.FileHandler('log\\' + logfile + '.log')
fh.setLevel(logging.NOTSET)
formatter = logging.Formatter('%(asctime)s - %(ip)s - %(longuser)-8s [%(levelname)s] --> %(message)s',
                              datefmt='%d/%m/%Y %I:%M:%S')
handler.setFormatter(formatter)
fh.setFormatter(formatter)
log.addHandler(handler)
log.addHandler(fh)
log.setLevel(logging.INFO)
# The adapter injects CONFIG's fields into every record so the formatter's
# custom %(ip)s / %(longuser)s placeholders resolve.
log = logging.LoggerAdapter(log, CONFIG)
log.info('W2RC Started', extra=CONFIG)
# Banner text, filled in by welcome().
w2rc = ""
# Serializes DB/UI updates between the monitor loop and worker threads.
rlock = threading.RLock()
def welcome():
    """Print the W2RC startup banner and store it in the module-global `w2rc`."""
    global w2rc
    banner = (
        "\n\n=======================================================\n"
        "\t ██╗ ██╗██████╗ ██████╗ ██████╗\n"
        "\t ██║ ██║╚════██╗██╔══██╗██╔════╝\n"
        "\t ██║ █╗ ██║ █████╔╝██████╔╝██║\n"
        "\t ██║███╗██║██╔═══╝ ██╔══██╗██║\n"
        "\t ╚███╔███╔╝███████╗██║ ██║╚██████╗\n"
        "\t ╚══╝╚══╝ ╚══════╝╚═╝ ╚═╝ ╚═════╝\n"
        "\n\t Windows Registry and RAM Collector\n\t\t\t\t -BY AVINASH SINGH"
        "\n=======================================================\n"
    )
    w2rc = banner
    print(w2rc)
    print("\nMonitor now running press <CTRL> C twice to stop monitoring safely.\n\n")
# log.error('[RegSmart] An error occurred', exc_info=True, extra=CONFIG)
def is_running_as_admin():
    '''
    Checks if the script is running with administrative privileges.
    Returns True if is running as admin, False otherwise.
    '''
    try:
        # shell32.IsUserAnAdmin() returns a nonzero int for elevated processes.
        return bool(ctypes.windll.shell32.IsUserAnAdmin())
    except (AttributeError, OSError):
        # Narrowed from a bare `except`: `ctypes.windll` is Windows-only
        # (AttributeError elsewhere) and a failed foreign call is OSError.
        return False
def gen_safe_db():
    """Snapshot every currently running process into the whitelist database.

    Each whitelist entry records pid, name, executable path, the executable's
    MD5 and a timestamp.  The result replaces WHITELIST_DB in memory and is
    pickled to WHITELIST_DB_PATH.
    """
    global WHITELIST_DB
    WHITELIST_DB = []
    failed = 0
    for pid in psutil.pids():
        try:
            p = psutil.Process(pid)
            WHITELIST_DB.append({
                'pid': pid, 'name': p.name(), 'md5': md5(p.exe()),
                'time': str(datetime.datetime.now()),
                'exe': p.exe(), 'CAT': "N/A",
            })
        except Exception:
            # Typically access denied / process already exited / unreadable exe.
            failed += 1
    with open(WHITELIST_DB_PATH, 'wb') as f:
        pickle.dump(WHITELIST_DB, f, pickle.HIGHEST_PROTOCOL)
    log.info("Successfully created whitelist database", extra=CONFIG)
    log.info(str(len(WHITELIST_DB)) + " entries were added successfully.", extra=CONFIG)
    # BUG FIX: `failed` was counted but never reported anywhere.
    if failed:
        log.info(str(failed) + " processes could not be whitelisted.", extra=CONFIG)
def md5(fname):
    """Return the hex MD5 digest of the file at *fname*, read in 4 KiB chunks."""
    digest = hashlib.md5()
    with open(fname, "rb") as fh:
        chunk = fh.read(4096)
        while chunk:
            digest.update(chunk)
            chunk = fh.read(4096)
    return digest.hexdigest()
def load_safe_db():
    """Load (or create) the four pickled process databases.

    Rebinds the WHITELIST/FAILED/BLACKLIST/SEEN module globals from disk,
    records each file's MD5 in HASHLIST, and prints a summary per database.
    """
    global WHITELIST_DB
    global FAILED_DB
    global SEEN_DB
    global BLACKLIST_DB
    global HASHLIST
    global CWD
    dblist = ["WHITELIST", "FAILED", "BLACKLIST", "SEEN"]
    # BUG FIX: the original created `CWD + 'db'` (missing the path separator),
    # so the real db directory never existed and the open() calls below failed
    # on a fresh install.  Create the directory the DB paths actually use.
    db_dir = os.path.dirname(WHITELIST_DB_PATH)
    if not os.path.isdir(db_dir):
        os.makedirs(db_dir)
    WHITELIST_DB = _load_or_init_db(WHITELIST_DB_PATH, WHITELIST_DB, "WHITELIST")
    FAILED_DB = _load_or_init_db(FAILED_DB_PATH, FAILED_DB, "FAILED")
    BLACKLIST_DB = _load_or_init_db(BLACKLIST_DB_PATH, BLACKLIST_DB, "BLACKLIST")
    SEEN_DB = _load_or_init_db(SEEN_DB_PATH, SEEN_DB, "SEEN")
    for label, path, db in (("WHITELIST", WHITELIST_DB_PATH, WHITELIST_DB),
                            ("FAILED", FAILED_DB_PATH, FAILED_DB),
                            ("BLACKLIST", BLACKLIST_DB_PATH, BLACKLIST_DB),
                            ("SEEN", SEEN_DB_PATH, SEEN_DB)):
        print("Successfully loaded " + label + "_DB database (" + path + ") with (" + str(
            len(db)) + ") entries and MD5 hash [" + HASHLIST[label].get() + "].")
    log.debug(dblist, extra=CONFIG)
    log.debug(HASHLIST, extra=CONFIG)
    log.info("Successfully loaded databases", extra=CONFIG)


def _load_or_init_db(path, current, label):
    """Load one pickled DB list from *path*; create the file from *current*
    when it is absent or empty.  Updates HASHLIST[label] with the file's MD5
    and returns the loaded (or initial) list.

    The size check also fixes a crash in the original, which unpickled
    existing-but-empty FAILED/BLACKLIST/SEEN files (EOFError).
    """
    if os.path.exists(path) and os.path.getsize(path) > 0:
        HASHLIST[label].set(md5(path))
        with open(path, 'rb') as f:
            return pickle.load(f)
    with open(path, 'wb') as f:
        pickle.dump(current, f, pickle.HIGHEST_PROTOCOL)
    HASHLIST[label].set(md5(path))
    return current
def refresh_db():
    """Reload every database from disk, redraw the UI tables, then notify the user."""
    load_safe_db()
    update_state()
    messagebox.showinfo('W2RC', 'Successfully reloaded all databases.')
def _refill_tree(tv, rows, make_values, tags):
    """Clear treeview *tv* and repopulate it, one row per entry in *rows*.

    Row iids/labels are 1-based positions, matching the original layout.
    """
    for child in tv.get_children():
        tv.delete(child)
    for i, d in enumerate(rows, 1):
        tv.insert('', 'end', i, text=i, values=make_values(d), tags=tags)


def update_state():
    """Persist all four databases to disk, rebuild every treeview from them,
    refresh the per-file MD5 hashes in HASHLIST, and scroll the views to the
    bottom.  (Decomposed from four copy-pasted fill loops.)
    """
    global BLACKLIST_DB, SEEN_DB, FAILED_DB, HASHLIST
    # Persist the in-memory DBs.
    for path, db in ((BLACKLIST_DB_PATH, BLACKLIST_DB),
                     (SEEN_DB_PATH, SEEN_DB),
                     (FAILED_DB_PATH, FAILED_DB),
                     (WHITELIST_DB_PATH, WHITELIST_DB)):
        with open(path, 'wb') as f:
            pickle.dump(db, f, pickle.HIGHEST_PROTOCOL)
    # Rebuild the UI tables.
    _refill_tree(failed_tv, FAILED_DB,
                 lambda d: (d["name"], d["time"]), ('failed', 'simple'))
    _refill_tree(whitelist_tv, WHITELIST_DB,
                 lambda d: (d["exe"], d["time"], d["md5"]), ('success', 'simple'))
    _refill_tree(seen_tv, SEEN_DB,
                 lambda d: (d["exe"], d.get("CAT", "N/A"), d["time"], d["md5"]),
                 ('success', 'simple'))
    _refill_tree(blacklist_tv, BLACKLIST_DB,
                 lambda d: (d["exe"], d.get("CAT", "N/A"), d["time"], d["md5"]),
                 ('black', 'simple'))
    # Re-hash the freshly written files.
    HASHLIST["BLACKLIST"].set(md5(BLACKLIST_DB_PATH))
    HASHLIST["WHITELIST"].set(md5(WHITELIST_DB_PATH))
    HASHLIST["SEEN"].set(md5(SEEN_DB_PATH))
    HASHLIST["FAILED"].set(md5(FAILED_DB_PATH))
    # Keep all views scrolled to the newest rows.
    for tv in (mon_tv, blacklist_tv, whitelist_tv, seen_tv, failed_tv):
        tv.yview_moveto(1)
def monitor():
    """Background monitoring loop; runs until the module-global MONITOR is False.

    Walks every live process each iteration.  Executables matching the
    blacklist are killed on sight; processes not yet present in the FAILED /
    SEEN / LOCAL_FAILED / WHITELIST databases are recorded as seen and their
    files handed to a `send_cuckoo` worker thread for analysis.
    """
    failed = 0
    path_list = []
    global WHITELIST_DB
    global FAILED_DB
    global SEEN_DB
    global BLACKLIST_DB, MONITOR
    log.info("Monitoring has been started: " + IP, extra=CONFIG)
    iter = 0
    while MONITOR:
        # log.info("Entering new iteration "+ str(iter), extra=CONFIG)
        iter += 1
        for p in psutil.process_iter():
            # Never inspect (or suspend!) our own process.
            if p.pid != os.getpid():
                try:
                    entry = {'pid': p.pid, 'name': p.name(), 'md5': "", 'time': str(datetime.datetime.now()),
                             'exe': "", 'CAT': "N/A"}
                    try:
                        # log.info('W2RC', 'Quickly computing md5 for {}'.format(entry['name']), extra=CONFIG)
                        # Suspend the process while it is examined.
                        p.suspend()
                        # if "sublime" in p.exe():
                        #     log.debug(p.name() + " suspend...", extra=CONFIG)
                        # NOTE(review): the md5 computation is disabled (TODO
                        # below), so entry['md5'] stays "" at this point.
                        entry['md5'] = ""
                        # entry['md5'] = md5(p.exe()) // TODO
                        # entry['exe'] = p.exe()
                    except Exception:
                        p.resume()
                        # if "sublime" in p.name():
                        #     log.debug(p.name() + " resumed Exception ...", extra=CONFIG)
                        pass
                    # Blacklist check: match on md5 first, then confirm by name.
                    # NOTE(review): with entry['md5'] always "" here, the md5
                    # test only matches blacklist rows whose own md5 is "" --
                    # confirm this is intended before relying on md5 matching.
                    if any(d['md5'] == entry['md5'] for d in BLACKLIST_DB):
                        if any(d['name'] == entry['name'] for d in BLACKLIST_DB):
                            log.warning("DANGER !!!!!! " + p.name() +
                                        " has been blacklisted and the process has been killed.", extra=CONFIG)
                            p.suspend()
                            p.kill()
                            messagebox.showerror('W2RC ALERT', 'Suspicious executable ' + entry['name'] +
                                                 ' that is blacklisted has been detected and killed.')
                            # Restart the process scan after a kill.
                            break
                    p.resume()
                    # if "sublime" in p.exe():
                    #     log.info("Found Sublime after blacklist", extra=CONFIG)
                    if not any(d['name'] == p.name() for d in FAILED_DB):
                        entry['md5'] = ""
                        entry['exe'] = p.exe()
                        # Only submit executables not already failed, seen,
                        # locally failed, or whitelisted.
                        if not any(d['name'] == entry['name'] for d in FAILED_DB) or not \
                                any(d['exe'] == entry['exe'] for d in FAILED_DB):
                            if not any(d['exe'] == entry['exe'] for d in SEEN_DB):
                                if not any(d['exe'] == entry['exe'] for d in LOCAL_FAILED):
                                    if not any(d['name'] == entry['name'] for d in WHITELIST_DB) or not \
                                            any(d['exe'] == entry['exe'] for d in WHITELIST_DB):
                                        # Bail out promptly if monitoring was
                                        # stopped mid-scan.
                                        if not MONITOR:
                                            return
                                        log.debug(entry, extra=CONFIG)
                                        log.debug(p.cmdline(), extra=CONFIG)
                                        if "sublime" in p.exe():
                                            log.info("Found Sublime after all searches", extra=CONFIG)
                                        data = {}
                                        cmdline = p.cmdline()
                                        log.info(cmdline, extra=CONFIG)
                                        # Collect the files referenced on the command
                                        # line (anything containing a backslash).
                                        files = []
                                        for c in cmdline:
                                            if "\\" in c:
                                                try:
                                                    files.append(("files", open(c, "rb")))
                                                except Exception:
                                                    continue
                                        if len(files) == 0:
                                            # Fall back to the System32 copy of the
                                            # executable; skip the process entirely
                                            # if that cannot be opened either.
                                            try:
                                                files.append(("files", open(os.path.join("C:/Windows/System32/", entry['exe']), "rb")))
                                            except Exception:
                                                continue
                                        data['files'] = files
                                        data['args'] = cmdline[1:]
                                        print(files)
                                        # if p.name() == "WINWORD.EXE" or p.name() == "sublime_text.exe":
                                        path_list.append(p.exe())
                                        # Rebuild the entry, now with the real md5 and path.
                                        entry = {'pid': p.pid, 'name': p.name(), 'md5': md5(p.exe()),
                                                 'time': str(datetime.datetime.now()),
                                                 'exe': p.exe(), 'CAT': "N/A"}
                                        # Serialize DB + UI updates across worker threads.
                                        with rlock:
                                            log.info("Sending to cuckoo", extra=CONFIG)
                                            SEEN_DB.append(entry)
                                            mon_tv.insert('', 'end', entry['pid'], text=entry['pid'], values=(entry['name'], entry['time'], "N/A", "Submitting"),
                                                          tags=('submitted', 'simple'))
                                            update_state()
                                            if "sublime" in p.exe():
                                                log.info("Found Sublime and now sending to cuckoo", extra=CONFIG)
                                            # Submit asynchronously so the scan keeps moving.
                                            cuckoo = threading.Thread(target=send_cuckoo, args=(p, data, entry,))
                                            cuckoo.daemon = True
                                            cuckoo.start()
                except Exception as e:
                    # Inspection failed (exited process, access denied, ...):
                    # remember the name so it is not retried every iteration.
                    try:
                        if not any(d['name'] == p.name() for d in FAILED_DB):
                            FAILED_DB.append({'pid': p.pid, 'name': p.name(), 'time': str(datetime.datetime.now())})
                            failed += 1
                            log.info("Trying to add entry to failed list, " + p.name(), extra=CONFIG)
                            # failed_tv.insert('', 'end', i, text=i, values=(p.name(), str(datetime.datetime.now())),
                            #                  tags=('failed', 'simple'))
                            update_state()
                            print(e)
                    except Exception as e:
                        log.error("Error at Monitor", exc_info=True, extra=CONFIG)
                        continue
def check_online():
global IP, IPS, MONITOR, t, API_KEY, API_SECRET
MONITOR = False
IP = ip.get()
IPS = ips.get()
API_KEY = key.get()
API_SECRET = secret.get()
print(IP)
print(IPS)
if "http" not in IP:
IP = "http://"+IP
if "http" not in IPS:
IPS = "https://"+IPS
log.info('Checking if ' + IP + " analysis machine is online.", extra=CONFIG)
messagebox.showinfo('W2RC', 'Trying to see if ' + IP + " analysis and storage machines are online. This should take a few seconds.")
try:
s = None
r = requests.get(IP + "/cuckoo/status", stream=True, | |
<reponame>LuisPalacios/iptv2hts
# Stardard tools
import struct
import re
import sys
import os
import itertools
import logging
import json
# Networking
import socket
from errno import EAGAIN
# Time handling
import time
import pytz
import datetime
from datetime import timedelta
# XML
import urllib
import xml.etree.ElementTree as ET
from xml.etree.ElementTree import Element, SubElement, Comment, ElementTree
import pprint
import binascii
from pprint import pprint
module_logger = logging.getLogger('movistarxmltv.tva')
class TvaStream(object):
    """Reader for a TV-Anytime multicast file carousel.

    Joins the given multicast group/port and collects the XML files that are
    broadcast in fixed-size chunks, keyed by "<filetype>_<fileid>".
    """
    def __init__(self,mcast_grp,mcast_port):
        self.mcast_grp = mcast_grp
        self.mcast_port = mcast_port
        # Collected payloads: {"<filetype>_<fileid>": xml data}
        self._files = {}
        self.logger = logging.getLogger('movistarxmltv.tva.TvaStream')
    def files(self):
        # Accessor for the files gathered by getfiles().
        return self._files
    def _getchunk(self,socket):
        # Read one datagram and decode its 12-byte carousel header.
        # Header layout (reverse-engineered, first 12 bytes):
        #   end  xmlsize   type  ?  id  chunk#*10   total chunks  trailing zero
        #   --   --------  ----  -- --  ---------   ------------  --
        #   00   00 00 00  F1    X  0   00 00       00 00         00
        #FIXME: XMLsize print is incorrect
        data = socket.recv(1500)
        chunk = {}
        # Non-zero when this datagram is the last chunk of a file.
        chunk["end"] = struct.unpack('B',data[:1])[0]
        chunk["size"] = struct.unpack('>HB',data[1:4])[0]
        chunk["filetype"] = struct.unpack('B',data[4:5])[0]
        # File id lives in the low 12 bits of this big-endian 16-bit field.
        chunk["fileid"] = struct.unpack('>H',data[5:7])[0]&0x0fff
        # Chunk number is stored multiplied by 0x10 on the wire.
        chunk["chunk_number"] = struct.unpack('>H',data[8:10])[0]/0x10
        chunk["chunk_total"] = struct.unpack('B',data[10:11])[0]
        chunk["data"] = data[12:]
        self.logger.debug("Chunk "+str(chunk["chunk_number"])+"/"+str(chunk["chunk_total"])+" ---- e:"+str(chunk["end"])+" s:"+ str(chunk["size"])+" f:"+str(chunk["fileid"]))
        return chunk
    def getfiles(self):
        # Join the multicast group and collect one full carousel cycle into
        # self._files: wait for an end-of-file chunk so reading starts at a
        # file boundary, then accumulate chunks into whole files until the
        # first file comes around again (or the safety cap N is exhausted).
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        sock.settimeout(3)
        sock.bind((self.mcast_grp, self.mcast_port))
        mreq = struct.pack("=4sl", socket.inet_aton(self.mcast_grp), socket.INADDR_ANY)
        sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
        loop = True
        chunk = {}
        chunk["end"] = 0
        # Safety cap on the number of files read in one cycle.
        N = 335
        #Wait for an end chunk to start by the beginning
        while not (chunk["end"]):
            chunk = self._getchunk(sock)
        firstfile = str(chunk["filetype"])+"_"+str(chunk["fileid"])
        #Loop until firstfile
        while (loop):
            xmldata=""
            chunk = self._getchunk(sock)
            #Discard headers
            body=chunk["data"]
            while not (chunk["end"]):
                xmldata+=body
                chunk = self._getchunk(sock)
                body=chunk["data"]
            #Discard last 4bytes binary footer?
            xmldata+=body[:-4]
            self._files[str(chunk["filetype"])+"_"+str(chunk["fileid"])]=xmldata
            N = N - 1
            if (str(chunk["filetype"])+"_"+str(chunk["fileid"]) == firstfile or N == 0):
                loop = False
        sock.close()
class TvaParser(object):
ENCODING_EPG = "utf-8"
    def __init__(self,xmldata):
        # Raw TV-Anytime XML payload this parser operates on.
        self.xmldata = xmldata
        self.logger = logging.getLogger('movistarxmltv.tva.TvaParser')
def get_mcast_demarcationip(self,dem_code):
regexp = re.compile("DEM_" + str(dem_code) + "\..*?Address\=\\\"(.*?)\\\".*?",re.DOTALL)
return regexp.findall(self.xmldata)[0]
def channellist(self,clist):
root = ET.fromstring(self.xmldata)
services = root[0][0].findall("{urn:dvb:ipisdns:2006}SingleService")
for i in services:
channelid = i[1].attrib["ServiceName"]
clist[channelid] = {}
#clist[channelid]["logo"] = i[1].attrib["logoURI"]
url = "http://172.26.22.23:2001/appclient/incoming/epg/MAY_1/imSer/"+channelid+".jpg"
clist[channelid]["logo"] = url
clist[channelid]["address"] = i[0][0].attrib["Address"]
clist[channelid]["port"] = i[0][0].attrib["Port"]
clist[channelid]["name"] = i[2][0].text
clist[channelid]["shortname"] = i[2][1].text
clist[channelid]["desc"] = i[2][2].text
clist[channelid]["tags"] = i[2][3][0].text.split("/")
return clist
def getpackages(self):
root = ET.fromstring(self.xmldata)
packages = root[0].findall("{urn:dvb:ipisdns:2006}Package")
packageslist = {}
for p in packages:
services = p.findall("{urn:dvb:ipisdns:2006}Service")
package = p[0].text
packageslist[package] = {}
for s in services:
channelid = s[0].attrib["ServiceName"]
packageslist[package][channelid] = {}
packageslist[package][channelid]["order"] = s[1].text
return packageslist
def channels2xmltv(self,xmltv,clist):
for channelid in clist.keys():
channelName = clist[channelid]["name"]
channelId = channelid
channelKey = clist[channelid]["shortname"]
channelIp = clist[channelid]["address"]
channelPort = str(clist[channelid]["port"])
channelLogo = clist[channelid]["logo"]
cChannel = SubElement(xmltv,'channel',{"id": channelKey })
cName = SubElement(cChannel, "display-name", {"lang":"es"})
cicon = SubElement(cChannel, "icon", {"src": channelLogo })
cName.text = channelName
return xmltv
def channels2m3u(self,clist):
m3ucontent = "#EXTM3U\n"
for channelid in sorted(clist, key=lambda key: int(clist[key]["order"])):
channelName = clist[channelid]["name"]
channelId = channelid
channelKey = clist[channelid]["shortname"]
channelIp = clist[channelid]["address"]
channelPort = str(clist[channelid]["port"])
channelTags = clist[channelid]["tags"]
try:
channelOrder = clist[channelid]["order"]
except:
channelOrder = "99999"
channelLogo = clist[channelid]["logo"]
m3ucontent += "#EXTINF:-1," + channelOrder + ' - ' + channelName + '\n'
m3ucontent += "#EXTTV:"+','.join(channelTags)+";es;"+channelKey+";"+channelLogo+'\n'
m3ucontent += "rtp://@" + channelIp + ":" + channelPort + '\n'
return m3ucontent
def channels2m3usimple(self,clist):
m3ucontent = "#EXTM3U\n"
for channelid in sorted(clist, key=lambda key: int(clist[key]["order"])):
channelName = clist[channelid]["name"]
channelId = channelid
channelKey = clist[channelid]["shortname"]
channelIp = clist[channelid]["address"]
channelPort = str(clist[channelid]["port"])
channelTags = clist[channelid]["tags"]
try:
channelOrder = clist[channelid]["order"]
except:
channelOrder = "99999"
channelLogo = clist[channelid]["logo"]
m3ucontent += "#EXTINF:-1 tvg-id=\""+channelKey+"\" tvg-logo=\""+channelid+".jpg\", "+ channelName + '\n'
m3ucontent += "rtp://@" + channelIp + ":" + channelPort + '\n'
return m3ucontent
def parseepg(self,xmltv,clist):
try:
root = ET.fromstring(self.xmldata)
except ET.ParseError, v:
row, column = v.position
self.logger.error("Error parsing xml, skipping...")
self.logger.error(str(ET.ParseError))
self.logger.error("error on row" + str(row) + "column" + str(column) + ":" + str(v))
return
#root = tree.getroot()
if root[0][0][0].get('serviceIDRef') is not None:
channelid = root[0][0][0].get('serviceIDRef')
else:
self.logger.info("No serviceIDRef found")
return None
for child in root[0][0][0]:
programmeId = None
if child[0].get('crid') is not None:
programmeId = child[0].get('crid').split('/')[5] # id for description
if child[1][1][0] is not None:
genre = child[1][1][0].text #.encode(ENCODING_EPG).replace('\n', ' ') # Genre
else:
year = None
# 20030702000000 XMLTV format
# YYYYMMddHHmmss
# 2014-09-21T22:24:00.000Z IPTV multicast format
# YYYY-MM-ddTHH:mm:ss.000Z
# start and stop are mandatory, so we set a future date so we can at least find the programme
startTimePy = datetime.datetime.now() + timedelta(weeks=10)
stopTimePy = startTimePy + timedelta(minutes=1)
if child[2].text is not None:
startTimeXml = child[2].text.replace('\n', ' ') # Start time
startTimePy = datetime.datetime.strptime(startTimeXml,'%Y-%m-%dT%H:%M:%S.%fZ')
startTime = startTimePy.strftime('%Y%m%d%H%M%S') + ' +0000'
durationXml = child[3].text.replace('\n', ' ').replace('PT','') # Duration
if durationXml.find('H') > 0 and durationXml.find('M') > 0:
durationPy = datetime.datetime.strptime(durationXml,'%HH%MM')
elif durationXml.find('H') > 0 and durationXml.find('M') < 0:
durationPy = datetime.datetime.strptime(durationXml,'%HH')
elif durationXml.find('H') < 0 and durationXml.find('M') > 0:
durationPy = datetime.datetime.strptime(durationXml,'%MM')
else:
durationPy = None
if durationPy is not None:
durationPy = 60 * int(durationPy.strftime('%H')) + int(durationPy.strftime('%M'))
duration = str(durationPy)
stopTimePy = startTimePy + timedelta(minutes=durationPy)
stopTime = stopTimePy.strftime('%Y%m%d%H%M%S') + ' +0000' # Stop time
try:
url ='http://www-60.svc.imagenio.telefonica.net:2001/appserver/mvtv.do?action=getEpgInfo&extInfoID='+ programmeId +'&tvWholesaler=1'
strProgramme = urllib.urlopen(url).read().replace('\n',' ')
jsonProgramme = json.loads(strProgramme)['resultData']
except:
jsonProgramme = {}
self.logger.error("Download program info failed")
# Genre can be also got from the extra information
# s = strProgramme[:]
# genre = s.split('"genre":"')[1].split('","')[0] # Genre
year = jsonProgramme.get("productionDate")
s = strProgramme[:]
fullTitle = child[1][0].text.replace('\n', ' ').replace('Cine: ', '')
s = fullTitle[:]
m = re.search(r"(.*?) T(\d+) Cap. (\d+) - (.+)", s)
n = re.search(r"(.*?) T(\d+) Cap. (\d+)", s)
p = re.search(r"(.*?): (.*?)", s)
title = None
episodeShort = None
extra = ""
if m:
try:
season = int(m.group(2)) # season
episode = int(m.group(3)) # episode
episodeTitle = m.group(4)
if episode < 10:
episode = "0"+str(episode)
if season < 10:
season = "0"+str(season)
episodeShort = "S"+str(season)+"E"+str(episode)
extra = episodeShort +" "+episodeTitle
except ValueError:
self.logger.error("m: Error getting episode in: " + fullTitle)
title = m.group(1) # title
elif n:
try:
season = int(n.group(2)) # season
episode = int(n.group(3)) # episode
if episode < 10:
episode = "0"+str(episode)
if season < 10:
season = "0"+str(season)
episodeShort = "S"+str(season)+"E"+str(episode)
extra = episodeShort
except ValueError:
self.logger.error("n: Error getting episode in: " + fullTitle)
title = n.group(1) # title
elif s.find(': Episodio ') > 0 :
try:
episode = re.findall(r'[0-9]+', s)[0] # Episode
season = 0
except ValueError:
self.logger.error("Error getting episode in: " + fullTitle)
title = s.split(': Episodio ')[0] # Title
elif p:
self.logger.info("Grabbing episode in: " + fullTitle)
try:
title = p.group(1) # title
episodeTitle = p.group(2)
episode = None
season = None
except ValueError:
self.logger.error("n: Error getting episode in: " + fullTitle)
else:
episode = None
season = None
title = fullTitle[:]
title = title.replace('\n',' ').encode(TvaParser.ENCODING_EPG)
description = jsonProgramme.get("description")
subgenre = jsonProgramme.get("subgenre")
originalTitle = jsonProgramme.get("OriginalTitle")
#if jsonProgramme.get("longTitle") is not None:
# title = jsonProgramme.get("longTitle")[0]
mainActors = jsonProgramme.get("mainActors")
############################################################################
# Creating XMLTV with XML libraries instead XMLTV to avoid encoding issues #
############################################################################
cid = channelid.replace(".imagenio.es","")
if cid in clist.keys():
channelKey = clist[cid]["shortname"]
else:
channelKey = cid
# cProgramme = SubElement(OBJ_XMLTV,'programme', {"start":startTime+" +0200", "stop": stopTime+" +0200", "channel": channelKey })
cProgramme = SubElement(xmltv,'programme', {"start":startTime, "stop": stopTime, "channel": channelKey })
cTitle = SubElement(cProgramme, "title", {"lang":"es"})
cTitle.text = title
category = None
if subgenre is not None:
category = subgenre
elif genre is None:
category = genre
if len(extra) > 2:
extra = extra + " | "
if category is not None and year is not None and originalTitle is not None:
extra = extra + category+" | "+year[0]+" | "+originalTitle
elif category is not None and year is None and originalTitle is None:
extra = extra + category
elif category is not None and year is not None and originalTitle is None:
extra = extra + category+" | "+year[0]
if extra is not None:
cDesc = SubElement(cProgramme, "sub-title", {"lang":"es"})
cDesc.text = extra
if description is not None:
cDesc = SubElement(cProgramme, "desc", {"lang":"es"})
cDesc.text = description
cCredits = SubElement(cProgramme, "credits")
if mainActors is not None:
for i in mainActors[0].split(","):
cActors = SubElement(cCredits, "actor")
cActors.text = i
if year is not None:
cDate = SubElement(cProgramme, "date")
cDate.text = year[0]
json_data=open(os.path.split(__file__)[0]+"/categories.json").read()
categories_map = json.loads(json_data)
if categories_map.get(category) is not None:
category = categories_map.get(category)
cCategory = SubElement(cProgramme, "category", {"lang":"es"})
cCategory.text = category
if len(duration) > 0:
cDuration = SubElement(cProgramme, "length", {"units":"minutes"})
cDuration.text = duration
if episode is | |
cluster.
:rtype: dict
:returns:
"""
pass
def batch_modify_cluster_snapshots(self, SnapshotIdentifierList: List, ManualSnapshotRetentionPeriod: int = None, Force: bool = None) -> Dict:
    # NOTE: auto-generated client stub for the Redshift ``BatchModifyClusterSnapshots``
    # operation. The docstring documents the wire contract; the concrete
    # implementation is bound by botocore at runtime, so the body is a no-op.
    """
    Modifies the settings for a list of snapshots.
    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/redshift-2012-12-01/BatchModifyClusterSnapshots>`_
    **Request Syntax**
    ::
      response = client.batch_modify_cluster_snapshots(
          SnapshotIdentifierList=[
              'string',
          ],
          ManualSnapshotRetentionPeriod=123,
          Force=True|False
      )
    **Response Syntax**
    ::
      {
          'Resources': [
              'string',
          ],
          'Errors': [
              {
                  'SnapshotIdentifier': 'string',
                  'SnapshotClusterIdentifier': 'string',
                  'FailureCode': 'string',
                  'FailureReason': 'string'
              },
          ]
      }
    **Response Structure**
    - *(dict) --*
      - **Resources** *(list) --*
        A list of the snapshots that were modified.
        - *(string) --*
      - **Errors** *(list) --*
        A list of any errors returned.
        - *(dict) --*
          Describes the errors returned by a snapshot.
          - **SnapshotIdentifier** *(string) --*
            A unique identifier for the snapshot returning the error.
          - **SnapshotClusterIdentifier** *(string) --*
            A unique identifier for the cluster.
          - **FailureCode** *(string) --*
            The failure code for the error.
          - **FailureReason** *(string) --*
            The text message describing the error.
    :type SnapshotIdentifierList: list
    :param SnapshotIdentifierList: **[REQUIRED]**
      A list of snapshot identifiers you want to modify.
      - *(string) --*
    :type ManualSnapshotRetentionPeriod: integer
    :param ManualSnapshotRetentionPeriod:
      The number of days that a manual snapshot is retained. If you specify the value -1, the manual snapshot is retained indefinitely.
      The number must be either -1 or an integer between 1 and 3,653.
      If you decrease the manual snapshot retention period from its current value, existing manual snapshots that fall outside of the new retention period will return an error. If you want to suppress the errors and delete the snapshots, use the force option.
    :type Force: boolean
    :param Force:
      A boolean value indicating whether to override an exception if the retention period has passed.
    :rtype: dict
    :returns:
    """
    pass
def can_paginate(self, operation_name: str = None):
    """
    Check if an operation can be paginated.
    :type operation_name: string
    :param operation_name: The operation name. This is the same name
    as the method name on the client. For example, if the
    method name is ``create_foo``, and you\'d normally invoke the
    operation as ``client.create_foo(**kwargs)``, if the
    ``create_foo`` operation can be paginated, you can use the
    call ``client.get_paginator(\"create_foo\")``.
    :return: ``True`` if the operation can be paginated,
    ``False`` otherwise.
    """
    # Generated stub: the real predicate is supplied by botocore at runtime,
    # so this placeholder deliberately does nothing and yields None.
    return None
def cancel_resize(self, ClusterIdentifier: str) -> Dict:
    # NOTE: auto-generated client stub for the Redshift ``CancelResize``
    # operation. The docstring documents the wire contract; the concrete
    # implementation is bound by botocore at runtime, so the body is a no-op.
    """
    Cancels a resize operation.
    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/redshift-2012-12-01/CancelResize>`_
    **Request Syntax**
    ::
      response = client.cancel_resize(
          ClusterIdentifier='string'
      )
    **Response Syntax**
    ::
      {
          'TargetNodeType': 'string',
          'TargetNumberOfNodes': 123,
          'TargetClusterType': 'string',
          'Status': 'string',
          'ImportTablesCompleted': [
              'string',
          ],
          'ImportTablesInProgress': [
              'string',
          ],
          'ImportTablesNotStarted': [
              'string',
          ],
          'AvgResizeRateInMegaBytesPerSecond': 123.0,
          'TotalResizeDataInMegaBytes': 123,
          'ProgressInMegaBytes': 123,
          'ElapsedTimeInSeconds': 123,
          'EstimatedTimeToCompletionInSeconds': 123,
          'ResizeType': 'string',
          'Message': 'string',
          'TargetEncryptionType': 'string',
          'DataTransferProgressPercent': 123.0
      }
    **Response Structure**
    - *(dict) --*
      Describes the result of a cluster resize operation.
      - **TargetNodeType** *(string) --*
        The node type that the cluster will have after the resize operation is complete.
      - **TargetNumberOfNodes** *(integer) --*
        The number of nodes that the cluster will have after the resize operation is complete.
      - **TargetClusterType** *(string) --*
        The cluster type after the resize operation is complete.
        Valid Values: ``multi-node`` | ``single-node``
      - **Status** *(string) --*
        The status of the resize operation.
        Valid Values: ``NONE`` | ``IN_PROGRESS`` | ``FAILED`` | ``SUCCEEDED`` | ``CANCELLING``
      - **ImportTablesCompleted** *(list) --*
        The names of tables that have been completely imported .
        Valid Values: List of table names.
        - *(string) --*
      - **ImportTablesInProgress** *(list) --*
        The names of tables that are being currently imported.
        Valid Values: List of table names.
        - *(string) --*
      - **ImportTablesNotStarted** *(list) --*
        The names of tables that have not been yet imported.
        Valid Values: List of table names
        - *(string) --*
      - **AvgResizeRateInMegaBytesPerSecond** *(float) --*
        The average rate of the resize operation over the last few minutes, measured in megabytes per second. After the resize operation completes, this value shows the average rate of the entire resize operation.
      - **TotalResizeDataInMegaBytes** *(integer) --*
        The estimated total amount of data, in megabytes, on the cluster before the resize operation began.
      - **ProgressInMegaBytes** *(integer) --*
        While the resize operation is in progress, this value shows the current amount of data, in megabytes, that has been processed so far. When the resize operation is complete, this value shows the total amount of data, in megabytes, on the cluster, which may be more or less than TotalResizeDataInMegaBytes (the estimated total amount of data before resize).
      - **ElapsedTimeInSeconds** *(integer) --*
        The amount of seconds that have elapsed since the resize operation began. After the resize operation completes, this value shows the total actual time, in seconds, for the resize operation.
      - **EstimatedTimeToCompletionInSeconds** *(integer) --*
        The estimated time remaining, in seconds, until the resize operation is complete. This value is calculated based on the average resize rate and the estimated amount of data remaining to be processed. Once the resize operation is complete, this value will be 0.
      - **ResizeType** *(string) --*
        An enum with possible values of ``ClassicResize`` and ``ElasticResize`` . These values describe the type of resize operation being performed.
      - **Message** *(string) --*
        An optional string to provide additional details about the resize action.
      - **TargetEncryptionType** *(string) --*
        The type of encryption for the cluster after the resize is complete.
        Possible values are ``KMS`` and ``None`` . In the China region possible values are: ``Legacy`` and ``None`` .
      - **DataTransferProgressPercent** *(float) --*
        The percent of data transferred from source cluster to target cluster.
    :type ClusterIdentifier: string
    :param ClusterIdentifier: **[REQUIRED]**
      The unique identifier for the cluster that you want to cancel a resize operation for.
    :rtype: dict
    :returns:
    """
    pass
def copy_cluster_snapshot(self, SourceSnapshotIdentifier: str, TargetSnapshotIdentifier: str, SourceSnapshotClusterIdentifier: str = None, ManualSnapshotRetentionPeriod: int = None) -> Dict:
"""
Copies the specified automated cluster snapshot to a new manual cluster snapshot. The source must be an automated snapshot and it must be in the available state.
When you delete a cluster, Amazon Redshift deletes any automated snapshots of the cluster. Also, when the retention period of the snapshot expires, Amazon Redshift automatically deletes it. If you want to keep an automated snapshot for a longer period, you can make a manual copy of the snapshot. Manual snapshots are retained until you delete them.
For more information about working with snapshots, go to `Amazon Redshift Snapshots <https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-snapshots.html>`__ in the *Amazon Redshift Cluster Management Guide* .
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/redshift-2012-12-01/CopyClusterSnapshot>`_
**Request Syntax**
::
response = client.copy_cluster_snapshot(
SourceSnapshotIdentifier='string',
SourceSnapshotClusterIdentifier='string',
TargetSnapshotIdentifier='string',
ManualSnapshotRetentionPeriod=123
)
**Response Syntax**
::
{
'Snapshot': {
'SnapshotIdentifier': 'string',
'ClusterIdentifier': 'string',
'SnapshotCreateTime': datetime(2015, 1, 1),
'Status': 'string',
'Port': 123,
'AvailabilityZone': 'string',
'ClusterCreateTime': datetime(2015, 1, 1),
'MasterUsername': 'string',
'ClusterVersion': 'string',
'SnapshotType': 'string',
'NodeType': 'string',
'NumberOfNodes': 123,
'DBName': 'string',
'VpcId': 'string',
'Encrypted': True|False,
'KmsKeyId': 'string',
'EncryptedWithHSM': True|False,
'AccountsWithRestoreAccess': [
{
'AccountId': 'string',
'AccountAlias': 'string'
},
],
'OwnerAccount': 'string',
'TotalBackupSizeInMegaBytes': 123.0,
'ActualIncrementalBackupSizeInMegaBytes': 123.0,
'BackupProgressInMegaBytes': 123.0,
'CurrentBackupRateInMegaBytesPerSecond': 123.0,
'EstimatedSecondsToCompletion': 123,
'ElapsedTimeInSeconds': 123,
'SourceRegion': 'string',
'Tags': [
{
'Key': 'string',
'Value': 'string'
},
],
'RestorableNodeTypes': [
'string',
],
'EnhancedVpcRouting': True|False,
'MaintenanceTrackName': 'string',
'ManualSnapshotRetentionPeriod': 123,
'ManualSnapshotRemainingDays': 123,
'SnapshotRetentionStartTime': datetime(2015, 1, 1)
}
}
**Response Structure**
- *(dict) --*
- **Snapshot** *(dict) --*
Describes a snapshot.
- **SnapshotIdentifier** *(string) --*
The snapshot identifier that is provided in the request.
- **ClusterIdentifier** *(string) --*
The identifier of the cluster for which the snapshot was taken.
- **SnapshotCreateTime** *(datetime) --*
The time (in UTC format) when Amazon Redshift began the snapshot. A snapshot contains a copy of the cluster data as of this exact time.
- **Status** *(string) --*
The snapshot status. The value of the status depends on the API operation used:
* CreateClusterSnapshot and CopyClusterSnapshot returns status as "creating".
* DescribeClusterSnapshots returns status as "creating", "available", "final snapshot", or | |
import functools
import warnings
from pathlib import Path
import numpy as np
import pandas as pd
from estimagic import batch_evaluators as be
from estimagic.config import CRITERION_PENALTY_CONSTANT
from estimagic.config import CRITERION_PENALTY_SLOPE
from estimagic.logging.database_utilities import append_row
from estimagic.logging.database_utilities import load_database
from estimagic.logging.database_utilities import make_optimization_iteration_table
from estimagic.logging.database_utilities import make_optimization_problem_table
from estimagic.logging.database_utilities import make_steps_table
from estimagic.optimization.check_arguments import check_optimize_kwargs
from estimagic.optimization.get_algorithm import get_algorithm
from estimagic.optimization.internal_criterion_template import (
internal_criterion_and_derivative_template,
)
from estimagic.optimization.optimization_logging import log_scheduled_steps_and_get_ids
from estimagic.optimization.process_results import process_internal_optimizer_result
from estimagic.optimization.scaling import calculate_scaling_factor_and_offset
from estimagic.optimization.tiktak import get_internal_sampling_bounds
from estimagic.optimization.tiktak import run_multistart_optimization
from estimagic.optimization.tiktak import WEIGHT_FUNCTIONS
from estimagic.parameters.parameter_conversion import get_derivative_conversion_function
from estimagic.parameters.parameter_conversion import get_internal_bounds
from estimagic.parameters.parameter_conversion import get_reparametrize_functions
from estimagic.parameters.parameter_preprocessing import add_default_bounds_to_params
from estimagic.parameters.parameter_preprocessing import check_params_are_valid
from estimagic.parameters.process_constraints import process_constraints
from estimagic.utilities import hash_array
def maximize(
criterion,
params,
algorithm,
*,
criterion_kwargs=None,
constraints=None,
algo_options=None,
derivative=None,
derivative_kwargs=None,
criterion_and_derivative=None,
criterion_and_derivative_kwargs=None,
numdiff_options=None,
logging=False,
log_options=None,
error_handling="raise",
error_penalty=None,
cache_size=100,
scaling=False,
scaling_options=None,
multistart=False,
multistart_options=None,
):
"""Maximize criterion using algorithm subject to constraints.
Args:
criterion (callable): A function that takes a pandas DataFrame (see
:ref:`params`) as first argument and returns one of the following:
- scalar floating point or a :class:`numpy.ndarray` (depending on the
algorithm)
- a dictionary that contains at the entries "value" (a scalar float),
"contributions" or "root_contributions" (depending on the algorithm) and
any number of additional entries. The additional dict entries will be
logged and (if supported) displayed in the dashboard. Check the
documentation of your algorithm to see which entries or output type are
required.
params (pandas.DataFrame): A DataFrame with a column called "value" and optional
additional columns. See :ref:`params` for detail.
algorithm (str or callable): Specifies the optimization algorithm. For supported
algorithms this is a string with the name of the algorithm. Otherwise it can
be a callable with the estimagic algorithm interface. See :ref:`algorithms`.
criterion_kwargs (dict): Additional keyword arguments for criterion
constraints (list): List with constraint dictionaries.
See .. _link: ../../docs/source/how_to_guides/how_to_use_constraints.ipynb
algo_options (dict): Algorithm specific configuration of the optimization. See
:ref:`list_of_algorithms` for supported options of each algorithm.
derivative (callable, optional): Function that calculates the first derivative
of criterion. For most algorithm, this is the gradient of the scalar
output (or "value" entry of the dict). However some algorithms (e.g. bhhh)
require the jacobian of the "contributions" entry of the dict. You will get
an error if you provide the wrong type of derivative.
derivative_kwargs (dict): Additional keyword arguments for derivative.
criterion_and_derivative (callable): Function that returns criterion
and derivative as a tuple. This can be used to exploit synergies in the
evaluation of both functions. The first element of the tuple has to be
exactly the same as the output of criterion. The second has to be exactly
the same as the output of derivative.
criterion_and_derivative_kwargs (dict): Additional keyword arguments for
criterion and derivative.
numdiff_options (dict): Keyword arguments for the calculation of numerical
derivatives. See :ref:`first_derivative` for details. Note that the default
method is changed to "forward" for speed reasons.
logging (pathlib.Path, str or False): Path to sqlite3 file (which typically has
the file extension ``.db``. If the file does not exist, it will be created.
When doing parallel optimizations and logging is provided, you have to
provide a different path for each optimization you are running. You can
disable logging completely by setting it to False, but we highly recommend
not to do so. The dashboard can only be used when logging is used.
log_options (dict): Additional keyword arguments to configure the logging.
- "fast_logging": A boolean that determines if "unsafe" settings are used
to speed up write processes to the database. This should only be used for
very short running criterion functions where the main purpose of the log
is a real-time dashboard and it would not be catastrophic to get a
corrupted database in case of a sudden system shutdown. If one evaluation
of the criterion function (and gradient if applicable) takes more than
100 ms, the logging overhead is negligible.
- "if_table_exists": (str) One of "extend", "replace", "raise". What to
do if the tables we want to write to already exist. Default "extend".
- "if_database_exists": (str): One of "extend", "replace", "raise". What to
do if the database we want to write to already exists. Default "extend".
error_handling (str): Either "raise" or "continue". Note that "continue" does
not absolutely guarantee that no error is raised but we try to handle as
many errors as possible in that case without aborting the optimization.
error_penalty (dict): Dict with the entries "constant" (float) and "slope"
(float). If the criterion or gradient raise an error and error_handling is
"continue", return ``constant + slope * norm(params - start_params)`` where
``norm`` is the euclidean distance as criterion value and adjust the
derivative accordingly. This is meant to guide the optimizer back into a
valid region of parameter space (in direction of the start parameters).
Note that the constant has to be high enough to ensure that the penalty is
actually a bad function value. The default constant is f0 + abs(f0) + 100
for minimizations and f0 - abs(f0) - 100 for maximizations, where
f0 is the criterion value at start parameters. The default slope is 0.1.
cache_size (int): Number of criterion and derivative evaluations that are cached
in memory in case they are needed.
scaling (bool): If True, the parameter vector is rescaled internally for
better performance with scale sensitive optimizers.
scaling_options (dict or None): Options to configure the internal scaling of
the parameter vector. See :ref:`scaling` for details and recommendations.
multistart (bool): Whether to do the optimization from multiple starting points.
Requires the params to have the columns ``"soft_lower_bound"`` and
``"soft_upper_bounds"`` with finite values for all parameters, unless
the standard bounds are already finite for all parameters.
multistart_options (dict): Options to configure the optimization from multiple
starting values. The dictionary has the following entries
(all of which are optional):
- n_samples (int): Number of sampled points on which to do one function
evaluation. Default is 10 * n_params.
- sample (pandas.DataFrame or numpy.ndarray) A user defined sample.
If this is provided, n_samples, sampling_method and sampling_distribution
are not used.
- share_optimizations (float): Share of sampled points that is used to
construct a starting point for a local optimization. Default 0.1.
- sampling_distribution (str): One of "uniform", "triangle". Default is
"uniform" as in the original tiktak algorithm.
- sampling_method (str): One of "random", "sobol", "halton", "hammersley",
"korobov", "latin_hypercube" or a numpy array or DataFrame with custom
points. Default is sobol for problems with up to 30 parameters and random
for problems with more than 30 parameters.
- mixing_weight_method (str or callable): Specifies how much weight is put
on the currently best point when calculating a new starting point for a
local optimization out of the currently best point and the next random
starting point. Either "tiktak" or "linear" or a callable that takes the
arguments ``iteration``, ``n_iterations``, ``min_weight``, ``max_weight``.
Default "tiktak".
- mixing_weight_bounds (tuple): A tuple consisting of a lower and upper
bound on mixing weights. Default (0.1, 0.995).
- convergence_max_discoveries (int): The multistart optimization converges
if the currently best local optimum has been discovered independently in
``convergence_max_discoveries`` many local optimizations. Default 2.
- convergence.relative_params_tolerance (float): Determines the maximum
relative distance two parameter vectors can have to be considered equal
for convergence purposes.
- n_cores (int): Number cores used to evaluate the criterion function in
parallel during exploration stages and number of parallel local
optimization in optimization stages. Default 1.
- batch_evaluator (str or callable): See :ref:`batch_evaluators` for
details. Default "joblib".
- batch_size (int): If n_cores is larger than one, several starting points
for local optimizations are created with the same weight and from the same
currently best point. The ``batch_size`` argument is a way to reproduce
this behavior on a small machine where less cores are available. By
default the batch_size is equal to ``n_cores``. It can never be smaller
than ``n_cores``.
- seed (int): Random seed for the creation of starting values. Default None.
- exploration_error_handling (str): One of "raise" or "continue". Default
is continue, which means that failed function evaluations are simply
| |
# -*- coding: utf-8 -*-
######################################################
# PROJECT : Anuvaad PDF Paragraph Parser
# AUTHOR : <NAME>
# DATE : Apr 27, 2020
######################################################
import ast
import cv2
import io
import numpy
import os
import re
import pyspark.sql.functions as F
import pyspark.sql.types as T
from PIL import Image
from bs4 import BeautifulSoup
from pyspark.sql import SQLContext
from pyspark.sql import SparkSession
from pyspark.sql import Window
from pyspark.sql.types import ArrayType, BooleanType, MapType, StringType, StructType, StructField, IntegerType
'''
This is the scalable framework for the paragraph extraction process from PDF for
Anuvaad pipeline. The code reads the input pdf files and converts into HTML files
pagewise and also generates respective images containing the lines which acts as
the background images for the HTML pages.
There is an external dependency on pdftohtml tool.
Please read the general documentation for Anuvaad before going through the code.
The code has been tested on HDP 3.1.4 and Airflow 1.10.7 stack.
'''
'''
---------------------------------------
SPARK SESSION CREATION
---------------------------------------
'''
# Driver-side Spark session; the appName is what shows up in the Spark/YARN UI.
spark = SparkSession \
    .builder \
    .appName("Anuvaad_Pdf_Para_Extractor") \
    .getOrCreate()
# Log through the JVM's log4j so messages land in the standard Spark logs.
LOG4JLOGGER = spark.sparkContext._jvm.org.apache.log4j
LOGGER = LOG4JLOGGER.LogManager.getLogger(__name__)
LOGGER.info("========================================================")
LOGGER.info("Starting the Paragraph Extraction process for Anuvaad...")
LOGGER.info("========================================================")
'''
---------------------------------------
VARIABLES
---------------------------------------
'''
# Input/output locations.
# NOTE(review): hard-coded developer-local paths; these should be externalized
# to configuration before deployment.
CONVERTED_OUT_DIR = "/Users/TIMAC044/Documents/Anuvaad/converted_htmls/ex1/*"
# Util class for identifying a rectangle
RECT_UTIL = '/Users/TIMAC044/Documents/Anuvaad/table-detection/rect.py'
# Util class for identifying a table
TABLE_UTIL = '/Users/TIMAC044/Documents/Anuvaad/table-detection/table.py'
INPUT_HTML_DOCS = CONVERTED_OUT_DIR + "--25.html"
INPUT_IMAGES = CONVERTED_OUT_DIR + "*.png"
NUM_PARTITIONS = 10
# Define Regex
END_OF_SENT_REGEX = """(([\"|”|,|a-zA-Z|0-9|.]{3,}[.|?|!|\"|”|:|;]|([:][ ][-]))$)"""
STYLE_REGEX = """.(.*){(.*)font-size:([0-9]*)px;(.*)font-family:(.*);(.*)color:(.*);(.*)"""
TOP_REGEX = """(.*)(top:)([0-9]*)(.*)"""
LEFT_REGEX = """(.*)(left:)([0-9]*)(.*)"""
PAGE_NUM_POSSIBLE_FRMT = """(Page|page)(\s)([0-9]*)(\s)(of)(\s)([0-9]*)"""
# Abbreviation lookup lists grouped by token length, used so trailing periods
# in abbreviations are not mistaken for sentence terminators.
ABBRIVATIONS2 = [' no.', ' mr.', ' ft.', ' kg.', ' dr.', ' ms.', ' st.', ' pp.', ' co.', ' rs.', ' sh.', ' vs.', ' ex.']
# BUGFIX: removed the duplicate ' pty.' entry present in the original list.
ABBRIVATIONS3 = [' pvt.', ' nos.', ' smt.', ' sec.', ' spl.', ' kgs.', ' ltd.', ' pty.', ' vol.', ' m/s.', ' mrs.', ' i.e.', ' etc.', ' (ex.', ' o.s.', ' anr.', ' ors.', ' c.a.']
ABBRIVATIONS4 = [' assn.']
ABBRIVATIONS6 = [' w.e.f.']
# Add Util classes to the Spark Context (shipped to every executor so the
# UDFs can `import rect` / `import table` on the worker side).
spark.sparkContext.addPyFile(RECT_UTIL)
spark.sparkContext.addPyFile(TABLE_UTIL)
# Define sql Context
sqlContext = SQLContext(sparkContext=spark.sparkContext, sparkSession=spark)
# Define String to Array UDF
def parse_array_from_string(x):
    """Deserialize a stringified Python list (e.g. "['a', 'b']") back into a list."""
    parsed = ast.literal_eval(x)
    return parsed
RETRIEVE_ARRAY_UDF = F.udf(parse_array_from_string, T.ArrayType(T.StringType()))
# Define HTML Parsing UDF
def parse_html_tags(x):
    """Parse one pdftohtml page and return ([page_no], [serialized content list]).

    Each content entry is a tab-separated string:
    ``class \t resolved-style \t inline-style \t text``. The list is serialized
    with str() so it survives the string UDF column and can be restored later
    with ast.literal_eval (see RETRIEVE_ARRAY_UDF).
    """
    page_no_list = []
    content_list = []
    soup = BeautifulSoup(x, 'html.parser')
    # Extract the numeric value alone; assumes a "Page N" <title> as emitted
    # by pdftohtml — TODO confirm for other converters.
    page_no_list.append(str(soup.title.string).split()[1])
    # ######################
    # MAP FOR STYLES - Extract Font size, family & color
    # ######################
    style_map = dict()
    for line in soup.find_all("style"):
        for entry in line.text.split():
            style_values = re.search(STYLE_REGEX, entry, re.IGNORECASE)
            if style_values:
                # class name -> "<font-size><font-family><color>" fingerprint
                style_map[style_values.group(1)] = (style_values.group(3) + style_values.group(5) + style_values.group(7))
    formatted_text = []
    for line in soup.find_all("p"):
        # Remove unwanted tags
        clean_line = str(line).replace('<br/>','') \
            .replace('<i>','') \
            .replace('<i/>','') \
            .replace('</i>','') \
            .replace('<b>','') \
            .replace('</b>','') \
            .replace('<b/>','')
        # Make the sentence single-spaced and remove leading/trailing spaces
        clean_line = ' '.join(clean_line.split()).strip()
        # Extract the required sections (class, mapped style, p style, actual text)
        para_reg = re.search('<p class="(.*)" style=\"(.*)\">(.*)</p>', clean_line)
        # BUGFIX: skip paragraphs that do not match the expected pattern;
        # previously a non-matching <p> raised AttributeError on a None match.
        if para_reg is None:
            continue
        formatted_text += ['{0}\t{1}\t{2}\t{3}'.format(para_reg.group(1), style_map.get(para_reg.group(1)), para_reg.group(2), para_reg.group(3))]
    content_list.append(str(formatted_text))
    return page_no_list, content_list
# Struct returned by parse_html_tags: parallel arrays of page numbers and the
# serialized per-page content lists.
parse_html_tags_schema = T.StructType([
    T.StructField('page_no', T.ArrayType(T.StringType()), False),
    T.StructField('contents', T.ArrayType(T.StringType()), False),
])
PARSE_HTML_TAGS_UDF = F.udf(parse_html_tags, parse_html_tags_schema)
# Define the UDF to get the line coordinates from an image
def extract_line_coords(binary_images):
    """Locate horizontal ruling lines in one page image.

    Args:
        binary_images: (filepath, raw_image_bytes) pair as produced by
            sparkContext.binaryFiles().

    Returns:
        (basename, segments) where segments is a list of (x, y, w, h)
        bounding boxes for detected horizontal lines wider than 50 px.
    """
    path, raw_bytes = binary_images
    page = Image.open(io.BytesIO(raw_bytes)).convert('RGB')
    bgr = numpy.array(page)[:, :, ::-1].copy()  # PIL RGB -> OpenCV BGR
    gray = cv2.cvtColor(bgr, cv2.COLOR_BGR2GRAY)
    MAX_THRESHOLD_VALUE = 255
    BLOCK_SIZE = 15
    THRESHOLD_CONSTANT = 0
    SCALE = 15
    # Binarize the inverted grayscale page with a mean adaptive threshold.
    binarized = cv2.adaptiveThreshold(~gray, MAX_THRESHOLD_VALUE, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, BLOCK_SIZE, THRESHOLD_CONSTANT)
    horizontal = binarized.copy()
    kernel_width = int(horizontal.shape[1] / SCALE)
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (kernel_width, 1))
    # Isolate long horizontal runs: erosion removes small white specks,
    # dilation restores the surviving line segments (both in place).
    cv2.erode(horizontal, kernel, horizontal, (-1, -1))
    cv2.dilate(horizontal, kernel, horizontal, (-1, -1))
    found = cv2.findContours(horizontal, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # findContours returns 2 or 3 values depending on the OpenCV version.
    found = found[0] if len(found) == 2 else found[1]
    segments = []
    for contour in found:
        x, y, w, h = cv2.boundingRect(contour)
        if w > 50:
            segments.append((x, y, w, h))
    return os.path.basename(path), segments
# Convert PDF to HTML
# NEED TO BE MODIFIED TO INVOKE IN CMD LINE (UNIX)
#pdftohtml -p -c nltk_data/corpora/framenet_v15/docs/book.pdf CONVERTED_OUT_DIR
# Load each page-level HTML file as one (filepath, full text) record.
rdd = spark.sparkContext.wholeTextFiles(INPUT_HTML_DOCS, NUM_PARTITIONS)
# Confirm the number of Partitions
# BUGFIX: the format() call was previously embedded inside the string literal,
# so the partition count was never interpolated into the log line.
LOGGER.info("Number of partitions : {0}".format(rdd.getNumPartitions()))
# glom()- return an RDD created by coalescing all elements within each partition into a list
# print(rdd.glom().collect())
# Convert to DF
SCHEMA_INPUT_FILE = StructType([StructField("filepath", StringType(), True), StructField("html_page_tags", StringType(), True)])
HTML_DF = sqlContext.createDataFrame(rdd, SCHEMA_INPUT_FILE)
#Invoke the UDF to parse out the HTML tags
APPEND_DF = HTML_DF.withColumn("metadata", PARSE_HTML_TAGS_UDF(HTML_DF.html_page_tags)) \
    .select(HTML_DF.filepath, F.col('metadata.*'))
# Pair page numbers with their page contents and emit one row per page.
EXPLODED_DF = APPEND_DF.withColumn("tmp", F.arrays_zip("page_no", "contents")) \
    .withColumn("tmp", F.explode("tmp")) \
    .select(APPEND_DF.filepath, F.col("tmp.page_no"), F.col("tmp.contents"))
# UDF to extract the filename from the File path
FILENAME_EXT_UDF = F.udf(lambda f : re.match(r'(.*/)(.*)(--[0-9]*)(\.html)', f).group(2), StringType())
TRIM_FN_DF = EXPLODED_DF.select(FILENAME_EXT_UDF(EXPLODED_DF.filepath).alias("filename"), EXPLODED_DF.page_no, EXPLODED_DF.contents)
# Section identifies the style of the paragraph (constant across the entire doc)
EXPLODED_SENT_DF = TRIM_FN_DF.select("filename", "page_no", F.explode(RETRIEVE_ARRAY_UDF(F.col("contents"))))
split_col = F.split(EXPLODED_SENT_DF['col'], '\t')
EXPLODED_SENT_DF = EXPLODED_SENT_DF.withColumn("style_key", split_col.getItem(1))
STYLE_COUNT_DF = EXPLODED_SENT_DF.groupBy("filename", "style_key").count()
# Identify the para style for each of the PDFs.
# The most frequent style per file is assumed to be the body-paragraph style.
window = Window.partitionBy("filename").orderBy(F.desc("count"))
MAX_OCCURANCE_DF = STYLE_COUNT_DF.withColumn("rank", F.rank().over(window)).filter('rank==1') \
    .select("filename", F.col("style_key").alias("para_style_key"))
#para_style = MAX_OCCURANCE_DF.select('filename', 'style_key').first().style_key
LINES_WITH_STYLEID_DF = TRIM_FN_DF.join(MAX_OCCURANCE_DF, TRIM_FN_DF.filename == MAX_OCCURANCE_DF.filename) \
    .select(TRIM_FN_DF.filename, TRIM_FN_DF.page_no, TRIM_FN_DF.contents, MAX_OCCURANCE_DF.para_style_key)
#LINES_WITH_STYLEID_DF.show()
# ######################################
# Extracting Footer lines from Images
# ######################################
# group(1) = file id, group(4) = page number (leading zeros stripped by group 3).
IMG_FILENAME_UTILS_UDF = F.udf(lambda f,n : re.match(r'(.*)(-)(0*)(.*)(\.png)', f).group(n), StringType())
IMAGES_RDD = spark.sparkContext.binaryFiles(INPUT_IMAGES)
LINE_COORD_DF = IMAGES_RDD.map(lambda img: extract_line_coords(img)).toDF()
#LINE_COORD_EXP_DF = LINE_COORD_DF.select("_1", F.explode("_2"))
LINE_COORD_EXP_DF = LINE_COORD_DF.select(F.col("_1").alias("filename"), \
    F.explode(F.col("_2")).alias("coord"))
LINE_COORD_EXP_DF = LINE_COORD_EXP_DF.select(F.col("filename"), \
    IMG_FILENAME_UTILS_UDF(F.col("filename"), F.lit(1)).alias("file_id"), \
    IMG_FILENAME_UTILS_UDF(F.col("filename"), F.lit(4)).alias("page_num"), \
    F.col("coord._1").alias("x"), \
    F.col("coord._2").alias("y"), \
    F.col("coord._3").alias("w"), \
    F.col("coord._4").alias("h"))
LINE_COORD_EXP_DF.createOrReplaceTempView('line_coord_exp_df')
# Condition for Footer line.
# A line is treated as a footer separator when it sits low on the page
# (min_y > 1000) and either appears once or spans a tall vertical range.
footer_list = spark.sql("""
SELECT
file_id, page_num, x, w,
COUNT(*) cnt,
MIN(y) min_y,
MAX(y) max_y
FROM line_coord_exp_df
GROUP BY file_id, page_num, x, w
HAVING (min_y > 1000 and (cnt==1 OR max_y-min_y>150))
ORDER BY file_id ASC, cast(page_num AS int) ASC
""").collect()
# Broadcast for executor-side use; also build a driver-side lookup of
# "<file_id>#<page_num>" -> footer top y coordinate.
ftr = spark.sparkContext.broadcast(footer_list)
footer_coord_lookup = dict()
for f in footer_list:
    #print("{0}\t{1}\t{2}".format(f.file_id, f.page_num, f.min_y))
    footer_coord_lookup[f.file_id + "#" + str(f.page_num)] = f.min_y
# ######################################
# Extracting Tables from Images
# ######################################
def extract_table_coords(image):
    """Detect table cells in one page image.

    :param image: (name, raw bytes) pair as produced by binaryFiles().
    :return: (basename of the image, [(row, col, x, y, w, h), ...]) where
        cell coordinates are translated to absolute page coordinates.
    """
    from rect import RectRepositories
    from table import TableRepositories
    name, img = image
    pil_image = Image.open(io.BytesIO(img)).convert('RGB')
    open_cv_image = numpy.array(pil_image)
    # PIL yields RGB; OpenCV-style code expects BGR, hence the channel flip.
    open_cv_image = open_cv_image[:, :, ::-1].copy()
    Rects = RectRepositories(open_cv_image)
    # NOTE(review): this result is discarded -- `lines` is reset to [] below.
    # Kept in case get_tables_and_lines() has required side effects on Rects;
    # confirm and remove the call if it has none.
    lines, _ = Rects.get_tables_and_lines ()
    table = None
    TableRepo = TableRepositories(open_cv_image, table)
    tables = TableRepo.response ['response'] ['tables']
    lines = []
    for table in tables:
        # Each table carries its own origin; cell coords are relative to it.
        base_x = int(table.get('x'))
        base_y = int(table.get('y'))
        for t in table.get('rect'):
            x = base_x + int(t['x'])
            y = base_y + int(t['y'])
            w = int(t['w'])
            h = int(t['h'])
            row = int(t['row'])
            col = int(t['col'])
            lines.append((row, col, x, y, w, h))
    return os.path.basename(name), lines
# For each of the images, extract the coordinates along with row & column
TABLE_COORD_DF = IMAGES_RDD.map(lambda img: extract_table_coords(img)).toDF()
# Explode the cells (one row per cell)
TABLE_COORD_EXP_DF = TABLE_COORD_DF.select(F.col("_1").alias("filename"), \
    F.explode(F.col("_2")).alias("coord"))
# Final view
# Split the image name into file id / page number and flatten the
# (row, col, x, y, w, h) struct into real columns.
FINAL_TABLE_COORD_DF = TABLE_COORD_EXP_DF.select("filename", \
    IMG_FILENAME_UTILS_UDF(F.col("filename"), F.lit(1)).alias("file_id"), \
    IMG_FILENAME_UTILS_UDF(F.col("filename"), F.lit(4)).alias("page_num"), \
    F.col("coord._1").alias("row"), \
    F.col("coord._2").alias("col"), \
    F.col("coord._3").alias("x"), \
    F.col("coord._4").alias("y"), \
    F.col("coord._5").alias("w"), \
    F.col("coord._6").alias("h"))
# FINAL_TABLE_COORD_DF.show(500, False)
FINAL_TABLE_COORD_DF.createOrReplaceTempView('final_table_coord_df')
# All table cells, ordered by file, page and vertical position.
table_list = spark.sql("""
SELECT
file_id, page_num, row, col, x, y, w, h
FROM
final_table_coord_df
ORDER BY
file_id ASC, cast(page_num AS int) ASC, y ASC
""").collect()
# NOTE(review): broadcast never read below; the driver-side dict is used
# instead.  Possibly dead code; confirm before removing.
tlist = spark.sparkContext.broadcast(table_list)
table_coord_lookup = dict()
# Build "<file_id>#<page>" -> [cell dicts] for fast per-page lookups.
for f in table_list:
    #print("{0}\t{1}\t{2}\t{3}\t{4}\t{5}\t{6}\t{7}".format(f.file_id, f.page_num, f.row, f.col, f.x, f.y, f.w, f.h))
    cell_record = dict()
    cell_record['row'] = f.row
    cell_record['col'] = f.col
    cell_record['x'] = f.x
    cell_record['y'] = f.y
    cell_record['w'] = f.w
    cell_record['h'] = f.h
    lookup_key = f.file_id + "#" + str(f.page_num)
    # if key is already available, append the cell record to the list.
    if lookup_key in table_coord_lookup:
        existing_list = table_coord_lookup[lookup_key]
        existing_list.append(cell_record)
    # else create a new list with the cell record and insert into the lookup.
    else:
        cell_list = list()
        cell_list.append(cell_record)
        table_coord_lookup[lookup_key] = cell_list
# content is the set of lines containing class, style_key, line style & line text
# doc_p_style is the derived paragraph style for the entire document.
def parse_para(content, file_id, page_no, doc_p_style):
out_para_list = []
# Pick the first style for the entire para
out_style_list = []
out_page_num_list = []
out_y_end_list = []
out_para_position_list = []
out_sup_list = []
lines = parse_array_from_string(content)
para_text = ''
para_style = ''
indexed_prev_top_val = -1
curr_top_val = ''
processed_index = 0
l_class_val_list = []
l_style_key_list = []
l_p_style_list = []
l_p_text_list = []
l_top_list = []
l_left_list = []
len_page = len(lines)
f_min_y = "-1"
table_coords = list()
img_page_key = file_id + "#" + str(page_no)
is_table = False
# Header
if img_page_key in footer_coord_lookup.keys():
f_min_y = footer_coord_lookup[img_page_key]
# Table
if img_page_key in table_coord_lookup.keys():
table_coords = table_coord_lookup[img_page_key]
for line in lines:
parts = line.split('\t')
l_class_val_list.append(parts[0].strip())
l_style_key_list.append(parts[1].strip())
l_p_style_list.append(parts[2].strip())
l_p_text_list.append(parts[3].strip())
# Extract Top Value
style_values = re.search(TOP_REGEX, parts[2], re.IGNORECASE)
if style_values:
l_top_list.append(style_values.group(3))
else:
l_top_list.append("-1")
# Extract Left Value
style_values = re.search(LEFT_REGEX, parts[2], re.IGNORECASE)
if style_values:
l_left_list.append(style_values.group(3))
else:
l_left_list.append("-1")
# Logic to find last para line of the page.
last_line_index = len_page - 1
| |
** 2
# 2) vx/vy = dx/dy
vy = sqrt(1600 / (1 + dx**2/dy**2)) if dy != 0 else 0
vx = sqrt(1600 / (1 + dy**2/dx**2)) if dx != 0 else 0
self.owner.spawner.spawn('tall_spark', (2 + 2*dx_sign,
7 + 2*dy_sign),
vx=vx * dx_sign,
vy=vy * dy_sign,
**kwargs)
self.dispatcher.add_event(BearEvent('play_sound',
'spark'))
    def __repr__(self):
        # Serialise as the parent component's JSON payload plus this
        # component's `range`, so the entity can be rebuilt between runs.
        d = loads(super().__repr__())
        d['range'] = self.range
        return dumps(d)
class HealerPowerInteractionComponent(PowerInteractionComponent):
    """
    A power node that fires healing projectiles at random velocities.

    The owner must carry a SpawnerComponent, and its widget must be a
    SwitchWidget offering 'powered' and 'unpowered' images.
    """
    def get_power(self):
        super().get_power()
        self.owner.widget.switch_to_image('powered')

    def take_action(self, *args, **kwargs):
        # Random velocity; a zero component gets a random spawn side instead.
        vx = randint(-10, 10)
        vy = randint(-10, 10)
        self.dispatcher.add_event(BearEvent('play_sound', 'balloon'))
        if vx != 0:
            offset_x = 5 * vx // abs(vx)
        else:
            offset_x = choice((5, -5))
        if vy != 0:
            offset_y = 5 * vy // abs(vy)
        else:
            offset_y = choice((5, -5))
        self.owner.spawner.spawn('healing_projectile', (offset_x, offset_y),
                                 vx=vx, vy=vy)
        self.owner.widget.switch_to_image('unpowered')
        self.powered = False
class SpawnerComponent(Component):
    """
    Creates new entities (projectiles and similar) next to its owner.

    NOTE(review): registers for 'key_down' but defines no handler here --
    presumably handled by the Component base; confirm the listener is needed.
    """
    def __init__(self, *args, factory=None, **kwargs):
        super().__init__(*args, name='spawner', **kwargs)
        self.factory = factory
        self.dispatcher.register_listener(self, 'key_down')

    def spawn(self, item, relative_pos, **kwargs):
        """
        Create `item` at the owner's position shifted by `relative_pos`.

        :param item: entity type name understood by the factory
        :param relative_pos: (dx, dy) offset from the owner
        :return:
        """
        spawn_x = self.owner.position.x + relative_pos[0]
        spawn_y = self.owner.position.y + relative_pos[1]
        self.factory.create_entity(item, (spawn_x, spawn_y), **kwargs)
    # No __repr__ because its only kwarg is the factory instance that cannot
    # be stored between runs.
class FactionComponent(Component):
    """
    Stores the faction data to see who should attack whom.

    :param faction: str. The faction name. Defaults to 'placeholder'
    :param phrase_color: str. Fallback speech color, used when `faction`
        has no entry in the `colors` table.
    """
    colors = {'police': '#aaaaaa',
              'scientists': '#aaaaaa',
              'punks': '#ffffff',
              'placeholder': '#ffffff'}

    def __init__(self, *args, faction='placeholder',
                 phrase_color='white',
                 **kwargs):
        super().__init__(*args, name='faction', **kwargs)
        self.faction = faction
        # BUGFIX: phrase_color used to be accepted but ignored, and an
        # unknown faction raised KeyError.  Fall back to the argument for
        # unknown factions; known factions behave exactly as before.
        self.phrase_color = self.colors.get(faction, phrase_color)

    def __repr__(self):
        return dumps({'class': self.__class__.__name__,
                      'faction': self.faction})
class LevelSwitchComponent(Component):
    """
    Remembers which level a level-switch widget should load next.
    """
    # TODO: let LevelSwitchComponent track whether the level was won or lost
    def __init__(self, *args, next_level='ghetto_test', **kwargs):
        super().__init__(*args, name='level_switch', **kwargs)
        self.next_level = next_level

    def __repr__(self):
        payload = {'class': self.__class__.__name__,
                   'next_level': self.next_level}
        return dumps(payload)
class InputComponent(Component):
    """
    A component that handles input.

    Buffers WASD/arrow movement into `next_move` (applied on the next tick
    once `walk_delay` has expired) and triggers hand/jump actions gated by
    `action_delay`.  Numpad keys scroll the camera regardless of state.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, name='controller', **kwargs)
        self.dispatcher.register_listener(self, ['key_down', 'tick'])
        # Cooldowns (seconds) between steps and between actions.
        self.walk_delay = 0.1
        self.current_walk_delay = 0
        self.action_delay = 0.4
        self.current_action_delay = 0
        # Movement accumulated from key presses, applied on the next
        # walkable tick.
        self.next_move = [0, 0]
        self.accepts_input = True

    def on_event(self, event):
        #TODO: Support non-hardcoded actions and keys
        # Normalise whatever the parent handler returned into a list.
        x = super().on_event(event)
        if isinstance(x, BearEvent):
            r = [x]
        elif isinstance(x, list):
            r = x
        else:
            r = []
        if event.event_type == 'tick':
            if self.current_walk_delay > 0:
                self.current_walk_delay -= event.event_value
            else:
                # Movement is processed from the commands collected during
                # the previous tick. Of course, the input is ignored while
                # entity is on the cooldown
                if self.next_move[0] != 0 or self.next_move[1] != 0:
                    self.owner.position.walk(self.next_move)
                    self.next_move = [0, 0]
                    self.current_walk_delay = self.walk_delay
            if self.current_action_delay > 0:
                self.current_action_delay -= event.event_value
        if event.event_type == 'key_down' and self.accepts_input:
            if self.owner.health.hitpoints > 0 and \
                    self.current_action_delay <= 0:
                # These actions are only available to a non-dead player char
                if event.event_value == 'TK_Q':
                    # left-handed attack; use_hand returns the cooldown
                    self.current_action_delay = self.owner.hands.use_hand('left')
                elif event.event_value == 'TK_E':
                    # Right-handed attack
                    self.current_action_delay = self.owner.hands.use_hand('right')
                elif event.event_value == 'TK_Z':
                    # Left-handed pickup
                    self.owner.hands.pick_up(hand='left')
                    self.current_action_delay = self.action_delay
                elif event.event_value == 'TK_C':
                    # Right-handed pickup
                    self.owner.hands.pick_up(hand='right')
                    self.current_action_delay = self.action_delay
                elif event.event_value == 'TK_SPACE':
                    self.owner.position.jump()
                    self.current_action_delay = self.action_delay
            # These actions are available whether or not the player is dead
            if event.event_value in ('TK_D', 'TK_RIGHT') and self.current_walk_delay <= 0:
                self.next_move[0] += 2
            elif event.event_value in ('TK_A', 'TK_LEFT') and self.current_walk_delay <= 0:
                self.next_move[0] -= 2
            elif event.event_value in ('TK_S', 'TK_DOWN') and self.current_walk_delay <= 0:
                self.next_move[1] += 2
            elif event.event_value in ('TK_W', 'TK_UP') and self.current_walk_delay <= 0:
                self.next_move[1] -= 2
            elif event.event_value == 'TK_KP_6':
                r.append(BearEvent(event_type='ecs_scroll_by',
                                   event_value=(1, 0)))
            elif event.event_value == 'TK_KP_4':
                r.append(BearEvent(event_type='ecs_scroll_by',
                                   event_value=(-1, 0)))
            elif event.event_value == 'TK_KP_8':
                r.append(BearEvent(event_type='ecs_scroll_by',
                                   event_value=(0, -1)))
            elif event.event_value == 'TK_KP_2':
                r.append(BearEvent(event_type='ecs_scroll_by',
                                   event_value=(0, 1)))
            elif event.event_value == 'TK_KP_5':
                r.append(BearEvent(event_type='ecs_scroll_to',
                                   event_value=(0, 0)))
        return r
class HidingComponent(Component):
    """
    Hides the widget for a given entity on the condition, but does not
    destroy the entity itself.
    Expects owner to have PositionComponent and WidgetComponent

    :param hide_condition: 'keypress' (hide on any key press) or 'timeout'
        (hide once `lifetime` seconds have elapsed).
    :param lifetime: seconds a 'timeout' entity stays visible.
    :param age: seconds already elapsed (useful when deserialising).
    :param is_working: True while the owner's widget is currently shown.
    :param should_hide: set to False to disable hiding permanently.
    """
    def __init__(self, *args, hide_condition='keypress',
                 lifetime=1.0, age=0, is_working=False,
                 should_hide=True,
                 **kwargs):
        super().__init__(*args, name='hiding', **kwargs)
        if hide_condition == 'keypress':
            self.dispatcher.register_listener(self, 'key_down')
        elif hide_condition == 'timeout':
            self.dispatcher.register_listener(self, 'tick')
        else:
            raise ValueError('hide_condition should be either keypress or timeout')
        # BUGFIX: lifetime and age used to be stored only for the 'timeout'
        # condition, so __repr__ raised AttributeError on 'keypress'
        # components.  Store them unconditionally.
        self.lifetime = lifetime
        self.age = age
        # This is set to True whenever the owner's widget is actually shown, to
        # avoid triggering when the Entity is already hidden
        self.is_working = is_working
        # This is set to False when item should not be hidden
        self.should_hide = should_hide
        self.hide_condition = hide_condition

    def hide(self):
        """Remove the owner's widget from the ECS; the entity survives."""
        self.should_hide = True
        self.is_working = False
        self.dispatcher.add_event(BearEvent(event_type='ecs_remove',
                                            event_value=self.owner.id))

    def unhide(self):
        """
        Stop hiding the entity forever
        :return:
        """
        self.should_hide = False
        self.show()

    def show(self):
        """
        Show temporarily, until self.hide_condition becomes true
        :return:
        """
        if not self.is_working:
            self.is_working = True
            self.should_hide = True
            self.dispatcher.add_event(BearEvent(event_type='ecs_add',
                                                event_value=(self.owner.id,
                                                             self.owner.position.x,
                                                             self.owner.position.y)))
            if self.hide_condition == 'timeout':
                # Restart the visibility timer every time we are shown.
                self.age = 0

    def on_event(self, event):
        # Ignore events while hidden or while hiding is disabled.
        if not self.should_hide or not self.is_working:
            return
        if self.hide_condition == 'keypress' and event.event_type == 'key_down':
            self.hide()
        elif self.hide_condition == 'timeout' and event.event_type == 'tick':
            self.age += event.event_value
            if self.age >= self.lifetime:
                self.hide()

    def __repr__(self):
        return dumps({'class': self.__class__.__name__,
                      'hide_condition': self.hide_condition,
                      'should_hide': self.should_hide,
                      'lifetime': self.lifetime,
                      'age': self.age,
                      'is_working': self.is_working})
class ParticleDestructorComponent(DestructorComponent):
    """
    A DestructorComponent that emits a particle burst when its owner dies.

    Meant for single-use items (eg bandages) that need a visual cue that
    they actually fired.  Unlike SpawnerDestructorComponent, the particle
    effect parameters live on this component itself.
    """
    def __init__(self, *args, spawned_item, relative_pos=(0, 0),
                 size=(10, 10),
                 character=',', char_count=8, char_speed=10,
                 color='red', lifetime=0.3,
                 **kwargs):
        super().__init__(*args, **kwargs)
        self.spawned_item = spawned_item
        self.relative_pos = relative_pos
        self.size = size
        self.character = character
        self.char_count = char_count
        self.char_speed = char_speed
        self.color = color
        self.lifetime = lifetime

    def destroy(self):
        # Fire the particle effect first, then run the normal destruction.
        self.owner.spawner.spawn(self.spawned_item, self.relative_pos,
                                 size=self.size, character=self.character,
                                 char_count=self.char_count,
                                 char_speed=self.char_speed,
                                 color=self.color, lifetime=self.lifetime)
        super().destroy()

    def __repr__(self):
        d = loads(super().__repr__())
        for attr in ('spawned_item', 'relative_pos', 'size', 'character',
                     'char_count', 'char_speed', 'color', 'lifetime'):
            d[attr] = getattr(self, attr)
        return dumps(d)
class HandInterfaceComponent(Component):
"""
A Component that allows human characters to use hands.
This entity keeps track of all hand entities assigned to this character. It
shows the correct hand at the correct position and expects the hand itself
to call the corresponding item. When created, requires two dicts:
`hands_dict` should be a dict with the following
keys: 'forward_l', 'forward_r', 'back_l', and 'back_r', which should have
entity IDs as corresponding values. These should be the Entities with a
foreground hand pointed left, foreground hand pointing right, background
hand pointed left and a background hand pointed right (left, right, right
and left hands of the character respectively). Other keys, if any, are
ignored. All hand entities are expected to have HidingComponent.
`hands_offsets` should have the positions of these hands relative to the
owner's widget, as a tuple of ints.
Expects owner to have a PositionComponent.
"""
    def __init__(self, *args, hand_entities, hands_offsets, item_offsets,
                 left_item=None,
                 right_item=None,
                 **kwargs):
        """Store hand entity IDs, their offsets, and currently held items."""
        super().__init__(*args, name='hands', **kwargs)
        # In case the item is destroyed while it is held
        self.dispatcher.register_listener(self, 'ecs_destroy')
        # Entity IDs keyed by 'forward_l'/'forward_r'/'back_l'/'back_r'.
        self.hand_entities = hand_entities
        # Offsets of hands relative to the character
        self.hands_offsets = hands_offsets
        # Offsets of items relative to the hand, ie the position of the
        # outer tip of the hand. For left-facing items, *right edge* of the item
        # should be at this position
        self.item_offsets = item_offsets
        # Items currently held in each hand (None when empty).
        self.left_item = left_item
        self.right_item = right_item
        # A little helper to find correct hand for a given direction
        self.which_hand = {'right': {'r': 'forward_r',
                                     'l': 'back_l'},
                           'left': {'r': 'back_r',
                                    'l': 'forward_l'}}
def use_hand(self, hand='right'):
"""
use the item in hand
Draws the hand and the item where they belong. Returns the item cooldown
(in seconds)
:param hand:
:return:
"""
hand_label = self.which_hand[hand][self.owner.position.direction]
# Have to call the HidingComponent and WidgetComponent directly
hand_entity = EntityTracker().entities[self.hand_entities[hand_label]]
hand_entity.widget.z_level = self.owner.widget.z_level + 1
hand_x = self.owner.position.x + self.hands_offsets[hand_label][0]
hand_y = self.owner.position.y + self.hands_offsets[hand_label][1]
hand_entity.hiding.show()
hand_entity.position.move(hand_x, hand_y)
# Use item in hand
item_id = self.right_item | |
superfreq, superf / superfreq
)
+ superblob
)
else:
superblob = (
"\n{} 0 N/A\n".format(supercat.capitalize()) + superblob
)
totalfreq += superfreq
totalf += superf # TODO is this correct?
bprint("".join(superblob))
bprint("Total frequency: {}".format(totalfreq))
bprint("Total F-score: {}".format(totalf / totalfreq))
def output_all_scores():
    """Results for each supercategory in iEC given in SUPERCATEGORIES, each
    subcategory, and error code, in tsv format.

    Emits one tsv row per in-scope error code, with an aggregate row
    prepended for every subcategory and supercategory, plus a grand total.
    Ratio columns (recall/precision/F) are frequency-weighted averages;
    NO_RESULTS is printed when an aggregate has zero frequency.

    BUGFIX: the 'Total' row used to divide by totalfreq without a zero
    guard (unlike the sub/super rows), crashing when every code was out of
    scope; the shared summary-row helper now applies the guard everywhere.
    """

    def _stat(et, key):
        # Integer counter from an error-type record, defaulting to 0.
        return cast(int, et[key]) if key in et else 0

    def _pct(et, key):
        # Percentage for a ratio field; NO_RESULTS when absent or <= 0.
        # (NO_RESULTS must stay numeric -- it is fed to a {:3.2f} slot.)
        if key in et and et[key] > 0.0:
            return cast(float, et[key]) * 100.0
        return NO_RESULTS

    def _wsum(et, key, freq):
        # Frequency-weighted percentage contribution for aggregate rows.
        if key in et and et[key] > 0.0:
            return cast(float, et[key]) * freq * 100.0
        return 0.0

    def _summary_row(label, freq, tp, fn, fp, recall, precision, f,
                     ctp, cfn, cfp, crecall, cprecision, cf):
        # One aggregate tsv row; ratio columns are weighted averages, or
        # NO_RESULTS when freq == 0 (guards against division by zero).
        if freq != 0:
            return "\n{}\t{}\t{}\t{}\t{}\t{:3.2f}\t{:3.2f}\t{:3.2f}\t{}\t{}\t{}\t{:3.2f}\t{:3.2f}\t{:3.2f}\n".format(
                label, freq, tp, fn, fp,
                recall / freq, precision / freq, f / freq,
                ctp, cfn, cfp,
                crecall / freq, cprecision / freq, cf / freq,
            )
        return "\n{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\n".format(
            label, freq, tp, fn, fp,
            NO_RESULTS, NO_RESULTS, NO_RESULTS,
            ctp, cfn, cfp,
            NO_RESULTS, NO_RESULTS, NO_RESULTS,
        )

    bprint(
        "Category\tfrequency\ttp\tfn\tfp\trecall\tprecision\tF-score\tctp\tcfn\tcfp\tcrecall\tcprecision\tCF-score"
    )
    totalfreq = totaltp = totalfn = totalfp = 0
    totalrecall = totalprecision = totalf = 0.0
    totalctp = totalcfn = totalcfp = 0
    totalcrecall = totalcprecision = totalcf = 0.0
    for supercat in SUPERCATEGORIES:
        # supercategory: {subcategory : error code}
        superfreq = supertp = superfn = superfp = 0
        superrecall = superprecision = superf = 0.0
        superctp = supercfn = supercfp = 0
        supercrecall = supercprecision = supercf = 0.0
        superblob = ""
        for subcat in SUPERCATEGORIES[supercat]:
            subfreq = subtp = subfn = subfp = 0
            subrecall = subprecision = subf = 0.0
            subctp = subcfn = subcfp = 0
            subcrecall = subcprecision = subcf = 0.0
            subblob = ""
            for code in SUPERCATEGORIES[supercat][subcat]:
                if code in OUT_OF_SCOPE:
                    continue
                et = calc_error_category_metrics(code)
                freq = cast(int, et["freq"])
                fscore = cast(float, et["f05score"])
                cfscore = cast(float, et["cf05score"])
                # Per-error-code row.
                subblob += "{}\t{}\t{}\t{}\t{}\t{:3.2f}\t{:3.2f}\t{:3.2f}\t{}\t{}\t{}\t{:3.2f}\t{:3.2f}\t{:3.2f}\n".format(
                    code,
                    freq,
                    _stat(et, "tp"),
                    _stat(et, "fn"),
                    _stat(et, "fp"),
                    _pct(et, "recall"),
                    _pct(et, "precision"),
                    fscore * 100.0 if fscore > 0.0 else NO_RESULTS,
                    _stat(et, "ctp"),
                    _stat(et, "cfn"),
                    _stat(et, "cfp"),
                    _pct(et, "crecall"),
                    _pct(et, "cprecision"),
                    cfscore * 100.0 if cfscore > 0.0 else NO_RESULTS,
                )
                # Accumulate into the subcategory totals.
                subfreq += freq
                subtp += _stat(et, "tp")
                subfn += _stat(et, "fn")
                subfp += _stat(et, "fp")
                subrecall += _wsum(et, "recall", freq)
                subprecision += _wsum(et, "precision", freq)
                subf += fscore * freq * 100.0 if fscore > 0.0 else 0.0
                subctp += _stat(et, "ctp")
                subcfn += _stat(et, "cfn")
                subcfp += _stat(et, "cfp")
                subcrecall += _wsum(et, "crecall", freq)
                subcprecision += _wsum(et, "cprecision", freq)
                subcf += cfscore * freq * 100.0 if cfscore > 0.0 else 0.0
            # Subcategory aggregate goes in front of its code rows.
            subblob = _summary_row(subcat.capitalize(), subfreq, subtp,
                                   subfn, subfp, subrecall, subprecision,
                                   subf, subctp, subcfn, subcfp, subcrecall,
                                   subcprecision, subcf) + subblob
            superblob += subblob
            superfreq += subfreq
            supertp += subtp
            superfn += subfn
            superfp += subfp
            superrecall += subrecall
            superprecision += subprecision
            superf += subf
            superctp += subctp
            supercfn += subcfn
            supercfp += subcfp
            supercrecall += subcrecall
            supercprecision += subcprecision
            supercf += subcf
        # Supercategory aggregate goes in front of all its subcategories.
        superblob = _summary_row(supercat.capitalize(), superfreq, supertp,
                                 superfn, superfp, superrecall,
                                 superprecision, superf, superctp, supercfn,
                                 supercfp, supercrecall, supercprecision,
                                 supercf) + superblob
        totalfreq += superfreq
        totaltp += supertp
        totalfn += superfn
        totalfp += superfp
        totalrecall += superrecall
        totalprecision += superprecision
        totalf += superf
        totalctp += superctp
        totalcfn += supercfn
        totalcfp += supercfp
        totalcrecall += supercrecall
        totalcprecision += supercprecision
        totalcf += supercf
        bprint(superblob)
    bprint(
        _summary_row("Total", totalfreq, totaltp, totalfn, totalfp,
                     totalrecall, totalprecision, totalf, totalctp,
                     totalcfn, totalcfp, totalcrecall, totalcprecision,
                     totalcf)
    )
# output_duration()
# output_sentence_scores()
# output_token_scores()
# output_error_cat_scores()
bprint(f"\n\nResults for iEC-categories:")
# output_supercategory_scores()
output_all_scores()
# Print the accumulated output before exiting
for s in buffer:
print(s)
def correct_spaces(tokens: List[Tuple[str, str]]) -> str:
    """Concatenate (tag, text) tokens into a reasonably spaced string.

    Tokens tagged "c" are treated as punctuation, everything else as words.
    """
    toks = (
        Tok(TOK.PUNCTUATION if tag == "c" else TOK.WORD, txt, None)
        for tag, txt in tokens
    )
    return detokenize(toks)
# Standard output is collected here and flushed in one go at the end
# (after the output lock has been acquired)
buffer: List[str] = []

def bprint(s: str):
    """Buffered print: stash *s* for printing once processing is done"""
    buffer.append(s)
def process(fpath_and_category: Tuple[str, str]) -> Dict[str, Any]:
"""Process a single error corpus file in TEI XML format.
This function is called within a multiprocessing pool
and therefore usually executes in a child process, separate
from the parent process. It should thus not modify any
global state, and arguments and return values should be
picklable."""
# Unpack arguments
fpath, category = fpath_and_category
# Set up XML namespace stuff
NS = "http://www.tei-c.org/ns/1.0"
# Length of namespace prefix to cut from tag names, including { }
nl = len(NS) + 2
# Namespace dictionary to be passed to ET functions
ns = dict(ns=NS)
# Statistics about processed sentences. These data will
# be returned back to the parent process.
stats: List[StatsTuple] = []
# Counter of iceErrorCorpus error codes (xtypes) encountered
true_positives: Dict[str, int] = defaultdict(int)
false_negatives: Dict[str, int] = defaultdict(int)
# Counter of iceErrorCorpus error codes in unparsable sentences
ups: Dict[str, int] = defaultdict(int)
# Stats for each error code (xtypes)
errtypefreqs: ErrTypeStatsDict = ErrTypeStatsDict(TypeFreqs().copy)
try:
if not QUIET:
# Output a file header
bprint("-" * 64)
bprint(f"File: {fpath}")
bprint("-" * 64)
# Parse the XML file into a tree
try:
tree = ET.parse(fpath)
except ET.ParseError as e:
if QUIET:
bprint(f"000: *** Unable to parse XML file {fpath} ***")
else:
bprint(f"000: *** Unable to parse XML file ***")
raise e
# Obtain the root of the XML tree
root = tree.getroot()
# Iterate through the sentences in the file
for sent in root.findall("ns:text/ns:body/ns:p/ns:s", ns):
# Skip sentence if find exclude
if EXCLUDE:
exc = sent.attrib.get("exclude", "")
if exc:
continue
check = False # When args.single, checking single error code
# Sentence identifier (index)
index = sent.attrib.get("n", "")
tokens: List[Tuple[str, str]] = []
errors: List[ErrorDict] = []
# A dictionary of errors by their index (idx field)
error_indexes: Dict[str, ErrorDict] = {}
dependencies: List[Tuple[str, ErrorDict]] = []
analysisblob: List[str] = []
# Error corpora annotations for sentences marked as unparsable
# Enumerate through the tokens in the sentence
for el in sent:
tag = el.tag[nl:]
if tag == "revision":
# An error annotation starts here, eventually
# spanning multiple tokens
original = ""
corrected = ""
# Note the index of the starting token within the span
start = len(tokens)
# Revision id
rev_id = el.attrib["id"]
# Look at the original text
el_orig = el.find("ns:original", ns)
if el_orig is not None:
# We have 0 or more original tokens embedded
# within | |
ans = float(S - ans) / S
return A
def func_8ac373f0b58c4f80a2a6c5b1152ffbf0(r, s, q, N, p=1):
    """Generate D[i] = (i*p + q) % r + s, run the three-way contiguous
    split scan over it, and return the generated list D.

    BUGFIX/generalisation: `p` used to be an unbound local -- the later
    ``p = max(...)`` assignment made it function-local, so the list
    comprehension always raised UnboundLocalError.  It is now an explicit
    parameter (default 1), matching the sibling variants that read p from
    their input line.  Existing 4-argument calls keep working.
    """
    D = [((i * p + q) % r + s) for i in range(N)]
    S = sum(D)
    ans = S
    A, B, C = 0, 0, S
    a = 0
    b = -1
    while b < N - 1:
        # Grow the middle part by pulling D[b] out of the suffix C.
        b += 1
        C -= D[b]
        B += D[b]
        p = max(A, B, C)
        # Shift elements into the prefix while the worst part sum shrinks.
        while a < b:
            B -= D[a]
            A += D[a]
            a += 1
            t = max(A, B, C)
            if t >= p:
                a -= 1
                B += D[a]
                A -= D[a]
                break
            p = t
        ans = min(ans, p)
    # Normalised score of the best split; computed for parity with the
    # sibling variants (the return value here is D itself).
    ans = float(S - ans) / S
    return D
def func_b9594001be2543359b732fd54ee0d6dd(test, N, D):
    """Three-way contiguous split scan over D; returns the final right-cut
    index b (N - 1 for any N >= 1).  `test` is kept for interface
    compatibility -- it only fed the removed, broken report string.
    """
    S = sum(D)
    ans = S
    A, B, C = 0, 0, S
    a = 0
    b = -1
    while b < N - 1:
        # Grow the middle part by pulling D[b] out of the suffix C.
        b += 1
        C -= D[b]
        B += D[b]
        p = max(A, B, C)
        # Shift elements into the prefix while the worst part sum shrinks;
        # undo the step that stops helping.
        while a < b:
            B -= D[a]
            A += D[a]
            a += 1
            t = max(A, B, C)
            if t >= p:
                a -= 1
                B += D[a]
                A -= D[a]
                break
            p = t
        ans = min(ans, p)
    # BUGFIX: the original divided by S('Case #%s: %.16f' % (test + 1, ans)),
    # i.e. called the int S like a function -- an unconditional TypeError.
    ans = float(S - ans) / S
    return b
def func_051af4430bd240da8d703fcbf5be3a2e(test, N, D):
    """Three-way contiguous split scan over D; returns the final prefix
    sum A.  `test` is kept for interface compatibility -- it only fed the
    removed, broken report string.
    """
    S = sum(D)
    ans = S
    A, B, C = 0, 0, S
    a = 0
    b = -1
    while b < N - 1:
        # Grow the middle part by pulling D[b] out of the suffix C.
        b += 1
        C -= D[b]
        B += D[b]
        p = max(A, B, C)
        # Shift elements into the prefix while the worst part sum shrinks.
        while a < b:
            B -= D[a]
            A += D[a]
            a += 1
            t = max(A, B, C)
            if t >= p:
                a -= 1
                B += D[a]
                A -= D[a]
                break
            p = t
        ans = min(ans, p)
    # BUGFIX: the original divided by S('Case #%s: %.16f' % (test + 1, ans)),
    # i.e. called the int S like a function -- an unconditional TypeError.
    ans = float(S - ans) / S
    return A
def func_25e91e5495b04b9db58fe4ed602581c8(test, N, D):
    """Three-way contiguous split scan over D; returns the final middle
    sum B.  `test` is kept for interface compatibility -- it only fed the
    removed, broken report string.
    """
    S = sum(D)
    ans = S
    A, B, C = 0, 0, S
    a = 0
    b = -1
    while b < N - 1:
        # Grow the middle part by pulling D[b] out of the suffix C.
        b += 1
        C -= D[b]
        B += D[b]
        p = max(A, B, C)
        # Shift elements into the prefix while the worst part sum shrinks.
        while a < b:
            B -= D[a]
            A += D[a]
            a += 1
            t = max(A, B, C)
            if t >= p:
                a -= 1
                B += D[a]
                A -= D[a]
                break
            p = t
        ans = min(ans, p)
    # BUGFIX: the original divided by S('Case #%s: %.16f' % (test + 1, ans)),
    # i.e. called the int S like a function -- an unconditional TypeError.
    ans = float(S - ans) / S
    return B
def func_40f2054886854ef885fcdfee5b47590c(test, N, D):
    """Three-way contiguous split scan over D; returns the last inner-scan
    candidate t.  NOTE: preserved from the original, `t` is unbound (and
    this raises) when the inner loop never runs, e.g. N <= 1.  `test` is
    kept for interface compatibility -- it only fed the removed, broken
    report string.
    """
    S = sum(D)
    ans = S
    A, B, C = 0, 0, S
    a = 0
    b = -1
    while b < N - 1:
        # Grow the middle part by pulling D[b] out of the suffix C.
        b += 1
        C -= D[b]
        B += D[b]
        p = max(A, B, C)
        # Shift elements into the prefix while the worst part sum shrinks.
        while a < b:
            B -= D[a]
            A += D[a]
            a += 1
            t = max(A, B, C)
            if t >= p:
                a -= 1
                B += D[a]
                A -= D[a]
                break
            p = t
        ans = min(ans, p)
    # BUGFIX: the original divided by S('Case #%s: %.16f' % (test + 1, ans)),
    # i.e. called the int S like a function -- an unconditional TypeError.
    ans = float(S - ans) / S
    return t
def func_3ec9de9a396847b78009e7d8b4eae973(test, N, D):
    """Three-way contiguous split scan over D; returns the total sum S of
    D.  `test` is kept for interface compatibility -- it only fed the
    removed, broken report string.
    """
    S = sum(D)
    ans = S
    A, B, C = 0, 0, S
    a = 0
    b = -1
    while b < N - 1:
        # Grow the middle part by pulling D[b] out of the suffix C.
        b += 1
        C -= D[b]
        B += D[b]
        p = max(A, B, C)
        # Shift elements into the prefix while the worst part sum shrinks.
        while a < b:
            B -= D[a]
            A += D[a]
            a += 1
            t = max(A, B, C)
            if t >= p:
                a -= 1
                B += D[a]
                A -= D[a]
                break
            p = t
        ans = min(ans, p)
    # BUGFIX: the original divided by S('Case #%s: %.16f' % (test + 1, ans)),
    # i.e. called the int S like a function -- an unconditional TypeError.
    ans = float(S - ans) / S
    return S
def func_65f1e60aec364668adb36a07121ad701(test, N, D):
    """Three-way contiguous split scan over D; returns the normalised
    score (S - best) / S where `best` is the smallest achievable maximum
    part sum.  `test` is kept for interface compatibility -- it only fed
    the removed, broken report string.
    """
    S = sum(D)
    ans = S
    A, B, C = 0, 0, S
    a = 0
    b = -1
    while b < N - 1:
        # Grow the middle part by pulling D[b] out of the suffix C.
        b += 1
        C -= D[b]
        B += D[b]
        p = max(A, B, C)
        # Shift elements into the prefix while the worst part sum shrinks.
        while a < b:
            B -= D[a]
            A += D[a]
            a += 1
            t = max(A, B, C)
            if t >= p:
                a -= 1
                B += D[a]
                A -= D[a]
                break
            p = t
        ans = min(ans, p)
    # BUGFIX: the original divided by S('Case #%s: %.16f' % (test + 1, ans)),
    # i.e. called the int S like a function -- an unconditional TypeError.
    ans = float(S - ans) / S
    return ans
def func_bafb526c7e3144d1b0a97a4273525c9a(test, N, D):
    """Three-way contiguous split scan over D; returns the final suffix
    sum C (0 after the scan consumes all of D).  `test` is kept for
    interface compatibility -- it only fed the removed, broken report
    string.
    """
    S = sum(D)
    ans = S
    A, B, C = 0, 0, S
    a = 0
    b = -1
    while b < N - 1:
        # Grow the middle part by pulling D[b] out of the suffix C.
        b += 1
        C -= D[b]
        B += D[b]
        p = max(A, B, C)
        # Shift elements into the prefix while the worst part sum shrinks.
        while a < b:
            B -= D[a]
            A += D[a]
            a += 1
            t = max(A, B, C)
            if t >= p:
                a -= 1
                B += D[a]
                A -= D[a]
                break
            p = t
        ans = min(ans, p)
    # BUGFIX: the original divided by S('Case #%s: %.16f' % (test + 1, ans)),
    # i.e. called the int S like a function -- an unconditional TypeError.
    ans = float(S - ans) / S
    return C
def func_7547066c8f5341299536f025addd18b2(test, N, D):
    """Three-way contiguous split scan over D; returns the final left-cut
    index a.  `test` is kept for interface compatibility -- it only fed
    the removed, broken report string.
    """
    S = sum(D)
    ans = S
    A, B, C = 0, 0, S
    a = 0
    b = -1
    while b < N - 1:
        # Grow the middle part by pulling D[b] out of the suffix C.
        b += 1
        C -= D[b]
        B += D[b]
        p = max(A, B, C)
        # Shift elements into the prefix while the worst part sum shrinks.
        while a < b:
            B -= D[a]
            A += D[a]
            a += 1
            t = max(A, B, C)
            if t >= p:
                a -= 1
                B += D[a]
                A -= D[a]
                break
            p = t
        ans = min(ans, p)
    # BUGFIX: the original divided by S('Case #%s: %.16f' % (test + 1, ans)),
    # i.e. called the int S like a function -- an unconditional TypeError.
    ans = float(S - ans) / S
    return a
def func_ad79f8895fde4bb1b56285375bab7557(test, N, D):
    """Three-way contiguous split scan over D; returns p, the best maximum
    part sum found during the last outer step.  `test` is kept for
    interface compatibility -- it only fed the removed, broken report
    string.
    """
    S = sum(D)
    ans = S
    A, B, C = 0, 0, S
    a = 0
    b = -1
    while b < N - 1:
        # Grow the middle part by pulling D[b] out of the suffix C.
        b += 1
        C -= D[b]
        B += D[b]
        p = max(A, B, C)
        # Shift elements into the prefix while the worst part sum shrinks.
        while a < b:
            B -= D[a]
            A += D[a]
            a += 1
            t = max(A, B, C)
            if t >= p:
                a -= 1
                B += D[a]
                A -= D[a]
                break
            p = t
        ans = min(ans, p)
    # BUGFIX: the original divided by S('Case #%s: %.16f' % (test + 1, ans)),
    # i.e. called the int S like a function -- an unconditional TypeError.
    ans = float(S - ans) / S
    return p
def func_44dba9c45f6f47e4abdd2bc980c0e419(infile):
    """Read one 'N p q r s' case from *infile*, build the sequence
    D[i] = (i*p + q) % r + s, and scan contiguous three-way splits of it,
    tracking the smallest achievable maximum part sum.  Returns the
    candidate maximum left over from the last scan step (the value the
    scan leaves in ``p``)."""
    N, p, q, r, s = map(int, infile.readline().split())
    seq = [((i * p + q) % r + s) for i in range(N)]
    total = sum(seq)
    best = total
    left, mid, right = 0, 0, total
    lo = 0
    for hi in range(N):
        # Move seq[hi] from the suffix into the middle part.
        right -= seq[hi]
        mid += seq[hi]
        p = max(left, mid, right)
        # Grow the prefix while that lowers the worst part sum; undo the
        # step that stops helping.
        while lo < hi:
            mid -= seq[lo]
            left += seq[lo]
            lo += 1
            trial = max(left, mid, right)
            if trial >= p:
                lo -= 1
                mid += seq[lo]
                left -= seq[lo]
                break
            p = trial
        best = min(best, p)
    # Normalised score; computed for parity with the original (unused here).
    best = float(total - best) / total
    return p
def func_185103f211d540efbdc061d9042dd27c(infile):
    """Read one 'N p q r s' case from *infile*, build the sequence
    D[i] = (i*p + q) % r + s, scan contiguous three-way splits of it for
    the smallest achievable maximum part sum m, and return the normalised
    score (S - m) / S where S = sum(D)."""
    N, p, q, r, s = map(int, infile.readline().split())
    seq = [((i * p + q) % r + s) for i in range(N)]
    total = sum(seq)
    best = total
    left, mid, right = 0, 0, total
    lo = 0
    for hi in range(N):
        # Move seq[hi] from the suffix into the middle part.
        right -= seq[hi]
        mid += seq[hi]
        cand = max(left, mid, right)
        # Grow the prefix while that lowers the worst part sum; undo the
        # step that stops helping.
        while lo < hi:
            mid -= seq[lo]
            left += seq[lo]
            lo += 1
            trial = max(left, mid, right)
            if trial >= cand:
                lo -= 1
                mid += seq[lo]
                left -= seq[lo]
                break
            cand = trial
        best = min(best, cand)
    return float(total - best) / total
def func_683ebaaec42a48c5b44e091d1d55584e(infile):
N, p, q, r, s = map(int, infile.readline().split())
D = [((i * p + q) % r + s) for i in range(N)]
S = sum(D)
ans = S
A, B, C = 0, 0, S
a = 0
b = -1
while b < N - 1:
b += 1
C -= D[b]
B += D[b]
p = max(A, B, C)
while a < b:
B -= D[a]
| |
import torch
from torch import nn
from torch.nn import functional as F
from torch.nn import init
from torch.autograd import Function
from math import sqrt
import torch.utils.data
import torch.utils.data.distributed
from torch.nn.utils import spectral_norm
F_conv = torch.nn.functional.conv2d
''' fix the last four layers and put a small net on the head
'''
style_dim = 256
stdd = 0.1
''' ------ conv3d small size h
no style'''
class EqualLR:
    """Equalized-learning-rate hook: the real parameter is kept as
    ``<name>_orig`` and, before every forward pass, ``<name>`` is rebuilt
    as the stored tensor scaled by sqrt(2 / fan_in)."""

    def __init__(self, name):
        self.name = name

    def compute_weight(self, module):
        """Return the runtime weight: stored parameter times sqrt(2 / fan_in)."""
        stored = getattr(module, self.name + '_orig')
        fan_in = stored.data.size(1) * stored.data[0][0].numel()
        return stored * sqrt(2 / fan_in)

    def __call__(self, module, input):
        # Forward pre-hook: refresh the scaled weight before each forward.
        setattr(module, self.name, self.compute_weight(module))

    @staticmethod
    def apply(module, name):
        """Replace parameter ``name`` of ``module`` with its scaled version."""
        hook = EqualLR(name)
        original = getattr(module, name)
        del module._parameters[name]
        module.register_parameter(name + '_orig', nn.Parameter(original.data))
        module.register_forward_pre_hook(hook)
        return hook
def equal_lr(module, name='weight'):
    """Return *module* unchanged -- equalized learning rate is deliberately
    disabled here (the hook application below is commented out)."""
    # EqualLR.apply(module, name)
    return module
class EqualConv2d(nn.Module):
    """A Conv2d passed through ``equal_lr`` with its bias zero-initialised.

    The weight keeps the default Conv2d initialisation (the explicit
    normal_ re-inits stayed commented out in the original)."""

    def __init__(self, *args, **kwargs):
        super().__init__()
        conv = nn.Conv2d(*args, **kwargs)
        conv.bias.data.zero_()
        self.conv = equal_lr(conv)

    def forward(self, input):
        return self.conv(input)
class EqualLinear(nn.Module):
    """A Linear layer passed through ``equal_lr`` with weights drawn from
    N(0, stdd) and a zero-initialised bias."""

    def __init__(self, in_dim, out_dim):
        super().__init__()
        fc = nn.Linear(in_dim, out_dim)
        fc.weight.data.normal_(0.0, stdd)
        fc.bias.data.zero_()
        self.linear = equal_lr(fc)

    def forward(self, input):
        return self.linear(input)
class PixelNorm(nn.Module):
    """Normalise every feature vector to unit RMS along dim 1
    (epsilon 1e-8 guards against division by zero)."""

    def __init__(self):
        super().__init__()

    def forward(self, input):
        mean_sq = torch.mean(input ** 2, dim=1, keepdim=True)
        return input / torch.sqrt(mean_sq + 1e-8)
class BlurFunctionBackward(Function):
    """Backward pass of the blur op, implemented as its own Function so
    that the double-backward (gradient of the gradient) is also defined."""
    @staticmethod
    def forward(ctx, grad_output, kernel, kernel_flip):
        ctx.save_for_backward(kernel, kernel_flip)
        # Convolving the incoming gradient with the flipped kernel is the
        # transpose of the forward blur (depthwise: one group per channel).
        grad_input = F.conv2d(
            grad_output, kernel_flip, padding=1, groups=grad_output.shape[1]
        )
        return grad_input

    @staticmethod
    def backward(ctx, gradgrad_output):
        kernel, kernel_flip = ctx.saved_tensors
        # Second-order gradient: convolve with the unflipped kernel.
        grad_input = F.conv2d(
            gradgrad_output, kernel, padding=1, groups=gradgrad_output.shape[1]
        )
        # The kernel buffers receive no gradients.
        return grad_input, None, None
class BlurFunction(Function):
    """Autograd-aware depthwise 3x3 blur with a hand-written backward."""

    @staticmethod
    def forward(ctx, input, kernel, kernel_flip):
        ctx.save_for_backward(kernel, kernel_flip)
        return F.conv2d(input, kernel, padding=1, groups=input.shape[1])

    @staticmethod
    def backward(ctx, grad_output):
        kernel, kernel_flip = ctx.saved_tensors
        # Delegate to a Function so double-backward also works.
        grad = BlurFunctionBackward.apply(grad_output, kernel, kernel_flip)
        return grad, None, None


blur = BlurFunction.apply


class Blur(nn.Module):
    """Fixed (non-learnable) 3x3 binomial blur applied per channel."""

    def __init__(self, channel):
        super().__init__()
        base = torch.tensor([[1, 2, 1], [2, 4, 2], [1, 2, 1]], dtype=torch.float32)
        base = base.view(1, 1, 3, 3)
        base = base / base.sum()  # normalise so the blur preserves the mean
        flipped = torch.flip(base, [2, 3])
        # Buffers, not Parameters: the kernel is constant, one copy per channel.
        self.register_buffer('weight', base.repeat(channel, 1, 1, 1))
        self.register_buffer('weight_flip', flipped.repeat(channel, 1, 1, 1))

    def forward(self, input):
        return blur(input, self.weight, self.weight_flip)
        # return F.conv2d(input, self.weight, padding=1, groups=input.shape[1])
class AdaptiveInstanceNorm(nn.Module):
    """AdaIN: instance-normalise the input, then modulate with a style.

    The style vector is projected to per-channel (gamma, beta). The
    forward value uses gamma clamped to [-1, 1], while a straight-through
    term keeps gradients flowing to the raw gamma outside that range.
    """

    def __init__(self, in_channel, style_dim):
        super().__init__()
        self.norm = nn.InstanceNorm2d(in_channel)
        self.style = EqualLinear(style_dim, in_channel * 2)
        # Bias init: gamma -> 1 (identity scale), beta -> 0 (no shift).
        self.style.linear.bias.data[:in_channel] = 1
        self.style.linear.bias.data[in_channel:] = 0

    def forward(self, input, style):
        params = self.style(style).unsqueeze(2).unsqueeze(3)
        gamma, beta = params.chunk(2, 1)
        normed = self.norm(input)
        clamped = gamma.clamp(-1.0, 1.0)
        # Value uses the clamped gamma; the detached term is a straight-through
        # correction so the raw gamma still receives gradients when clamped.
        out = clamped * normed + normed.detach() * (gamma - clamped) + beta
        # out = gamma * input + beta
        return out
def snconv2d(in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True):
    """Build a Conv2d wrapped in spectral normalisation."""
    conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels,
                     kernel_size=kernel_size, stride=stride, padding=padding,
                     dilation=dilation, groups=groups, bias=bias)
    return spectral_norm(conv)
class Self_Attn(nn.Module):
    """SAGAN-style self-attention layer.

    All projections are 1x1 convolutions wrapped in spectral norm
    (snconv2d). The key and value paths are max-pooled 2x, shrinking the
    attention matrix by a factor of 4. The attended features are blended
    back into the input through a learned scalar ``sigma`` initialised at
    zero, so the layer starts out as an identity mapping.
    """

    def __init__(self, in_channels):
        super(Self_Attn, self).__init__()
        self.in_channels = in_channels
        # Query projection: C -> C/8 channels.
        self.snconv1x1_theta = snconv2d(in_channels=in_channels, out_channels=in_channels // 8, kernel_size=1, stride=1,
                                        padding=0)
        # Key projection: C -> C/8 channels.
        self.snconv1x1_phi = snconv2d(in_channels=in_channels, out_channels=in_channels // 8, kernel_size=1, stride=1,
                                      padding=0)
        # Value projection: C -> C/2 channels.
        self.snconv1x1_g = snconv2d(in_channels=in_channels, out_channels=in_channels // 2, kernel_size=1, stride=1,
                                    padding=0)
        # Output projection back to C channels.
        self.snconv1x1_attn = snconv2d(in_channels=in_channels // 2, out_channels=in_channels, kernel_size=1, stride=1,
                                       padding=0)
        self.maxpool = nn.MaxPool2d(2, stride=2, padding=0)
        self.softmax = nn.Softmax(dim=-1)
        # Learned residual gate; zero init => identity at start of training.
        self.sigma = nn.Parameter(torch.zeros(1))

    def forward(self, x):
        """
        inputs :
            x : input feature maps(B X C X W X H)
        returns :
            out : self attention value + input feature
            attention: B X N X N (N is Width*Height)
        """
        _, ch, h, w = x.size()
        # Theta path: queries, flattened to (B, C/8, H*W).
        theta = self.snconv1x1_theta(x)
        theta = theta.view(-1, ch // 8, h * w)
        # Phi path: keys, pooled then flattened to (B, C/8, H*W/4).
        phi = self.snconv1x1_phi(x)
        phi = self.maxpool(phi)
        phi = phi.view(-1, ch // 8, h * w // 4)
        # Attention map: (B, H*W, H*W/4), softmax over the key positions.
        attn = torch.bmm(theta.permute(0, 2, 1), phi)
        attn = self.softmax(attn)
        # g path: values, pooled then flattened to (B, C/2, H*W/4).
        g = self.snconv1x1_g(x)
        g = self.maxpool(g)
        g = g.view(-1, ch // 2, h * w // 4)
        # Attn_g: attention-weighted sum of values, back to (B, C/2, H, W).
        attn_g = torch.bmm(g, attn.permute(0, 2, 1))
        attn_g = attn_g.view(-1, ch // 2, h, w)
        attn_g = self.snconv1x1_attn(attn_g)
        # Out: gated residual connection.
        out = x + self.sigma * attn_g
        return out
class AdaptiveInstanceNorm_H(nn.Module):
    """LayerNorm-based AdaIN variant with a learned per-channel affine.

    Despite the name there is no style input: the input is LayerNorm-ed
    over its (map_size x map_size) spatial plane, then scaled/shifted by
    learned per-channel parameters. Only a small (1e-2) fraction of the
    normalised activations keeps a gradient path through the norm; the
    learned scale acts on the detached activations.
    """

    def __init__(self, in_channel, map_size):
        super().__init__()
        # self.norm = nn.InstanceNorm2d(in_channel)
        self.norm = nn.LayerNorm([map_size, map_size])
        # Per-channel scale (init around 1000) and shift (init around 0).
        self.weight = nn.Parameter(1000.0 + torch.randn(1, in_channel, 1, 1))
        self.beta = nn.Parameter(0.0 + torch.randn(1, in_channel, 1, 1))

    def forward(self, input, style=0):
        normed = self.norm(input)
        # Small differentiable path through the norm + detached scaled term.
        return 1e-2 * normed + normed.detach() * self.weight + self.beta
class NoiseInjection(nn.Module):
    """Add per-channel-scaled noise to a feature map."""

    def __init__(self, channel):
        super().__init__()
        # self.weight = nn.Parameter(torch.zeros(1, channel, 1, 1))
        # Small random initial scale (instead of the more usual zeros).
        self.weight = nn.Parameter(torch.randn(1, channel, 1, 1) * 0.1)

    def forward(self, image, noise):
        return image + self.weight * noise
class ConstantInput(nn.Module):
    """Learned constant tensor, tiled across the batch dimension."""

    def __init__(self, channel, size=4):
        super().__init__()
        self.input = nn.Parameter(torch.randn(1, channel, size, size))

    def forward(self, input):
        # Only the batch size of `input` matters; its values are ignored.
        return self.input.repeat(input.shape[0], 1, 1, 1)
class StyleBlock_firstLayer(nn.Module):
    """First generator block: a learned constant plus noise injection.

    ``style`` and ``noise`` are accepted only for interface parity with
    the other StyleBlocks; neither is used here.
    """

    def __init__(self, fin, fout, fhidden=None, is_bias=True, initial=False, upsample=False, fused=False):
        super().__init__()
        # Attributes
        self.is_bias = is_bias
        self.learned_shortcut = (fin != fout)
        self.fin = fin
        self.fout = fout
        kernel_size = 3
        self.fhidden = min(fin, fout) if fhidden is None else fhidden
        # Submodules: learned constant input followed by noise injection.
        self.conv_0 = ConstantInput(self.fin)
        self.noise_0 = equal_lr(NoiseInjection(self.fout))

    def forward(self, x, style, noise=0):
        out = self.conv_0(x)
        batch_size, _, hh, ww = out.shape
        # Fresh single-channel noise per call, on the input's device.
        fresh = torch.randn(batch_size, 1, hh, ww, device=x[0].device)
        return self.noise_0(out, fresh.data)
class StyleBlock_noise(nn.Module):
    """Residual up-sampling generator block.

    Main branch: LeakyReLU -> 2x transposed-conv upsample -> noise
    injection -> AdaIN, then LeakyReLU -> 3x3 conv -> noise injection ->
    AdaIN. The result is blended with a bias-free transposed-conv
    shortcut as ``x_s + 0.1 * out`` (shortcut-dominated residual).
    """

    def __init__(self, fin, fout, fhidden=None, is_bias=True, initial=False, upsample=False, fused=False):
        super().__init__()
        # Attributes
        self.is_bias = is_bias
        # NOTE(review): the shortcut conv below is built unconditionally,
        # so this flag only matters for the (unused) _shortcut() helper.
        self.learned_shortcut = (fin != fout)
        self.fin = fin
        self.fout = fout
        kernel_size = 3
        if fhidden is None:
            self.fhidden = min(fin, fout)
        else:
            self.fhidden = fhidden
        # Submodules
        # Main branch, stage 0: 2x upsampling transposed conv.
        self.conv_0 = nn.ConvTranspose2d(self.fin, self.fout, 4, stride=2, padding=1)
        self.noise_0 = equal_lr(NoiseInjection(self.fout))
        self.adain_0 = AdaptiveInstanceNorm(self.fout, style_dim)
        self.lrelu_0 = nn.LeakyReLU(0.2)
        # Main branch, stage 1: 3x3 refinement conv at the new resolution.
        self.conv_1 = EqualConv2d(self.fout, self.fout, 3, stride=1, padding=1)
        self.noise_1 = equal_lr(NoiseInjection(self.fout))
        self.adain_1 = AdaptiveInstanceNorm(self.fout, style_dim)
        self.lrelu_1 = nn.LeakyReLU(0.2)
        # Bias-free upsampling shortcut.
        self.conv_s = nn.ConvTranspose2d(self.fin, self.fout, 4, stride=2, padding=1, bias=False)

    def forward(self, x, style, noise=0):
        x_s = self.conv_s(x)
        # An alternative op ordering (activation -> AdaIN -> conv -> noise)
        # existed here as commented-out code; the active ordering below is
        # activation -> conv -> noise -> AdaIN.
        out = self.lrelu_0(x)
        out = self.conv_0(out)
        batch_size, cc, hh, ww = out.shape
        # Fresh single-channel noise per call; .data detaches it from autograd.
        noise_0 = torch.randn(batch_size, 1, hh, ww, device=x[0].device)
        out = self.noise_0(out, noise_0.data)
        out = self.adain_0(out, style)
        out = self.lrelu_1(out)
        out = self.conv_1(out)
        batch_size, cc, hh, ww = out.shape
        noise_1 = torch.randn(batch_size, 1, hh, ww, device=x[0].device)
        out = self.noise_1(out, noise_1.data)
        out = self.adain_1(out, style)
        # Residual blend: shortcut dominates, main branch scaled by 0.1.
        out = x_s + 0.1 * out
        return out

    def _shortcut(self, x):
        # Not used by forward() above, which always applies conv_s directly;
        # kept for API parity with other block implementations.
        if self.learned_shortcut:
            x_s = self.conv_s(x)
        else:
            x_s = x
        return x_s
class Generator(nn.Module):
def __init__(self, z_dim, nlabels, size, embed_size=256, nfilter=64, **kwargs):
super().__init__()
s0 = self.s0 = 4
nf = self.nf = nfilter
self.z_dim = z_dim
small_nf = self.small_nf = 64
# Submodules
self.small_embedding = nn.Embedding(nlabels, embed_size)
self.small_fc = nn.Linear(z_dim, 8 * small_nf * s0 * s0)
# self.small_net_1 = StyleBlock_firstLayer(8 * small_nf, 8 * small_nf, initial=True)
self.small_net_2 = StyleBlock_noise(8 * small_nf, 8 * small_nf, upsample=True)
self.small_net_3 = StyleBlock_noise(8 * small_nf, 8 * small_nf, upsample=True)
# self.small_Attn = Self_Attn(8 * small_nf)
self.small_H = AdaptiveInstanceNorm_H(8 * small_nf, 16)
# self.resnet_3_0 = ResnetBlock(8*nf, 4*nf)
# self.resnet_3_1 = ResnetBlock(4*nf, 4*nf)
#
# self.resnet_4_0 = ResnetBlock(4*nf, 2*nf)
# self.resnet_4_1 = ResnetBlock(2*nf, 2*nf)
#
# self.resnet_5_0 = ResnetBlock(2*nf, 1*nf)
# self.resnet_5_1 = ResnetBlock(1*nf, 1*nf)
self.resnet_3_0 = ResnetBlock_adafm(8 * nf, 4 * nf)
self.resnet_3_1 = ResnetBlock_adafm(4 * nf, 4 * nf)
# self.small_Attn = Self_Attn(4 * nf)
self.resnet_4_0 = ResnetBlock_adafm(4 * nf, 2 * nf)
self.resnet_4_1 = ResnetBlock_adafm(2 * nf, 2 * nf)
self.resnet_5_0 = ResnetBlock_adafm(2 * nf, 1 * nf)
self.resnet_5_1 = ResnetBlock_adafm(1 * nf, 1 * nf)
self.conv_img = nn.Conv2d(nf, 3, 3, padding=1)
layers = [PixelNorm()]
# layers = []
layers.append(EqualLinear(z_dim, style_dim))
layers.append(nn.LeakyReLU(0.2))
for i in range(7):
layers.append(EqualLinear(style_dim, style_dim))
layers.append(nn.LeakyReLU(0.2))
self.small_style = nn.Sequential(*layers)
def forward(self, z, y, FLAG=500):
assert (z.size(0) == y.size(0))
batch_size = z.size(0)
if y.dtype is torch.int64:
yembed = self.small_embedding(y)
else:
yembed = y
yembed = yembed / torch.norm(yembed, p=2, dim=1, keepdim=True)
yz = torch.cat([z, yembed], dim=1)
style_w = self.small_style(z)
# print('yembed ============ ', yembed.shape)
| |
nerc region matches the nerc region of operation (TRUE)
df['nerc_match'] = df['nerc_region'] == df['nerc_regions_of_operation']
# Group by utility, state, and report date to see which groups have at least one TRUE value
grouped_nerc_match_bools = (
df.groupby(['utility_id_eia', 'state', 'report_date'])
[['nerc_match']].any()
.reset_index()
.rename(columns={'nerc_match': 'nerc_group_match'})
)
# Merge back with original df to show cases where there are multiple non-matching nerc values
# per utility id, year, and state.
expanded_nerc_match_bools = (
pd.merge(df,
grouped_nerc_match_bools,
on=['utility_id_eia', 'state', 'report_date'],
how='left')
)
# Keep only rows where there are no matches for the whole group.
expanded_nerc_match_bools_false = (
expanded_nerc_match_bools[~expanded_nerc_match_bools['nerc_group_match']]
)
return expanded_nerc_match_bools_false
def _pct_to_mw(df, pct_col):
"""Turn pct col into mw capacity using total capacity col."""
mw_value = df['total_capacity_mw'] * df[pct_col] / 100
return mw_value
def _make_yn_bool(df_object):
"""Turn Y/N reporting into True or False boolean statements for df or series."""
return df_object.replace({"N": False, "Y": True})
def _thousand_to_one(df_object):
"""Turn reporting in thousands of dollars to regular dollars for df or series."""
return df_object * 1000
###############################################################################
# EIA Form 861 Table Transform Functions
###############################################################################
def service_territory(tfr_dfs):
    """Transform the EIA 861 utility service territory table.

    Args:
        tfr_dfs (dict): A dictionary of DataFrame objects in
            which pages from EIA861 form (keys) correspond to normalized
            DataFrames of values from that page (values)

    Returns:
        dict: a dictionary of pandas.DataFrame objects in
        which pages from EIA861 form (keys) correspond to normalized
        DataFrames of values from that page (values)
    """
    # No data tidying required.
    # A few NA values in the county column get read as floats, which breaks
    # the parsing of counties by addfips -- force the column to string.
    raw_df = tfr_dfs["service_territory_eia861"].astype({"county": str})
    # Normalize county names to the canonical US Census spellings, then
    # attach state & county FIPS identifiers.
    tfr_dfs["service_territory_eia861"] = (
        pudl.helpers.clean_eia_counties(raw_df, fixes=EIA_FIPS_COUNTY_FIXES)
        .pipe(pudl.helpers.add_fips_ids)
    )
    return tfr_dfs
def balancing_authority(tfr_dfs):
    """
    Transform the EIA 861 Balancing Authority table.

    Fixes known data-entry errors and backfills missing BA IDs/codes. The
    ``.loc`` assignment from BA_ID_NAME_FIXES relies on index alignment
    against (report_date, balancing_authority_name_eia, utility_id_eia),
    so the set_index / fix / reset_index ordering here matters.

    Args:
        tfr_dfs (dict): A dictionary of transformed EIA 861 DataFrames, keyed by table
            name. It will be mutated by this function.

    Returns:
        dict: A dictionary of transformed EIA 861 dataframes, keyed by table name.
    """
    # No data tidying required
    # All columns are already type compatible.
    # Value transformations:
    # * Backfill BA codes on a per BA ID basis
    # * Fix data entry errors
    df = (
        tfr_dfs["balancing_authority_eia861"]
        .pipe(pudl.helpers.convert_cols_dtypes, "eia", "balancing_authority_eia861")
        .set_index(["report_date", "balancing_authority_name_eia", "utility_id_eia"])
    )
    # Fill in BA IDs based on date, utility ID, and BA Name:
    # (BA_ID_NAME_FIXES must share this MultiIndex for alignment to work.)
    df.loc[BA_ID_NAME_FIXES.index,
           "balancing_authority_id_eia"] = BA_ID_NAME_FIXES.balancing_authority_id_eia
    # Backfill BA Codes based on BA IDs:
    df = df.reset_index().pipe(_ba_code_backfill)
    # Typo: NEVP, BA ID is 13407, but in 2014-2015 in UT, entered as 13047
    df.loc[
        (df.balancing_authority_code_eia == "NEVP") &
        (df.balancing_authority_id_eia == 13047),
        "balancing_authority_id_eia"
    ] = 13407
    # Typo: Turlock Irrigation District is TIDC, not TID.
    df.loc[
        (df.balancing_authority_code_eia == "TID") &
        (df.balancing_authority_id_eia == 19281),
        "balancing_authority_code_eia"
    ] = "TIDC"
    tfr_dfs["balancing_authority_eia861"] = df
    return tfr_dfs
def balancing_authority_assn(tfr_dfs):
    """
    Compile a balancing authority / utility / state association table.

    Through 2012 the only BA-utility linkage available lives in the
    balancing_authority_eia861 table (with no state detail), while
    utility-state pairs must be observed in the data tables (e.g.
    sales_eia861). From 2013 onward the data tables carry explicit
    BA-utility-state triples, but identify the BA only by its code, so
    the BA ID is merged in from the balancing authority table.

    Args:
        tfr_dfs (dict): Transformed EIA 861 dataframes. Must already
            contain every data table we want to harvest associations
            from, so this has to run after the basic per-table
            transform functions.

    Returns:
        dict: tfr_dfs with "balancing_authority_assn_eia861" added.
        This function also feeds the final normalization of the
        balancing authority table performed elsewhere.
    """
    # These aren't really "data" tables -- don't search them for associations.
    reference_tables = [
        "balancing_authority_eia861",
        "service_territory_eia861",
    ]
    # The dataframes from which to compile BA-Util-State associations.
    observed = [tfr_dfs[name]
                for name in tfr_dfs if name not in reference_tables]
    logger.info("Building an EIA 861 BA-Util-State association table.")
    # Helpful shorthand query strings.
    pre_2013 = "report_date<='2012-12-31'"
    post_2012 = "report_date>='2013-01-01'"
    observed_early = [df.query(pre_2013) for df in observed]
    observed_late = [df.query(post_2012) for df in observed]
    # Early years: the old BA table links BAs to utilities (no states)...
    early_ba_util = _harvest_associations(
        dfs=[tfr_dfs["balancing_authority_eia861"].query(pre_2013), ],
        cols=["report_date",
              "balancing_authority_id_eia",
              "utility_id_eia"],
    )
    # ...while utility-state pairs come from observations in the data tables.
    early_util_state = _harvest_associations(
        dfs=observed_early,
        cols=["report_date",
              "utility_id_eia",
              "state"],
    )
    early_assn = (
        early_ba_util
        .merge(early_util_state, how="outer")
        .drop_duplicates()
    )
    # Late years: the new BA table maps BA codes to BA IDs...
    late_code_to_id = _harvest_associations(
        dfs=[tfr_dfs["balancing_authority_eia861"].query(post_2012), ],
        cols=["report_date",
              "balancing_authority_code_eia",
              "balancing_authority_id_eia"],
    )
    # ...and the data tables supply code-utility-state triples.
    late_code_util_state = _harvest_associations(
        dfs=observed_late,
        cols=["report_date",
              "balancing_authority_code_eia",
              "utility_id_eia",
              "state"],
    )
    # Merge on the BA code then drop it: only the BA ID exists consistently
    # across all years.
    late_assn = (
        late_code_util_state
        .merge(late_code_to_id, how="outer")
        .drop("balancing_authority_code_eia", axis="columns")
        .drop_duplicates()
    )
    tfr_dfs["balancing_authority_assn_eia861"] = (
        pd.concat([early_assn, late_assn])
        .dropna(subset=["balancing_authority_id_eia", ])
        .astype({"utility_id_eia": pd.Int64Dtype()})
    )
    return tfr_dfs
def utility_assn(tfr_dfs):
    """Harvest a Utility-Date-State Association Table."""
    # These aren't really "data" tables -- don't search them for associations.
    reference_tables = [
        "balancing_authority_eia861",
        "service_territory_eia861",
    ]
    # The dataframes from which to compile the associations.
    observed = [tfr_dfs[name]
                for name in tfr_dfs if name not in reference_tables]
    logger.info("Building an EIA 861 Util-State-Date association table.")
    tfr_dfs["utility_assn_eia861"] = _harvest_associations(
        observed, ["report_date", "utility_id_eia", "state"])
    return tfr_dfs
def _harvest_associations(dfs, cols):
"""
Compile all unique, non-null combinations of values ``cols`` within ``dfs``.
Find all unique, non-null combinations of the columns ``cols`` in the dataframes
``dfs`` within records that are selected by ``query``. All of ``cols`` must be
present in each of the ``dfs``.
Args:
dfs (iterable of pandas.DataFrame): The DataFrames in which to search for
cols (iterable of str): Labels of columns for which to find unique, non-null
combinations of values.
Raises:
ValueError: if no associations for cols are found in dfs.
Returns:
pandas.DataFrame: A dataframe containing all the unique, non-null combinations
of values found in ``cols``.
"""
assn = pd.DataFrame()
for df in dfs:
if set(df.columns).issuperset(set(cols)):
assn = assn.append(df[cols])
assn = assn.dropna().drop_duplicates()
if assn.empty:
raise ValueError(
"These dataframes contain no associations for the columns: "
f"{cols}"
)
return assn
def normalize_balancing_authority(tfr_dfs):
    """
    Finish the normalization of the balancing_authority_eia861 table.

    The balancing_authority_assn_eia861 table depends on information only
    present in the UN-normalized balancing_authority_eia861 table (and on
    the other transformed data tables), so the final normalization of the
    balancing authority table has to wait until the rest of the transform
    process is done. This function performs that last step.
    """
    logger.info("Completing normalization of balancing_authority_eia861.")
    normed = (
        tfr_dfs["balancing_authority_eia861"]
        .loc[:, [
            "report_date",
            "balancing_authority_id_eia",
            "balancing_authority_code_eia",
            "balancing_authority_name_eia",
        ]]
        .drop_duplicates(subset=["report_date", "balancing_authority_id_eia"])
    )
    # BA IDs that still lack a code anywhere in the normalized table:
    ids_without_codes = (
        normed.loc[
            normed.balancing_authority_code_eia.isnull(),
            "balancing_authority_id_eia"]
        .drop_duplicates()
        .dropna()
    )
    # If any of those IDs *do* carry a code in some other record, the code
    # could (and should) have been backfilled earlier -- treat as an error.
    fillable_ba_codes = normed[
        (normed.balancing_authority_id_eia.isin(ids_without_codes)) &
        (normed.balancing_authority_code_eia.notnull())
    ]
    if len(fillable_ba_codes) != 0:
        raise ValueError(
            f"Found {len(fillable_ba_codes)} unfilled but fillable BA Codes!"
        )
    tfr_dfs["balancing_authority_eia861"] = normed
    return tfr_dfs
def sales(tfr_dfs):
"""Transform the EIA 861 Sales table."""
idx_cols = [
"utility_id_eia",
"state",
"report_date",
"balancing_authority_code_eia",
]
# Pre-tidy clean specific to sales table
raw_sales = (
tfr_dfs["sales_eia861"].copy()
.query("utility_id_eia not in (88888, 99999)")
)
###########################################################################
# Tidy Data:
###########################################################################
logger.info("Tidying the EIA 861 Sales table.")
tidy_sales, idx_cols = _tidy_class_dfs(
raw_sales,
df_name='Sales',
idx_cols=idx_cols,
class_list=pc.CUSTOMER_CLASSES,
class_type='customer_class',
)
# remove duplicates on the primary key columns + customer_class -- there
# are lots of records that have reporting | |
dparam in pdC]
else:
# Don't kill gradients
dC = pdC
# Set class attributes and return
self.C, self.dC, self.L = C, dC, L
return C, dC, L
# Method to get updates
def getupdates(self, cost=None, gradients=None, method='sgd', **kwargs):
    """
    Build the parameter update list for training.

    :type cost: theano.tensor.var.TensorVariable
    :param cost: Cost scalar. Defaults to the cached ``self.C``.

    :type gradients: list
    :param gradients: List of gradients w.r.t. the corresponding element in the
                      list of parameters. Defaults to the cached ``self.dC``.

    :type method: str or callable
    :param method: Method for weight update. If callable, should take
                   (params, cost, gradient), in that order. If a string, it may
                   name any function in the netrain module.

    :type kwargs: dict
    :param kwargs: Extra arguments for method (if any)
    """
    # Fall back to the cost/gradients cached by the cost() method.
    if cost is None:
        cost = self.C
    if gradients is None:
        gradients = self.dC
    # Make sure there are no ghost variables lurking in the parameter list.
    # (generator form: no intermediate list needed by any())
    assert not any(isinstance(param, netutils.ghostvar) for param in self.params), \
        "Uninstantiated ghost variables found in the parameter list. Run feedforward() or cost() method first."

    if method in ['sgd', 'stochastic gradient descent']:
        # kwargs.get() replaces the verbose key-membership conditional.
        self.updates = nt.sgd(self.params, cost=cost, gradients=gradients,
                              learningrate=kwargs.get("learningrate"))
    else:
        # This allows method to be a function name string from the netrain py file.
        try:
            if isinstance(method, str):
                self.updates = netutils.smartfunc(getattr(nt, method))(params=self.params, cost=cost,
                                                                       gradients=gradients, **kwargs)
            elif callable(method):
                self.updates = netutils.smartfunc(method)(self.params, cost=cost, gradients=gradients, **kwargs)
            else:
                raise NotImplementedError("Update method evaluation failed.")
        except AttributeError:
            raise NotImplementedError("Update method {} not implemented.".format(method))

    # Append update requests
    self.updates += self.updaterequests
    return self.updates
# Method to compile model functions
def compile(self, what='trainer', isautoencoder=None, fetchgrads=True, extrarguments=None, compilekwargs=None):
    """
    Compile model functions.

    :type what: str
    :param what: Compile what? Possible keys: "trainer", "inference", "error"

    :type isautoencoder: bool
    :param isautoencoder: Whether to compile an autoencoder.

    :type fetchgrads: bool
    :param fetchgrads: Whether to fetch gradients. Can be laggy for a lot of parameters in the gradient.

    :type extrarguments: dict
    :param extrarguments: Extra arguments to the compiled function.
                          - Keys must be theano variables, and
                          - Values must be generators, i.e. contain a next() method OR a callable(costlog, losslog)
                          Optional: restartgenerator() method to unwind generators when a new epoch begins.

    :type compilekwargs: dict
    :param compilekwargs: Extra keyword arguments forwarded to theano.function().
    """
    if isautoencoder is None:
        isautoencoder = self.isautoencoder
    if compilekwargs is None:
        compilekwargs = {}
    # FIX: the mutable default argument ({}) is replaced with None +
    # normalization here, avoiding the shared-mutable-default pitfall.
    if extrarguments is None:
        extrarguments = {}

    # FIX: the original compared strings with `is`, which tests object
    # identity and only worked by accident of CPython string interning.
    if what == "trainer" or what == "all":
        # Compile classifier trainer
        if not isautoencoder:
            # FIX: dict.keys() returns a view in Python 3 and cannot be
            # concatenated to a list directly -- wrap it in list().
            classifiertrainer = th.function(inputs=pyk.obj2list(self.x) + pyk.obj2list(self.yt) +
                                                   list(extrarguments.keys()),
                                            outputs=[self.C, self.L, self.E] + (self.dC if fetchgrads else []),
                                            updates=self.updates,
                                            allow_input_downcast=True,
                                            on_unused_input='warn', **compilekwargs)
            self.classifiertrainer = classifiertrainer
            return classifiertrainer
        else:
            # Compile autoencoder trainer
            autoencodertrainer = th.function(inputs=pyk.obj2list(self.x) + list(extrarguments.keys()),
                                             outputs=[self.C, self.L, self.E] + (self.dC if fetchgrads else []),
                                             updates=self.updates,
                                             allow_input_downcast=True,
                                             on_unused_input='warn', **compilekwargs)
            self.autoencodertrainer = autoencodertrainer
            return autoencodertrainer

    # NOTE: for what == "all" the branches below are unreachable because the
    # trainer branch above returns; the original control flow is preserved.
    if what == "inference" or what == "all":
        if not isautoencoder:
            classifier = th.function(inputs=pyk.obj2list(self.x),
                                     outputs=self.y,
                                     allow_input_downcast=True,
                                     on_unused_input='warn')
            self.classifier = classifier
            return classifier
        else:
            autoencoder = th.function(inputs=pyk.obj2list(self.x),
                                      outputs=self.xr,
                                      allow_input_downcast=True,
                                      on_unused_input='warn')
            self.autoencoder = autoencoder
            return autoencoder

    if what == "error" or what == "all":
        if not isautoencoder:
            classifiererror = th.function(inputs=pyk.obj2list(self.x) + pyk.obj2list(self.yt),
                                          outputs=self.E,
                                          allow_input_downcast=True,
                                          on_unused_input='warn')
            self.classifiererror = classifiererror
            return classifiererror
        else:
            reconstructionerror = th.function(inputs=pyk.obj2list(self.x),
                                              outputs=self.E,
                                              allow_input_downcast=True,
                                              on_unused_input='warn')
            self.reconstructionerror = reconstructionerror
            return reconstructionerror
# Method to train the model
def fit(self, trX, trY=None, numepochs=100, maxiter=np.inf, verbosity=0, progressbarunit=1,
vaX=None, vaY=None, validateevery=None, recompile=False, nanguard=True, extrarguments={}, circuitX=None,
circuitY=None, trainmonitors=None, validatemonitors=None, backupparams=True, backupbestparams=True):
"""
:type trX: generator
:param trX: Generator for the training X data (i.e. images). Must have a next() and restartgenerator() method.
:type trY: generator
:param trY: Generator for the training Y data (i.e. labels). Must have a next() and restartgenerator() method.
Omit to train an autoencoder, or set to -1 if generator trX.next() returns both X and Y batches
:type numepochs: int
:param numepochs: int
:type verbosity: int
:param verbosity: Verbosity. 0 for silent execution, 4 for full verbosity.
:type progressbarunit: int
:param progressbarunit: Print training progress every `progressbarunit` iterations.
:type vaX: generator
:param vaX: Generator for the validation X data (i.e. images). Must have a next() method.
:type vaY: generator
:param vaY: Generator for the validation X data (i.e. images). Must have a next() method.
:type validateevery: int
:param validateevery: Validate every validateevery iteration.
:type recompile: bool
:param recompile: Whether to recompile trainer functions even if a precompiled version is available in the cache
:type nanguard: bool
:param nanguard: Breaks training loop if cost or loss is found to be NaN.
:type extrarguments: dict
:param extrarguments: Extra arguments to the compiled function.
- Keys must be theano variables, and
- Values must be generators, i.e. contain a next() method OR a callable(costlog, losslog)
Optional: restartgenerator() method to unwind generators when a new epoch begins.
:type trainmonitors: list of callables
:param trainmonitors: Training Monitors (see netools for a few pre-implemented monitors)
:type validatemonitors: list of callables
:param validatemonitors: Validation Monitors (see netools for a few pre-implemented monitors)
:type backupparams: bool or int
:param backupparams: Whether to backup parameters every time a better set of parameters are found (if bool).
If int, save parameters every backupparams iterations.
:type backupbestparams: bool
:param backupbestparams: Whether to backup best set of parameters
"""
# This function will:
# Compile a trainer function if not precompiled
# Run trainer batch-wise
# Check if inputs are correct
assert trY is None or trY is -1 or hasattr(trY, "next"), "trY must have a next method or equal -1 if Y batch" \
" is returned by the next() method of trX."
assert hasattr(trX, "next"), "trX must have a next method."
assert vaY is None or vaY is -1 or hasattr(vaY, "next"), "vaY must have a next method or equal -1 if Y batch" \
" is returned by the next() method of vaX."
assert hasattr(vaX, "next") or vaX is None, "vaX must have a next method."
# Confirm that the model is an autoencoder if trY is not given
assert trY is not None or self.isautoencoder, "Training targets are required for non-autoencoding networks."
# Confirm if all theano graphs linked.
assert None not in [self.C.owner, self.L.owner], "Select cost function with the cost() method before fitting."
assert None not in [y.owner for y in pyk.obj2list(self.y)] if not self.isautoencoder else \
None not in [xr.owner for xr in pyk.obj2list(self.xr)] is not None, \
"Theano graph is not built correctly. Consider calling feedforward() or decoderfeedforward() followed " \
"by cost()."
# Check if monitors are valid
assert trainmonitors is None or all([callable(monitor) for monitor in trainmonitors]), \
"Training Monitors must be callable functions."
assert validatemonitors is None or all([callable(monitor) for monitor in validatemonitors]), \
"Validation Monitors must be callable functions."
if verbosity >= 2:
print("Tests passed. Training {} (ID: {})...".format("Autoencoder" if self.isautoencoder else "Classifier",
id(self)))
# Parse Monitors
if trainmonitors is None:
trainmonitors = [ntl.batchmonitor, ntl.costmonitor, ntl.lossmonitor, ntl.trEmonitor, ntl.gradnormmonitor,
ntl.updatenormmonitor] \
if verbosity >= 4 else \
[ntl.batchmonitor, ntl.costmonitor, ntl.lossmonitor] \
if verbosity >= 3 else \
[ntl.batchmonitor] \
if verbosity >= 2 else []
if validatemonitors is None:
validatemonitors = [ntl.batchmonitor, ntl.costmonitor, ntl.lossmonitor, ntl.vaEmonitor] \
if verbosity >= 4 else \
[ntl.batchmonitor, ntl.costmonitor, ntl.lossmonitor] \
if verbosity >= 3 else \
[ntl.batchmonitor] \
if verbosity >= 2 else []
# Link error variable if not done already
if self.E.owner is None:
self.error()
# Compile trainers
if (self.classifiertrainer is None and not self.isautoencoder) or (self.autoencodertrainer is None
and self.isautoencoder) or recompile:
if verbosity >= 1:
print("Compiling Trainer...")
self.compile(what="trainer", extrarguments=extrarguments)
# Set up validation
validate = not(validateevery is None or validateevery == 0 or (vaY is None and not self.isautoencoder)
or vaX is None)
# UI
if verbosity >= 2:
print("Validation Status: {}...".format("Active" if validate else "Inactive"))
# Check if validation functions compiled
if self.classifiererror is None and not self.isautoencoder and validate:
if verbosity >= 1:
print("Compiling Validation Function...")
self.compile(what="error", isautoencoder=False, extrarguments=extrarguments)
if self.reconstructionerror is None and self.isautoencoder and validate:
if verbosity >= 1:
print("Compiling Validation Function...")
# Compile
self.compile(what="error", isautoencoder=True, extrarguments=extrarguments)
# Loop variables
numiter = 0
costlog = []
losslog = []
errorlog = []
validationlog = []
skipvalidation = False
skipepoch = False
bestparams = []
| |
<reponame>WindRiver-Labs/wrlinux<filename>wrlinux-ovp/recipes-extended/system-report/files/systemReporter.py
#!/usr/bin/env python
###############################################################################
#
# Copyright (C) 2013 Wind River Systems, Inc.
#
# This code is licensed under the GPLv2.
#
###############################################################################
import sys
import getopt
import subprocess
import imp
import string
import os
import re
import random
# global variables section
# vdsmPresent is provisional here; it is assigned definitively by the
# os.path.exists() probe below.
vdsmPresent = True
vdsmPath = "/usr/share/vdsm"
# /proc files mined by the report helper functions below.
cpuinfoFile = "/proc/cpuinfo"
meminfoFile = "/proc/meminfo"
versionFile = "/proc/version"
modulesFile = "/proc/modules"
# check for the existence of vdsm on the current system. if exists, then import
# the required utility classes
if os.path.exists(vdsmPath):
    vdsmPresent = True
    sys.path.append(vdsmPath)
    from vdsm import utils
    import caps
else:
    vdsmPresent = False
# this method is used to print a simple centered headline to the console.
# the input parameter is stripped (spaces, newlines from the end), and will be
# centered between * characters.
def printReportHeader(headline, skipNewLine=1):
    """Print ``headline`` centered within a line of 80 '*' characters.

    The headline is stripped of surrounding whitespace and padded with one
    space on each side. A leading newline is emitted unless ``skipNewLine``
    is 0.
    """
    # str.strip() replaces the old rstrip()+lstrip() pair, and str.center()
    # replaces string.center(), whose function form was removed in Python 3.
    # Both forms behave identically on Python 2 as well.
    headline = " " + headline.strip() + " "
    lineToPrint = headline.center(80, "*")
    if skipNewLine != 0:
        lineToPrint = "\n" + lineToPrint
    # print(...) with a single argument behaves the same on Python 2 and 3.
    print(lineToPrint)
    sys.stdout.flush()
################################################################################
### PROC filesystem related methods
################################################################################
# this method opens the /proc/cpuinfo file, and extracts the cpu related info
# from it. returns a hash, which will be in respect with the cpu-related info
# provided by the VDSM caps.py tool.
def getProcCpuInfo():
    """Collect CPU data from /proc/cpuinfo.

    Returns a dict with keys cpuModel, cpuSpeed, cpuSockets, cpuCores,
    cpuThreads and cpuFlags, mirroring the CPU-related fields provided by
    the VDSM caps.py tool. Fields that cannot be determined stay "".
    """
    retVal = {}
    retVal["cpuModel"] = ""
    retVal["cpuSpeed"] = ""
    retVal["cpuSockets"] = ""
    retVal["cpuCores"] = ""
    retVal["cpuThreads"] = ""
    retVal["cpuFlags"] = ""
    numProcs = 0
    numSiblings = 0
    try:
        proc_cpu_info = open(cpuinfoFile, "r")
    except:
        sys.stderr.write("Unable to open " + cpuinfoFile + " file!")
        return retVal
    # each matching key overwrites the previous one, so the values kept are
    # those from the last processor stanza in the file
    for line in proc_cpu_info:
        # CPU model name
        match = re.match("^model\s+name\s+\:\s(.*)$", line)
        if match:
            retVal["cpuModel"] = match.group(1)
        # CPU speed
        match = re.match("^cpu\s+MHz\s+\:\s(.*)$", line)
        if match:
            retVal["cpuSpeed"] = match.group(1) + "MHz"
        # CPU cores
        match = re.match("^cpu\s+cores\s+\:\s+(.*)$", line)
        if match:
            retVal["cpuCores"] = match.group(1)
        # CPU Threads (siblings)
        match = re.match("^siblings\s+\:\s+(.*)$", line)
        if match:
            retVal["cpuThreads"] = match.group(1)
            numSiblings = match.group(1)
        # CPU flags (flags)
        match = re.match("^flags\s+\:\s+(.*)$", line)
        if match:
            retVal["cpuFlags"] = match.group(1)
        # CPU count
        if re.match("^processor\s+\:\s+(.*)$", line):
            numProcs += 1
    # CPU sockets (socket = numProcs/numSiblings)
    # the bare except also covers the case where no "siblings" line was seen
    # (numSiblings stays 0 and the division raises ZeroDivisionError)
    try:
        retVal["cpuSockets"] = str(numProcs / int(numSiblings))
    except:
        retVal["cpuSockets"] = "0"
    proc_cpu_info.close()
    return retVal
# reads the /proc/meminfo file and extracts various information from it.
def getProcMemInfo():
    """Collect memory statistics from /proc/meminfo.

    Returns a dict with keys memTotal, memFree, swapTotal and swapFree;
    values are the raw strings reported by the kernel (presumably including
    the "kB" unit -- depends on the kernel's meminfo format). Fields that
    cannot be read stay "".
    """
    retVal = {"memTotal":"", "memFree":"", "swapTotal":"", "swapFree":""}
    try:
        mem_info_file = open(meminfoFile, "r")
    except:
        sys.stderr.write("Unable to open " + meminfoFile + " file!")
        return retVal
    for line in mem_info_file:
        # MemTotal
        match = re.match("^MemTotal\:\s+(.*)$", line)
        if match:
            retVal["memTotal"] = match.group(1)
        # MemFree
        match = re.match("^MemFree\:\s+(.*)$", line)
        if match:
            retVal["memFree"] = match.group(1)
        # SwapTotal
        match = re.match("^SwapTotal\:\s+(.*)$", line)
        if match:
            retVal["swapTotal"] = match.group(1)
        # SwapFree
        match = re.match("^SwapFree\:\s+(.*)$", line)
        if match:
            retVal["swapFree"] = match.group(1)
    mem_info_file.close()
    return retVal;
# reads the /proc/modules file, and the /proc/version file.
# returns in a hash, the kernel version and release, and the loaded modules.
def getProcModules():
    """Collect kernel version/release and the sorted loaded-module list.

    Reads /proc/version for the kernel version/release and /proc/modules
    for module names. Returns a dict with keys version, release and
    modules; fields that cannot be read stay "".
    """
    retVal = {"version":"", "release":"", "modules":""}
    # obtain the kernel version and release information.
    try:
        kernel_file = open(versionFile, "r")
    except:
        sys.stderr.write("Unable to open " + versionFile + " file!")
        return retVal
    kernel_line = kernel_file.readline()
    kernel_file.close()
    match = re.match("^Linux\sversion\s(.*?)\s(.*)$", kernel_line)
    if match:
        # NOTE(review): assumes the version token contains a '-'
        # (e.g. "3.10.0-rt1"); a plain "x.y.z" would raise IndexError below
        kernel_info = match.group(1).split("-", 1)
        retVal["version"] = kernel_info[0]
        retVal["release"] = kernel_info[1]
    # now process the list of modules
    try:
        modules_file = open(modulesFile, "r")
    except:
        sys.stderr.write("Unable to open " + modulesFile + " file!")
        return retVal
    tempList = []
    # the first whitespace-delimited token of each line is the module name
    for line in modules_file:
        match = re.match("^(.*?)\s+(.*)$", line)
        if match:
            tempList.append(match.group(1))
    tempList.sort()
    retVal["modules"] = ', '.join(tempList)
    modules_file.close()
    return retVal
################################################################################
### DMIDECODE related methods
################################################################################
# dmidecode sections (outer keys) and field labels (inner keys) to harvest;
# processDmiDecodeValues() fills the empty string values in place
dmiDecodeValues = {
    'BIOS Information' : {
        'Vendor:' : '',
        'Version:': '',
        'Release Date:' : '',
        'BIOS Revision:' : ''
    },
    'System Information' : {
        'Manufacturer:' : '',
        'Product Name:' : '',
        'Version:' : '',
        'Serial Number:' : ''
    },
    'Built-in Pointing Device' : {
        'Type:' : '',
        'Buttons:' : '',
        'Interface:' : ''
    },
    'Base Board Information' : {
        'Manufacturer:' : '',
        'Product Name:' : '',
        'Serial Number:' : '',
    },
    'BIOS Language Information' : {
        'Currently Installed Language:' : ''
    },
}
# NOTE(review): dmiLines is never referenced in the visible code; presumably
# a leftover from an earlier implementation
dmiLines = ""
def processDmiDecodeValues():
    """Run dmidecode and fill the module-level dmiDecodeValues dict in place.

    Captures dmidecode output to a temporary file, then for every section
    key in dmiDecodeValues scans the file for that section and reads ahead
    for each of its field labels. Returns None; on any failure it prints a
    message and leaves the remaining values untouched.
    """
    # create a random named file, and gather dmidecode returned lines into it
    # NOTE(review): the name is predictable and the file is created with
    # open() in /tmp; tempfile.mkstemp() would be the safe approach
    char_set = string.ascii_uppercase + string.digits
    tmpFileName = "/tmp/" + ''.join(random.sample(char_set*6,6))
    try:
        tmpFile = open(tmpFileName, "w")
    except:
        print "Cannot create file: " + tmpFileName
        return
    command = "dmidecode"
    # execute dmidecode
    # NOTE(review): on failure the temp file is left open and not removed
    try:
        subprocess.check_call(command , shell=True, stderr=subprocess.STDOUT, universal_newlines=True, stdout=tmpFile)
    except subprocess.CalledProcessError:
        print "Error calling dmidecode!"
        return
    # close the file to flush the subprocess output into it,
    # and open it for readin
    tmpFile.close()
    try:
        tmpFile = open(tmpFileName, "r+")
    except:
        print "Cannot open file " + tmpFileName
        return
    # process each line....
    # The processing logic is the following:
    # getting from the dmiDecodeValues hash, the keys, and lookup that
    # key in the file.
    for dmiK1, dmiV1 in dmiDecodeValues.iteritems():
        tmpFile.seek(0,0) # rewind to position 0
        line = tmpFile.readline()
        while line:
            match = re.match("^"+dmiK1+"$", line)
            if match:
                # now we have a match, we found the requested category, so
                # we need to lookup the additional information(s)
                # get the current file position to be able to rewind to it
                masterKeyFilePosition = tmpFile.tell()
                # now lookup the next key in the following lines...
                for dmiK2, dmiV2 in dmiV1.iteritems():
                    # seek for the master key position
                    tmpFile.seek(masterKeyFilePosition, 0)
                    # read ahead and search for the sub-key
                    nextLine = tmpFile.readline()
                    while nextLine:
                        subMatch = re.match("^\s+"+dmiK2+"(.*)$", nextLine)
                        if subMatch:
                            dmiDecodeValues[dmiK1][dmiK2] = subMatch.group(1)
                            nextLine = None
                            break
                        # go to the next line
                        nextLine = tmpFile.readline()
            # read the next line, in search for master key
            line = tmpFile.readline()
    tmpFile.close()
    os.remove(tmpFileName)
    # return from the method!
# this method prints out a hash of hash-es...
def printDmiDecodeValues(dmiDecVal, headline, footline, prefix):
    """Print a two-level dict between a header and a footer banner.

    dmiDecVal -- dict of dicts (e.g. the module-level dmiDecodeValues)
    headline  -- banner text printed before the data
    footline  -- banner text printed after the data
    prefix    -- string prepended to every printed key line
    """
    printReportHeader(headline)
    for dmiK1,dmiV1 in dmiDecVal.iteritems():
        print "\n" + prefix + dmiK1
        for dmiK2,dmiV2 in dmiV1.iteritems():
            print " " + prefix + dmiK2 + " " + dmiV2
    printReportHeader(footline, 0)
################################################################################
### lspci - data processing
################################################################################
# just a simple helper method, to read a single line from a file, and
# try to match a given regex on that line. returns the regex first field.
def readHelper(fileToReadFrom, regexPart):
    """Read one line from *fileToReadFrom* and return the text following
    "regexPart:" on it, or "" when the line does not start with that key."""
    currentLine = fileToReadFrom.readline()
    found = re.match("^" + regexPart + ":(.*)$", currentLine)
    if found:
        return found.group(1)
    return ""
# runs and captures the lspci command output into a temporary file.
# after that processes the file, and returns a list of elements;
# the elements are simple hashes, which contains the Slot, Class, Vendor and
# Device information about a listed PCI device.
def getPciDevicesList():
    """Run "lspci -mm -vvv -D" and parse its output.

    Returns a list of dicts, one per PCI device, each with the keys Slot,
    Class, Vendor and Device. Returns an empty list when the temporary
    capture file cannot be created or reopened.
    """
    retVal = []
    # create a random named file, and gather lspic returned lines into it
    # NOTE(review): predictable /tmp name; tempfile.mkstemp() would be safer
    char_set = string.ascii_uppercase + string.digits
    tmpFileName = "/tmp/" + ''.join(random.sample(char_set*6,6))
    try:
        tmpFile = open(tmpFileName, "w")
    except:
        print "Cannot create file " + tmpFileName
        return retVal
    command = "lspci -mm -vvv -D"
    # execute lspci
    # NOTE(review): unlike the dmidecode path, CalledProcessError is not
    # caught here, so a failing lspci propagates to the caller
    subprocess.check_call(command , shell=True, stderr=subprocess.STDOUT, universal_newlines=True, stdout=tmpFile)
    # close the file to flush the subprocess output into it,
    # and open it for readin
    tmpFile.close()
    try:
        tmpFile = open(tmpFileName, "r+")
    except:
        print "Cannot open file " + tmpFileName
        return retVal
    line = tmpFile.readline()
    while line:
        match = re.match("^Slot:\s+(.*)$", line)
        if match:
            pciDevice = {'Slot':'', 'Class':'', 'Vendor':'', 'Device':''}
            pciDevice["Slot"] = match.group(1)
            # now read ahead to gather the rest of the lines...
            pciDevice["Class"] = readHelper(tmpFile, "Class")
            pciDevice["Vendor"] = readHelper(tmpFile, "Vendor")
            pciDevice["Device"] = readHelper(tmpFile, "Device")
            retVal.append(pciDevice)
        # read ahead, is not a line in which we have interest
        line = tmpFile.readline()
    tmpFile.close()
    os.remove(tmpFileName)
    return retVal
# return from the getPciDeviceList() method
# prints the PCI device list informations.
def printPciDevicesList(pciDevices):
    """Print one four-line record per PCI device between report banners.

    pciDevices -- list of dicts as returned by getPciDevicesList()
    """
    printReportHeader("PCI Devices Information")
    for pciDev in pciDevices:
        print "\nPCI - Slot: " + pciDev["Slot"]
        print "PCI - Device: " + pciDev["Device"]
        print "PCI - Class: " + pciDev["Class"]
        print "PCI - Vendor: " + pciDev["Vendor"]
    printReportHeader("PCI Devices Info - Done", 0)
################################################################################
### HWLOC-info related data gathering
################################################################################
# prints localization topology data gathered using the hwloc-info package
def printHwlocInfoData():
    """Print the machine topology via lstopo (hwloc) between report banners."""
    printReportHeader("HWLOC-INFO Topology Information")
    cmdLine = "lstopo"
    # the return code is ignored; lstopo output goes straight to stdout
    subprocess.call(cmdLine, shell=True)
    printReportHeader("HWLOC-INFO - Done")
################################################################################
### Kernel config options gathering....
################################################################################
# test if the /proc/config.gz file exists. if yes, then it will be read
# zcat /proc/config.gz, and the output will be shown between the
# tool header and footer....
def kernelConfigGathering():
configFile = "/proc/config.gz"
printReportHeader("KERNEL Configuration Information")
if not os.path.exists(configFile):
print "KERNEL Configuration Information - NOT PRESENT!"
else:
subprocess.call("zcat | |
0
#sometimes the HTTPS connection gets rejected/terminated
except requests.exceptions.ConnectionError:
logger.warning(f'GQ >>> Connection error encountered for {product_id}.')
return 0
except:
logger.critical('GQ >>> Processing has failed!')
#uncomment for debugging purposes only
#logger.error(traceback.format_exc())
return 0
def gog_files_extract_parser(db_connection, product_id):
    """Sync the gog_files table with the stored JSON payload of a product.

    Loads the cached API JSON for *product_id* from gog_products, then for
    each download category (installers, patches, language_packs,
    bonus_content): inserts rows for files not yet present, and marks rows
    no longer present in the payload as removed. Commits once at the end.

    db_connection -- an open sqlite3 connection
    product_id    -- the gog product id whose payload is parsed
    """
    db_cursor = db_connection.execute('SELECT gp_int_json_payload FROM gog_products WHERE gp_id = ?', (product_id, ))
    json_payload = db_cursor.fetchone()[0]
    # OrderedDict preserves the API's key order in the parsed payload
    json_parsed = json.loads(json_payload, object_pairs_hook=OrderedDict)

    #extract installer entries
    json_parsed_installers = json_parsed['downloads']['installers']
    #extract patch entries
    json_parsed_patches = json_parsed['downloads']['patches']
    #extract language_packs entries
    json_parsed_language_packs = json_parsed['downloads']['language_packs']
    #extract bonus_content entries
    json_parsed_bonus_content = json_parsed['downloads']['bonus_content']

    #process installer entries
    # collect the PKs of all currently listed (not removed) installer rows;
    # entries still present in the payload are removed from this list below,
    # so whatever remains must be marked as removed
    db_cursor.execute('SELECT gf_int_nr FROM gog_files WHERE gf_int_id = ? '
                      'AND gf_int_download_type = "installer" AND gf_int_removed IS NULL', (product_id,))
    listed_installer_pks = [pk_result[0] for pk_result in db_cursor.fetchall()]

    for installer_entry in json_parsed_installers:
        installer_id = installer_entry['id']
        installer_product_name = installer_entry['name'].strip()
        installer_os = installer_entry['os']
        installer_language = installer_entry['language']
        # 'version' can be JSON null -> .strip() raises AttributeError
        try:
            installer_version = installer_entry['version'].strip()
        except AttributeError:
            installer_version = None
        installer_total_size = installer_entry['total_size']

        for installer_file in installer_entry['files']:
            installer_file_id = installer_file['id']
            installer_file_size = installer_file['size']

            # NULL never matches '=' in SQL, hence the two query variants
            if installer_version is not None:
                db_cursor.execute('SELECT gf_int_nr FROM gog_files WHERE gf_int_id = ? AND gf_int_download_type = "installer" AND gf_id = ? '
                                  'AND gf_os = ? AND gf_language = ? AND gf_version = ? AND gf_file_id = ? AND gf_file_size = ? AND gf_int_removed IS NULL',
                                  (product_id, installer_id, installer_os, installer_language, installer_version, installer_file_id, installer_file_size))
            else:
                db_cursor.execute('SELECT gf_int_nr FROM gog_files WHERE gf_int_id = ? AND gf_int_download_type = "installer" AND gf_id = ? '
                                  'AND gf_os = ? AND gf_language = ? AND gf_version IS NULL AND gf_file_id = ? AND gf_file_size = ? AND gf_int_removed IS NULL',
                                  (product_id, installer_id, installer_os, installer_language, installer_file_id, installer_file_size))
            entry_pk = db_cursor.fetchone()

            if entry_pk is None:
                #gf_int_nr, gf_int_added, gf_int_removed, gf_int_id, gf_int_download_type,
                #gf_id, gf_name, gf_os, gf_language, gf_version,
                #gf_type, gf_count, gf_total_size, gf_file_id, gf_file_size
                db_cursor.execute(INSERT_FILES_QUERY, (None, datetime.now(), None, product_id, 'installer',
                                                       installer_id, installer_product_name, installer_os, installer_language, installer_version,
                                                       None, None, installer_total_size, installer_file_id, installer_file_size))
                #no need to print the os here, as it's included in the installer_id
                logger.info(f'FQ +++ Added DB entry for {product_id}: {installer_product_name}, {installer_id}, {installer_version}.')
            else:
                logger.debug(f'FQ >>> Found an existing entry for {product_id}: {installer_product_name}, {installer_id}, {installer_version}.')
                listed_installer_pks.remove(entry_pk[0])

    # anything left in the list was not seen in the payload -> mark removed
    if len(listed_installer_pks) > 0:
        for removed_pk in listed_installer_pks:
            db_cursor.execute('UPDATE gog_files SET gf_int_removed = ? WHERE gf_int_nr = ? AND gf_int_removed IS NULL', (datetime.now(), removed_pk))
        logger.info(f'FQ --- Marked some installer entries as removed for {product_id}')

    #process patch entries
    db_cursor.execute('SELECT gf_int_nr FROM gog_files WHERE gf_int_id = ? '
                      'AND gf_int_download_type = "patch" AND gf_int_removed IS NULL', (product_id,))
    listed_patch_pks = [pk_result[0] for pk_result in db_cursor.fetchall()]

    for patch_entry in json_parsed_patches:
        patch_id = patch_entry['id']
        patch_product_name = patch_entry['name'].strip()
        patch_os = patch_entry['os']
        patch_language = patch_entry['language']
        try:
            patch_version = patch_entry['version'].strip()
        except AttributeError:
            patch_version = None
        #replace blank patch version with None (blanks happens with patches, but not with installers)
        if patch_version == '': patch_version = None
        patch_total_size = patch_entry['total_size']

        for patch_file in patch_entry['files']:
            patch_file_id = patch_file['id']
            patch_file_size = patch_file['size']

            if patch_version is not None:
                db_cursor.execute('SELECT gf_int_nr FROM gog_files WHERE gf_int_id = ? AND gf_int_download_type = "patch" AND gf_id = ? '
                                  'AND gf_os = ? AND gf_language = ? AND gf_version = ? AND gf_file_id = ? AND gf_file_size = ? AND gf_int_removed IS NULL',
                                  (product_id, patch_id, patch_os, patch_language, patch_version, patch_file_id, patch_file_size))
            else:
                db_cursor.execute('SELECT gf_int_nr FROM gog_files WHERE gf_int_id = ? AND gf_int_download_type = "patch" AND gf_id = ? '
                                  'AND gf_os = ? AND gf_language = ? AND gf_version IS NULL AND gf_file_id = ? AND gf_file_size = ? AND gf_int_removed IS NULL',
                                  (product_id, patch_id, patch_os, patch_language, patch_file_id, patch_file_size))
            entry_pk = db_cursor.fetchone()

            if entry_pk is None:
                #gf_int_nr, gf_int_added, gf_int_removed, gf_int_id, gf_int_download_type,
                #gf_id, gf_name, gf_os, gf_language, gf_version,
                #gf_type, gf_count, gf_total_size, gf_file_id, gf_file_size
                db_cursor.execute(INSERT_FILES_QUERY, (None, datetime.now(), None, product_id, 'patch',
                                                       patch_id, patch_product_name, patch_os, patch_language, patch_version,
                                                       None, None, patch_total_size, patch_file_id, patch_file_size))
                #no need to print the os here, as it's included in the patch_id
                logger.info(f'FQ +++ Added DB entry for {product_id}: {patch_product_name}, {patch_id}, {patch_version}.')
            else:
                logger.debug(f'FQ >>> Found an existing entry for {product_id}: {patch_product_name}, {patch_id}, {patch_version}.')
                listed_patch_pks.remove(entry_pk[0])

    if len(listed_patch_pks) > 0:
        for removed_pk in listed_patch_pks:
            db_cursor.execute('UPDATE gog_files SET gf_int_removed = ? WHERE gf_int_nr = ? AND gf_int_removed IS NULL', (datetime.now(), removed_pk))
        logger.info(f'FQ --- Marked some patch entries as removed for {product_id}')

    #process language_packs entries
    db_cursor.execute('SELECT gf_int_nr FROM gog_files WHERE gf_int_id = ? '
                      'AND gf_int_download_type = "language_packs" AND gf_int_removed IS NULL', (product_id,))
    listed_language_packs_pks = [pk_result[0] for pk_result in db_cursor.fetchall()]

    for language_pack_entry in json_parsed_language_packs:
        language_pack_id = language_pack_entry['id']
        language_pack_product_name = language_pack_entry['name'].strip()
        language_pack_os = language_pack_entry['os']
        language_pack_language = language_pack_entry['language']
        try:
            language_pack_version = language_pack_entry['version'].strip()
        except AttributeError:
            language_pack_version = None
        language_pack_total_size = language_pack_entry['total_size']

        for language_pack_file in language_pack_entry['files']:
            language_pack_file_id = language_pack_file['id']
            language_pack_file_size = language_pack_file['size']

            if language_pack_version is not None:
                db_cursor.execute('SELECT gf_int_nr FROM gog_files WHERE gf_int_id = ? AND gf_int_download_type = "language_packs" AND gf_id = ? '
                                  'AND gf_os = ? AND gf_language = ? AND gf_version = ? AND gf_file_id = ? AND gf_file_size = ? AND gf_int_removed IS NULL',
                                  (product_id, language_pack_id, language_pack_os, language_pack_language, language_pack_version,
                                   language_pack_file_id, language_pack_file_size))
            else:
                db_cursor.execute('SELECT gf_int_nr FROM gog_files WHERE gf_int_id = ? AND gf_int_download_type = "language_packs" AND gf_id = ? '
                                  'AND gf_os = ? AND gf_language = ? AND gf_version IS NULL AND gf_file_id = ? AND gf_file_size = ? AND gf_int_removed IS NULL',
                                  (product_id, language_pack_id, language_pack_os, language_pack_language,
                                   language_pack_file_id, language_pack_file_size))
            entry_pk = db_cursor.fetchone()

            if entry_pk is None:
                #gf_int_nr, gf_int_added, gf_int_removed, gf_int_id, gf_int_download_type, gf_id,
                #gf_name, gf_os, gf_language, gf_version,
                #gf_type, gf_count, gf_total_size, gf_file_id, gf_file_size
                db_cursor.execute(INSERT_FILES_QUERY, (None, datetime.now(), None, product_id, 'language_packs', language_pack_id,
                                                       language_pack_product_name, language_pack_os, language_pack_language, language_pack_version,
                                                       None, None, language_pack_total_size, language_pack_file_id, language_pack_file_size))
                #no need to print the os here, as it's included in the patch_id
                logger.info(f'FQ +++ Added DB entry for {product_id}: {language_pack_product_name}, {language_pack_id}, {language_pack_version}.')
            else:
                logger.debug(f'FQ >>> Found an existing entry for {product_id}: {language_pack_product_name}, {language_pack_id}, {language_pack_version}.')
                listed_language_packs_pks.remove(entry_pk[0])

    if len(listed_language_packs_pks) > 0:
        for removed_pk in listed_language_packs_pks:
            db_cursor.execute('UPDATE gog_files SET gf_int_removed = ? WHERE gf_int_nr = ? AND gf_int_removed IS NULL', (datetime.now(), removed_pk))
        logger.info(f'FQ --- Marked some language_pack entries as removed for {product_id}')

    #process bonus_content entries
    db_cursor.execute('SELECT gf_int_nr FROM gog_files WHERE gf_int_id = ? '
                      'AND gf_int_download_type = "bonus_content" AND gf_int_removed IS NULL', (product_id,))
    listed_bonus_content_pks = [pk_result[0] for pk_result in db_cursor.fetchall()]

    for bonus_content_entry in json_parsed_bonus_content:
        bonus_content_id = bonus_content_entry['id']
        bonus_content_product_name = bonus_content_entry['name'].strip()
        #bonus content type 'guides & reference ' has a trailing space
        bonus_content_type = bonus_content_entry['type'].strip()
        bonus_content_count = bonus_content_entry['count']
        bonus_content_total_size = bonus_content_entry['total_size']

        for bonus_content_file in bonus_content_entry['files']:
            bonus_content_file_id = bonus_content_file['id']
            bonus_content_file_size = bonus_content_file['size']

            # bonus content carries no os/language/version; matched by type/count
            db_cursor.execute('SELECT gf_int_nr FROM gog_files WHERE gf_int_id = ? AND gf_int_download_type = "bonus_content" AND gf_id = ? '
                              'AND gf_type = ? AND gf_count = ? AND gf_file_id = ? AND gf_file_size = ? AND gf_int_removed IS NULL',
                              (product_id, bonus_content_id, bonus_content_type, bonus_content_count, bonus_content_file_id, bonus_content_file_size))
            entry_pk = db_cursor.fetchone()

            if entry_pk is None:
                #gf_int_nr, gf_int_added, gf_int_removed, gf_int_id, gf_int_download_type,
                #gf_id, gf_name, gf_os, gf_language, gf_version,
                #gf_type, gf_count, gf_total_size, gf_file_id, gf_file_size
                db_cursor.execute(INSERT_FILES_QUERY, (None, datetime.now(), None, product_id, 'bonus_content',
                                                       bonus_content_id, bonus_content_product_name, None, None, None,
                                                       bonus_content_type, bonus_content_count, bonus_content_total_size,
                                                       bonus_content_file_id, bonus_content_file_size))
                #print the entry type, since bonus_content entries are not versioned
                logger.info(f'FQ +++ Added DB entry for {product_id}: {bonus_content_product_name}, {bonus_content_id}, {bonus_content_type}.')
            else:
                logger.debug(f'FQ >>> Found an existing entry for {product_id}: {bonus_content_product_name}, {bonus_content_id}, {bonus_content_type}.')
                listed_bonus_content_pks.remove(entry_pk[0])

    if len(listed_bonus_content_pks) > 0:
        for removed_pk in listed_bonus_content_pks:
            db_cursor.execute('UPDATE gog_files SET gf_int_removed = ? WHERE gf_int_nr = ? AND gf_int_removed IS NULL', (datetime.now(), removed_pk))
        logger.info(f'FQ --- Marked some bonus_content entries as removed for {product_id}')

    #batch commit
    db_connection.commit()
def gog_products_bulk_query(product_id, scan_mode, session, db_connection):
#generate a string of comma separated ids in the current batch
product_ids_string = ','.join([str(product_id_value) for product_id_value in range(product_id, product_id + IDS_IN_BATCH)])
logger.debug(f'BQ >>> Processing the following product_id string batch: {product_ids_string}.')
bulk_products_url = f'https://api.gog.com/products?ids={product_ids_string}'
try:
response = session.get(bulk_products_url, timeout=HTTP_TIMEOUT)
logger.debug(f'BQ >>> HTTP response code: {response.status_code}.')
if response.status_code == 200 and response.text != '[]':
logger.info(f'BQ >>> Found something in the {product_id} <-> {product_id + IDS_IN_BATCH - 1} range...')
json_parsed = json.loads(response.text, object_pairs_hook=OrderedDict)
| |
# Copyright 2016 Brocade Communications System, Inc.
# All Rights Reserved.
#
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import yaml
from keystoneauth1 import exceptions
from keystoneauth1 import identity
from keystoneauth1.identity import v3
from keystoneauth1 import session
from neutronclient.common import exceptions as nc_exceptions
from neutronclient.v2_0 import client as neutron_client
from oslo_config import cfg
from oslo_log import log as logging
from tacker._i18n import _
from tacker.common import log
from tacker import context as t_context
from tacker.extensions import nfvo
from tacker.keymgr import API as KEYMGR_API
from tacker.mistral import mistral_client
from tacker.nfvo.drivers.vim import abstract_vim_driver
from tacker.nfvo.drivers.vnffg import abstract_vnffg_driver
from tacker.nfvo.drivers.workflow import workflow_generator
from tacker.nfvo.nfvo_plugin import NfvoPlugin
from tacker.plugins.common import constants
from tacker.vnfm import keystone
LOG = logging.getLogger(__name__)
CONF = cfg.CONF

# [vim_keys] options: where and how VIM credentials are persisted
OPTS = [cfg.StrOpt('openstack', default='/etc/tacker/vim/fernet_keys',
                   help='Dir.path to store fernet keys.'),
        cfg.BoolOpt('use_barbican', default=False,
                    help=_('Use barbican to encrypt vim password if True, '
                           'save vim credentials in local file system '
                           'if False'))
]

# same params as we used in ping monitor driver
OPENSTACK_OPTS = [
    cfg.StrOpt('count', default='1',
               help=_('Number of ICMP packets to send')),
    cfg.StrOpt('timeout', default='1',
               help=_('Number of seconds to wait for a response')),
    cfg.StrOpt('interval', default='1',
               help=_('Number of seconds to wait between packets'))
]
cfg.CONF.register_opts(OPTS, 'vim_keys')
cfg.CONF.register_opts(OPENSTACK_OPTS, 'vim_monitor')

# resource types get_vim_resource_id() can resolve: maps a type name to the
# client class, list call name, response key and filter attribute to use
_VALID_RESOURCE_TYPES = {'network': {'client': neutron_client.Client,
                                     'cmd': 'list_networks',
                                     'vim_res_name': 'networks',
                                     'filter_attr': 'name'
                                     }
                         }

# translation of tacker flow-classifier fields to networking-sfc parameters
FC_MAP = {'name': 'name',
          'description': 'description',
          'eth_type': 'ethertype',
          'ip_src_prefix': 'source_ip_prefix',
          'ip_dst_prefix': 'destination_ip_prefix',
          'source_port_min': 'source_port_range_min',
          'source_port_max': 'source_port_range_max',
          'destination_port_min': 'destination_port_range_min',
          'destination_port_max': 'destination_port_range_max',
          'network_src_port_id': 'logical_source_port',
          'network_dst_port_id': 'logical_destination_port'}

# keys looked up in VNF dicts when building service chains
CONNECTION_POINT = 'connection_points'
SFC_ENCAP = 'sfc_encap'
def config_opts():
    """Expose this module's option groups for oslo config generation."""
    option_groups = [
        ('vim_keys', OPTS),
        ('vim_monitor', OPENSTACK_OPTS),
    ]
    return option_groups
class OpenStack_Driver(abstract_vim_driver.VimAbstractDriver,
abstract_vnffg_driver.VnffgAbstractDriver):
"""Driver for OpenStack VIM
OpenStack driver handles interactions with local as well as
remote OpenStack instances. The driver invokes keystone service for VIM
authorization and validation. The driver is also responsible for
discovering placement attributes such as regions, availability zones
"""
def __init__(self):
self.keystone = keystone.Keystone()
self.keystone.create_key_dir(CONF.vim_keys.openstack)
def get_type(self):
return 'openstack'
def get_name(self):
return 'OpenStack VIM Driver'
def get_description(self):
return 'OpenStack VIM Driver'
def authenticate_vim(self, vim_obj):
"""Validate VIM auth attributes
Initialize keystoneclient with provided authentication attributes.
"""
verify = 'True' == vim_obj['auth_cred'].get('cert_verify', 'True') \
or False
auth_url = vim_obj['auth_url']
keystone_version = NfvoPlugin.validate_keystone_auth_url(
auth_url=auth_url,
verify=verify)
auth_cred = self._get_auth_creds(vim_obj, keystone_version)
return self._initialize_keystone(auth_cred)
def _get_auth_creds(self, vim_obj, keystone_version):
auth_cred = vim_obj['auth_cred']
vim_project = vim_obj['vim_project']
auth_cred['project_id'] = vim_project.get('id')
auth_cred['project_name'] = vim_project.get('name')
auth_cred['project_domain_name'] = vim_project.get(
'project_domain_name')
auth_cred['auth_url'] = vim_obj['auth_url']
if keystone_version not in auth_cred['auth_url']:
auth_cred['auth_url'] = auth_cred['auth_url'] + '/' + \
keystone_version
return auth_cred
def _get_auth_plugin(self, **kwargs):
auth_plugin = v3.Password(**kwargs)
return auth_plugin
def _initialize_keystone(self, auth):
ks_client = self.keystone.initialize_client(**auth)
return ks_client
def _find_regions(self, ks_client):
region_info = ks_client.regions.list()
region_list = [region.id for region in region_info]
return region_list
def discover_placement_attr(self, vim_obj, ks_client):
"""Fetch VIM placement information
Attributes can include regions, AZ.
"""
try:
regions_list = self._find_regions(ks_client)
except (exceptions.Unauthorized, exceptions.BadRequest) as e:
LOG.warning("Authorization failed for user")
raise nfvo.VimUnauthorizedException(message=e.message)
vim_obj['placement_attr'] = {'regions': regions_list}
return vim_obj
@log.log
def register_vim(self, vim_obj):
"""Validate and set VIM placements."""
if 'key_type' in vim_obj['auth_cred']:
vim_obj['auth_cred'].pop('key_type')
if 'secret_uuid' in vim_obj['auth_cred']:
vim_obj['auth_cred'].pop('secret_uuid')
ks_client = self.authenticate_vim(vim_obj)
self.discover_placement_attr(vim_obj, ks_client)
self.encode_vim_auth(vim_obj['id'], vim_obj['auth_cred'])
LOG.debug('VIM registration completed for %s', vim_obj)
@log.log
def deregister_vim(self, vim_obj):
"""Deregister VIM from NFVO
Delete VIM keys from file system
"""
self.delete_vim_auth(vim_obj['id'], vim_obj['auth_cred'])
@log.log
def delete_vim_auth(self, vim_id, auth):
"""Delete vim information
Delete vim key stored in file system
"""
LOG.debug('Attempting to delete key for vim id %s', vim_id)
if auth.get('key_type') == 'barbican_key':
try:
k_context = t_context.generate_tacker_service_context()
keystone_conf = CONF.keystone_authtoken
secret_uuid = auth['secret_uuid']
keymgr_api = KEYMGR_API(keystone_conf.auth_url)
keymgr_api.delete(k_context, secret_uuid)
LOG.debug('VIM key deleted successfully for vim %s',
vim_id)
except Exception as ex:
LOG.warning('VIM key deletion failed for vim %s due to %s',
vim_id,
ex)
raise
else:
key_file = os.path.join(CONF.vim_keys.openstack, vim_id)
try:
os.remove(key_file)
LOG.debug('VIM key deleted successfully for vim %s',
vim_id)
except OSError:
LOG.warning('VIM key deletion failed for vim %s',
vim_id)
@log.log
def encode_vim_auth(self, vim_id, auth):
"""Encode VIM credentials
Store VIM auth using fernet key encryption
"""
fernet_key, fernet_obj = self.keystone.create_fernet_key()
encoded_auth = fernet_obj.encrypt(auth['password'].encode('utf-8'))
auth['password'] = encoded_auth
if CONF.vim_keys.use_barbican:
try:
k_context = t_context.generate_tacker_service_context()
keystone_conf = CONF.keystone_authtoken
keymgr_api = KEYMGR_API(keystone_conf.auth_url)
secret_uuid = keymgr_api.store(k_context, fernet_key)
auth['key_type'] = 'barbican_key'
auth['secret_uuid'] = secret_uuid
LOG.debug('VIM auth successfully stored for vim %s',
vim_id)
except Exception as ex:
LOG.warning('VIM key creation failed for vim %s due to %s',
vim_id,
ex)
raise
else:
auth['key_type'] = 'fernet_key'
key_file = os.path.join(CONF.vim_keys.openstack, vim_id)
try:
with open(key_file, 'wb') as f:
f.write(fernet_key)
LOG.debug('VIM auth successfully stored for vim %s',
vim_id)
except IOError:
raise nfvo.VimKeyNotFoundException(vim_id=vim_id)
@log.log
def get_vim_resource_id(self, vim_obj, resource_type, resource_name):
"""Locates openstack resource by type/name and returns ID
:param vim_obj: VIM info used to access openstack instance
:param resource_type: type of resource to find
:param resource_name: name of resource to locate
:return: ID of resource
"""
if resource_type in _VALID_RESOURCE_TYPES:
res_cmd_map = _VALID_RESOURCE_TYPES[resource_type]
client_type = res_cmd_map['client']
cmd = res_cmd_map['cmd']
filter_attr = res_cmd_map.get('filter_attr')
vim_res_name = res_cmd_map['vim_res_name']
else:
raise nfvo.VimUnsupportedResourceTypeException(type=resource_type)
client = self._get_client(vim_obj, client_type)
cmd_args = {}
if filter_attr:
cmd_args[filter_attr] = resource_name
try:
resources = getattr(client, "%s" % cmd)(**cmd_args)[vim_res_name]
LOG.debug('resources output %s', resources)
except Exception:
raise nfvo.VimGetResourceException(
cmd=cmd, name=resource_name, type=resource_type)
if len(resources) > 1:
raise nfvo.VimGetResourceNameNotUnique(
cmd=cmd, name=resource_name)
elif len(resources) < 1:
raise nfvo.VimGetResourceNotFoundException(
cmd=cmd, name=resource_name)
return resources[0]['id']
@log.log
def _get_client(self, vim_obj, client_type):
"""Initializes and returns an openstack client
:param vim_obj: VIM Information
:param client_type: openstack client to initialize
:return: initialized client
"""
verify = 'True' == vim_obj.get('cert_verify', 'True') or False
auth_url = vim_obj['auth_url']
keystone_version = NfvoPlugin.validate_keystone_auth_url(
auth_url=auth_url,
verify=verify)
auth_cred = self._get_auth_creds(vim_obj, keystone_version)
auth_plugin = self._get_auth_plugin(**auth_cred)
sess = session.Session(auth=auth_plugin)
return client_type(session=sess)
def _translate_ip_protocol(self, ip_proto):
if ip_proto == '1':
return 'icmp'
elif ip_proto == '6':
return 'tcp'
elif ip_proto == '17':
return 'udp'
else:
return None
def _create_classifier_params(self, fc):
classifier_params = {}
for field in fc:
if field in FC_MAP:
classifier_params[FC_MAP[field]] = fc[field]
elif field == 'ip_proto':
protocol = self._translate_ip_protocol(str(fc[field]))
if not protocol:
raise ValueError('protocol %s not supported' % fc[field])
classifier_params['protocol'] = protocol
else:
LOG.warning("flow classifier %s not supported by "
"networking-sfc driver", field)
return classifier_params
def create_flow_classifier(self, name, fc, auth_attr=None):
if not auth_attr:
LOG.warning("auth information required for n-sfc driver")
return None
fc['name'] = name
LOG.debug('fc passed is %s', fc)
sfc_classifier_params = self._create_classifier_params(fc)
LOG.debug('sfc_classifier_params is %s', sfc_classifier_params)
if len(sfc_classifier_params) > 0:
neutronclient_ = NeutronClient(auth_attr)
fc_id = neutronclient_.flow_classifier_create(
sfc_classifier_params)
return fc_id
raise ValueError('empty match field for input flow classifier')
def create_chain(self, name, path_id, fc_ids, vnfs, symmetrical=False,
correlation='mpls', auth_attr=None):
if not auth_attr:
LOG.warning("auth information required for n-sfc driver")
return None
neutronclient_ = NeutronClient(auth_attr)
port_pairs_list = neutronclient_.port_pair_list()
port_pair_groups_list = neutronclient_.port_pair_group_list()
port_chains_list = neutronclient_.port_chain_list()
port_pair_group_list = []
new_ppgs = []
new_pps = []
try:
for vnf in vnfs:
# TODO(s3wong): once scaling is in place and VNFFG supports it
# that model needs to be implemented to concatenate all
# port-pairs into the port-pair-group
# port pair group could include port-pairs from different VNFs
if CONNECTION_POINT not in vnf:
LOG.warning("Chain creation failed due to missing "
"connection point info in VNF "
"%(vnfname)s", {'vnfname': vnf['name']})
return None
cp_list = vnf[CONNECTION_POINT]
num_cps = len(cp_list)
if num_cps not in [1, 2]:
LOG.warning("Chain creation failed due to wrong number of "
"connection points: expected [1 | 2], got "
"%(cps)d", {'cps': num_cps})
return None
if num_cps == 1:
ingress = cp_list[0]
egress = cp_list[0]
else:
ingress = cp_list[0]
egress = cp_list[1]
# If sfc_encap is True, pp_corr is set to correlation to
# make use of correlation, otherwise pp_corr is set to None
# to install SFC proxy
sfc_encap = vnf.get(SFC_ENCAP, True)
pp_corr = correlation if sfc_encap else None
# valid_port_in_use function is used to find out the
# port_pair_group_id of the existing port pair group
# which was created by ingress and egress of current VNF
port_pair_group_id = self.valid_port_in_use(
ingress, egress, port_pairs_list, port_pair_groups_list)
if not port_pair_group_id:
# create the new port pair group if it is not existed
port_pair = dict()
port_pair['name'] = vnf['name'] + '-connection-points'
port_pair['description'] = 'port pair for ' + vnf['name']
port_pair['ingress'] | |
<gh_stars>0
import copy
import numpy as np
import os
import random
import sys
import time
import warnings
from contextlib import redirect_stdout
import ipdb
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
# To deactivate warnings: https://github.com/tensorflow/tensorflow/issues/7778
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# No more available
# import keras.backend.tensorflow_backend as ktf
from keras.callbacks import Callback, ModelCheckpoint
from keras.layers import Input, Conv2D, add, Activation
from keras.layers import Lambda
from keras.layers.advanced_activations import ELU
from keras.layers.merge import concatenate
from keras.models import Model
# https://github.com/tensorflow/tensorflow/issues/23728
# from keras.optimizers import Adam
from tensorflow.keras.optimizers import Adam
from keras.regularizers import l2
from keras.utils.data_utils import Sequence
import sparsetools as sp
from ISPy.util import gentools
# ==================================================================================
class DataGenerator(Sequence):
    """Generates data for training a neural network from a STiC model.

    Each batch consists of random, optionally jittered and
    noise-augmented square patches cut from the input Stokes cubes
    (network input) and the corresponding model cubes (network target).

    :Authors:
        <NAME> (ISP/SU 2020)
    """

    def __init__(self, datasize, dx, batch_size, logtau, stokelist, cubelist, noise):
        """Store the dataset and the batch geometry.

        :param datasize: nominal number of training samples per epoch
        :param dx: side length (pixels) of the square patches
        :param batch_size: samples per batch
        :param logtau: log(tau) grid of the model cubes
        :param stokelist: sequence of Stokes cubes [nl, ny, nx]
        :param cubelist: sequence of model cubes [ntau, ny, nx]
        :param noise: sigma of the Gaussian noise added to the input
        """
        self.n_training_orig = datasize
        self.batch_size = batch_size
        self.dx = dx
        self.noise = noise  # applied to the Stokes patches in __data_generation
        self.logtau = logtau
        self.stokelist = np.array(stokelist)
        self.cubelist = np.array(cubelist)
        self.batchs_per_epoch_training = int(self.n_training_orig / self.batch_size)
        self.n_training = self.batchs_per_epoch_training * self.batch_size

    def __getitem__(self, index):
        'Generate one batch of data'
        input_train_get, output_train_get = self.__data_generation(self)
        return input_train_get, output_train_get

    def __len__(self):
        'Denotes the number of batches per epoch'
        return self.batchs_per_epoch_training

    def __data_generation(self, list_IDs_temp):
        'Generates data containing batch_size samples'
        # Imports hoisted out of the per-pixel loops below; the original
        # re-imported scipy.ndimage once per tau slice per sample.
        import scipy.ndimage as nd
        from ISPy.util.azimuth import BTAZI2BQBU_cube

        stokes = self.stokelist[0, :]
        cube = self.cubelist[0, :]
        nl, ny, nx = stokes.shape
        ntau, ny, nx = cube.shape
        Lx = nx
        Ly = ny
        dx = self.dx
        # Augmentation settings
        jitterOption = True   # apply per-slice integer-pixel jitter
        mulitplyJitter = 2    # maximum jitter amplitude in pixels
        mynoisecube = 1e-2    # relative noise added to the target cube
        input_train = np.zeros((self.batch_size, dx, dx, int(nl)))
        output_train = np.zeros((self.batch_size, dx, dx, int(ntau)))
        for j in range(self.batch_size):
            randi = random.randint(0, self.stokelist.shape[0] - 1)
            stokes = self.stokelist[randi, :]
            # NOTE(review): the target `cube` always comes from
            # cubelist[0] while the input is drawn from stokelist[randi]
            # -- confirm this is intended when more than one training
            # cube is supplied.
            xpos = random.randint(0, Lx - dx)
            ypos = random.randint(0, Ly - dx)
            rota = random.randint(0, 3)
            ni = len(self.logtau)
            ministokes = stokes[:, ypos:ypos + dx, xpos:xpos + dx]
            lenq = cube.shape[0]
            minicube = np.zeros((lenq, ministokes.shape[1], ministokes.shape[2]))
            for iq in range(lenq):
                jitterX = random.randint(-1 * mulitplyJitter, +1 * mulitplyJitter)
                jitterY = random.randint(-1 * mulitplyJitter, +1 * mulitplyJitter)
                if jitterOption is False:
                    jitterY, jitterX = 0, 0
                minicube[iq, :, :] = nd.shift(cube[iq, ypos:ypos + dx, xpos:xpos + dx],
                                              (jitterY, jitterX), mode='nearest')
            # Extra noise: multiplicative on the target, additive on the input
            minicube = minicube[:] + minicube * np.random.normal(0., mynoisecube, (cube.shape[0], dx, dx))
            ministokes = ministokes[:] + np.random.normal(0., self.noise, (stokes.shape[0], dx, dx))
            # Convert (Bhor, azimuth) blocks to (BQ, BU) so the network
            # learns single-valued quantities
            minicube[ni * 4:5 * ni, :, :], minicube[ni * 5:6 * ni, :, :] = BTAZI2BQBU_cube(
                minicube[ni * 4:5 * ni, :, :], minicube[ni * 5:6 * ni, :, :])
            input_train[j, :, :, :] = gentools.rotate_cube(np.swapaxes(ministokes, 0, 2), rota)
            output_train[j, :, :, :] = gentools.rotate_cube(np.swapaxes(minicube, 0, 2), rota)
        return input_train, output_train
# ==================================================================================
def network1D(nx, ny, nd, nq, activation='relu', n_filters=64, l2_reg=1e-7):
    """ Neural network architecture.

    Builds a fully convolutional network (1x1 kernels, so effectively a
    per-pixel 1D network) with six parallel branches -- one per output
    quantity -- whose results are concatenated along the channel axis.

    :param nx: patch width in pixels
    :param ny: patch height in pixels
    :param nd: number of input channels; sliced below into 4 equal
        Stokes blocks of nd/4 channels each
    :param nq: total number of output channels (6 quantities combined)
    :param activation: not referenced in this body; kept for interface
        compatibility
    :param n_filters: filters per residual block
    :param l2_reg: L2 kernel-regularization factor
    :return: an uncompiled keras Model

    :Authors:
        <NAME> (ISP/SU 2020)
    """
    # Two conv layers with an ELU in between; no residual connection.
    def minires(inputs, n_filters, kernel=1):
        x = Conv2D(int(n_filters), (kernel, kernel), padding='valid',
                   kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg))(inputs)
        x = ELU(alpha=1.0)(x)
        x = Conv2D(n_filters, (kernel, kernel), padding='valid',
                   kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg))(x)
        return x
    # Residual block: conv -> ELU -> conv -> ELU, plus a skip connection
    # from the first activation.
    def myblock(inputs, n_filters, kernel=1):
        x = Conv2D(n_filters, (kernel, kernel), padding='valid',
                   kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg))(inputs)
        xo = ELU(alpha=1.0)(x)
        x = Conv2D(n_filters, (kernel, kernel), padding='valid',
                   kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg))(xo)
        x = ELU(alpha=1.0)(x)
        x = add([x, xo])
        return x
    inputs = Input(shape=(nx, ny, nd))  # depth goes last in TensorFlow
    nd4 = int(nd/4)  # channels per Stokes block (I, Q, U, V)
    # TEMP branch: output shifted by a constant +5.0
    x1 = myblock(inputs, n_filters)
    x1 = minires(x1, int(nq / 6))
    # x1 = Activation('elu')(x1)
    x1 = Lambda(lambda x: x + 5.0)(x1)
    # VLOS branch
    x2 = myblock(inputs, n_filters)
    x2 = minires(x2, int(nq / 6))
    # VTURB branch
    x3 = myblock(inputs, n_filters)
    x3 = minires(x3, int(nq / 6))
    # x3 = Activation('relu')(x3)
    # BLONG branch: the 4th (last) quarter of the input channels is
    # amplified by 100 -- presumably to boost the weak Stokes V signal;
    # TODO(review) confirm the scaling factor against training data.
    xV = Lambda(lambda x: concatenate([x[:,:,:, 0:nd4], x[:,:,:, 1*nd4:2*nd4],
                x[:,:,:, 2*nd4:3*nd4], 100*x[:,:,:, 3*nd4:]]) )(inputs)
    x4 = myblock(xV, n_filters)
    x4 = minires(x4, int(nq / 6))
    # BHOR - BQ branch: 2nd quarter of the input amplified by 100
    xQ = Lambda(lambda x: concatenate([x[:,:,:, 0:nd4], 100*x[:,:,:, 1*nd4:2*nd4],
                x[:,:,:, 2*nd4:3*nd4], x[:,:,:, 3*nd4:]]) )(inputs)
    x5 = myblock(xQ, n_filters)
    x5 = minires(x5, int(nq / 6))
    # BHOR - BU branch: 3rd quarter of the input amplified by 100
    xU = Lambda(lambda x: concatenate([x[:,:,:, 0:nd4], x[:,:,:, 1*nd4:2*nd4],
                100*x[:,:,:, 2*nd4:3*nd4], x[:,:,:, 3*nd4:]]) )(inputs)
    x6 = myblock(xU, n_filters)
    x6 = minires(x6, int(nq / 6))
    # Stack the six branch outputs along the channel axis.
    final = concatenate([x1, x2, x3, x4, x5, x6])
    return Model(inputs=inputs, outputs=final)
# ==================================================================================
class deep_network(object):
    """Deep neural network class: it defines the network, loads the
    weights, does the training and the predictions.

    :Authors:
        <NAME> (ISP/SU 2020)
    """

    def __init__(self, root, logtau, nl):
        """
        :param root: path prefix for the "<root>_weights.hdf5" file
        :param logtau: log(tau) grid the network predicts on
        :param nl: number of input (spectral) channels
        """
        self.root = root
        self.nl = nl
        self.logtau = logtau
        # 6 physical quantities, each sampled on the logtau grid
        self.ntau = len(self.logtau) * 6
        self.dx = 20              # training patch side length (pixels)
        self.nx, self.ny = self.dx, self.dx
        self.nworker = 16         # generator worker processes

    def define_network(self):
        """Build an untrained network with the stored geometry."""
        print("[INFO] Setting up network from scratch")
        self.model = network1D(self.nx, self.ny, int(self.nl), int(self.ntau))

    def read_network(self):
        """Build the network and load previously trained weights."""
        print("[INFO] Setting up network and loading weights {0}_weights.hdf5".format(self.root))
        self.model = network1D(self.nx, self.ny, int(self.nl), int(self.ntau))
        self.model.load_weights("{0}_weights.hdf5".format(self.root))

    def train(self, n_iterations, stokelist, cubelist, extranoise, learning_rate, batch_size,
              datasize):
        """Train the network for n_iterations epochs.

        :param n_iterations: number of epochs
        :param stokelist: input Stokes cubes (see DataGenerator)
        :param cubelist: target model cubes (see DataGenerator)
        :param extranoise: sigma of the noise added to the inputs
        :param learning_rate: Adam learning rate
        :param batch_size: samples per batch
        :param datasize: nominal training samples per epoch
        """
        self.n_training_orig = datasize
        self.batch_size = batch_size
        self.n_validation_orig = int(batch_size)
        self.lr = learning_rate
        self.noise = extranoise
        self.batchs_per_epoch_training = int(self.n_training_orig / self.batch_size)
        self.batchs_per_epoch_validation = int(self.n_validation_orig / self.batch_size)
        self.n_training = self.batchs_per_epoch_training * self.batch_size
        self.n_validation = self.batchs_per_epoch_validation * self.batch_size
        # NOTE(review): newer tf.keras spells this argument
        # 'learning_rate'; 'lr' is kept for the pinned TF version used
        # by this project -- confirm before upgrading TF.
        self.model.compile(loss='mean_absolute_error', optimizer=Adam(lr=self.lr))
        print("[INFO] Training network during {} epochs:".format(n_iterations))
        # Checkpoint after every epoch (save_best_only=False keeps the
        # latest weights regardless of validation loss).
        self.checkpointer = ModelCheckpoint(filepath="{0}_weights.hdf5".format(self.root),
                                            verbose=2, save_best_only=False)
        # Generators
        training_generator_class = DataGenerator(self.n_training_orig, self.dx, self.batch_size,
                                                 self.logtau, stokelist, cubelist, self.noise)
        validation_generator_class = DataGenerator(self.n_validation_orig, self.dx, self.batch_size,
                                                   self.logtau, stokelist, cubelist, self.noise)
        self.metrics = self.model.fit_generator(training_generator_class,
                                                self.batchs_per_epoch_training, epochs=n_iterations,
                                                callbacks=[self.checkpointer],
                                                validation_data=validation_generator_class,
                                                validation_steps=self.batchs_per_epoch_validation,
                                                use_multiprocessing=True, workers=self.nworker)

    def read_and_predict(self, inputdata):
        """Load the trained weights and run a prediction.

        :param inputdata: array whose transpose yields the network input
            (shape indices 1..3 give nl, ny, nx)
        :return: prediction of shape (1, nx, ny, 6 * len(self.logtau)),
            with the (BQ, BU) blocks transformed back to (Bhor, azimuth)
        """
        print("[INFO] Setting up network for predictions")
        self.nx = inputdata.shape[3]
        self.ny = inputdata.shape[2]
        self.nl = inputdata.shape[1]
        ntau_grid = len(self.logtau)
        self.ntau = ntau_grid * 6
        self.model = network1D(self.nx, self.ny, int(self.nl), int(self.ntau))
        print("[INFO] Loading network weights: {0}_weights.hdf5".format(self.root))
        self.model.load_weights("{0}_weights.hdf5".format(self.root))
        input_validation = np.zeros((1, self.nx, self.ny, self.nl), dtype='float32')
        input_validation[0, :, :, :] = inputdata.T[:, :, :, 0]
        start = time.time()
        out = self.model.predict(input_validation)
        end = time.time()
        # Fixed format spec: the original "{0:3.2}" used 2 significant
        # digits and printed >=10s runtimes in scientific notation.
        print("[INFO] Prediction took: {0:.2f} seconds".format(end - start))
        print("[INFO] Azimuth inverse transformation")
        from ISPy.util.azimuth import BQBU2BTAZI_cube
        # Inverse transformation of the (BQ, BU) blocks.  The grid
        # length was hard-coded to 9 (and 6*9=54); use len(self.logtau)
        # so any tau sampling works.
        out = np.reshape(out, (input_validation.shape[0], self.nx, self.ny, 6, ntau_grid))
        out[0, :, :, 4, :], out[0, :, :, 5, :] = BQBU2BTAZI_cube(out[0, :, :, 4, :], out[0, :, :, 5, :])
        out = np.reshape(out, (input_validation.shape[0], self.nx, self.ny, 6 * ntau_grid))
        return out
# ==================================================================================
class neural_estimator(object):
"""Creates a small neural network that can be trained with STiC results
to perform faster inversions on new data. Note: data index np.where(o.weights[:, 0] < 1.0)[0]
Example
-------
>>> from ISPy.util import neural_estimation as nst
>>> import sparsetools as sp
# Reading data in STiC format:
>>> model_train_list = ['model.nc']
>>> stokes_train_list = ['profiles.nc']
>>> logtau = [-7,-6,-5,-4,-3,-2,-1, 0, 1]
# Initializing the neural network
>>> myestimator = nst.neural_estimator()
>>> myestimator.train(name='network1',option='start',nepochs=40,model_train_list,stokes_train_list,logtau)
>>> myestimator.quickplot(filename ='testplot.pdf')
>>> dataprediction = 'newprofiles.nc'
>>> original_logtau = sp.model(model_train_list[0],0,0,0).ltau[0,0,0,:]
>>> myestimator.predict(name='network1',dataprediction,logtau,original_logtau,"model_output.nc")
:Authors:
<NAME> (ISP/SU 2020)
"""
def __init__(self):
# self.name = name
self.init = 0
self.num_params = 6
self.logtau = 0
self.nl = None
def predict(self, name, inputdata, logtau, original_logtau, nameoutput='model_neuralnetwork.nc', pgastop = 1.0):
"""It uses a pre-trained neural network with new observed data
Parameters
----------
name : str, optional
name of the network, by default 'network1'
inputdata : ncfile
input file in STiC format
logtau : list
logtau scale used to train the network
original_logtau : list
Final stratification of the model to do the interpolation
nameoutput : str, optional
name of the output model, by default 'model_neuralnetwork.nc'
Example
-------
>>> dataprediction = 'newprofiles.nc'
>>> original_logtau = sp.model(model_train_list[0],0,0,0).ltau[0,0,0,:]
>>> myestimator.prediction(name='network1',dataprediction,logtau,original_logtau,"model_output.nc")
"""
print('[INFO] Sending the data to the network')
o = sp.profile(inputdata)
idx = np.where(o.weights[:, 0] < 1.0)[0]
stokelist = np.array([np.concatenate([o.dat[0, :, :, idx, 0], 1e0 * o.dat[0, :, :, idx, 1],
1e0 * o.dat[0, :, :, idx, 2],
1e0 * o.dat[0, :, :, idx, 3]])])
print(stokelist.shape,'...')
self.nl = stokelist.shape[1]
self.deepl = deep_network(name, logtau, self.nl)
prediction = self.deepl.read_and_predict(stokelist)
nx, ny, dum = prediction[0, :, :, :].shape
prediction = np.reshape(prediction[0, :, :, :], (nx, ny, 6, len(logtau)))
noriginaltau = len(original_logtau)
# Fill the model with the prediction
print('[INFO] Writing in STiC format')
m = sp.model(nx=nx, ny=ny, nt=1, ndep=noriginaltau)
from tqdm import tqdm
for ix in tqdm(range(nx)):
for iy in range(ny):
temp = np.interp(original_logtau, logtau, np.abs(prediction[ix, iy, 0, :]))
vlos = np.interp(original_logtau, logtau, prediction[ix, iy, 1, :])
vturb = np.interp(original_logtau, logtau, np.abs(prediction[ix, iy, 2, :]))
Bln = np.interp(original_logtau, logtau, prediction[ix, iy, 3, :])
Bho = np.interp(original_logtau, logtau, np.abs(prediction[ix, iy, 4, :]))
Bazi = np.interp(original_logtau, logtau, prediction[ix, iy, 5, | |
The identity provider input for data plane
authentication.
:type data_plane_authentication_identity_input:
~azure.mgmt.recoveryservicessiterecovery.models.IdentityProviderInput
"""
_validation = {
'machine_name': {'required': True},
'authentication_identity_input': {'required': True},
'resource_access_identity_input': {'required': True},
}
_attribute_map = {
'machine_name': {'key': 'machineName', 'type': 'str'},
'machine_id': {'key': 'machineId', 'type': 'str'},
'bios_id': {'key': 'biosId', 'type': 'str'},
'authentication_identity_input': {'key': 'authenticationIdentityInput', 'type': 'IdentityProviderInput'},
'resource_access_identity_input': {'key': 'resourceAccessIdentityInput', 'type': 'IdentityProviderInput'},
'data_plane_authentication_identity_input': {'key': 'dataPlaneAuthenticationIdentityInput', 'type': 'IdentityProviderInput'},
}
def __init__(
self,
*,
machine_name: str,
authentication_identity_input: "IdentityProviderInput",
resource_access_identity_input: "IdentityProviderInput",
machine_id: Optional[str] = None,
bios_id: Optional[str] = None,
data_plane_authentication_identity_input: Optional["IdentityProviderInput"] = None,
**kwargs
):
super(AddRecoveryServicesProviderInputProperties, self).__init__(**kwargs)
self.machine_name = machine_name
self.machine_id = machine_id
self.bios_id = bios_id
self.authentication_identity_input = authentication_identity_input
self.resource_access_identity_input = resource_access_identity_input
self.data_plane_authentication_identity_input = data_plane_authentication_identity_input
class AddVCenterRequest(msrest.serialization.Model):
    """Request payload for adding a vCenter.

    :param properties: The properties of an add vCenter request.
    :type properties: ~azure.mgmt.recoveryservicessiterecovery.models.AddVCenterRequestProperties
    """

    _attribute_map = {
        'properties': {'key': 'properties', 'type': 'AddVCenterRequestProperties'},
    }

    def __init__(self, *, properties: Optional["AddVCenterRequestProperties"] = None, **kwargs):
        super().__init__(**kwargs)
        self.properties = properties
class AddVCenterRequestProperties(msrest.serialization.Model):
    """Payload describing the vCenter to add.

    :param friendly_name: The friendly name of the vCenter.
    :type friendly_name: str
    :param ip_address: The IP address of the vCenter to be discovered.
    :type ip_address: str
    :param process_server_id: The process server Id from where the discovery is orchestrated.
    :type process_server_id: str
    :param port: The port number for discovery.
    :type port: str
    :param run_as_account_id: The account Id which has privileges to discover the vCenter.
    :type run_as_account_id: str
    """

    _attribute_map = {
        'friendly_name': {'key': 'friendlyName', 'type': 'str'},
        'ip_address': {'key': 'ipAddress', 'type': 'str'},
        'process_server_id': {'key': 'processServerId', 'type': 'str'},
        'port': {'key': 'port', 'type': 'str'},
        'run_as_account_id': {'key': 'runAsAccountId', 'type': 'str'},
    }

    def __init__(self, *, friendly_name: Optional[str] = None,
                 ip_address: Optional[str] = None,
                 process_server_id: Optional[str] = None,
                 port: Optional[str] = None,
                 run_as_account_id: Optional[str] = None, **kwargs):
        super().__init__(**kwargs)
        self.friendly_name = friendly_name
        self.ip_address = ip_address
        self.process_server_id = process_server_id
        self.port = port
        self.run_as_account_id = run_as_account_id
class AgentDetails(msrest.serialization.Model):
    """Details of the agent running on a server.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar agent_id: The Id of the agent running on the server.
    :vartype agent_id: str
    :ivar machine_id: The Id of the machine to which the agent is registered.
    :vartype machine_id: str
    :ivar bios_id: The machine BIOS Id.
    :vartype bios_id: str
    :ivar fqdn: The machine FQDN.
    :vartype fqdn: str
    :ivar disks: The disks.
    :vartype disks: list[~azure.mgmt.recoveryservicessiterecovery.models.AgentDiskDetails]
    """

    _validation = {
        'agent_id': {'readonly': True},
        'machine_id': {'readonly': True},
        'bios_id': {'readonly': True},
        'fqdn': {'readonly': True},
        'disks': {'readonly': True},
    }

    _attribute_map = {
        'agent_id': {'key': 'agentId', 'type': 'str'},
        'machine_id': {'key': 'machineId', 'type': 'str'},
        'bios_id': {'key': 'biosId', 'type': 'str'},
        'fqdn': {'key': 'fqdn', 'type': 'str'},
        'disks': {'key': 'disks', 'type': '[AgentDiskDetails]'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Every field is read-only and server-populated; start them all
        # out as None.
        for attr in ('agent_id', 'machine_id', 'bios_id', 'fqdn', 'disks'):
            setattr(self, attr, None)
class AgentDiskDetails(msrest.serialization.Model):
    """Details of a disk reported by the agent.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar disk_id: The disk Id.
    :vartype disk_id: str
    :ivar disk_name: The disk name.
    :vartype disk_name: str
    :ivar is_os_disk: A value indicating whether the disk is the OS disk.
    :vartype is_os_disk: str
    :ivar capacity_in_bytes: The disk capacity in bytes.
    :vartype capacity_in_bytes: long
    :ivar lun_id: The lun of disk.
    :vartype lun_id: int
    """

    _validation = {
        'disk_id': {'readonly': True},
        'disk_name': {'readonly': True},
        'is_os_disk': {'readonly': True},
        'capacity_in_bytes': {'readonly': True},
        'lun_id': {'readonly': True},
    }

    _attribute_map = {
        'disk_id': {'key': 'diskId', 'type': 'str'},
        'disk_name': {'key': 'diskName', 'type': 'str'},
        'is_os_disk': {'key': 'isOSDisk', 'type': 'str'},
        'capacity_in_bytes': {'key': 'capacityInBytes', 'type': 'long'},
        'lun_id': {'key': 'lunId', 'type': 'int'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # All fields are read-only and server-populated.
        for attr in ('disk_id', 'disk_name', 'is_os_disk',
                     'capacity_in_bytes', 'lun_id'):
            setattr(self, attr, None)
class Resource(msrest.serialization.Model):
    """Base Azure resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource Name.
    :vartype name: str
    :ivar type: Resource Type.
    :vartype type: str
    :param location: Resource Location.
    :type location: str
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
    }

    def __init__(self, *, location: Optional[str] = None, **kwargs):
        super().__init__(**kwargs)
        # id/name/type are server-populated read-only fields.
        self.id = None
        self.name = None
        self.type = None
        self.location = location
class Alert(Resource):
    """An alert resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource Name.
    :vartype name: str
    :ivar type: Resource Type.
    :vartype type: str
    :param location: Resource Location.
    :type location: str
    :param properties: Alert related data.
    :type properties: ~azure.mgmt.recoveryservicessiterecovery.models.AlertProperties
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'properties': {'key': 'properties', 'type': 'AlertProperties'},
    }

    def __init__(self, *, location: Optional[str] = None,
                 properties: Optional["AlertProperties"] = None, **kwargs):
        super().__init__(location=location, **kwargs)
        self.properties = properties
class AlertCollection(msrest.serialization.Model):
    """A pageable collection of alerts.

    :param value: The list of alerts.
    :type value: list[~azure.mgmt.recoveryservicessiterecovery.models.Alert]
    :param next_link: The value of next link.
    :type next_link: str
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[Alert]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, *, value: Optional[List["Alert"]] = None,
                 next_link: Optional[str] = None, **kwargs):
        super().__init__(**kwargs)
        self.value = value
        self.next_link = next_link
class AlertProperties(msrest.serialization.Model):
    """Configuration of an alert's email notifications.

    :param send_to_owners: A value indicating whether to send email to subscription administrator.
    :type send_to_owners: str
    :param custom_email_addresses: The custom email address for sending emails.
    :type custom_email_addresses: list[str]
    :param locale: The locale for the email notification.
    :type locale: str
    """

    _attribute_map = {
        'send_to_owners': {'key': 'sendToOwners', 'type': 'str'},
        'custom_email_addresses': {'key': 'customEmailAddresses', 'type': '[str]'},
        'locale': {'key': 'locale', 'type': 'str'},
    }

    def __init__(self, *, send_to_owners: Optional[str] = None,
                 custom_email_addresses: Optional[List[str]] = None,
                 locale: Optional[str] = None, **kwargs):
        super().__init__(**kwargs)
        self.send_to_owners = send_to_owners
        self.custom_email_addresses = custom_email_addresses
        self.locale = locale
class ApplyRecoveryPointInput(msrest.serialization.Model):
    """Input to apply recovery point.

    All required parameters must be populated in order to send to Azure.

    :param properties: Required. The input properties to apply recovery point.
    :type properties:
     ~azure.mgmt.recoveryservicessiterecovery.models.ApplyRecoveryPointInputProperties
    """

    _validation = {'properties': {'required': True}}

    _attribute_map = {
        'properties': {'key': 'properties', 'type': 'ApplyRecoveryPointInputProperties'},
    }

    def __init__(self, *, properties: "ApplyRecoveryPointInputProperties", **kwargs):
        super().__init__(**kwargs)
        self.properties = properties
class ApplyRecoveryPointInputProperties(msrest.serialization.Model):
    """Input properties to apply recovery point.

    All required parameters must be populated in order to send to Azure.

    :param recovery_point_id: The recovery point Id.
    :type recovery_point_id: str
    :param provider_specific_details: Required. Provider specific input for applying recovery
     point.
    :type provider_specific_details:
     ~azure.mgmt.recoveryservicessiterecovery.models.ApplyRecoveryPointProviderSpecificInput
    """

    _validation = {'provider_specific_details': {'required': True}}

    _attribute_map = {
        'recovery_point_id': {'key': 'recoveryPointId', 'type': 'str'},
        'provider_specific_details': {'key': 'providerSpecificDetails', 'type': 'ApplyRecoveryPointProviderSpecificInput'},
    }

    def __init__(self, *, provider_specific_details: "ApplyRecoveryPointProviderSpecificInput",
                 recovery_point_id: Optional[str] = None, **kwargs):
        super().__init__(**kwargs)
        self.recovery_point_id = recovery_point_id
        self.provider_specific_details = provider_specific_details
class JobDetails(msrest.serialization.Model):
    """Job details based on specific job type.

    You probably want to use the sub-classes and not this class directly. Known
    sub-classes are: AsrJobDetails, ExportJobDetails, FailoverJobDetails, SwitchProtectionJobDetails, TestFailoverJobDetails.

    All required parameters must be populated in order to send to Azure.

    :param instance_type: Required. Gets the type of job details (see JobDetailsTypes enum for
     possible values).Constant filled by server.
    :type instance_type: str
    :param affected_object_details: The affected object properties like source server, source
     cloud, target server, target cloud etc. based on the workflow object details.
    :type affected_object_details: dict[str, str]
    """

    _validation = {'instance_type': {'required': True}}

    _attribute_map = {
        'instance_type': {'key': 'instanceType', 'type': 'str'},
        'affected_object_details': {'key': 'affectedObjectDetails', 'type': '{str}'},
    }

    # Polymorphic dispatch: the server-filled instance_type selects the
    # concrete sub-class during deserialization.
    _subtype_map = {
        'instance_type': {'AsrJobDetails': 'AsrJobDetails', 'ExportJobDetails': 'ExportJobDetails', 'FailoverJobDetails': 'FailoverJobDetails', 'SwitchProtectionJobDetails': 'SwitchProtectionJobDetails', 'TestFailoverJobDetails': 'TestFailoverJobDetails'}
    }

    def __init__(self, *, affected_object_details: Optional[Dict[str, str]] = None, **kwargs):
        super().__init__(**kwargs)
        self.instance_type: Optional[str] = None
        self.affected_object_details = affected_object_details
class AsrJobDetails(JobDetails):
"""This class represents job details based on specific job type.
All required parameters must be populated in order to send to Azure.
:param instance_type: Required. Gets the type of job details (see JobDetailsTypes enum | |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
class slm_session_brief(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-dot1ag-operational - based on the path /cfm-state/slm/slm-session-brief. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Brief display of SLM configuration
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__type','__status','__domain_name','__ma_name','__src_mep','__tgt_mep','__cos','__start_time','__stop_time','__session_index',)
_yang_name = 'slm-session-brief'
_rest_name = 'slm-session-brief'
_pybind_generated_by = 'container'
    def __init__(self, *args, **kwargs):
        """Construct the container, resolving the XPath helper and
        extension methods from the keyword arguments or the parent
        object, then building one YANGDynClass-wrapped leaf per YANG
        leaf node.

        An optional single positional argument is an existing object
        whose changed elements are copied into this instance.
        """
        # Resolve the XPath helper: an explicit kwarg wins, then the
        # parent's helper, else disabled.
        path_helper_ = kwargs.pop("path_helper", None)
        if path_helper_ is False:
            self._path_helper = False
        elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
            self._path_helper = path_helper_
        elif hasattr(self, "_parent"):
            path_helper_ = getattr(self._parent, "_path_helper", False)
            self._path_helper = path_helper_
        else:
            self._path_helper = False
        # Resolve extension methods the same way: kwarg, then parent,
        # else disabled.
        extmethods = kwargs.pop("extmethods", None)
        if extmethods is False:
            self._extmethods = False
        elif extmethods is not None and isinstance(extmethods, dict):
            self._extmethods = extmethods
        elif hasattr(self, "_parent"):
            extmethods = getattr(self._parent, "_extmethods", None)
            self._extmethods = extmethods
        else:
            self._extmethods = False
        # One YANGDynClass wrapper per leaf of the slm-session-brief
        # container (all operational data, is_config=False).
        self.__status = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="status", rest_name="status", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-dot1ag-operational', defining_module='brocade-dot1ag-operational', yang_type='boolean', is_config=False)
        self.__tgt_mep = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="tgt-mep", rest_name="tgt-mep", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-dot1ag-operational', defining_module='brocade-dot1ag-operational', yang_type='uint16', is_config=False)
        self.__cos = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name="cos", rest_name="cos", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-dot1ag-operational', defining_module='brocade-dot1ag-operational', yang_type='uint8', is_config=False)
        self.__start_time = YANGDynClass(base=unicode, is_leaf=True, yang_name="start-time", rest_name="start-time", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-dot1ag-operational', defining_module='brocade-dot1ag-operational', yang_type='string', is_config=False)
        self.__ma_name = YANGDynClass(base=unicode, is_leaf=True, yang_name="ma-name", rest_name="ma-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-dot1ag-operational', defining_module='brocade-dot1ag-operational', yang_type='string', is_config=False)
        self.__domain_name = YANGDynClass(base=unicode, is_leaf=True, yang_name="domain-name", rest_name="domain-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-dot1ag-operational', defining_module='brocade-dot1ag-operational', yang_type='string', is_config=False)
        self.__src_mep = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="src-mep", rest_name="src-mep", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-dot1ag-operational', defining_module='brocade-dot1ag-operational', yang_type='uint16', is_config=False)
        self.__stop_time = YANGDynClass(base=unicode, is_leaf=True, yang_name="stop-time", rest_name="stop-time", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-dot1ag-operational', defining_module='brocade-dot1ag-operational', yang_type='string', is_config=False)
        self.__session_index = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="session-index", rest_name="session-index", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-dot1ag-operational', defining_module='brocade-dot1ag-operational', yang_type='uint32', is_config=False)
        self.__type = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="type", rest_name="type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-dot1ag-operational', defining_module='brocade-dot1ag-operational', yang_type='boolean', is_config=False)
        # Copy-constructor path: a single positional argument supplies
        # an object whose changed elements are copied in via the
        # generated _set_<name> methods.
        load = kwargs.pop("load", None)
        if args:
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'cfm-state', u'slm', u'slm-session-brief']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'cfm-state', u'slm', u'slm-session-brief']
def _get_type(self):
  """
  Getter method for type, mapped from YANG variable /cfm_state/slm/slm_session_brief/type (boolean)
  YANG Description: session type
  """
  # Value is stored as a YANGDynClass wrapper (see _set_type / _unset_type).
  return self.__type
def _set_type(self, v, load=False):
  """
  Setter method for type, mapped from YANG variable /cfm_state/slm/slm_session_brief/type (boolean)
  If this variable is read-only (config: false) in the
  source YANG file, then _set_type is considered as a private
  method. Backends looking to populate this variable should
  do so via calling thisObj._set_type() directly.
  YANG Description: session type
  """
  # Unwrap typedef values: _utype coerces the wrapper back to its base type.
  if hasattr(v, "_utype"):
    v = v._utype(v)
  try:
    # Re-wrap the supplied value; YANGDynClass validates it against the leaf type.
    t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="type", rest_name="type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-dot1ag-operational', defining_module='brocade-dot1ag-operational', yang_type='boolean', is_config=False)
  except (TypeError, ValueError):
    raise ValueError({
        'error-string': """type must be of a type compatible with boolean""",
        'defined-type': "boolean",
        'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="type", rest_name="type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-dot1ag-operational', defining_module='brocade-dot1ag-operational', yang_type='boolean', is_config=False)""",
      })
  self.__type = t
  # Fire the change hook if one has been wired onto this object.
  if hasattr(self, '_set'):
    self._set()
def _unset_type(self):
  """Reset the 'type' leaf to a fresh default-constructed YANGDynClass instance."""
  self.__type = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="type", rest_name="type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-dot1ag-operational', defining_module='brocade-dot1ag-operational', yang_type='boolean', is_config=False)
def _get_status(self):
  """
  Getter method for status, mapped from YANG variable /cfm_state/slm/slm_session_brief/status (boolean)
  YANG Description: session status
  """
  # Value is stored as a YANGDynClass wrapper (see _set_status / _unset_status).
  return self.__status
def _set_status(self, v, load=False):
  """
  Setter method for status, mapped from YANG variable /cfm_state/slm/slm_session_brief/status (boolean)
  If this variable is read-only (config: false) in the
  source YANG file, then _set_status is considered as a private
  method. Backends looking to populate this variable should
  do so via calling thisObj._set_status() directly.
  YANG Description: session status
  """
  # Unwrap typedef values: _utype coerces the wrapper back to its base type.
  if hasattr(v, "_utype"):
    v = v._utype(v)
  try:
    # Re-wrap the supplied value; YANGDynClass validates it against the leaf type.
    t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="status", rest_name="status", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-dot1ag-operational', defining_module='brocade-dot1ag-operational', yang_type='boolean', is_config=False)
  except (TypeError, ValueError):
    raise ValueError({
        'error-string': """status must be of a type compatible with boolean""",
        'defined-type': "boolean",
        'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="status", rest_name="status", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-dot1ag-operational', defining_module='brocade-dot1ag-operational', yang_type='boolean', is_config=False)""",
      })
  self.__status = t
  # Fire the change hook if one has been wired onto this object.
  if hasattr(self, '_set'):
    self._set()
def _unset_status(self):
  """Reset the 'status' leaf to a fresh default-constructed YANGDynClass instance."""
  self.__status = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="status", rest_name="status", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-dot1ag-operational', defining_module='brocade-dot1ag-operational', yang_type='boolean', is_config=False)
def _get_domain_name(self):
  """
  Getter method for domain_name, mapped from YANG variable /cfm_state/slm/slm_session_brief/domain_name (string)
  YANG Description: domain name
  """
  # Value is stored as a YANGDynClass wrapper (see _set_domain_name / _unset_domain_name).
  return self.__domain_name
def _set_domain_name(self, v, load=False):
  """
  Setter method for domain_name, mapped from YANG variable /cfm_state/slm/slm_session_brief/domain_name (string)
  If this variable is read-only (config: false) in the
  source YANG file, then _set_domain_name is considered as a private
  method. Backends looking to populate this variable should
  do so via calling thisObj._set_domain_name() directly.
  YANG Description: domain name
  """
  # Unwrap typedef values: _utype coerces the wrapper back to its base type.
  if hasattr(v, "_utype"):
    v = v._utype(v)
  try:
    # Re-wrap the supplied value; YANGDynClass validates it against the leaf type.
    t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="domain-name", rest_name="domain-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-dot1ag-operational', defining_module='brocade-dot1ag-operational', yang_type='string', is_config=False)
  except (TypeError, ValueError):
    raise ValueError({
        'error-string': """domain_name must be of a type compatible with string""",
        'defined-type': "string",
        'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="domain-name", rest_name="domain-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-dot1ag-operational', defining_module='brocade-dot1ag-operational', yang_type='string', is_config=False)""",
      })
  self.__domain_name = t
  # Fire the change hook if one has been wired onto this object.
  if hasattr(self, '_set'):
    self._set()
def _unset_domain_name(self):
  """Reset the 'domain-name' leaf to a fresh default-constructed YANGDynClass instance."""
  self.__domain_name = YANGDynClass(base=unicode, is_leaf=True, yang_name="domain-name", rest_name="domain-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-dot1ag-operational', defining_module='brocade-dot1ag-operational', yang_type='string', is_config=False)
def _get_ma_name(self):
  """
  Getter method for ma_name, mapped from YANG variable /cfm_state/slm/slm_session_brief/ma_name (string)
  YANG Description: service name
  """
  # Value is stored as a YANGDynClass wrapper (see _set_ma_name / _unset_ma_name).
  return self.__ma_name
def _set_ma_name(self, v, load=False):
  """
  Setter method for ma_name, mapped from YANG variable /cfm_state/slm/slm_session_brief/ma_name (string)
  If this variable is read-only (config: false) in the
  source YANG file, then _set_ma_name is considered as a private
  method. Backends looking to populate this variable should
  do so via calling thisObj._set_ma_name() directly.
  YANG Description: service name
  """
  # Unwrap typedef values: _utype coerces the wrapper back to its base type.
  if hasattr(v, "_utype"):
    v = v._utype(v)
  try:
    # Re-wrap the supplied value; YANGDynClass validates it against the leaf type.
    t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="ma-name", rest_name="ma-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-dot1ag-operational', defining_module='brocade-dot1ag-operational', yang_type='string', is_config=False)
  except (TypeError, ValueError):
    raise ValueError({
        'error-string': """ma_name must be of a type compatible with string""",
        'defined-type': "string",
        'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="ma-name", rest_name="ma-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-dot1ag-operational', defining_module='brocade-dot1ag-operational', yang_type='string', is_config=False)""",
      })
  self.__ma_name = t
  # Fire the change hook if one has been wired onto this object.
  if hasattr(self, '_set'):
    self._set()
def _unset_ma_name(self):
  """Reset the 'ma-name' leaf to a fresh default-constructed YANGDynClass instance."""
  self.__ma_name = YANGDynClass(base=unicode, is_leaf=True, yang_name="ma-name", rest_name="ma-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-dot1ag-operational', defining_module='brocade-dot1ag-operational', yang_type='string', is_config=False)
def _get_src_mep(self):
  """
  Getter method for src_mep, mapped from YANG variable /cfm_state/slm/slm_session_brief/src_mep (uint16)
  YANG Description: source mep
  """
  # Value is stored as a YANGDynClass wrapper (see _set_src_mep / _unset_src_mep).
  return self.__src_mep
def _set_src_mep(self, v, load=False):
  """
  Setter method for src_mep, mapped from YANG variable /cfm_state/slm/slm_session_brief/src_mep (uint16)
  If this variable is read-only (config: false) in the
  source YANG file, then _set_src_mep is considered as a private
  method. Backends looking to populate this variable should
  do so via calling thisObj._set_src_mep() directly.
  YANG Description: source mep
  """
  # Unwrap typedef values: _utype coerces the wrapper back to its base type.
  if hasattr(v, "_utype"):
    v = v._utype(v)
  try:
    # Re-wrap the supplied value; the RestrictedClassType enforces the uint16 range.
    t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="src-mep", rest_name="src-mep", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-dot1ag-operational', defining_module='brocade-dot1ag-operational', yang_type='uint16', is_config=False)
  except (TypeError, ValueError):
    raise ValueError({
        'error-string': """src_mep must be of a type compatible with uint16""",
        'defined-type': "uint16",
        'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="src-mep", rest_name="src-mep", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-dot1ag-operational', defining_module='brocade-dot1ag-operational', yang_type='uint16', is_config=False)""",
      })
  self.__src_mep = t
  # Fire the change hook if one has been wired onto this object.
  if hasattr(self, '_set'):
    self._set()
def _unset_src_mep(self):
  """Reset the 'src-mep' leaf to a fresh default-constructed YANGDynClass instance."""
  self.__src_mep = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="src-mep", rest_name="src-mep", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-dot1ag-operational', defining_module='brocade-dot1ag-operational', yang_type='uint16', is_config=False)
def _get_tgt_mep(self):
  """
  Getter method for tgt_mep, mapped from YANG variable /cfm_state/slm/slm_session_brief/tgt_mep (uint16)
  YANG Description: target mep
  """
  # Value is stored as a YANGDynClass wrapper (see _set_tgt_mep).
  return self.__tgt_mep
def _set_tgt_mep(self, v, load=False):
"""
Setter method for tgt_mep, mapped from YANG variable /cfm_state/slm/slm_session_brief/tgt_mep (uint16)
If this variable is read-only (config: false) in the
source YANG file, then _set_tgt_mep is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_tgt_mep() directly.
YANG Description: target mep
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="tgt-mep", rest_name="tgt-mep", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-dot1ag-operational', defining_module='brocade-dot1ag-operational', yang_type='uint16', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': | |
# The piwheels project
# Copyright (c) 2017 <NAME> <https://github.com/bennuttall>
# Copyright (c) 2017 <NAME> <<EMAIL>>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Defines :class:`TheOracle` task and the :class:`DbClient` RPC class for talking
to it.
.. autoclass:: TheOracle
:members:
.. autoclass:: DbClient
:members:
"""
import pickle
from .. import const, protocols, transport, tasks
from ..states import BuildState, DownloadState
from .db import Database, ProjectVersionsRow, ProjectFilesRow, RewritePendingRow
class TheOracle(tasks.Task):
    """
    This task provides an RPC-like interface to the database; it handles
    requests such as registering a new package, version, or build, and
    answering queries about the hashes of files. The primary clients of this
    class are :class:`~.slave_driver.SlaveDriver`,
    :class:`~.the_scribe.TheScribe`, and :class:`~.cloud_gazer.CloudGazer`.

    Note that because database requests are notoriously variable in length the
    client RPC class (:class:`DbClient`) doesn't *directly* talk to
    :class:`TheOracle`. Rather, multiple instances of :class:`TheOracle` are
    spawned and :class:`~.seraph.Seraph` sits in front of these acting as a
    simple load-sharing router for the RPC clients.
    """
    name = 'master.the_oracle'
    # Class-level counter: each spawned oracle takes the next ordinal so that
    # every instance gets a unique task name (e.g. "master.the_oracle_1").
    instance = 0

    def __init__(self, config):
        TheOracle.instance += 1
        self.name = '%s_%d' % (TheOracle.name, TheOracle.instance)
        super().__init__(config)
        self.db = Database(config.dsn)
        db_queue = self.socket(
            transport.REQ, protocol=protocols.the_oracle)
        db_queue.hwm = 10  # bound the number of in-flight messages
        db_queue.connect(const.ORACLE_QUEUE)
        self.register(db_queue, self.handle_db_request)
        # Announce readiness to the router sitting in front of the oracles.
        db_queue.send(b'READY')

    def close(self):
        # Release the database connection before the task's own teardown.
        self.db.close()
        super().close()

    def handle_db_request(self, queue):
        """
        Handle incoming requests from :class:`DbClient` instances.
        """
        try:
            addr, msg, data = queue.recv_addr_msg()
        except IOError as exc:
            self.logger.error(str(exc))
            # REQ sockets *must* send a reply even when stuff goes wrong
            # otherwise the send/recv cycle that REQ/REP depends upon breaks.
            # Here we've got a badly formed request and we can't even get the
            # reply address, so we just make one up (empty). This message
            # won't go anywhere (bogus address) but that doesn't matter as we
            # just want to get the socket back to receiving state
            addr, msg, data = b'', '', str(exc)
        try:
            # Dispatch table: message name -> zero-argument callable. An
            # unknown message raises KeyError, which falls into the generic
            # error reply below.
            handler = {
                'ALLPKGS': lambda: self.do_allpkgs(),
                'ALLVERS': lambda: self.do_allvers(),
                'NEWPKG': lambda: self.do_newpkg(*data),
                'NEWVER': lambda: self.do_newver(*data),
                'SKIPPKG': lambda: self.do_skippkg(*data),
                'SKIPVER': lambda: self.do_skipver(*data),
                'LOGDOWNLOAD': lambda: self.do_logdownload(data),
                'LOGBUILD': lambda: self.do_logbuild(data),
                'DELBUILD': lambda: self.do_delbuild(*data),
                'PKGFILES': lambda: self.do_pkgfiles(data),
                'PROJVERS': lambda: self.do_projvers(data),
                'PROJFILES': lambda: self.do_projfiles(data),
                'VERFILES': lambda: self.do_verfiles(*data),
                'GETSKIP': lambda: self.do_getskip(*data),
                'PKGEXISTS': lambda: self.do_pkgexists(data),
                'VEREXISTS': lambda: self.do_verexists(*data),
                'GETABIS': lambda: self.do_getabis(),
                'GETPYPI': lambda: self.do_getpypi(),
                'SETPYPI': lambda: self.do_setpypi(data),
                'GETSTATS': lambda: self.do_getstats(),
                'GETSEARCH': lambda: self.do_getsearch(),
                'FILEDEPS': lambda: self.do_filedeps(data),
                'SAVERWP': lambda: self.do_saverwp(data),
                'LOADRWP': lambda: self.do_loadrwp(),
            }[msg]
            result = handler()
        except Exception as exc:
            self.logger.error('Error handling db request: %s', msg)
            msg, data = 'ERROR', str(exc)
        else:
            msg, data = 'OK', result
        queue.send_addr_msg(addr, msg, data)  # see note above

    def do_allpkgs(self):
        """
        Handler for "ALLPKGS" message, sent by :class:`DbClient` to request the
        set of all packages known to the database.
        """
        return self.db.get_all_packages()

    def do_allvers(self):
        """
        Handler for "ALLVERS" message, sent by :class:`DbClient` to request the
        set of all (package, version) tuples known to the database.
        """
        return self.db.get_all_package_versions()

    def do_newpkg(self, package, skip):
        """
        Handler for "NEWPKG" message, sent by :class:`DbClient` to register a
        new package.
        """
        return self.db.add_new_package(package, skip)

    def do_newver(self, package, version, released, skip):
        """
        Handler for "NEWVER" message, sent by :class:`DbClient` to register a
        new (package, version) tuple.
        """
        return self.db.add_new_package_version(package, version, released, skip)

    def do_skippkg(self, package, reason):
        """
        Handler for "SKIPPKG" message, sent by :class:`DbClient` to skip
        building all versions of a package.
        """
        self.db.skip_package(package, reason)

    def do_skipver(self, package, version, reason):
        """
        Handler for "SKIPVER" message, sent by :class:`DbClient` to skip
        building a specific version of a package.
        """
        self.db.skip_package_version(package, version, reason)

    def do_logdownload(self, download):
        """
        Handler for "LOGDOWNLOAD" message, sent by :class:`DbClient` to
        register a new download.
        """
        self.db.log_download(DownloadState.from_message(download))

    def do_logbuild(self, build):
        """
        Handler for "LOGBUILD" message, sent by :class:`DbClient` to register a
        new build result.
        """
        # log_build assigns the new build_id to the state object, which is
        # then returned to the client.
        build = BuildState.from_message(build)
        self.db.log_build(build)
        return build.build_id

    def do_delbuild(self, package, version):
        """
        Handler for "DELBUILD" message, sent by :class:`DbClient` to remove all
        builds (and files and downloads by cascade) for *version* of *package*.
        """
        self.db.delete_build(package, version)

    def do_pkgfiles(self, package):
        """
        Handler for "PKGFILES" message, sent by :class:`DbClient` to request
        details of all wheels associated with *package*.
        """
        return self.db.get_package_files(package)

    def do_projvers(self, package):
        """
        Handler for "PROJVERS" message, sent by :class:`DbClient` to request
        build and skip details of all versions of *package*.
        """
        return self.db.get_project_versions(package)

    def do_projfiles(self, package):
        """
        Handler for "PROJFILES" message, sent by :class:`DbClient` to request
        file details of all versions of *package*.
        """
        return self.db.get_project_files(package)

    def do_verfiles(self, package, version):
        """
        Handler for "VERFILES" message, sent by :class:`DbClient` to request
        the filenames of all wheels associated with *version* of *package*.
        """
        return self.db.get_version_files(package, version)

    def do_getskip(self, package, version):
        """
        Handler for "GETSKIP" message, sent by :class:`DbClient` to request
        the reason for skipping builds of *version* of *package*.
        """
        return self.db.get_version_skip(package, version)

    def do_pkgexists(self, package):
        """
        Handler for "PKGEXISTS" message, sent by :class:`DbClient` to request
        whether or not the specified *package* exists.
        """
        return self.db.test_package(package)

    def do_verexists(self, package, version):
        """
        Handler for "VEREXISTS" message, sent by :class:`DbClient` to request
        whether or not the specified *version* of *package* exists.
        """
        return self.db.test_package_version(package, version)

    def do_getabis(self):
        """
        Handler for "GETABIS" message, sent by :class:`DbClient` to request the
        list of all ABIs to build for.
        """
        return self.db.get_build_abis()

    def do_getpypi(self):
        """
        Handler for "GETPYPI" message, sent by :class:`DbClient` to request the
        record of the last serial number from the PyPI changelog.
        """
        return self.db.get_pypi_serial()

    def do_setpypi(self, serial):
        """
        Handler for "SETPYPI" message, sent by :class:`DbClient` to update the
        last seen serial number from the PyPI changelog.
        """
        self.db.set_pypi_serial(serial)

    def do_getstats(self):
        """
        Handler for "GETSTATS" message, sent by :class:`DbClient` to request
        the latest database statistics, returned as a list of (field, value)
        tuples.
        """
        return self.db.get_statistics()

    def do_getsearch(self):
        """
        Handler for "GETSEARCH" message, sent by :class:`DbClient` to request
        the recent download statistics, returned as a mapping of package to
        (downloads_recent, downloads_all) tuples.
        """
        return self.db.get_search_index()

    def do_filedeps(self, filename):
        """
        Handler for "FILEDEPS" message, sent by :class:`DbClient` to request
        dependencies for *filename*, returned as a dict mapping tool names
        to dependency sets.
        """
        return self.db.get_file_dependencies(filename)

    def do_saverwp(self, queue):
        """
        Handler for "SAVERWP" message, sent by :class:`DbClient` to request
        that *queue* is saved to the ``rewrites_pending`` table.
        """
        return self.db.save_rewrites_pending(queue)

    def do_loadrwp(self):
        """
        Handler for "LOADRWP" message, sent by :class:`DbClient` to request
        the content of the ``rewrites_pending`` table.
        """
        return self.db.load_rewrites_pending()
class DbClient:
"""
RPC client class for talking to :class:`TheOracle`.
"""
def __init__(self, config, logger=None):
    """Open a REQ socket connected to the oracle queue named by *config.db_queue*."""
    self.ctx = transport.Context()
    # reversed(): this end speaks the client side of the oracle protocol.
    self.db_queue = self.ctx.socket(
        transport.REQ, protocol=reversed(protocols.the_oracle),
        logger=logger)
    self.db_queue.hwm = 10  # bound the number of in-flight messages
    self.db_queue.connect(config.db_queue)
def close(self):
    """Close the request socket; the client must not be used afterwards."""
    self.db_queue.close()
def _execute(self, msg, data=protocols.NoData):
    """
    Send *msg* (with optional *data*) to the oracle and return the reply
    payload; raise :exc:`IOError` if the oracle reports an error.
    """
    # If sending blocks this either means we're shutting down, or
    # something's gone horribly wrong (either way, raising EAGAIN is fine)
    self.db_queue.send_msg(msg, data, flags=transport.NOBLOCK)
    status, result = self.db_queue.recv_msg()
    if status != 'OK':
        raise IOError(result)
    return result
def add_new_package(self, package, skip=''):
    """
    See :meth:`.db.Database.add_new_package`.
    """
    args = [package, skip]
    return self._execute('NEWPKG', args)
def add_new_package_version(self, | |
to `1000`") # noqa: E501
collection_formats = {}
path_params = {}
if 'list_id' in params:
path_params['list_id'] = params['list_id'] # noqa: E501
query_params = []
if 'fields' in params:
query_params.append(('fields', params['fields'])) # noqa: E501
collection_formats['fields'] = 'csv' # noqa: E501
if 'exclude_fields' in params:
query_params.append(('exclude_fields', params['exclude_fields'])) # noqa: E501
collection_formats['exclude_fields'] = 'csv' # noqa: E501
if 'count' in params:
query_params.append(('count', params['count'])) # noqa: E501
if 'offset' in params:
query_params.append(('offset', params['offset'])) # noqa: E501
if 'type' in params:
query_params.append(('type', params['type'])) # noqa: E501
if 'required' in params:
query_params.append(('required', params['required'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/problem+json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/lists/{list_id}/merge-fields', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='CollectionOfMergeFields', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_list_merge_field(self, list_id, merge_id, **kwargs):  # noqa: E501
    """Get merge field  # noqa: E501

    Get information about a specific merge field in a list.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_list_merge_field(list_id, merge_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str list_id: The unique ID for the list. (required)
    :param str merge_id: The id for the merge field. (required)
    :param list[str] exclude_fields: A comma-separated list of fields to exclude. Reference parameters of sub-objects with dot notation.
    :param list[str] fields: A comma-separated list of fields to return. Reference parameters of sub-objects with dot notation.
    :return: MergeField
             If the method is called asynchronously,
             returns the request thread.
    """
    # Callers of this wrapper always get the parsed body, never the full
    # (data, status, headers) tuple.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.get_list_merge_field_with_http_info(list_id, merge_id, **kwargs)  # noqa: E501
    response = self.get_list_merge_field_with_http_info(list_id, merge_id, **kwargs)  # noqa: E501
    return response
def get_list_merge_field_with_http_info(self, list_id, merge_id, **kwargs):  # noqa: E501
    """Get merge field  # noqa: E501

    Get information about a specific merge field in a list.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_list_merge_field_with_http_info(list_id, merge_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str list_id: The unique ID for the list. (required)
    :param str merge_id: The id for the merge field. (required)
    :param list[str] exclude_fields: A comma-separated list of fields to exclude. Reference parameters of sub-objects with dot notation.
    :param list[str] fields: A comma-separated list of fields to return. Reference parameters of sub-objects with dot notation.
    :return: MergeField
             If the method is called asynchronously,
             returns the request thread.
    """

    all_params = ['list_id', 'merge_id', 'exclude_fields', 'fields']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # locals() captures the named args plus 'kwargs'; flatten kwargs into
    # params after rejecting anything not in all_params.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_list_merge_field" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'list_id' is set
    if ('list_id' not in params or
            params['list_id'] is None):
        # BUGFIX: the generated message previously omitted the method name.
        raise ValueError("Missing the required parameter `list_id` when calling `get_list_merge_field`")  # noqa: E501
    # verify the required parameter 'merge_id' is set
    if ('merge_id' not in params or
            params['merge_id'] is None):
        raise ValueError("Missing the required parameter `merge_id` when calling `get_list_merge_field`")  # noqa: E501

    collection_formats = {}

    path_params = {}
    if 'list_id' in params:
        path_params['list_id'] = params['list_id']  # noqa: E501
    if 'merge_id' in params:
        path_params['merge_id'] = params['merge_id']  # noqa: E501

    query_params = []
    if 'exclude_fields' in params:
        query_params.append(('exclude_fields', params['exclude_fields']))  # noqa: E501
        collection_formats['exclude_fields'] = 'csv'  # noqa: E501
    if 'fields' in params:
        query_params.append(('fields', params['fields']))  # noqa: E501
        collection_formats['fields'] = 'csv'  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/problem+json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['basicAuth']  # noqa: E501

    return self.api_client.call_api(
        '/lists/{list_id}/merge-fields/{merge_id}', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='MergeField',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_segment(self, list_id, segment_id, **kwargs):  # noqa: E501
    """Get segment info  # noqa: E501

    Get information about a specific segment.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_segment(list_id, segment_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str list_id: The unique ID for the list. (required)
    :param str segment_id: The unique id for the segment. (required)
    :param list[str] fields: A comma-separated list of fields to return. Reference parameters of sub-objects with dot notation.
    :param list[str] exclude_fields: A comma-separated list of fields to exclude. Reference parameters of sub-objects with dot notation.
    :param bool include_cleaned: Include cleaned members in response
    :param bool include_transactional: Include transactional members in response
    :param bool include_unsubscribed: Include unsubscribed members in response
    :return: List7
             If the method is called asynchronously,
             returns the request thread.
    """
    # Callers of this wrapper always get the parsed body, never the full
    # (data, status, headers) tuple.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.get_segment_with_http_info(list_id, segment_id, **kwargs)  # noqa: E501
    response = self.get_segment_with_http_info(list_id, segment_id, **kwargs)  # noqa: E501
    return response
def get_segment_with_http_info(self, list_id, segment_id, **kwargs):  # noqa: E501
    """Get segment info  # noqa: E501

    Get information about a specific segment.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_segment_with_http_info(list_id, segment_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str list_id: The unique ID for the list. (required)
    :param str segment_id: The unique id for the segment. (required)
    :param list[str] fields: A comma-separated list of fields to return. Reference parameters of sub-objects with dot notation.
    :param list[str] exclude_fields: A comma-separated list of fields to exclude. Reference parameters of sub-objects with dot notation.
    :param bool include_cleaned: Include cleaned members in response
    :param bool include_transactional: Include transactional members in response
    :param bool include_unsubscribed: Include unsubscribed members in response
    :return: List7
             If the method is called asynchronously,
             returns the request thread.
    """

    all_params = ['list_id', 'segment_id', 'fields', 'exclude_fields', 'include_cleaned', 'include_transactional', 'include_unsubscribed']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # locals() captures the named args plus 'kwargs'; flatten kwargs into
    # params after rejecting anything not in all_params.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_segment" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'list_id' is set
    if ('list_id' not in params or
            params['list_id'] is None):
        # BUGFIX: the generated message previously omitted the method name.
        raise ValueError("Missing the required parameter `list_id` when calling `get_segment`")  # noqa: E501
    # verify the required parameter 'segment_id' is set
    if ('segment_id' not in params or
            params['segment_id'] is None):
        raise ValueError("Missing the required parameter `segment_id` when calling `get_segment`")  # noqa: E501

    collection_formats = {}

    path_params = {}
    if 'list_id' in params:
        path_params['list_id'] = params['list_id']  # noqa: E501
    if 'segment_id' in params:
        path_params['segment_id'] = params['segment_id']  # noqa: E501

    query_params = []
    if 'fields' in params:
        query_params.append(('fields', params['fields']))  # noqa: E501
        collection_formats['fields'] = 'csv'  # noqa: E501
    if 'exclude_fields' in params:
        query_params.append(('exclude_fields', params['exclude_fields']))  # noqa: E501
        collection_formats['exclude_fields'] = 'csv'  # noqa: E501
    if 'include_cleaned' in params:
        query_params.append(('include_cleaned', params['include_cleaned']))  # noqa: E501
    if 'include_transactional' in params:
        query_params.append(('include_transactional', params['include_transactional']))  # noqa: E501
    if 'include_unsubscribed' in params:
        query_params.append(('include_unsubscribed', params['include_unsubscribed']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/problem+json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['basicAuth']  # noqa: E501

    return self.api_client.call_api(
        '/lists/{list_id}/segments/{segment_id}', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='List7',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_segment_members_list(self, list_id, segment_id, **kwargs): # noqa: E501
"""List members in segment # noqa: E501
Get information about members in a saved segment. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
| |
of optical depth grid
self.L = self.M = len(self.x)
self.ll = self.mm = np.arange(self.L)
self.logE = np.log10(self.E)
self.n0 = min(self.nn)
self.dE = np.diff(self.E)
self.dlogE = np.diff(self.logE)
# Pre-compute cross-sections
self.sigma_E = np.array([np.array([self.sigma(E, i) for E in self.E]) \
for i in range(3)])
self.log_sigma_E = np.log10(self.sigma_E)
def load(self, fn):
    """
    Read an optical depth table.

    Parameters
    ----------
    fn : dict or str
        Either a pre-loaded table (dict with keys 'E', 'z', 'tau') or the
        filename of an hdf5 / pkl / plain-text table.

    Returns
    -------
    Tuple of (redshift array, energy array, optical depth table).
    """
    if type(fn) is dict:
        self.E0 = fn['E'].min()
        self.E1 = fn['E'].max()
        self.E = fn['E']
        self.z = fn['z']
        self.x = self.z + 1
        self.N = self.E.size

        self.R = self.x[1] / self.x[0]

        # Consistency fix: keep self._tau in sync, as the other branches do.
        self.tau = self._tau = fn['tau']
    elif re.search('hdf5', fn):
        # BUG FIX: read from `fn` (the argument), not `self.tabname`, so the
        # method honors whatever file the caller passed in.
        f = h5py.File(fn, 'r')

        self.E0 = min(f[('photon_energy')])
        self.E1 = max(f[('photon_energy')])
        self.E = np.array(f[('photon_energy')])
        self.z = np.array(f[('redshift')])
        self.x = self.z + 1
        self.N = self.E.size

        self.R = self.x[1] / self.x[0]

        self.tau = self._tau = np.array(f[('tau')])

        f.close()
    elif re.search('pkl', fn):
        data = read_pickle_file(fn, nloads=1, verbose=False)
        self.E0 = data['E'].min()
        self.E1 = data['E'].max()
        self.E = data['E']
        self.z = data['z']
        self.x = self.z + 1
        self.N = self.E.size
        self.R = self.x[1] / self.x[0]
        self.tau = self._tau = data['tau']
    else:
        # Plain-text table: header line carries zmin/zmax/E0/E1 as
        # 'key=value' tokens; the body is the tau array itself.
        # BUG FIX: use `fn` (not self.tabname) and close the file via a
        # context manager (the handle was previously leaked).
        with open(fn, 'r') as f:
            hdr = f.readline().split()[1:]

        tmp = []
        for element in hdr:
            tmp.append(float(element[element.rfind('=')+1:]))

        zmin, zmax, self.E0, self.E1 = tmp

        self.tau = self._tau = np.loadtxt(fn)
        self.N = self.tau.shape[1]

        self.x = np.logspace(np.log10(1+zmin), np.log10(1.+zmax),
            int(self.tau.shape[0]))

        self.z = self.x - 1.
        self.E = np.logspace(np.log10(self.E0), np.log10(self.E1), self.N)

    # Correct for inconsistencies between parameter file and table:
    # energies outside [tau_Emin, tau_Emax] are masked with tau -> inf.
    if self.pf['tau_Emin'] > self.E0:
        Ediff = self.E - self.pf['tau_Emin']
        i_E0 = np.argmin(np.abs(Ediff))
        if Ediff[i_E0] < 0:
            i_E0 += 1

        self.tau[:,0:i_E0] = np.inf

    if self.pf['tau_Emax'] < self.E1:
        Ediff = self.E - self.pf['tau_Emax']
        i_E0 = np.argmin(np.abs(Ediff))
        if Ediff[i_E0] < 0:
            i_E0 += 1

        self.tau[:,i_E0+1:] = np.inf

    self.logx = np.log10(self.x)
    self.logz = np.log10(self.z)

    if self.pf['verbose']:
        print("# Loaded {}.".format(fn))

    return self.z, self.E, self.tau
def tau_name(self, prefix=None, suffix='pkl'):
    """
    Build the filename of the optical depth table implied by the current
    parameter file.

    Returns
    -------
    Tuple ``(filename, template)``, where *template* is a callable
    ``template(z1, z2, logE1, logE2)`` producing a name for arbitrary
    redshift / log-energy bounds, or ``None`` when a table was supplied by
    hand via ``tau_table``.
    """
    # A user-supplied table short-circuits name construction entirely.
    if self.pf['tau_table'] is not None:
        return self.pf['tau_table'], None

    # Always emit the preferred on-disk format, whatever suffix was asked.
    if suffix != self.pf['preferred_format']:
        suffix = self.pf['preferred_format']

    species = 'He' if self.pf['include_He'] else 'H'
    z_lo = self.pf['final_redshift']
    z_hi = self.pf['first_light_redshift']
    n_z, n_E = self.tau_shape()
    E_lo = self.pf['tau_Emin']
    E_hi = self.pf['tau_Emax']

    def template(z1, z2, E1, E2):
        # Name encodes cosmology prefix, chemistry, table shape, and the
        # redshift / log-energy bounds.
        return ('optical_depth_{0!s}_{1!s}_{2}x{3}_z_{4:.0f}-{5:.0f}'
            '_logE_{6:.2g}-{7:.2g}.{8!s}').format(
            self.cosm.get_prefix(), species, n_z, n_E, z1, z2, E1, E2,
            suffix)

    return template(z_lo, z_hi, np.log10(E_lo), np.log10(E_hi)), template
def find_tau(self, prefix=None):
    """
    Find an optical depth table compatible with the current parameter file.

    Parameters
    ----------
    prefix : str, list, or None
        Directory (or list of directories) to search. When None, search
        ``$ARES/input/optical_depth`` or the param-file ``tau_path``.

    Returns
    -------
    Full path to the best-matching table, or None if nothing suitable is
    found.
    """
    fn, fn_func = self.tau_name()

    # Assemble the list of directories to search.
    if prefix is None:
        ares_dir = ARES
        if not ares_dir:
            print("No ARES environment variable.")
            return None

        if self.pf['tau_path'] is None:
            input_dirs = ['{!s}/input/optical_depth'.format(ARES)]
        else:
            input_dirs = [self.pf['tau_path']]
    else:
        if isinstance(prefix, basestring):
            input_dirs = [prefix]
        else:
            input_dirs = prefix

    # Exact filename hit in the first directory: done.
    guess = '{0!s}/{1!s}'.format(input_dirs[0], fn)
    if os.path.exists(guess):
        return guess

    # Find exactly what table should be
    zmin, zmax, Nz, lEmin, lEmax, chem, pre, post = self._parse_tab(fn)

    ok_matches = []
    perfect_matches = []

    # Loop through input directories
    for input_dir in input_dirs:

        # Loop over files in input_dir, look for best match
        for fn1 in os.listdir(input_dir):
            if re.search('hdf5', fn1) and (not have_h5py):
                continue

            tab_name = '{0!s}/{1!s}'.format(input_dir, fn1)

            # Skip anything whose name we cannot parse.
            try:
                zmin_f, zmax_f, Nz_f, lEmin_f, lEmax_f, chem_f, p1, p2 = \
                    self._parse_tab(fn1)
            except Exception:
                continue

            # Dealbreakers: insufficient redshift coverage, wrong
            # chemistry, or too few redshift bins.
            if zmax_f < zmax:
                continue
            if chem_f != chem:
                continue
            if Nz_f < Nz:
                continue

            # Continue with possible matches
            for fmt in ['pkl', 'hdf5']:
                if fn1 == fn and fmt == self.pf['preferred_format']:
                    perfect_matches.append(tab_name)
                    continue

                # BUG FIX: a branch here referenced an undefined name `c`,
                # raising NameError for every non-exact candidate. That
                # residue has been removed; the pre/post pattern checks
                # below perform the actual matching.

                # If number of redshift bins and energy range right...
                if re.search(pre, fn1) and re.search(post, fn1):
                    if re.search(fmt, fn1) and fmt == self.pf['preferred_format']:
                        perfect_matches.append(tab_name)
                    else:
                        ok_matches.append(tab_name)

                # If number of redshift bins is right...
                elif re.search(pre, fn1):
                    if re.search(fmt, fn1) and fmt == self.pf['preferred_format']:
                        perfect_matches.append(tab_name)
                    else:
                        ok_matches.append(tab_name)

    if perfect_matches:
        return perfect_matches[0]
    elif ok_matches:
        return ok_matches[0]
    else:
        return None
def _parse_tab(self, fn):
"""
"""
tmp1, tmp2 = fn.split('_z_')
pre = tmp1[0:tmp1.rfind('x')]
red, tmp3 = fn.split('_logE_')
post = '_logE_' + tmp3.replace('.hdf5', '')
# Find exactly what table should be
zmin, zmax = list(map(float, red[red.rfind('z')+2:].partition('-')[0::2]))
logEmin, logEmax = list(map(float, tmp3[tmp3.rfind('E')+1:tmp3.rfind('.')].partition('-')[0::2]))
Nz = pre[pre.rfind('_')+1:]
# Hack off Nz string and optical_depth_
chem = pre.strip(Nz)[14:-1]#.strip('optical_depth_')
return zmin, zmax, int(Nz), logEmin, logEmax, chem, pre, post
def _fetch_tau(self, pop, zpf, Epf):
    """
    Look for optical depth tables. Supply corrected energy and redshift
    arrays if there is a mismatch between those generated from information
    in the parameter file and those found in the optical depth table.

    .. note:: This will only be called from UniformBackground, and on
        populations which are using the generator framework.

    Parameters
    ----------
    pop : object
        Population of interest (used only when reporting energy mismatches).
    zpf : np.ndarray
        What the redshifts should be according to the parameter file.
    Epf : np.ndarray
        What the energies should be according to the parameter file.

    Returns
    -------
    Energies and redshifts, potentially revised from Epf and zpf, plus the
    (possibly column-trimmed) tau table; the table entry is None when no
    table could be located.
    """
    # First, look in CWD or $ARES (if it exists)
    if self.pf['tau_table'] is None:
        self.tabname = self.find_tau(self.pf['tau_prefix'])
    else:
        self.tabname = self.pf['tau_table']

    # No table anywhere: caller falls back on the parameter-file grids.
    if self.tabname is None:
        return zpf, Epf, None

    if not os.path.exists(self.tabname):
        raise IOError("Optical depth table {} does not exist!".format(self.tabname))

    # If we made it this far, we found a table that may be suitable
    ztab, Etab, tau = self.load(self.tabname)

    # Return right away if there's no potential for conflict
    if (zpf is None) and (Epf is None):
        return ztab, Etab, tau

    # Figure out if the tables need fixing.
    # zmax: table must reach (approximately) first_light_redshift.
    zmax_ok = (round(ztab.max(), 2) >= self.pf['first_light_redshift'])
    # zmin / Emin: table must extend at least as low as the parameter file
    # asks (allclose tolerates float round-off in the table edges).
    zmin_ok = \
        (ztab.min() <= zpf.min()) or \
        np.allclose(ztab.min(), zpf.min())
    Emin_ok = \
        (Etab.min() <= Epf.min()) or \
        np.allclose(Etab.min(), Epf.min())

    # Results insensitive to Emax (so long as its relatively large)
    # so be lenient with this condition (100 eV or 1% difference
    # between parameter file and lookup table)
    Emax_ok = np.allclose(Etab.max(), Epf.max(), atol=100., rtol=1e-2)

    # Check redshift bounds
    if not (zmax_ok and zmin_ok):
        if not zmax_ok:
            # Table doesn't reach high enough redshift: fatal.
            tau_tab_z_mismatch(self, zmin_ok, zmax_ok, ztab)
            sys.exit(1)
        else:
            # Lower bound only: warn (if verbose) but proceed.
            if self.pf['verbose']:
                tau_tab_z_mismatch(self, zmin_ok, zmax_ok, ztab)

    if not (Emax_ok and Emin_ok):
        if self.pf['verbose']:
            tau_tab_E_mismatch(pop, self.tabname, Emin_ok, Emax_ok, Etab)

        # Table tops out below the requested maximum energy: fatal.
        if Etab.max() < Epf.max():
            sys.exit(1)

    # Correct for inconsistencies between parameter file and table
    # By effectively masking out those elements with tau -> inf
    if Epf.min() > Etab.min():
        # Index of the table energy closest to the requested minimum.
        Ediff = Etab - Epf.min()
        i_E0 = np.argmin(np.abs(Ediff))
        if Ediff[i_E0] < 0:
            i_E0 += 1

        # NOTE(review): the net effect of the +1 above followed by this -1
        # keeps one column at/below Epf.min() -- presumably deliberate
        # padding of the lower edge; confirm against callers.
        i_E0 -= 1

        #tau[:,0:i_E0+1] = np.inf

    else:
        i_E0 = 0

    if Epf.max() < Etab.max():
        # Index of the table energy closest to the requested maximum.
        Ediff = Etab - Epf.max()
        i_E1 = np.argmin(np.abs(Ediff))
        if Ediff[i_E1] < 0:
            i_E1 += 1

        #tau[:,i_E1+1:] = np.inf
    else:
        i_E1 = None

    # Cache the (possibly trimmed) grids for later inspection.
    self.z_fetched = ztab
    self.E_fetched = Etab[i_E0:i_E1]
    self.tau_fetched = tau[:,i_E0:i_E1]

    # We're done!
    return ztab, Etab[i_E0:i_E1], tau[:,i_E0:i_E1]
def tau_shape(self):
    """
    Determine dimensions of the optical depth table.

    Unfortunately, this is a bit redundant with the procedure in
    self._init_xrb, but that's the way it goes.

    Returns
    -------
    (L, N) : tuple of int
        Number of redshift points and number of photon-energy bins.
    """
    # Number of points in the log-grid over x = 1 + z; the grid itself
    # need not be built here since np.logspace(..., num=m) has length m.
    # Cleanup: the previous version also computed z, logx, logz, R, logR
    # and ran a while-loop building an energy mapping, none of which was
    # ever used -- dead code removed.
    L = int(self.pf['tau_redshift_bins'])

    E0 = self.pf['tau_Emin']

    # Frequency grid must be index 1-based.
    N = num_freq_bins(L, zi=self.pf['first_light_redshift'],
        zf=self.pf['final_redshift'], Emin=E0,
        Emax=self.pf['tau_Emax'])
    N -= 1

    return L, N
def save(self, fn=None, prefix=None, suffix='pkl', clobber=False):
"""
Write optical depth table to disk.
Parameters
----------
fn : str
Full filename (including suffix). Will override prefix and suffix
parameters.
"""
if rank != 0:
return
if fn is None:
if prefix is None:
fn, func = self.tau_name(prefix=None, suffix=suffix)
else:
| |
will rise or fall
# Note: R = Rising, F = Falling
#--------------------------------------------------
# wR = (self.dV > 0) # boolean array
# wF = np.invert(wR) # boolean array
# n_rising = wR.sum()
# n_falling = wF.sum()
# print(' n_rising = ' + str(n_rising))
# print(' n_falling = ' + str(n_falling))
#-----------------------------------------
# For debugging: save initial value of h
#-----------------------------------------
# start_h = self.h_table.copy()
#-----------------------------------------
# Process cells where dzw > 0.
# Compute unused capacity of each layer,
# working from the bottom layer upward.
#-------------------------------------------
# Assume porosity = saturated water content
#-----------------------------------------------
# The amount of water, as a depth, that can
# be present in a layer (if soil were removed)
# is (tk - yk)*pk.
# Since pk in (0,1), this is < (tk - yk).
# dz_k = (tk - yk) >= 0.
# dzw_avail_k = dz_k * pk >= 0.
#-----------------------------------------------
# Note: range(start, stop, step), and last
# value is 0 if stop == -1. # CONFIRMED
#-----------------------------------------------
dzw_pos = self.dzw.copy()
dzw_pos[ dzw_pos < 0 ] = 0.0
for k in range((self.n_layers - 1), -1, -1):
yk = self.y[k,:,:]
tk = self.th[k] # (thickness of layer)
pk = self.qs[k] # (porosity of layer)
SCALAR_THICKNESS = (tk.size == 1)
SCALAR_POROSITY = (pk.size == 1)
dz_k = (tk - yk) # (becomes grid due to y)
dzw_avail_k = dz_k * pk # (becomes grid due to y)
dzw_avail_k = np.minimum( dzw_avail_k, 0) # (shouldn't be needed)
#------------------------------------------------------
# Where the increase in dzw exceeds layer capacity,
# raise water table, set yk = tk, consume some of dzw
#------------------------------------------------------
# w1 works even if w1.sum() = 0
#------------------------------------------------------
w1 = (dzw_pos > dzw_avail_k) # (boolean array)
self.h_table[w1] += dz_k[w1]
if (SCALAR_THICKNESS):
yk[w1] = tk
else:
yk[w1] = tk[w1] # (filled)
dzw_pos[w1] -= dzw_avail_k[w1]
#---------------------------------------------------------
# Where the increase in dzw is less than layer capacity,
# raise water table, increase yk, consume all of dzw.
#---------------------------------------------------------
w2 = np.invert(w1)
if (SCALAR_POROSITY):
dh = dzw_pos[w2] / pk
else:
dh = dzw_pos[w2] / pk[w2]
self.h_table[w2] += dh
yk[w2] += dh
dzw_pos[w2] = 0 #### np.float64(0)
#-------------------------------------
self.y[k,:,:] = yk # (replace a layer in y)
#------------------------------------------------
# Where dzw is still gt 0, we must add it to h
# since we have exhausted the capacity of the
# soil layers. This will bring h above the DEM
# surface, z. The increase in h will result in
# surface runoff via a positive seepage rate.
#------------------------------------------------
# self.h_table += dzw_pos # (should work?)
w3 = (dzw_pos > 0)
self.h_table[w3] += dzw_pos[w3]
dzw_pos[w3] = 0
n3 = w3.sum()
if (n3 > 0):
print('Baseflow at:', n3, 'grid cells.')
#-------------------------------
# Process cells where dzw < 0,
# working from top layer down
#-------------------------------
dzw_neg = self.dzw.copy()
dzw_neg[ dzw_neg > 0 ] = 0.0
for k in range(self.n_layers):
yk = self.y[k,:,:]
tk = self.th[k] # (thickness of layer)
pk = self.qs[k] # (porosity of layer)
SCALAR_THICKNESS = (tk.size == 1)
SCALAR_POROSITY = (pk.size == 1)
dzw_avail_k = yk * pk # (becomes grid due to y)
#------------------------------------------------------
# Where the decrease in dzw exceeds water in layer,
# lower water table, set yk = 0, consume some of dzw
#------------------------------------------------------
w1 = (np.absolute(dzw_neg) > dzw_avail_k) # (boolean array)
self.h_table[w1] -= yk[w1]
yk[w1] = 0.0
dzw_neg[w1] += dzw_avail_k[w1] #(neg + pos)
#---------------------------------------------------------
# Where the decrease in dzw is less than water in layer,
# lower water table, decrease yk, consume all of dzw.
#---------------------------------------------------------
# NB! pk=0 => dzw_avail_k=0, so OK to divide by pk
#----------------------------------------------------
w2 = np.invert(w1)
if (SCALAR_POROSITY):
dh = dzw_neg[w2] / pk
else:
dh = dzw_neg[w2] / pk[w2]
self.h_table[w2] += dh
yk[w2] += dh
dzw_neg[w2] = 0.0
#------------------------------------------------
self.y[k,:,:] = yk # (replace a layer in y)
#------------------------------------------------
# Where dzw is still lt 0, we must subtract it
# from h; all soil layers are now empty. This
# will bring h below the depth of the lowest
# soil layer. Should we assume that porosity
# is the same as for the lowest layer or should
# bottom of bottom layer be impermeable?
#------------------------------------------------
# This is where we should use depth to bedrock.
#------------------------------------------------
w3 = (dzw_neg < 0)
if (SCALAR_POROSITY):
dh = dzw_neg[w3] / pk
else:
dh = dzw_neg[w3] / pk[w3]
self.h_table[w3] += dh
# dzw_neg[w3] = 0.0 # (shouldn't be needed here)
#------------------------------------------
# (2021-07-27) Do we need this as well ?
#------------------------------------------
self.h_table[ self.d8.noflow_IDs ] = 0.0
#-------------------------
# We shouldn't need this
#-------------------------
# self.dzw[:] = 0.0
# update_water_table()
#-------------------------------------------------------------------
def update_water_table_OLD(self):
#------------------------------------------------------------------
# Notes: h = elevation of water table [m]
# h2 = temp version of h
# Q_gw = total subsurface flux [m^3/s]
# Rg = rate at which water from the surface
# arrives at the water table [m/s]
# da = pixel area [m^2]
# dt = GW timestep [sec]
# w1 = IDs of pixels that flow in direction 1
# p1 = IDs of parent pixels for "w1 pixels"
# Note: h and wetted-depths, y, are updated
#------------------------------------------------------------------
# Notes: There seems to be an implicit assumption here
# that Ks is nondecreasing towards the surface.
# Once an element is saturated there is no internal
# storage and the amount of water flowing in through
# its faces must equal the amount that is flowing out.
# So if the amount flowing in from the sides exceeds
# the amount flowing out, (which will occur when the
# flow is convergent) then the excess must flow
# through the upper or lower faces. With saturated
# soil all the way down to an impermeable bedrock
# boundary, this means that it must flow through the
# upper face. But all that enters in a given time step
# can only flow through the upper face if Ks in the
# element above is high enough to accommodate it.
#------------------------------------------------------------------
# NB! There is a fair amount of IDL "where subscripting"
# used in this function. I2PY does not handle this
# correctly yet, so it was fixed by hand using ".flat", etc.
# See NOTES at the end of this file.
#------------------------------------------------------------------
#--------------------------------------------
# Compute dzw = total amount of water to be
# added to or removed from the soil column
# during the subsurface flow timestep
#--------------------------------------------
# Initialize dzw with outflow term.
# Doesn't involve neighbor pixels.
#------------------------------------
Rg = self.Rg # (using new framework, 5/18/12)
dzw = self.dt * (Rg - self.Q_gw / self.da)
#----------------
# For debugging
#----------------
## if (np.size( dzw ) == 1):
## msg = array([' ', 'ERROR: dzw is not an array. ', ' '])
## result = GUI_Message(msg, INFO=True)
## sys.exit()
#----------------
# For debugging
#----------------
#print 'dt =', dt
#print 'Rg =', Rg
#------------------------------------
#print 'Qg_min =', self.Q_gw.min()
#print 'Qg_max =', self.Q_gw.max()
#------------------------------------
dz_min = dzw.min()
dz_max = dzw.max() #***********************
print(' dz_min = ' + str(dz_min))
print(' dz_max = ' + str(dz_max))
## print ' '
#-------------------------------------------
# Local synonyms (Any performance hit ??)
#-------------------------------------------
p1 = self.d8.p1 ; w1 = self.d8.w1
p2 = self.d8.p2 ; w2 = self.d8.w2
p3 = self.d8.p3 ; w3 = self.d8.w3
p4 = self.d8.p4 ; w4 = self.d8.w4
p5 = self.d8.p5 ; w5 = self.d8.w5
p6 = self.d8.p6 ; w6 = self.d8.w6
p7 = self.d8.p7 ; w7 = self.d8.w7
p8 = self.d8.p8 ; w8 = self.d8.w8
#-----------------------------------------
# Add contributions from neighbor pixels
#-----------------------------------------
dt = self.dt ############# CHECK dt #########
if (np.size( self.da ) == 1):
factor = (dt / self.da)
if (self.d8.p1_OK):
dzw[p1] += (self.Q_gw[w1] * factor)
if (self.d8.p2_OK): | |
# varCall_filtering/scripts/helpers.py
import ConfigParser
import os
import re
import sys
import subprocess
from threading import Thread
def mpileup_cmdgen(args, case_name, source_dir):
    """Build the qsub command that runs the parallel mpileup array job."""
    cmd = ("qsub -sync y -t {start}-{end} -V -N {case} "
           "-e {out}/vcfcall/logs/{case}.vcfcall.e "
           "-o {out}/vcfcall/logs/{case}.vcfcall.o "
           "-cwd -l mem=10G,time=1:: "
           "{src}/parallel_pileup.sh "
           "--bam {indir}/{case}.bam "
           "--ref {ref} "
           "--outputdir {out}/vcfcall/{case}").format(
               start=args.start, end=args.end, case=case_name,
               out=args.output, src=source_dir, indir=args.inputdir,
               ref=args.ref)
    if args.debug:
        print('[Performing mpileup]')
        print(cmd)
    return cmd
def vcf_concat_cmdgen(args, case_name):
    """Build the vcf-concat command joining the per-task raw vcf pieces."""
    pieces = ["{0}/vcfcall/{1}/raw_{2}.vcf".format(args.output, case_name, i)
              for i in range(args.start, args.end + 1)]
    cmd = "vcf-concat {0} > {1}/vcfcall/{2}.vcf".format(
        " ".join(pieces), args.output, case_name)
    if args.debug:
        print('[Concatenating vcf files and sorting]')
        print(cmd)
    return cmd
def snpeffarray_cmdgen(args, case_name, source_dir):
    """Build the qsub command running the per-chromosome snpEff array job."""
    cmd = ("qsub -V -b y -sync y -t 1-23 -cwd -l mem=10G,time=2:: -pe smp 2 "
           "-N {case} "
           "-e {out}/annotate/logs/ "
           "-o {out}/annotate/{case}.o "
           "{src}/parallelsnp.sh "
           "-outputdir {out} "
           "-snpeff {se} "
           "-case_name {case} "
           "-input {out}/annotate").format(
               case=case_name, out=args.output, se=args.snpeff,
               src=source_dir)
    if args.debug:
        print('[Annotating ' + case_name + ' with snpEff]')
        print(cmd)
    return cmd
def snpsiftarray_cmdgen(args, case_name, vcf, source_dir):
    """Build the qsub command running the per-chromosome SnpSift array job."""
    cmd = ("qsub -V -b y -sync y -t 1-23 -cwd -l mem=10G,time=2:: -pe smp 2 "
           "-N {case} "
           "-e {out}/annotate/logs/ "
           "-o {out}/annotate/{case}.o "
           "{src}/parallelsift.sh "
           "-outputdir {out} "
           "-snpeff {se} "
           "-case_name {case} "
           "-input {out}/annotate "
           "-vcf {vcf}").format(
               case=case_name, out=args.output, se=args.snpeff,
               src=source_dir, vcf=vcf)
    if args.debug:
        print('[Annotating ' + case_name + ' with snpSift]')
        print(cmd)
    return cmd
def snpdbnsfparray_cmdgen(args, case_name, dbnsfp, source_dir, dbnsfp_header):
    """Build the qsub command running the per-chromosome dbNSFP array job."""
    cmd = ("qsub -V -b y -sync y -t 1-23 -cwd -l mem=10G,time=4:: -pe smp 2 "
           "-N {case} "
           "-e {out}/annotate/logs/ "
           "-o {out}/annotate/logs/{case}.o "
           "{src}/paralleldbnsfp.sh "
           "-outputdir {out} "
           "-snpeff {se} "
           "-case_name {case} "
           "-input {out}/annotate "
           "-dbnsfp {db} "
           "-header {hdr}").format(
               case=case_name, out=args.output, se=args.snpeff,
               src=source_dir, db=dbnsfp, hdr=dbnsfp_header)
    if args.debug:
        print('[Annotating ' + case_name + ' with dbnsfp]')
        print(cmd)
    return cmd
def vcf_snp_concat_cmdgen(args, case_name):
    """Build the vcf-concat command joining per-chromosome annotated vcfs.

    Covers autosomes 1-22 plus the 'MISC' bucket.
    """
    # BUG FIX: `range(1, 23) + ['MISC']` only works on Python 2, where
    # range() returns a list; materialize explicitly so the code runs on
    # both Python 2 and 3.
    chroms = list(range(1, 23)) + ['MISC']
    vcflist = []
    for i in chroms:
        vcfname = args.output + "/annotate/" + case_name + "." + str(i) + ".eff.vcf"
        vcflist.append(vcfname)
    vcfstr = " ".join(vcflist)
    cmd = "vcf-concat " + vcfstr + " > " \
        + args.output + "/annotate/" + case_name + ".eff.all.vcf"
    if args.debug:
        print('[Concatenating ' + case_name + ' vcf files]')
        print(cmd)
    return cmd
def oneEff_cmdgen(args, case_name, source_dir):
    """Build the pipeline splitting multi-effect vcf lines to one per line."""
    cmd = "cat {out}/annotate/{case}.eff.all.vcf | {src}/vcfEffOnePerLine.pl > {out}/annotate/{case}.vcf".format(
        out=args.output, case=case_name, src=source_dir)
    if args.debug:
        print('[Splitting vcf effects to one per line]')
        print(cmd)
    return cmd
def filterarray_cmdgen(args, case_name, source_dir, whichscript):
    """Build the qsub command running the per-chromosome filter array job."""
    cmd = ("qsub -V -b y -sync y -t 1-23 -cwd -l mem=10G,time=4:: -pe smp 2 "
           "-N {case} "
           "-e {out}/filter/logs/ "
           "-o {out}/filter/logs/ "
           "{src}/parallelfilter.sh "
           "-outputdir {out} "
           "-case_name {case} "
           "-source_dir {src} "
           "-whichscript {script} "
           "-inputdir {out}/filter").format(
               case=case_name, out=args.output, src=source_dir,
               script=whichscript)
    if args.debug:
        print('[Filtering ' + case_name + ']')
        print(cmd)
    return cmd
def get_filenames(inputdir, extension):
    """
    Return base names (extension stripped) of files of the given type found
    in the top level of *inputdir*.

    Files whose names begin with a digit are skipped. Exits with an error
    message if no matching file is found.
    """
    # BUG FIX: an unsupported extension previously left `pattern` undefined
    # and crashed later with NameError; fail fast and explicitly instead.
    patterns = {
        "bam": r"^.*\.bam$",
        "vcf": r"^.*\.vcf$",
        "tsv": r"^.*\.tsv$",
    }
    if extension not in patterns:
        sys.exit("[ERROR] unsupported extension: " + str(extension))
    pattern = patterns[extension]

    input_filenames = []
    for (dirpath, dirnames, filenames) in os.walk(inputdir):
        for filename in filenames:
            if re.search(pattern, filename) \
                    and not re.search(r"^\d", filename):
                # Strip the 4-character '.bam'/'.vcf'/'.tsv' suffix.
                input_filenames.append(filename[:-4])
        # Only the top-level directory is scanned (preserves original
        # behavior: the walk is broken after the first iteration).
        break

    if len(input_filenames) == 0:
        sys.exit("[ERROR] no files found in input directory")
    return input_filenames
def purge(directory, pattern):
    """Delete every file in *directory* whose name matches *pattern*."""
    for entry in os.listdir(directory):
        if re.search(pattern, entry):
            os.remove(os.path.join(directory, entry))
def parse_config(args):
    """
    Fill in missing command-line arguments from the config file named by
    ``args.config``; explicit command-line values always win.
    """
    config = ConfigParser.ConfigParser()
    config.read(args.config)
    # (current value, option name, config section) triples.
    triples = [
        (args.inputdir, 'inputdir', 'main'),
        (args.output, 'output', 'main'),
        (args.steps, 'steps', 'main'),
        (args.cluster, 'cluster', 'main'),
        (args.ref, 'ref', 'varcall'),
        (args.snpeff, 'snpeff', 'annotate'),
        (args.annovcf, 'annovcf', 'annotate'),
        (args.dbnsfp, 'dbnsfp', 'annotate'),
        (args.vcftype, 'vcftype', 'filter'),
    ]
    for current, option, section in triples:
        # Only fill options the command line left empty.
        if not current and option in ConfigSectionMap(config, section):
            vars(args)[option] = ConfigSectionMap(config, section)[option]
    return args
def ConfigSectionMap(Config, section):
    """Return a {option: value} dict for *section* of *Config*.

    Options whose value cannot be read are reported and mapped to None.
    (Based on https://wiki.python.org/moin/ConfigParserExamples)
    """
    dict1 = {}
    options = Config.options(section)
    for option in options:
        try:
            dict1[option] = Config.get(section, option)
        # BUG FIX: the bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt; catch only genuine errors.
        except Exception:
            print("exception on %s!" % option)
            dict1[option] = None
    return dict1
def runShellCmd(cmd):
    """Run *cmd* in a shell, echoing it; exit the program on failure."""
    proc = subprocess.Popen(
        cmd,
        shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE
    )
    # BUG FIX: `print cmd` is a Python-2-only statement; use the print()
    # call form, consistent with every other print in this module and
    # valid on Python 3.
    print(cmd)
    if proc.wait() != 0:
        sys.exit("[ERROR] process '" + cmd + "' terminated with non-zero exit code")
def check_main_args(args):
    """
    Validate required top-level arguments, exiting with an error message if
    any is missing, and normalize directory arguments by stripping a
    trailing '/'.
    """
    if args.inputdir is None:
        sys.exit("[ERROR]: Missing required '--inputdir' argument.")
    if args.output is None:
        sys.exit("[ERROR]: Missing required '--output' argument.")
    if args.steps is None:
        sys.exit("[ERROR]: Missing required '--steps' argument.")
    if args.cluster is None:
        # BUG FIX: message typo 'rquired' -> 'required'.
        sys.exit("[ERROR]: Missing required '--cluster' argument.")
    # Check for '/' at end of directories; remove if there.
    if args.inputdir.endswith("/"):
        args.inputdir = args.inputdir[:-1]
    if args.output.endswith("/"):
        args.output = args.output[:-1]
def check_varcall_args(args):
    """Validate variant-calling arguments; exits on a bad combination."""
    if args.ref is None:
        sys.exit("[ERROR]: Missing required '--ref' argument.")
    if args.end <= args.start:
        # BUG FIX: the previous message stated the requirement backwards
        # ("'--end' argument >= '--start' argument"); the actual
        # requirement is end > start.
        sys.exit("[ERROR]: '--end' argument must be > '--start' argument")
def check_anno_args(args):
    """Ensure all annotation-step arguments were provided; exit otherwise."""
    for value, flag in ((args.snpeff, '--snpeff'),
                        (args.dbnsfp, '--dbnsfp'),
                        (args.annovcf, '--annovcf')):
        if value is None:
            sys.exit("[ERROR]: Missing required '" + flag + "' argument.")
def check_filt_args(args):
    """Ensure the filter step received its required '--vcftype' argument."""
    if args.vcftype is None:
        sys.exit("[ERROR]: Missing required '--vcftype' argument.")
def check_merge_args(args):
    """Ensure the merge step received its required '--mergename' argument."""
    if args.mergename is None:
        sys.exit("[ERROR]: Missing required '--mergename' argument.")
def multithread(function, arguments, input_filenames):
    """
    Run ``function(case_name, arguments)`` concurrently, one thread per
    entry of *input_filenames*, and wait for all threads to finish.
    """
    workers = [Thread(target=function, args=(name, arguments))
               for name in input_filenames]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
def snpeff_cmdgen(args, case_name):
    """Build the qsub command running snpEff on a single case's vcf."""
    cmd = ("qsub -V -b y -sync y -N {case} "
           "-l mem=10G,time=2:: -pe smp 2 "
           "-e {out}/annotate/logs/{case}.snpeff.e "
           "-o {out}/annotate/{case}.eff.vcf "
           "java -Xmx6G "
           "-jar {se}/snpEff.jar -c {se}/snpEff.config "
           "GRCh37.71 "
           "-noStats -v -lof -canon "
           "-no-downstream -no-intergenic -no-intron -no-upstream -no-utr "
           "{indir}/{case}.vcf "
           "> {out}/annotate/logs/{case}.snpeff.o").format(
               case=case_name, out=args.output, se=args.snpeff,
               indir=args.inputdir)
    if args.debug:
        print('[Annotating with snpEff]')
        print(cmd)
    return cmd
def snpsift_cmdgen(args, case_name, vcf):
    """Build the qsub command running SnpSift annotate on a case's vcf."""
    # NOTE: the double space before '>' reproduces the original command
    # exactly (harmless to the shell).
    cmd = ("qsub -V -b y -sync y -N {case} "
           "-l mem=10G,time=2:: -pe smp 2 "
           "-e {out}/annotate/logs/{case}.snpeff.e "
           "-o {out}/annotate/{case}.eff.vcf.tmp "
           "java -Xmx6G "
           "-jar {se}/SnpSift.jar annotate -v {vcf} "
           "{out}/annotate/{case}.eff.vcf "
           " > {out}/annotate/logs/{case}.snpeff.o").format(
               case=case_name, out=args.output, se=args.snpeff, vcf=vcf)
    if args.debug:
        print(cmd)
    return cmd
def snpdbnsfp_cmdgen(args, case_name, dbnsfp, header):
    """Build the qsub command running SnpSift dbnsfp on a case's vcf."""
    # NOTE: the double space before '>' reproduces the original command
    # exactly (harmless to the shell).
    cmd = ("qsub -V -b y -sync y -N {case} "
           "-l mem=10G,time=2:: -pe smp 2 "
           "-e {out}/annotate/logs/{case}.snpeff.e "
           "-o {out}/annotate/{case}.eff.all.vcf "
           "java -Xmx6G "
           "-jar {se}/SnpSift.jar dbnsfp {db} -v -f {hdr} "
           "{out}/annotate/{case}.eff.vcf "
           " > {out}/annotate/logs/{case}.snpeff.o").format(
               case=case_name, out=args.output, se=args.snpeff,
               db=dbnsfp, hdr=header)
    if args.debug:
        print(cmd)
    return cmd
def split_vcf(args,case_name,step):
if args.debug:
print("[Splitting vcf file by chromosome]")
for chrom in range(1,23):
if args.debug:
print("[Chromosome " | |
# -*- coding: utf-8 -*-
"""
Workflow and workflow proxy base class definitions.
"""
__all__ = ["BaseWorkflow", "workflow_property", "cached_workflow_property"]
import sys
import functools
import logging
from collections import OrderedDict
from abc import abstractmethod
import luigi
import six
from law.task.base import Task, ProxyTask
from law.target.collection import TargetCollection, SiblingFileCollection
from law.parameter import NO_STR, NO_INT, CSVParameter
logger = logging.getLogger(__name__)
_forward_attributes = ("requires", "output", "run", "complete")
class BaseWorkflowProxy(ProxyTask):
    """
    Base class of all workflow proxies.

    .. py:classattribute:: workflow_type
       type: string

       The named type of the workflow. This attribute refers to the value of the ``--workflow``
       parameter on the command line to select a particular workflow.

    .. py:attribute:: task
       type: Task

       Reference to the actual *workflow* task.
    """

    # Set by concrete proxy subclasses; matched against --workflow.
    workflow_type = None

    # When True, run-method decorators declared on the task are applied to
    # this proxy's run method during __init__.
    add_workflow_run_decorators = True

    def __init__(self, *args, **kwargs):
        super(BaseWorkflowProxy, self).__init__(*args, **kwargs)

        # find decorators for this proxy's run method that can be configured on the actual task
        # (a type-specific "<workflow_type>_workflow_run_decorators" attribute
        # takes precedence over the generic "workflow_run_decorators")
        if self.add_workflow_run_decorators:
            for prefix in [self.workflow_type + "_", ""]:
                attr = "{}workflow_run_decorators".format(prefix)
                decorators = getattr(self.task, attr, None)
                if decorators is not None:
                    # found decorators, so unbound, decorate and re-bound
                    run_func = self.run.__func__
                    for decorator in decorators:
                        run_func = decorator(run_func)
                    self.run = run_func.__get__(self)
                    break

    def complete(self):
        """
        Custom completion check that invokes the task's *workflow_complete* if it is callable, or
        just does the default completion check otherwise.
        """
        if callable(self.task.workflow_complete):
            return self.task.workflow_complete()
        else:
            return super(BaseWorkflowProxy, self).complete()

    def requires(self):
        """
        Returns the default workflow requirements in an ordered dictionary, which is updated with
        the return value of the task's *workflow_requires* method.
        """
        reqs = OrderedDict()
        reqs.update(self.task.workflow_requires())
        return reqs

    def output(self):
        """
        Returns the default workflow outputs in an ordered dictionary. At the moment this is just
        the collection of outputs of the branch tasks, stored with the key ``"collection"``.
        """
        # Collection class priority: explicit task override, then sibling
        # files (all outputs in one directory), then the generic collection.
        if self.task.target_collection_cls is not None:
            cls = self.task.target_collection_cls
        elif self.task.outputs_siblings:
            cls = SiblingFileCollection
        else:
            cls = TargetCollection

        targets = luigi.task.getpaths(self.task.get_branch_tasks())
        collection = cls(targets, threshold=self.threshold(len(targets)))

        return OrderedDict([("collection", collection)])

    def threshold(self, n=None):
        """
        Returns the threshold number of tasks that need to be complete in order to consider the
        workflow as being complete itself. This takes into account the
        :py:attr:`law.BaseWorkflow.acceptance` parameter of the workflow. The threshold is passed
        to the :py:class:`law.TargetCollection` (or :py:class:`law.SiblingFileCollection`) within
        :py:meth:`output`. By default, the maximum number of tasks is taken from the length of the
        branch map. For performance purposes, you can set this value, *n*, directly.
        """
        if n is None:
            n = len(self.task.branch_map())

        # acceptance <= 1 is interpreted as a fraction of n; larger values
        # are absolute task counts.
        acceptance = self.task.acceptance
        return (acceptance * n) if acceptance <= 1 else acceptance
def workflow_property(func):
    """
    Decorator that turns a method into a property which is always evaluated
    on the *workflow* task, so branch tasks transparently see workflow-level
    state. Internally, ``self.as_workflow()`` resolves the workflow instance
    before the decorated method is invoked. Useful for data that is common
    (and mutable) to a workflow and all of its branch tasks. Example:

    .. code-block:: python

        class MyTask(Workflow):

            def __init__(self, *args, **kwargs):
                super(MyTask, self).__init__(*args, **kwargs)

                if self.is_workflow():
                    self._common_data = some_demanding_computation()

            @workflow_property
            def common_data(self):
                # this method is always called with *self* is the *workflow*
                return self._common_data
    """
    @functools.wraps(func)
    def getter(self):
        # Delegate to the workflow instance, never the branch task.
        return func(self.as_workflow())

    return property(getter)
def cached_workflow_property(func=None, attr=None, setter=True):
    """
    Decorator to declare an attribute that is stored only on a workflow and cached for
    subsequent calls, with the decorated method (lazily) providing the value to cache. The
    cached value is stored as ``_workflow_cached_<func.__name__>`` on the workflow; set the
    *attr* argument to override that name. By default, a setter is provided to overwrite the
    cached value; pass *setter=False* to disable it. Example:

    .. code-block:: python

        class MyTask(Workflow):

            @cached_workflow_property
            def common_data(self):
                # this method is always called with *self* being the *workflow*
                return some_demanding_computation()

            @cached_workflow_property(attr="my_own_property", setter=False)
            def common_data2(self):
                return some_other_computation()
    """
    def decorator(fn):
        cache_attr = attr or "_workflow_cached_" + fn.__name__

        @functools.wraps(fn)
        def fget(self):
            wf = self.as_workflow()
            # compute and memoize on first access only
            if not hasattr(wf, cache_attr):
                setattr(wf, cache_attr, fn(wf))
            return getattr(wf, cache_attr)

        fset = None
        if setter:
            def fset(self, value):
                setattr(self.as_workflow(), cache_attr, value)
            fset.__name__ = fn.__name__

        return property(fget=fget, fset=fset)

    # support usage both with and without parentheses
    return decorator(func) if func else decorator
class BaseWorkflow(Task):
"""
Base class of all workflows.
.. py:classattribute:: workflow
type: luigi.Parameter
Workflow type that refers to the workflow proxy implementation at instantiation / execution
time. Empty default value.
.. py:classattribute:: acceptance
type: luigi.FloatParameter
Number of complete tasks to consider the workflow successful. Values larger than one are
interpreted as absolute numbers, and as fractions otherwise. Defaults to *1.0*.
.. py:classattribute:: tolerance
type: luigi.FloatParameter
Number of failed tasks to still consider the workflow successful. Values larger than one are
interpreted as absolute numbers, and as fractions otherwise. Defaults to *0.0*.
.. py:classattribute:: branch
type: luigi.IntParameter
The branch number to run this task for. *-1* means that this task is the actual *workflow*,
rather than a *branch* task. Defaults to *-1*.
.. py:classattribute:: start_branch
type: luigi.IntParameter
First branch to process. Defaults to *0*.
.. py:classattribute:: end_branch
type: luigi.IntParameter
First branch that is *not* processed (pythonic). Defaults to *-1*.
.. py:classattribute:: branches
type: law.CSVParameter
Explicit list of branches to process. Empty default value.
.. py:classattribute:: workflow_proxy_cls
type: BaseWorkflowProxy
Reference to the workflow proxy class associated to this workflow.
.. py:classattribute:: workflow_complete
type: None, callable
Custom completion check that is used by the workflow's proxy when callable.
.. py:classattribute:: outputs_siblings
type: bool
Flag that denotes whether the outputs of all branches of this workflow are stored in the same
directory. If *True*, the :py:meth:`BaseWorkflowProxy.output` method will use a
:py:class:`law.SiblingFileCollection`, or a plain :py:class:`law.TargetCollection` otherwise.
.. py:classattribute:: target_collection_cls
type: TargetCollection
Configurable target collection class to use. When set, the attribute has precedence over the
:py:attr:`outputs_siblings` flag.
.. py:classattribute:: force_contiguous_branches
type: bool
Flag that denotes if this workflow is forced to use contiguous branch numbers, starting from
0. If *False*, an exception is raised otherwise.
.. py:classattribute:: workflow_property
type: function
Reference to :py:func:`workflow_property`.
.. py:classattribute:: cached_workflow_property
type: function
Reference to :py:func:`cached_workflow_property`.
.. py:classattribute:: workflow_run_decorators
type: sequence, None
Sequence of decorator functions that will be conveniently used to decorate the workflow
proxy's run method. This way, there is no need to subclass and reset the
:py:attr:`workflow_proxy_cls` just to add a decorator. The value is *None* by default.
.. py:attribute:: branch_map
read-only
type: dict
Shorthand for :py:meth:`get_branch_map`.
.. py:attribute:: branch_data
read-only
Shorthand for ``self.branch_map[self.branch]``.
"""
workflow = luigi.Parameter(default=NO_STR, significant=False, description="the type of the "
"workflow to use")
acceptance = luigi.FloatParameter(default=1.0, significant=False, description="number of "
"finished tasks to consider the task successful, relative fraction (<= 1) or absolute "
"value (> 1), default: 1.0")
tolerance = luigi.FloatParameter(default=0.0, significant=False, description="number of failed "
"tasks to still consider the task successful, relative fraction (<= 1) or absolute value "
"(> 1), default: 0.0")
pilot = luigi.BoolParameter(significant=False, description="disable requirements of the "
"workflow to let branch tasks resolve requirements on their own")
branch = luigi.IntParameter(default=-1, description="the branch number/index to run this "
"task for, -1 means this task is the workflow, default: -1")
start_branch = luigi.IntParameter(default=NO_INT, description="the branch to start at, "
"default: 0")
end_branch = luigi.IntParameter(default=NO_INT, description="the branch to end at, NO_INT "
"means end, default: NO_INT")
branches = CSVParameter(default=[], significant=False, description="branches to use")
workflow_proxy_cls = BaseWorkflowProxy
workflow_complete = None
outputs_siblings = False
target_collection_cls = None
force_contiguous_branches = False
workflow_property = None
cached_workflow_property = None
workflow_run_decorators = None
exclude_db = True
exclude_params_branch = {"print_deps", "print_status", "remove_output", "workflow",
"acceptance", "tolerance", "pilot", "start_branch", "end_branch", "branches"}
exclude_params_workflow = {"branch"}
def __init__(self, *args, **kwargs):
    """
    Initializes the task and, when it acts as the actual workflow, selects and instantiates
    the matching workflow proxy.
    """
    super(BaseWorkflow, self).__init__(*args, **kwargs)

    # determine workflow proxy class to instantiate
    if self.is_workflow():
        # walk the MRO and take the first BaseWorkflow subclass whose proxy
        # matches the requested --workflow type; an empty value (NO_STR)
        # accepts the first proxy found
        classes = self.__class__.mro()
        for cls in classes:
            if not issubclass(cls, BaseWorkflow):
                continue
            if self.workflow in (NO_STR, cls.workflow_proxy_cls.workflow_type):
                # pin the resolved type and create the proxy bound to this task
                self.workflow = cls.workflow_proxy_cls.workflow_type
                self.workflow_proxy = cls.workflow_proxy_cls(task=self)
                logger.debug("created workflow proxy instance of type '{}'".format(
                    cls.workflow_proxy_cls.workflow_type))
                break
        else:
            # no class in the MRO provided a proxy for the requested type
            raise ValueError("unknown workflow type {}".format(self.workflow))

        # cached attributes for the workflow
        self._branch_map = None
        self._branch_tasks = None
    else:
        # cached attributes for branches
        self._workflow_task = None
def _forward_attribute(self, attr):
    """Return *True* when *attr* should be looked up on the workflow proxy."""
    # only whitelisted attributes are forwarded, and only by actual workflows
    if attr not in _forward_attributes:
        return False
    return self.is_workflow()
def __getattribute__(self, attr, proxy=True, force=False):
if proxy and attr != "__class__":
if force or (attr != | |
#!/usr/bin/env python
#
# BEDOPS
# Copyright (C) 2011-2017 <NAME>, <NAME> and <NAME>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
import os
import argparse
import errno
import subprocess
import json
import logging
name = "update-sort-bed-migrate-candidates"
citation = " citation: http://bioinformatics.oxfordjournals.org/content/28/14/1919.abstract"
authors = " authors: <NAME> and <NAME>"
version = " version: 2.4.30"
usage = """ $ update-sort-bed-migrate-candidates [ --dry-run ] [ --debug ]
[ --write-list |
--resort-immediately |
--resort-in-parallel-via-slurm
[ --slurm-memory <MB> ]
[ --slurm-partition <SLURM partition> ]
[ --slurm-workdir <working directory> ]
[ --slurm-output <SLURM output directory> ]
[ --slurm-error <SLURM error directory> ]
]
[ --bedops-root-dir <bedops directory> ]
[ --bedextract-path <path> ]
[ --sort-bed-path <path> ]
[ --unstarch-path <path> ]
[ --starch-path <path> ]
[ --starchcat-path <path> ]
[ --update-sort-bed-slurm-path <path> ]
[ --update-sort-bed-starch-slurm-path <path> ]
--parent-dir <parent directory>
[ --non-recursive-search ]"""
help = """
The "update-sort-bed-migrate-candidates" utility recursively locates BED and
Starch files in the specified parent directory and tests if they require
re-sorting to conform to the updated, post-v2.4.20 "sort-bed" order.
Files with the extensions starch, bstarch, gstarch, bed, or bed[g|G]raph in
the parent directory are tested. Files without these extensions are ignored.
If the "--non-recursive-search" option is added, this utility will only search
within the specified parent directory, and go no deeper.
This utility offers one of three (exclusive) actions for migration:
1. Using "--write-list", files that require re-sorting can have their paths
written to standard output, which can be written to a file to be processed
later on, as desired.
2. Using "--resort-immediately", qualifying files can be resorted immediately
after all candidates are found, through a local, serial application of
'sort-bed'.
3. Using "--resort-in-parallel-via-slurm", candidate files can be migrated by
applying the 'update-sort-bed-slurm' script to resort in parallel on a
computational cluster managed with a SLURM job scheduler.
Note that one of these three options must be chosen, to perform the
stated action, and only one option can be selected.
When using "--resort-immediately" or "--resort-in-parallel-via-slurm", the
resorted files will have the name of the original BED or Starch file. The
original files will have their old name, with the ".backup" extension.
Use the "--bedops-root-dir" option to specify the directory containing the
BEDOPS toolkit binaries to be used for migration. You can provide more specific
paths to individual binaries, using the following options:
Add the "--bedextract-path", "--sort-bed-path", "--unstarch-path",
"--starch-path", "--starchcat-path", "--update-sort-bed-slurm", and/or
"--update-sort-bed-starch-slurm" options to specify custom paths to versions
of these tools, if desired. These values will be passed along to downstream
helper scripts that use them.
---
Suggestions:
1. Add the "--dry-run" option to "--resort-immediately" or the
"--resort-in-parallel-via-slurm" options to see the behavior before any
filesystem actions are performed. Remove "--dry-run" to perform the
specified work.
2. If you use the resort-via-SLURM option, consider using "--slurm-memory",
"--slurm-partition", "--slurm-workdir", "--slurm-output", and
"--slurm-error" options to match the setup of your particular cluster
environment and inputs.
3. Add "--debug" option to log debug statements to get more detail about
internal operation of update process.
"""
def main():
parser = argparse.ArgumentParser(prog=name, usage=usage, add_help=False)
parser.add_argument('--help', '-h', action='store_true', dest='help')
parser.add_argument('--write-list', '-l', action="store_true", dest='write_list')
parser.add_argument('--resort-immediately', '-i', action="store_true", dest='resort_immediately')
parser.add_argument('--resort-in-parallel-via-slurm', '-s', action="store_true", dest='resort_in_parallel_via_slurm')
parser.add_argument('--slurm-memory', '-m', type=str, action="store", dest='slurm_memory')
parser.add_argument('--slurm-partition', '-p', type=str, action="store", dest='slurm_partition')
parser.add_argument('--slurm-workdir', '-w', type=str, action="store", dest='slurm_workdir')
parser.add_argument('--slurm-output', '-r', type=str, action="store", dest='slurm_output')
parser.add_argument('--slurm-error', '-e', type=str, action="store", dest='slurm_error')
parser.add_argument('--bedops-root-dir', '-b', type=str, action="store", dest='bedops_root_dir')
parser.add_argument('--bedextract-path', '-x', type=str, action="store", dest='bedextract_path')
parser.add_argument('--sort-bed-path', '-o', type=str, action="store", dest='sort_bed_path')
parser.add_argument('--unstarch-path', '-u', type=str, action="store", dest='unstarch_path')
parser.add_argument('--starch-path', '-t', type=str, action="store", dest='starch_path')
parser.add_argument('--starchcat-path', '-z', type=str, action="store", dest='starchcat_path')
parser.add_argument('--update-sort-bed-slurm-path', '-y', type=str, action="store", dest='update_sort_bed_slurm_path')
parser.add_argument('--update-sort-bed-starch-slurm-path', '-q', type=str, action="store", dest='update_sort_bed_starch_slurm_path')
parser.add_argument('--dry-run', '-n', action="store_true", dest='dry_run')
parser.add_argument('--debug', '-d', action="store_true", dest='debug')
parser.add_argument('--parent-dir', '-a', type=str, action="store", dest='parent_dir')
parser.add_argument('--non-recursive-search', '-v', action="store_true", dest='non_recursive_search')
args = parser.parse_args()
action_counter = 0
if args.write_list:
action_counter += 1
if args.resort_immediately:
action_counter += 1
if args.resort_in_parallel_via_slurm:
action_counter += 1
if args.help or not args.parent_dir or action_counter != 1:
sys.stdout.write(name + '\n')
sys.stdout.write(citation + '\n')
sys.stdout.write(version + '\n')
sys.stdout.write(authors + '\n\n')
sys.stdout.write(usage + '\n')
sys.stdout.write(help)
if args.help:
sys.exit(os.EX_OK)
else:
sys.exit(errno.EINVAL)
logger = None
if args.debug:
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
bedextract_path = None
if logger: logger.info('Locating \"bedextract\" binary')
if not args.bedextract_path and not args.bedops_root_dir:
bedextract_path = find_binary('bedextract')
if not bedextract_path:
if logger: logger.info('Could not locate \"bedextract\" binary')
sys.stderr.write("ERROR: This script must be run on a system with BEDOPS bedextract\n")
sys.exit(errno.EEXIST)
elif args.bedops_root_dir and not args.bedextract_path:
bedextract_path = os.path.join(args.bedops_root_dir, 'bedextract')
elif args.bedextract_path:
bedextract_path = args.bedextract_path
elif not cmd_exists('bedextract'):
if logger: logger.info('Could not locate \"bedextract\" binary')
sys.stderr.write("ERROR: This script must be run on a system with BEDOPS bedextract\n")
sys.exit(errno.EEXIST)
if logger: logger.info('Location of \"bedextract\" is set to [%s]' % (bedextract_path))
sort_bed_path = None
if logger: logger.info('Locating \"sort-bed\" binary')
if not args.sort_bed_path and not args.bedops_root_dir:
sort_bed_path = find_binary('sort-bed')
if not sort_bed_path:
if logger: logger.info('Could not locate \"sort-bed\" binary')
sys.stderr.write("ERROR: This script must be run on a system with BEDOPS sort-bed\n")
sys.exit(errno.EEXIST)
elif args.bedops_root_dir and not args.sort_bed_path:
sort_bed_path = os.path.join(args.bedops_root_dir, 'sort-bed')
elif args.sort_bed_path:
sort_bed_path = args.sort_bed_path
elif not cmd_exists('sort-bed'):
if logger: logger.info('Could not locate \"sort-bed\" binary')
sys.stderr.write("ERROR: This script must be run on a system with BEDOPS sort-bed\n")
sys.exit(errno.EEXIST)
if logger: logger.info('Location of \"sort-bed\" is set to [%s]' % (sort_bed_path))
unstarch_path = None
if logger: logger.info('Locating \"unstarch\" binary')
if not args.unstarch_path and not args.bedops_root_dir:
unstarch_path = find_binary('unstarch')
if not unstarch_path:
if logger: logger.info('Could not locate \"unstarch\" binary')
sys.stderr.write("ERROR: This script must be run on a system with BEDOPS unstarch\n")
sys.exit(errno.EEXIST)
elif args.bedops_root_dir and not args.unstarch_path:
unstarch_path = os.path.join(args.bedops_root_dir, 'unstarch')
elif args.unstarch_path:
unstarch_path = args.unstarch_path
elif not cmd_exists('unstarch'):
if logger: logger.info('Could not locate \"unstarch\" binary')
sys.stderr.write("ERROR: This script must be run on a system with BEDOPS unstarch\n")
sys.exit(errno.EEXIST)
if logger: logger.info('Location of \"unstarch\" is set to [%s]' % (unstarch_path))
starch_path = None
if logger: logger.info('Locating \"starch\" binary')
if not args.starch_path and not args.bedops_root_dir:
starch_path = find_binary('starch')
if not starch_path:
if logger: logger.info('Could not locate \"starch\" binary')
sys.stderr.write("ERROR: This script must be run on a system with BEDOPS starch\n")
sys.exit(errno.EEXIST)
elif args.bedops_root_dir and not args.starch_path:
starch_path = os.path.join(args.bedops_root_dir, 'starch')
elif args.starch_path:
starch_path = args.starch_path
elif not cmd_exists('starch'):
if logger: logger.info('Could not locate \"starch\" binary')
sys.stderr.write("ERROR: This script must be run on a system with BEDOPS starch\n")
sys.exit(errno.EEXIST)
if logger: logger.info('Location of \"starch\" is set to [%s]' % (starch_path))
starchcat_path = None
if logger: logger.info('Locating \"starchcat\" binary')
if not args.starchcat_path and not args.bedops_root_dir:
starchcat_path = find_binary('starchcat')
if not starchcat_path:
if logger: logger.info('Could not locate \"starchcat\" binary')
sys.stderr.write("ERROR: This script must be run on a system with BEDOPS starchcat\n")
sys.exit(errno.EEXIST)
elif args.bedops_root_dir and not args.starchcat_path:
starchcat_path = os.path.join(args.bedops_root_dir, 'starchcat')
elif args.starchcat_path:
starchcat_path = args.starchcat_path
elif not cmd_exists('starchcat'):
if logger: logger.info('Could not locate \"starchcat\" binary')
sys.stderr.write("ERROR: This script must be run on a system with BEDOPS starchcat\n")
sys.exit(errno.EEXIST)
if logger: logger.info('Location of \"starchcat\" is set to [%s]' % (starchcat_path))
update_sort_bed_slurm_path = None
if logger: logger.info('Locating \"update-sort-bed-slurm\" script')
if not args.update_sort_bed_slurm_path and not args.bedops_root_dir:
update_sort_bed_slurm_path = find_binary('update-sort-bed-slurm')
if not update_sort_bed_slurm_path:
if logger: logger.info('Could not locate \"update-sort-bed-slurm\" binary')
sys.stderr.write("ERROR: This script must be run on a system with BEDOPS update-sort-bed-slurm\n")
sys.exit(errno.EEXIST)
elif args.bedops_root_dir and not args.update_sort_bed_slurm_path:
update_sort_bed_slurm_path = os.path.join(args.bedops_root_dir, 'update-sort-bed-slurm')
elif args.update_sort_bed_slurm_path:
update_sort_bed_slurm_path = args.update_sort_bed_slurm_path
elif not cmd_exists('update-sort-bed-slurm'):
if logger: logger.info('Could not locate \"update-sort-bed-slurm\" binary')
sys.stderr.write("ERROR: This script | |
<reponame>Cloudxtreme/tmux-cssh-gui
#!/usr/bin/env python
import gtk
import gtk.glade
import subprocess
import shlex
from service import environment
from model.setting import Setting
from service.settings import Settings
from service.config import Config
from gui.entrydlg import EntryDlg
class TMUXCSSHGUI:
# Constant: Name of main window
__NAME_MAIN_WINDOW_GLADE_XML = 'main.glade'
__NAME_MAIN_WINDOW = 'windowMain'
__NAME_BEDITSETTING = 'bEditSetting'
__NAME_BRUNSETTING = 'bRunSetting'
__NAME_BREMOVESETTING = 'bRemoveSetting'
__NAME_NONE_SELECTED_LABEL = 'lChooseInfo'
__NAME_SETTINGS_FORM = 'vboxSettingsForm'
__NAME_VBOX_SC = 'vboxSC'
__NAME_VBOX_FILENAME = 'vboxFilename'
__NAME_VBOX_CONFIGSETTINGS = 'vboxConfigSettings'
# Constant: Name of treeview list holding the settings
__NAME_TREEVIEW_SETTINGS_LIST = 'treeviewSettings'
__windowStateMaximized=False
def __init__(self):
    """
    Constructor: builds the main window from the glade definition, wires its
    signals, restores window geometry from the stored config, creates the
    systray icon and loads the saved tmux-cssh settings.
    """
    # Initialize configs
    self.__config=Config()
    # Read glade file
    self.__gladeXML=gtk.glade.XML(environment.NAME_GLADE_XML_PATH+self.__NAME_MAIN_WINDOW_GLADE_XML)
    # Connect signals to self (handlers are the on_* methods of this class)
    self.__gladeXML.signal_autoconnect(self)
    # Get treeview list for settings
    self.__treeviewSettings=self.__gladeXML.get_widget(self.__NAME_TREEVIEW_SETTINGS_LIST)
    # Create render for displaying the text
    renderer=gtk.CellRendererText()
    # Create column (column 0 of the model holds the setting key)
    column=gtk.TreeViewColumn('Settings name/key', renderer)
    column.set_attributes(renderer, text=0)
    # Add column to list
    self.__treeviewSettings.append_column(column)
    # Create list
    self.__liststoreSettings=gtk.ListStore(str)
    # Add list to treeview list
    self.__treeviewSettings.set_model(self.__liststoreSettings)
    # Show main window
    self.__windowMain=self.__gladeXML.get_widget(self.__NAME_MAIN_WINDOW)
    # Size: restore the last saved window size, if present in the config
    windowSizeX=self.__config.get(Config.SECTION_WINDOW, Config.OPTION_WINDOW_SIZE_X)
    windowSizeY=self.__config.get(Config.SECTION_WINDOW, Config.OPTION_WINDOW_SIZE_Y)
    if windowSizeX is not None and windowSizeY is not None:
        self.__windowMain.resize(int(windowSizeX), (int(windowSizeY)))
    # End if
    # Position: restore the last saved window position, if present
    windowPositionX=self.__config.get(Config.SECTION_WINDOW, Config.OPTION_WINDOW_POSITION_X)
    windowPositionY=self.__config.get(Config.SECTION_WINDOW, Config.OPTION_WINDOW_POSITION_Y)
    if windowPositionX is not None and windowPositionY is not None:
        self.__windowMain.move(int(windowPositionX), int(windowPositionY))
    # End if
    # Check, if window should be started minimized; when not, show it and
    # persist the (default) flag back to the config
    startMinimized=self.__config.getBool(Config.SECTION_WINDOW, Config.OPTION_WINDOW_START_MINIMIZED)
    if startMinimized is None or not startMinimized:
        # Set default value
        self.__config.set(Config.SECTION_WINDOW, Config.OPTION_WINDOW_START_MINIMIZED, False)
        # Show window
        self.__windowMain.show()
    # End if
    # Get several gui components
    # Menu items
    self.__miEdit=self.__gladeXML.get_widget('menuItemEdit')
    self.__miRemove=self.__gladeXML.get_widget('menuItemRemove')
    # Settings-list button area
    self.__bEditSetting=self.__gladeXML.get_widget(self.__NAME_BEDITSETTING)
    self.__bRemoveSetting=self.__gladeXML.get_widget(self.__NAME_BREMOVESETTING)
    self.__bRunSetting=self.__gladeXML.get_widget(self.__NAME_BRUNSETTING)
    # Form area
    self.__noneSelectedLabel=self.__gladeXML.get_widget(self.__NAME_NONE_SELECTED_LABEL)
    self.__settingsForm=self.__gladeXML.get_widget(self.__NAME_SETTINGS_FORM)
    # Boxes for server connection string -sc
    self.__vboxSC=self.__gladeXML.get_widget(self.__NAME_VBOX_SC)
    self.__vboxFilename=self.__gladeXML.get_widget(self.__NAME_VBOX_FILENAME)
    self.__vboxConfigSettings=self.__gladeXML.get_widget(self.__NAME_VBOX_CONFIGSETTINGS)
    # Create and setup systray-icon/-menu
    self.__systrayIcon=gtk.StatusIcon()
    self.__systrayIcon.set_from_stock(gtk.STOCK_ABOUT)
    self.__systrayIcon.connect('activate', self.on_systrayIcon_activate)
    self.__systrayIcon.connect('popup-menu', self.on_systrayIcon_popup_menu)
    self.__systrayIcon.set_title(environment.APP_NAME)
    self.__systrayIcon.set_tooltip(environment.APP_NAME)
    # Load settings
    self.__loadSettings()
# End def
def __loadSettings(self):
    """Create the settings service and mirror its contents into the GUI list."""
    # service object that provides access to the stored settings
    self.__serviceSettings=Settings()
    # rebuild the treeview model from the freshly loaded settings
    self.__updateGUISettingsList()
def __updateGUISettingsList(self):
    """Rebuild the liststore backing the settings treeview from the service."""
    # any previously selected row is now stale
    self.__currentSelectedSetting=None
    # drop all rows, then re-add one row per setting showing its key
    self.__liststoreSettings.clear()
    for settingEntry in self.__serviceSettings.yieldSettings():
        self.__liststoreSettings.append([settingEntry.getKey()])
def __updateGUI(self):
    """Refresh both the settings list and the form visibility state."""
    self.__updateGUISettingsList()
    # list rebuild cleared the selection, so hide/disable the form accordingly
    self.__switchFormVisibility()
def on_destroy(self, object):
    """Signal handler: main window destroyed, terminate the application."""
    self.__quit()
def __quit(self):
    """Leave the gtk main loop, which ends the application."""
    gtk.main_quit()
def __runSetting(self, position):
    """
    Launch tmux-cssh for the setting at list *position* inside the configured
    terminal emulator. On first use, persists default emulator command/params
    to the config and informs the user via a dialog.
    """
    # terminal emulator command and parameter template from the user config
    cmnd=self.__config.get(Config.SECTION_TERMINAL, Config.OPTION_TERMINAL_COMMAND)
    params=self.__config.get(Config.SECTION_TERMINAL, Config.OPTION_TERMINAL_PARAMETER)
    if cmnd is None and params is None:
        # neither is configured yet: write defaults and tell the user where
        # to adjust them
        cmnd=environment.TERMINAL_EMULATOR_CMND
        self.__config.set(Config.SECTION_TERMINAL, Config.OPTION_TERMINAL_COMMAND, cmnd)
        params=environment.TERMINAL_EMULATOR_PARAMETER
        self.__config.set(Config.SECTION_TERMINAL, Config.OPTION_TERMINAL_PARAMETER, params)
        dlg=gtk.MessageDialog(self.__windowMain, gtk.DIALOG_MODAL, gtk.MESSAGE_INFO, gtk.BUTTONS_OK,
            u'Terminal-Emulator-Command not set yet. Edit the config-file ('+environment.APP_CONFIG_FILE+'). Setting default.')
        dlg.run()
        dlg.destroy()
    # End if
    # Command and parameters available
    if cmnd and params:
        setting=self.__serviceSettings[position];
        # NOTE(review): shell=True with the setting value interpolated into the
        # command string -- a crafted setting value can execute arbitrary shell
        # code. Consider building an argument list and shell=False; behavior
        # left unchanged here.
        subprocess.Popen(cmnd+' '+params+' "'+environment.TMUX_CSSH_COMMAND+' '+setting.getValue()+'" ', shell=True)
    # End if
# End def
def on_treeviewSettings_cursor_changed(self, treeviewObject):
    """
    Signal handler: the selection in the settings list moved; load the
    corresponding setting into the form (or clear the form).
    """
    # guard flag: suppress form write-back while the form is being (re)loaded
    self.__settingSelected=True

    selectedPaths=treeviewObject.get_selection().get_selected_rows()[1]

    # forget the previous selection before resolving the new one
    self.__currentSelectedSetting=None

    if len(selectedPaths)>0:
        rowIndex=selectedPaths[0][0]
        if 0<=rowIndex<len(self.__serviceSettings):
            # valid row: remember it and fill the form from its setting
            self.__currentSelectedSetting=rowIndex
            self.__showFormFromSettingsObject(self.__serviceSettings[rowIndex])
        else:
            # out-of-range row: clear the form
            self.__showFormFromSettingsObject(None)

    self.__settingSelected=False
def on_treeviewSettings_row_activated(self, *params):
    """Signal handler: a row was activated (double-click/Enter); run it."""
    self.__runSetting(self.__currentSelectedSetting)
def __switchFormVisibility(self):
    """
    Enable or disable the selection-dependent controls and toggle between the
    settings form and the "choose a setting" hint, based on whether a setting
    is currently selected.
    """
    hasSelection=self.__currentSelectedSetting is not None

    # these widgets only make sense while a setting is selected
    for widget in (self.__miEdit, self.__miRemove, self.__bEditSetting,
                   self.__bRemoveSetting, self.__bRunSetting):
        widget.set_sensitive(hasSelection)

    # show either the form or the hint label, never both
    if hasSelection:
        self.__noneSelectedLabel.hide()
        self.__settingsForm.show()
    else:
        self.__noneSelectedLabel.show()
        self.__settingsForm.hide()
def __showFormFromSettingsObject(self, settingObject):
    """
    Populate the form widgets from *settingObject*, or reset them when it is
    *None*. The setting's stored tmux-cssh parameter string is parsed by the
    settings service into named attributes, which are distributed over the
    form fields, checkboxes and the three dynamic row boxes (-sc, -f, -cs).
    """
    # Switch form visibility
    self.__switchFormVisibility()
    # Get array with parsed settings from setting object
    settingsFromObject=self.__serviceSettings.getParsedParametersFromSettingObject(settingObject=settingObject)
    # Add user to form (parsed attributes are lists; element 0 holds the value)
    fUser=self.__gladeXML.get_widget('fUser')
    fUser.set_text(settingsFromObject.user[0].strip() if settingsFromObject.user is not None else '')
    # Add certificate/identity to form; clear any previous file selection first
    fIdentity=self.__gladeXML.get_widget('fCertificateIdentity')
    if fIdentity.get_filename() is not None:
        fIdentity.unselect_filename(fIdentity.get_filename())
    # End if
    if settingsFromObject.certificateIdentity is not None:
        fIdentity.set_filename(settingsFromObject.certificateIdentity[0])
    # End if
    # Add server connection strings to form
    # Remove old rows
    for vboxSCChildren in self.__vboxSC.get_children():
        self.__vboxSC.remove(vboxSCChildren)
    # End for
    # Add new rows, one per connection string
    if settingsFromObject.sshConnectString is not None:
        for serverConnectionString in settingsFromObject.sshConnectString:
            self.__addSCItem(serverConnectionString[0])
        # End for
    # End if
    # Add additional ssh args to form
    fAdditionalSSHArguments=self.__gladeXML.get_widget('fAdditionalSSHArguments')
    fAdditionalSSHArguments.set_text(settingsFromObject.additionalSSHArgs[0].strip() if settingsFromObject.additionalSSHArgs is not None else '')
    # Add TMUX Session name to form
    fTMUXSessionName=self.__gladeXML.get_widget('fTMUXSessionName')
    fTMUXSessionName.set_text(settingsFromObject.tmuxSessionName[0].strip() if settingsFromObject.tmuxSessionName is not None else '')
    # Add Epoch time to form
    cbSetEpochTime=self.__gladeXML.get_widget('cbSetEpochTime')
    cbSetEpochTime.set_active(settingsFromObject.setEpochTime)
    # Add new session to form
    cbNewSession=self.__gladeXML.get_widget('cbNewSession')
    cbNewSession.set_active(settingsFromObject.newSession)
    # Add quiet mode to form
    cbQuietMode=self.__gladeXML.get_widget('cbQuietMode')
    cbQuietMode.set_active(settingsFromObject.quietMode)
    # Add dont-synchronize mode to form (checkbox is the inverse of the
    # stored synchronizePanes flag)
    cbDontSynchronizeMode=self.__gladeXML.get_widget('cbDontSynchronizeMode')
    cbDontSynchronizeMode.set_active(not settingsFromObject.synchronizePanes)
    # Add filenames to form
    # Remove old rows
    for vboxFilenameChildren in self.__vboxFilename.get_children():
        self.__vboxFilename.remove(vboxFilenameChildren)
    # End for
    # Add new rows
    if settingsFromObject.filename is not None:
        for filename in settingsFromObject.filename:
            self.__addFilenameItem(filename[0])
        # End for
    # End if
    # Add config settings to form
    # Remove old rows
    for vboxConfigSettingsChildren in self.__vboxConfigSettings.get_children():
        self.__vboxConfigSettings.remove(vboxConfigSettingsChildren)
    # End for
    # Add new rows
    if settingsFromObject.configSetting is not None:
        for configSetting in settingsFromObject.configSetting:
            self.__addConfigSettingItem(configSetting[0])
        # End for
    # End if
# End def
def __saveFormData(self):
    """
    Serialize the current form contents back into the selected setting as a
    tmux-cssh parameter string. Does nothing while a setting is being loaded
    into the form (guard flag __settingSelected) or when nothing is selected.
    """
    if not self.__settingSelected and self.__currentSelectedSetting is not None:
        # Get current setting
        setting=self.__serviceSettings[self.__currentSelectedSetting]
        # Gather settings from form as individual parameter fragments
        valueArray=[]
        # Set epoch time
        f=self.__gladeXML.get_widget('cbSetEpochTime')
        if f.get_active(): valueArray.append('-set')
        # New session
        f=self.__gladeXML.get_widget('cbNewSession')
        if f.get_active(): valueArray.append('-ns')
        # Quiet mode
        f=self.__gladeXML.get_widget('cbQuietMode')
        if f.get_active(): valueArray.append('-q')
        # Don't-synchronize mode
        f=self.__gladeXML.get_widget('cbDontSynchronizeMode')
        if f.get_active(): valueArray.append('-ds')
        # TMUX Session name
        f=self.__gladeXML.get_widget('fTMUXSessionName')
        if f.get_text().strip(): valueArray.append('-ts "'+f.get_text().strip()+'"')
        # SSH user name
        f=self.__gladeXML.get_widget('fUser')
        if f.get_text().strip(): valueArray.append('-u "'+f.get_text().strip()+'"')
        # Certificate / Identity
        f=self.__gladeXML.get_widget('fCertificateIdentity')
        if f.get_filename() is not None: valueArray.append('-c "'+f.get_filename()+'"')
        # Additional SSH arguments
        f=self.__gladeXML.get_widget('fAdditionalSSHArguments')
        if f.get_text().strip(): valueArray.append('-sa "'+f.get_text().strip()+'"')
        # Server connection strings, -sc
        self.__addValuesFromVBox(valueArray, self.__vboxSC, '-sc')
        # Filenames with server connection strings, -f
        self.__addValuesFromVBox(valueArray, self.__vboxFilename, '-f')
        # Config settings -cs
        self.__addValuesFromVBox(valueArray, self.__vboxConfigSettings, '-cs')
        # Store to settings; __setitem__ on the settings service also persists
        setting.setValue(' '.join(valueArray))
        self.__serviceSettings[self.__currentSelectedSetting]=setting
        # Save settings
        # already saved with [...]-__setitem__
        #self.__serviceSettings.save()
    # End if
# End def
def __addValuesFromVBox(self, valueArray, vbox, parameterShortcut):
    """
    Collect non-empty values from the dynamic rows of *vbox* and append them
    to *valueArray* as '<parameterShortcut> "<value>"' fragments.

    Each row is an HBox whose second child carries the value widget: a
    gtk.Entry (text), a gtk.FileChooserButton (file path) or a
    gtk.ComboBoxEntry (editable combo text). *valueArray* is mutated in place.
    """
    # Walk through all hbox children
    for childHBOX in vbox.get_children():
        childHBOXChildren=childHBOX.get_children()
        if len(childHBOXChildren)>0:
            # second child of the hbox holds the actual value widget
            f=childHBOXChildren[1]
            # isinstance instead of exact type() comparison: idiomatic and
            # tolerant of widget subclasses; the three classes used here do
            # not inherit from one another, so at most one branch fires
            # Entry field
            if isinstance(f, gtk.Entry) and f.get_text().strip(): valueArray.append(parameterShortcut+' "'+f.get_text().strip()+'"')
            # FileChooseButton
            if isinstance(f, gtk.FileChooserButton) and f.get_filename() is not None: valueArray.append(parameterShortcut+' "'+f.get_filename()+'"')
            # Combobox field
            if isinstance(f, gtk.ComboBoxEntry) and f.get_active_text().strip(): valueArray.append(parameterShortcut+' "'+f.get_active_text().strip()+'"')
        # End if
    # End for
# End def
def on_bAddSetting_clicked(self, view):
    """Handler for the 'add setting' button: prompt for a name and create an
    empty setting under that name, then select it in the list."""
    settingName = EntryDlg(labeltext="Insert the name of the new setting:").Run()
    # Empty string or cancelled dialog: nothing to add.
    # (The original 'settingName and settingName is not None' was redundant -
    # truthiness already excludes both None and "".)
    if settingName:
        # Create and add setting
        setting = Setting()
        setting.setKey(settingName)
        setting.setValue('')
        self.__serviceSettings.add(setting)
        # Refresh list
        self.__updateGUI()
        # Set new item as selected (it is appended at the end of the store)
        self.__treeviewSettings.set_cursor(len(self.__liststoreSettings) - 1)
    # End if
# End def
def on_bEditSetting_clicked(self, view):
    """Handler for the 'edit setting' button: rename the selected setting."""
    # Get setting
    setting = self.__serviceSettings[self.__currentSelectedSetting]
    dlg = EntryDlg(labeltext="Insert the new name of the setting:", defaultvalue=setting.getKey(), parentWindow=self.__windowMain)
    settingName = dlg.Run()
    del dlg
    # Empty string or cancelled dialog: keep the old name.
    # (Redundant 'and settingName is not None' removed - truthiness covers it.)
    if settingName:
        # Update setting and write it back so the container persists the change
        setting.setKey(settingName)
        self.__serviceSettings[self.__currentSelectedSetting] = setting
        # Refresh list
        self.__updateGUI()
    # End if
# End def
def on_bRemoveSetting_clicked(self, view):
    """Handler for the 'remove setting' button: confirm with the user, then
    delete the currently selected setting and refresh the list."""
    selected = self.__serviceSettings[self.__currentSelectedSetting]
    # Modal OK/Cancel confirmation naming the setting about to be removed.
    confirm = gtk.MessageDialog(self.__windowMain, gtk.DIALOG_MODAL, gtk.MESSAGE_QUESTION, gtk.BUTTONS_OK_CANCEL, u'Do you want to remove the config setting \'' + selected.getKey() + '\' ?')
    answer = confirm.run()
    confirm.destroy()
    if answer != gtk.RESPONSE_OK:
        return
    # Confirmed: drop the setting and redraw the list.
    del self.__serviceSettings[self.__currentSelectedSetting]
    self.__updateGUI()
# End def
def on_bRunSetting_clicked(self, view):
    """Handler for the 'run setting' button: launch the selected setting."""
    self.__runSetting(self.__currentSelectedSetting)
# End def
def __addSCItem(self, value=""):
    """Append one '-sc' (server connection) row to the -sc VBox.

    A row is an HBox holding a fixed-width '-sc' caption, a text entry
    pre-filled with *value*, and an 'X' button that removes the row.
    """
    row = gtk.HBox(False, 5)
    # Fixed-width, left-aligned caption.
    caption = gtk.Label('-sc')
    caption.set_property('width-request', 50)
    caption.set_property('xalign', 0)
    caption.show()
    row.pack_start(caption, False)
    # Editable value; the form is persisted whenever focus leaves the field.
    entry = gtk.Entry()
    entry.set_text(value)
    entry.connect('focus-out-event', self.on_formitem_changed)
    entry.show()
    row.pack_start(entry)
    # Row-removal button; passes the container and this row to the handler.
    remover = gtk.Button('X')
    remover.connect('clicked', self.on_remove_item, self.__vboxSC, row)
    remover.show()
    row.pack_start(remover, False)
    row.show()
    self.__vboxSC.add(row)
    # A brand-new (empty) row receives keyboard focus immediately.
    if not value:
        entry.grab_focus()
# End def
def on_bAddSC_clicked(self, view):
    """Handler for the 'add -sc item' button: append an empty -sc row."""
    self.__addSCItem()
# End def
def on_remove_item(self, view, *data):
    """Handler for a row's 'X' button: detach the row and persist the form.

    Args:
      view: the clicked button (unused).
      *data: (vbox, hbox) pair supplied at connect() time - the container
             and the row to remove from it.
    """
    # The original guard only checked len(data) > 0 but then read data[1],
    # which would raise IndexError if a single extra argument arrived;
    # require both expected arguments before touching them.
    if len(data) >= 2:
        vbox = data[0]
        hbox = data[1]
        vbox.remove(hbox)
        self.__saveFormData()
    # End if
# End def
def on_formitem_changed(self, *params):
    """Persist the form whenever a field changes, unless a programmatic
    selection change is in progress (which also fires these signals)."""
    if self.__settingSelected:
        return
    self.__saveFormData()
# End def
def __addFilenameItem(self, value=''):
    """Append one '-f' (filename) row to the filename VBox.

    A row is an HBox holding a fixed-width '-f' caption, a file-chooser
    button pre-set to *value*, and an 'X' button that removes the row.
    """
    row = gtk.HBox(False, 5)
    # Fixed-width, left-aligned caption.
    caption = gtk.Label('-f')
    caption.set_property('width-request', 50)
    caption.set_property('xalign', 0)
    caption.show()
    row.pack_start(caption, False)
    # File chooser; the form is persisted as soon as a file is picked.
    chooser = gtk.FileChooserButton('File for -sc items')
    chooser.set_filename(value)
    chooser.connect('file-set', self.on_formitem_changed)
    chooser.show()
    row.pack_start(chooser)
    # Row-removal button; passes the container and this row to the handler.
    remover = gtk.Button('X')
    remover.connect('clicked', self.on_remove_item, self.__vboxFilename, row)
    remover.show()
    row.pack_start(remover, False)
    row.show()
    self.__vboxFilename.add(row)
    # A brand-new (empty) row receives keyboard focus immediately.
    if not value:
        chooser.grab_focus()
# End def
def on_bAddFilename_clicked(self, view):
self.__addFilenameItem()
# End | |
0)
# Offsets for start and end of k loop.
# Prevents mapping of space beyond the grid.
k1start = 0
k1end = 0
k2start = 0
k2end = 0
for d in xrange(max_d):
# Bail out if deadline is reached.
if time.time() > deadline:
break
# Walk the front path one step.
for k1 in xrange(-d + k1start, d + 1 - k1end, 2):
k1_offset = v_offset + k1
if k1 == -d or (k1 != d and
v1[k1_offset - 1] < v1[k1_offset + 1]):
x1 = v1[k1_offset + 1]
else:
x1 = v1[k1_offset - 1] + 1
y1 = x1 - k1
while (x1 < text1_length and y1 < text2_length and
text1[x1] == text2[y1]):
x1 += 1
y1 += 1
v1[k1_offset] = x1
if x1 > text1_length:
# Ran off the right of the graph.
k1end += 2
elif y1 > text2_length:
# Ran off the bottom of the graph.
k1start += 2
elif front:
k2_offset = v_offset + delta - k1
if k2_offset >= 0 and k2_offset < v_length and v2[k2_offset] != -1:
# Mirror x2 onto top-left coordinate system.
x2 = text1_length - v2[k2_offset]
if x1 >= x2:
# Overlap detected.
return self.diff_bisectSplit(text1, text2, x1, y1, deadline)
# Walk the reverse path one step.
for k2 in xrange(-d + k2start, d + 1 - k2end, 2):
k2_offset = v_offset + k2
if k2 == -d or (k2 != d and
v2[k2_offset - 1] < v2[k2_offset + 1]):
x2 = v2[k2_offset + 1]
else:
x2 = v2[k2_offset - 1] + 1
y2 = x2 - k2
while (x2 < text1_length and y2 < text2_length and
text1[-x2 - 1] == text2[-y2 - 1]):
x2 += 1
y2 += 1
v2[k2_offset] = x2
if x2 > text1_length:
# Ran off the left of the graph.
k2end += 2
elif y2 > text2_length:
# Ran off the top of the graph.
k2start += 2
elif not front:
k1_offset = v_offset + delta - k2
if k1_offset >= 0 and k1_offset < v_length and v1[k1_offset] != -1:
x1 = v1[k1_offset]
y1 = v_offset + x1 - k1_offset
# Mirror x2 onto top-left coordinate system.
x2 = text1_length - x2
if x1 >= x2:
# Overlap detected.
return self.diff_bisectSplit(text1, text2, x1, y1, deadline)
# Diff took too long and hit the deadline or
# number of diffs equals number of characters, no commonality at all.
return [(self.DIFF_DELETE, text1), (self.DIFF_INSERT, text2)]
def diff_bisectSplit(self, text1, text2, x, y, deadline):
    """Given the location of the 'middle snake', split the diff in two parts
    and recurse.

    Args:
      text1: Old string to be diffed.
      text2: New string to be diffed.
      x: Index of split point in text1.
      y: Index of split point in text2.
      deadline: Time at which to bail if not yet complete.

    Returns:
      Array of diff tuples.
    """
    # Split both texts at the snake location.
    head1, tail1 = text1[:x], text1[x:]
    head2, tail2 = text2[:y], text2[y:]
    # Diff each half serially (heads first, then tails) and stitch together.
    front_diffs = self.diff_main(head1, head2, False, deadline)
    back_diffs = self.diff_main(tail1, tail2, False, deadline)
    return front_diffs + back_diffs
def diff_linesToChars(self, text1, text2):
    """Split two texts into an array of strings.  Reduce the texts to a string
    of hashes where each Unicode character represents one line.

    Args:
      text1: First string.
      text2: Second string.

    Returns:
      Three element tuple, containing the encoded text1, the encoded text2 and
      the array of unique strings.  The zeroth element of the array of unique
      strings is intentionally blank.
    """
    lineArray = []  # e.g. lineArray[4] == "Hello\n"
    lineHash = {}   # e.g. lineHash["Hello\n"] == 4

    # "\x00" is a valid character, but various debuggers don't like it.
    # So we'll insert a junk entry to avoid generating a null character.
    lineArray.append('')

    def diff_linesToCharsMunge(text):
        """Split a text into an array of strings.  Reduce the texts to a string
        of hashes where each Unicode character represents one line.
        Modifies linearray and linehash through being a closure.

        Args:
          text: String to encode.

        Returns:
          Encoded string.
        """
        chars = []
        # Walk the text, pulling out a substring for each line.
        # text.split('\n') would would temporarily double our memory footprint.
        # Modifying text would create many large strings to garbage collect.
        lineStart = 0
        lineEnd = -1
        while lineEnd < len(text) - 1:
            lineEnd = text.find('\n', lineStart)
            if lineEnd == -1:
                # Text does not end with '\n': the remainder is the last line.
                lineEnd = len(text) - 1
            line = text[lineStart:lineEnd + 1]
            lineStart = lineEnd + 1

            if line in lineHash:
                # Seen before: reuse the existing index.
                # NOTE: unichr is a Python-2-only builtin (chr in Python 3).
                chars.append(unichr(lineHash[line]))
            else:
                # New line: register it and emit its fresh index.
                lineArray.append(line)
                lineHash[line] = len(lineArray) - 1
                chars.append(unichr(len(lineArray) - 1))
        return "".join(chars)

    chars1 = diff_linesToCharsMunge(text1)
    chars2 = diff_linesToCharsMunge(text2)
    return (chars1, chars2, lineArray)
def diff_charsToLines(self, diffs, lineArray):
    """Rehydrate the text in a diff from a string of line hashes to real lines
    of text.  Modifies diffs in place.

    Args:
      diffs: Array of (op, encoded_text) diff tuples; mutated in place.
      lineArray: Array of unique strings; encoded_text chars index into it.
    """
    # enumerate behaves identically on Python 2 and 3, unlike the original
    # Python-2-only xrange.
    for i, (op, data) in enumerate(diffs):
        # Each character's code point is an index into lineArray.
        text = [lineArray[ord(char)] for char in data]
        diffs[i] = (op, "".join(text))
def diff_commonPrefix(self, text1, text2):
    """Determine the common prefix of two strings.

    Args:
      text1: First string.
      text2: Second string.

    Returns:
      The number of characters common to the start of each string.
    """
    # Cheap rejection: either string empty, or very first characters differ.
    if not text1 or not text2 or text1[0] != text2[0]:
        return 0
    # Binary search for the length of the shared prefix.
    # Performance analysis: http://neil.fraser.name/news/2007/10/09/
    lo = 0
    hi = min(len(text1), len(text2))
    mid = hi
    start = 0
    while lo < mid:
        if text1[start:mid] == text2[start:mid]:
            # Slice matches: prefix is at least mid long; never recompare it.
            lo = mid
            start = lo
        else:
            hi = mid
        mid = (hi - lo) // 2 + lo
    return mid
def diff_commonSuffix(self, text1, text2):
    """Determine the common suffix of two strings.

    Args:
      text1: First string.
      text2: Second string.

    Returns:
      The number of characters common to the end of each string.
    """
    # Cheap rejection: either string empty, or very last characters differ.
    if not text1 or not text2 or text1[-1] != text2[-1]:
        return 0
    # Binary search for the length of the shared suffix.
    # Performance analysis: http://neil.fraser.name/news/2007/10/09/
    lo = 0
    hi = min(len(text1), len(text2))
    mid = hi
    end = 0
    while lo < mid:
        if text1[-mid:len(text1) - end] == text2[-mid:len(text2) - end]:
            # Slice matches: suffix is at least mid long; never recompare it.
            lo = mid
            end = lo
        else:
            hi = mid
        mid = (hi - lo) // 2 + lo
    return mid
def diff_commonOverlap(self, text1, text2):
    """Determine if the suffix of one string is the prefix of another.

    Args:
      text1: First string.
      text2: Second string.

    Returns:
      The number of characters common to the end of the first
      string and the start of the second string.
    """
    # Cache the text lengths to prevent multiple calls.
    text1_length = len(text1)
    text2_length = len(text2)
    # Eliminate the null case.
    if text1_length == 0 or text2_length == 0:
        return 0
    # Truncate the longer string: only min(len1, len2) chars can overlap.
    if text1_length > text2_length:
        text1 = text1[-text2_length:]
    elif text1_length < text2_length:
        text2 = text2[:text1_length]
    text_length = min(text1_length, text2_length)
    # Quick check for the worst case.
    if text1 == text2:
        return text_length

    # Start by looking for a single character match
    # and increase length until no match is found.
    # Performance analysis: http://neil.fraser.name/news/2010/11/04/
    best = 0
    length = 1
    while True:
        # Candidate overlap: the last 'length' characters of text1.
        pattern = text1[-length:]
        found = text2.find(pattern)
        if found == -1:
            return best
        # Skip ahead: any real overlap must be at least length + found long.
        length += found
        # found == 0 means the pattern sits at the very start of text2,
        # i.e. the current length is itself a valid overlap.
        if found == 0 or text1[-length:] == text2[:length]:
            best = length
            length += 1
def diff_halfMatch(self, text1, text2):
"""Do the two texts share a substring which is at least half the length of
the longer text?
This speedup can produce non-minimal diffs.
Args:
text1: First string.
text2: Second string.
Returns:
Five element Array, containing the prefix of text1, the suffix of text1,
the prefix of text2, the suffix of text2 and the common middle. Or None
if there was no match.
"""
if self.Diff_Timeout <= 0:
# Don't risk returning a non-optimal diff if we have unlimited time.
return None
if len(text1) > len(text2):
(longtext, shorttext) = (text1, text2)
else:
(shorttext, longtext) = (text1, text2)
if len(longtext) < 4 or len(shorttext) * 2 < len(longtext):
return None # Pointless.
def diff_halfMatchI(longtext, shorttext, i):
"""Does | |
in self.cif.keys():
cifpat = self.cif[pattern]
if (
x_ordinate not in cifpat
or y_ordinate not in cifpat
and y_ordinate != "Pattern number"
or z_ordinate not in cifpat
):
continue
# we now know that both x and z are in the pattern
x = cifpat[x_ordinate]
z = cifpat[z_ordinate]
y = i if y_ordinate == "Pattern number" else cifpat[y_ordinate]
z_norm = cifpat[z_norm_ordinate]
z_norm = z_norm * cifpat["_pd_proc_ls_weight"] if "_pd_proc_ls_weight" in cifpat else z_norm / cifpat[z_ordinate + "_err"] ** 2
# interpolation only works if the x-ordinate is increasing.
# if it doesn't, I need to flip all the ordinates to maintain
# the relative ordering.
if x[0] > x[-1]:
x = np.flip(x)
y = np.flip(y)
z = np.flip(z)
z_norm = np.flip(z_norm)
# keep track of min, max, and average step size so
# I can do a naive linear interpolation to grid the data
min_x = min(min_x, min(x))
max_x = max(max_x, max(x))
x_step += np.average(np.diff(x))
xs.append(x)
ys.append(y)
zs.append(z)
znorms.append(z_norm)
plot_list.append(pattern)
i += 1
x_step /= i
# create the x interpolation array and interpolate each diffraction pattern
xi = np.arange(min_x, max_x, math.fabs(x_step))
for j in range(len(xs)):
zs[j] = np.interp(xi, xs[j], zs[j], left=float("nan"), right=float("nan"))
znorms[j] = np.interp(xi, xs[j], znorms[j], left=float("nan"), right=float("nan"))
# https://stackoverflow.com/a/33943276/36061
# https://stackoverflow.com/a/38025451/36061
xx, yy = np.meshgrid(xi, ys)
zz = np.array(zs)
zn = np.array(znorms)
return xx, yy, zz, zn, plot_list
def get_all_qpa(self) -> dict:
    """Collect quantitative phase analysis (mass %) values for every phase.

    Returns:
      dict mapping phase name -> list of "_pd_phase_mass_%" values, one
      slot per pattern; a phase absent from a pattern gets np.nan, and a
      phase present without a mass% value gets None.
    """
    all_phases = set()
    # get all phase names
    for pattern in self.cif:
        cifpat = self.cif[pattern]
        if "str" not in cifpat:
            continue
        for phase in cifpat["str"]:
            all_phases.add(cifpat["str"][phase]["_pd_phase_name"])
    d = {phase: [] for phase in all_phases}
    # get all phase wt%
    for i, pattern in enumerate(self.cif, start=1):
        cifpat = self.cif[pattern]
        if "str" not in cifpat:
            continue
        for phase in cifpat["str"]:
            try:
                qpa = cifpat["str"][phase]["_pd_phase_mass_%"]
            except KeyError:
                # Phase listed, but no mass% reported for this pattern.
                qpa = None
            phase_name = cifpat["str"][phase]["_pd_phase_name"]
            d[phase_name].append(qpa)
        # Pad phases absent from this pattern so all lists stay aligned.
        # NOTE(review): i counts *all* patterns, including ones skipped by
        # the 'continue' above, yet this appends at most one nan per
        # iteration - after a pattern without "str" the lists could fall
        # more than one slot behind and never realign.  Confirm whether
        # str-less patterns are expected in self.cif.
        for phase, qpa_list in d.items():
            if len(qpa_list) != i:
                d[phase].append(np.nan)
    return d
def single_update_plot(self, pattern: str, x_ordinate: str, y_ordinates: List[str],
                       plot_hkls: dict, plot_diff: bool, plot_cchi2: bool, plot_norm_int: bool,
                       axis_scale: dict,
                       fig: Figure, ax: Axes) -> Tuple[Figure, Axes, Tuple, Tuple]:
    """Rebuild the single-pattern plot into a fresh Figure/Axes pair.

    Args:
      pattern: key into self.cif naming the diffraction pattern to plot.
      x_ordinate: data name for the x axis.
      y_ordinates: data names for the y curves (entries may be "None");
        yobs is [0] and ycalc is [1] (see make_subtitle_string call below).
        NOTE: mutated in place - "Diff" is appended.
      plot_hkls: {"above": bool, "below": bool} hkl tick-mark options.
      plot_diff: also plot the ys[0] - ys[1] difference curve.
      plot_cchi2: plot cumulative chi2 on a secondary axis.
      plot_norm_int: multiply curves by a per-point normalisation factor.
      axis_scale: scaling options consumed by _scale_*_ordinate helpers.
      fig, ax: the previous Figure/Axes (or None on first call); a new
        pair is always created and returned.

    Returns:
      (fig, ax, zoomed_x_lim, zoomed_y_lim): the new figure and axes plus
      the x/y limits the caller should re-apply as the zoom view.
    """
    # todo: look at https://stackoverflow.com/a/63152341/36061 for idea on zooming
    single_height_px = fig.get_size_inches()[1] * self.dpi if fig is not None else 382  # this is needed for the hkl position calculations
    # if single_fig is None or single_ax is None:
    zoomed_x_lim = None
    zoomed_y_lim = None
    if fig:
        # Remember the user's current zoom before discarding the old figure.
        zoomed_x_lim = ax.get_xlim()
        zoomed_y_lim = ax.get_ylim()
        fig.clear()
    fig = Figure(figsize=(6, 3), dpi=self.dpi)
    ax = fig.add_subplot()
    # fig.set_tight_layout(True) # https://github.com/matplotlib/matplotlib/issues/21970 https://github.com/matplotlib/matplotlib/issues/11059
    ax.margins(x=0)
    cifpat = self.cif[pattern]
    x = _scale_x_ordinate(cifpat[x_ordinate], axis_scale)
    ys = []
    if plot_norm_int and y_ordinates[0] != "None":
        # NOTE(review): y_norm folds the first curve's values into the
        # weights (y*w or y/err^2) and every curve is multiplied by it
        # below - confirm this is the intended normalisation.
        y = cifpat[y_ordinates[0]]
        if "_pd_proc_ls_weight" in cifpat:
            y_norm = y * np.maximum(cifpat["_pd_proc_ls_weight"], 1e-6)
        else:
            y_norm = y / cifpat[y_ordinates[0] + "_err"] ** 2
    else:
        # No normalisation requested: multiply by ones (no-op).
        y_norm = np.ones(len(x))
    for y in y_ordinates:
        if y != "None":
            ys.append(_scale_y_ordinate(cifpat[y] * y_norm, axis_scale))
        else:
            ys.append(None)
    # need to calculate diff after the y axis transforms to get the right magnitudes
    if plot_diff:
        ydiff = ys[0] - ys[1]
        ys.append(ydiff)
    else:
        ys.append(None)
    y_ordinates.append("Diff")
    min_plot = 999999999
    max_plot = -min_plot
    cchi2_zero = 0
    for y, y_name, y_type in zip(ys, y_ordinates, self.single_y_style.keys()):
        debug(f"{y=} {y_name=} {y_type=}")
        if y is not None:
            if y_name == "Diff":
                # Shift the diff curve to sit just below everything plotted so far.
                offset = min_plot - np.nanmax(y)
                y += offset
                # this is to plot the 'zero' line for the diff plot
                ax.plot(x, [offset] * len(x), color="black", marker=None, linestyle=(0, (5, 10)), linewidth=1)  # "loosely dashed"
            label = f" {y_name}" if not plot_norm_int else f" {y_name} (norm.)"
            ax.plot(x, y, label=label,
                    color=self.single_y_style[y_type]["color"], marker=self.single_y_style[y_type]["marker"],
                    linestyle=self.single_y_style[y_type]["linestyle"], linewidth=self.single_y_style[y_type]["linewidth"],
                    markersize=float(self.single_y_style[y_type]["linewidth"]) * 3
                    )
            # keep track of min and max to plot hkl ticks and diff correctly
            min_plot = min(min_plot, np.nanmin(y))
            max_plot = max(max_plot, np.nanmax(y))
            if y_name != "Diff":
                cchi2_zero = min_plot
    if plot_hkls["above"] or plot_hkls["below"]:
        single_hovertexts, single_hkl_artists = self.plot_hkls(plot_hkls["below"], cifpat,
                                                               x_ordinate, x, ys, axis_scale,
                                                               min_plot, max_plot, 0, True, self.dpi,
                                                               single_height_px, ax)
        add_hovertext_to_each_point(single_hkl_artists, single_hovertexts)
    if plot_cchi2:
        # d-spacing axes run high-to-low, so the cumulative chi2 is flipped.
        flip_cchi2 = x_ordinate in {"d", "_pd_proc_d_spacing"}
        ax2 = self.single_plot_cchi2(cifpat, x, [y_ordinates[0], y_ordinates[1]], axis_scale, cchi2_zero, flip_cchi2, ax)
    if not plot_cchi2:
        ax.legend(frameon=False, loc='upper right')  # loc='best')
    if plot_norm_int:
        y_axis_title = "Normalised counts"
    elif "intensity" in y_ordinates[0]:
        y_axis_title = "Intensity (arb. units)"
    else:
        y_axis_title = "Counts"
    wavelength = parse_cif.get_from_cif(cifpat, "wavelength")
    x_axis_title, y_axis_title = _scale_xy_title(_x_axis_title(x_ordinate, wavelength), y_axis_title, axis_scale)
    ax.set_xlabel(x_axis_title)
    ax.set_ylabel(y_axis_title)
    fig.suptitle(pattern, x=fig.subplotpars.left, horizontalalignment="left")
    fig.subplots_adjust(top=0.9)
    subtitle = make_subtitle_string(cifpat, yobs=y_ordinates[0], ycalc=y_ordinates[1])
    if subtitle:
        ax.set_title(subtitle, loc="left")
    if x_ordinate in {"d", "_pd_proc_d_spacing"}:
        ax.invert_xaxis()
    # https://stackoverflow.com/a/30506077/36061
    if plot_cchi2:
        ax.set_zorder(ax2.get_zorder() + 1)
        ax.patch.set_visible(False)
    # now I need to set up all the checks to see if I want to push new views onto the home stack
    # and how to reset the zoom.
    reset_zoomed_to_plt_x_min = False
    reset_zoomed_to_plt_x_max = False
    reset_zoomed_to_plt_y_min = False
    reset_zoomed_to_plt_y_max = False
    data_x_lim = ax.get_xlim()
    data_y_lim = ax.get_ylim()
    zoomed_x_lim = ax.get_xlim() if not zoomed_x_lim else zoomed_x_lim
    zoomed_y_lim = ax.get_ylim() if not zoomed_y_lim else zoomed_y_lim
    # here go the rules on changing zoom to match the current data
    if (
        self.previous_single_plot_state["pattern"] != pattern
        # if the zoom view of the data is the entire data, then the view of the new data is the
        # entire view of the new data, irrespective of the limits of the previous zoom
        and isclose_listlike(self.previous_single_plot_state["data_x_lim"], zoomed_x_lim)
        and isclose_listlike(self.previous_single_plot_state["data_y_lim"], zoomed_y_lim)
    ):
        reset_zoomed_to_plt_x_min = True
        reset_zoomed_to_plt_x_max = True
        reset_zoomed_to_plt_y_min = True
        reset_zoomed_to_plt_y_max = True
    if self.previous_single_plot_state["x_ordinate"] != x_ordinate:
        reset_zoomed_to_plt_x_min = True
        reset_zoomed_to_plt_x_max = True
    if self.previous_single_plot_state["y_ordinates"] != y_ordinates:
        reset_zoomed_to_plt_y_min = True
        reset_zoomed_to_plt_y_max = True
    if self.previous_single_plot_state["plot_diff"] != plot_diff:
        reset_zoomed_to_plt_y_min = True
    if self.previous_single_plot_state["plot_hkls"] != plot_hkls:
        reset_zoomed_to_plt_y_min = True
    if self.previous_single_plot_state["plot_norm_int"] != plot_norm_int:
        # _, (ymin, ymax) = get_zoomed_data_min_max(ax, zoomed_x_lim, data_y_lim)
        # yrange = (ymax - ymin)
        # ymid = yrange / 2
        # yrange = (yrange * 1.07) / 2
        zoomed_y_lim = data_y_lim
    if self.previous_single_plot_state["axis_scale"] not in [axis_scale, {}]:
        # The axis scaling changed: convert the remembered zoom limits into
        # the new scale so the view stays on the same data region.
        ordinate, _ = get_first_different_kv_pair(self.previous_single_plot_state["axis_scale"], axis_scale)
        if ordinate == "x":
            zoomed_xmin = rescale_val(zoomed_x_lim[0], self.previous_single_plot_state["axis_scale"], axis_scale, ordinate)
            zoomed_xmax = rescale_val(zoomed_x_lim[1], self.previous_single_plot_state["axis_scale"], axis_scale, ordinate)
            zoomed_x_lim = (zoomed_xmin, zoomed_xmax)
        elif ordinate == "y":
            zoomed_ymin = rescale_val(zoomed_y_lim[0], self.previous_single_plot_state["axis_scale"], axis_scale, ordinate)
            zoomed_ymax = rescale_val(zoomed_y_lim[1], self.previous_single_plot_state["axis_scale"], axis_scale, ordinate)
            zoomed_y_lim = (zoomed_ymin, zoomed_ymax)
    if reset_zoomed_to_plt_x_min:
        zoomed_x_lim = (data_x_lim[0], zoomed_x_lim[1])
    if reset_zoomed_to_plt_x_max:
        zoomed_x_lim = (zoomed_x_lim[0], data_x_lim[1])
    if reset_zoomed_to_plt_y_min:
        zoomed_y_lim = (data_y_lim[0], zoomed_y_lim[1])
    if reset_zoomed_to_plt_y_max:
        zoomed_y_lim = (zoomed_y_lim[0], data_y_lim[1])
    # Now it's time to set up the previous-state dictionary for the next time this function is called.
    # Need to do it right at the end so nothing else changes
    self.previous_single_plot_state["pattern"] = pattern
    self.previous_single_plot_state["x_ordinate"] = x_ordinate
    self.previous_single_plot_state["y_ordinates"] = y_ordinates
    self.previous_single_plot_state["plot_hkls"] = plot_hkls
    self.previous_single_plot_state["plot_diff"] = plot_diff
    self.previous_single_plot_state["plot_cchi2"] = plot_cchi2
    self.previous_single_plot_state["plot_norm_int"] = plot_norm_int
    self.previous_single_plot_state["axis_scale"] = axis_scale
    self.previous_single_plot_state["data_x_lim"] = ax.get_xlim()
    self.previous_single_plot_state["data_y_lim"] = ax.get_ylim()
    self.previous_single_plot_state["zoomed_x_lim"] = zoomed_x_lim
    self.previous_single_plot_state["zoomed_y_lim"] = zoomed_y_lim
    return fig, ax, zoomed_x_lim, zoomed_y_lim
def plot_hkls(self, plot_below: bool, cifpat: dict, x_ordinate: str, x, ys,
axis_scale: dict, y_min: float, y_max: float, hkl_y_offset: float,
single_plot: bool,
dpi: int, single_height_px: int, ax: Axes):
def interp(_hkl_x, _x, _y):
return np.interp(_hkl_x, _x, _y, left=float("nan"), right=float("nan"))
hovertexts = []
hkl_artists = []
y_range = y_max - y_min
hkl_markersize_pt = 6
hkl_markersize_px = hkl_markersize_pt * 72 / dpi
num_hkl_rows = len(cifpat["str"].keys())
hkl_tick_vertical_spacing = (((y_range / (single_height_px - hkl_markersize_px * num_hkl_rows)) * single_height_px) - y_range) / num_hkl_rows
hkl_x_ordinate = PlotCIF.hkl_x_ordinate_mapping[x_ordinate]
for i, phase in enumerate(cifpat["str"]):
if hkl_x_ordinate not in cifpat["str"][phase]:
continue
hkl_x = _scale_x_ordinate(cifpat["str"][phase][hkl_x_ordinate], axis_scale)
if plot_below:
if single_plot:
hkl_y = np.array([y_min - 4 * (i + 1) * hkl_tick_vertical_spacing] * len(hkl_x))
else:
hkl_y = np.array([min(ys[0])] * len(hkl_x))
markerstyle = 3
scalar = 1.0
else: # plot above
markerstyle = 7 # a pointing-down triangle with the down tip being the point described by th x,y coordinate
scalar = 1.04
yobs = ys[0] # ys is already scaled to the y-axis scale
ycalc = ys[1]
if yobs is None:
hkl_y = interp(hkl_x, x, ycalc)
elif ycalc is None:
hkl_y = interp(hkl_x, x, yobs)
else:
hkl_y = np.maximum(interp(hkl_x, x, ycalc), interp(hkl_x, x, yobs))
phase_wt_pct = f'– {cifpat["str"][phase]["_pd_phase_mass_%"]} wt%' if "_pd_phase_mass_%" in cifpat["str"][phase] else ""
hkl_y = hkl_y * scalar + hkl_y_offset
idx = i % len(TABLEAU_COLOR_VALUES)
phasename = cifpat["str"][phase]["_pd_phase_name"] if "_pd_phase_name" in cifpat["str"][phase] else phase
hkl_tick, | |
import unittest
from myunittest_settings import *
import os,sys,inspect
current_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parent_dir = os.path.dirname(current_dir)
sys.path.insert(0, parent_dir)
from find_dups import *
class TestUM(unittest.TestCase):
def setUp(self):
    # No shared fixtures are required; every test builds its own inputs.
    pass
def test_getHashAlgorithms_MD5(self):
    # Flag value 1 must enable MD5 and nothing else.
    expected = {'useSHA512': False, 'useSHA384': False, 'useSHA256': False,
                'useSHA224': False, 'useSHA1': False, 'useMD5': True}
    self.assertDictEqual(getHashAlgorithms(1), expected)
def test_getHashAlgorithms_SHA1(self):
    # Flag value 2 must enable SHA1 and nothing else.
    expected = {'useSHA512': False, 'useSHA384': False, 'useSHA256': False,
                'useSHA224': False, 'useSHA1': True, 'useMD5': False}
    self.assertDictEqual(getHashAlgorithms(2), expected)
def test_getHashAlgorithms_SHA224(self):
    # Flag value 4 must enable SHA224 and nothing else.
    expected = {'useSHA512': False, 'useSHA384': False, 'useSHA256': False,
                'useSHA224': True, 'useSHA1': False, 'useMD5': False}
    self.assertDictEqual(getHashAlgorithms(4), expected)
def test_getHashAlgorithms_SHA256(self):
    # Flag value 8 must enable SHA256 and nothing else.
    expected = {'useSHA512': False, 'useSHA384': False, 'useSHA256': True,
                'useSHA224': False, 'useSHA1': False, 'useMD5': False}
    self.assertDictEqual(getHashAlgorithms(8), expected)
def test_getHashAlgorithms_SHA384(self):
    # Flag value 16 must enable SHA384 and nothing else.
    expected = {'useSHA512': False, 'useSHA384': True, 'useSHA256': False,
                'useSHA224': False, 'useSHA1': False, 'useMD5': False}
    self.assertDictEqual(getHashAlgorithms(16), expected)
def test_getHashAlgorithms_SHA512(self):
    # Flag value 32 must enable SHA512 and nothing else.
    expected = {'useSHA512': True, 'useSHA384': False, 'useSHA256': False,
                'useSHA224': False, 'useSHA1': False, 'useMD5': False}
    self.assertDictEqual(getHashAlgorithms(32), expected)
def test_getHashAlgorithms_All(self):
    # Flag value 63 (all six bits set) must enable every algorithm.
    expected = {'useSHA512': True, 'useSHA384': True, 'useSHA256': True,
                'useSHA224': True, 'useSHA1': True, 'useMD5': True}
    self.assertDictEqual(getHashAlgorithms(63), expected)
def test_getHashAlgorithms_InvalidHigh(self):
    # An out-of-range high value (100) must fall back to MD5 only.
    expected = {'useSHA512': False, 'useSHA384': False, 'useSHA256': False,
                'useSHA224': False, 'useSHA1': False, 'useMD5': True}
    self.assertDictEqual(getHashAlgorithms(100), expected)
def test_getHashAlgorithms_InvalidLow(self):
    # An out-of-range low value (-1) must fall back to MD5 only.
    expected = {'useSHA512': False, 'useSHA384': False, 'useSHA256': False,
                'useSHA224': False, 'useSHA1': False, 'useMD5': True}
    self.assertDictEqual(getHashAlgorithms(-1), expected)
def test_getHashAlgorithms_InvalidType(self):
    # A non-numeric value ('A') must fall back to MD5 only.
    expected = {'useSHA512': False, 'useSHA384': False, 'useSHA256': False,
                'useSHA224': False, 'useSHA1': False, 'useMD5': True}
    self.assertDictEqual(getHashAlgorithms('A'), expected)
def test_hashfile_InvalidFile(self):
    # A nonexistent path must yield 0 rather than raising.
    algs = {'useMD5': True, 'useSHA1': False, 'useSHA224': False,
            'useSHA256': False, 'useSHA384': False, 'useSHA512': False}
    self.assertEqual(hashfile(rootdir + '\\invalidpath\\invalidfile.txt', DEFAULT_BLOCKSIZE, algs), 0)
def test_hashfile_InvalidBlocksize(self):
    # Invalid blocksize should default to 65536, so the hash still matches.
    algs = {'useMD5': True, 'useSHA1': False, 'useSHA224': False,
            'useSHA256': False, 'useSHA384': False, 'useSHA512': False}
    self.assertEqual(hashfile(rootdir + '\\testfiles\\file1.log', -1, algs),
                     'b7356a4b8764b54b3e3119dc2394bc7e')
def test_hashfile_MD5(self):
    # Check MD5 is calculated correctly for the known fixture file.
    algs = {'useMD5': True, 'useSHA1': False, 'useSHA224': False,
            'useSHA256': False, 'useSHA384': False, 'useSHA512': False}
    self.assertEqual(hashfile(rootdir + '\\testfiles\\file1.log', DEFAULT_BLOCKSIZE, algs),
                     'b7356a4b8764b54b3e3119dc2394bc7e')
def test_hashfile_SHA1(self):
    # Check SHA1 is calculated correctly for the known fixture file.
    algs = {'useMD5': False, 'useSHA1': True, 'useSHA224': False,
            'useSHA256': False, 'useSHA384': False, 'useSHA512': False}
    self.assertEqual(hashfile(rootdir + '\\testfiles\\file1.log', DEFAULT_BLOCKSIZE, algs),
                     'b38005fd56fa2de86f6458cb73d0d794912e94c0')
def test_hashfile_SHA224(self):
    # Check SHA224 is calculated correctly for the known fixture file.
    algs = {'useMD5': False, 'useSHA1': False, 'useSHA224': True,
            'useSHA256': False, 'useSHA384': False, 'useSHA512': False}
    self.assertEqual(hashfile(rootdir + '\\testfiles\\file1.log', DEFAULT_BLOCKSIZE, algs),
                     '4f97a461d81e2aab7e1d7e0b208271317b07a6fe12d0fbbb1919fdc7')
def test_hashfile_SHA256(self):
    # Check SHA256 is calculated correctly for the known fixture file.
    algs = {'useMD5': False, 'useSHA1': False, 'useSHA224': False,
            'useSHA256': True, 'useSHA384': False, 'useSHA512': False}
    self.assertEqual(hashfile(rootdir + '\\testfiles\\file1.log', DEFAULT_BLOCKSIZE, algs),
                     '2ec3d40c866e3e2829dbbaade913e97da18eae9a67ae786da7e430a5f1186716')
def test_hashfile_SHA384(self):
    # Check SHA384 is calculated correctly for the known fixture file.
    algs = {'useMD5': False, 'useSHA1': False, 'useSHA224': False,
            'useSHA256': False, 'useSHA384': True, 'useSHA512': False}
    self.assertEqual(hashfile(rootdir + '\\testfiles\\file1.log', DEFAULT_BLOCKSIZE, algs),
                     'c8482ee90ab9cd3f50915d466c108cdef06b515954b53630aa2964a120adc883099314a4a6e47fb25daaf49ac1143070')
def test_hashfile_SHA512(self):
    # Check SHA512 is calculated correctly for the known fixture file.
    algs = {'useMD5': False, 'useSHA1': False, 'useSHA224': False,
            'useSHA256': False, 'useSHA384': False, 'useSHA512': True}
    self.assertEqual(hashfile(rootdir + '\\testfiles\\file1.log', DEFAULT_BLOCKSIZE, algs),
                     '8531c07a2237475934675ac39ef71e8d49dc1c2c48482eead108910112e233bdfa10f60da906a1759b265e6b0db9cd7eaa5e9ec70175c615cc31c3f22529fa05')
def test_hashfile_AllHashes(self):
    # With all algorithms on, the result is the concatenation of every digest
    # in MD5/SHA1/SHA224/SHA256/SHA384/SHA512 order.
    algs = {'useMD5': True, 'useSHA1': True, 'useSHA224': True,
            'useSHA256': True, 'useSHA384': True, 'useSHA512': True}
    self.assertEqual(hashfile(rootdir + '\\testfiles\\file1.log', DEFAULT_BLOCKSIZE, algs),
                     'b7356a4b8764b54b3e3119dc2394bc7eb38005fd56fa2de86f6458cb73d0d794912e94c04f97a461d81e2aab7e1d7e0b208271317b07a6fe12d0fbbb1919fdc72ec3d40c866e3e2829dbbaade913e97da18eae9a67ae786da7e430a5f1186716c8482ee90ab9cd3f50915d466c108cdef06b515954b53630aa2964a120adc883099314a4a6e47fb25daaf49ac11430708531c07a2237475934675ac39ef71e8d49dc1c2c48482eead108910112e233bdfa10f60da906a1759b265e6b0db9cd7eaa5e9ec70175c615cc31c3f22529fa05')
def test_loadDefaultScanOptions(self):
    # Defaults must mirror the DEFAULT_* constants exactly.
    expected = {
        'FilterMode': DEFAULT_FILTERMODE,
        'FilterFile': DEFAULT_FILTERFILE,
        'SubDirs': DEFAULT_SUBDIRS,
        'MaxFileSize': DEFAULT_MAXFILESIZE,
        'IncludeEmptyFiles': DEFAULT_INCLUDEEMPTYFILES,
        'Blocksize': DEFAULT_BLOCKSIZE,
        'CSVOutput': DEFAULT_CSV,
        'HashAlgorithm': DEFAULT_HASHALGORITHM,
    }
    self.assertDictEqual(loadDefaultScanOptions(), expected)
def test_loadCommandLineScanOptionsValidArgs(self):
    # Every recognised, valid command-line value must override its default.
    cmdArg = {
        'configFile': rootdir + '\\valid-config.txt',
        'filterMode': 'INCLUDE',
        'filterFile': rootdir + '\\include-filters.txt',
        'filters': '*.log',
        'subDirs': 'FALSE',
        'maxFileSize': 100000,
        'includeEmptyFiles': 'TRUE',
        'blocksize': 131072,
        'hashAlgorithm': 3,
        'csvOutput': rootdir + '\\myresults.csv',
        'directories': 'c:',
    }
    # Start from the defaults, exactly as loadDefaultScanOptions would.
    scanOptions = {
        'FilterMode': DEFAULT_FILTERMODE,
        'FilterFile': DEFAULT_FILTERFILE,
        'SubDirs': DEFAULT_SUBDIRS,
        'MaxFileSize': DEFAULT_MAXFILESIZE,
        'IncludeEmptyFiles': DEFAULT_INCLUDEEMPTYFILES,
        'Blocksize': DEFAULT_BLOCKSIZE,
        'HashAlgorithm': DEFAULT_HASHALGORITHM,
        'CSVOutput': DEFAULT_CSV,
    }
    expectedScanOptions = {
        'FilterMode': 'INCLUDE',
        'FilterFile': rootdir + '\\include-filters.txt',
        'SubDirs': 'FALSE',
        'MaxFileSize': 100000,
        'IncludeEmptyFiles': 'TRUE',
        'Blocksize': 131072,
        'HashAlgorithm': 3,
        'CSVOutput': rootdir + '\\myresults.csv',
    }
    self.assertDictEqual(loadCommandLineScanOptions(cmdArg, scanOptions), expectedScanOptions)
def test_loadCommandLineScanOptionsInvalidArgs(self):
    """Invalid command-line values are rejected and the defaults retained."""
    cmdArg = {
        'configFile': rootdir + '\\valid-config.txt',
        'filterMode': 'INVALID_MODE',
        'filterFile': rootdir + '\\invalid_filter_path.txt',
        'filters': '*.log',
        'subDirs': 'INVALID_OPTION',
        'maxFileSize': -100000,
        'includeEmptyFiles': 'INVALID_OPTION',
        'blocksize': 0,
        'hashAlgorithm': 3,
        'csvOutput': rootdir + '\\myresults.csv',
        'directories': 'c:',
    }
    # Start from the defaults, exactly as loadDefaultScanOptions would build them.
    scanOptions = {
        'FilterMode': DEFAULT_FILTERMODE,
        'FilterFile': DEFAULT_FILTERFILE,
        'SubDirs': DEFAULT_SUBDIRS,
        'MaxFileSize': DEFAULT_MAXFILESIZE,
        'IncludeEmptyFiles': DEFAULT_INCLUDEEMPTYFILES,
        'Blocksize': DEFAULT_BLOCKSIZE,
        'HashAlgorithm': DEFAULT_HASHALGORITHM,
        'CSVOutput': DEFAULT_CSV,
    }
    # Invalid entries keep their defaults; the valid ones (max file size
    # magnitude, hash algorithm, CSV path) still come through.
    expected = {
        'FilterMode': DEFAULT_FILTERMODE,
        'FilterFile': DEFAULT_FILTERFILE,
        'SubDirs': DEFAULT_SUBDIRS,
        'MaxFileSize': 100000,
        'IncludeEmptyFiles': DEFAULT_INCLUDEEMPTYFILES,
        'Blocksize': DEFAULT_BLOCKSIZE,
        'HashAlgorithm': 3,
        'CSVOutput': rootdir + '\\myresults.csv',
    }
    self.assertDictEqual(loadCommandLineScanOptions(cmdArg, scanOptions), expected)
def test_getConfigurationsValidArgs(self):
    """getConfigurations honours valid command-line arguments end to end."""
    cmdArg = {
        'configFile': rootdir + '\\valid-config.txt',
        'filterMode': 'INCLUDE',
        'filterFile': rootdir + '\\include-filters.txt',
        'filters': '*.log',
        'subDirs': 'FALSE',
        'maxFileSize': 100000,
        'includeEmptyFiles': 'TRUE',
        'blocksize': 131072,
        'hashAlgorithm': 3,
        'csvOutput': rootdir + '\\myresults.csv',
        'directories': 'c:',
    }
    expected = {
        'FilterMode': 'INCLUDE',
        'FilterFile': rootdir + '\\include-filters.txt',
        'SubDirs': 'FALSE',
        'MaxFileSize': 100000,
        'IncludeEmptyFiles': 'TRUE',
        'Blocksize': 131072,
        'HashAlgorithm': 3,
        'CSVOutput': rootdir + '\\myresults.csv',
    }
    self.assertDictEqual(getConfigurations(cmdArg), expected)
def test_getConfigurationsInvalidArgs(self):
    """With invalid command-line values, the config-file settings win."""
    cmdArg = {
        'configFile': rootdir + '\\valid-config.txt',
        'filterMode': 'INVALID_MODE',
        'filterFile': rootdir + '\\invalid_filter_path.txt',
        'filters': '*.log',
        'subDirs': 'INVALID_OPTION',
        'maxFileSize': -100000,
        'includeEmptyFiles': 'INVALID_OPTION',
        'blocksize': 0,
        'hashAlgorithm': 3,
        'csvOutput': rootdir + '\\myresults.csv',
        'directories': 'c:',
    }
    # Everything rejected on the command line falls back to valid-config.txt.
    expected = {
        'FilterMode': 'INCLUDE',
        'FilterFile': rootdir + '\\include-filters.txt',
        'SubDirs': 'FALSE',
        'MaxFileSize': 100000,
        'IncludeEmptyFiles': 'TRUE',
        'Blocksize': 131072,
        'HashAlgorithm': 3,
        'CSVOutput': rootdir + '\\myresults.csv',
    }
    self.assertDictEqual(getConfigurations(cmdArg), expected)
def test_loadConfigFileScanOptionsValidValues(self):
    """A well-formed config file supplies every scan option."""
    expected = {
        'FilterMode': 'INCLUDE',
        'FilterFile': rootdir + '\\include-filters.txt',
        'SubDirs': 'FALSE',
        'MaxFileSize': 100000,
        'IncludeEmptyFiles': 'TRUE',
        'Blocksize': 131072,
        'HashAlgorithm': 3,
        'CSVOutput': rootdir + '\\results.csv',
    }
    self.assertDictEqual(loadConfigFileScanOptions(rootdir + '\\valid-config.txt'), expected)
def test_loadConfigFileScanOptionsInValidValues(self):
    """A config file full of bad values degrades to the defaults."""
    expected = {
        'FilterMode': DEFAULT_FILTERMODE,
        'FilterFile': DEFAULT_FILTERFILE,
        'SubDirs': DEFAULT_SUBDIRS,
        'MaxFileSize': DEFAULT_MAXFILESIZE,
        'IncludeEmptyFiles': DEFAULT_INCLUDEEMPTYFILES,
        'Blocksize': DEFAULT_BLOCKSIZE,
        'HashAlgorithm': DEFAULT_HASHALGORITHM,
        'CSVOutput': DEFAULT_CSV,
    }
    self.assertDictEqual(loadConfigFileScanOptions(rootdir + '\\bad-config.txt'), expected)
def test_loadConfigFileScanOptionsConfigNotFound(self):
    """A missing config file degrades to the defaults."""
    expected = {
        'FilterMode': DEFAULT_FILTERMODE,
        'FilterFile': DEFAULT_FILTERFILE,
        'SubDirs': DEFAULT_SUBDIRS,
        'MaxFileSize': DEFAULT_MAXFILESIZE,
        'IncludeEmptyFiles': DEFAULT_INCLUDEEMPTYFILES,
        'Blocksize': DEFAULT_BLOCKSIZE,
        'HashAlgorithm': DEFAULT_HASHALGORITHM,
        'CSVOutput': DEFAULT_CSV,
    }
    self.assertDictEqual(loadConfigFileScanOptions(rootdir + '\\invalidpath\\invalid-config-path.txt'), expected)
def test_findDup_NonZero_NoSubDirs(self):
    """Only non-zero sized files of any size should be found; no recursion."""
    scanOptions = loadDefaultScanOptions()
    scanOptions['SubDirs'] = 'FALSE'
    dups = findDup(rootdir + '\\testfiles', [], scanOptions)
    # Keep only hash buckets with more than one file: the true duplicates.
    duplicates = [paths for paths in dups.values() if len(paths) > 1]
    expected = [
        [rootdir + '\\testfiles\\bigfile1 - Copy.txt',
         rootdir + '\\testfiles\\bigfile1.txt'],
        [rootdir + '\\testfiles\\file1 - Copy.log',
         rootdir + '\\testfiles\\file1.log'],
    ]
    self.assertListEqual(duplicates, expected)
def test_findDup_NonZero_SubDirs(self):
    """Only non-zero sized files should be found, recursing into child dirs."""
    scanOptions = loadDefaultScanOptions()
    dups = findDup(rootdir + '\\testfiles', [], scanOptions)
    # Keep only hash buckets with more than one file: the true duplicates.
    duplicates = [paths for paths in dups.values() if len(paths) > 1]
    expected = [
        [rootdir + '\\testfiles\\bigfile1 - Copy.txt',
         rootdir + '\\testfiles\\bigfile1.txt',
         rootdir + '\\testfiles\\childdir\\childdir-bigfile1.txt'],
        [rootdir + '\\testfiles\\file1 - Copy.log',
         rootdir + '\\testfiles\\file1.log',
         rootdir + '\\testfiles\\childdir\\childdir-file1.log'],
    ]
    self.assertListEqual(duplicates, expected)
def test_findDup_NoSubDirs(self):
    """Zero-sized and non-zero sized files should be found; no recursion."""
    scanOptions = loadDefaultScanOptions()
    scanOptions['IncludeEmptyFiles'] = 'TRUE'
    scanOptions['SubDirs'] = 'FALSE'
    dups = findDup(rootdir + '\\testfiles', [], scanOptions)
    # Keep only hash buckets with more than one file: the true duplicates.
    duplicates = [paths for paths in dups.values() if len(paths) > 1]
    expected = [
        [rootdir + '\\testfiles\\bigfile1 - Copy.txt',
         rootdir + '\\testfiles\\bigfile1.txt'],
        [rootdir + '\\testfiles\\emptyfile1.txt',
         rootdir + '\\testfiles\\emptyfile2.txt'],
        [rootdir + '\\testfiles\\file1 - Copy.log',
         rootdir + '\\testfiles\\file1.log'],
    ]
    self.assertListEqual(duplicates, expected)
def test_findDup_SubDirs(self):
    """Zero-sized and non-zero sized files should be found, recursing into child dirs."""
    scanOptions = loadDefaultScanOptions()
    scanOptions['IncludeEmptyFiles'] = 'TRUE'
    dups = findDup(rootdir + '\\testfiles', [], scanOptions)
    # Keep only hash buckets with more than one file: the true duplicates.
    duplicates = [paths for paths in dups.values() if len(paths) > 1]
    expected = [
        [rootdir + '\\testfiles\\bigfile1 - Copy.txt',
         rootdir + '\\testfiles\\bigfile1.txt',
         rootdir + '\\testfiles\\childdir\\childdir-bigfile1.txt'],
        [rootdir + '\\testfiles\\emptyfile1.txt',
         rootdir + '\\testfiles\\emptyfile2.txt',
         rootdir + '\\testfiles\\childdir\\childdir-emptyfile1.txt'],
        [rootdir + '\\testfiles\\file1 - Copy.log',
         rootdir + '\\testfiles\\file1.log',
         rootdir + '\\testfiles\\childdir\\childdir-file1.log'],
    ]
    self.assertListEqual(duplicates, expected)
def test_findDup_IncludeOnlyLog_NoSubDirs(self):
#Zero-sized and non-zero sized files of any size should be found, but only .log files
dups = {}
results = []
scanOptions = loadDefaultScanOptions()
scanOptions['IncludeEmptyFiles'] = 'TRUE'
scanOptions['FilterMode'] = 'INCLUDE'
scanOptions['SubDirs'] = 'FALSE'
filters = ['*.log']
dups | |
tbi_ids, write_to_outstore = True)
output_gvcf_id = concat_job.rv(0)
output_gvcf_index_id = concat_job.rv(1)
elif options.caller == 'dragen':
dragen_job = merge_chr_bams_job.addChildJobFn(run_dragen_gvcf, context, sample_name, merge_chr_bams_job.rv(0), dragen_ref_index_name, udp_data_dir, helix_username, write_to_outstore = True)
output_gvcf_id = dragen_job.rv(0)
output_gvcf_index_id = dragen_job.rv(1)
if options.indel_realign_bams:
return (merge_chr_bams_job.rv(0), merge_chr_bams_job.rv(1), output_gvcf_id, output_gvcf_index_id, processed_bam_ids)
else:
return (merge_chr_bams_job.rv(0), merge_chr_bams_job.rv(1), output_gvcf_id, output_gvcf_index_id)
def run_deepTrio_gvcf(job, context, options,
                      proband_name, maternal_name, paternal_name,
                      proband_chr_bam_id, maternal_chr_bam_id, paternal_chr_bam_id,
                      ref_fasta_id, ref_fasta_index_id, output_child_vcf=False, call_parents=True, deeptrio_child_model_file_id=None, deeptrio_parent_model_file_id=None, deepvariant_model_file_id=None):
    """
    Realign trio chromosome BAMs and run DeepTrio on them.

    Dispatches per-contig variant calling for one proband/mother/father trio:
    haploid, sex and mito contigs are called per-sample with plain DeepVariant,
    while all other contigs are called jointly with DeepTrio (one shared
    make_examples stage feeding three per-sample call_variants stages).

    Returns (as Toil promises):
      * call_parents=True: (proband gvcf, proband gvcf index, maternal gvcf,
        maternal gvcf index, paternal gvcf, paternal gvcf index)
      * call_parents=False: (proband gvcf, proband gvcf index, proband vcf,
        proband vcf index).  NOTE(review): this path reads rv(2)/rv(3), which
        the callee only produces when the output VCF is stored — confirm that
        callers always pair call_parents=False with output_child_vcf=True.
    """
    # Extract contig name from the proband BAM filename.
    # NOTE(review): os.path.basename() on a file id assumes the id stringifies
    # to its original filename — confirm against the Toil FileID type used here.
    ref_fasta_name = os.path.basename(ref_fasta_id)
    if '38' in ref_fasta_name and 'no_segdup' not in ref_fasta_name:
        # GRCh38-style references use 'chr'-prefixed contig names.
        contig_name = 'chr{}'.format(re.search('bam_chr(2[0-2]|1\d|\d|X|Y|MT|M|ABOlocus)', os.path.basename(proband_chr_bam_id)).group(1))
    else:
        contig_name = re.search('bam_(2[0-2]|1\d|\d|X|Y|MT|M|ABOlocus)', os.path.basename(proband_chr_bam_id)).group(1)
    RealtimeLogger.info("Starting gvcf DeepTrio calling for trio on contig {}".format(contig_name))
    # Placeholder jobs: everything under child_job (make_examples) completes
    # before anything under call_variant_jobs starts.
    child_job = Job()
    call_variant_jobs = Job()
    job.addChild(child_job)
    child_job.addFollowOn(call_variant_jobs)
    # Run DeepVariant for haploid and sex contigs, run DeepTrio for autosomes
    if contig_name in ['X','Y', 'MT', 'ABOlocus', 'chrX', 'chrY', 'chrM']:
        # Per-sample DeepVariant; only the child's plain VCF is optionally kept.
        child_call_variants_job = call_variant_jobs.addChildJobFn(run_deepvariant_gvcf, context,
                                                                  proband_name, proband_chr_bam_id,
                                                                  ref_fasta_id, ref_fasta_index_id,
                                                                  store_output_vcf=output_child_vcf,
                                                                  deepvariant_model_file_id=deepvariant_model_file_id,
                                                                  cores=context.config.alignment_cores,
                                                                  memory=context.config.alignment_mem,
                                                                  disk=context.config.alignment_disk)
        if call_parents:
            parent1_call_variants_job = call_variant_jobs.addChildJobFn(run_deepvariant_gvcf, context,
                                                                        paternal_name, paternal_chr_bam_id,
                                                                        ref_fasta_id, ref_fasta_index_id,
                                                                        deepvariant_model_file_id=deepvariant_model_file_id,
                                                                        cores=context.config.alignment_cores,
                                                                        memory=context.config.alignment_mem,
                                                                        disk=context.config.alignment_disk)
            parent2_call_variants_job = call_variant_jobs.addChildJobFn(run_deepvariant_gvcf, context,
                                                                        maternal_name, maternal_chr_bam_id,
                                                                        ref_fasta_id, ref_fasta_index_id,
                                                                        deepvariant_model_file_id=deepvariant_model_file_id,
                                                                        cores=context.config.alignment_cores,
                                                                        memory=context.config.alignment_mem,
                                                                        disk=context.config.alignment_disk)
    else:
        # One shared make_examples pass over all three BAMs ...
        make_examples_job = child_job.addChildJobFn(run_deeptrio_make_examples, context, options,
                                                    contig_name, proband_name, maternal_name, paternal_name,
                                                    proband_chr_bam_id, maternal_chr_bam_id, paternal_chr_bam_id,
                                                    ref_fasta_id, ref_fasta_index_id,
                                                    cores=context.config.alignment_cores,
                                                    memory=context.config.alignment_mem,
                                                    disk=context.config.alignment_disk)
        # ... then one call_variants per family member; make_examples returns
        # (child examples, child gvcf records, parent1 ..., parent2 ...).
        child_call_variants_job = call_variant_jobs.addChildJobFn(run_deeptrio_call_variants, context, options,
                                                                  proband_name, contig_name,
                                                                  ref_fasta_id, ref_fasta_index_id,
                                                                  make_examples_job.rv(0), make_examples_job.rv(1), store_output_vcf=output_child_vcf,
                                                                  deeptrio_model_file_id=deeptrio_child_model_file_id,
                                                                  cores=context.config.alignment_cores,
                                                                  memory=context.config.alignment_mem,
                                                                  disk=context.config.alignment_disk)
        if call_parents:
            parent1_call_variants_job = call_variant_jobs.addChildJobFn(run_deeptrio_call_variants, context, options,
                                                                        paternal_name, contig_name,
                                                                        ref_fasta_id, ref_fasta_index_id,
                                                                        make_examples_job.rv(2), make_examples_job.rv(3),
                                                                        deeptrio_model_file_id=deeptrio_parent_model_file_id,
                                                                        cores=context.config.alignment_cores,
                                                                        memory=context.config.alignment_mem,
                                                                        disk=context.config.alignment_disk)
            parent2_call_variants_job = call_variant_jobs.addChildJobFn(run_deeptrio_call_variants, context, options,
                                                                        maternal_name, contig_name,
                                                                        ref_fasta_id, ref_fasta_index_id,
                                                                        make_examples_job.rv(4), make_examples_job.rv(5),
                                                                        deeptrio_model_file_id=deeptrio_parent_model_file_id,
                                                                        cores=context.config.alignment_cores,
                                                                        memory=context.config.alignment_mem,
                                                                        disk=context.config.alignment_disk)
    # Both branches bind the same job names, so the promises below are valid
    # regardless of which caller was used (parent1 = paternal, parent2 = maternal).
    proband_gvcf_file_id = child_call_variants_job.rv(0)
    proband_gvcf_index_file_id = child_call_variants_job.rv(1)
    if call_parents:
        maternal_gvcf_file_id = parent2_call_variants_job.rv(0)
        maternal_gvcf_index_file_id = parent2_call_variants_job.rv(1)
        paternal_gvcf_file_id = parent1_call_variants_job.rv(0)
        paternal_gvcf_index_file_id = parent1_call_variants_job.rv(1)
        return (proband_gvcf_file_id, proband_gvcf_index_file_id, maternal_gvcf_file_id, maternal_gvcf_index_file_id, paternal_gvcf_file_id, paternal_gvcf_index_file_id)
    else:
        proband_vcf_file_id = child_call_variants_job.rv(2)
        proband_vcf_index_file_id = child_call_variants_job.rv(3)
        return (proband_gvcf_file_id, proband_gvcf_index_file_id, proband_vcf_file_id, proband_vcf_index_file_id)
def run_deepvariant_gvcf(job, context, sample_name, chr_bam_id, ref_fasta_id,
                         ref_fasta_index_id, store_output_vcf=False, deepvariant_model_file_id=None):
    """
    Toil job: call one sample on one contig with DeepVariant, emitting a gVCF.

    Localizes the chromosome BAM and the reference FASTA (+.fai), indexes the
    BAM, runs run_deepvariant restricted to the contig encoded in the BAM
    filename, and writes the results to the intermediate store.

    :param sample_name: sample label used in output filenames.
    :param chr_bam_id: file id of a single-chromosome BAM (name must contain
        'bam_<contig>' / 'bam_chr<contig>').
    :param store_output_vcf: also index and store the plain VCF.
    :param deepvariant_model_file_id: optional .tar.gz of a custom model
        checkpoint; falls back to the image's bundled WGS model.
    :return: (gvcf_id, gvcf_index_id), plus (vcf_id, vcf_index_id) when
        store_output_vcf is True.
    """
    RealtimeLogger.info("Starting deepvariant haplotypecalling gvcfs")
    # Define work directory for docker calls
    work_dir = job.fileStore.getLocalTempDir()
    # We need the sample bam for variant calling
    bam_name = os.path.basename(chr_bam_id)
    bam_path = os.path.join(work_dir, bam_name)
    job.fileStore.readGlobalFile(chr_bam_id, bam_path)
    bam_name = os.path.splitext(bam_name)[0]
    ref_fasta_name = os.path.basename(ref_fasta_id)
    ref_fasta_path = os.path.join(work_dir, ref_fasta_name)
    job.fileStore.readGlobalFile(ref_fasta_id, ref_fasta_path)
    ref_fasta_index_path = os.path.join(work_dir, '{}.fai'.format(ref_fasta_name))
    job.fileStore.readGlobalFile(ref_fasta_index_id, ref_fasta_index_path)
    # Gather the deepvariant model if it exists (local variable spelling fixed
    # from 'deepvaraint_model'; behavior unchanged).
    if deepvariant_model_file_id:
        model_file_path = os.path.join(work_dir, os.path.basename(deepvariant_model_file_id))
        job.fileStore.readGlobalFile(deepvariant_model_file_id, model_file_path)
        context.runner.call(job, ['tar', '-xzf', os.path.basename(model_file_path)], work_dir = work_dir)
        # 'name.tar.gz' -> 'name': strip both extensions to get the unpacked dir.
        model_dir_name = os.path.splitext(os.path.splitext(os.path.basename(model_file_path))[0])[0]
        model_filename = ""
        # NOTE(review): keeps the stem of the *last* directory entry scanned;
        # assumes all checkpoint files share one stem (model.ckpt.*) — confirm.
        with os.scandir(os.path.join(work_dir, model_dir_name)) as entries:
            for entry in entries:
                model_filename = os.path.splitext(entry.name)[0]
        deepvariant_model = os.path.join(model_dir_name, model_filename)
    else:
        # Default model bundled in the DeepVariant container image.
        deepvariant_model = "/opt/models/wgs/model.ckpt"
    # Extract contig name from the BAM filename.  Raw strings: '\d' in a plain
    # literal is an invalid escape sequence (DeprecationWarning, future error).
    if '38' in ref_fasta_name and 'no_segdup' not in ref_fasta_name:
        # GRCh38-style references use 'chr'-prefixed contig names.
        contig_name = 'chr{}'.format(re.search(r'bam_chr(2[0-2]|1\d|\d|X|Y|MT|M|ABOlocus)', bam_name).group(1))
    else:
        contig_name = re.search(r'bam_(2[0-2]|1\d|\d|X|Y|MT|M|ABOlocus)', bam_name).group(1)
    # Run variant calling commands
    command = ['samtools', 'index', '-@', str(job.cores), os.path.basename(bam_path)]
    context.runner.call(job, command, work_dir = work_dir, tool_name='samtools')
    command = ['/opt/deepvariant/bin/run_deepvariant',
               # was the bare int job.cores; every other argv element is a str
               '--num_shards', str(job.cores),
               '--customized_model', deepvariant_model,
               '--model_type', 'WGS',
               '--make_examples_extra_args', 'min_mapping_quality=1',
               '--regions', contig_name,
               '--ref', os.path.basename(ref_fasta_path),
               '--reads', os.path.basename(bam_path),
               '--output_gvcf', '{}_{}_deepvariant.g.vcf.gz'.format(sample_name, contig_name),
               '--output_vcf', '{}_{}_deepvariant.vcf.gz'.format(sample_name, contig_name)]
    context.runner.call(job, command, work_dir = work_dir, tool_name='deepvariant')
    context.runner.call(job, ['tabix', '-f', '-p', 'vcf', '{}_{}_deepvariant.g.vcf.gz'.format(sample_name, contig_name)], work_dir=work_dir)
    # Write output to intermediate store
    output_gvcf_file_id = context.write_intermediate_file(job, os.path.join(work_dir, '{}_{}_deepvariant.g.vcf.gz'.format(sample_name, contig_name)))
    output_gvcf_index_file_id = context.write_intermediate_file(job, os.path.join(work_dir, '{}_{}_deepvariant.g.vcf.gz.tbi'.format(sample_name, contig_name)))
    if store_output_vcf:
        context.runner.call(job, ['tabix', '-f', '-p', 'vcf', '{}_{}_deepvariant.vcf.gz'.format(sample_name, contig_name)], work_dir=work_dir)
        output_vcf_file_id = context.write_intermediate_file(job, os.path.join(work_dir, '{}_{}_deepvariant.vcf.gz'.format(sample_name, contig_name)))
        output_vcf_index_file_id = context.write_intermediate_file(job, os.path.join(work_dir, '{}_{}_deepvariant.vcf.gz.tbi'.format(sample_name, contig_name)))
        return output_gvcf_file_id, output_gvcf_index_file_id, output_vcf_file_id, output_vcf_index_file_id
    else:
        return output_gvcf_file_id, output_gvcf_index_file_id
def run_deeptrio_make_examples(job, context, options,
                               contig_name,
                               proband_name, maternal_name, paternal_name,
                               proband_chr_bam_id, maternal_chr_bam_id, paternal_chr_bam_id,
                               ref_fasta_id, ref_fasta_index_id):
    """
    Run DeepTrio make examples on trio bams.

    Localizes and indexes the three BAMs plus the reference, runs
    make_examples in 'calling' mode sharded across job.cores via GNU parallel,
    then tars each sample's example and gVCF tfrecord shards.

    :return: six intermediate-store file ids:
        (child examples, child gvcf records,
         parent1/paternal examples, parent1 gvcf records,
         parent2/maternal examples, parent2 gvcf records).
    """
    RealtimeLogger.info("Starting DeepTrio make_examples for trio")
    start_time = timeit.default_timer()  # NOTE(review): never read afterwards
    # Define work directory for docker calls
    work_dir = job.fileStore.getLocalTempDir()
    # We need the trio bams.  The parent copies get the sample name appended so
    # they cannot collide with the proband BAM's filename in work_dir.
    proband_bam_path = os.path.join(work_dir, os.path.basename(proband_chr_bam_id))
    job.fileStore.readGlobalFile(proband_chr_bam_id, proband_bam_path)
    maternal_bam_path = os.path.join(work_dir, '{}_{}'.format(os.path.basename(maternal_chr_bam_id), maternal_name))
    job.fileStore.readGlobalFile(maternal_chr_bam_id, maternal_bam_path)
    paternal_bam_path = os.path.join(work_dir, '{}_{}'.format(os.path.basename(paternal_chr_bam_id), paternal_name))
    job.fileStore.readGlobalFile(paternal_chr_bam_id, paternal_bam_path)
    ref_fasta_name = os.path.basename(ref_fasta_id)
    ref_fasta_path = os.path.join(work_dir, os.path.basename(ref_fasta_id))
    job.fileStore.readGlobalFile(ref_fasta_id, ref_fasta_path)
    ref_fasta_index_path = os.path.join(work_dir, '{}.fai'.format(ref_fasta_name))
    job.fileStore.readGlobalFile(ref_fasta_index_id, ref_fasta_index_path)
    # Index all three BAMs so make_examples can random-access them.
    command = ['samtools', 'index', '-@', str(job.cores), os.path.basename(proband_bam_path)]
    context.runner.call(job, command, work_dir = work_dir, tool_name='samtools')
    command = ['samtools', 'index', '-@', str(job.cores), os.path.basename(maternal_bam_path)]
    context.runner.call(job, command, work_dir = work_dir, tool_name='samtools')
    command = ['samtools', 'index', '-@', str(job.cores), os.path.basename(paternal_bam_path)]
    context.runner.call(job, command, work_dir = work_dir, tool_name='samtools')
    # Build `seq 0 N-1 | parallel ... make_examples ... --task {}`: one
    # make_examples process per shard, parallel substituting the shard index
    # for the literal '{}' placeholder.
    cmd_list = []
    cmd_list.append(['seq', '0', '{}'.format(str(int(job.cores)-1))])
    cmd_list.append(['parallel', '-q', '--halt', '2', '--line-buffer', '/opt/deepvariant/bin/deeptrio/make_examples',
                     '--mode', 'calling',
                     '--ref', os.path.basename(ref_fasta_path),
                     '--reads_parent1', os.path.basename(paternal_bam_path),
                     '--reads_parent2', os.path.basename(maternal_bam_path),
                     '--reads', os.path.basename(proband_bam_path),
                     '--examples', '\"./make_examples.tfrecord@{}.gz\"'.format(str(job.cores)),
                     '--sample_name', proband_name,
                     '--sample_name_parent1', paternal_name,
                     '--sample_name_parent2', maternal_name,
                     '--gvcf', '\"./gvcf.tfrecord@{}.gz\"'.format(str(job.cores)),
                     '--min_mapping_quality', '\"1\"',
                     '--pileup_image_height_child', '\"60\"',
                     '--pileup_image_height_parent', '\"40\"',
                     '--regions', contig_name,
                     '--task', '{}'])
    chain_cmds = [' '.join(p) for p in cmd_list]
    command = ['/bin/bash', '-c', 'set -eo pipefail && {}'.format(' | '.join(chain_cmds))]
    context.runner.call(job, command, work_dir = work_dir, tool_name='deeptrio')
    # Bundle the sharded tfrecords: for each (sample, type) pair, list the
    # matching shard files and tar them up for the downstream call_variants jobs.
    for sample_id, file_type in itertools.product(['child', 'parent1', 'parent2'],['make_examples','gvcf']):
        cmd_list = []
        cmd_list.append(['ls'])
        cmd_list.append(['grep', '\"{}_{}.tfrecord\"'.format(file_type, sample_id)])
        cmd_list.append(['tar', '-czf', '{}_{}.tfrecord.tar.gz'.format(file_type, sample_id), '-T', '-'])
        chain_cmds = [' '.join(p) for p in cmd_list]
        command = ['/bin/bash', '-c', 'set -eo pipefail && {}'.format(' | '.join(chain_cmds))]
        context.runner.call(job, command, work_dir = work_dir)
    proband_examples_file_id = context.write_intermediate_file(job, os.path.join(work_dir, 'make_examples_child.tfrecord.tar.gz'))
    paternal_examples_file_id = context.write_intermediate_file(job, os.path.join(work_dir, 'make_examples_parent1.tfrecord.tar.gz'))
    maternal_examples_file_id = context.write_intermediate_file(job, os.path.join(work_dir, 'make_examples_parent2.tfrecord.tar.gz'))
    proband_nonvariant_site_tf_file_id = context.write_intermediate_file(job, os.path.join(work_dir, 'gvcf_child.tfrecord.tar.gz'))
    paternal_nonvariant_site_tf_file_id = context.write_intermediate_file(job, os.path.join(work_dir, 'gvcf_parent1.tfrecord.tar.gz'))
    maternal_nonvariant_site_tf_file_id = context.write_intermediate_file(job, os.path.join(work_dir, 'gvcf_parent2.tfrecord.tar.gz'))
    return (proband_examples_file_id, proband_nonvariant_site_tf_file_id, paternal_examples_file_id, paternal_nonvariant_site_tf_file_id, maternal_examples_file_id, maternal_nonvariant_site_tf_file_id)
def run_deeptrio_call_variants(job, context, options,
                               sample_name, contig_name,
                               ref_fasta_id, ref_fasta_index_id,
                               examples_file_id, nonvariant_site_tf_file_id, store_output_vcf=False, deeptrio_model_file_id=None):
    """
    Run DeepTrio call variants on a trio sample.

    Unpacks the sharded make_examples / gvcf tfrecord tarballs produced by
    run_deeptrio_make_examples, runs call_variants with the role-appropriate
    model (child vs parent, custom or image default), then postprocesses into
    a gVCF (and optionally a VCF) written to the intermediate store.

    :return: (gvcf_id, gvcf_index_id), plus (vcf_id, vcf_index_id) when
        store_output_vcf is True.
    """
    RealtimeLogger.info("Starting DeepTrio call_variants for a trio sample")
    start_time = timeit.default_timer()  # NOTE(review): never read afterwards
    # Define work directory for docker calls
    work_dir = job.fileStore.getLocalTempDir()
    # We need the reference, nonvariant site, and call variant files for the sample
    ref_fasta_name = os.path.basename(ref_fasta_id)
    ref_fasta_path = os.path.join(work_dir, os.path.basename(ref_fasta_id))
    job.fileStore.readGlobalFile(ref_fasta_id, ref_fasta_path)
    ref_fasta_index_path = os.path.join(work_dir, '{}.fai'.format(ref_fasta_name))
    job.fileStore.readGlobalFile(ref_fasta_index_id, ref_fasta_index_path)
    # We need the examples file
    examples_file_path = os.path.join(work_dir, os.path.basename(examples_file_id))
    job.fileStore.readGlobalFile(examples_file_id, examples_file_path)
    nonvariant_site_tf_file_path = os.path.join(work_dir, os.path.basename(nonvariant_site_tf_file_id))
    job.fileStore.readGlobalFile(nonvariant_site_tf_file_id, nonvariant_site_tf_file_path)
    context.runner.call(job, ['tar', '-xzf', os.path.basename(examples_file_path)], work_dir = work_dir)
    context.runner.call(job, ['tar', '-xzf', os.path.basename(nonvariant_site_tf_file_path)], work_dir = work_dir)
    # Gather the deeptrio models if they exist
    deeptrio_model = ""
    if deeptrio_model_file_id:
        model_file_path = os.path.join(work_dir, os.path.basename(deeptrio_model_file_id))
        job.fileStore.readGlobalFile(deeptrio_model_file_id, model_file_path)
        context.runner.call(job, ['tar', '-xzf', os.path.basename(model_file_path)], work_dir = work_dir)
        # 'name.tar.gz' -> 'name': strip both extensions to get the unpacked dir.
        model_dir_name = os.path.splitext(os.path.splitext(os.path.basename(model_file_path))[0])[0]
        model_filename = ""
        # NOTE(review): keeps the stem of the *last* directory entry scanned;
        # assumes all checkpoint files share one stem (model.ckpt.*) — confirm.
        with os.scandir(os.path.join(work_dir, model_dir_name)) as entries:
            for entry in entries:
                model_filename = os.path.splitext(entry.name)[0]
        deeptrio_model = os.path.join(model_dir_name, model_filename)
    # Select shard/output names by the role embedded in the examples tarball
    # name ('parent1' = paternal, 'parent2' = maternal, 'child' = proband).
    # NOTE(review): these ifs are not exclusive and have no else — a filename
    # matching none of the three would leave examples_file unbound (NameError).
    if 'parent1' in os.path.basename(examples_file_path):
        examples_file = "make_examples_parent1.tfrecord@{}.gz".format(str(job.cores))
        outfile_name = "call_variants_output_parent1.tfrecord.gz"
        nonvariant_site_tfrecord_path = "gvcf_parent1.tfrecord@{}.gz".format(str(job.cores))
        # Use the default model if none are provided
        if not deeptrio_model_file_id:
            deeptrio_model = "/opt/models/deeptrio/wgs/parent/model.ckpt"
    if 'parent2' in os.path.basename(examples_file_path):
        examples_file = "make_examples_parent2.tfrecord@{}.gz".format(str(job.cores))
        outfile_name = "call_variants_output_parent2.tfrecord.gz"
        nonvariant_site_tfrecord_path = "gvcf_parent2.tfrecord@{}.gz".format(str(job.cores))
        # Use the default model if none are provided
        if not deeptrio_model_file_id:
            deeptrio_model = "/opt/models/deeptrio/wgs/parent/model.ckpt"
    if 'child' in os.path.basename(examples_file_path):
        examples_file = "make_examples_child.tfrecord@{}.gz".format(str(job.cores))
        outfile_name = "call_variants_output_child.tfrecord.gz"
        nonvariant_site_tfrecord_path = "gvcf_child.tfrecord@{}.gz".format(str(job.cores))
        # Use the default model if none are provided
        if not deeptrio_model_file_id:
            deeptrio_model = "/opt/models/deeptrio/wgs/child/model.ckpt"
    # Run inference over the sharded examples.
    command = ['/opt/deepvariant/bin/call_variants',
               '--outfile', outfile_name,
               '--examples', examples_file,
               '--checkpoint', deeptrio_model]
    context.runner.call(job, command, work_dir = work_dir, tool_name='deeptrio')
    # Convert the raw call_variants output (plus nonvariant-site records) into
    # bgzipped VCF/gVCF.
    command = ['/opt/deepvariant/bin/postprocess_variants',
               '--ref', os.path.basename(ref_fasta_path),
               '--infile', outfile_name,
               '--outfile', '{}_{}_deeptrio.vcf.gz'.format(sample_name, contig_name),
               '--nonvariant_site_tfrecord_path', nonvariant_site_tfrecord_path,
               '--gvcf_outfile', '{}_{}_deeptrio.g.vcf.gz'.format(sample_name, contig_name)]
    context.runner.call(job, command, work_dir = work_dir, tool_name='deeptrio')
    context.runner.call(job, ['tabix', '-f', '-p', 'vcf', '{}_{}_deeptrio.g.vcf.gz'.format(sample_name, contig_name)], work_dir=work_dir)
    output_gvcf_file_id = context.write_intermediate_file(job, os.path.join(work_dir, '{}_{}_deeptrio.g.vcf.gz'.format(sample_name, contig_name)))
    output_gvcf_index_file_id = context.write_intermediate_file(job, os.path.join(work_dir, '{}_{}_deeptrio.g.vcf.gz.tbi'.format(sample_name, contig_name)))
    if store_output_vcf:
        context.runner.call(job, ['tabix', '-f', '-p', 'vcf', '{}_{}_deeptrio.vcf.gz'.format(sample_name, contig_name)], work_dir=work_dir)
        output_vcf_file_id = context.write_intermediate_file(job, os.path.join(work_dir, '{}_{}_deeptrio.vcf.gz'.format(sample_name, contig_name)))
        output_vcf_index_file_id = context.write_intermediate_file(job, os.path.join(work_dir, '{}_{}_deeptrio.vcf.gz.tbi'.format(sample_name, contig_name)))
        return output_gvcf_file_id, output_gvcf_index_file_id, output_vcf_file_id, output_vcf_index_file_id
    else:
        return output_gvcf_file_id, output_gvcf_index_file_id
def run_pipeline_deepvariant_trio_call_gvcfs(job, context, options,
proband_name, maternal_name, paternal_name,
proband_chr_bam_ids, maternal_chr_bam_ids, paternal_chr_bam_ids,
ref_fasta_id, ref_fasta_index_id, ref_fasta_dict_id, run_parents=True,
deeptrio_child_model_file_id=None, deeptrio_parent_model_file_id=None, deepvariant_model_file_id=None):
"""
Call all the chromosomes for the trio using DeepVariant Trio caller and return a merged up gvcf vcf/tbi pair
"""
# ADD ABRA_INDELREALIGNMENT HOOK HERE
RealtimeLogger.info("Starting gvcf calling pipeline for proband, maternal, paternal trio: {}, {}, {}".format(proband_name,maternal_name,paternal_name))
child_job = Job()
job.addChild(child_job)
proband_gvcf_ids = []
proband_tbi_ids = []
proband_realign_bam_ids = []
proband_vcf_ids = []
proband_vcf_tbi_ids = []
maternal_gvcf_ids = []
maternal_tbi_ids = []
maternal_realign_bam_ids = []
paternal_gvcf_ids = []
paternal_tbi_ids = []
paternal_realign_bam_ids = []
for proband_chr_bam_id, maternal_chr_bam_id, paternal_chr_bam_id in zip(proband_chr_bam_ids, maternal_chr_bam_ids, paternal_chr_bam_ids):
chr_jobs = Job()
child_job.addChild(chr_jobs)
proband_chr_bam_indel_realign_job = chr_jobs.addChildJobFn(run_indel_realignment, context,
proband_name, proband_chr_bam_id,
ref_fasta_id, ref_fasta_index_id, ref_fasta_dict_id,
abra_realign=True, delete_input=True,
cores=context.config.alignment_cores,
memory=context.config.alignment_mem,
disk=context.config.alignment_disk)
proband_realign_bam_ids.append(proband_chr_bam_indel_realign_job.rv())
if run_parents:
maternal_chr_bam_indel_realign_job = chr_jobs.addChildJobFn(run_indel_realignment, context,
maternal_name, maternal_chr_bam_id,
ref_fasta_id, ref_fasta_index_id, ref_fasta_dict_id,
abra_realign=True, delete_input=False,
cores=context.config.alignment_cores,
memory=context.config.alignment_mem,
disk=context.config.alignment_disk)
maternal_realign_bam_ids.append(maternal_chr_bam_indel_realign_job.rv())
paternal_chr_bam_indel_realign_job = chr_jobs.addChildJobFn(run_indel_realignment, context,
paternal_name, paternal_chr_bam_id,
ref_fasta_id, ref_fasta_index_id, ref_fasta_dict_id,
abra_realign=True, delete_input=False,
cores=context.config.alignment_cores,
memory=context.config.alignment_mem,
| |
# E[d][31:0]
e_d_1 = self.get("d{0}".format(self.data['d']+1), Type.int_32) # E[d][62:32]
result_w0 = e_d_0 - mul_res0
result_w1 = e_d_1 + mul_res1
# compute ssov32
max_pos = self.constant(INT32_MAX_POS, Type.int_32)
max_neg = self.constant(INT32_MAX_NEG, Type.int_32)
result_w0_ssov = ssov32(result_w0, max_pos, max_neg)
result_w1_ssov = ssov32(result_w1, max_pos, max_neg)
# put results
self.put(result_w0_ssov, "d{0}".format(self.data['c']))
self.put(result_w1_ssov, "d{0}".format(self.data['c']+1))
# set flags
c = 0
ov_w0 = overflow(result_w0_ssov)
ov_w1 = overflow(result_w1_ssov)
v = ov_w1 | ov_w0
aov_w0 = advanced_overflow(result_w0_ssov)
aov_w1 = advanced_overflow(result_w1_ssov)
av = aov_w1 | aov_w0
psw = self.get_psw()
cond_sv = (v == 0)
cond_sav = (av == 0)
sv = ((psw & SV_MASK) & cond_sv) | (1 & (cond_sv^1))
sav = ((psw & ASV_MASK) & cond_sav) | (1 & (cond_sav^1))
psw = set_usb(psw, c, v, sv, av, sav)
self.put(psw, "psw")
class RRR1_MADDSUS_H_C3_38_Inst(Instruction):
    """ Packed Multiply-Add/Subtract Q Format instruction:
        op = 0xC3
        op2 = 0x38
        User Status Flags: V, SV, AV, SAV

        Lifts the instruction to VEX: word 1 of E[c] accumulates (E[d]+product)
        and word 0 de-accumulates (E[d]-product), each saturated to signed
        32 bits before being written back, with PSW overflow flags updated.
    """
    name = 'RRR1_MADDSUS.H_C3_38'
    # Opcode 0xC3 split into two 4-bit fields; op2 (0x38) split into a 2-bit
    # and a 4-bit field to match the bit layout below.
    op = "{0}{1}".format(bin(0xc)[2:].zfill(4), bin(3)[2:].zfill(4))
    op2_1 = "{0}".format(bin(3)[2:].zfill(2))
    op2_2 = "{0}".format(bin(8)[2:].zfill(4))
    # b = D[b] register, a = D[a] register, n = shift amount, c = E[c] register
    # pair, d = E[d] register pair.
    bin_format = op + 'b'*4 + 'a'*4 + op2_1 + op2_2 + 'n'*2 + 'c'*4 + 'd'*4
    def parse(self, bitstrm):
        """Decode the bit fields into integer register numbers and shift n."""
        data = Instruction.parse(self, bitstrm)
        data = {"a": int(data['a'], 2),
                "b": int(data['b'], 2),
                "c": int(data['c'], 2),
                "d": int(data['d'], 2),
                "n": int(data['n'], 2)}
        log_this(self.name, data, hex(self.addr))
        return data
    def get_dst_reg(self):
        # Low word of the E[c] destination pair.
        return "d{0}".format(self.data['c'])
    def get_psw(self):
        # Program Status Word register (carries V/SV/AV/SAV bits).
        return self.get("psw", Type.int_32)
    def get_n(self):
        # 2-bit left-shift applied to each product.
        return self.constant(self.data['n'], Type.int_2)
    def get_d_b(self):
        return self.get("d{0}".format(self.data['b']), Type.int_32)
    def get_d_a(self):
        return self.get("d{0}".format(self.data['a']), Type.int_32)
    def fetch_operands(self):
        return self.get_d_a(), self.get_d_b(), self.get_n()
    def compute_result(self, *args):
        d_a = args[0]
        d_b = args[1]
        n = args[2]
        # Special-case masks: 0x8000 * 0x8000 << 1 would overflow the Q-format
        # product, so when both half-words are 0x8000 and n == 1 the product is
        # forced to 0x7FFFFFFF.  sc1 covers the upper half-words, sc0 the lower.
        sc1 = extend_to_32_bits(((d_a >> 16) == 0x8000) & ((d_b >> 16) == 0x8000) & (n == 1).cast_to(Type.int_32))
        sc0 = extend_to_32_bits(((d_a & 0xffff) == 0x8000) & ((d_b & 0xffff) == 0x8000) & (n == 1).cast_to(Type.int_32))
        # Branchless select: (sat & mask) | (product & ~mask).  extract_16s
        # presumably yields the sign-extended half-word — helper defined elsewhere.
        mul_res1 = (0x7fffffff & sc1) | ((extract_16s(d_a,1) * extract_16s(d_b,1)) << n.value) & (sc1^0xffffffff)
        mul_res0 = (0x7fffffff & sc0) | ((extract_16s(d_a,0) * extract_16s(d_b,0)) << n.value) & (sc0^0xffffffff)
        e_d_0 = self.get("d{0}".format(self.data['d']), Type.int_32)    # E[d][31:0]
        e_d_1 = self.get("d{0}".format(self.data['d']+1), Type.int_32)  # E[d][63:32]
        # Word 0 subtracts its product, word 1 adds its product.
        result_w0 = e_d_0 - mul_res0
        result_w1 = e_d_1 + mul_res1
        # compute ssov: saturate each word to the signed 32-bit range
        max_pos = self.constant(INT32_MAX_POS, Type.int_32)
        max_neg = self.constant(INT32_MAX_NEG, Type.int_32)
        result_w0_ssov = ssov32(result_w0, max_pos, max_neg)
        result_w1_ssov = ssov32(result_w1, max_pos, max_neg)
        # put results into the destination register pair E[c]
        self.put(result_w0_ssov, "d{0}".format(self.data['c']))
        self.put(result_w1_ssov, "d{0}".format(self.data['c']+1))
        # set flags: V/AV from either word; SV/SAV are sticky (stay set once set)
        c = 0
        ov_w0 = overflow(result_w0_ssov)
        ov_w1 = overflow(result_w1_ssov)
        v = ov_w1 | ov_w0
        aov_w0 = advanced_overflow(result_w0_ssov)
        aov_w1 = advanced_overflow(result_w1_ssov)
        av = aov_w1 | aov_w0
        psw = self.get_psw()
        cond_sv = (v == 0)
        cond_sav = (av == 0)
        # Keep the old sticky bit when no overflow occurred, else set it.
        sv = ((psw & SV_MASK) & cond_sv) | (1 & (cond_sv^1))
        sav = ((psw & ASV_MASK) & cond_sav) | (1 & (cond_sav^1))
        psw = set_usb(psw, c, v, sv, av, sav)
        self.put(psw, "psw")
class RRR1_MADDSUS_H_C3_3B_Inst(Instruction):
    """ Packed Multiply-Add/Subtract Q Format instruction:
        op = 0xC3
        op2 = 0x3B
        User Status Flags: V, SV, AV, SAV
    """
    name = 'RRR1_MADDSUS.H_C3_3B'
    # Primary opcode 0xC3, split into two 4-bit binary strings.
    op = "{0}{1}".format(bin(0xc)[2:].zfill(4), bin(3)[2:].zfill(4))
    # Secondary opcode 0x3B, split into a 2-bit and a 4-bit field.
    op2_1 = "{0}".format(bin(3)[2:].zfill(2))
    op2_2 = "{0}".format(bin(0xb)[2:].zfill(4))
    bin_format = op + 'b'*4 + 'a'*4 + op2_1 + op2_2 + 'n'*2 + 'c'*4 + 'd'*4

    def parse(self, bitstrm):
        # Convert the raw bit-string fields to integers.
        data = Instruction.parse(self, bitstrm)
        data = {"a": int(data['a'], 2),
                "b": int(data['b'], 2),
                "c": int(data['c'], 2),
                "d": int(data['d'], 2),
                "n": int(data['n'], 2)}
        log_this(self.name, data, hex(self.addr))
        return data

    def get_dst_reg(self):
        """Return the name of the destination data register D[c]."""
        return "d{0}".format(self.data['c'])

    def get_psw(self):
        """Return the current PSW (Program Status Word) as a 32-bit value."""
        return self.get("psw", Type.int_32)

    def get_n(self):
        """Return the decoded 2-bit shift amount n as an IR constant."""
        return self.constant(self.data['n'], Type.int_2)

    def get_d_b(self):
        """Read source register D[b] as a 32-bit IR value."""
        return self.get("d{0}".format(self.data['b']), Type.int_32)

    def get_d_a(self):
        """Read source register D[a] as a 32-bit IR value."""
        return self.get("d{0}".format(self.data['a']), Type.int_32)

    def fetch_operands(self):
        """Collect the operands consumed by compute_result."""
        return self.get_d_a(), self.get_d_b(), self.get_n()

    def compute_result(self, *args):
        """Multiply D[a] halfwords by D[b]'s upper halfword, subtract the
        lower product from / add the upper product to E[d], saturate each
        32-bit half into E[c], and update PSW flags.
        """
        d_a = args[0]
        d_b = args[1]
        n = args[2]
        # Saturation guards: force 0x7fffffff when both source halfwords are
        # 0x8000 and n == 1 (the doubled product would overflow Q31).
        sc1 = extend_to_32_bits(((d_a & 0xffff) == 0x8000) & ((d_b >> 16) == 0x8000) & (n == 1).cast_to(Type.int_32))
        sc0 = extend_to_32_bits(((d_a >> 16) == 0x8000) & ((d_b >> 16) == 0x8000) & (n == 1).cast_to(Type.int_32))
        # NOTE(review): sibling RRR1 variants multiply sign-extended halfwords
        # via extract_16s(); here the raw masked/shifted halfwords are used.
        # Confirm against the TriCore ISA manual for op2 0x3B.
        mul_res1 = (0x7fffffff & sc1) | (((d_a & 0xffff) * (d_b >> 16)) << n.value) & (sc1^0xffffffff)
        mul_res0 = (0x7fffffff & sc0) | (((d_a >> 16) * (d_b >> 16)) << n.value) & (sc0^0xffffffff)
        e_d_0 = self.get("d{0}".format(self.data['d']), Type.int_32)  # E[d][31:0]
        e_d_1 = self.get("d{0}".format(self.data['d']+1), Type.int_32)  # E[d][63:32]
        # Lower accumulator word: subtract; upper accumulator word: add.
        result_w0 = e_d_0 - mul_res0
        result_w1 = e_d_1 + mul_res1
        # compute ssov (signed saturation of each 32-bit half)
        max_pos = self.constant(INT32_MAX_POS, Type.int_32)
        max_neg = self.constant(INT32_MAX_NEG, Type.int_32)
        result_w0_ssov = ssov32(result_w0, max_pos, max_neg)
        result_w1_ssov = ssov32(result_w1, max_pos, max_neg)
        # put results into the register pair E[c]
        self.put(result_w0_ssov, "d{0}".format(self.data['c']))
        self.put(result_w1_ssov, "d{0}".format(self.data['c']+1))
        # set flags: V/AV are the OR of both halves' overflow conditions
        c = 0
        ov_w0 = overflow(result_w0_ssov)
        ov_w1 = overflow(result_w1_ssov)
        v = ov_w1 | ov_w0
        aov_w0 = advanced_overflow(result_w0_ssov)
        aov_w1 = advanced_overflow(result_w1_ssov)
        av = aov_w1 | aov_w0
        psw = self.get_psw()
        # SV/SAV are sticky: keep the previous bit unless a new overflow occurs.
        cond_sv = (v == 0)
        cond_sav = (av == 0)
        sv = ((psw & SV_MASK) & cond_sv) | (1 & (cond_sv^1))
        sav = ((psw & ASV_MASK) & cond_sav) | (1 & (cond_sav^1))
        psw = set_usb(psw, c, v, sv, av, sav)
        self.put(psw, "psw")
class RRR1_MADDSUM_H_C3_1E_Inst(Instruction):
    """ Packed Multiply-Add/Subtract Q Format Multi-precision instruction:
        op = 0xC3
        op2 = 0x1E
        User Status Flags: V, SV, AV, SAV
    """
    name = 'RRR1_MADDSUM.H_C3_1E'
    # Primary opcode 0xC3, split into two 4-bit binary strings.
    op = "{0}{1}".format(bin(0xc)[2:].zfill(4), bin(3)[2:].zfill(4))
    # Secondary opcode 0x1E, split into a 2-bit and a 4-bit field.
    op2_1 = "{0}".format(bin(1)[2:].zfill(2))
    op2_2 = "{0}".format(bin(0xe)[2:].zfill(4))
    bin_format = op + 'b'*4 + 'a'*4 + op2_1 + op2_2 + 'n'*2 + 'c'*4 + 'd'*4

    def parse(self, bitstrm):
        # Convert the raw bit-string fields to integers.
        data = Instruction.parse(self, bitstrm)
        data = {"a": int(data['a'], 2),
                "b": int(data['b'], 2),
                "c": int(data['c'], 2),
                "d": int(data['d'], 2),
                "n": int(data['n'], 2)}
        log_this(self.name, data, hex(self.addr))
        return data

    def get_dst_reg(self):
        """Return the name of the destination data register D[c]."""
        return "d{0}".format(self.data['c'])

    def get_psw(self):
        """Return the current PSW (Program Status Word) as a 32-bit value."""
        return self.get("psw", Type.int_32)

    def get_n(self):
        """Return the decoded 2-bit shift amount n as an IR constant."""
        return self.constant(self.data['n'], Type.int_2)

    def get_d_b(self):
        """Read source register D[b] as a 32-bit IR value."""
        return self.get("d{0}".format(self.data['b']), Type.int_32)

    def get_d_a(self):
        """Read source register D[a] as a 32-bit IR value."""
        return self.get("d{0}".format(self.data['a']), Type.int_32)

    def fetch_operands(self):
        """Collect the operands consumed by compute_result."""
        return self.get_d_a(), self.get_d_b(), self.get_n()

    def compute_result(self, *args):
        """Multi-precision variant: form the difference of the two halfword
        products, add it (shifted) to the 64-bit accumulator E[d], write the
        unsaturated 64-bit result to E[c], and derive V/AV from the 64-bit
        result.
        """
        d_a = args[0]
        d_b = args[1]
        n = args[2]
        # Saturation guards for the individual Q-format products.
        sc1 = extend_to_32_bits(((d_a >> 16) == 0x8000) & ((d_b & 0xffff) == 0x8000) & (n == 1).cast_to(Type.int_32))
        sc0 = extend_to_32_bits(((d_a & 0xffff) == 0x8000) & ((d_b & 0xffff) == 0x8000) & (n == 1).cast_to(Type.int_32))
        result_w1 = (0x7fffffff & sc1) | ((((d_a >> 16) * (d_b & 0xffff)) << n.value) & (sc1^0xffffffff))
        result_w0 = (0x7fffffff & sc0) | ((((d_a & 0xffff) * (d_b & 0xffff)) << n.value) & (sc0^0xffffffff))
        # Difference of the products, then spread across the 64-bit
        # accumulator: low 16 bits into the upper word, rest into the lower.
        sub1 = result_w1 - result_w0
        e_d_0 = self.get("d{0}".format(self.data['d']), Type.int_32)  # E[d][31:0]
        e_d_1 = self.get("d{0}".format(self.data['d']+1), Type.int_32)  # E[d][63:32]
        result_w0 = e_d_0 + (sub1 << 16)
        result_w1 = e_d_1 + (sub1 >> 16)
        # put results (no per-word saturation in the multi-precision form)
        self.put(result_w0, "d{0}".format(self.data['c']))
        self.put(result_w1, "d{0}".format(self.data['c']+1))
        # prepare 64-bit object for setting flags
        result = result_w1
        result <<= 32
        result |= result_w0
        # set flags: V/AV computed from the combined 64-bit result
        c = 0
        v = overflow_64(result).cast_to(Type.int_32)
        av = advanced_overflow_64(result).cast_to(Type.int_32)
        psw = self.get_psw()
        # SV/SAV are sticky: keep the previous bit unless a new overflow occurs.
        cond_sv = (v == 0)
        cond_sav = (av == 0)
        sv = ((psw & SV_MASK) & cond_sv) | (1 & (cond_sv^1))
        sav = ((psw & ASV_MASK) & cond_sav) | (1 & (cond_sav^1))
        psw = set_usb(psw, c, v, sv, av, sav)
        self.put(psw, "psw")
class RRR1_MADDSUM_H_C3_1D_Inst(Instruction):
""" Packed Multiply-Add/Subtract Q Format Multi-precision instruction:
op = 0xC3
op2 = 0x1D
User Status Flags: V, SV, AV, SAV
"""
name = 'RRR1_MADDSUM.H_C3_1D'
op = "{0}{1}".format(bin(0xc)[2:].zfill(4), bin(3)[2:].zfill(4))
op2_1 = "{0}".format(bin(1)[2:].zfill(2))
op2_2 = "{0}".format(bin(0xd)[2:].zfill(4))
bin_format = op + 'b'*4 + 'a'*4 + op2_1 + op2_2 + 'n'*2 + 'c'*4 + 'd'*4
def parse(self, bitstrm):
data = Instruction.parse(self, bitstrm)
data = {"a": int(data['a'], 2),
"b": int(data['b'], 2),
"c": int(data['c'], 2),
"d": int(data['d'], 2),
"n": int(data['n'], 2)}
log_this(self.name, data, hex(self.addr))
return data
def get_dst_reg(self):
return "d{0}".format(self.data['c'])
    def get_psw(self):
        """Return the current PSW (Program Status Word) as a 32-bit IR value."""
        return self.get("psw", Type.int_32)
    def get_n(self):
        """Return the decoded 2-bit shift amount n as an IR constant."""
        return self.constant(self.data['n'], Type.int_2)
def get_d_b(self):
return self.get("d{0}".format(self.data['b']), Type.int_32)
def get_d_a(self):
return self.get("d{0}".format(self.data['a']), Type.int_32)
def fetch_operands(self):
return self.get_d_a(), self.get_d_b(), self.get_n()
def compute_result(self, *args):
d_a = args[0]
d_b = args[1]
n = args[2]
sc1 = extend_to_32_bits(((d_a >> 16) == 0x8000) & ((d_b & 0xffff) == 0x8000) & (n == 1).cast_to(Type.int_32))
sc0 = extend_to_32_bits(((d_a & 0xffff) == 0x8000) & ((d_b >> 16) == 0x8000) & (n == 1).cast_to(Type.int_32))
result_w1 = (0x7fffffff & sc1) | ((((d_a >> 16) * (d_b & 0xffff)) << n.value) & (sc1^0xffff))
result_w0 = (0x7fffffff & sc0) | ((((d_a & 0xffff) * (d_b >> 16)) << n.value) & (sc0^0xffff))
sub1 = result_w1 - result_w0
e_d_0 = self.get("d{0}".format(self.data['d']), Type.int_32) # E[d][31:0]
e_d_1 = self.get("d{0}".format(self.data['d']+1), Type.int_32) # E[d][62:32]
result_w0 = e_d_0 + (sub1 << 16)
result_w1 = e_d_1 + (sub1 >> 16)
# put results
self.put(result_w0, "d{0}".format(self.data['c']))
self.put(result_w1, "d{0}".format(self.data['c']+1))
# prepare 64-bit object for setting flags
result = result_w1
result <<= | |
# coding: utf-8
# /*##########################################################################
#
# Copyright (c) 2004-2021 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ###########################################################################*/
"""
:mod:`silx.gui.plot.actions.histogram` provides actions relative to histograms
for :class:`.PlotWidget`.
The following QAction are available:
- :class:`PixelIntensitiesHistoAction`
"""
from __future__ import division
__authors__ = ["<NAME>", "<NAME>", "<NAME>"]
__date__ = "01/12/2020"
__license__ = "MIT"
from typing import Optional, Tuple
import numpy
import logging
import weakref
from .PlotToolAction import PlotToolAction
from silx.math.histogram import Histogramnd
from silx.math.combo import min_max
from silx.gui import qt
from silx.gui.plot import items
from silx.gui.widgets.ElidedLabel import ElidedLabel
from silx.gui.widgets.RangeSlider import RangeSlider
from silx.utils.deprecation import deprecated
_logger = logging.getLogger(__name__)
class _ElidedLabel(ElidedLabel):
    """QLabel with a default size larger than what is displayed."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.setTextInteractionFlags(qt.Qt.TextSelectableByMouse)

    def sizeHint(self):
        base_hint = super().sizeHint()
        # Reserve room for at least 12 characters of the current font.
        nchars = max(len(self.getText()), 12)
        text_width = self.fontMetrics().boundingRect('#' * nchars).width()
        return qt.QSize(max(base_hint.width(), text_width), base_hint.height())
class _StatWidget(qt.QWidget):
    """Widget displaying a statistic name and its value side by side.

    :param parent: Parent widget
    :param name: Name of the statistic (displayed in bold, capitalized)
    """

    def __init__(self, parent=None, name: str=''):
        super().__init__(parent)
        layout = qt.QHBoxLayout(self)
        layout.setContentsMargins(0, 0, 0, 0)
        keyWidget = qt.QLabel(parent=self)
        # Bug fix: properly close the bold tag (was ":<b>", leaving it open).
        keyWidget.setText("<b>" + name.capitalize() + ":</b>")
        layout.addWidget(keyWidget)
        self.__valueWidget = _ElidedLabel(parent=self)
        self.__valueWidget.setText("-")
        self.__valueWidget.setTextInteractionFlags(
            qt.Qt.TextSelectableByMouse | qt.Qt.TextSelectableByKeyboard)
        layout.addWidget(self.__valueWidget)

    def setValue(self, value: Optional[float]):
        """Set the displayed value.

        :param value: Value to display, or None to show a placeholder "-"
        """
        self.__valueWidget.setText(
            "-" if value is None else "{:.5g}".format(value))
class _IntEdit(qt.QLineEdit):
    """QLineEdit for integers with a default value and update on validation.

    The widget keeps a "committed" value: edits only propagate (and
    :attr:`sigValueChanged` is emitted) on focus-out or Enter/Return.
    While the text differs from the committed value it is shown in italic.

    :param QWidget parent:
    """

    sigValueChanged = qt.Signal(int)
    """Signal emitted when the value has changed (on editing finished)"""

    def __init__(self, parent=None):
        super().__init__(parent)
        self.__value = None  # last committed value; None until first commit
        self.setAlignment(qt.Qt.AlignRight)
        validator = qt.QIntValidator()
        self.setValidator(validator)
        # Keep the widget width in sync with the validator's bounds.
        validator.bottomChanged.connect(self.__updateSize)
        validator.topChanged.connect(self.__updateSize)
        self.__updateSize()
        self.textEdited.connect(self.__textEdited)

    def __updateSize(self, *args):
        """Update widget's maximum size according to bounds"""
        bottom, top = self.getRange()
        nbchar = max(len(str(bottom)), len(str(top)))
        # Measure with the italic variant since pending text is italicized
        # (presumably the wider rendering -- TODO confirm).
        font = self.font()
        font.setStyle(qt.QFont.StyleItalic)
        fontMetrics = qt.QFontMetrics(font)
        self.setMaximumWidth(
            fontMetrics.boundingRect('0' * (nbchar + 1)).width()
        )
        self.setMaxLength(nbchar)

    def __textEdited(self, _):
        # Italic marks a value that has been edited but not yet committed.
        if self.font().style() != qt.QFont.StyleItalic:
            font = self.font()
            font.setStyle(qt.QFont.StyleItalic)
            self.setFont(font)

    # Use events rather than editingFinished to also trigger with empty text
    def focusOutEvent(self, event):
        self.__commitValue()
        return super().focusOutEvent(event)

    def keyPressEvent(self, event):
        if event.key() in (qt.Qt.Key_Enter, qt.Qt.Key_Return):
            self.__commitValue()
        return super().keyPressEvent(event)

    def __commitValue(self):
        """Update the value returned by :meth:`getValue`"""
        value = self.getCurrentValue()
        if value is None:
            # Empty/invalid text: fall back to the default (placeholder) value.
            value = self.getDefaultValue()
        if value is None:
            return  # No value, keep previous one
        # Committed: restore the normal (non-italic) font.
        if self.font().style() != qt.QFont.StyleNormal:
            font = self.font()
            font.setStyle(qt.QFont.StyleNormal)
            self.setFont(font)
        if value != self.__value:
            self.__value = value
            self.sigValueChanged.emit(value)

    def getValue(self) -> Optional[int]:
        """Return current value (None if never set)."""
        return self.__value

    def setRange(self, bottom: int, top: int):
        """Set the range of valid values"""
        self.validator().setRange(bottom, top)

    def getRange(self) -> Tuple[int, int]:
        """Returns the current range of valid values

        :returns: (bottom, top)
        """
        return self.validator().bottom(), self.validator().top()

    def __validate(self, value: int, extend_range: bool):
        """Ensure value is in range

        :param int value:
        :param bool extend_range:
            True to extend range if needed.
            False to clip value if needed.
        """
        if extend_range:
            bottom, top = self.getRange()
            self.setRange(min(value, bottom), max(value, top))
        return numpy.clip(value, *self.getRange())

    def setDefaultValue(self, value: int, extend_range: bool=False):
        """Set default value when QLineEdit is empty

        :param int value:
        :param bool extend_range:
            True to extend range if needed.
            False to clip value if needed
        """
        self.setPlaceholderText(str(self.__validate(value, extend_range)))
        # If the displayed text is empty/invalid, adopt the new default now.
        if self.getCurrentValue() is None:
            self.__commitValue()

    def getDefaultValue(self) -> Optional[int]:
        """Return the default (placeholder) value, or None if not set/invalid."""
        try:
            return int(self.placeholderText())
        except ValueError:
            return None

    def setCurrentValue(self, value: int, extend_range: bool=False):
        """Set the currently displayed value

        :param int value:
        :param bool extend_range:
            True to extend range if needed.
            False to clip value if needed
        """
        self.setText(str(self.__validate(value, extend_range)))
        self.__commitValue()

    def getCurrentValue(self) -> Optional[int]:
        """Returns the displayed value or None if not correct"""
        try:
            return int(self.text())
        except ValueError:
            return None
class HistogramWidget(qt.QWidget):
"""Widget displaying a histogram and some statistic indicators"""
_SUPPORTED_ITEM_CLASS = items.ImageBase, items.Scatter
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.setWindowTitle('Histogram')
self.__itemRef = None # weakref on the item to track
layout = qt.QVBoxLayout(self)
layout.setContentsMargins(0, 0, 0, 0)
layout.setSpacing(0)
# Plot
# Lazy import to avoid circular dependencies
from silx.gui.plot.PlotWindow import Plot1D
self.__plot = Plot1D(self)
layout.addWidget(self.__plot)
self.__plot.setDataMargins(0.1, 0.1, 0.1, 0.1)
self.__plot.getXAxis().setLabel("Value")
self.__plot.getYAxis().setLabel("Count")
posInfo = self.__plot.getPositionInfoWidget()
posInfo.setSnappingMode(posInfo.SNAPPING_CURVE)
# Histogram controls
controlsWidget = qt.QWidget(self)
layout.addWidget(controlsWidget)
controlsLayout = qt.QHBoxLayout(controlsWidget)
controlsLayout.setContentsMargins(4, 4, 4, 4)
controlsLayout.addWidget(qt.QLabel("<b>Histogram:<b>"))
controlsLayout.addWidget(qt.QLabel("N. bins:"))
self.__nbinsLineEdit = _IntEdit(self)
self.__nbinsLineEdit.setRange(2, 9999)
self.__nbinsLineEdit.sigValueChanged.connect(
self.__updateHistogramFromControls)
controlsLayout.addWidget(self.__nbinsLineEdit)
self.__rangeLabel = qt.QLabel("Range:")
controlsLayout.addWidget(self.__rangeLabel)
self.__rangeSlider = RangeSlider(parent=self)
self.__rangeSlider.sigValueChanged.connect(
self.__updateHistogramFromControls)
self.__rangeSlider.sigValueChanged.connect(self.__rangeChanged)
controlsLayout.addWidget(self.__rangeSlider)
controlsLayout.addStretch(1)
# Stats display
statsWidget = qt.QWidget(self)
layout.addWidget(statsWidget)
statsLayout = qt.QHBoxLayout(statsWidget)
statsLayout.setContentsMargins(4, 4, 4, 4)
self.__statsWidgets = dict(
(name, _StatWidget(parent=statsWidget, name=name))
for name in ("min", "max", "mean", "std", "sum"))
for widget in self.__statsWidgets.values():
statsLayout.addWidget(widget)
statsLayout.addStretch(1)
def getPlotWidget(self):
"""Returns :class:`PlotWidget` use to display the histogram"""
return self.__plot
def resetZoom(self):
"""Reset PlotWidget zoom"""
self.getPlotWidget().resetZoom()
def reset(self):
"""Clear displayed information"""
self.getPlotWidget().clear()
self.setStatistics()
def getItem(self) -> Optional[items.Item]:
"""Returns item used to display histogram and statistics."""
return None if self.__itemRef is None else self.__itemRef()
    def setItem(self, item: Optional[items.Item]):
        """Set item from which to display histogram and statistics.

        Only a weak reference to *item* is kept; the widget refreshes
        automatically when the item's data or mask changes.

        :param item:
        """
        previous = self.getItem()
        if previous is not None:
            # Stop listening to the previously tracked item first.
            previous.sigItemChanged.disconnect(self.__itemChanged)
        self.__itemRef = None if item is None else weakref.ref(item)
        if item is not None:
            if isinstance(item, self._SUPPORTED_ITEM_CLASS):
                # Only listen signal for supported items
                item.sigItemChanged.connect(self.__itemChanged)
        self._updateFromItem()
def __itemChanged(self, event):
"""Handle update of the item"""
if event in (items.ItemChangedType.DATA, items.ItemChangedType.MASK):
self._updateFromItem()
def __updateHistogramFromControls(self, *args):
"""Handle udates coming from histogram control widgets"""
hist = self.getHistogram(copy=False)
if hist is not None:
count, edges = hist
if (len(count) == self.__nbinsLineEdit.getValue() and
(edges[0], edges[-1]) == self.__rangeSlider.getValues()):
return # Nothing has changed
self._updateFromItem()
def __rangeChanged(self, first, second):
"""Handle change of histogram range from the range slider"""
tooltip = "Histogram range:\n[%g, %g]" % (first, second)
self.__rangeSlider.setToolTip(tooltip)
self.__rangeLabel.setToolTip(tooltip)
def _updateFromItem(self):
"""Update histogram and stats from the item"""
item = self.getItem()
if item is None:
self.reset()
return
if not isinstance(item, self._SUPPORTED_ITEM_CLASS):
_logger.error("Unsupported item", item)
self.reset()
return
# Compute histogram and stats
array = item.getValueData(copy=False)
if array.size == 0:
self.reset()
return
xmin, xmax = min_max(array, min_positive=False, finite=True)
if xmin is None or xmax is None: # All not finite data
self.reset()
return
guessed_nbins = min(1024, int(numpy.sqrt(array.size)))
# bad hack: get 256 bins in the case we have a B&W
if numpy.issubdtype(array.dtype, numpy.integer):
if guessed_nbins > xmax - xmin:
guessed_nbins = xmax - xmin
guessed_nbins = max(2, guessed_nbins)
# Set default nbins
self.__nbinsLineEdit.setDefaultValue(guessed_nbins, extend_range=True)
# Set slider range: do not keep the range value, but the relative pos.
previousPositions = self.__rangeSlider.getPositions()
if xmin == xmax: # Enlarge range is none
if xmin == 0:
range_ = -0.01, 0.01
else:
range_ = sorted((xmin * .99, xmin * 1.01))
else:
range_ = xmin, xmax
self.__rangeSlider.setRange(*range_)
self.__rangeSlider.setPositions(*previousPositions)
histogram = Histogramnd(
array.ravel().astype(numpy.float32),
n_bins=max(2, self.__nbinsLineEdit.getValue()),
histo_range=self.__rangeSlider.getValues(),
)
if len(histogram.edges) != 1:
_logger.error("Error while computing the histogram")
self.reset()
return
self.setHistogram(histogram.histo, histogram.edges[0])
self.resetZoom()
self.setStatistics(
min_=xmin,
max_=xmax,
mean=numpy.nanmean(array),
std=numpy.nanstd(array),
sum_=numpy.nansum(array))
def setHistogram(self, histogram, edges):
"""Set displayed histogram
:param histogram: Bin values (N)
:param edges: Bin edges (N+1)
"""
# Only useful if setHistogram is called directly
# TODO
#nbins = len(histogram)
#if nbins != self.__nbinsLineEdit.getDefaultValue():
# self.__nbinsLineEdit.setValue(nbins, extend_range=True)
#self.__rangeSlider.setValues(edges[0], edges[-1])
self.getPlotWidget().addHistogram(
histogram=histogram,
edges=edges,
legend='histogram',
fill=True,
color='#66aad7',
resetzoom=False)
def getHistogram(self, copy: bool=True):
"""Returns currently displayed histogram.
:param copy: True to get a copy,
False to get internal representation (Do not modify!)
:return: (histogram, edges) or None
"""
for item in self.getPlotWidget().getItems():
if item.getName() == 'histogram':
return (item.getValueData(copy=copy),
item.getBinEdgesData(copy=copy))
else:
return None
def setStatistics(self,
min_: Optional[float] = None,
max_: Optional[float] | |
__nutils_hash__(self):
h = hashlib.sha1('{}.{}:{}\0'.format(type(self).__module__, type(self).__qualname__, type(self)._version).encode())
for arg in self._args:
h.update(nutils_hash(arg))
for name in sorted(self._kwargs):
h.update(nutils_hash(name))
h.update(nutils_hash(self._kwargs[name]))
return h.digest()
    def __getstate__(self):
        # Pickling must go through __reduce__ (reconstruct from the original
        # arguments), never through instance state.
        raise Exception('getstate should never be called')
    def __setstate__(self, state):
        # Counterpart of __getstate__: state-based unpickling is forbidden.
        raise Exception('setstate should never be called')
def __str__(self):
return '{}({})'.format(self.__class__.__name__, ','.join(str(arg) for arg in self._args))
class SingletonMeta(ImmutableMeta):
    """Metaclass that deduplicates instances by their creation arguments.

    Each class gets a weak-value cache keyed by the positional arguments
    plus the sorted keyword arguments, so equal argument sets yield the
    same (still garbage-collectable) instance.
    """

    def __new__(mcls, name, bases, namespace, **kwargs):
        cls = super().__new__(mcls, name, bases, namespace, **kwargs)
        cls._cache = weakref.WeakValueDictionary()
        return cls

    def _new(cls, args, kwargs):
        key = args + tuple(sorted(kwargs.items()))
        try:
            return cls._cache[key]
        except KeyError:
            self = super()._new(args, kwargs)
            cls._cache[key] = self
            return self
class Singleton(Immutable, metaclass=SingletonMeta):
    '''
    Subclass of :class:`Immutable` that creates a single instance per unique set
    of initialization arguments.

    Examples
    --------

    Consider the following class.

    >>> class Plain(Singleton):
    ...   def __init__(self, a, b):
    ...     pass

    Calling ``Plain`` with equivalent positional or keyword arguments produces
    one instance:

    >>> Plain(1, 2) is Plain(a=1, b=2)
    True

    Consider the folling class with annotations.

    >>> class Annotated(Singleton):
    ...   @apply_annotations
    ...   def __init__(self, a:tuple, b:tuple):
    ...     pass

    Calling ``Annotated`` with either :class:`list`\\s of ``1, 2`` and ``3, 4``
    or :class:`tuple`\\s gives a single instance:

    >>> Annotated([1, 2], [3, 4]) is Annotated((1, 2), (3, 4))
    True
    '''

    __slots__ = ()

    # Instances are unique per argument set, so identity-based equality is
    # correct and the (cached) Immutable hash remains consistent with it.
    __hash__ = Immutable.__hash__
    __eq__ = object.__eq__
def strictint(value):
    '''
    Converts any type that is a subclass of :class:`numbers.Integral` (e.g.
    :class:`int` and ``numpy.int64``) to :class:`int`, and fails otherwise.
    Notable differences with the behavior of :class:`int`:

    *   :func:`strictint` does not convert a :class:`str` to an :class:`int`.
    *   :func:`strictint` does not truncate :class:`float` to an :class:`int`.

    Examples
    --------

    >>> strictint(1), type(strictint(1))
    (1, <class 'int'>)
    >>> strictint(numpy.int64(1)), type(strictint(numpy.int64(1)))
    (1, <class 'int'>)
    >>> strictint(1.0)
    Traceback (most recent call last):
        ...
    ValueError: not an integer: 1.0
    >>> strictint('1')
    Traceback (most recent call last):
        ...
    ValueError: not an integer: '1'
    '''

    if isinstance(value, numbers.Integral):
        return builtins.int(value)
    raise ValueError('not an integer: {!r}'.format(value))
def strictfloat(value):
    '''
    Converts any type that is a subclass of :class:`numbers.Real` (e.g.
    :class:`float` and ``numpy.float64``) to :class:`float`, and fails
    otherwise. Notable difference with the behavior of :class:`float`:

    *   :func:`strictfloat` does not convert a :class:`str` to an :class:`float`.

    Examples
    --------

    >>> strictfloat(1), type(strictfloat(1))
    (1.0, <class 'float'>)
    >>> strictfloat(numpy.float64(1.2)), type(strictfloat(numpy.float64(1.2)))
    (1.2, <class 'float'>)
    >>> strictfloat(1.2+3.4j)
    Traceback (most recent call last):
        ...
    ValueError: not a real number: (1.2+3.4j)
    >>> strictfloat('1.2')
    Traceback (most recent call last):
        ...
    ValueError: not a real number: '1.2'
    '''

    if isinstance(value, numbers.Real):
        return builtins.float(value)
    raise ValueError('not a real number: {!r}'.format(value))
def strictstr(value):
    '''
    Returns ``value`` unmodified if it is a :class:`str`, and fails otherwise.
    Notable difference with the behavior of :class:`str`:

    *   :func:`strictstr` does not call ``__str__`` methods of objects to
        automatically convert objects to :class:`str`\\s.

    Examples
    --------

    Passing a :class:`str` to :func:`strictstr` works:

    >>> strictstr('spam')
    'spam'

    Passing an :class:`int` will fail:

    >>> strictstr(1)
    Traceback (most recent call last):
        ...
    ValueError: not a 'str': 1
    '''

    if isinstance(value, str):
        return value
    raise ValueError("not a 'str': {!r}".format(value))
def _getname(value):
    """Best-effort dotted name for *value*, falling back to ``str(value)``.

    The qualified name (or plain name) is required; the module prefix is
    prepended when available.
    """
    parts = []
    if hasattr(value, '__module__'):
        parts.append(value.__module__)
    if hasattr(value, '__qualname__'):
        parts.append(value.__qualname__)
    elif hasattr(value, '__name__'):
        parts.append(value.__name__)
    else:
        # No usable name attribute at all: fall back to the string form.
        return str(value)
    return '.'.join(parts)
def _copyname(dst=None, *, src, suffix=''):
    """Copy ``__name__``/``__qualname__``/``__module__`` from *src* to *dst*.

    Callable with *dst* omitted to obtain a decorator form.
    """
    if dst is None:
        return functools.partial(_copyname, src=src, suffix=suffix)
    for attr in ('__name__', '__qualname__'):
        if hasattr(src, attr):
            setattr(dst, attr, getattr(src, attr) + suffix)
    if hasattr(src, '__module__'):
        dst.__module__ = src.__module__
    return dst
class _strictmeta(type):
    """Metaclass implementing the ``strict[cls]`` subscription syntax."""

    def __getitem__(self, cls):
        def constructor(value):
            # Pass through instances of cls (or subclasses); reject the rest.
            if isinstance(value, cls):
                return value
            raise ValueError('expected an object of type {!r} but got {!r} with type {!r}'.format(cls.__qualname__, value, type(value).__qualname__))
        constructor.__qualname__ = constructor.__name__ = 'strict[{}]'.format(_getname(cls))
        return constructor

    def __call__(*args, **kwargs):
        # 'strict' is only a namespace for the [] syntax, never instantiated.
        raise TypeError("cannot create an instance of class 'strict'")
class strict(metaclass=_strictmeta):
    '''
    Type checker. The function ``strict[cls](value)`` returns ``value``
    unmodified if ``value`` is an instance of ``cls``, otherwise a
    :class:`ValueError` is raised. The class itself cannot be instantiated
    (see :class:`_strictmeta`).

    Examples
    --------

    The ``strict[int]`` function passes integers unmodified:

    >>> strict[int](1)
    1

    Other types fail:

    >>> strict[int]('1')
    Traceback (most recent call last):
        ...
    ValueError: expected an object of type 'int' but got '1' with type 'str'
    '''
class _tuplemeta(type):
    """Metaclass implementing the ``tuple[I]`` item-constructor syntax."""

    def __getitem__(self, itemtype):
        def constructor(value):
            return builtins.tuple(itemtype(item) for item in value)
        return _copyname(constructor, src=self, suffix='[{}]'.format(_getname(itemtype)))

    @staticmethod
    def __call__(*args, **kwargs):
        # Plain calls behave exactly like the builtin tuple constructor.
        return builtins.tuple(*args, **kwargs)
class tuple(builtins.tuple, metaclass=_tuplemeta):
    '''
    Wrapper of :class:`tuple` that supports a user-defined item constructor via
    the notation ``tuple[I]``, with ``I`` the item constructor. This is
    shorthand for ``lambda items: tuple(map(I, items))``. The item constructor
    should be any callable that takes one argument.

    Examples
    --------

    A tuple with items processed with :func:`strictint`:

    >>> tuple[strictint]((False, 1, 2, numpy.int64(3)))
    (0, 1, 2, 3)

    If the item constructor raises an exception, the construction of the
    :class:`tuple` failes accordingly:

    >>> tuple[strictint]((1, 2, 3.4))
    Traceback (most recent call last):
        ...
    ValueError: not an integer: 3.4
    '''

    # No instance attributes beyond the builtin tuple storage.
    __slots__ = ()
class _frozendictmeta(CacheMeta):
    """Metaclass implementing the ``frozendict[K,V]`` constructor syntax."""

    def __getitem__(self, keyvaluetype):
        if not isinstance(keyvaluetype, builtins.tuple) or len(keyvaluetype) != 2:
            raise RuntimeError("expected a 'tuple' of length 2 but got {!r}".format(keyvaluetype))
        keytype, valuetype = keyvaluetype

        def constructor(arg):
            # Accept a mapping, a mapping view, or any iterable of pairs.
            if isinstance(arg, collections.abc.Mapping):
                pairs = arg.items()
            elif isinstance(arg, (collections.abc.MappingView, collections.abc.Iterable)):
                pairs = arg
            else:
                raise ValueError('expected a mapping or iterable but got {!r}'.format(arg))
            return self((keytype(k), valuetype(v)) for k, v in pairs)

        return _copyname(constructor, src=self, suffix='[{},{}]'.format(_getname(keytype), _getname(valuetype)))
class frozendict(collections.abc.Mapping, metaclass=_frozendictmeta):
    '''
    An immutable version of :class:`dict`. The :class:`frozendict` is hashable
    and both the keys and values should be hashable as well.

    Custom key and value constructors can be supplied via the ``frozendict[K,V]``
    notation, with ``K`` the key constructor and ``V`` the value constructor,
    which is roughly equivalent to ``lambda *args, **kwargs: {K(k): V(v) for k, v
    in dict(*args, **kwargs).items()}``.

    Examples
    --------

    A :class:`frozendict` with :func:`strictstr` as key constructor and
    :func:`strictfloat` as value constructor:

    >>> frozendict[strictstr,strictfloat]({'spam': False})
    frozendict({'spam': 0.0})

    Similar but with non-strict constructors:

    >>> frozendict[str,float]({None: '1.2'})
    frozendict({'None': 1.2})

    Applying the strict constructors to invalid data raises an exception:

    >>> frozendict[strictstr,strictfloat]({None: '1.2'})
    Traceback (most recent call last):
        ...
    ValueError: not a 'str': None
    '''

    # Single dict payload plus its precomputed hash.
    __slots__ = '__base', '__hash'
    # Attribute(s) cached by the metaclass machinery.
    __cache__ = '__nutils_hash__',

    def __new__(cls, base):
        # Idempotent: an existing frozendict is returned unchanged.
        if isinstance(base, frozendict):
            return base
        self = object.__new__(cls)
        self.__base = dict(base)
        self.__hash = hash(frozenset(self.__base.items())) # check immutability and precompute hash
        return self

    @property
    def __nutils_hash__(self):
        # Order-independent digest: per-item key+value hashes are sorted
        # before being folded into the SHA1.
        h = hashlib.sha1('{}.{}\0'.format(type(self).__module__, type(self).__qualname__).encode())
        for item in sorted(nutils_hash(k)+nutils_hash(v) for k, v in self.items()):
            h.update(item)
        return h.digest()

    def __reduce__(self):
        # Pickle via the plain dict payload.
        return frozendict, (self.__base,)

    def __eq__(self, other):
        if self is other:
            return True
        if type(other) is not type(self):
            return False
        if self.__base is other.__base:
            return True
        # Cheap hash comparison first, then the full dict comparison.
        if self.__hash != other.__hash or self.__base != other.__base:
            return False
        # deduplicate: equal instances share one payload dict so later
        # comparisons take the fast identity path above.
        self.__base = other.__base
        return True

    # Read-only mapping protocol, delegated to the underlying dict.
    __getitem__ = lambda self, item: self.__base.__getitem__(item)
    __iter__ = lambda self: self.__base.__iter__()
    __len__ = lambda self: self.__base.__len__()
    __hash__ = lambda self: self.__hash
    __contains__ = lambda self, key: self.__base.__contains__(key)

    copy = lambda self: self.__base.copy()

    __repr__ = __str__ = lambda self: '{}({})'.format(type(self).__name__, self.__base)
class _frozenmultisetmeta(CacheMeta):
    """Metaclass implementing the ``frozenmultiset[I]`` constructor syntax."""

    def __getitem__(self, itemtype):
        def constructor(value):
            return self(itemtype(item) for item in value)
        return _copyname(constructor, src=self, suffix='[{}]'.format(_getname(itemtype)))
class frozenmultiset(collections.abc.Container, metaclass=_frozenmultisetmeta):
'''
An immutable multiset_. A multiset is a generalization of a set: items may
occur more than once. Two mutlisets are equal if they have the same set of
items and the same item multiplicities.
A custom item constructor can be supplied via the notation
``frozenmultiset[I]``, with ``I`` the item constructor. This is shorthand
for ``lambda items: frozenmultiset(map(I, items))``. The item constructor
should be any callable that takes one argument.
.. _multiset: https://en.wikipedia.org/wiki/Multiset
Examples
--------
>>> a = frozenmultiset(['spam', 'bacon', 'spam'])
>>> b = frozenmultiset(['sausage', 'spam'])
The :class:`frozenmultiset` objects support ``+``, ``-`` and ``&`` operators:
>>> a + b
frozenmultiset(['spam', 'bacon', 'spam', 'sausage', 'spam'])
>>> a - b
frozenmultiset(['bacon', 'spam'])
>>> a & b
frozenmultiset(['spam'])
The order of the items is irrelevant:
>>> frozenmultiset(['spam', 'spam', 'eggs']) == frozenmultiset(['spam', 'eggs', 'spam'])
True
The multiplicities, however, are not:
>>> frozenmultiset(['spam', 'spam', 'eggs']) == frozenmultiset(['spam', 'eggs'])
False
'''
__slots__ = '__items', '__key'
__cache__ = '__nutils_hash__',
def __new__(cls, items):
if isinstance(items, frozenmultiset):
return items
self = object.__new__(cls)
self.__items = tuple(items)
self.__key = frozenset((item, self.__items.count(item)) for item in self.__items)
return self
    @property
    def __nutils_hash__(self):
        # Order-independent digest: each item's hash is prefixed with its
        # zero-padded multiplicity, and the combined byte strings are sorted
        # before being folded into the SHA1.
        h = hashlib.sha1('{}.{}\0'.format(type(self).__module__, type(self).__qualname__).encode())
        for item in sorted('{:04d}'.format(count).encode()+nutils_hash(item) for item, count in self.__key):
            h.update(item)
        return h.digest()
def __and__(self, other):
'''
Return a :class:`frozenmultiset` with elements from the left and right hand
sides with strict positive multiplicity, where the multiplicity is | |
<gh_stars>0
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
GiG G8 Driver
"""
import json
from libcloud.compute.base import NodeImage, NodeSize, Node
from libcloud.compute.base import NodeDriver, UuidMixin
from libcloud.compute.base import StorageVolume, NodeAuthSSHKey
from libcloud.compute.types import Provider, NodeState
from libcloud.common.gig_g8 import G8Connection
from libcloud.common.exceptions import BaseHTTPError
class G8ProvisionError(Exception):
    """Raised when provisioning on the G8 fails (e.g. no free public port)."""
    pass
class G8PortForward(UuidMixin):
    """A single port-forwarding rule on a G8 network's public IP."""

    def __init__(self, network, node_id, publicport, privateport, protocol, driver):
        self.network = network
        self.node_id = node_id
        # Ports may arrive as strings from the API; normalize to int.
        self.publicport = int(publicport)
        self.privateport = int(privateport)
        self.protocol = protocol
        self.driver = driver
        UuidMixin.__init__(self)

    def destroy(self):
        """Remove this port forward via the owning driver."""
        self.driver.ex_delete_portforward(self)
class G8Network(UuidMixin):
    """
    G8 Network object class.

    A network maps one-to-one onto a G8 "cloudspace".
    """

    def __init__(self, id, name, cidr, publicipaddress, driver, extra=None):
        self.id = id
        self.name = name
        self.driver = driver
        self.publicipaddress = publicipaddress
        self.extra = extra
        # May be None when built from a list call; fetched lazily below.
        self._cidr = cidr
        UuidMixin.__init__(self)

    @property
    def cidr(self):
        """Private-network CIDR, fetched on first access when not supplied.

        The cloudspace list endpoint omits it, so a targeted get request
        fills it in lazily.
        """
        if self._cidr is None:
            data = self.driver._api_request(
                "/cloudspaces/get", {"cloudspaceId": self.id}
            )
            self._cidr = data["privatenetwork"]
        return self._cidr

    def list_nodes(self):
        """Return the nodes deployed inside this network."""
        return self.driver.list_nodes(self)

    def destroy(self):
        """Delete the cloudspace backing this network."""
        return self.driver.ex_destroy_network(self)

    def list_portforwards(self):
        """Return the port forwards configured on this network."""
        return self.driver.ex_list_portforwards(self)

    def create_portforward(self, node, publicport, privateport, protocol="tcp"):
        """Forward ``publicport`` on the public IP to ``privateport`` on ``node``."""
        return self.driver.ex_create_portforward(
            self, node, publicport, privateport, protocol
        )
class G8NodeDriver(NodeDriver):
"""
GiG G8 node driver
"""
# Map of G8 machine status strings to libcloud NodeState values.
NODE_STATE_MAP = {
    "VIRTUAL": NodeState.PENDING,
    "HALTED": NodeState.STOPPED,
    "RUNNING": NodeState.RUNNING,
    "DESTROYED": NodeState.TERMINATED,
    "DELETED": NodeState.TERMINATED,
    "PAUSED": NodeState.PAUSED,
    "ERROR": NodeState.ERROR,
    # transition states
    "DEPLOYING": NodeState.PENDING,
    "STOPPING": NodeState.STOPPING,
    "MOVING": NodeState.MIGRATING,
    "RESTORING": NodeState.PENDING,
    "STARTING": NodeState.STARTING,
    "PAUSING": NodeState.PENDING,
    "RESUMING": NodeState.PENDING,
    "RESETTING": NodeState.REBOOTING,
    "DELETING": NodeState.TERMINATED,
    "DESTROYING": NodeState.TERMINATED,
    "ADDING_DISK": NodeState.RECONFIGURING,
    "ATTACHING_DISK": NodeState.RECONFIGURING,
    "DETACHING_DISK": NodeState.RECONFIGURING,
    "ATTACHING_NIC": NodeState.RECONFIGURING,
    "DETTACHING_NIC": NodeState.RECONFIGURING,
    "DELETING_DISK": NodeState.RECONFIGURING,
    "CHANGING_DISK_LIMITS": NodeState.RECONFIGURING,
    "CLONING": NodeState.PENDING,
    "RESIZING": NodeState.RECONFIGURING,
    "CREATING_TEMPLATE": NodeState.PENDING,
}
# Driver identity and connection plumbing consumed by the NodeDriver base.
name = "GiG G8 Node Provider"
website = "https://gig.tech"
type = Provider.GIG_G8
connectionCls = G8Connection
def __init__(self, user_id, key, api_url):
    # type (int, str, str) -> None
    """
    :param key: Token to use for api (jwt)
    :type key: ``str``
    :param user_id: Id of the account to connect to (accountId)
    :type user_id: ``int``
    :param api_url: G8 api url
    :type api_url: ``str``
    :rtype: ``None``
    """
    # Must be set before super().__init__(): the base class builds the
    # connection, which reads this url via _ex_connection_class_kwargs().
    self._apiurl = api_url.rstrip("/")
    super(G8NodeDriver, self).__init__(key=key)
    self._account_id = user_id
    # Lazily populated by the _location property.
    self._location_data = None
def _ex_connection_class_kwargs(self):
    """Extra keyword arguments passed to the connection class constructor."""
    kwargs = {"url": self._apiurl}
    return kwargs
def _api_request(self, endpoint, params=None):
    """POST ``params`` as JSON to ``endpoint`` and return the parsed body."""
    payload = json.dumps(params)
    response = self.connection.request(
        endpoint.lstrip("/"), data=payload, method="POST"
    )
    return response.object
@property
def _location(self):
    """First location reported by the G8, fetched once and cached."""
    if self._location_data is None:
        locations = self._api_request("/locations/list")
        self._location_data = locations[0]
    return self._location_data
def create_node(
    self,
    name,
    image,
    ex_network,
    ex_description,
    size=None,
    auth=None,
    ex_create_attr=None,
    ex_expose_ssh=False,
):
    # type (str, Image, G8Network, str, Size,
    #       Optional[NodeAuthSSHKey], Optional[Dict], bool) -> Node
    """
    Create a node.

    The `ex_create_attr` parameter can include the following dictionary
    key and value pairs:

    * `memory`: ``int`` Memory in MiB
      (only used if size is None and vcpus is passed)
    * `vcpus`: ``int`` Amount of vcpus
      (only used if size is None and memory is passed)
    * `disk_size`: ``int`` Size of bootdisk,
      defaults to minimum size of the image
    * `user_data`: ``str`` for cloud-config data
    * `private_ip`: ``str`` Private IP inside network
    * `data_disks`: ``list(int)`` Extra data disks to assign
      to vm, list of disk sizes in GiB

    :param name: the name to assign the vm
    :type name: ``str``
    :param size: the plan size to create,
        mutually exclusive with `memory` `vcpus`
    :type size: :class:`NodeSize`
    :param image: which distribution to deploy on the vm
    :type image: :class:`NodeImage`
    :param ex_network: G8 Network to place vm in
    :type ex_network: :class:`G8Network`
    :param ex_description: Description of vm
    :type ex_description: ``str``
    :param auth: an SSH key
    :type auth: :class:`NodeAuthSSHKey`
    :param ex_create_attr: A dictionary of optional attributes for
        vm creation
    :type ex_create_attr: ``dict``
    :param ex_expose_ssh: Create portforward for ssh port
    :type ex_expose_ssh: ``bool``

    :return: The newly created node.
    :rtype: :class:`Node`
    """
    params = {
        "name": name,
        "imageId": int(image.id),
        "cloudspaceId": int(ex_network.id),
        "description": ex_description,
    }
    ex_create_attr = ex_create_attr or {}
    # Either a predefined size plan or explicit memory/vcpus is required.
    if size:
        params["sizeId"] = int(size.id)
    else:
        params["memory"] = ex_create_attr["memory"]
        params["vcpus"] = ex_create_attr["vcpus"]
    if "user_data" in ex_create_attr:
        params["userdata"] = ex_create_attr["user_data"]
    if "data_disks" in ex_create_attr:
        params["datadisks"] = ex_create_attr["data_disks"]
    if "private_ip" in ex_create_attr:
        params["privateIp"] = ex_create_attr["private_ip"]
    if "disk_size" in ex_create_attr:
        params["disksize"] = ex_create_attr["disk_size"]
    else:
        # Fall back to the image's minimum boot-disk size.
        params["disksize"] = image.extra["min_disk_size"]
    if auth and isinstance(auth, NodeAuthSSHKey):
        # Inject the public key into cloud-init userdata for the root user.
        # NOTE(review): setdefault assumes userdata (when supplied via
        # `user_data`) is a dict-like cloud-config structure; a plain
        # string would break the nested setdefault calls — confirm with
        # callers.
        userdata = params.setdefault("userdata", {})
        users = userdata.setdefault("users", [])
        root = None
        # for/else: create a root entry only when none exists yet.
        for user in users:
            if user["name"] == "root":
                root = user
                break
        else:
            root = {"name": "root", "shell": "/bin/bash"}
            users.append(root)
        keys = root.setdefault("ssh-authorized-keys", [])
        keys.append(auth.pubkey)
    elif auth:
        error = "Auth type {} is not implemented".format(type(auth))
        raise NotImplementedError(error)
    machineId = self._api_request("/machines/create", params)
    machine = self._api_request("/machines/get", params={"machineId": machineId})
    node = self._to_node(machine, ex_network)
    if ex_expose_ssh:
        port = self.ex_expose_ssh_node(node)
        node.extra["ssh_port"] = port
        node.extra["ssh_ip"] = ex_network.publicipaddress
    return node
def _find_ssh_ports(self, ex_network, node):
    """Collect port-forward usage on ``ex_network``.

    :return: dict with two keys:
        * ``node``: the *public* port already forwarding to SSH (22) on
          ``node``, or None when no such forward exists.
        * ``network``: list of all public ports in use on the network.
    :rtype: ``dict``
    """
    forwards = ex_network.list_portforwards()
    usedports = []
    result = {"node": None, "network": usedports}
    for forward in forwards:
        usedports.append(forward.publicport)
        if forward.node_id == node.id and forward.privateport == 22:
            # BUG FIX: callers (ex_expose_ssh_node) return this value as
            # the node's public ssh_port; the original stored
            # forward.privateport, which is always 22 here.
            result["node"] = forward.publicport
    return result
def ex_expose_ssh_node(self, node):
    """
    Create (or reuse) a port forward exposing the node's SSH port.

    :param node: Node to expose ssh for
    :type node: ``Node``
    :return: the public port forwarding to port 22 on the node
    :rtype: ``int``
    """
    network = node.extra["network"]
    ports = self._find_ssh_ports(network, node)
    if ports["node"]:
        # SSH is already exposed; reuse the existing forward.
        return ports["node"]
    usedports = ports["network"]
    sshport = 2200
    endport = 3000
    while sshport < endport:
        # Skip ports known to be taken before trying to claim one.
        while sshport in usedports:
            sshport += 1
        try:
            network.create_portforward(node, sshport, 22)
            node.extra["ssh_port"] = sshport
            node.extra["ssh_ip"] = network.publicipaddress
            break
        except BaseHTTPError as e:
            if e.code == 409:
                # Port grabbed concurrently: record it and try the next
                # candidate. BUG FIX: the original re-raised
                # unconditionally here, which made the retry dead code.
                usedports.append(sshport)
            else:
                raise
    else:
        # Exhausted the whole candidate range without a successful create.
        raise G8ProvisionError("Failed to create portforward")
    return sshport
def ex_create_network(self, name, private_network="192.168.103.0/24", type="vgw"):
    # type (str, str, str) -> G8Network
    """
    Create a network, known as a cloudspace on the G8.

    :param name: the name to assign to the network
    :type name: ``str``
    :param private_network: subnet used as private network
    :type private_network: ``str``
    :param type: type of the gateway, vgw or routeros
    :type type: ``str``
    """
    # The cloudspace needs an access user; grant it to the token's owner.
    whoami = self._api_request("../system/usermanager/whoami")
    payload = {
        "name": name,
        "accountId": self._account_id,
        "access": whoami["name"],
        "privatenetwork": private_network,
        "location": self._location["locationCode"],
        "type": type,
    }
    network_id = self._api_request("/cloudspaces/create", payload)
    data = self._api_request("/cloudspaces/get", {"cloudspaceId": network_id})
    return self._to_network(data)
def ex_destroy_network(self, network):
    # type (G8Network) -> bool
    """Delete the cloudspace backing ``network``. Always returns True."""
    payload = {"cloudspaceId": int(network.id)}
    self._api_request("/cloudspaces/delete", payload)
    return True
def stop_node(self, node):
    # type (Node) -> bool
    """Stop the virtual machine and report success."""
    # Optimistically track node state around the API call.
    node.state = NodeState.STOPPING
    payload = {"machineId": int(node.id)}
    self._api_request("/machines/stop", payload)
    node.state = NodeState.STOPPED
    return True
def ex_list_portforwards(self, network):
    # type (G8Network) -> List[G8PortForward]
    """Return all port forwards configured on ``network``."""
    data = self._api_request(
        "/portforwarding/list", {"cloudspaceId": int(network.id)}
    )
    return [self._to_port_forward(item, network) for item in data]
def ex_create_portforward(
    self, network, node, publicport, privateport, protocol="tcp"
):
    # type (G8Network, Node, int, int, str) -> G8PortForward
    """Forward ``publicport`` on the network's public IP to ``privateport`` on ``node``."""
    params = {
        "cloudspaceId": int(network.id),
        "machineId": int(node.id),
        "localPort": privateport,
        "publicPort": publicport,
        "publicIp": network.publicipaddress,
        "protocol": protocol,
    }
    self._api_request("/portforwarding/create", params)
    # The create call returns nothing we need; build the object from the
    # request parameters themselves.
    return self._to_port_forward(params, network)
def ex_delete_portforward(self, portforward):
    # type (G8PortForward) -> bool
    """Remove the given port forward from its network. Always returns True."""
    network = portforward.network
    params = {
        "cloudspaceId": int(network.id),
        "publicIp": network.publicipaddress,
        "publicPort": portforward.publicport,
        "proto": portforward.protocol,
    }
    self._api_request("/portforwarding/deleteByPort", params)
    return True
def start_node(self, node):
    # type (Node) -> bool
    """Start the virtual machine and report success."""
    # Optimistically track node state around the API call.
    node.state = NodeState.STARTING
    payload = {"machineId": int(node.id)}
    self._api_request("/machines/start", payload)
    node.state = NodeState.RUNNING
    return True
def ex_list_networks(self):
    # type () -> List[G8Network]
    """
    Return the list of networks.

    Only cloudspaces belonging to the configured account are included.

    :return: A list of network objects.
    :rtype: ``list`` of :class:`G8Network`
    """
    spaces = self._api_request("/cloudspaces/list")
    return [
        self._to_network(space)
        for space in spaces
        if space["accountId"] == self._account_id
    ]
def list_sizes(self):
    # type () -> List[Size]
    """Return the node sizes available at this G8 location."""
    params = {"location": self._location["locationCode"]}
    sizes = []
    # _to_size expands a single API entry into multiple size objects,
    # hence extend rather than append.
    for entry in self._api_request("/sizes/list", params):
        sizes.extend(self._to_size(entry))
    return sizes
def list_nodes(self, ex_network=None):
# type (Optional[G8Network]) -> List[Node]
"""
List the nodes known to a particular driver;
There are two default nodes created at the beginning
"""
def _get_ssh_port(forwards, node):
for forward in forwards:
if forward.node_id == node.id and forward.privateport == 22:
return forward
if ex_network:
networks = [ex_network]
else:
networks | |
*,
duration: pulumi.Input[int],
filename: pulumi.Input[str],
filter: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[int] duration: Time frame in seconds of the capture.
:param pulumi.Input[str] filename: Defines the name of the capture file.
:param pulumi.Input[str] filter: Additional filter to apply to the capture. For example: `proc.name contains nginx`.
"""
pulumi.set(__self__, "duration", duration)
pulumi.set(__self__, "filename", filename)
if filter is not None:
pulumi.set(__self__, "filter", filter)
# Auto-generated pulumi accessors: each property reads/writes the value
# stored by __init__ via pulumi.get/pulumi.set under the same key.
@property
@pulumi.getter
def duration(self) -> pulumi.Input[int]:
    """
    Time frame in seconds of the capture.
    """
    return pulumi.get(self, "duration")

@duration.setter
def duration(self, value: pulumi.Input[int]):
    pulumi.set(self, "duration", value)

@property
@pulumi.getter
def filename(self) -> pulumi.Input[str]:
    """
    Defines the name of the capture file.
    """
    return pulumi.get(self, "filename")

@filename.setter
def filename(self, value: pulumi.Input[str]):
    pulumi.set(self, "filename", value)

@property
@pulumi.getter
def filter(self) -> Optional[pulumi.Input[str]]:
    """
    Additional filter to apply to the capture. For example: `proc.name contains nginx`.
    """
    return pulumi.get(self, "filter")

@filter.setter
def filter(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "filter", value)
@pulumi.input_type
class AlertMetricCustomNotificationArgs:
    """Custom notification content (title/append/prepend) for a metric alert."""

    def __init__(__self__, *,
                 title: pulumi.Input[str],
                 append: Optional[pulumi.Input[str]] = None,
                 prepend: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] title: Sets the title of the alert. It is commonly defined as `{{__alert_name__}} is {{__alert_status__}}`.
        :param pulumi.Input[str] append: Text to add after the alert template.
        :param pulumi.Input[str] prepend: Text to add before the alert template.
        """
        pulumi.set(__self__, "title", title)
        # Optional fields are only stored when explicitly provided.
        if append is not None:
            pulumi.set(__self__, "append", append)
        if prepend is not None:
            pulumi.set(__self__, "prepend", prepend)

    @property
    @pulumi.getter
    def title(self) -> pulumi.Input[str]:
        """
        Sets the title of the alert. It is commonly defined as `{{__alert_name__}} is {{__alert_status__}}`.
        """
        return pulumi.get(self, "title")

    @title.setter
    def title(self, value: pulumi.Input[str]):
        pulumi.set(self, "title", value)

    @property
    @pulumi.getter
    def append(self) -> Optional[pulumi.Input[str]]:
        """
        Text to add after the alert template.
        """
        return pulumi.get(self, "append")

    @append.setter
    def append(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "append", value)

    @property
    @pulumi.getter
    def prepend(self) -> Optional[pulumi.Input[str]]:
        """
        Text to add before the alert template.
        """
        return pulumi.get(self, "prepend")

    @prepend.setter
    def prepend(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "prepend", value)
@pulumi.input_type
class AlertPromqlCaptureArgs:
    """Capture settings attached to a PromQL alert.

    Mirrors the metric-alert capture args; docstrings added here for
    consistency with that sibling class.
    """

    def __init__(__self__, *,
                 duration: pulumi.Input[int],
                 filename: pulumi.Input[str],
                 filter: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[int] duration: Time frame in seconds of the capture.
        :param pulumi.Input[str] filename: Defines the name of the capture file.
        :param pulumi.Input[str] filter: Additional filter to apply to the capture.
        """
        pulumi.set(__self__, "duration", duration)
        pulumi.set(__self__, "filename", filename)
        # Optional field only stored when explicitly provided.
        if filter is not None:
            pulumi.set(__self__, "filter", filter)

    @property
    @pulumi.getter
    def duration(self) -> pulumi.Input[int]:
        """
        Time frame in seconds of the capture.
        """
        return pulumi.get(self, "duration")

    @duration.setter
    def duration(self, value: pulumi.Input[int]):
        pulumi.set(self, "duration", value)

    @property
    @pulumi.getter
    def filename(self) -> pulumi.Input[str]:
        """
        Defines the name of the capture file.
        """
        return pulumi.get(self, "filename")

    @filename.setter
    def filename(self, value: pulumi.Input[str]):
        pulumi.set(self, "filename", value)

    @property
    @pulumi.getter
    def filter(self) -> Optional[pulumi.Input[str]]:
        """
        Additional filter to apply to the capture.
        """
        return pulumi.get(self, "filter")

    @filter.setter
    def filter(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "filter", value)
@pulumi.input_type
class AlertPromqlCustomNotificationArgs:
    """Custom notification content (title/append/prepend) for a PromQL alert."""

    def __init__(__self__, *,
                 title: pulumi.Input[str],
                 append: Optional[pulumi.Input[str]] = None,
                 prepend: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] title: Sets the title of the alert. It is commonly defined as `{{__alert_name__}} is {{__alert_status__}}`.
        :param pulumi.Input[str] append: Text to add after the alert template.
        :param pulumi.Input[str] prepend: Text to add before the alert template.
        """
        pulumi.set(__self__, "title", title)
        # Optional fields are only stored when explicitly provided.
        if append is not None:
            pulumi.set(__self__, "append", append)
        if prepend is not None:
            pulumi.set(__self__, "prepend", prepend)

    @property
    @pulumi.getter
    def title(self) -> pulumi.Input[str]:
        """
        Sets the title of the alert. It is commonly defined as `{{__alert_name__}} is {{__alert_status__}}`.
        """
        return pulumi.get(self, "title")

    @title.setter
    def title(self, value: pulumi.Input[str]):
        pulumi.set(self, "title", value)

    @property
    @pulumi.getter
    def append(self) -> Optional[pulumi.Input[str]]:
        """
        Text to add after the alert template.
        """
        return pulumi.get(self, "append")

    @append.setter
    def append(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "append", value)

    @property
    @pulumi.getter
    def prepend(self) -> Optional[pulumi.Input[str]]:
        """
        Text to add before the alert template.
        """
        return pulumi.get(self, "prepend")

    @prepend.setter
    def prepend(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "prepend", value)
@pulumi.input_type
class DashboardPanelArgs:
def __init__(__self__, *,
             height: pulumi.Input[int],
             name: pulumi.Input[str],
             pos_x: pulumi.Input[int],
             pos_y: pulumi.Input[int],
             type: pulumi.Input[str],
             width: pulumi.Input[int],
             autosize_text: Optional[pulumi.Input[bool]] = None,
             content: Optional[pulumi.Input[str]] = None,
             description: Optional[pulumi.Input[str]] = None,
             queries: Optional[pulumi.Input[Sequence[pulumi.Input['DashboardPanelQueryArgs']]]] = None,
             transparent_background: Optional[pulumi.Input[bool]] = None,
             visible_title: Optional[pulumi.Input[bool]] = None):
    """
    :param pulumi.Input[int] height: Height of the panel. Min value: 1.
    :param pulumi.Input[str] name: Name of the panel.
    :param pulumi.Input[int] pos_x: Position of the panel in the X axis. Min value: 0, max value: 23.
    :param pulumi.Input[int] pos_y: Position of the panel in the Y axis. Min value: 0.
    :param pulumi.Input[str] type: Kind of panel, must be either `timechart`, `number` or `text`.
    :param pulumi.Input[int] width: Width of the panel. Min value: 1, max value: 24.
    :param pulumi.Input[bool] autosize_text: If true, the text will be autosized in the panel.
        This field is ignored for all panel types except `text`.
    :param pulumi.Input[str] content: This field is required if the panel type is `text`. It represents the
        text that will be displayed in the panel.
    :param pulumi.Input[str] description: Description of the panel.
    :param pulumi.Input[Sequence[pulumi.Input['DashboardPanelQueryArgs']]] queries: The PromQL query that will show information in the panel.
        If the type of the panel is `timechart`, then it can be specified multiple
        times, to have multiple metrics in the same graph.
        If the type of the panel is `number` then only one can be specified.
        This field is required if the panel type is `timechart` or `number`.
    :param pulumi.Input[bool] transparent_background: If true, the panel will have a transparent background.
        This field is ignored for all panel types except `text`.
    :param pulumi.Input[bool] visible_title: If true, the title of the panel will be displayed. Default: false.
        This field is ignored for all panel types except `text`.
    """
    pulumi.set(__self__, "height", height)
    pulumi.set(__self__, "name", name)
    pulumi.set(__self__, "pos_x", pos_x)
    pulumi.set(__self__, "pos_y", pos_y)
    pulumi.set(__self__, "type", type)
    pulumi.set(__self__, "width", width)
    # Optional fields are only stored when explicitly provided.
    if autosize_text is not None:
        pulumi.set(__self__, "autosize_text", autosize_text)
    if content is not None:
        pulumi.set(__self__, "content", content)
    if description is not None:
        pulumi.set(__self__, "description", description)
    if queries is not None:
        pulumi.set(__self__, "queries", queries)
    if transparent_background is not None:
        pulumi.set(__self__, "transparent_background", transparent_background)
    if visible_title is not None:
        pulumi.set(__self__, "visible_title", visible_title)
# Auto-generated pulumi accessors: each property reads/writes the value
# stored by __init__ via pulumi.get/pulumi.set under the same key.
@property
@pulumi.getter
def height(self) -> pulumi.Input[int]:
    """
    Height of the panel. Min value: 1.
    """
    return pulumi.get(self, "height")

@height.setter
def height(self, value: pulumi.Input[int]):
    pulumi.set(self, "height", value)

@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
    """
    Name of the panel.
    """
    return pulumi.get(self, "name")

@name.setter
def name(self, value: pulumi.Input[str]):
    pulumi.set(self, "name", value)

@property
@pulumi.getter(name="posX")
def pos_x(self) -> pulumi.Input[int]:
    """
    Position of the panel in the X axis. Min value: 0, max value: 23.
    """
    return pulumi.get(self, "pos_x")

@pos_x.setter
def pos_x(self, value: pulumi.Input[int]):
    pulumi.set(self, "pos_x", value)

@property
@pulumi.getter(name="posY")
def pos_y(self) -> pulumi.Input[int]:
    """
    Position of the panel in the Y axis. Min value: 0.
    """
    return pulumi.get(self, "pos_y")

@pos_y.setter
def pos_y(self, value: pulumi.Input[int]):
    pulumi.set(self, "pos_y", value)

@property
@pulumi.getter
def type(self) -> pulumi.Input[str]:
    """
    Kind of panel, must be either `timechart`, `number` or `text`.
    """
    return pulumi.get(self, "type")

@type.setter
def type(self, value: pulumi.Input[str]):
    pulumi.set(self, "type", value)

@property
@pulumi.getter
def width(self) -> pulumi.Input[int]:
    """
    Width of the panel. Min value: 1, max value: 24.
    """
    return pulumi.get(self, "width")

@width.setter
def width(self, value: pulumi.Input[int]):
    pulumi.set(self, "width", value)

@property
@pulumi.getter(name="autosizeText")
def autosize_text(self) -> Optional[pulumi.Input[bool]]:
    """
    If true, the text will be autosized in the panel.
    This field is ignored for all panel types except `text`.
    """
    return pulumi.get(self, "autosize_text")

@autosize_text.setter
def autosize_text(self, value: Optional[pulumi.Input[bool]]):
    pulumi.set(self, "autosize_text", value)

@property
@pulumi.getter
def content(self) -> Optional[pulumi.Input[str]]:
    """
    This field is required if the panel type is `text`. It represents the
    text that will be displayed in the panel.
    """
    return pulumi.get(self, "content")

@content.setter
def content(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "content", value)

@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
    """
    Description of the panel.
    """
    return pulumi.get(self, "description")

@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "description", value)

@property
@pulumi.getter
def queries(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['DashboardPanelQueryArgs']]]]:
    """
    The PromQL query that will show information in the panel.
    If the type of the panel is `timechart`, then it can be specified multiple
    times, to have multiple metrics in the same graph.
    If the type of the panel is `number` then only one can be specified.
    This field is required if the panel type is `timechart` or `number`.
    """
    return pulumi.get(self, "queries")

@queries.setter
def queries(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['DashboardPanelQueryArgs']]]]):
    pulumi.set(self, "queries", value)

@property
@pulumi.getter(name="transparentBackground")
def transparent_background(self) -> Optional[pulumi.Input[bool]]:
    """
    If true, the panel will have a transparent background.
    This field is ignored for all panel types except `text`.
    """
    return pulumi.get(self, "transparent_background")

@transparent_background.setter
def transparent_background(self, value: Optional[pulumi.Input[bool]]):
    pulumi.set(self, "transparent_background", value)
@property
@pulumi.getter(name="visibleTitle")
def visible_title(self) -> Optional[pulumi.Input[bool]]:
"""
If true, | |
<gh_stars>1-10
"""Main entry file, definition of ColumnDT and DataTables."""
import sys
from collections import namedtuple
from logging import getLogger
from dateutil.parser import parse as date_parse
import datetime
from sqlalchemy.dialects import postgresql, mysql, sqlite
from sqlalchemy import func, Text, or_
import math
from psycopg2.extensions import adapt as sqlescape
from sqlalchemy.sql import compiler
from copy import copy, deepcopy
# Module-level logger. NOTE(review): keyed by __file__ (the file path)
# rather than the conventional __name__ — confirm this is intentional.
log = getLogger(__file__)
if sys.version_info > (3, 0):
    # Python 2 compatibility shim: alias ``unicode`` to ``str`` on py3.
    unicode = str
def clean_regex(regex):
    """
    Escape any regex special characters other than alternation.

    :param regex: regex from datatables interface
    :type regex: str
    :rtype: str with regex to use with database
    """
    # Drop any backslashes first: nothing is expected to be pre-escaped.
    cleaned = regex.replace('\\', '')
    # Escape every regex metacharacter except alternation '|' and the
    # escape character itself (see regular-expressions.info/refquick.html).
    for special in '[^$.?*+(){}':
        cleaned = cleaned.replace(special, '\\' + special)
    # Collapse repeated alternations ('||', '|||', ...) down to one '|'.
    while '||' in cleaned:
        cleaned = cleaned.replace('||', '|')
    # A trailing '|' (common while the user is still typing in the search
    # box) would be an operational error — strip it.
    return cleaned.rstrip('|')
# Comparison operators accepted as a prefix of a numeric/date query value.
search_operators = {
    '=': lambda expr, value: expr == value,
    '>': lambda expr, value: expr > value,
    '>=': lambda expr, value: expr >= value,
    '<': lambda expr, value: expr < value,
    '<=': lambda expr, value: expr <= value,
}


def parse_query_value(combined_value):
    """Parse value in form of '>value' to a lambda and a value."""
    stripped = combined_value.lstrip('<>=')
    prefix_len = len(combined_value) - len(stripped)
    # No prefix means plain equality.
    operator = combined_value[:prefix_len] or '='
    if operator not in search_operators:
        raise ValueError(
            'Numeric query should start with operator, choose from %s'
            % ', '.join(search_operators.keys()))
    return search_operators[operator], stripped.strip()
def numeric_query(expr, value):
    """Apply the numeric comparison parsed from ``value`` to ``expr``."""
    operator_func, raw = parse_query_value(value)
    # An empty value compares against zero.
    num_value = float(raw) if raw else 0
    return operator_func(expr, num_value)
def date_query(expr, value):
    """Apply the date comparison parsed from ``value`` to ``expr``."""
    operator_func, raw = parse_query_value(value)
    try:
        when = date_parse(raw)
    except ValueError:
        # Unparseable input falls back to the current time.
        when = datetime.datetime.now()
    return operator_func(expr, when)
def yadcf_range_number(expr, value):
    """BETWEEN filter for a yadcf number range 'from-yadcf_delim-to'."""
    raw_from, raw_to = value.split('-yadcf_delim-')
    # Missing bounds become infinities so one-sided ranges still work.
    lower = float(raw_from) if raw_from != '' else -float('inf')
    upper = float(raw_to) if raw_to != '' else float('inf')
    log.debug('yadcf_range_number: between %f and %f', lower, upper)
    return expr.between(lower, upper)
def yadcf_range_date(expr, value):
    """BETWEEN filter for a yadcf date range 'from-yadcf_delim-to'."""
    raw_from, raw_to = value.split('-yadcf_delim-')
    # Missing bounds become the date extremes so one-sided ranges work.
    lower = date_parse(raw_from) if raw_from != '' else datetime.date.min
    upper = date_parse(raw_to) if raw_to != '' else datetime.date.max
    log.debug('yadcf_range_date: between %s and %s', lower, upper)
    return expr.between(lower, upper)
def yadcf_multi_select(expr, value):
    """IN filter for a yadcf multi-select value 'a|b|c' (text comparison)."""
    options = value.split('|')
    log.debug('yadcf_multi_select: in %s', options)
    # Cast to Text so non-string columns compare against the raw options.
    return expr.cast(Text).in_(options)
# Map of search_method names (as accepted by ColumnDT) to callables that
# build a SQLAlchemy filter expression from (expr, value).
search_methods = {
    'none': lambda expr, value: None,
    'string_contains': lambda expr, value: expr.ilike('%' + value + '%'),
    'ilike': lambda expr, value: expr.ilike(value),
    'like': lambda expr, value: expr.like(value),
    'numeric': numeric_query,
    'date': date_query,
    'yadcf_text': lambda expr, value: expr.ilike('%' + value + '%'),
    'yadcf_autocomplete': lambda expr, value: expr == value,
    'yadcf_select': lambda expr, value: expr.ilike('%' + value + '%'),
    'yadcf_multi_select': yadcf_multi_select,
    'yadcf_range_number': yadcf_range_number,
    'yadcf_range_number_slider': yadcf_range_number,
    'yadcf_range_date': yadcf_range_date,
    # NOTE: unlike the others, 'regex' takes a third argument (the
    # dialect-specific regex operator); callers special-case it.
    'regex': lambda expr, value, op : expr.op(op)(value)
}
# Underlying namedtuple for ColumnDT; the field order must match the
# positional arguments of ColumnDT.__new__.
ColumnTuple = namedtuple(
    'ColumnDT',
    ['sqla_expr', 'column_name', 'mData', 'search_method',
     'nulls_order', 'global_search'])
class InvalidParameter(Exception):
    """Raised when a DataTables request parameter is invalid."""

    pass
class ColumnDT(ColumnTuple):
    """Class defining a DataTables Column with a ColumnTuple.

    :param sqla_expr: SQLAlchemy queryable attribute of object
        (column, column_property, hybrid property, or
        combined expression)
    :type sqla_expr: SQLAlchemy query expression
    :param mData: name of the mData property as defined in the
        DataTables javascript options (default None)
    :type mData: str
    :param search_method: Define how to interpret search values.
        Possible values: 'none', 'string_contains', 'ilike', 'like',
        'numeric', 'date', the 'yadcf_*' family and 'regex'.
    :type search_method: str
    :param nulls_order: define a sort order for the NULL values. Possible
        values: nullsfirst, nullslast. (default None)
    :type nulls_order: str
    :param global_search: search this column for the global search box
    :type global_search: bool

    :returns: a ColumnDT object
    """

    def __new__(cls, sqla_expr, column_name=None, mData=None,
                search_method='string_contains', nulls_order=None,
                global_search=True):
        """Validate the options, then build the underlying namedtuple.

        A namedtuple is used so the object representation stays JSON
        serializable.
        """
        if nulls_order and nulls_order not in ('nullsfirst', 'nullslast'):
            raise ValueError('`%s` is not an allowed value for nulls_order.'
                             % nulls_order)
        if search_method not in search_methods:
            raise ValueError('`%s` is not an allowed value for search_method.'
                             % search_method)
        return super(ColumnDT, cls).__new__(
            cls, sqla_expr, column_name, mData, search_method,
            nulls_order, global_search)
class DataTables:
"""Class defining a DataTables object.
:param request: request containing the GET values, specified by the
datatable for filtering, sorting and paging
:type request: pyramid.request
:param query: the query wanted to be seen in the the table
:type query: sqlalchemy.orm.query.Query
:param columns: columns specification for the datatables
:type columns: list
:returns: a DataTables object
"""
def __init__(self, request, query, columns,
allow_regex_searches=False):
"""Initialize object and run the query."""
self.params = dict(request)
if 'sEcho' in self.params:
raise ValueError(
'Legace datatables not supported, upgrade to >=1.10')
self.query = query
self.filtered_query = copy(query)
self.columns = columns
self.results = None
self.allow_regex_searches = allow_regex_searches
# total in the table after filtering
self.cardinality_filtered = 0
# total in the table unfiltered
self.cardinality = 0
self.yadcf_params = []
self.filter_expressions = []
self.error = None
print("try:")
try:
self.run()
except Exception as exc:
self.error = str(exc)
def output_result(self):
"""Output results in the format needed by DataTables."""
output = {}
output['draw'] = str(int(self.params['draw']))
output['recordsTotal'] = str(self.cardinality)
output['recordsFiltered'] = str(self.cardinality_filtered)
if self.error:
output['error'] = self.error
return output
output['data'] = self.results
for k, v in self.yadcf_params:
output[k] = v
return output
def _query_with_all_filters_except_one(self, query, exclude):
return query.filter(
*[e for i, e in enumerate(self.filter_expressions)
if e is not None and i is not exclude]
)
def _set_yadcf_data(self, query):
    """Populate self.yadcf_params with the data yadcf filter widgets need."""
    # determine values for yadcf filters
    for i, col in enumerate(self.columns):
        # NOTE(review): substring membership — 'yadcf_range_number' is a
        # substring of 'yadcf_range_number_slider', so BOTH range methods
        # take this branch. Confirm that is intended before tightening it
        # to an equality test.
        if col.search_method in 'yadcf_range_number_slider':
            v = query.add_columns(
                func.min(col.sqla_expr),
                func.max(col.sqla_expr)
            ).one()
            self.yadcf_params.append((
                'yadcf_data_{:d}'.format(i),
                (math.floor(v[0]), math.ceil(v[1])))
            )
        if col.search_method in ['yadcf_select', 'yadcf_multi_select',
                                 'yadcf_autocomplete']:
            # Exclude the column's own filter so its full option list
            # stays selectable.
            filtered = self._query_with_all_filters_except_one(
                query=query, exclude=i)
            v = filtered.add_columns(col.sqla_expr).distinct().all()
            self.yadcf_params.append(
                ('yadcf_data_{:d}'.format(i), [r[0] for r in v]))
def run(self):
    """Launch filtering, sorting and paging to output results."""
    query = self.query
    # count before filtering
    # NOTE(review): the unfiltered/filtered count lines below are
    # commented out, so recordsTotal/recordsFiltered stay 0 — and the
    # final ``self.results`` assignment is commented out too, so
    # output_result() emits ``data: None``. This looks like a deliberate
    # local modification; confirm whether a subclass or caller fills
    # these in before re-enabling them.
    # self.cardinality = query.add_columns(self.columns[0].sqla_expr).count()
    self._set_column_filter_expressions()
    self._set_global_filter_expression()
    self._set_sort_expressions()
    self._set_yadcf_data(query)
    # apply filters
    query = query.filter(
        *[e for e in self.filter_expressions if e is not None])
    self.filtered_query = deepcopy(query)
    # self.cardinality_filtered = query.add_columns(
    #     self.columns[0].sqla_expr).count()
    # apply sorts
    query = query.order_by(
        *[e for e in self.sort_expressions if e is not None])
    # add paging options; length == -1 disables paging entirely
    length = int(self.params.get('length'))
    if length >= 0:
        query = query.limit(length)
    elif length == -1:
        pass
    else:
        raise(ValueError(
            'Length should be a positive integer or -1 to disable'))
    query = query.offset(int(self.params.get('start')))
    # add columns to query
    query = query.add_columns(
        *[c.sqla_expr for c in self.columns])
    self.filtered_query = self.filtered_query.add_columns(
        *[c.sqla_expr for c in self.columns])
    self.query = query
    # fetch the result of the queries
    # NOTE(review): column_names is currently unused because the results
    # assignment below is commented out.
    column_names = [col.mData if col.mData else str(i)
                    for i, col in enumerate(self.columns)]
    # self.results = [{k: v for k, v in zip(
    #     column_names, row)} for row in query.all()]
def _set_column_filter_expressions(self):
    """Construct the query: per-column filtering.

    Reads each column's ``columns[i][search][value]`` request parameter
    and, when non-empty, builds the matching SQLAlchemy filter expression
    with the column's configured search method.  Exactly one entry
    (possibly ``None``) is appended to ``self.filter_expressions`` per
    column.
    """
    for idx, column in enumerate(self.columns):
        expression = None
        search_value = self.params.get(
            'columns[{:d}][search][value]'.format(idx), '')
        if search_value:
            method = search_methods[column.search_method]
            if column.search_method == 'regex':
                # Regex search additionally needs the dialect's operator.
                expression = method(column.sqla_expr, search_value,
                                    self._get_regex_operator())
            else:
                expression = method(column.sqla_expr, search_value)
        self.filter_expressions.append(expression)
def _set_global_filter_expression(self):
    """Construct the global (cross-column) search filter.

    Reads the DataTables ``search[value]`` parameter; when present, ORs a
    per-column match over every column with ``global_search`` enabled and
    appends the combined expression to ``self.filter_expressions``.  Regex
    matching is used when the client asked for it and the instance allows
    it; otherwise a case-insensitive substring match is applied.
    """
    global_search = self.params.get('search[value]', '')
    # BUG FIX: the original compared with `is ''` (identity, not equality),
    # which only works by accident of CPython string interning and emits a
    # SyntaxWarning on Python >= 3.8.
    if global_search == '':
        return
    if (self.allow_regex_searches and
            self.params.get('search[regex]') == 'true'):
        op = self._get_regex_operator()
        val = clean_regex(global_search)

        def filter_for(col):
            return col.sqla_expr.op(op)(val)
    else:
        val = '%' + global_search + '%'

        def filter_for(col):
            return col.sqla_expr.cast(Text).ilike(val)
    global_filter = [filter_for(col)
                     for col in self.columns if col.global_search]
    self.filter_expressions.append(or_(*global_filter))
def _set_sort_expressions(self):
"""Construct the query: sorting.
Add sorting(ORDER BY) on the columns needed to be applied on.
"""
sort_expressions = []
i = 0
while self.params.get('order[{:d}][column]'.format(i), False):
column_nr = int(self.params.get('order[{:d}][column]'.format(i)))
column = self.columns[column_nr]
direction = self.params.get('order[{:d}][dir]'.format(i))
sort_expr = column.sqla_expr
if direction == 'asc':
sort_expr = sort_expr.asc()
elif direction | |
<filename>cisco-ios-xr/ydk/models/cisco_ios_xr/_meta/_Cisco_IOS_XR_asr9k_lc_ethctrl_oper.py
import re
import collections
from enum import Enum
from ydk._core._dm_meta_info import _MetaInfoClassMember, _MetaInfoClass, _MetaInfoEnum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk._core._dm_meta_info import ATTRIBUTE, REFERENCE_CLASS, REFERENCE_LIST, REFERENCE_LEAFLIST, REFERENCE_IDENTITY_CLASS, REFERENCE_ENUM_CLASS, REFERENCE_BITS, REFERENCE_UNION
from ydk.errors import YPYError, YPYModelError
from ydk.providers._importer import _yang_ns
_meta_table = {
'Mlan.Nodes.Node.PortStatusNumbers.PortStatusNumber.PortStatus.Config' : {
'meta_info' : _MetaInfoClass('Mlan.Nodes.Node.PortStatusNumbers.PortStatusNumber.PortStatus.Config',
False,
[
_MetaInfoClassMember('duplex', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' duplex
''',
'duplex',
'Cisco-IOS-XR-asr9k-lc-ethctrl-oper', False),
_MetaInfoClassMember('loopback', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' loopback
''',
'loopback',
'Cisco-IOS-XR-asr9k-lc-ethctrl-oper', False),
_MetaInfoClassMember('my-pause', ATTRIBUTE, 'int' , None, None,
[('0', '65535')], [],
''' myPause
''',
'my_pause',
'Cisco-IOS-XR-asr9k-lc-ethctrl-oper', False),
_MetaInfoClassMember('pause', ATTRIBUTE, 'int' , None, None,
[('0', '65535')], [],
''' pauseEn
''',
'pause',
'Cisco-IOS-XR-asr9k-lc-ethctrl-oper', False),
_MetaInfoClassMember('speed', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' speed
''',
'speed',
'Cisco-IOS-XR-asr9k-lc-ethctrl-oper', False),
],
'Cisco-IOS-XR-asr9k-lc-ethctrl-oper',
'config',
_yang_ns._namespaces['Cisco-IOS-XR-asr9k-lc-ethctrl-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_lc_ethctrl_oper'
),
},
'Mlan.Nodes.Node.PortStatusNumbers.PortStatusNumber.PortStatus.Phy' : {
'meta_info' : _MetaInfoClass('Mlan.Nodes.Node.PortStatusNumbers.PortStatusNumber.PortStatus.Phy',
False,
[
_MetaInfoClassMember('reg', REFERENCE_LEAFLIST, 'int' , None, None,
[('0', '65535')], [],
''' reg
''',
'reg',
'Cisco-IOS-XR-asr9k-lc-ethctrl-oper', False, max_elements=32),
],
'Cisco-IOS-XR-asr9k-lc-ethctrl-oper',
'phy',
_yang_ns._namespaces['Cisco-IOS-XR-asr9k-lc-ethctrl-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_lc_ethctrl_oper'
),
},
'Mlan.Nodes.Node.PortStatusNumbers.PortStatusNumber.PortStatus.Serdes' : {
'meta_info' : _MetaInfoClass('Mlan.Nodes.Node.PortStatusNumbers.PortStatusNumber.PortStatus.Serdes',
False,
[
_MetaInfoClassMember('reg', REFERENCE_LEAFLIST, 'int' , None, None,
[('0', '65535')], [],
''' reg
''',
'reg',
'Cisco-IOS-XR-asr9k-lc-ethctrl-oper', False, max_elements=32),
],
'Cisco-IOS-XR-asr9k-lc-ethctrl-oper',
'serdes',
_yang_ns._namespaces['Cisco-IOS-XR-asr9k-lc-ethctrl-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_lc_ethctrl_oper'
),
},
'Mlan.Nodes.Node.PortStatusNumbers.PortStatusNumber.PortStatus.Mac' : {
'meta_info' : _MetaInfoClass('Mlan.Nodes.Node.PortStatusNumbers.PortStatusNumber.PortStatus.Mac',
False,
[
_MetaInfoClassMember('reg', REFERENCE_LEAFLIST, 'int' , None, None,
[('0', '65535')], [],
''' reg
''',
'reg',
'Cisco-IOS-XR-asr9k-lc-ethctrl-oper', False, max_elements=32),
],
'Cisco-IOS-XR-asr9k-lc-ethctrl-oper',
'mac',
_yang_ns._namespaces['Cisco-IOS-XR-asr9k-lc-ethctrl-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_lc_ethctrl_oper'
),
},
'Mlan.Nodes.Node.PortStatusNumbers.PortStatusNumber.PortStatus' : {
'meta_info' : _MetaInfoClass('Mlan.Nodes.Node.PortStatusNumbers.PortStatusNumber.PortStatus',
False,
[
_MetaInfoClassMember('config', REFERENCE_CLASS, 'Config' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_lc_ethctrl_oper', 'Mlan.Nodes.Node.PortStatusNumbers.PortStatusNumber.PortStatus.Config',
[], [],
''' Configuration Data
''',
'config',
'Cisco-IOS-XR-asr9k-lc-ethctrl-oper', False),
_MetaInfoClassMember('mac', REFERENCE_CLASS, 'Mac' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_lc_ethctrl_oper', 'Mlan.Nodes.Node.PortStatusNumbers.PortStatusNumber.PortStatus.Mac',
[], [],
''' MAC Registers
''',
'mac',
'Cisco-IOS-XR-asr9k-lc-ethctrl-oper', False),
_MetaInfoClassMember('mac-valid', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' MAC data valid
''',
'mac_valid',
'Cisco-IOS-XR-asr9k-lc-ethctrl-oper', False),
_MetaInfoClassMember('phy', REFERENCE_CLASS, 'Phy' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_lc_ethctrl_oper', 'Mlan.Nodes.Node.PortStatusNumbers.PortStatusNumber.PortStatus.Phy',
[], [],
''' PHY Registers
''',
'phy',
'Cisco-IOS-XR-asr9k-lc-ethctrl-oper', False),
_MetaInfoClassMember('phy-valid', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' PHY data valid
''',
'phy_valid',
'Cisco-IOS-XR-asr9k-lc-ethctrl-oper', False),
_MetaInfoClassMember('port-num', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Port Number
''',
'port_num',
'Cisco-IOS-XR-asr9k-lc-ethctrl-oper', False),
_MetaInfoClassMember('serdes', REFERENCE_CLASS, 'Serdes' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_lc_ethctrl_oper', 'Mlan.Nodes.Node.PortStatusNumbers.PortStatusNumber.PortStatus.Serdes',
[], [],
''' SERDES Registers
''',
'serdes',
'Cisco-IOS-XR-asr9k-lc-ethctrl-oper', False),
_MetaInfoClassMember('serdes-valid', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' SERDES data valid
''',
'serdes_valid',
'Cisco-IOS-XR-asr9k-lc-ethctrl-oper', False),
],
'Cisco-IOS-XR-asr9k-lc-ethctrl-oper',
'port-status',
_yang_ns._namespaces['Cisco-IOS-XR-asr9k-lc-ethctrl-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_lc_ethctrl_oper'
),
},
'Mlan.Nodes.Node.PortStatusNumbers.PortStatusNumber' : {
'meta_info' : _MetaInfoClass('Mlan.Nodes.Node.PortStatusNumbers.PortStatusNumber',
False,
[
_MetaInfoClassMember('number', ATTRIBUTE, 'int' , None, None,
[('-2147483648', '2147483647')], [],
''' port number
''',
'number',
'Cisco-IOS-XR-asr9k-lc-ethctrl-oper', True),
_MetaInfoClassMember('port-status', REFERENCE_CLASS, 'PortStatus' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_lc_ethctrl_oper', 'Mlan.Nodes.Node.PortStatusNumbers.PortStatusNumber.PortStatus',
[], [],
''' mlan port status info
''',
'port_status',
'Cisco-IOS-XR-asr9k-lc-ethctrl-oper', False),
],
'Cisco-IOS-XR-asr9k-lc-ethctrl-oper',
'port-status-number',
_yang_ns._namespaces['Cisco-IOS-XR-asr9k-lc-ethctrl-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_lc_ethctrl_oper'
),
},
'Mlan.Nodes.Node.PortStatusNumbers' : {
'meta_info' : _MetaInfoClass('Mlan.Nodes.Node.PortStatusNumbers',
False,
[
_MetaInfoClassMember('port-status-number', REFERENCE_LIST, 'PortStatusNumber' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_lc_ethctrl_oper', 'Mlan.Nodes.Node.PortStatusNumbers.PortStatusNumber',
[], [],
''' Number
''',
'port_status_number',
'Cisco-IOS-XR-asr9k-lc-ethctrl-oper', False),
],
'Cisco-IOS-XR-asr9k-lc-ethctrl-oper',
'port-status-numbers',
_yang_ns._namespaces['Cisco-IOS-XR-asr9k-lc-ethctrl-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_lc_ethctrl_oper'
),
},
'Mlan.Nodes.Node.SwitchStatusTable.SwitchStatus.SwReg1' : {
'meta_info' : _MetaInfoClass('Mlan.Nodes.Node.SwitchStatusTable.SwitchStatus.SwReg1',
False,
[
_MetaInfoClassMember('reg', REFERENCE_LEAFLIST, 'int' , None, None,
[('0', '65535')], [],
''' reg
''',
'reg',
'Cisco-IOS-XR-asr9k-lc-ethctrl-oper', False, max_elements=32),
],
'Cisco-IOS-XR-asr9k-lc-ethctrl-oper',
'sw-reg-1',
_yang_ns._namespaces['Cisco-IOS-XR-asr9k-lc-ethctrl-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_lc_ethctrl_oper'
),
},
'Mlan.Nodes.Node.SwitchStatusTable.SwitchStatus.SwReg2' : {
'meta_info' : _MetaInfoClass('Mlan.Nodes.Node.SwitchStatusTable.SwitchStatus.SwReg2',
False,
[
_MetaInfoClassMember('reg', REFERENCE_LEAFLIST, 'int' , None, None,
[('0', '65535')], [],
''' reg
''',
'reg',
'Cisco-IOS-XR-asr9k-lc-ethctrl-oper', False, max_elements=32),
],
'Cisco-IOS-XR-asr9k-lc-ethctrl-oper',
'sw-reg-2',
_yang_ns._namespaces['Cisco-IOS-XR-asr9k-lc-ethctrl-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_lc_ethctrl_oper'
),
},
'Mlan.Nodes.Node.SwitchStatusTable.SwitchStatus.SwStatus' : {
'meta_info' : _MetaInfoClass('Mlan.Nodes.Node.SwitchStatusTable.SwitchStatus.SwStatus',
False,
[
_MetaInfoClassMember('cpu-mac', ATTRIBUTE, 'int' , None, None,
[('0', '65535')], [],
''' cpu mac
''',
'cpu_mac',
'Cisco-IOS-XR-asr9k-lc-ethctrl-oper', False),
_MetaInfoClassMember('cpu-port', ATTRIBUTE, 'int' , None, None,
[('0', '65535')], [],
''' cpu port
''',
'cpu_port',
'Cisco-IOS-XR-asr9k-lc-ethctrl-oper', False),
_MetaInfoClassMember('initialized', ATTRIBUTE, 'int' , None, None,
[('0', '65535')], [],
''' initialized
''',
'initialized',
'Cisco-IOS-XR-asr9k-lc-ethctrl-oper', False),
_MetaInfoClassMember('mac', ATTRIBUTE, 'str' , None, None,
[(0, 6)], [],
''' mac
''',
'mac',
'Cisco-IOS-XR-asr9k-lc-ethctrl-oper', False),
_MetaInfoClassMember('mtu', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' mtu
''',
'mtu',
'Cisco-IOS-XR-asr9k-lc-ethctrl-oper', False),
_MetaInfoClassMember('ppu', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' ppu
''',
'ppu',
'Cisco-IOS-XR-asr9k-lc-ethctrl-oper', False),
_MetaInfoClassMember('restarted', ATTRIBUTE, 'int' , None, None,
[('0', '65535')], [],
''' restarted
''',
'restarted',
'Cisco-IOS-XR-asr9k-lc-ethctrl-oper', False),
],
'Cisco-IOS-XR-asr9k-lc-ethctrl-oper',
'sw-status',
_yang_ns._namespaces['Cisco-IOS-XR-asr9k-lc-ethctrl-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_lc_ethctrl_oper'
),
},
'Mlan.Nodes.Node.SwitchStatusTable.SwitchStatus' : {
'meta_info' : _MetaInfoClass('Mlan.Nodes.Node.SwitchStatusTable.SwitchStatus',
False,
[
_MetaInfoClassMember('rate-limit', ATTRIBUTE, 'int' , None, None,
[('-2147483648', '2147483647')], [],
''' CPU Interface Rate Limit
''',
'rate_limit',
'Cisco-IOS-XR-asr9k-lc-ethctrl-oper', False),
_MetaInfoClassMember('sw-reg-1', REFERENCE_CLASS, 'SwReg1' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_lc_ethctrl_oper', 'Mlan.Nodes.Node.SwitchStatusTable.SwitchStatus.SwReg1',
[], [],
''' Switch Global Registers
''',
'sw_reg_1',
'Cisco-IOS-XR-asr9k-lc-ethctrl-oper', False),
_MetaInfoClassMember('sw-reg-2', REFERENCE_CLASS, 'SwReg2' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_lc_ethctrl_oper', 'Mlan.Nodes.Node.SwitchStatusTable.SwitchStatus.SwReg2',
[], [],
''' Switch Global Registers
''',
'sw_reg_2',
'Cisco-IOS-XR-asr9k-lc-ethctrl-oper', False),
_MetaInfoClassMember('sw-status', REFERENCE_CLASS, 'SwStatus' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_lc_ethctrl_oper', 'Mlan.Nodes.Node.SwitchStatusTable.SwitchStatus.SwStatus',
[], [],
''' Switch Status Data
''',
'sw_status',
'Cisco-IOS-XR-asr9k-lc-ethctrl-oper', False),
],
'Cisco-IOS-XR-asr9k-lc-ethctrl-oper',
'switch-status',
_yang_ns._namespaces['Cisco-IOS-XR-asr9k-lc-ethctrl-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_lc_ethctrl_oper'
),
},
'Mlan.Nodes.Node.SwitchStatusTable' : {
'meta_info' : _MetaInfoClass('Mlan.Nodes.Node.SwitchStatusTable',
False,
[
_MetaInfoClassMember('switch-status', REFERENCE_CLASS, 'SwitchStatus' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_lc_ethctrl_oper', 'Mlan.Nodes.Node.SwitchStatusTable.SwitchStatus',
[], [],
''' mlan switch status info
''',
'switch_status',
'Cisco-IOS-XR-asr9k-lc-ethctrl-oper', False),
],
'Cisco-IOS-XR-asr9k-lc-ethctrl-oper',
'switch-status-table',
_yang_ns._namespaces['Cisco-IOS-XR-asr9k-lc-ethctrl-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_lc_ethctrl_oper'
),
},
'Mlan.Nodes.Node.PortCountersNumbers.PortCountersNumber.PortCounters.MlanStats' : {
'meta_info' : _MetaInfoClass('Mlan.Nodes.Node.PortCountersNumbers.PortCountersNumber.PortCounters.MlanStats',
False,
[
_MetaInfoClassMember('collisions', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' collisions
''',
'collisions',
'Cisco-IOS-XR-asr9k-lc-ethctrl-oper', False),
_MetaInfoClassMember('deferred', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' deferred
''',
'deferred',
'Cisco-IOS-XR-asr9k-lc-ethctrl-oper', False),
_MetaInfoClassMember('excessive', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' excessive
''',
'excessive',
'Cisco-IOS-XR-asr9k-lc-ethctrl-oper', False),
_MetaInfoClassMember('in-bad-octets', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' inBadOctets
''',
'in_bad_octets',
'Cisco-IOS-XR-asr9k-lc-ethctrl-oper', False),
_MetaInfoClassMember('in-bcast-pkt', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' inBcastPkt
''',
'in_bcast_pkt',
'Cisco-IOS-XR-asr9k-lc-ethctrl-oper', False),
_MetaInfoClassMember('in-discards', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' inDiscards
''',
'in_discards',
'Cisco-IOS-XR-asr9k-lc-ethctrl-oper', False),
_MetaInfoClassMember('in-fcs-err', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' inFcsErr
''',
'in_fcs_err',
'Cisco-IOS-XR-asr9k-lc-ethctrl-oper', False),
_MetaInfoClassMember('in-filtered', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' inFiltered
''',
'in_filtered',
'Cisco-IOS-XR-asr9k-lc-ethctrl-oper', False),
_MetaInfoClassMember('in-fragments', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' inFragments
''',
'in_fragments',
'Cisco-IOS-XR-asr9k-lc-ethctrl-oper', False),
_MetaInfoClassMember('in-good-octets', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' inGoodOctets
''',
'in_good_octets',
'Cisco-IOS-XR-asr9k-lc-ethctrl-oper', False),
_MetaInfoClassMember('in-good-octets-hi', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' inGoodOctets hi
''',
'in_good_octets_hi',
'Cisco-IOS-XR-asr9k-lc-ethctrl-oper', False),
_MetaInfoClassMember('in-jabber', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' inJabber
''',
'in_jabber',
'Cisco-IOS-XR-asr9k-lc-ethctrl-oper', False),
_MetaInfoClassMember('in-mcast-pkt', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' inMcastPkt
''',
'in_mcast_pkt',
'Cisco-IOS-XR-asr9k-lc-ethctrl-oper', False),
_MetaInfoClassMember('in-oversize', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' inOversize
''',
'in_oversize',
'Cisco-IOS-XR-asr9k-lc-ethctrl-oper', False),
_MetaInfoClassMember('in-pause-pkt', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' inPausePkt
''',
'in_pause_pkt',
'Cisco-IOS-XR-asr9k-lc-ethctrl-oper', False),
_MetaInfoClassMember('in-rx-err', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' inRxErr
''',
'in_rx_err',
'Cisco-IOS-XR-asr9k-lc-ethctrl-oper', False),
_MetaInfoClassMember('in-undersize-pkt', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' inUndersizePkt
''',
'in_undersize_pkt',
'Cisco-IOS-XR-asr9k-lc-ethctrl-oper', False),
_MetaInfoClassMember('in-unicast-pkt', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' inUnicastPkt
''',
'in_unicast_pkt',
'Cisco-IOS-XR-asr9k-lc-ethctrl-oper', False),
_MetaInfoClassMember('late', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' late
''',
'late',
'Cisco-IOS-XR-asr9k-lc-ethctrl-oper', False),
_MetaInfoClassMember('multiple', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' multiple
''',
'multiple',
'Cisco-IOS-XR-asr9k-lc-ethctrl-oper', False),
_MetaInfoClassMember('out-bcast-pkt', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' outBcastPkt
''',
'out_bcast_pkt',
'Cisco-IOS-XR-asr9k-lc-ethctrl-oper', False),
_MetaInfoClassMember('out-fcs-err', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' outFcsErr
''',
'out_fcs_err',
'Cisco-IOS-XR-asr9k-lc-ethctrl-oper', False),
_MetaInfoClassMember('out-filtered', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' outFiltered
''',
'out_filtered',
'Cisco-IOS-XR-asr9k-lc-ethctrl-oper', False),
_MetaInfoClassMember('out-mcast-pkt', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' outMcastPkt
''',
'out_mcast_pkt',
'Cisco-IOS-XR-asr9k-lc-ethctrl-oper', False),
_MetaInfoClassMember('out-octets', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' outOctets
''',
'out_octets',
'Cisco-IOS-XR-asr9k-lc-ethctrl-oper', False),
_MetaInfoClassMember('out-octets-hi', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' outOctets hi
''',
'out_octets_hi',
'Cisco-IOS-XR-asr9k-lc-ethctrl-oper', False),
_MetaInfoClassMember('out-pause-pkt', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' | |
zeff, zorig, rel,
nr, r, r2, dl, q0, xm1, xm2, njrc, vi):
'''setqmm subroutine'''
c = 137.038
alpha = rel / c
aa = alpha * alpha
a2 = aa / 2.
lp = l + 1
lpx = lp
if lp > 4:
lpx = 4
lp2 = l + l + 1
if lp2 > 7:
lp2 = 7
zeff = zorig
if njrc[lpx] > 0:
zeff = 0.
zaa = zeff * aa
za2 = zeff * a2
if idoflag:
if not njrc[lpx]:
if idoflag == 1:
for j in range(1, nr + 1):
v[j] = - zeff / r[j] + orb[j][i]
for j in range(2, nr - 1):
dvdl = (orb[j + 1][i] - orb[j - 1][i]) / (2. * dl)
ddvdrr = ((orb[j + 1][i] + orb[j - 1][i] - 2.
* orb[j][i]) / (dl * dl) - dvdl) / r2[j]
xm1[j] = -a2 * dvdl / r[j] - za2 / r2[j]
xm2[j] = -a2 * ddvdrr + zaa / r2[j] / r[j]
xm1[nr] = xm1[nr - 1]
xm2[nr] = xm2[nr - 1]
xm1[1] = xm1[2] + za2 / r2[2] - za2 / r2[1]
xm2[1] = xm2[2] - zaa / r2[2] / r[2] + zaa / r2[1] / r[1]
else:
if idoflag == 1:
for j in range(1, nr + 1):
v[j] = vi[j][lp2] + orb[j][i]
for j in range(2, nr - 1 + 1):
dvdl = (v[j + 1] - v[j - 1]) / (2. * dl)
ddvdrr = ((v[j + 1] + v[j - 1] - 2. * v[j])
/ (dl * dl) - dvdl) / r2[j]
xm1[j] = -a2 * dvdl / r[j]
xm2[j] = -a2 * ddvdrr
xm1[nr] = xm1[nr - 1]
xm2[nr] = xm2[nr - 1]
xm1[1] = xm1[2]
xm2[1] = xm2[2]
# figure out the (Desclaux-Numerov) effective potential.
xlb = l + pow(0.5, 2.) / 2.
for j in range(1, nr + 1):
vj = v[j]
q0[j] = vj * (1. - a2 * vj) + xlb / r2[j]
return (i, orb, l, ns, idoflag, v, zeff, zorig, rel,
nr, r, r2, dl, q0, xm1, xm2, njrc, vi)
def initiali(zorig, nr, rmin, rmax, r, dr, r2, dl, njrc=[None] * 4,
             xntot=0., nel=0, input_stream='stdin'):
    '''
    Description
    -----------
    Initialise the radial charge grid.

    Reads Z and NR either interactively (``input_stream='stdin'``) or from
    a file-like/iterator object, derives rmin/rmax from Z, builds the
    logarithmic grid via :func:`setgrid`, and zeroes the ``njrc`` counters.

    Parameters
    ----------
    zorig : float
        Atomic number (overwritten by the value read from input).
    nr : int
        Number of radial grid points (overwritten by the value read).
    rmin : float
        Minimum radius (recomputed as 0.0001 / Z).
    rmax : float
        Maximum radius (recomputed as 800 / sqrt(Z)).
    r, dr, r2 : list
        Dummy lists populated by setgrid.
    dl : float
        Dummy float, replaced by the grid's log step.
    njrc : list
        Counters, reset to all zeros (a fresh list is returned; the
        mutable default is never mutated).
    xntot : float
    nel : int
    input_stream : str or iterator
        ``'stdin'`` for interactive input, otherwise an object yielding
        lines (anything after '!' on the line is treated as a comment).

    Returns
    -------
    tuple : (zorig, nr, rmin, rmax, r, dr, r2, dl, njrc, xntot, nel)

    Raises
    ------
    IOError
        If ``input_stream`` is neither 'stdin' nor iterable.
    '''
    if input_stream == 'stdin':
        (zorig, nr) = [t(s) for t, s in zip((float, int),
                       get_input('Enter Z, NR: ').split())]
    elif hasattr(input_stream, 'next') or hasattr(input_stream, '__next__'):
        # BUG FIX: the original tested `isinstance(input_stream, file)` and
        # called `.next()` -- both Python-2-only (NameError on Python 3).
        # Duck-type the iterator and use the built-in next(), which works
        # on both Python 2 and 3.
        (zorig, nr) = [t(s) for t, s in zip((float, int),
                       next(input_stream).split('!')[0].split())]
    else:
        raise IOError("input stream is not a file handle or 'stdin'")
    rmin = 0.0001 / zorig
    rmax = 800. / sqrt(zorig)
    nr, rmin, rmax, r, dr, r2, dl = setgrid(nr, rmin, rmax, r, dr, r2, dl)
    # BUG FIX: the original `njrc[j] = [0 for j in range(len(njrc))]` raised
    # NameError (j is undefined at the assignment target) and would have
    # stored a nested list; the intent is to zero every counter.  Rebinding
    # also avoids mutating the shared mutable default argument.
    njrc = [0] * len(njrc)
    return (zorig, nr, rmin, rmax, r, dr, r2, dl, njrc, xntot, nel)
def setgrid(nr, rmin, rmax, r, dr, r2, dl):
    '''Set the logarithmic radial grid values.

    Builds r[i] = rmin * xratio**i (i.e. rmin * exp(i * dl)) with
    dl = ln(rmax / rmin) / nr, filling the supplied lists in place:
    r (radii), dr (integration weights r[i] * (sqrt(xratio) - 1/sqrt(xratio)))
    and r2 (r squared).

    Returns the tuple (nr, rmin, rmax, r, dr, r2, dl).
    '''
    ratio = rmax / rmin
    dl = log(ratio) / float(nr)
    xratio = exp(dl)
    xr1 = sqrt(xratio) - sqrt(1. / xratio)
    for i in range(len(r)):
        # BUG FIX: the original computed pow(rmin * xratio, i), i.e.
        # (rmin * xratio)**i, which is not a logarithmic grid at all
        # (r[0] would be 1 regardless of rmin).  The grid point is
        # rmin * xratio**i.
        r[i] = rmin * pow(xratio, i)
        dr[i] = r[i] * xr1
        r2[i] = r[i] * r[i]
    return (nr, rmin, rmax, r, dr, r2, dl)
def integ(e, l, xkappa, n, nn, istop, ief, x0, phi, z, v, q0, xm1,
xm2, nr, r, dr, r2, dl, rel):
'''integrate out count nodes'''
dl2 = dl * dl / 12.
dl5 = 10. * dl2
c = 137.038
alpha = rel / c
za2 = z * z * alpha * alpha
a2 = alpha * alpha / 2.
xl = l
xlp = l + 1
xl2 = 0.5 + xl
xl4 = xl2 * xl2
# we set up the leading power.
# adjust for Desclaux's implementation of Numerov.
if rel == 0.:
ss = xlp
else:
rtest = 1. - za2
if rtest < 0.:
print('Z>137 is too big.')
sys.exit(1)
ss = sqrt(rtest)
ss2 = ss - 0.5
# we shall set ief to -1 if energy is too low, +1 if too high.
ief = 0
# see Desclaux and documentation to see the origin of the below equations.
# here, we set up the first two points.
t = e - v(1)
xm0 = 1. + a2 * t
tm = xm0 + xm0
xmx = xm1[1] / xm0
xk0 = r2[1] * (tm * t - xmx * (xkappa / r[1] + 0.75 * xmx)
+ xm2[1] / tm) - xl4
dk0 = 1. + dl2 * xk0
p0 = dk0
phi[1] = p0 * sqrt(xm0 * r[1]) / dk0
t = e - v[2]
xm = 1. + a2 * t
tm = xm + xm
xmx = xm1[2] / xm
xk2 = r2[2] * (tm * t - xmx * (xkappa / r[2] + 0.75 * xmx)
+ xm2[2] / tm) - xl4
dk2 = 1. + dl2 * xk2
p1 = dk2 * pow((r[2] / r[1], ss2) - (r[2] - r[1]) * z
/ xlp) * sqrt(xm0 / xm)
phi[2] = p1 * sqrt(xm * r[2]) / dk2
# if istop is set, the we know to stop there. If it is zero, it shall
# be set to the classical turning point.
is0 = istop
if not istop:
for j in range(nr - 1, 2 - 1, -1):
if e > v[j]:
break
ief = -1
return ief
istop = j
# initialize number of nodes, and determine the ideal number.
nn = 0
nnideal = n - l - 1
# integrate out count nodes, and stop along the way if there are too many
for i in range(3, istop + 2 + 1):
t = e - v[i]
xm = 1. + a2 * t
tm = xm + xm
xmx = xm1[i] / xm
p2 = (2. - dl5 * xk2) * p1 / dk2 - p0
xk2 = r2[i] * (tm * t - xmx * (xkappa / r[i] + 0.75 * xmx)
+ xm2[i] / tm) - xl4
dk2 = 1. + dl2 * xk2
phi[i] = p2 * sqrt(xm * r[i]) / dk2
if abs(p2) > 10000000000.:
for j in range(1, i + 1):
phi[j] /= p2
p0 /= p2
p1 /= p2
p2 /= p2
if p2 * p1 < 0.:
nn += 1
if nn > nnideal:
ief = 1
return ief
p0 = p1
p1 = p2
if istop > 0:
psip2 = (phi[istop + 2] - phi[istop - 2])
psip1 = (phi[istop + 1] - phi[istop - 1])
psip = (8. * psip1 - psip2) / (12. * dl * r[istop])
x0 = psip / phi[istop]
if not is0:
return
for i in range(istop + 3, nr - 1 + 1):
t = e - v[i]
xm = 1. + a2 * t
tm = xm + xm
xmx = xm1[i] / xm
p2 = (2. - dl5 * xk2) * p1 / dk2 - p0
if p2 / p1 > 1.:
ief = -1
return ief
xk2 = r2[i] * (tm * t - xmx * (xkappa / | |
<reponame>wawzat/pa-get-data<gh_stars>1-10
# Get PurpleAir sensor data from PurpleAir and Thingspeak API's for year-month and store as csv files
# similar to downloaded CSV's from PurpleAir map list.
# Rename config_template.py to config.py and edit to include PurpleAir keys, directory path and bounding box variables.
# <NAME> 20210120
# Todo: Handle daylight savings offset in transition months
import logging
import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
import json
import os
import sys
import io
import math
import config
import argparse
import pandas as pd
import calendar
from datetime import datetime
import pytz
from timezonefinder import TimezoneFinder
from dateutil.relativedelta import relativedelta
if sys.platform == 'win32':
import winsound
# Initialize your own logger.
# Module-level side effects: configures the 'pa_get_data' logger to emit
# DEBUG-and-above records to stderr with a timestamped format.
logger = logging.getLogger('pa_get_data')
logger.setLevel(logging.DEBUG)
# Silence other loggers
# NOTE(review): this disables every logger already registered at import time
# (e.g. from requests/urllib3); loggers created *after* this point are
# unaffected -- confirm that is the intent.
for log_name, log_obj in logging.Logger.manager.loggerDict.items():
    if log_name != 'pa_get_data':
        log_obj.disabled = True
#logger.setLevel(format='%(levelname)s:%(message)s', level=logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s \n %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)

# Change this variable to point to the desired directory variable in config.py.
# (config.matrix5 is presumably an output-directory path string defined in
# config.py -- not visible from this file.)
data_directory = config.matrix5
def valid_date(s):
    """Return *s* parsed as a datetime in 'YYYY-MM-DD HH:MM:SS' format.

    argparse ``type=`` helper for --startdate/--enddate: a malformed value
    is reported as an ArgumentTypeError so the parser prints a usage error.
    """
    try:
        return datetime.strptime(s, "%Y-%m-%d %H:%M:%S")
    except ValueError:
        raise argparse.ArgumentTypeError("Not a valid date: '{0}'.".format(s))
def get_arguments():
    """Parse and return the command-line options for pa_get_data.

    All per-option help is suppressed from argparse's generated listing;
    the full usage text is supplied wholesale in the argument-group
    description below (rendered verbatim via RawDescriptionHelpFormatter).

    Returns
    -------
    argparse.Namespace
        Fields: bbox, yr, mnth, interval, channel, group, all,
        sensor_type, regional, folder, startdate, enddate.
    """
    # NOTE(review): the usage string has mismatched brackets ("[r]",
    # "[f <folder>}", "[-s <start>}") -- cosmetic only, fix separately.
    parser = argparse.ArgumentParser(
        description='Dowload PurpleAir csv files for sensors in bounding box during year and month provided.',
        prog='pa_get_data',
        usage='%(prog)s [-b <bbox>], [-y <year>], [-m <month>], [-i <interval>], [-c <channel>], [-g <group>], [-a], [r], [f <folder>}, [-s <start>}, [-e <end>]',
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    g=parser.add_argument_group(title='arguments',
        description=''' -b, --bbox bbox format: Lower_Left_lon Lower_Left_lat Upper_Right_lon Upper_Right_lat
 -y, --year optional. year to get data for.
 -m --month optional. month to get data for.
 -i --interval optional. data average interval. minutes. default 2 min
 -c --channel channel to get (a or b). default a.
 -g --group group to get (primary or secondary). default p.
 -a --all get all channels and groups.
 -t --type optional. choices = i, o, b. i.e., (i)ndoor, (o)outdoor, (b)oth. default o.
 -r --regional get data for regions defined in config.py bbox_dict.
 -f --folder folder prefex to save data in
 -s --start optional. start date. format "YYYY-MM-DD HH:MM:SS" include quotes.
 -e --end optional. end date. format "YYYY-MM-DD HH:MM:SS" include quotes ''')
    # Bounding box: four floats (LL lon, LL lat, UR lon, UR lat);
    # default False means "not supplied".
    g.add_argument('-b', '--bbox',
                   type=float,
                   nargs=4,
                   dest='bbox',
                   default=False,
                   help=argparse.SUPPRESS)
    # Year/month default to 0, meaning "use explicit start/end dates instead".
    g.add_argument('-y', '--year',
                   type=int,
                   default=0,
                   dest='yr',
                   help=argparse.SUPPRESS)
    g.add_argument('-m', '--month',
                   type=int,
                   default=0,
                   dest='mnth',
                   help=argparse.SUPPRESS)
    # Averaging interval in minutes; kept as a string because it is
    # interpolated directly into the Thingspeak request URL.
    g.add_argument('-i', '--interval',
                   type=str,
                   default = '2',
                   dest='interval',
                   help=argparse.SUPPRESS)
    g.add_argument('-c', '--channel',
                   type=str,
                   dest='channel',
                   default='a',
                   choices=['a', 'b'],
                   help=argparse.SUPPRESS)
    g.add_argument('-g', '--group',
                   type=str,
                   dest='group',
                   default='p',
                   choices=['p', 's'],
                   help=argparse.SUPPRESS)
    g.add_argument('-a', '--all',
                   action='store_true',
                   dest='all',
                   help=argparse.SUPPRESS)
    g.add_argument('-t', '--type',
                   type=str,
                   default='o',
                   choices = [
                       'i', 'o', 'b'
                   ],
                   metavar='',
                   dest='sensor_type',
                   help=argparse.SUPPRESS)
    # (Stray trailing comma below builds a harmless one-element tuple.)
    g.add_argument('-r', '--regional',
                   action='store_true',
                   dest='regional',
                   help=argparse.SUPPRESS),
    g.add_argument('-f', '--folder',
                   type=str,
                   dest='folder',
                   default=False,
                   help=argparse.SUPPRESS)
    # Start/end parsed by the valid_date helper defined above.
    g.add_argument('-s', '--startdate',
                   type=valid_date,
                   help=argparse.SUPPRESS)
    g.add_argument('-e', '--enddate',
                   type=valid_date,
                   help=argparse.SUPPRESS)
    args = parser.parse_args()
    return(args)
def get_sensor_indexes(bbox, sensor_type):
    """Look up PurpleAir sensors inside *bbox* and return their metadata rows.

    Parameters
    ----------
    bbox : sequence of four floats
        Lower-left lon, lower-left lat, upper-right lon, upper-right lat
        (mapped to the API's nwlng/selat/selng/nwlat corner parameters).
    sensor_type : str
        'o' = outdoor only, 'i' = indoor only, 'b' = both.

    Returns
    -------
    list
        One list per matching sensor in the field order requested below
        (sensor_index is prepended automatically by the API; location_type
        is at index 12).  Returns an empty list when the request fails, so
        callers can safely take len() or iterate.
    """
    indoor = 1
    outdoor = 0
    # Session with retries for transient connection failures.
    session = requests.Session()
    retry = Retry(connect=3, backoff_factor=0.5)
    adapter = HTTPAdapter(max_retries=retry)
    session.mount('http://', adapter)
    session.mount('https://', adapter)
    root_url = "https://api.purpleair.com/v1/sensors"
    # sensor_index is returned automatically and doesn't need to be
    # included in params fields.
    params = {
        'fields': "name,latitude,longitude,primary_id_a,primary_key_a,primary_id_b,primary_key_b,secondary_id_a,secondary_key_a,secondary_id_b,secondary_key_b,location_type",
        'nwlng': bbox[0],
        'selat': bbox[1],
        'selng': bbox[2],
        'nwlat': bbox[3]
    }
    url_template = root_url + "?fields={fields}&nwlng={nwlng}&nwlat={nwlat}&selng={selng}&selat={selat}"
    url = url_template.format(**params)
    # BUG FIX: sensor_ids is initialised outside the try block and returned
    # on every path; the original fell off the end and returned None on any
    # failure or non-200 response, crashing later len()/iteration calls.
    sensor_ids = []
    try:
        header = {"X-API-Key": config.PURPLEAIR_READ_KEY}
        response = session.get(url, headers=header)
        if response.status_code == 200:
            sensors_data = json.loads(response.text)
            for sensor_list in sensors_data['data']:
                if sensor_type == 'o' or sensor_type == 'b':
                    if sensor_list[12] == outdoor:
                        sensor_ids.append(sensor_list)
                if sensor_type == 'i' or sensor_type == 'b':
                    if sensor_list[12] == indoor:
                        sensor_ids.append(sensor_list)
            print(" ")
            logger.debug(sensor_ids)
            print(" ")
        else:
            print("error no 200 response.")
    except Exception as e:
        # Deliberately broad: this is a best-effort CLI fetch; report the
        # error and return whatever was collected (an empty list).
        print(e)
    return sensor_ids
def date_range(start, end, intv):
    """Yield intv+1 'YYYYMMDD' strings splitting [start, end] evenly.

    The first *intv* values are the evenly spaced sub-interval start dates
    beginning at *start*; the final value is *end* itself, so consecutive
    pairs of yielded values cover the whole range.
    """
    step = (end - start) / intv
    for k in range(intv):
        yield (start + step * k).strftime("%Y%m%d")
    yield end.strftime("%Y%m%d")
def get_ts_data(args, groups_channels, request_num, sensor_ids, data_directory, interval, yr, mnth, startdate, enddate, channel, group, suffix, folder):
num_sensors = len(sensor_ids)
session = requests.Session()
retry = Retry(connect=3, backoff_factor=0.5)
adapter = HTTPAdapter(max_retries=retry)
session.mount('http://', adapter)
session.mount('https://', adapter)
for sensor in sensor_ids:
sensor_name = sensor[1]
latitude = sensor[2]
longitude = sensor[3]
tf = TimezoneFinder()
data_tz = tf.timezone_at(lng=longitude, lat=latitude)
#print(data_tz)
if group == 'p':
group_str = "Primary"
elif group == 's':
group_str = "Secondary"
if channel == 'a':
channel_str = 'a'
elif channel == 'b':
channel_str = 'b'
end_date = enddate
if args.yr != 0 or args.mnth !=0:
tz_offset = int(pytz.timezone(data_tz).localize(datetime(yr,mnth,1)).strftime('%z')) * -1 // 100
time_offset = '0' + str(tz_offset) + ':00:00'
# returns a tuple (first_day_of_month, last_day_of_month)
mnth_range = calendar.monthrange(yr, mnth)
start_date_str = str(yr) + "-" + str(mnth) + "-" + "01"
start_date = datetime.strptime(start_date_str, "%Y-%m-%d")
#end_date_str = str(yr) + "-" + str(mnth+1) + "-" + '1'
#end_date = datetime.strptime(end_date_str, "%Y-%m-%d")
end_date = start_date + relativedelta(months=+1)
delta = end_date - start_date
rows = delta.days * 24 * 60 / int(interval)
intv = int(math.ceil(rows / 7800))
if intv < 1:
intv = 1
filename_template = '{sensor_name} ({lat} {lon}) {group} {mnth}_{first_day}_{yr} {mnth}_{last_day}_{yr}_{channel}.csv'
params = {
'sensor_name': sensor_name,
'lat': str(latitude),
'lon': str(longitude),
'group': group_str,
'mnth': str(mnth),
'first_day': "01",
'last_day': str(mnth_range[1]),
'yr': str(yr),
'channel': channel_str
}
filename = filename_template.format(**params)
elif startdate and enddate:
delta = enddate - startdate
rows = delta.days * 24 * 60 / int(interval)
intv = int(math.ceil(rows / 7800))
if intv < 1:
intv = 1
yr = startdate.year
mnth = startdate.month
tz_offset = int(pytz.timezone(data_tz).localize(datetime(yr,mnth,1)).strftime('%z')) * -1 // 100
time_offset = '0' + str(tz_offset) + ':00:00'
start_date = startdate
if enddate > datetime.now():
enddate = datetime.now()
filename_template = '{sensor_name} ({lat} {lon}) {group} {first_date} {last_date}_{channel}.csv'
params = {
'sensor_name': sensor_name,
'lat': str(latitude),
'lon': str(longitude),
'group': group_str,
'first_date': startdate.strftime('%Y%m%d'),
'last_date': enddate.strftime('%Y%m%d'),
'channel': channel_str
}
filename = filename_template.format(**params)
# remove forbidden characters in filename
bad_filename_chars = dict((ord(char), None) for char in '\/*?:"<>|')
filename = filename.translate(bad_filename_chars)
data_range = list(date_range(start_date, end_date, intv))
#output_folder = start_date.strftime('%Y-%m')
if folder:
output_folder = folder + " " + start_date.strftime('%Y-%m') + suffix
elif args.yr != 0 or args.mnth != 0:
output_folder = start_date.strftime('%Y-%m') + suffix
else:
output_folder = start_date.strftime('%Y-%m') + '_' + end_date.strftime('%Y-%m') + suffix
output_path = data_directory + os.path.sep + output_folder
if not os.path.isdir(output_path):
os.mkdir(output_path)
output_pathname = output_path + os.path.sep + filename
for t in range(0, intv):
request_num += 1
root_url = 'https://api.thingspeak.com/channels/{channel}/feeds.csv?api_key={api_key}&start={start}%20{offset}&end={end}%20{offset}&average={average}'
if channel == 'a' and group == 'p':
channel_id = sensor[4] # primary channel A
api_key = sensor[5] # primary channel A
elif channel == 'b' and group == 'p':
channel_id = sensor[6] # primary channel B
api_key = sensor[7] # primary channel B
start_date = data_range[t]
if channel == 'a' and group == 's':
channel_id = sensor[8] # secondary channel A
api_key = sensor[9] # secondary channel A
elif channel == 'b' and group == 's':
channel_id = sensor[10] # secondary channel B
api_key = sensor[11] # secondary channel B
end_date = data_range[t+1]
params = {
'channel': channel_id,
'api_key': api_key,
'start': start_date,
'end': end_date,
'offset': time_offset,
'average': interval
}
url = root_url.format(**params)
print(f"{yr}-{mnth} {request_num} of {num_sensors * groups_channels * intv} : {url}")
if t == 0:
response = session.get(url)
url_data = response.content
df = pd.read_csv(io.StringIO(url_data.decode('utf-8')))
else:
response = session.get(url)
url_data = response.content
df = pd.concat([df, pd.read_csv(io.StringIO(url_data.decode('utf-8')))])
if channel == 'a' and group == 'p':
mapping = {
'created_at': 'created_at',
'entry_id': 'entry_id',
'field1': 'PM1.0_CF1_ug/m3',
'field2': 'PM2.5_CF1_ug/m3',
'field3': 'PM10.0_CF1_ug/m3',
'field4': 'UptimeMinutes',
'field5': 'RSSI_dbm',
'field6': 'Temperature_F',
'field7': 'Humidity_%',
'field8': 'PM2.5_ATM_ug/m3'
}
elif channel == 'b' and group == 'p':
mapping = {
'created_at': 'created_at',
'entry_id': 'entry_id',
'field1': 'PM1.0_CF1_ug/m3',
'field2': 'PM2.5_CF1_ug/m3',
'field3': 'PM10.0_CF1_ug/m3',
'field4': 'Free_Mem',
'field5': 'ADC',
'field6': 'Pressure_hpa',
'field7': 'IAQ',
'field8': 'PM2.5_ATM_ug/m3'
}
if channel == 'a' and group == 's':
mapping = {
'created_at': 'created_at',
'entry_id': 'entry_id',
'field1': '>=0.3um/dl',
'field2': '>=0.5um/dl',
'field3': '>=1.0um/dl',
'field4': '>=2.5um/dl',
'field5': '>=5.0um/dl',
'field6': '>=10.0um/dl',
'field7': 'PM1.0_ATM_ug/m3',
'field8': 'PM10_ATM_ug/m3'
}
if channel == 'b' and group == 's':
mapping = {
'created_at': 'created_at',
'entry_id': 'entry_id',
'field1': '>=0.3um/dl',
'field2': '>=0.5um/dl',
'field3': '>=1.0um/dl',
'field4': '>=2.5um/dl',
'field5': '>=5.0um/dl',
'field6': '>=10.0um/dl',
'field7': 'PM1.0_ATM_ug/m3',
'field8': 'PM10_ATM_ug/m3'
}
df = df.rename(columns=mapping)
#print(" ")
#print(df)
if not df.empty:
df.to_csv(output_pathname, index=False, header=True)
return request_num
#Main
args = get_arguments()
#yrs = [2018, 2019, 2020]
#args.channel = 'b'
#args.group = 's'
#for yr in yrs:
#mnth = 1
#while mnth <= 12:
#get_ts_data(sensor_ids, data_directory, yr, mnth, args.channel, args.group)
#mnth += 1
if args.regional:
for key, value in config.bbox_dict.items():
#print(value[0])
groups_channels = len(groups) + len(channels)
sensor_ids = get_sensor_indexes(value[0], args.sensor_type)
request_num = 0
if args.all:
channels = ['a', 'b']
groups | |
<gh_stars>1-10
# replicas.models
# Database models for the replicas app.
#
# Author: <NAME> <<EMAIL>>
# Created: Wed Jun 06 22:20:21 2018 -0400
#
# ID: models.py [] <EMAIL> $
"""
Database models for the replicas app.
"""
##########################################################################
## Imports
##########################################################################
from django.db import models
from django.urls import reverse
from django.db import transaction
from model_utils import FieldTracker
from model_utils.models import TimeStampedModel
from datetime import timedelta
from .utils import Health, utcnow
from .managers import ReplicaManager
from .utils import ONLINE_THRESHOLD, OFFLINE_THRESHOLD
##########################################################################
## Replica Object
##########################################################################
class Replica(TimeStampedModel):
    """
    A replica represents a single distributed process running on a host machine
    managed by Kahu. Unlike previous versions of Kahu, there is no disambiguation
    between machine and replica (so if multiple processes are running on the
    same machine, there will be data duplication). However, because for the
    most part there is a one-to-one relationship between replica process and
    host machine (particularly when running on virtual instances in the cloud),
    this duplication is acceptable.
    """
    # -- Identity -----------------------------------------------------------
    # `name` is the unique slug used in URLs (see get_absolute_url).
    name = models.SlugField(
        max_length=255, null=False, blank=True, unique=True,
        help_text="unique name of the replica (hostname-pid) by default"
    )
    precedence = models.PositiveSmallIntegerField(
        default=0, blank=True,
        help_text="the precedence of the replica over other replicas (PID)"
    )
    hostname = models.CharField(
        max_length=255, null=True, blank=True,
        help_text="identifying name of the host machine"
    )
    domain = models.CharField(
        max_length=255, null=True, blank=True, default="",
        help_text="domain name for the specified host if supplied"
    )
    # -- Network endpoint ---------------------------------------------------
    ip_address = models.GenericIPAddressField(
        null=False, blank=False, verbose_name="IP Address",
        help_text="the external IP address replicas can connect to"
    )
    port = models.IntegerField(
        default=3264, blank=False, null=False,
        help_text="the port the replica is listening for consensus on"
    )
    description = models.TextField(
        max_length=512, blank=True,
        help_text="describe the replica or machine resources and host"
    )
    # PROTECT: a Location cannot be deleted while replicas still reference it.
    location = models.ForeignKey(
        'replicas.Location', null=True, blank=True,
        on_delete=models.PROTECT, related_name='replicas',
        help_text="the GeoIP location of the replica",
    )
    # -- Liveness / authentication ------------------------------------------
    active = models.BooleanField(
        default=True,
        help_text="the replica is currently under management"
    )
    last_seen = models.DateTimeField(
        editable=False, null=True,
        help_text="the time of the last ping from the replica"
    )
    api_key = models.CharField(
        max_length=45, null=False, editable=False, unique=True,
        help_text="generated API key that identifies a host"
    )
    # Replicas manager
    objects = ReplicaManager()
    # Field Tracker (to detect changes, e.g. with IP address)
    tracker = FieldTracker()
    class Meta:
        db_table = "replicas"
        get_latest_by = "last_seen"
        ordering = ("precedence", "last_seen")
    @property
    def address(self):
        """
        Composes the "host:port" address of the replica. Currently built from
        the IP address only; preferring the domain name is still a TODO.
        """
        # TODO: better domain/IP address handling
        return "{}:{}".format(self.ip_address, self.port)
    def health(self):
        """
        Returns the status of the replica based on the last seen date
        """
        # No ping recorded yet: health cannot be determined.
        if not self.last_seen:
            return Health.UNKNOWN
        # Compare elapsed seconds since the last ping against the configured
        # thresholds (ONLINE_THRESHOLD <= OFFLINE_THRESHOLD expected).
        delta = utcnow() - self.last_seen
        if delta.total_seconds() <= ONLINE_THRESHOLD:
            return Health.ONLINE
        if delta.total_seconds() <= OFFLINE_THRESHOLD:
            return Health.UNRESPONSIVE
        return Health.OFFLINE
    def get_absolute_url(self):
        """Return the canonical detail-page URL, keyed by the name slug."""
        return reverse('replica-detail', kwargs={'slug': self.name})
    def __str__(self):
        """Human-readable identification: name plus network endpoint."""
        return "{} ({}:{})".format(self.name, self.ip_address, self.port)
##########################################################################
## GeoIP Location
##########################################################################
class Location(TimeStampedModel):
    """
    A GeoIP-derived location record associated with the IP addresses of
    replicas. Locations are normalized into their own table (rather than
    stored per-replica) because many replicas are expected to be colocated.
    """
    location = models.CharField(
        default="", blank=True, null=True, max_length=255,
        help_text="text description of the location (e.g. city, state)"
    )
    latitude = models.DecimalField(
        decimal_places=6, max_digits=10, blank=False, null=False,
        help_text="latitude of the location"
    )
    longitude = models.DecimalField(
        decimal_places=6, max_digits=10, blank=False, null=False,
        help_text="longitude of the location"
    )
    class Meta:
        db_table = "locations"
        get_latest_by = "modified"
        unique_together = ("latitude", "longitude")
    def get_marker(self):
        """
        Build the JSON-serializable Google Maps marker for this location,
        listing the hostnames of all active replicas pinned here.
        """
        hosts = [replica.hostname for replica in self.replicas.active().all()]
        return {
            "lat": float(self.latitude),
            "lng": float(self.longitude),
            "title": self.location,
            "replicas": hosts,
        }
    def __str__(self):
        # Prefer the textual description; fall back to raw coordinates.
        if not self.location:
            return "{}, {}".format(self.latitude, self.longitude)
        return self.location
##########################################################################
## Ping Latencies
##########################################################################
class Latency(TimeStampedModel):
    """
    Stores an online computation of the distribution of latencies between two
    replicas over time. The distribution can be updated by a single ping delay
    and all distributed statistics will be recomputed.
    """
    source = models.ForeignKey(
        'replicas.Replica', null=False, blank=False,
        on_delete=models.CASCADE, related_name='latencies',
        help_text="the replica where the ping originates",
    )
    target = models.ForeignKey(
        'replicas.Replica', null=False, blank=False,
        on_delete=models.CASCADE, related_name='+',
        help_text="the target of the ping to measure RTT to",
    )
    messages = models.BigIntegerField(
        default=0, null=False, blank=False,
        help_text="the number of successful pings (excludes timeouts)"
    )
    timeouts = models.BigIntegerField(
        default=0, null=False, blank=False,
        help_text="the number of unsuccessful pings"
    )
    total = models.FloatField(
        default=0.0, null=False, blank=False,
        help_text="the sum of all latencies in milliseconds"
    )
    squares = models.FloatField(
        default=0.0, null=False, blank=False,
        help_text="the sum of all latencies squared in milliseconds"
    )
    fastest = models.FloatField(
        default=0.0, null=False, blank=False,
        help_text="the minimum latency in milliseconds"
    )
    slowest = models.FloatField(
        default=0.0, null=False, blank=False,
        help_text="the maximum latency in milliseconds"
    )
    # Computed fields (presumably maintained by a pre-save signal -- see the
    # update_from_ping docstring); not editable directly.
    mean = models.FloatField(
        default=0.0, null=False, editable=False,
        help_text="the computed latency mean in milliseconds"
    )
    stddev = models.FloatField(
        default=0.0, null=False, editable=False,
        help_text="the computed latency standard deviation in milliseconds"
    )
    variance = models.FloatField(
        default=0.0, null=False, editable=False,
        help_text="the computed latency variance in milliseconds"
    )
    range = models.FloatField(
        default=0.0, null=False, editable=False,
        help_text="the computed latency range in milliseconds"
    )
    class Meta:
        db_table = "latencies"
        get_latest_by = "modified"
        ordering = ("-modified",)
        unique_together = ("source", "target")
        verbose_name_plural = "latencies"
    @classmethod
    def update_from_ping(cls, id, latency, timeout=False):
        """
        Create or update a latency record between the source and target
        replicas by incrementing the number of messages, the total, the sum of
        squares values and the fastest and slowest values.
        The pre-save latency signal will update all other computed statistics.
        If latency is <= 0 -- this is considered a timeout and timeouts will
        be incremented instead of messages and distribution statistics.
        This method uses select_for_update to ensure that the database table
        is locked to prevent concurrent updates to the table from becoming
        inconsistent with respect to the data collected.
        """
        with transaction.atomic():
            record = cls.objects.select_for_update().get(id=id)
            if timeout or latency <= 0.0:
                record.timeouts += 1
            else:
                record.messages += 1
                record.total += latency
                record.squares += (latency * latency)
                # On the first message there is no previous extremum to keep.
                if record.messages == 1 or latency < record.fastest:
                    record.fastest = latency
                if record.messages == 1 or latency > record.slowest:
                    record.slowest = latency
            record.save()
        return record
    def __str__(self):
        # BUG FIX: the template was returned without interpolation, so every
        # Latency printed as the literal format string. Fill in the endpoints
        # and the computed mean/stddev.
        return "{} ⇄ {} μ={:0.3f}ms σ={:0.3f}ms".format(
            self.source, self.target, self.mean, self.stddev)
##########################################################################
## System Status
##########################################################################
class SystemStatus(TimeStampedModel):
"""
A representation of the halth and status of the underlying machine
environment that a replica is running on. Currently we don't maintain a
history of the system status over time, but instead maintain a snapshot of
the status of the replica from the last time the health check was
performed on the machine and reported.
"""
replica = models.OneToOneField(
'replicas.Replica', primary_key=True,
on_delete=models.CASCADE, related_name='status',
help_text="the replica running on this system",
)
hostname = models.CharField(
max_length=255, null=True, blank=True,
help_text="hostname identified by OS"
)
os = models.CharField(
max_length=255, null=True, blank=True,
help_text="operating system name, e.g. darwin, linux"
)
platform = models.CharField(
max_length=255, null=True, blank=True,
help_text="specific os version e.g. ubuntu, linuxmint"
)
platform_version = models.CharField(
max_length=255, null=True, blank=True,
help_text="operating system version number"
)
active_procs = models.IntegerField(
null=True, blank=True,
help_text="number of active processes"
)
uptime = models.BigIntegerField(
null=True, blank=True,
help_text="number of seconds the host has been online"
)
total_ram = models.BigIntegerField(
null=True, blank=True,
help_text="total amount of RAM on the system"
)
available_ram = models.BigIntegerField(
null=True, blank=True,
help_text="RAM available for programs to allocate (from kernel)"
)
used_ram = models.BigIntegerField(
null=True, blank=True,
help_text="amount of RAM used by programs (from kernel)"
)
used_ram_percent = models.FloatField(
null=True, blank=True,
help_text="percentage of RAM used by programs"
)
filesystem = models.CharField(
max_length=255, null=True, blank=True,
help_text="the type of filesystem at root"
)
total_disk = models.BigIntegerField(
null=True, blank=True,
help_text="total amount of disk space available at root directory"
)
free_disk = models.BigIntegerField(
null=True, blank=True,
help_text="total amount of unused disk space at root directory"
)
used_disk = models.BigIntegerField(
null=True, blank=True,
help_text="total amount of disk space used by root directory"
)
used_disk_percent = models.FloatField(
null=True, blank=True,
help_text="percentage of disk space used by root directory"
)
cpu_model = models.CharField(
max_length=255, null=True, blank=True,
help_text="the model of CPU on the machine"
)
cpu_cores = models.IntegerField(
null=True, blank=True,
help_text="the number of CPU cores detected"
)
cpu_percent = models.FloatField(
null=True, blank=True,
help_text="the percentage of all cores being used over the last 5 seconds"
)
go_version = models.CharField(
max_length=255, null=True, blank=True,
help_text="the version of Go for the currently running instance"
)
go_platform = models.CharField(
max_length=255, null=True, blank=True,
help_text="the platform compiled for the currently running instance"
)
go_architecture = models.CharField(
max_length=255, null=True, blank=True,
help_text="the chip architecture compiled for the currently running instance"
)
class Meta:
db_table = | |
<reponame>hrnciar/klusta
# -*- coding: utf-8 -*-
"""SpikeDetekt algorithm."""
#------------------------------------------------------------------------------
# Imports
#------------------------------------------------------------------------------
from collections import defaultdict
import logging
import numpy as np
from tqdm import tqdm
from ..utils import (Bunch,
get_excerpts,
chunk_bounds,
data_chunk,
_concatenate,
)
from ..kwik.mea import (_channels_per_group,
_probe_adjacency_list,
)
from .detect import Thresholder, compute_threshold, FloodFillDetector
from .filter import Filter
from .pca import PCA
from .store import SpikeDetektStore
from .waveform import WaveformExtractor
logger = logging.getLogger(__name__)
#------------------------------------------------------------------------------
# Utils
#------------------------------------------------------------------------------
def _keep_spikes(samples, bounds):
"""Only keep spikes within the bounds `bounds=(start, end)`."""
start, end = bounds
return (start <= samples) & (samples <= end)
def _split_spikes(groups, idx=None, **arrs):
    """Split spike data according to the channel group.

    Parameters
    ----------
    groups : array-like
        The channel group of every spike.
    idx : ndarray of bool, optional
        Mask of spikes to keep (used to drop spikes in overlap bands).
    **arrs : dict
        Per-spike arrays, e.g. ``spike_samples``, ``waveforms``, ``masks``.

    Returns
    -------
    dict
        ``{group: {'spike_samples': ..., 'waveforms': ..., 'masks': ...}}``.
    """
    # Output dtype for each known per-spike array.
    dtypes = {'spike_samples': np.float64,
              'waveforms': np.float32,
              'masks': np.float32,
              }
    groups = np.asarray(groups)
    if idx is not None:
        # FIX: `np.bool` was a deprecated alias removed in NumPy 1.24; use
        # the canonical `np.bool_` scalar type for the dtype comparison.
        assert idx.dtype == np.bool_
        n_spikes_chunk = np.sum(idx)
        # First, remove the overlapping bands.
        groups = groups[idx]
        arrs_bis = arrs.copy()
        for key, arr in arrs.items():
            arrs_bis[key] = arr[idx]
            assert len(arrs_bis[key]) == n_spikes_chunk
    else:
        # FIX: without a mask the original code raised NameError because
        # `arrs_bis` was never bound; split all spikes in that case.
        arrs_bis = arrs
    # Then, split along the group.
    groups_u = np.unique(groups)
    out = {}
    for group in groups_u:
        i = (groups == group)
        out[group] = {}
        for key, arr in arrs_bis.items():
            out[group][key] = _concat(arr[i], dtypes.get(key, None))
    return out
def _array_list(arrs):
out = np.empty((len(arrs),), dtype=np.object)
out[:] = arrs
return out
def _concat(arr, dtype=None):
out = np.array([_[...] for _ in arr], dtype=dtype)
return out
def _cut_traces(traces, interval_samples):
n_samples, n_channels = traces.shape
# Take a subset if necessary.
if interval_samples is not None:
start, end = interval_samples
else:
return traces, 0
assert 0 <= start < end
# WARNING: this loads all traces into memory! To fix this properly,
# we'll have to implement lazy chunking in ConcatenatedTraces.
size = (end - start) * traces.shape[1] * traces.dtype.itemsize / 1024. ** 3
if size > 1:
logger.warn("Loading all traces in memory: this will require %.3f GB "
"of RAM! ", size)
logger.warn("To avoid this, do not specify `--interval`. "
"This bug will be fixed later.")
traces = traces[start:end, ...]
if start > 0:
# TODO: add offset to the spike samples...
raise NotImplementedError("Need to add `start` to the "
"spike samples")
return traces, start
def _subtract_offsets(samples, offsets):
"""Subtract the recording offsets from spike samples.
Return the subtracted spike samples and the spike_recordings array.
"""
if samples is None:
return None, None
assert isinstance(samples, (list, np.ndarray))
samples = np.asarray(samples).copy()
assert len(offsets) >= 2
assert offsets[0] == 0
assert offsets[-1] >= samples[-1]
n = len(offsets) - 1
# Find where to insert the offsets in the spike samples.
ind = np.searchsorted(samples, offsets)
assert ind[0] == 0
assert len(ind) == n + 1
spike_recordings = np.zeros(len(samples), dtype=np.int32)
# Loop over all recordings.
for rec in range(n):
# Start of the current recording.
start_rec = offsets[rec]
# Spike indices of the first spikes in every recording.
i, j = ind[rec], ind[rec + 1]
# Ensure that the selected spikes belong to the current recording.
if i < len(samples) - 1:
assert start_rec <= samples[i]
# Subtract the current recording offset to the selected spikes.
samples[i:j] -= start_rec
# Create the spike_recordings array.
spike_recordings[i:j] = rec
assert np.all(samples[i:j] >= 0)
assert np.all(samples[i:j] <= (offsets[rec + 1] - start_rec))
return samples, spike_recordings
def _relative_channels(channels, adjacency):
"""Renumber channels from absolute indices to relative indices,
to match arrays used in the detection code.
Parameters
----------
channels : dict
A dict {group: list_of_channels}
adjacency : dict
A dict {group: set_of_neighbors}
"""
ch_out = {}
adj_out = {}
mapping = {}
offset = 0
for group in channels:
ch = channels[group]
n = len(ch)
ch_out[group] = [i + offset for i in range(n)]
# From absolute to relative indices.
mapping.update({c: (i + offset) for i, c in enumerate(ch)})
offset += n
# Recreate the adjacency dict by applying the mapping to
# both the keys and values.
for c, i in mapping.items():
adj_out[i] = set(mapping[_] for _ in adjacency.get(c, set())
if _ in mapping)
return ch_out, adj_out
#------------------------------------------------------------------------------
# Spike detection class
#------------------------------------------------------------------------------
class SpikeDetekt(object):
"""Spike detection class.
Parameters
----------
tempdir : str
Path to the temporary directory used by the algorithm. It should be
on a SSD for best performance.
probe : dict
The probe dictionary.
**kwargs : dict
Spike detection parameters.
"""
def __init__(self, tempdir=None, probe=None, **kwargs):
super(SpikeDetekt, self).__init__()
self._tempdir = tempdir
# Load a probe.
if probe is not None:
kwargs['probe_channels'] = _channels_per_group(probe)
kwargs['probe_adjacency_list'] = _probe_adjacency_list(probe)
self._kwargs = kwargs
self._n_channels_per_group = {
group: len(channels)
for group, channels in self._kwargs['probe_channels'].items()
}
for group in self._n_channels_per_group:
logger.info("Found %d live channels in group %d.",
self._n_channels_per_group[group], group)
self._groups = sorted(self._n_channels_per_group)
self._n_features = self._kwargs['n_features_per_channel']
before = self._kwargs['extract_s_before']
after = self._kwargs['extract_s_after']
self._n_samples_waveforms = before + after
# Processing objects creation
# -------------------------------------------------------------------------
def _create_filter(self):
rate = self._kwargs['sample_rate']
low = self._kwargs['filter_low']
high = self._kwargs['filter_high_factor'] * rate
order = self._kwargs['filter_butter_order']
return Filter(rate=rate,
low=low,
high=high,
order=order,
)
def _create_thresholder(self, thresholds=None):
mode = self._kwargs['detect_spikes']
return Thresholder(mode=mode, thresholds=thresholds)
def _create_detector(self):
graph = self._kwargs['probe_adjacency_list']
probe_channels = self._kwargs['probe_channels']
join_size = self._kwargs['connected_component_join_size']
return FloodFillDetector(probe_adjacency_list=graph,
join_size=join_size,
channels_per_group=probe_channels,
)
def _create_extractor(self, thresholds):
before = self._kwargs['extract_s_before']
after = self._kwargs['extract_s_after']
weight_power = self._kwargs['weight_power']
probe_channels = self._kwargs['probe_channels']
return WaveformExtractor(extract_before=before,
extract_after=after,
weight_power=weight_power,
channels_per_group=probe_channels,
thresholds=thresholds,
)
def _create_pca(self):
n_pcs = self._kwargs['n_features_per_channel']
return PCA(n_pcs=n_pcs)
# Misc functions
# -------------------------------------------------------------------------
def update_params(self, **kwargs):
self._kwargs.update(kwargs)
# Processing functions
# -------------------------------------------------------------------------
def apply_filter(self, data):
"""Filter the traces."""
filter = self._create_filter()
return filter(data).astype(np.float32)
def find_thresholds(self, traces):
"""Find weak and strong thresholds in filtered traces."""
rate = self._kwargs['sample_rate']
n_excerpts = self._kwargs['n_excerpts']
excerpt_size = int(self._kwargs['excerpt_size_seconds'] * rate)
single = bool(self._kwargs['use_single_threshold'])
strong_f = self._kwargs['threshold_strong_std_factor']
weak_f = self._kwargs['threshold_weak_std_factor']
logger.info("Finding the thresholds...")
excerpt = get_excerpts(traces,
n_excerpts=n_excerpts,
excerpt_size=excerpt_size)
excerpt_f = self.apply_filter(excerpt)
thresholds = compute_threshold(excerpt_f,
single_threshold=single,
std_factor=(weak_f, strong_f))
logger.debug("Thresholds: {}.".format(thresholds))
return {'weak': thresholds[0],
'strong': thresholds[1]}
    def detect(self, traces_f, thresholds=None):
        """Detect connected waveform components in filtered traces.
        Parameters
        ----------
        traces_f : array
            An `(n_samples, n_channels)` array with the filtered data.
        thresholds : dict
            The weak and strong thresholds.
        Returns
        -------
        components : list
            A list of `(n, 2)` arrays with `sample, channel` pairs.
        """
        # Threshold the data following the weak and strong thresholds.
        thresholder = self._create_thresholder(thresholds)
        # Transform the filtered data according to the detection mode.
        traces_t = thresholder.transform(traces_f)
        # Compute the threshold crossings.
        weak = thresholder.detect(traces_t, 'weak')
        strong = thresholder.detect(traces_t, 'strong')
        # Find dead channels: those belonging to no probe group. Any crossing
        # on them is noise by definition.
        cpg = self._kwargs['probe_channels']
        live_channels = sorted([item for sublist in cpg.values()
                                for item in sublist])
        n_channels = traces_f.shape[1]
        dead_channels = np.setdiff1d(np.arange(n_channels), live_channels)
        logger.debug("Dead channels: %s.", ', '.join(map(str, dead_channels)))
        # Kill threshold crossings on dead channels.
        weak[:, dead_channels] = 0
        strong[:, dead_channels] = 0
        # Run the detection: flood-fill from strong crossings through
        # adjacent weak crossings to form connected components.
        detector = self._create_detector()
        return detector(weak_crossings=weak,
                        strong_crossings=strong)
def extract_spikes(self, components, traces_f,
thresholds=None, keep_bounds=None, s_start=None):
"""Extract spikes from connected components.
Returns a split object.
Parameters
----------
components : list
List of connected components.
traces_f : array
Filtered data.
thresholds : dict
The weak and strong thresholds.
keep_bounds : tuple
(keep_start, keep_end).
s_start : 0
Start of the chunk.
"""
n_spikes = len(components)
if n_spikes == 0:
return {}
# Transform the filtered data according to the detection mode.
thresholder = self._create_thresholder()
traces_t = thresholder.transform(traces_f)
# Extract all waveforms.
extractor = self._create_extractor(thresholds)
groups, samples, waveforms, masks = zip(*[extractor(component,
data=traces_f,
data_t=traces_t,
)
for component in components])
# Create the return arrays.
groups = np.array(groups, dtype=np.int32)
assert groups.shape == (n_spikes,)
assert groups.dtype == np.int32
samples = np.array(samples, dtype=np.float64)
assert samples.shape == (n_spikes,)
assert samples.dtype == np.float64
# These are lists of arrays of various shapes (because of various
# groups).
waveforms = _array_list(waveforms)
assert waveforms.shape == (n_spikes,)
assert waveforms.dtype == np.object
masks = _array_list(masks)
assert masks.dtype == np.object
assert masks.shape == (n_spikes,)
# Reorder the spikes.
idx = np.argsort(samples)
groups = groups[idx]
samples = samples[idx]
waveforms = waveforms[idx]
masks = masks[idx]
# Remove spikes in the overlapping bands.
# WARNING: add s_start to spike_samples, because spike_samples
# is relative to the start of the chunk.
# It is important to add s_start and not keep_start, because of
# edge effects between overlapping chunks.
s_start = s_start or 0
(keep_start, keep_end) = keep_bounds
idx = _keep_spikes(samples + s_start, (keep_start, keep_end))
# Split the data according to the channel groups.
split = _split_spikes(groups, | |
<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright © 2007, 2008 <NAME> <<EMAIL>>
#
# This file is part of the Bobcat program.
#
# Bobcat is free software; you can use it, redistribute it and/or modify it
# under the terms of the MIT license.
#
# You should have received a copy of the MIT license with Bobcat. If not,
# see <http://bobcat.origo.ethz.ch/wiki/Licence>.
#
"""The preprocessor of Bobcat source files.
Its main purpose is twofold: First, it converts characters sequences to single
Unicode characters. And secondly, it keeps track of the origins of the
preprocessed text, so that in case of parsing errors the user can be told where
exactly the error occured in the source document.
It achieves this by one fat unicode-like data type called `Excerpt`.
"""
import re, os.path, codecs, string, warnings
from . import common
from .common import FileError, EncodingError, PositionMarker
class Excerpt(unicode):
"""Class for preprocessed Bobcat source text. It behaves like a unicode string
with extra methods and attributes.
The typical lifecycle of such an object is as follows:
1. The Bobcat source text is read from the file (or whereever) and stored as
one big unicode string.
2. This unicode string is used to create an Excerpt instance from it. In
order to do this, the pre input method rules are applied.
3. This excerpt is send to the parser that divides it into smaller and
smaller excerpts, parsing it recursively while building the parse tree.
4. When parsing is finished, the post input method is applied (which is
usually *much* smaller than the pre method).
5. Now, the excerpts are given to the routines of the backend, which
process them further, convert them to unicodes, and write them to the
output.
:cvar entity_pattern: Regexp pattern for numerical entities like
``\\0x0207;`` or ``\\#8022;``.
:type entity_pattern: re.pattern
:ivar escaped_positions: the indices of all characters in the Excerpt which
were escaped in the original input. Note that this is a set which is not
ordered.
:ivar code_snippets_intervals: all start--end tuples of index ranges in the
Excerpt which contain code snippets, so that they have to be treated as
escaped. Note that they must be in ascending order of the start
indices. Actually, this could also be called ``escaped_intervals``
because it could be substituted with many equivalent entries in
`escaped_positions`. However, for performance reasons, code snippets are
stored in this start--end form. Otherwise, `escaped_positions` would be
cluttered up with too many subsequent entries.
:ivar original_positions: maps indices in the Excerpt to position markers
that point to the actual origin of this index in the Excerpt.
:ivar original_text: the original unicode string this Excerpt stems from
:ivar __post_substitutions: the substitutions for the post input method.
They are stored here for eventual use in `apply_post_input_method`.
:ivar __escaped_text: the unicode equivalent of the Excerpt, with all
escaped characters and characters of code snippets replaced with NULL
characters. It is a cache used in `escaped_text`.
:type escaped_positions: set of int
:type code_snippets_intervals: list of (int, int)
:type original_positions: list of `common.PositionMarker`
:type original_text: unicode
:type __post_substitutions: list of (re.pattern, unicode)
:type __escaped_text: unicode
"""
# FixMe: The following pylint directive is necessary because astng doesn't
# parse attribute settings in the __new__ classmethod. If this changes or
# if a workaround is found, this directive should be removed in order to
# find real errors.
#
# pylint: disable-msg=E1101
entity_pattern = re.compile(r"((0x(?P<hex>[0-9a-fA-F]+))|(#(?P<dec>[0-9]+)));")
whitespace_pattern = re.compile(r"(\A\s+)|(\s+\Z)|(\s{2,})|([\t\n\r\f\v])")
@classmethod
def get_next_match(cls, original_text, substitutions, offset=0):
"""Return the next input method match in `original_text`. The search
starts at `offset`.
:Parameters:
- `original_text`: the original line in the Bobcat input file
- `substitutions`: the substitution dictionary to be used
- `offset`: starting position for the search in original_text
:type original_text: unicode
:type substitutions: list with the (match, replacement) tuples
:type offset: int
:Return:
the position of the found match, the length of the match, and the
replacement for this match (a single character). If no match was
found, it's len(original_text), 0, None instead
:rtype: int, int, unicode
"""
earliest_match_position = len(original_text)
longest_match_length = 0
best_match = None
for substitution in substitutions:
match = substitution[0].search(original_text, offset)
if match and match.group().count("\r") + match.group().count("\n") == 0:
start, end = match.span()
if start == earliest_match_position:
if end - start > longest_match_length:
longest_match_length = end - start
best_match = match
replacement = substitution[1]
elif start < earliest_match_position:
earliest_match_position = start
longest_match_length = end - start
best_match = match
replacement = substitution[1]
if not best_match or not best_match.group():
return len(original_text), 0, None
return best_match.start(), best_match.end() - best_match.start(), replacement
def is_escaped(self, position):
    """Return True, if the character at position is escaped.
    :Parameters:
      - `position`: the position or interval in the Excerpt
    :type position: int or (int, int)
    :Return:
      whether or not the character at `position` is escaped. If an
      interval was given, whether or not at least one character in the
      interval is escaped.
    :rtype: boolean
    """
    # Escaped characters are masked as NUL in the escaped representation,
    # so the test reduces to a membership check on the selected region.
    masked = self.escaped_text()
    if isinstance(position, (list, tuple)):
        region = masked[position[0]:position[1]]
    else:
        region = masked[position]
    return u"\u0000" in region
def escaped_text(self):
    """Returns the unicode representation of the Excerpt with all escaped
    characters replaced with Null characters.
    :Return:
      the unicode representation of the Excerpt with all escaped characters
      replaced with Null characters
    :rtype: unicode
    """
    # pylint: disable-msg=E0203, W0201
    # Lazily computed and cached; escaped positions and whole code-snippet
    # intervals are both masked with NUL characters.
    if self.__escaped_text is None:
        chars = list(unicode(self))
        for index in self.escaped_positions:
            chars[index] = u"\u0000"
        for begin, stop in self.code_snippets_intervals:
            chars[begin:stop] = [u"\u0000"] * (stop - begin)
        self.__escaped_text = u"".join(chars)
    return self.__escaped_text
def original_position(self, position=0):
    """Maps a position within the excerpt to the position in the original
    file.
    :Parameters:
      - `position`: the position in the excerpt to which this method
        belongs. Note that len(self) is an allowed value for `position`, in
        order to get the original span of the whole string.
    :type position: int
    :Return:
      the Position the given character originates from. This includes url
      (filename), linenumber, and column. If the Excerpt was empty, the
      position of the following character in the original file is
      returned.
    :rtype: PositionMarker
    :Exceptions:
      - `IndexError`: if a position was requested which lies outside the
        line.
    """
    length = len(self)
    if not 0 <= position <= length:
        raise IndexError("invalid value %d for "
                         "position in original_position near line %d of file %s" %
                         (position, self.original_positions[0].linenumber,
                          self.original_positions[0].url))
    # NOTE(review): `original_positions` is iterated for its keys and then
    # indexed by a key below, so it behaves like a mapping from excerpt
    # positions to markers, despite the ":type ... list" note in the class
    # docstring — confirm against the class definition.
    closest_position = max([pos for pos in self.original_positions if pos <= position])
    offset = position - closest_position
    closest_marker = self.original_positions[closest_position].transpose(offset)
    # NOTE(review): `transpose(offset)` already receives the offset and the
    # column is advanced by `offset` again here — verify this is not a
    # double application (depends on PositionMarker.transpose semantics).
    closest_marker.column += offset
    return closest_marker
def split(self, split_characters=None):
    """Splits the Excerpt like Python's split() string method does. If no
    argument is given, it splits at whitespace (just as the string method).
    Important note: Escaped characters are not regarded as split
    characters.
    :Parameters:
      - `split_characters`: a string containing all characters that divide
        the parts that should be created
    :type split_characters: unicode
    :Return:
      a list with all parts in which the Excerpt was split up
    :rtype: list of `Excerpt`
    """
    delimiters = u" \t\v\n\r" if split_characters is None else split_characters
    # Match maximal runs of non-delimiter characters in the *escaped*
    # representation, then slice the Excerpt itself with those spans.
    pattern = u"[^" + re.escape(delimiters) + "]*"
    pieces = []
    for match in re.finditer(pattern, self.escaped_text(), re.UNICODE):
        begin, stop = match.span()
        if begin == stop:
            # Empty matches are dropped, except at the very start/end when
            # an explicit delimiter set was given (mirrors str.split with
            # an argument, which keeps leading/trailing empty fields).
            at_edge = begin == 0 or begin == len(self.escaped_text())
            if split_characters is None or not at_edge:
                continue
        pieces.append(self[begin:stop])
    return pieces
def normalize_whitespace(self):
    """Return a copy of this excerpt with leading/trailing whitespace
    removed and every interior whitespace run collapsed to one space.

    Works on the plain unicode representation to *find* runs (via
    `whitespace_pattern`), but slices `self` so the result is again an
    Excerpt with intact position tracking.  When a run contains an
    ordinary space, one of the original space characters is kept; runs of
    only tabs/newlines are replaced by a synthetic u" ".
    NOTE(review): relies on the Python 2 builtin `unicode`.
    """
    def add_part(result, new_part):
        # `None` means "nothing accumulated yet".
        return result + new_part if result else new_part
    result = None
    unicode_representation = unicode(self)
    # Skip leading whitespace entirely.
    whitespace_match = self.whitespace_pattern.match(unicode_representation)
    if whitespace_match:
        current_position = start = whitespace_match.end()
    else:
        current_position = start = 0
    while current_position < len(unicode_representation):
        whitespace_match = self.whitespace_pattern.search(unicode_representation, current_position)
        if whitespace_match:
            current_position = whitespace_match.end()
            if whitespace_match.end() == len(unicode_representation):
                # Trailing whitespace: drop it and stop.
                result = add_part(result, self[start:whitespace_match.start()])
                break
            elif whitespace_match.group().startswith(u" "):
                # Run begins with a plain space: keep exactly that one
                # character of the run.
                result = add_part(result, self[start:whitespace_match.start()+1])
                start = current_position
            elif whitespace_match.group().endswith(u" "):
                # Run ends with a plain space: keep it by backing the next
                # start position up one character.
                result = add_part(result, self[start:whitespace_match.start()])
                start = current_position - 1
            else:
                # No plain space anywhere in the run: substitute one.
                result = add_part(result, self[start:whitespace_match.start()])
                result = add_part(result, u" ")
                start = current_position
        else:
            # No further whitespace: take the rest verbatim.
            result = add_part(result, self[start:len(unicode_representation)])
            break
    # An all-whitespace excerpt yields an empty slice of self.
    return result or self[:0]
class Status(object):
"""A mere container for some immutable data structures used in the pre-
and postprocessing.
The only reason for | |
"""
Calculate the cost basis.
"""
import csv
import decimal
import io
from decimal import Decimal
from typing import Sequence
from yabc import coinpool
from yabc import ohlcprovider
from yabc import transaction
from yabc.costbasisreport import CostBasisReport
from yabc.transaction import Transaction
from yabc.transaction import is_fiat
BASIS_INFORMATION_FLAG = "Transaction without basis information"
__author__ = "<NAME> <<EMAIL>>"
def _split_coin_to_add(coin_to_split, amount, trans):
    # type: (transaction.Transaction, Decimal, transaction.Transaction) -> transaction.Transaction
    """
    Build the SPLIT transaction that returns the unsold remainder of
    `coin_to_split` to the pool, with its basis and fees pro-rated.

    TODO: For creating an audit trail, we should track the origin of the
    split coin, ie. was it generated from mining, a gift, or purchased?
    This could be similar to the way we track the origin coin in CBRs.

    :param coin_to_split: a Transaction, either a BUY or an TRADE_INPUT
    :param amount: unsold portion of the asset ie. float(0.3) for a sale of 1
        BTC where another coin of 0.7 was already used
    :param trans: the transaction triggering the report
    :return: a Transaction of type SPLIT with a proper basis
    """
    remainder = coin_to_split.quantity_received - amount
    remainder_fraction = remainder / coin_to_split.quantity_received
    # The remainder keeps its share of the original cost and fees.
    return transaction.Transaction(
        transaction.Transaction.Operation.SPLIT,
        symbol_received=coin_to_split.symbol_received,
        quantity_received=remainder,
        fees=coin_to_split.fees * remainder_fraction,
        symbol_traded=coin_to_split.symbol_traded,
        quantity_traded=coin_to_split.quantity_traded * remainder_fraction,
        date=coin_to_split.date,
    )
def _fiat_value_for_trade(
tx: transaction.Transaction, ohlc: ohlcprovider.OhlcProvider, prefer_traded: bool
) -> decimal.Decimal:
"""
Lookup the value of a coin/coin trade.
Suppose we want the value of a trade of a binance coin/coin pair where one
token has essentially zero value, or no history available.
The values of the buy and sell sides can be different, but if we don't have any
data about one leg, we can use the other to calculate the trade's value.
Get the value of a transaction in fiat, or else raise NoDataError.
:param prefer_traded: If True, prefer the symbol_traded leg.
"""
traded = lambda: ohlc.get(tx.symbol_traded, tx.date).high * tx.quantity_traded
received = lambda: ohlc.get(tx.symbol_received, tx.date).high * tx.quantity_received
if prefer_traded:
lambdas = (traded, received)
else:
lambdas = (received, traded)
for fx in lambdas:
try:
return fx()
except ohlcprovider.NoDataError:
pass
# if we got here, we found no data.
return decimal.Decimal(0) # TODO: consider raising.
def _split_report(coin_to_split, amount, trans, ohlc=None):
    # type: (transaction.Transaction, Decimal, transaction.Transaction, ohlcprovider.OhlcProvider) -> CostBasisReport
    """
    Sell `amount` of `coin_to_split` and build the resulting CostBasisReport.

    The triggering event is `trans`, typically a sale of some kind.  All
    fees on both sides reduce the taxable result:
        basis = cost + buy fees
        income = (proceeds - sale fees) - basis

    :param coin_to_split: an input; part of this supplies the cost basis
    :param amount: quantity of the split asset sold in this report (the
        asset amount, not USD)
    :param trans: the transaction triggering this report, a SELL or SPENDING
    """
    assert amount < coin_to_split.quantity_received
    # TODO: add a test that makes this assertion fail.
    assert not (amount - trans.quantity_traded > 1e-5)  # allowed to be equal
    # coin_to_split is a buy, mining, previous split — some kind of input;
    # its quantity_received is crypto, its quantity_traded is USD.
    # Pro-rate the basis coin's cost and fees by the sold fraction.
    basis_fraction = amount / coin_to_split.quantity_received
    basis_cost = basis_fraction * coin_to_split.quantity_traded
    basis_fee = basis_fraction * coin_to_split.fees
    # Determine fiat proceeds; coin/coin trades need a market-data lookup.
    if is_fiat(trans.symbol_received):
        proceeds_asset = "USD"
        fiat_received = trans.quantity_received
    else:
        assert trans.is_coin_to_coin()
        proceeds_asset = trans.symbol_received
        fiat_received = _fiat_value_for_trade(trans, ohlc, prefer_traded=False)
    sale_fraction = amount / trans.quantity_traded
    proceeds = (sale_fraction * fiat_received).quantize(Decimal(".01"))
    sale_fee = (sale_fraction * trans.fees).quantize(Decimal(".01"))
    return CostBasisReport(
        trans.user_id,
        basis_cost + basis_fee,
        amount,
        coin_to_split.date,
        proceeds - sale_fee,
        trans.date,
        trans.symbol_traded,
        triggering_transaction=trans,
        secondary_asset=proceeds_asset,
    )
def _process_one(trans, pool, ohlc_source=None):
    # type: (transaction.Transaction, coinpool.CoinPool, ohlcprovider.OhlcProvider) -> Sequence
    """
    Cost basis calculator for a single transaction. Return the 'diff'
    required to process this one tx.
    It is assumed that the coin to sell is at the front, at `pool[0]`, so this
    works for both LIFO and FIFO.
    - If transaction is a buy, just return the add-to-pool op.
    - Otherwise, for a sale:
    - Example: buy 0.25 @ $1 and 0.25 at $2. Then sell 0.5 at $3.
    this is reported to IRS as 2 transactions:
    One: Sell 0.25 with a basis of $1
    Two: Sell 0.25 with a basis of $2
    :param trans: a buy or sell with fields filled
    :param pool: a sequence containing transaction.Transaction instances.
    :return (reports, diff, flags): any CostBasisReports, the PoolDiff, any flags raised.
    """
    diff = coinpool.PoolDiff()
    flags = []
    cost_basis_reports = []
    basis_information_absent = False
    amount = Decimal(0)
    pool_index = -1
    # Simple inputs (buys, gifts, mining) just enter the pool.
    if trans.is_simple_input():  # what about: and not trans.is_coin_to_coin():
        diff.add(trans.symbol_received, trans)
        return ([], diff, [])
    # At this point, trans is a sell.
    # Consume coins from the front of the pool until the sold quantity is
    # covered; `pool_index` ends on the last coin consumed.
    while amount < trans.quantity_traded:
        pool_index += 1
        curr_pool = pool.get(trans.symbol_traded)
        if pool_index >= len(curr_pool):
            # If we get here, we have partial information about the tx.
            # Use a basis of zero for the sale.
            flags.append((BASIS_INFORMATION_FLAG, trans))
            basis_information_absent = True
            amount = trans.quantity_traded
        else:
            amount += curr_pool[pool_index].quantity_received
    # The last consumed coin may exceed the sale; then it must be split.
    # NOTE(review): Decimal compared against the float 1e-5 — legal in
    # Python 3, but confirm the tolerance is intentional.
    needs_split = (amount - trans.quantity_traded) > 1e-5
    if needs_split:
        coin_to_split = pool.get(trans.symbol_traded)[pool_index]
        excess = amount - trans.quantity_traded
        portion_of_split_coin_to_sell = coin_to_split.quantity_received - excess
        if trans.is_taxable_output():
            # Outgoing gifts would not trigger this.
            # TODO: Alert the user if the value of a gift exceeds $15,000, in which
            # case gift taxes may be eligible...
            cost_basis_reports.append(
                _split_report(
                    coin_to_split, portion_of_split_coin_to_sell, trans, ohlc_source
                )
            )
        # The unsold remainder re-enters the pool as a SPLIT coin.
        coin_to_add = _split_coin_to_add(
            coin_to_split, portion_of_split_coin_to_sell, trans
        )
        diff.add(coin_to_add.symbol_received, coin_to_add)
        diff.remove(trans.symbol_traded, pool_index)
    if not needs_split:
        pool_index += 1  # Ensures that we report the oldest transaction as a sale.
    if trans.is_taxable_output():
        # The other option is gift. If it's a gift we don't report any gain or loss.
        # The coins just magically remove themselves from the pool.
        # No entry in 8949 for them.
        cost_basis_reports.extend(
            _build_sale_reports(
                pool, pool_index, trans, basis_information_absent, ohlc_source
            )
        )
    # A coin/coin trade also *acquires* something: add the received leg.
    if trans.is_coin_to_coin():
        to_add = _get_coin_to_coin_input(trans, ohlc_source)
        diff.add(to_add.symbol_received, to_add)
    return (cost_basis_reports, diff, flags)
def _get_coin_to_coin_input(trans, ohlc):
    # type: (transaction.Transaction, ohlcprovider.OhlcProvider) -> transaction.Transaction
    """
    Build the TRADE_INPUT transaction that re-enters the pool after a
    coin/coin trade, with its cost basis priced in fiat.

    TODO: determine the buy-side fees for a coin/coin trade.
    """
    fiat_basis = _fiat_value_for_trade(trans, ohlc, prefer_traded=True)
    return transaction.Transaction(
        transaction.Operation.TRADE_INPUT,
        date=trans.date,
        user_id=trans.user_id,
        source=trans.source,
        symbol_received=trans.symbol_received,
        quantity_received=trans.quantity_received,
        symbol_traded="USD",
        quantity_traded=fiat_basis,
        fee_symbol="USD",
        fees=0,
        triggering_transaction=trans,
    )
def _build_no_basis_report(
trans: transaction.Transaction, ohlc: ohlcprovider.OhlcProvider
):
proceeds = trans.quantity_received * ohlc.get("BTC", trans.date).high
basis = 0
if abs(proceeds) < 1:
return []
if proceeds < 0:
basis = abs(proceeds)
proceeds = 0
report = CostBasisReport(
trans.user_id,
basis=decimal.Decimal(basis),
quantity=trans.quantity_received,
date_purchased=trans.date,
proceeds=decimal.Decimal(proceeds),
date_sold=trans.date,
asset="{} {} perpetual".format(trans.source, trans.symbol_traded),
secondary_asset="BTC",
triggering_transaction=trans,
)
return [report]
def _build_sale_reports(pool, pool_index, trans, basis_information_absent, ohlc):
# type: (coinpool.CoinPool, int, transaction.Transaction, bool, ohlcprovider.OhlcProvider) -> Sequence[CostBasisReport]
"""
Use coins from pool to make CostBasisReports.
:param basis_information_absent: If true, we do not know how much the
original coin was purchased for yet. This is either because no previous
buy exists, or because `trans` is a coin/coin trade we need to look up
the fiat values for.
:param trans: the tx triggering the reports. It must be a sell of some kind.
"""
ans = []
received_asset = "USD"
proceeds = trans.quantity_received
fees_in_fiat = trans.fees
if trans.operation == transaction.Operation.PERPETUAL_PNL:
return _build_no_basis_report(trans, ohlc)
if not is_fiat(trans.fee_symbol):
try:
fees_in_fiat = ohlc.get(trans.fee_symbol, trans.date).high * trans.fees
except ohlcprovider.NoDataError:
fees_in_fiat = decimal.Decimal("0")
if not is_fiat(trans.symbol_received):
received_asset = trans.symbol_received
proceeds = (
_fiat_value_for_trade(trans, ohlc, prefer_traded=False) - fees_in_fiat
)
if basis_information_absent:
report = CostBasisReport(
trans.user_id,
decimal.Decimal(0),
trans.quantity_traded,
trans.date,
proceeds=proceeds,
date_sold=trans.date,
asset=trans.symbol_traded,
triggering_transaction=trans,
secondary_asset=received_asset,
)
return [report]
for i in range(pool_index):
# each of these including pool_index will become a sale to be reported to IRS
# The cost basis is pool[i].proceeds
# The sale price is dependent on the parameter `trans'
#
# NOTE: the entire basis coin will be sold; but for each iteration
# through we only use a portion of trans.
# curr_basis_tx is a BUY, GIFT_RECEIVED or a TRADE_INPUT, or another input.
curr_basis_tx = pool.get(trans.symbol_traded)[i]
portion_of_sale = curr_basis_tx.quantity_received / trans.quantity_traded
# The seller can inflate their cost basis by the buy fees.
assert curr_basis_tx.symbol_received == | |
import numpy as np
from scipy.ndimage import imread
from scipy.misc import imsave
from itertools import izip
from collections import deque
from pylibs.spatialfunclib import projection_onto_line
import sqlite3
import math
import sys
import os
import pickle
# globals
min_lat, min_lon, max_lat, max_lon = None, None, None, None
height = None
width = None
xscale = None
yscale = None
def douglas_peucker(segment, epsilon):
    """Simplify a polyline with the Ramer-Douglas-Peucker algorithm.

    Finds the point farthest from the chord between the segment's
    endpoints; if it deviates by at least `epsilon`, recurses on both
    halves, otherwise collapses the segment to its two endpoints.

    :param segment: sequence of points with .latitude/.longitude
    :param epsilon: distance tolerance (same units as projection distance)
    :return: list of retained points (endpoints always kept)
    """
    dmax = 0
    index = 0
    for i in range(1, len(segment) - 1):
        (_, _, d) = projection_onto_line(segment[0].latitude, segment[0].longitude, segment[-1].latitude, segment[-1].longitude, segment[i].latitude, segment[i].longitude)
        if (d > dmax):
            index = i
            dmax = d
    if (dmax >= epsilon):
        # Recurse on both halves, INCLUDING the farthest point in the first
        # half so each half is simplified against the correct chord.
        # (The previous code used segment[0:index], which dropped the
        # farthest point from the first half's chord.)
        left = douglas_peucker(segment[0:index + 1], epsilon)
        right = douglas_peucker(segment[index:], epsilon)
        # `left` ends with segment[index] and `right` starts with it:
        # keep only one copy at the join.
        smoothed_segment = left[:-1] + right
    else:
        smoothed_segment = [segment[0], segment[-1]]
    return smoothed_segment
def pixels_to_coords(pixel):
    """Convert an (i, j) image pixel index to a (latitude, longitude) pair.

    Accepts a single (i, j) tuple — call sites are unchanged; only the
    Python 2 tuple-parameter syntax (invalid in Python 3) was replaced by
    explicit unpacking.  Relies on the module globals `height`, `xscale`,
    `yscale`, `min_lat`, `min_lon` being initialized first.
    """
    (i, j) = pixel
    return ((((height - i) / yscale) + min_lat), ((j / xscale) + min_lon))
class Node:
    """A graph vertex at a geographic coordinate.

    `id` is assigned later, when the node is first persisted; `weight`
    carries the (currently unused) density estimate.
    """
    def __init__(self, location, weight):
        # `location` is a (latitude, longitude) pair.  The original code
        # used a Python 2 tuple parameter here, which is invalid syntax in
        # Python 3; callers still pass a single tuple.
        (latitude, longitude) = location
        self.id = None
        self.latitude = latitude
        self.longitude = longitude
        self.weight = weight
class MainCrossing:
    """A cluster of adjacent crossing pixels collapsed to its centroid.

    `component_crossings` keeps the raw (i, j) members; `i`/`j` hold the
    arithmetic mean of the member coordinates.
    """
    def __init__(self, crossing_stack):
        self.component_crossings = list(crossing_stack)
        member_count = float(len(crossing_stack))
        self.i = sum(member[0] for member in crossing_stack) / member_count
        self.j = sum(member[1] for member in crossing_stack) / member_count

    @property
    def location(self):
        """Centroid of the cluster as an (i, j) pair."""
        return (self.i, self.j)
class Graph:
def __init__(self):
    # No construction-time state: all data flows through the extract()
    # pipeline as arguments.
    pass
def extract(self, skeleton, density_estimate, sqlite_filename, output_filename):
    """Run the full extraction pipeline on a skeleton image: mark crossing
    pixels, trace crossings and segments, then persist the graph to SQLite
    and a pickled map file."""
    marked_skeleton = self.identify_crossing_points(skeleton)
    crossings, traced_segments = self.find_main_crossings_and_segments(marked_skeleton)
    self.create_graph(crossings, traced_segments, density_estimate, sqlite_filename, output_filename)
def create_graph(self, main_crossings, segments, density_estimate, sqlite_filename, output_filename):
    """Persist the extracted road graph to a fresh SQLite file and to a
    pickled [nodes, edges, segments, intersections] map.

    Segments are smoothed with Douglas-Peucker before being written; each
    segment produces an outbound and an inbound (reversed) edge list.
    """
    nodes, new_segments, intersections = self.create_nodes_and_new_segments(main_crossings, segments, density_estimate)
    my_nodes = {}
    my_edges = {}
    my_segments = {}
    my_intersections = {}
    # Start from a fresh database file.
    try:
        os.remove(sqlite_filename)
    except OSError:
        pass
    conn = sqlite3.connect(sqlite_filename)
    cur = conn.cursor()
    cur.execute("CREATE TABLE nodes (id INTEGER, latitude FLOAT, longitude FLOAT, weight FLOAT)")
    cur.execute("CREATE TABLE edges (id INTEGER, in_node INTEGER, out_node INTEGER, weight FLOAT)")
    cur.execute("CREATE TABLE segments (id INTEGER, edge_ids TEXT)")
    cur.execute("CREATE TABLE intersections (node_id INTEGER)")
    conn.commit()
    node_id = 0
    edge_id = 0
    segment_id = 0
    for segment in new_segments:
        # Segment weight: mean over interior nodes, or the endpoint mean
        # for two-node segments.
        segment_weight = 0
        if (len(segment) > 2):
            for i in range(1, len(segment) - 1):
                segment_weight += segment[i].weight
            segment_weight /= float(len(segment) - 2)
        else:
            segment_weight = float(segment[0].weight + segment[1].weight) / 2.0
        # remove unnecessary intermediate points with Douglas-Peucker
        # smoothed_segment = douglas_peucker(segment, 10)
        smoothed_segment = douglas_peucker(segment, 3)
        # Assign ids lazily: a node shared by several segments is only
        # inserted once.
        # NOTE(review): values are interpolated into the SQL text; safe
        # here since they are numeric, but parameterized queries would be
        # more robust.
        for node in smoothed_segment:
            if (node.id is None):
                node.id = node_id
                my_nodes[node.id]=[node.latitude, node.longitude]
                cur.execute("INSERT INTO nodes VALUES (" + str(node.id) + "," + str(node.latitude) + "," + str(node.longitude) + "," + str(node.weight) + ")")
                node_id += 1
        outbound_segment_edge_ids = []
        for i in range(0, len(smoothed_segment) - 1):
            my_edges[edge_id] = [smoothed_segment[i].id, smoothed_segment[i + 1].id]
            cur.execute("INSERT INTO edges VALUES (" + str(edge_id) + "," + str(smoothed_segment[i].id) + "," + str(smoothed_segment[i + 1].id) + "," + str(segment_weight) + ")")
            outbound_segment_edge_ids.append(edge_id)
            edge_id += 1
        inbound_segment_edge_ids = []
        for i in range(0, len(smoothed_segment) - 1):
            #my_edges[edge_id] = [smoothed_segment[i+1].id, smoothed_segment[i].id] # One Way
            cur.execute("INSERT INTO edges VALUES (" + str(edge_id) + "," + str(smoothed_segment[i + 1].id) + "," + str(smoothed_segment[i].id) + "," + str(segment_weight) + ")")
            inbound_segment_edge_ids.append(edge_id)
            # NOTE(review): the increment below is commented out, so every
            # inbound edge row of this segment reuses the same edge_id
            # (which then collides with the next outbound id) — confirm
            # this is intentional (it appears tied to the disabled
            # "One Way" handling above).
            #edge_id += 1
        inbound_segment_edge_ids.reverse()
        # sanity check
        if (len(outbound_segment_edge_ids) != len(inbound_segment_edge_ids)):
            print "ERROR!! Number of inbound and outbound edges are not equal!"
            print len(outbound_segment_edge_ids)
            print len(inbound_segment_edge_ids)
            exit()
        my_segments[segment_id] = outbound_segment_edge_ids
        cur.execute("INSERT INTO segments VALUES (" + str(segment_id) + ",'" + str(outbound_segment_edge_ids) + "')")
        segment_id += 1
        my_segments[segment_id] = inbound_segment_edge_ids
        cur.execute("INSERT INTO segments VALUES (" + str(segment_id) + ",'" + str(inbound_segment_edge_ids) + "')")
        segment_id += 1
    for intersection in intersections:
        my_intersections[intersection.id] = 1
        cur.execute("INSERT INTO intersections VALUES (" + str(intersection.id) + ")")
    conn.commit()
    conn.close()
    my_map = [my_nodes, my_edges, my_segments, my_intersections]
    pickle.dump(my_map, open( output_filename, "wb" ) )
def create_nodes_and_new_segments(self, main_crossings, segments, density_estimate):
density_map = [2**x for x in range(16, 3, -1)] + range(15, 0, -1)
density_map.reverse()
nodes = {}
new_segments = []
intersections = set()
for segment in segments:
new_segment = []
head_node = main_crossings[segment[0]].location
if (head_node not in nodes):
#nodes[head_node] = Node(pixels_to_coords(head_node), density_map[density_estimate[segment[0][0], segment[0][1]] - 1])
nodes[head_node] = Node(pixels_to_coords(head_node), 0)
new_segment = [nodes[head_node]]
intersections.add(nodes[head_node])
for i in range(1, len(segment) - 1):
if (segment[i] not in nodes):
#nodes[segment[i]] = Node(pixels_to_coords(segment[i]), density_map[density_estimate[segment[i][0], segment[i][1]] - 1])
nodes[segment[i]] = Node(pixels_to_coords(segment[i]), 0)
new_segment.append(nodes[segment[i]])
tail_node = main_crossings[segment[-1]].location
if (tail_node not in nodes):
#nodes[tail_node] = Node(pixels_to_coords(tail_node), density_map[density_estimate[segment[-1][0], segment[-1][1]] - 1])
nodes[tail_node] = Node(pixels_to_coords(tail_node), 0)
new_segment.append(nodes[tail_node])
intersections.add(nodes[tail_node])
new_segments.append(new_segment)
return nodes, new_segments, intersections
def find_main_crossings_and_segments(self, skeleton):
    """Cluster crossing pixels (value 2) into MainCrossings and trace the
    road segments (value-1 pixel runs) between them.

    Skeleton pixel values used as state: 1 = road, 2 = unvisited crossing,
    3 = consumed crossing, -1 = temporary "do not return" marker, 0 =
    consumed road pixel (set by find_edge_nodes).

    :return: (main_crossings, segments) where main_crossings maps each
        crossing pixel to its cluster's MainCrossing.
    """
    crossing_pixels = np.where(skeleton == 2)
    print "crossing_pixels: " + str(len(crossing_pixels[0]))
    curr_count = 1
    total_count = len(crossing_pixels[0])
    main_crossings = {}
    segments = []
    for (i, j) in izip(crossing_pixels[0], crossing_pixels[1]):
        # Progress indicator, every 100 pixels.
        if ((curr_count % 100 == 0) or (curr_count == total_count)):
            sys.stdout.write("\r" + str(curr_count) + "/" + str(total_count) + "... ")
            sys.stdout.flush()
        curr_count += 1
        #
        # begin extended combustion (to consume adjacent intersection pixels)
        #
        # Flood-fill (8-connected) all crossing pixels adjacent to (i, j),
        # marking them 3 so they are consumed exactly once.
        crossing_stack = []
        combusting_queue = deque([])
        if (skeleton[i][j] == 2):
            skeleton[i][j] = 3
            combusting_queue.appendleft((i, j))
        else:
            # Already consumed by an earlier cluster; it must then be
            # registered in main_crossings.
            if ((i, j) not in main_crossings):
                print "ERROR!! (" + str(i) + "," + str(j) + ") not in main_crossings!"
                exit()
        while (len(combusting_queue) > 0):
            current_crossing = combusting_queue.pop()
            crossing_stack.append(current_crossing)
            (m, n) = current_crossing
            # north
            if (skeleton[m - 1][n] == 2):
                skeleton[m - 1][n] = 3
                combusting_queue.appendleft((m - 1, n))
            # north-east
            if (skeleton[m - 1][n + 1] == 2):
                skeleton[m - 1][n + 1] = 3
                combusting_queue.appendleft((m - 1, n + 1))
            # east
            if (skeleton[m][n + 1] == 2):
                skeleton[m][n + 1] = 3
                combusting_queue.appendleft((m, n + 1))
            # south-east
            if (skeleton[m + 1][n + 1] == 2):
                skeleton[m + 1][n + 1] = 3
                combusting_queue.appendleft((m + 1, n + 1))
            # south
            if (skeleton[m + 1][n] == 2):
                skeleton[m + 1][n] = 3
                combusting_queue.appendleft((m + 1, n))
            # south-west
            if (skeleton[m + 1][n - 1] == 2):
                skeleton[m + 1][n - 1] = 3
                combusting_queue.appendleft((m + 1, n - 1))
            # west
            if (skeleton[m][n - 1] == 2):
                skeleton[m][n - 1] = 3
                combusting_queue.appendleft((m, n - 1))
            # north-west
            if (skeleton[m - 1][n - 1] == 2):
                skeleton[m - 1][n - 1] = 3
                combusting_queue.appendleft((m - 1, n - 1))
        # Register the cluster: every member pixel maps to one MainCrossing.
        if (len(crossing_stack) > 0):
            new_main_crossing = MainCrossing(crossing_stack)
            for crossing in crossing_stack:
                main_crossings[crossing] = new_main_crossing
        #
        # end extended combustion (all adjacent intersection pixels consumed)
        #
        # mark current crossing point as "do not return"
        skeleton[i][j] = -1
        # Trace a segment along each of the 8 directions that start with a
        # road pixel; each trace consumes the pixels it visits.
        # north
        if (skeleton[i - 1][j] == 1):
            edge_nodes, skeleton = self.find_edge_nodes((i - 1, j), skeleton, [(i, j)])
            if edge_nodes != []: segments.append(edge_nodes)
        # north-east
        if (skeleton[i - 1][j + 1] == 1):
            edge_nodes, skeleton = self.find_edge_nodes((i - 1, j + 1), skeleton, [(i, j)])
            if edge_nodes != []: segments.append(edge_nodes)
        # east
        if (skeleton[i][j + 1] == 1):
            edge_nodes, skeleton = self.find_edge_nodes((i, j + 1), skeleton, [(i, j)])
            if edge_nodes != []: segments.append(edge_nodes)
        # south-east
        if (skeleton[i + 1][j + 1] == 1):
            edge_nodes, skeleton = self.find_edge_nodes((i + 1, j + 1), skeleton, [(i, j)])
            if edge_nodes != []: segments.append(edge_nodes)
        # south
        if (skeleton[i + 1][j] == 1):
            edge_nodes, skeleton = self.find_edge_nodes((i + 1, j), skeleton, [(i, j)])
            if edge_nodes != []: segments.append(edge_nodes)
        # south-west
        if (skeleton[i + 1][j - 1] == 1):
            edge_nodes, skeleton = self.find_edge_nodes((i + 1, j - 1), skeleton, [(i, j)])
            if edge_nodes != []: segments.append(edge_nodes)
        # west
        if (skeleton[i][j - 1] == 1):
            edge_nodes, skeleton = self.find_edge_nodes((i, j - 1), skeleton, [(i, j)])
            if edge_nodes != []: segments.append(edge_nodes)
        # north-west
        if (skeleton[i - 1][j - 1] == 1):
            edge_nodes, skeleton = self.find_edge_nodes((i - 1, j - 1), skeleton, [(i, j)])
            if edge_nodes != []: segments.append(edge_nodes)
        # reset crossing point value
        skeleton[i][j] = 3
    print "done."
    #imsave("no_edges_skeleton.png", skeleton)
    return main_crossings, segments
def find_edge_nodes(self, start_location, skeleton, edge_nodes):
queue = deque([])
queue.appendleft(start_location)
(i, j) = start_location
skeleton[i][j] = 0
while (len(queue) > 0):
curr_location = queue.pop()
edge_nodes.append(curr_location)
(i, j) = curr_location
# north
if (skeleton[i - 1][j] == 1):
skeleton[i - 1][j] = 0
queue.appendleft((i - 1, j))
# east
if (skeleton[i][j + 1] == 1):
skeleton[i][j + 1] = 0
queue.appendleft((i, j + 1))
# south
if (skeleton[i + 1][j] == 1):
skeleton[i + 1][j] = 0
queue.appendleft((i + 1, j))
# west
if (skeleton[i][j - 1] == 1):
skeleton[i][j - 1] = 0
queue.appendleft((i, j - 1))
# north-east
if (skeleton[i - 1][j + 1] == 1):
skeleton[i - | |
""" drivers for conformer
"""
import numpy
from qcelemental import constants as qcc
import automol
import elstruct
import autofile
import moldr
from automol.convert import _pyx2z
WAVEN2KCAL = qcc.conversion_factor('wavenumber', 'kcal/mol')
EH2KCAL = qcc.conversion_factor('hartree', 'kcal/mol')
def conformer_sampling(
        spc_info, thy_level, thy_save_fs, cnf_run_fs, cnf_save_fs, script_str,
        overwrite, saddle=False, nsamp_par=(False, 3, 3, 1, 50, 50),
        tors_names='', dist_info=None, two_stage=False, **kwargs):
    """ Find the minimum energy conformer by optimizing from nsamp random
    initial torsional states.

    :param spc_info: species info; spc_info[0] is the InChI string
    :param thy_level: theory spec; thy_level[1:4] indexes the theory fs
    :param thy_save_fs: filesystem holding the reference geometry/zmatrix
    :param cnf_run_fs: conformer run filesystem
    :param cnf_save_fs: conformer save filesystem
    :param script_str: electronic-structure submission script
    :param overwrite: rerun jobs even if results exist
    :param saddle: True when sampling around a transition state
    :param nsamp_par: parameters for moldr.util.nsamp_init
    :param tors_names: torsion names (must be supplied when saddle=True)
    :param dist_info: (dist_name, dist_len, ...) for TS validation;
        defaults to an empty list.  (The original signature used the
        mutable default ``dist_info=[]``.)
    :param two_stage: run a torsion-frozen optimization before the full one
    """
    if dist_info is None:
        dist_info = []
    ich = spc_info[0]
    if not saddle:
        # Minimum: reference geometry lives at the theory leaf; torsions
        # are derived from it.
        geo = thy_save_fs.leaf.file.geometry.read(thy_level[1:4])
        tors_names = automol.geom.zmatrix_torsion_coordinate_names(geo)
        zma = automol.geom.zmatrix(geo)
    else:
        # Saddle point: geometry/zmatrix are stored at the trunk and the
        # torsion names come from the caller.
        geo = thy_save_fs.trunk.file.geometry.read()
        zma = thy_save_fs.trunk.file.zmatrix.read()
    tors_ranges = tuple((0, 2*numpy.pi) for tors in tors_names)
    tors_range_dct = dict(zip(tors_names, tors_ranges))
    # The effective number of torsional degrees of freedom sets nsamp.
    if not saddle:
        gra = automol.inchi.graph(ich)
        ntaudof = len(automol.graph.rotational_bond_keys(gra, with_h_rotors=False))
    else:
        ntaudof = len(tors_names)
    nsamp = moldr.util.nsamp_init(nsamp_par, ntaudof)
    # Harvest any conformers already sitting in the run filesystem ...
    save_conformers(
        cnf_run_fs=cnf_run_fs,
        cnf_save_fs=cnf_save_fs,
        saddle=saddle,
        dist_info=dist_info
    )
    run_conformers(
        zma=zma,
        spc_info=spc_info,
        thy_level=thy_level,
        nsamp=nsamp,
        tors_range_dct=tors_range_dct,
        cnf_run_fs=cnf_run_fs,
        cnf_save_fs=cnf_save_fs,
        script_str=script_str,
        overwrite=overwrite,
        saddle=saddle,
        two_stage=two_stage,
        **kwargs,
    )
    # ... and the freshly generated ones afterwards.
    save_conformers(
        cnf_run_fs=cnf_run_fs,
        cnf_save_fs=cnf_save_fs,
        saddle=saddle,
        dist_info=dist_info
    )
    # Save information about the minimum energy conformer in top directory.
    min_cnf_locs = moldr.util.min_energy_conformer_locators(cnf_save_fs)
    if min_cnf_locs:
        geo = cnf_save_fs.leaf.file.geometry.read(min_cnf_locs)
        zma = cnf_save_fs.leaf.file.zmatrix.read(min_cnf_locs)
        if not saddle:
            assert automol.zmatrix.almost_equal(zma, automol.geom.zmatrix(geo))
            thy_save_fs.leaf.file.geometry.write(geo, thy_level[1:4])
            thy_save_fs.leaf.file.zmatrix.write(zma, thy_level[1:4])
        else:
            thy_save_fs.trunk.file.geometry.write(geo)
            thy_save_fs.trunk.file.zmatrix.write(zma)
        # NOTE(review): the energy is read but its value is unused —
        # possibly a leftover or an implicit existence check; confirm
        # before removing.  (The dead local `coo_names` was removed.)
        ene = cnf_save_fs.leaf.file.energy.read(min_cnf_locs)
def run_conformers(
        zma, spc_info, thy_level, nsamp, tors_range_dct,
        cnf_run_fs, cnf_save_fs, script_str, overwrite, saddle, two_stage,
        **kwargs):
    """ run sampling algorithm to find conformers

    Repeatedly samples random torsional displacements of `zma` and submits
    an optimization for each, until the total sample count recorded in the
    run/save filesystems reaches `nsamp`.  With `two_stage`, a
    torsion-frozen optimization is run first and its result seeds the full
    optimization.
    """
    if not tors_range_dct:
        print("No torsional coordinates. Setting nsamp to 1.")
        nsamp = 1
    print('Number of samples requested:', nsamp)
    cnf_save_fs.trunk.create()
    # The variable matrix identifies the isomer; it must match any
    # previously saved one.
    vma = automol.zmatrix.var_(zma)
    if cnf_save_fs.trunk.file.vmatrix.exists():
        existing_vma = cnf_save_fs.trunk.file.vmatrix.read()
        assert vma == existing_vma
    cnf_save_fs.trunk.file.vmatrix.write(vma)
    idx = 0
    nsamp0 = nsamp
    inf_obj = autofile.system.info.conformer_trunk(0, tors_range_dct)
    # nsampd = number of samples already done, preferring the save fs
    # record over the run fs record.
    if cnf_save_fs.trunk.file.info.exists():
        inf_obj_s = cnf_save_fs.trunk.file.info.read()
        nsampd = inf_obj_s.nsamp
    elif cnf_run_fs.trunk.file.info.exists():
        inf_obj_r = cnf_run_fs.trunk.file.info.read()
        nsampd = inf_obj_r.nsamp
    else:
        nsampd = 0
    while True:
        nsamp = nsamp0 - nsampd
        if nsamp <= 0:
            print('Reached requested number of samples. '
                  'Conformer search complete.')
            break
        else:
            print(" New nsamp requested is {:d}.".format(nsamp))
            # Draw one random torsional sample and give it its own run leaf.
            samp_zma, = automol.zmatrix.samples(zma, 1, tors_range_dct)
            cid = autofile.system.generate_new_conformer_id()
            locs = [cid]
            cnf_run_fs.leaf.create(locs)
            cnf_run_path = cnf_run_fs.leaf.path(locs)
            run_fs = autofile.fs.run(cnf_run_path)
            idx += 1
            print("Run {}/{}".format(nsampd+1, nsamp0))
            tors_names = list(tors_range_dct.keys())
            if two_stage and len(tors_names) > 0:
                # Stage 1: optimize with the sampled torsions held fixed.
                # NOTE(review): `frozen_coordinates=[tors_names]` wraps the
                # list of names in another list — confirm this nesting is
                # what moldr.driver.run_job expects.
                print('Stage one beginning, holding the coordinates constant', tors_names, samp_zma)
                moldr.driver.run_job(
                    job=elstruct.Job.OPTIMIZATION,
                    script_str=script_str,
                    run_fs=run_fs,
                    geom=samp_zma,
                    spc_info=spc_info,
                    thy_level=thy_level,
                    overwrite=overwrite,
                    frozen_coordinates=[tors_names],
                    saddle=saddle,
                    **kwargs
                )
                print('Stage one success, reading for stage 2')
                ret = moldr.driver.read_job(job=elstruct.Job.OPTIMIZATION, run_fs=run_fs)
                if ret:
                    # Stage 2: full optimization starting from the stage-1
                    # optimized zmatrix.
                    sinf_obj, inp_str, out_str = ret
                    prog = sinf_obj.prog
                    samp_zma = elstruct.reader.opt_zmatrix(prog, out_str)
                    print('Stage one success beginning stage two on', samp_zma)
                    moldr.driver.run_job(
                        job=elstruct.Job.OPTIMIZATION,
                        script_str=script_str,
                        run_fs=run_fs,
                        geom=samp_zma,
                        spc_info=spc_info,
                        thy_level=thy_level,
                        overwrite=overwrite,
                        saddle=saddle,
                        **kwargs
                    )
            else:
                # Single-stage: one unconstrained optimization.
                moldr.driver.run_job(
                    job=elstruct.Job.OPTIMIZATION,
                    script_str=script_str,
                    run_fs=run_fs,
                    geom=samp_zma,
                    spc_info=spc_info,
                    thy_level=thy_level,
                    overwrite=overwrite,
                    saddle=saddle,
                    **kwargs
                )
            #moldr.driver.run_job(
            #    job=elstruct.Job.OPTIMIZATION,
            #    script_str=script_str,
            #    run_fs=run_fs,
            #    geom=samp_zma,
            #    spc_info=spc_info,
            #    thy_level=thy_level,
            #    overwrite=overwrite,
            #    **kwargs
            #)
            # Re-read the shared counter before incrementing, in case a
            # concurrent process advanced it; then persist the new count.
            # NOTE(review): this read-modify-write is not atomic — two
            # concurrent samplers can still lose an increment.
            if cnf_save_fs.trunk.file.info.exists():
                inf_obj_s = cnf_save_fs.trunk.file.info.read()
                nsampd = inf_obj_s.nsamp
            elif cnf_run_fs.trunk.file.info.exists():
                inf_obj_r = cnf_run_fs.trunk.file.info.read()
                nsampd = inf_obj_r.nsamp
            nsampd += 1
            inf_obj.nsamp = nsampd
            cnf_save_fs.trunk.file.info.write(inf_obj)
            cnf_run_fs.trunk.file.info.write(inf_obj)
def save_conformers(cnf_run_fs, cnf_save_fs, saddle=False, dist_info=()):
    """ save the conformers that have been found so far

    Reads each conformer optimization from the run filesystem, filters out
    geometries that are disconnected, not unique, or (for saddle points)
    drifted away from the reference forming/breaking bond length, and writes
    the survivors to the save filesystem.

    :param cnf_run_fs: autofile filesystem the conformer jobs were run in
    :param cnf_save_fs: autofile filesystem the conformers are saved to
    :param saddle: treat the species as a transition state (saddle point)
    :param dist_info: (dist_name, dist_len) for the reaction coordinate;
        only read when ``saddle`` is True.  (Was a mutable default ``[]``;
        changed to an immutable tuple — it is only indexed, never mutated.)
    """
    locs_lst = cnf_save_fs.leaf.existing()
    seen_geos = [cnf_save_fs.leaf.file.geometry.read(locs)
                 for locs in locs_lst]
    seen_enes = [cnf_save_fs.leaf.file.energy.read(locs)
                 for locs in locs_lst]
    if not cnf_run_fs.trunk.exists():
        print("No conformers to save. Skipping...")
    else:
        for locs in cnf_run_fs.leaf.existing():
            cnf_run_path = cnf_run_fs.leaf.path(locs)
            run_fs = autofile.fs.run(cnf_run_path)
            print("Reading from conformer run at {}".format(cnf_run_path))

            ret = moldr.driver.read_job(job=elstruct.Job.OPTIMIZATION, run_fs=run_fs)
            if ret:
                inf_obj, inp_str, out_str = ret
                prog = inf_obj.prog
                method = inf_obj.method
                ene = elstruct.reader.energy(prog, method, out_str)
                geo = elstruct.reader.opt_geometry(prog, out_str)
                # Saddle points contain partially formed/broken bonds, so use
                # the graph builder that tolerates long (weak) connections.
                if saddle:
                    gra = automol.geom.weakly_connected_graph(geo)
                else:
                    gra = automol.geom.graph(geo)
                conns = automol.graph.connected_components(gra)
                if len(conns) > 1:
                    # the optimization fell apart into separate fragments
                    print(" - Geometry is disconnected.. Skipping...")
                else:
                    if saddle:
                        # Verify the TS has not wandered off the reaction
                        # coordinate: reject conformers whose key bond length
                        # differs from the reference by more than 0.3.
                        zma = elstruct.reader.opt_zmatrix(prog, out_str)
                        dist_name = dist_info[0]
                        dist_len = dist_info[1]
                        conf_dist_len = automol.zmatrix.values(zma)[dist_name]
                        print('distance test:', dist_len, conf_dist_len)
                        if abs(conf_dist_len - dist_len) > 0.3:
                            print(" - Transition State conformer has diverged from original structure of dist {:.3f} with dist {:.3f}".format(dist_len, conf_dist_len))
                            continue
                    else:
                        zma = automol.geom.zmatrix(geo)
                    unique = is_unique_tors_dist_mat_energy(geo, ene, seen_geos, seen_enes, saddle)
                    if not unique:
                        print(" - Geometry is not unique. Skipping...")
                    else:
                        vma = automol.zmatrix.var_(zma)
                        if cnf_save_fs.trunk.file.vmatrix.exists():
                            existing_vma = cnf_save_fs.trunk.file.vmatrix.read()
                            if vma != existing_vma:
                                print(" - Isomer is not the same as starting isomer. Skipping...")
                            else:
                                save_path = cnf_save_fs.leaf.path(locs)
                                print(" - Geometry is unique. Saving...")
                                print(" - Save path: {}".format(save_path))
                                cnf_save_fs.leaf.create(locs)
                                cnf_save_fs.leaf.file.geometry_info.write(inf_obj, locs)
                                cnf_save_fs.leaf.file.geometry_input.write(inp_str, locs)
                                cnf_save_fs.leaf.file.energy.write(ene, locs)
                                cnf_save_fs.leaf.file.geometry.write(geo, locs)
                                cnf_save_fs.leaf.file.zmatrix.write(zma, locs)
                                # remember what was saved so later conformers
                                # in this same pass are compared against it
                                seen_geos.append(geo)
                                seen_enes.append(ene)
                        # NOTE(review): if the trunk vmatrix file does not
                        # exist yet, a unique conformer is silently dropped —
                        # confirm whether it should be saved instead.

        # update the conformer trajectory file
        moldr.util.traj_sort(cnf_save_fs)
def run_gradient(
        spc_info, thy_level, geo_run_fs, geo_save_fs, locs,
        script_str, overwrite, **kwargs):
    """ Determine the gradient for the geometry in the given location

    Runs (or reuses) a gradient job for the saved geometry at ``locs`` and,
    on success, stores the parsed gradient in the save filesystem.

    :param spc_info: species info passed through to the driver
    :param thy_level: theory level the job is run at
    :param geo_run_fs: autofile filesystem the job runs in
    :param geo_save_fs: autofile filesystem results are saved to
    :param locs: location specifier within both filesystems
    :param script_str: submission script for the electronic structure job
    :param overwrite: rerun the job even if output already exists
    """
    geo_run_path = geo_run_fs.leaf.path(locs)
    geo_save_path = geo_save_fs.leaf.path(locs)
    geo = geo_save_fs.leaf.file.geometry.read(locs)
    run_fs = autofile.fs.run(geo_run_path)

    print('Running gradient')
    moldr.driver.run_job(
        job='gradient',
        script_str=script_str,
        run_fs=run_fs,
        geom=geo,
        spc_info=spc_info,
        thy_level=thy_level,
        overwrite=overwrite,
        **kwargs,
    )

    ret = moldr.driver.read_job(
        job='gradient',
        run_fs=run_fs,
    )
    if ret is not None:
        inf_obj, inp_str, out_str = ret
        # A lone atom has no gradient worth saving.  (The previous version
        # assigned a dead `freqs = ()` here, copied from the hessian routine.)
        if not automol.geom.is_atom(geo):
            print(" - Reading gradient from output...")
            grad = elstruct.reader.gradient(inf_obj.prog, out_str)

            print(" - Saving gradient...")
            print(" - Save path: {}".format(geo_save_path))
            geo_save_fs.leaf.file.gradient_info.write(inf_obj, locs)
            geo_save_fs.leaf.file.gradient_input.write(inp_str, locs)
            geo_save_fs.leaf.file.gradient.write(grad, locs)
def run_hessian(
        spc_info, thy_level, geo_run_fs, geo_save_fs, locs,
        script_str, overwrite, **kwargs):
    """ Determine the hessian for the geometry in the given location

    Runs (or reuses) a hessian job for the saved geometry at ``locs`` and,
    on success, stores the hessian and the harmonic frequencies derived
    from it (``project=False``) in the save filesystem.

    :param spc_info: species info passed through to the driver
    :param thy_level: theory level the job is run at
    :param geo_run_fs: autofile filesystem the job runs in
    :param geo_save_fs: autofile filesystem results are saved to
    :param locs: location specifier within both filesystems
    :param script_str: submission script for the electronic structure job
    :param overwrite: rerun the job even if output already exists
    """
    geo_run_path = geo_run_fs.leaf.path(locs)
    geo_save_path = geo_save_fs.leaf.path(locs)
    geo = geo_save_fs.leaf.file.geometry.read(locs)
    run_fs = autofile.fs.run(geo_run_path)
    print('Running hessian')
    moldr.driver.run_job(
        job='hessian',
        script_str=script_str,
        run_fs=run_fs,
        geom=geo,
        spc_info=spc_info,
        thy_level=thy_level,
        overwrite=overwrite,
        **kwargs,
    )
    ret = moldr.driver.read_job(
        job='hessian',
        run_fs=run_fs,
    )
    if ret is not None:
        inf_obj, inp_str, out_str = ret
        if automol.geom.is_atom(geo):
            # a lone atom has no vibrational modes; nothing is saved in this
            # branch (this `freqs` is never used)
            freqs = ()
        else:
            print(" - Reading hessian from output...")
            hess = elstruct.reader.hessian(inf_obj.prog, out_str)
            # project=False: presumably frequencies without projecting out
            # rotation/translation — confirm against elstruct.util docs
            freqs = elstruct.util.harmonic_frequencies(
                geo, hess, project=False)
            print(" - Saving hessian...")
            print(" - Save path: {}".format(geo_save_path))
            geo_save_fs.leaf.file.hessian_info.write(inf_obj, locs)
            geo_save_fs.leaf.file.hessian_input.write(inp_str, locs)
            geo_save_fs.leaf.file.hessian.write(hess, locs)
            geo_save_fs.leaf.file.harmonic_frequencies.write(freqs, locs)
def run_vpt2(
        spc_info, thy_level, geo_run_fs, geo_save_fs, locs,
        script_str, overwrite, **kwargs):
    """ Perform vpt2 analysis for the geometry in the given location

    Runs (or reuses) a VPT2 anharmonic-analysis job for the saved geometry
    at ``locs`` and stores the parsed results in the save filesystem.

    :param spc_info: species info passed through to the driver
    :param thy_level: theory level the job is run at
    :param geo_run_fs: autofile filesystem the job runs in
    :param geo_save_fs: autofile filesystem results are saved to
    :param locs: location specifier within both filesystems
    :param script_str: submission script for the electronic structure job
    :param overwrite: rerun the job even if output already exists
    """
    geo_run_path = geo_run_fs.leaf.path(locs)
    geo_save_path = geo_save_fs.leaf.path(locs)
    geo = geo_save_fs.leaf.file.geometry.read(locs)
    run_fs = autofile.fs.run(geo_run_path)
    print('Running vpt2')
    moldr.driver.run_job(
        job='vpt2',
        script_str=script_str,
        run_fs=run_fs,
        geom=geo,
        spc_info=spc_info,
        thy_level=thy_level,
        overwrite=overwrite,
        **kwargs,
    )
    ret = moldr.driver.read_job(
        job='vpt2',
        run_fs=run_fs,
    )
    if ret is not None:
        inf_obj, inp_str, out_str = ret
        # atoms have no vibrational structure, so results are only parsed
        # and saved for molecules
        if not automol.geom.is_atom(geo):
            print(" - Reading vpt2 from output...")
            vpt2 = elstruct.reader.vpt2(inf_obj.prog, out_str)
            print(" - Saving vpt2...")
            print(" - Save path: {}".format(geo_save_path))
            # NOTE(review): the attribute chain `file.vpt2.info` /
            # `file.vpt2.input` differs from the `gradient_info` /
            # `hessian_info` naming used by the sibling routines —
            # confirm this matches the autofile schema.
            geo_save_fs.leaf.file.vpt2.info.write(inf_obj, locs)
            geo_save_fs.leaf.file.vpt2.input.write(inp_str, locs)
            geo_save_fs.leaf.file.vpt2.write(vpt2, locs)
def is_unique_coulomb_energy(geo, ene, geo_list, ene_list):
    """ compare given geo with list of geos all to see if any have the same
    coulomb spectrum and energy

    :param geo: reference geometry
    :param ene: energy of the reference geometry
    :param geo_list: previously seen geometries (parallel to ``ene_list``)
    :param ene_list: energies of the previously seen geometries
    :return: True if no match was found, False otherwise
    """
    etol = 2.e-5  # energies closer than this are treated as equal
    for geoi, enei in zip(geo_list, ene_list):
        # only compare geometries when the energies already match
        if abs(ene - enei) < etol:
            if automol.geom.almost_equal_coulomb_spectrum(
                    geo, geoi, rtol=1e-2):
                # a duplicate was found; no need to scan the rest
                return False
    return True
def is_unique_dist_mat_energy(geo, ene, geo_list, ene_list):
    """ compare given geo with list of geos all to see if any have the same
    distance matrix and energy

    :param geo: reference geometry
    :param ene: energy of the reference geometry
    :param geo_list: previously seen geometries (parallel to ``ene_list``)
    :param ene_list: energies of the previously seen geometries
    :return: True if no match was found, False otherwise
    """
    etol = 2.e-5  # energies closer than this are treated as equal
    for geoi, enei in zip(geo_list, ene_list):
        # only compare geometries when the energies already match
        if abs(ene - enei) < etol:
            if automol.geom.almost_equal_dist_mat(
                    geo, geoi, thresh=1e-1):
                # a duplicate was found; no need to scan the rest
                return False
    return True
def int_sym_num_from_sampling(geo, ene, cnf_save_fs, saddle=False, form_coords=[], tors_names=[]):
""" Determine the symmetry number for a given conformer geometry.
First explore the saved conformers to find the list of similar conformers -
i.e. those with a coulomb matrix and energy that are equivalent to those for the
reference geometry. Then expand each of those similar conformers by applying
rotational permutations to each of the terminal groups. Finally count how many
distinct distance matrices there are in the fully expanded conformer list.
"""
# Note, for now we are ignoring for saddle points the possibility that two configurations
# differ only in their torsional values. As a result, the symmetry factor is a lower bound on the
# true symmetry factor
if automol.geom.is_atom(geo):
int_sym_num = 1.
else:
if not saddle:
tors_names = automol.geom.zmatrix_torsion_coordinate_names(geo)
else:
print('tors_names test:', tors_names, len(tors_names))
if len(tors_names) == 0:
int_sym_num = 1.
else:
ethrsh = 1.e-5
locs_lst = cnf_save_fs.leaf.existing()
geo_sim = [geo]
geo_sim_exp = [geo]
ene_sim = [ene]
int_sym_num = 1.
if locs_lst:
enes = [cnf_save_fs.leaf.file.energy.read(locs)
for locs in locs_lst]
geos = [cnf_save_fs.leaf.file.geometry.read(locs)
for locs in locs_lst]
for geoi, enei in zip(geos, enes):
if enei - enes[0] < ethrsh:
geo_lst = [geoi]
ene_lst = [enei]
if not is_unique_coulomb_energy(geo, ene, geo_lst, ene_lst):
geo_sim.append(geoi)
ene_sim.append(enei)
int_sym_num = 0
for idx_i, geo_sim_i in enumerate(geo_sim):
new_geos = automol.geom.rot_permutated_geoms(geo_sim_i, saddle, form_coords)
new_geom = | |
return jnp.where(self.concentration <= 1, jnp.inf, a)
@property
def variance(self):
# var is inf for alpha <= 2
a = (self.rate / (self.concentration - 1)) ** 2 / (self.concentration - 2)
return jnp.where(self.concentration <= 2, jnp.inf, a)
    def tree_flatten(self):
        # Deliberately start the MRO lookup *after* TransformedDistribution,
        # bypassing its tree_flatten so the plain base-class flattening
        # (presumably Distribution's — confirm against the class MRO) is
        # used for this subclass.
        return super(TransformedDistribution, self).tree_flatten()
    def cdf(self, x):
        # Assuming the transform is y -> 1/y (X = 1/Y, Y ~ base_dist):
        # P(X <= x) = P(Y >= 1/x) = 1 - F_Y(1/x).  TODO confirm transform.
        return 1 - self.base_dist.cdf(1 / x)
class Gumbel(Distribution):
    """Gumbel (type-I extreme value) distribution with location ``loc`` and
    positive ``scale``.
    """

    arg_constraints = {"loc": constraints.real, "scale": constraints.positive}
    support = constraints.real
    reparametrized_params = ["loc", "scale"]

    def __init__(self, loc=0.0, scale=1.0, validate_args=None):
        # the batch shape is the broadcast of the two parameter shapes
        batch_shape = lax.broadcast_shapes(jnp.shape(loc), jnp.shape(scale))
        self.loc, self.scale = promote_shapes(loc, scale)
        super().__init__(batch_shape=batch_shape, validate_args=validate_args)

    def sample(self, key, sample_shape=()):
        assert is_prng_key(key)
        shape = sample_shape + self.batch_shape + self.event_shape
        # shift/rescale a standard Gumbel draw
        return self.loc + self.scale * random.gumbel(key, shape=shape)

    @validate_sample
    def log_prob(self, value):
        # standardize, then apply the standard Gumbel log-density
        centered = (value - self.loc) / self.scale
        return -(centered + jnp.exp(-centered)) - jnp.log(self.scale)

    @property
    def mean(self):
        # E[X] = loc + scale * Euler-Mascheroni constant
        return jnp.broadcast_to(
            self.loc + self.scale * EULER_MASCHERONI, self.batch_shape
        )

    @property
    def variance(self):
        # Var[X] = (pi^2 / 6) * scale^2
        return jnp.broadcast_to(jnp.pi ** 2 / 6.0 * self.scale ** 2, self.batch_shape)

    def cdf(self, value):
        neg_standardized = (self.loc - value) / self.scale
        return jnp.exp(-jnp.exp(neg_standardized))

    def icdf(self, q):
        # quantile function: loc - scale * log(-log(q))
        double_log = jnp.log(-jnp.log(q))
        return self.loc - self.scale * double_log
class Laplace(Distribution):
    """Laplace (double-exponential) distribution with location ``loc`` and
    positive ``scale``.
    """

    arg_constraints = {"loc": constraints.real, "scale": constraints.positive}
    support = constraints.real
    reparametrized_params = ["loc", "scale"]

    def __init__(self, loc=0.0, scale=1.0, validate_args=None):
        # the batch shape is the broadcast of the two parameter shapes
        batch_shape = lax.broadcast_shapes(jnp.shape(loc), jnp.shape(scale))
        self.loc, self.scale = promote_shapes(loc, scale)
        super().__init__(batch_shape=batch_shape, validate_args=validate_args)

    def sample(self, key, sample_shape=()):
        assert is_prng_key(key)
        shape = sample_shape + self.batch_shape + self.event_shape
        # shift/rescale a standard Laplace draw
        return self.loc + random.laplace(key, shape=shape) * self.scale

    @validate_sample
    def log_prob(self, value):
        # log p(x) = -|x - loc| / scale - log(2 * scale)
        scaled_abs_dev = jnp.abs(value - self.loc) / self.scale
        return -scaled_abs_dev - jnp.log(2 * self.scale)

    @property
    def mean(self):
        return jnp.broadcast_to(self.loc, self.batch_shape)

    @property
    def variance(self):
        # Var[X] = 2 * scale^2
        return jnp.broadcast_to(2 * self.scale ** 2, self.batch_shape)

    def cdf(self, value):
        standardized = (value - self.loc) / self.scale
        return 0.5 - 0.5 * jnp.sign(standardized) * jnp.expm1(-jnp.abs(standardized))

    def icdf(self, q):
        shifted = q - 0.5
        return self.loc - self.scale * jnp.sign(shifted) * jnp.log1p(-2 * jnp.abs(shifted))
class LKJ(TransformedDistribution):
    r"""
    LKJ distribution for correlation matrices. The distribution is controlled by ``concentration``
    parameter :math:`\eta` to make the probability of the correlation matrix :math:`M` proportional
    to :math:`\det(M)^{\eta - 1}`. Because of that, when ``concentration == 1``, we have a
    uniform distribution over correlation matrices.

    When ``concentration > 1``, the distribution favors samples with large determinant. This
    is useful when we know a priori that the underlying variables are not correlated.

    When ``concentration < 1``, the distribution favors samples with small determinant. This is
    useful when we know a priori that some underlying variables are correlated.

    Sample code for using LKJ in the context of multivariate normal sample::

        def model(y):  # y has dimension N x d
            d = y.shape[1]
            N = y.shape[0]
            # Vector of variances for each of the d variables
            theta = numpyro.sample("theta", dist.HalfCauchy(jnp.ones(d)))

            concentration = jnp.ones(1)  # Implies a uniform distribution over correlation matrices
            corr_mat = numpyro.sample("corr_mat", dist.LKJ(d, concentration))
            sigma = jnp.sqrt(theta)
            # we can also use a faster formula `cov_mat = jnp.outer(theta, theta) * corr_mat`
            cov_mat = jnp.matmul(jnp.matmul(jnp.diag(sigma), corr_mat), jnp.diag(sigma))

            # Vector of expectations
            mu = jnp.zeros(d)

            with numpyro.plate("observations", N):
                obs = numpyro.sample("obs", dist.MultivariateNormal(mu, covariance_matrix=cov_mat), obs=y)
            return obs

    :param int dimension: dimension of the matrices
    :param ndarray concentration: concentration/shape parameter of the
        distribution (often referred to as eta)
    :param str sample_method: Either "cvine" or "onion". Both methods are proposed in [1] and
        offer the same distribution over correlation matrices. But they are different in how
        to generate samples. Defaults to "onion".

    **References**

    [1] `Generating random correlation matrices based on vines and extended onion method`,
    Daniel Lewandowski, Dorota Kurowicka, Harry Joe
    """
    arg_constraints = {"concentration": constraints.positive}
    reparametrized_params = ["concentration"]
    support = constraints.corr_matrix

    def __init__(
        self, dimension, concentration=1.0, sample_method="onion", validate_args=None
    ):
        # Sampling is delegated to LKJCholesky; this class maps the sampled
        # Cholesky factor back to a full correlation matrix via the inverse
        # Cholesky transform.
        base_dist = LKJCholesky(dimension, concentration, sample_method)
        self.dimension, self.concentration = (
            base_dist.dimension,
            base_dist.concentration,
        )
        self.sample_method = sample_method
        super(LKJ, self).__init__(
            base_dist, CorrMatrixCholeskyTransform().inv, validate_args=validate_args
        )

    @property
    def mean(self):
        # the mean of the LKJ distribution is the identity matrix
        return jnp.broadcast_to(
            jnp.identity(self.dimension),
            self.batch_shape + (self.dimension, self.dimension),
        )

    def tree_flatten(self):
        # only `concentration` is a traced leaf; dimension and sample_method
        # are static auxiliary data
        return (self.concentration,), (self.dimension, self.sample_method)

    @classmethod
    def tree_unflatten(cls, aux_data, params):
        dimension, sample_method = aux_data
        return cls(dimension, *params, sample_method=sample_method)
class LKJCholesky(Distribution):
r"""
LKJ distribution for lower Cholesky factors of correlation matrices. The distribution is
controlled by ``concentration`` parameter :math:`\eta` to make the probability of the
    correlation matrix :math:`M` generated from a Cholesky factor proportional to
:math:`\det(M)^{\eta - 1}`. Because of that, when ``concentration == 1``, we have a
uniform distribution over Cholesky factors of correlation matrices.
    When ``concentration > 1``, the distribution favors samples with large diagonal entries
    (hence large determinant). This is useful when we know a priori that the underlying
    variables are not correlated.
    When ``concentration < 1``, the distribution favors samples with small diagonal entries
    (hence small determinant). This is useful when we know a priori that some underlying
variables are correlated.
Sample code for using LKJCholesky in the context of multivariate normal sample::
def model(y): # y has dimension N x d
d = y.shape[1]
N = y.shape[0]
# Vector of variances for each of the d variables
theta = numpyro.sample("theta", dist.HalfCauchy(jnp.ones(d)))
# Lower cholesky factor of a correlation matrix
concentration = jnp.ones(1) # Implies a uniform distribution over correlation matrices
L_omega = numpyro.sample("L_omega", dist.LKJCholesky(d, concentration))
# Lower cholesky factor of the covariance matrix
sigma = jnp.sqrt(theta)
# we can also use a faster formula `L_Omega = sigma[..., None] * L_omega`
L_Omega = jnp.matmul(jnp.diag(sigma), L_omega)
# Vector of expectations
mu = jnp.zeros(d)
with numpyro.plate("observations", N):
obs = numpyro.sample("obs", dist.MultivariateNormal(mu, scale_tril=L_Omega), obs=y)
return obs
:param int dimension: dimension of the matrices
:param ndarray concentration: concentration/shape parameter of the
distribution (often referred to as eta)
:param str sample_method: Either "cvine" or "onion". Both methods are proposed in [1] and
offer the same distribution over correlation matrices. But they are different in how
to generate samples. Defaults to "onion".
**References**
    [1] `Generating random correlation matrices based on vines and extended onion method`,
    Daniel Lewandowski, Dorota Kurowicka, Harry Joe
"""
arg_constraints = {"concentration": constraints.positive}
reparametrized_params = ["concentration"]
support = constraints.corr_cholesky
    def __init__(
        self, dimension, concentration=1.0, sample_method="onion", validate_args=None
    ):
        """Set up the Beta distributions used to draw partial correlations
        for either the "onion" or the "cvine" sampler.

        :raises ValueError: if ``dimension < 2`` or ``sample_method`` is not
            one of ``"cvine"`` / ``"onion"``.
        """
        if dimension < 2:
            raise ValueError("Dimension must be greater than or equal to 2.")
        self.dimension = dimension
        self.concentration = concentration
        batch_shape = jnp.shape(concentration)
        event_shape = (dimension, dimension)

        # We construct base distributions to generate samples for each method.
        # The purpose of this base distribution is to generate a distribution for
        # correlation matrices which is proportional to `det(M)^{\eta - 1}`.
        # (note that this is not a unique way to define base distribution)
        # Both of the following methods have marginal distribution of each off-diagonal
        # element of sampled correlation matrices is Beta(eta + (D-2) / 2, eta + (D-2) / 2)
        # (up to a linear transform: x -> 2x - 1)
        Dm1 = self.dimension - 1
        marginal_concentration = concentration + 0.5 * (self.dimension - 2)
        offset = 0.5 * jnp.arange(Dm1)
        if sample_method == "onion":
            # The following construction follows from the algorithm in Section 3.2 of [1]:
            # NB: in [1], the method for case k > 1 can also work for the case k = 1.
            beta_concentration0 = (
                jnp.expand_dims(marginal_concentration, axis=-1) - offset
            )
            beta_concentration1 = offset + 0.5
            self._beta = Beta(beta_concentration1, beta_concentration0)
        elif sample_method == "cvine":
            # The following construction follows from the algorithm in Section 2.4 of [1]:
            # offset_tril is [0, 1, 1, 2, 2, 2,...] / 2
            offset_tril = matrix_to_tril_vec(jnp.broadcast_to(offset, (Dm1, Dm1)))
            beta_concentration = (
                jnp.expand_dims(marginal_concentration, axis=-1) - offset_tril
            )
            self._beta = Beta(beta_concentration, beta_concentration)
        else:
            raise ValueError("`method` should be one of 'cvine' or 'onion'.")
        self.sample_method = sample_method

        super(LKJCholesky, self).__init__(
            batch_shape=batch_shape,
            event_shape=event_shape,
            validate_args=validate_args,
        )
def _cvine(self, key, size):
# C-vine method first uses beta_dist to generate partial correlations,
# then apply signed stick breaking to transform to cholesky factor.
# Here is an attempt to prove that using signed stick breaking to
# generate correlation matrices is the same as the C-vine method in [1]
# for the entry r_32.
#
# With notations follow from [1], we define
# p: partial correlation matrix,
# c: cholesky factor,
# r: correlation matrix.
# From recursive formula (2) in [1], we have
# r_32 | |
import re
from datetime import datetime
from aiohttp import web
from sqlalchemy.sql import func, or_
from sqlalchemy.orm import aliased
from ..app import app, logger
from ..model.build import Build, BUILD_STATES, DATETIME_FORMAT
from ..model.sourcerepository import SourceRepository
from ..model.project import Project
from ..model.projectversion import ProjectVersion
from ..model.maintainer import Maintainer
from ..tools import paginate, ErrorResponse
from ..molior.queues import enqueue_task
@app.http_get("/api/builds")
async def get_builds(request):
"""
Returns a list of builds.
---
description: Returns a list of builds.
tags:
- Builds
parameters:
- name: search
in: query
required: false
type: string
- name: page
in: query
required: false
type: integer
- name: page_size
in: query
required: false
type: integer
- name: from
in: query
required: false
type: datetime
- name: to
in: query
required: false
type: datetime
- name: currently_failing
in: query
required: false
type: boolean
- name: count_only
in: query
required: false
type: boolean
- name: project_version_id
in: query
required: false
type: integer
- name: sourcerepository_id
in: query
required: false
type: integer
- name: project_id
in: query
required: false
type: integer
- name: architecture
in: query
required: false
type: string
- name: distrelease
in: query
required: false
type: string
- name: buildvariant
in: query
required: false
type: string
- name: buildvariant_id
in: query
required: false
type: integer
- name: sourcerepository
in: query
required: false
type: string
- name: buildstate
in: query
required: false
type: array
- name: startstamp
in: query
required: false
type: string
- name: version
in: query
required: false
type: string
- name: maintainer
in: query
required: false
type: string
- name: project
in: query
required: false
type: string
produces:
- text/json
"""
search = request.GET.getone("search", None)
search_project = request.GET.getone("search_project", None)
project = request.GET.getone("project", None)
maintainer = request.GET.getone("maintainer", None)
commit = request.GET.getone("commit", None)
# FIXME:
# buildvariant = request.GET.getone("buildvariant", None)
# buildvariant_id = request.GET.getone("buildvariant_id", None)
architecture = request.GET.getone("architecture", None)
distrelease = request.GET.getone("distrelease", None)
version = request.GET.getone("version", None)
sourcerepository_name = request.GET.getone("sourcerepository", None)
startstamp = request.GET.getone("startstamp", None)
buildstates = request.GET.getall("buildstate", [])
try:
project_version_id = int(request.GET.getone("project_version_id"))
except (ValueError, KeyError):
project_version_id = None
# try:
# buildvariant_id = int(request.GET.getone("buildvariant_id"))
# except (ValueError, KeyError):
# buildvariant_id = None
try:
project_id = int(request.GET.getone("project_id"))
except (ValueError, KeyError):
project_id = None
try:
from_date = datetime.strptime(request.GET.getone("from"), "%Y-%m-%d %H:%M:%S")
except (ValueError, KeyError):
from_date = None
try:
to_date = datetime.strptime(request.GET.getone("to"), "%Y-%m-%d %H:%M:%S")
except (ValueError, KeyError):
to_date = None
try:
count_only = request.GET.getone("count_only").lower() == "true"
except (ValueError, KeyError):
count_only = False
try:
sourcerepository_id = int(request.GET.getone("sourcerepository_id"))
except (ValueError, KeyError):
sourcerepository_id = None
db = request.cirrina.db_session
builds = db.query(Build).outerjoin(Build.maintainer)
if sourcerepository_id:
builds = builds.filter(Build.sourcerepository_id == sourcerepository_id)
if project_id:
builds = builds.filter(Build.projectversion.project.id == project_id)
if project_version_id:
builds = builds.filter(Build.projectversion.id == project_version_id)
if from_date:
builds = builds.filter(Build.startstamp > from_date)
if to_date:
builds = builds.filter(Build.startstamp < to_date)
if distrelease:
builds = builds.filter(Project.name.ilike("%{}%".format(distrelease)))
# if buildvariant:
# buildvariant_ids = [
# b.id
# for b in (
# request.cirrina.db_session.query(BuildVariant.id)
# .join(ProjectVersion)
# .join(Project)
# .join(Architecture)
# .filter(
# BuildVariant.name.like("%{}%".format(buildvariant))
# )
# .distinct()
# )
# ]
# builds = builds.filter(BuildVariant.id.in_(buildvariant_ids))
#
# if buildvariant_id:
# builds = builds.filter(BuildVariant.id == buildvariant_id)
builds = builds.filter(Build.is_deleted.is_(False))
if search:
terms = re.split("[/ ]", search)
for term in terms:
if not term:
continue
builds = builds.filter(or_(
Build.sourcename.ilike("%{}%".format(term)),
Build.version.ilike("%{}%".format(term)),
Build.architecture.ilike("%{}%".format(term)),
))
if search_project:
builds = builds.join(ProjectVersion).join(Project)
terms = re.split("[/ ]", search_project)
for term in terms:
if not term:
continue
builds = builds.filter(Project.is_mirror.is_(False), or_(
ProjectVersion.name.ilike("%{}%".format(term)),
Project.name.ilike("%{}%".format(term)),
))
projectversion = None
if project:
if "/" not in project:
return ErrorResponse(400, "Project not found")
project_name, project_version = project.split("/", 1)
projectversion = db.query(ProjectVersion).join(Project).filter(
Project.is_mirror.is_(False),
func.lower(Project.name) == project_name.lower(),
func.lower(ProjectVersion.name) == project_version.lower(),
).first()
if projectversion:
builds = builds.join(ProjectVersion).filter(ProjectVersion.id == projectversion.id)
# do not shot snapshot builds, except for snapshot projects
if not projectversion or projectversion.projectversiontype != "snapshot":
builds = builds.filter(Build.snapshotbuild_id.is_(None))
# FIXME:
if version:
builds = builds.filter(Build.version.like("%{}%".format(version)))
if maintainer:
builds = builds.filter(Maintainer.fullname.ilike("%{}%".format(maintainer)))
if commit:
builds = builds.filter(Build.git_ref.like("%{}%".format(commit)))
if architecture:
builds = builds.filter(Build.architecture.like("%{}%".format(architecture)))
if sourcerepository_name:
builds = builds.filter(or_(Build.sourcename.like("%{}%.format(sourcerepository_name)"),
Build.sourcerepository.url.like("%/%{}%.git".format(sourcerepository_name))))
if startstamp:
builds = builds.filter(func.to_char(Build.startstamp, "YYYY-MM-DD HH24:MI:SS").contains(startstamp))
if buildstates and set(buildstates).issubset(set(BUILD_STATES)):
builds = builds.filter(or_(*[Build.buildstate == buildstate for buildstate in buildstates]))
if search or search_project or project:
# make sure parents and grandparents are invited
child_cte = builds.cte(name='childs')
parentbuilds = request.cirrina.db_session.query(Build).filter(Build.id == child_cte.c.parent_id)
parent_cte = parentbuilds.cte(name='parents')
grandparentbuilds = request.cirrina.db_session.query(Build).filter(Build.id == parent_cte.c.parent_id)
builds = builds.union(parentbuilds, grandparentbuilds)
nb_builds = builds.count()
# sort hierarchically
# select id, parent_id, sourcename, buildtype, (select b2.parent_id from build b2
# where b2.id = b. parent_id) as grandparent_id, coalesce(parent_id, id, 7) from
# build b order by coalesce((select b2.parent_id from build b2 where b2.id = b.
# parent_id), b.parent_id, b.id)desc , b.id;
parent = aliased(Build)
builds = builds.outerjoin(parent, parent.id == Build.parent_id)
builds = builds.order_by(func.coalesce(parent.parent_id, Build.parent_id, Build.id).desc(), Build.id)
builds = paginate(request, builds)
data = {"total_result_count": nb_builds, "results": []}
if not count_only:
for build in builds:
data["results"].append(build.data())
return web.json_response(data)
@app.http_get("/api2/build/{build_id:\\d+}")
@app.http_get("/api/builds/{build_id:\\d+}")
@app.authenticated
async def get_build(request):
"""
Returns a build.
---
description: Returns a build.
tags:
- Builds
consumes:
- application/x-www-form-urlencoded
parameters:
- name: build_id
in: path
required: true
type: integer
produces:
- text/json
responses:
"200":
description: successful
"500":
description: internal server error
"""
build_id = request.match_info["build_id"]
try:
build_id = int(build_id)
except (ValueError, TypeError):
return web.Response(text="Incorrect value for build_id", status=400)
build = request.cirrina.db_session.query(Build).filter(Build.id == build_id).first()
if not build:
return web.Response(text="Build not found", status=400)
maintainer = str()
if build.maintainer:
maintainer = "{} {}".format(
build.maintainer.firstname, build.maintainer.surname
)
project = {}
if build.projectversion:
project = {"id": build.projectversion.project.id,
"name": build.projectversion.project.name,
"is_mirror": build.projectversion.project.is_mirror,
"version": {"id": build.projectversion.id,
"name": build.projectversion.name,
"is_locked": build.projectversion.is_locked}}
data = {
"id": build.id,
"buildstate": build.buildstate,
"buildtype": build.buildtype,
"startstamp": build.startstamp.strftime(DATETIME_FORMAT) if build.startstamp else "",
"endstamp": build.endstamp.strftime(DATETIME_FORMAT) if build.endstamp else "",
"version": build.version,
"maintainer": maintainer,
"sourcename": build.sourcename,
# "can_rebuild": build.can_rebuild(request.cirrina.web_session, request.cirrina.db_session),
"branch": build.ci_branch,
"git_ref": build.git_ref,
"architecture": build.architecture,
"project": project
}
if build.sourcerepository:
data.update(
{
"sourcerepository": {
"name": build.sourcerepository.name,
"url": build.sourcerepository.url,
"id": build.sourcerepository.id,
}
}
)
if build.projectversion:
basemirror_name = ""
basemirror_version = ""
buildvariant = ""
arch = ""
if build.projectversion.basemirror:
basemirror_name = build.projectversion.basemirror.project.name
basemirror_version = build.projectversion.basemirror.name
if build.architecture:
arch = build.architecture
buildvariant = basemirror_name + "-" + basemirror_version + "/" + arch
data.update(
{
"buildvariant": {
"architecture": {
"name": build.architecture,
},
"base_mirror": {
"name": basemirror_name,
"version": basemirror_version
},
"name": buildvariant
}
}
)
return web.json_response(data)
@app.http_put("/api2/build/{build_id}")
@app.http_put("/api/builds/{build_id}")
@app.authenticated
# FIXME: req_role
async def rebuild_build(request):
"""
Rebuild a failed build
---
description: Delete a build from database.
tags:
- Builds
consumes:
- application/x-www-form-urlencoded
parameters:
- name: build_id
in: path
required: true
type: integer
produces:
- text/json
responses:
"200":
description: successful
"500":
description: internal server error
"""
build_id = request.match_info["build_id"]
try:
build_id = int(build_id)
except (ValueError, TypeError):
return web.Response(text="Incorrect value for build_id", status=400)
logger.info("rebuilding build %d" % build_id)
build = request.cirrina.db_session.query(Build).filter(Build.id == build_id).first()
if not build:
logger.error("build %d not found" % build_id)
return web.Response(text="Build not found", status=400)
if not build.can_rebuild(request.cirrina.web_session, request.cirrina.db_session):
logger.error("build %d cannot be rebuilt" % build_id)
return web.Response(text="This build cannot be rebuilt", status=400)
args = {"rebuild": [build_id]}
await enqueue_task(args)
return web.json_response("Rebuild triggered")
@app.http_post("/api/build")
# @app.authenticated
async def trigger_build(request):
"""
Triggers a build.
---
description: Triggers a build
tags:
- TriggerBuild
consumes:
- application/x-www-form-urlencoded
parameters:
- name: repository
in: body
required: true
type: string
- name: git_ref
in: body
required: false
type: string
- name: git_branch
in: body
required: false
type: string
produces:
- text/json
responses:
"200":
description: successful
"500":
description: internal server error
"""
data = await request.json()
repository = data.get("repository")
git_ref = data.get("git_ref")
git_branch = data.get("git_branch")
targets = data.get("targets")
force_ci = data.get("force_ci")
maintenance_mode = False
query = "SELECT value from metadata where name = :key"
result = request.cirrina.db_session.execute(query, {"key": "maintenance_mode"})
for value in result:
if value[0] == "true":
maintenance_mode = True
break
if maintenance_mode:
return web.Response(status=503, text="Maintenance Mode")
if not repository:
return web.Response(text="Bad Request", status=400)
repo = request.cirrina.db_session.query(SourceRepository).filter(SourceRepository.url == repository).first()
if not repo:
return web.Response(text="Repo not found", status=400)
repo.log_state("build triggered: %s(%s) force_ci=%s, targets=%s" % (git_ref, git_branch, force_ci, str(targets)))
build = Build(
version=None,
git_ref=git_ref,
ci_branch=git_branch,
is_ci=None,
sourcename=repo.name,
buildstate="new",
buildtype="build",
sourcerepository=repo,
maintainer=None,
)
request.cirrina.db_session.add(build)
request.cirrina.db_session.commit()
await build.build_added()
if git_ref == "":
args = {"buildlatest": [repo.id, build.id]}
else:
args = {"build": [build.id, repo.id, git_ref, git_branch, targets, force_ci]}
await enqueue_task(args)
return web.json_response({"build_id": str(build.id)})
@app.http_get("/api/build/{build_id}")
async def get_build_info(request):
"""
Gets build task info.
---
description: Returns a list of builds.
tags:
- Builds
consumes:
- application/x-www-form-urlencoded
parameters:
- name: build_id
in: query
required: true
type: string
produces:
- text/json
responses:
"200":
description: successful
"500":
| |
in this segment, in the order [11, 3, 6, 4, 9].
The segment can be read as having length 4 because four consecutive
pitch classes, [3, 6, 4, 9], can be read from this sequence
in such a way that the first pitch class of this subsequence is part of the
first chord in the segment, and the last pitch class
is that of the last note of the segment. More generally, in this setting the
found segments are those which contain at least 4
distinct pitch classes, but the top note of the first chord (or note), the
bottom note of the last chord (or note),
and all pitches of all notes and chords other than the first and last
contain at most 4 distinct pitch classes.
OMIT_FROM_DOCS
>>> import copy
>>> sc = stream.Score(id='outerScore')
>>> p = stream.Part(id='toBeCloned')
>>> n1 = note.Note('f4')
>>> n2 = note.Note('e4')
>>> c1 = chord.Chord(['c5', 'd5'])
>>> c2 = chord.Chord(['c4', 'd4'])
>>> p.append([n1, n2, c1, c2])
>>> p = p.makeMeasures()
>>> p1 = copy.deepcopy(p)
>>> p1.id = 'clone1'
>>> sc.insert(0.0, p1)
>>> p2 = copy.deepcopy(p)
>>> p2.id = 'clone2'
>>> sc.insert(0.0, p2)
>>> searcherNew = search.serial.ContiguousSegmentSearcher(sc, 'ignoreAll')
>>> allSegs = searcherNew.byLength(3)
>>> [seg.segment for seg in allSegs]
[[<music21.note.Note F>, <music21.note.Note E>, <music21.chord.Chord C5 D5>],
[<music21.note.Note E>, <music21.chord.Chord C5 D5>],
[<music21.note.Note E>, <music21.chord.Chord C5 D5>, <music21.chord.Chord C4 D4>],
[<music21.note.Note F>, <music21.note.Note E>, <music21.chord.Chord C5 D5>],
[<music21.note.Note E>, <music21.chord.Chord C5 D5>],
[<music21.note.Note E>, <music21.chord.Chord C5 D5>, <music21.chord.Chord C4 D4>]]
'''
def __init__(self, inputStream=None, reps='skipConsecutive', includeChords=True):
    '''
    Store the search configuration and initialize per-search working state.

    * inputStream: the Stream to search (may be None and assigned later).
    * reps: how repetitions are classified -- one of 'skipConsecutive',
      'rowsOnly', 'includeAll', 'ignoreAll' (see getSearchBoundMethod).
    * includeChords: if False, encountering a Chord resets the search.
    '''
    self.stream = inputStream
    self.reps = reps
    self.includeChords = includeChords
    # Working state below is (re)initialized by byLength() for each search.
    self.searchLength = 1
    self.currentNote = None
    self.partNumber = None
    self.chordList = []  # contains Chord or Note objects
    self.activeChordList = []  # can also be Note objects.
    self.totalLength = 0
    self.listOfContiguousSegments = []
    # for ignoreAll, this will reduce the number
    # of possibilities much faster if True
    self.trimToShortestLengthFast = False
def getSearchBoundMethod(self):
    '''
    Return a search method based on the setting of reps (how to classify repetitions),
    and the includeChord setting.
    '''
    # Dispatch table keyed by (reps, includeChords-as-bool). An unknown
    # reps value yields None, matching the original fall-through behavior.
    dispatch = {
        ('skipConsecutive', False): self.searchSkipConsecutiveExclude,
        ('rowsOnly', False): self.searchRowsOnlyExclude,
        ('includeAll', False): self.searchIncludeAllExclude,
        ('ignoreAll', False): self.searchIgnoreAllExclude,
        ('skipConsecutive', True): self.searchSkipConsecutiveInclude,
        ('rowsOnly', True): self.searchRowsOnlyInclude,
        ('includeAll', True): self.searchIncludeAllInclude,
        ('ignoreAll', True): self.searchIgnoreAllInclude,
    }
    # Original semantics: only an explicit `includeChords is False` selects
    # the Exclude family; any other value selects the Include family.
    return dispatch.get((self.reps, self.includeChords is not False))
def byLength(self, length):
    '''
    Run the current setting for reps and includeChords to find all segments
    of length `length`.

    Returns self.listOfContiguousSegments, the accumulated list of
    ContiguousSegmentOfNotes found across all parts.
    '''
    self.searchLength = length
    # Reset the result list once (it was previously initialized twice).
    self.listOfContiguousSegments = []
    hasParts = True
    partList = self.stream.recurse().getElementsByClass('Part')
    if not partList:
        # No Part objects found: treat the whole stream as a single part.
        partList = [self.stream]
        hasParts = False
    searchMethod = self.getSearchBoundMethod()
    for partNumber, partObj in enumerate(partList):
        if hasParts is False:
            partNumber = None
        # Per-part working state.
        self.chordList = []
        self.totalLength = 0  # counts each pitch within a chord once
        for n in partObj.recurse().notes:
            # Skip tie continuations/stops so a held note is counted once.
            if n.tie is not None and n.tie.type != 'start':
                continue
            searchMethod(n, partNumber)
    return self.listOfContiguousSegments
def addActiveChords(self, partNumber):
    '''
    Wrap the current activeChordList in a ContiguousSegmentOfNotes,
    append it to listOfContiguousSegments, and return it.
    '''
    newSegment = ContiguousSegmentOfNotes(self.activeChordList, self.stream, partNumber)
    self.listOfContiguousSegments.append(newSegment)
    return newSegment
def searchIncludeAllExclude(self, n, partNumber):
    '''
    Sliding-window search over single notes only; a chord (more than one
    pitch) resets the window. Returns True exactly when a window of
    searchLength notes was recorded as a segment.
    '''
    if len(n.pitches) > 1:
        # Chords are excluded in this mode: reset and report no match.
        self.chordList = []
        return False
    window = self.chordList
    window.append(n)
    self.totalLength += len(n.pitches)
    # Keep at most searchLength notes in the window.
    if len(window) > self.searchLength:
        window.pop(0)
    if len(window) != self.searchLength:
        return False
    self.activeChordList = list(window)
    self.addActiveChords(partNumber)
    return True
def searchIncludeAllInclude(self, n, partNumber):
    '''
    Returns the number added.

    Window search that admits chords: every suffix of the growing window is a
    candidate; it is recorded when its total pitch count reaches searchLength
    while the pitches of everything between the first and last element fit
    within the row (at most searchLength - 2).
    '''
    numCSNAdded = 0
    chordList = self.chordList
    chordList.append(n)
    self.totalLength = self.totalLength + len(n.pitches)
    lengthOfActive = self.totalLength
    numChordsToDelete = 0
    for i in range(len(chordList)):
        activeChordList = chordList[i:]
        firstChordNumPitches = len(activeChordList[0].pitches)
        lastChordNumPitches = len(activeChordList[-1].pitches)
        if i:
            # Drop the pitches of the element excluded from this suffix.
            lengthOfActive -= len(chordList[i - 1].pitches)
        numPitchesMinusFirstLast = lengthOfActive - (firstChordNumPitches + lastChordNumPitches)
        if (lengthOfActive >= self.searchLength
                and numPitchesMinusFirstLast <= self.searchLength - 2):
            self.activeChordList = activeChordList
            self.addActiveChords(partNumber)
            numCSNAdded += 1
        elif (lengthOfActive >= self.searchLength):
            # Long enough overall but too many interior pitches:
            # schedule this start element for removal from the window.
            numChordsToDelete += 1
        else:
            # Suffix too short; every later (shorter) suffix is too.
            break
    for unused_counter in range(numChordsToDelete):
        removedChord = chordList.pop(0)
        self.totalLength -= len(removedChord.pitches)
    return numCSNAdded
def searchSkipConsecutiveExclude(self, n, partNumber):
    '''
    Like searchIncludeAllExclude, except an element whose pitches repeat
    the immediately preceding element is skipped entirely.
    '''
    if self.chordList:
        previousElement = self.chordList[-1]
        if previousElement.pitches == n.pitches:
            return False
    return self.searchIncludeAllExclude(n, partNumber)
def searchSkipConsecutiveInclude(self, n, partNumber):
    '''
    Like searchIncludeAllInclude, except an element whose pitches repeat
    the immediately preceding element is skipped entirely.
    '''
    if self.chordList:
        previousElement = self.chordList[-1]
        if previousElement.pitches == n.pitches:
            return False
    return self.searchIncludeAllInclude(n, partNumber)
def searchIgnoreAllExclude(self, n, partNumber):
    '''
    Grow a window of single notes and record every suffix spanning exactly
    searchLength distinct pitch classes; a chord resets the window.
    Returns the number of segments added.
    '''
    if len(n.pitches) > 1:
        self.chordList = []
        return False
    numCSNAdded = 0
    numChordsToDelete = 0
    chordList = self.chordList
    chordList.append(n)
    for i in range(len(chordList)):
        activeChordList = chordList[i:]
        # Distinct pitch classes across the candidate suffix.
        # (Set comprehension replaces the intermediate pitch list,
        # the per-element `pitches[:]` copies, and `set([listcomp])`.)
        uniqueActivePitchClasses = {p.pitchClass
                                    for thisChord in activeChordList
                                    for p in thisChord.pitches}
        numUniqueActivePitchClasses = len(uniqueActivePitchClasses)
        if numUniqueActivePitchClasses == self.searchLength:
            self.activeChordList = activeChordList
            self.addActiveChords(partNumber)
            if self.trimToShortestLengthFast:
                numChordsToDelete += 1
            numCSNAdded += 1
        elif numUniqueActivePitchClasses > self.searchLength:
            # Too many distinct pitch classes: retire this start index.
            numChordsToDelete += 1
    for unused_counter in range(numChordsToDelete):
        removedChord = chordList.pop(0)
        self.totalLength -= len(removedChord.pitches)
    return numCSNAdded
def searchIgnoreAllInclude(self, n, partNumber):
    '''
    ignoreAll search admitting chords: every suffix of the window is added
    speculatively, then removed again unless it can be read as spanning the
    required number of distinct pitch classes.
    Returns the number of segments added (and kept).
    '''
    numCSNAdded = 0
    numChordsToDelete = 0
    chordList = self.chordList
    chordList.append(n)
    for i in range(len(chordList)):
        self.activeChordList = activeChordList = chordList[i:]
        csn = self.addActiveChords(partNumber)
        rowSuperset = set(csn.readPitchClassesFromBottom())
        if len(rowSuperset) >= self.searchLength:
            # Minimal reading: interior pitch classes plus the top of the
            # first element and the bottom of the last.
            middleSegment = ContiguousSegmentOfNotes(activeChordList[1:-1], None, None)
            middlePitchClassSet = set(middleSegment.readPitchClassesFromBottom())
            setToCheck = middlePitchClassSet.union([activeChordList[0].pitches[-1].pitchClass,
                                                   activeChordList[-1].pitches[0].pitchClass])
            if len(setToCheck) > self.searchLength:
                # Even the minimal reading has too many distinct pitch
                # classes: not a match; undo and retire this start index.
                self.listOfContiguousSegments.pop()
                numChordsToDelete += 1
            else:
                # Segment kept. Bug fix: count it -- previously numCSNAdded
                # was never incremented and this method always returned 0
                # (compare searchIgnoreAllExclude).
                numCSNAdded += 1
                if self.trimToShortestLengthFast:
                    numChordsToDelete += 1
        else:
            # Not enough distinct pitch classes yet; undo and stop scanning.
            self.listOfContiguousSegments.pop()
            break
    for unused_counter in range(numChordsToDelete):
        removedChord = chordList.pop(0)
        self.totalLength -= len(removedChord.pitches)
    return numCSNAdded
def searchRowsOnlyExclude(self, n, partNumber):
    '''
    Row search over single notes only (every pitch class in the window must
    be distinct); a chord resets the search, and a repeated pitch class
    restarts the window at the current note.
    '''
    if len(n.pitches) > 1:
        self.chordList = []
        return False
    window = self.chordList
    if len(window) == self.searchLength:
        window.pop(0)
    seenClasses = {oldNote.pitch.pitchClass for oldNote in window}
    if n.pitch.pitchClass in seenClasses:
        # Repeated pitch class: start a fresh window at this note.
        self.chordList = window = [n]
    else:
        window.append(n)
    # all unique....
    if len(window) == self.searchLength:
        self.activeChordList = window[:]
        self.addActiveChords(partNumber)
def searchRowsOnlyInclude(self, n, partNumber):
    '''
    Row search admitting chords. A candidate suffix must contain at least
    searchLength pitches with at most searchLength - 2 of them strictly
    inside the outer elements; it is kept only if some contiguous reading of
    searchLength pitch classes (taken bottom-up) is fully distinct.
    '''
    chordList = self.chordList
    chordList.append(n)
    self.totalLength += len(n.pitches)
    lengthOfActive = self.totalLength
    numChordsToDelete = 0
    for i in range(len(chordList)):
        activeChordList = chordList[i:]
        firstChordNumPitches = len(activeChordList[0].pitches)
        lastChordNumPitches = len(activeChordList[-1].pitches)
        if i:
            # Drop the pitches of the element excluded from this suffix.
            lengthOfActive -= len(chordList[i - 1].pitches)
        numPitchesMinusFirstLast = lengthOfActive - (firstChordNumPitches + lastChordNumPitches)
        if (lengthOfActive >= self.searchLength
                and numPitchesMinusFirstLast <= self.searchLength - 2):
            self.activeChordList = activeChordList
            csn = self.addActiveChords(partNumber)
            rowSuperset = csn.readPitchClassesFromBottom()
            # Start offsets for a searchLength-long reading that begins
            # within the first element and ends within the last.
            lowerBound = max([0,
                              len(rowSuperset)
                              - self.searchLength
                              - len(self.activeChordList[-1].pitches)
                              + 1])
            upperBound = min([len(self.activeChordList[0].pitches),
                              len(rowSuperset) - self.searchLength + 1])
            for j in range(lowerBound, upperBound):
                if len(set(rowSuperset[j:j + self.searchLength])) == self.searchLength:
                    break  # found an all-distinct reading: keep the segment
            else:
                # was not a match, should not have been added,
                # thus remove from listOfContiguousSegments
                self.listOfContiguousSegments.pop()
        elif (lengthOfActive >= self.searchLength):
            # Long enough overall but too many interior pitches:
            # retire this start element.
            numChordsToDelete += 1
        else:
            # Suffix too short; every later (shorter) suffix is too.
            break
    for unused_counter in range(numChordsToDelete):
        removedChord = chordList.pop(0)
        self.totalLength -= len(removedChord.pitches)
class SegmentMatcher:
'''
Matches all the ContiguousSegmentsOfNotes (found by ContiguousSegmentSearcher)
within a :class:`~music21.stream.Stream`
to one or more segments of pitch classes.
The inputStream is a :class:`~music21.stream.Stream`; as
in :class:`~music21.search.serial.ContiguousSegmentSearcher`,
the inputStream can contain at most one :class:`~music21.stream.Score` and
its notes must be contained in measures. The searchList is a list of contiguous segments to
be searched for, each segment being given as a list of pitch classes.
The reps and includeChords settings specify how
repeated pitches and chords, respectively, are handled; the
possible settings are the same as those in
:class:`~music21.search.serial.ContiguousSegmentSearcher`.
Returns a list of :class:`~music21.search.serial.ContiguousSegmentOfNotes`
objects for which the
:attr:`~music21.search.serial.ContiguousSegmentOfNotes.activeSegment`
matches at least one of the elements of the searchList,
subject to the settings specified in reps and includeChords.
>>> sc = stream.Score()
>>> part = stream.Part()
>>> sig = meter.TimeSignature('2/4')
>>> part.append(sig)
>>> n1 = note.Note('e4')
>>> n1.quarterLength = 6
>>> part.append(n1)
>>> n2 = note.Note('f4')
>>> n2.quarterLength = 1
>>> part.append(n2)
>>> n3 = chord.Chord(['g4', 'b4'])
>>> n3.quarterLength = 1
>>> part.append(n3)
>>> n4 = note.Note('g4')
>>> n4.quarterLength = 1
>>> part.repeatAppend(n4, 2)
>>> n5 = note.Note('a4')
>>> n5.quarterLength = 3
>>> part.repeatAppend(n5, 2)
>>> n6 = note.Note('b4')
>>> n6.quarterLength = 1
>>> part.append(n6)
>>> n7 = note.Note('c5')
>>> n7.quarterLength = 1
>>> part.append(n7)
>>> newpart = part.makeMeasures()
>>> newpart.makeTies(inPlace=True)
>>> #_DOCS_SHOW newpart.show()
.. image:: images/serial-findSegments.png
:width: 500
>>> sc.insert(0, newpart)
>>> matcher = search.serial.SegmentMatcher(sc, includeChords=False)
>>> GABandABC = matcher.find([[7, 9, 11], [9, 11, 0]])
>>> print(GABandABC)
[<music21.search.serial.ContiguousSegmentOfNotes ['G4', 'A4', 'B4']>,
<music21.search.serial.ContiguousSegmentOfNotes ['A4', 'B4', 'C5']>]
>>> GABandABC[0].segment, GABandABC[1].segment
([<music21.note.Note G>, <music21.note.Note A>, <music21.note.Note B>],
[<music21.note.Note A>, <music21.note.Note B>, <music21.note.Note C>])
>>> GABandABC[0].startMeasureNumber, GABandABC[1].startMeasureNumber
(5, 6)
In case it is not clear, we can use
the :attr:`~music21.search.serial.ContiguousSegmentsOfNotes.matchedSegment` property
to determine, to which element of the | |
"""
The ID of the function associated with the trigger.
"""
return pulumi.get(self, "function_id")
@function_id.setter
def function_id(self, value: Optional[pulumi.Input[str]]):
    """Set the ID of the function associated with the trigger."""
    pulumi.set(self, "function_id", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
    """
    The name of the trigger.
    """
    return pulumi.get(self, "name")

@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
    """Set the name of the trigger."""
    pulumi.set(self, "name", value)
@pulumi.input_type
class _EventTriggerState:
def __init__(__self__, *,
             app_id: Optional[pulumi.Input[str]] = None,
             config_collection: Optional[pulumi.Input[str]] = None,
             config_database: Optional[pulumi.Input[str]] = None,
             config_full_document: Optional[pulumi.Input[bool]] = None,
             config_full_document_before: Optional[pulumi.Input[bool]] = None,
             config_match: Optional[pulumi.Input[str]] = None,
             config_operation_type: Optional[pulumi.Input[str]] = None,
             config_operation_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
             config_project: Optional[pulumi.Input[str]] = None,
             config_providers: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
             config_schedule: Optional[pulumi.Input[str]] = None,
             config_schedule_type: Optional[pulumi.Input[str]] = None,
             config_service_id: Optional[pulumi.Input[str]] = None,
             disabled: Optional[pulumi.Input[bool]] = None,
             event_processors: Optional[pulumi.Input['EventTriggerEventProcessorsArgs']] = None,
             function_id: Optional[pulumi.Input[str]] = None,
             function_name: Optional[pulumi.Input[str]] = None,
             name: Optional[pulumi.Input[str]] = None,
             project_id: Optional[pulumi.Input[str]] = None,
             trigger_id: Optional[pulumi.Input[str]] = None,
             type: Optional[pulumi.Input[str]] = None):
    """
    Input properties used for looking up and filtering EventTrigger resources.
    :param pulumi.Input[str] app_id: The ObjectID of your application.
    :param pulumi.Input[str] config_collection: Required for `DATABASE` type. The name of the MongoDB collection that the trigger watches for change events. The collection must be part of the specified database.
    :param pulumi.Input[str] config_database: Required for `DATABASE` type. The name of the MongoDB database that contains the watched collection.
    :param pulumi.Input[bool] config_full_document: Optional for `DATABASE` type. If true, indicates that `UPDATE` change events should include the most current [majority-committed](https://docs.mongodb.com/manual/reference/read-concern-majority/) version of the modified document in the fullDocument field.
    :param pulumi.Input[str] config_match: Optional for `DATABASE` type. A [$match](https://docs.mongodb.com/manual/reference/operator/aggregation/match/) expression document that MongoDB Realm includes in the underlying change stream pipeline for the trigger. This is useful when you want to filter change events beyond their operation type. The trigger will only fire if the expression evaluates to true for a given change event.
    :param pulumi.Input[str] config_operation_type: Required for `AUTHENTICATION` type. The [authentication operation type](https://docs.mongodb.com/realm/triggers/authentication-triggers/#std-label-authentication-event-operation-types) to listen for. Possible Values: `LOGIN`, `CREATE`, `DELETE`
    :param pulumi.Input[Sequence[pulumi.Input[str]]] config_operation_types: Required for `DATABASE` type. The [database event operation types](https://docs.mongodb.com/realm/triggers/database-triggers/#std-label-database-events) to listen for. This must contain at least one value. Possible Values: `INSERT`, `UPDATE`, `REPLACE`, `DELETE`
    :param pulumi.Input[str] config_project: Optional for `DATABASE` type. A [$project](https://docs.mongodb.com/manual/reference/operator/aggregation/project/) expression document that Realm uses to filter the fields that appear in change event objects.
    :param pulumi.Input[Sequence[pulumi.Input[str]]] config_providers: Required for `AUTHENTICATION` type. A list of one or more [authentication provider](https://docs.mongodb.com/realm/authentication/providers/) id values. The trigger will only listen for authentication events produced by these providers.
    :param pulumi.Input[str] config_schedule: Required for `SCHEDULED` type. A [cron expression](https://docs.mongodb.com/realm/triggers/cron-expressions/) that defines the trigger schedule.
    :param pulumi.Input[str] config_service_id: Required for `DATABASE` type. The ID of the MongoDB Service associated with the trigger.
    :param pulumi.Input[bool] disabled: Default: `false` If `true`, the trigger is disabled.
    :param pulumi.Input['EventTriggerEventProcessorsArgs'] event_processors: An object where each field name is an event processor ID and each value is an object that configures its corresponding event processor. The following event processors are supported: `AWS_EVENTBRIDGE` For an example configuration object, see [Send Trigger Events to AWS EventBridge](https://docs.mongodb.com/realm/triggers/eventbridge/#std-label-event_processor_example).
    * `event_processors.0.aws_eventbridge.config_account_id` - (Optional) AWS Account ID.
    * `event_processors.0.aws_eventbridge.config_region` - (Optional) Region of AWS Account.
    :param pulumi.Input[str] function_id: The ID of the function associated with the trigger.
    :param pulumi.Input[str] function_name: The name of the function associated with the trigger.
    :param pulumi.Input[str] name: The name of the trigger.
    :param pulumi.Input[str] project_id: The unique ID for the project to create the trigger.
    :param pulumi.Input[str] trigger_id: The unique ID of the trigger.
    :param pulumi.Input[str] type: The type of the trigger. Possible Values: `DATABASE`, `AUTHENTICATION`,`SCHEDULED`
    """
    # Record only explicitly-provided (non-None) values, in declaration order.
    init_args = {
        "app_id": app_id,
        "config_collection": config_collection,
        "config_database": config_database,
        "config_full_document": config_full_document,
        "config_full_document_before": config_full_document_before,
        "config_match": config_match,
        "config_operation_type": config_operation_type,
        "config_operation_types": config_operation_types,
        "config_project": config_project,
        "config_providers": config_providers,
        "config_schedule": config_schedule,
        "config_schedule_type": config_schedule_type,
        "config_service_id": config_service_id,
        "disabled": disabled,
        "event_processors": event_processors,
        "function_id": function_id,
        "function_name": function_name,
        "name": name,
        "project_id": project_id,
        "trigger_id": trigger_id,
        "type": type,
    }
    for arg_name, arg_value in init_args.items():
        if arg_value is not None:
            pulumi.set(__self__, arg_name, arg_value)
@property
@pulumi.getter(name="appId")
def app_id(self) -> Optional[pulumi.Input[str]]:
    """
    The ObjectID of your application.
    """
    return pulumi.get(self, "app_id")

@app_id.setter
def app_id(self, value: Optional[pulumi.Input[str]]):
    """Set the ObjectID of your application."""
    pulumi.set(self, "app_id", value)
@property
@pulumi.getter(name="configCollection")
def config_collection(self) -> Optional[pulumi.Input[str]]:
    """
    Required for `DATABASE` type. The name of the MongoDB collection that the trigger watches for change events. The collection must be part of the specified database.
    """
    return pulumi.get(self, "config_collection")

@config_collection.setter
def config_collection(self, value: Optional[pulumi.Input[str]]):
    """Set the watched MongoDB collection name (`DATABASE` type)."""
    pulumi.set(self, "config_collection", value)
@property
@pulumi.getter(name="configDatabase")
def config_database(self) -> Optional[pulumi.Input[str]]:
    """
    Required for `DATABASE` type. The name of the MongoDB database that contains the watched collection.
    """
    return pulumi.get(self, "config_database")

@config_database.setter
def config_database(self, value: Optional[pulumi.Input[str]]):
    """Set the MongoDB database name containing the watched collection."""
    pulumi.set(self, "config_database", value)
@property
@pulumi.getter(name="configFullDocument")
def config_full_document(self) -> Optional[pulumi.Input[bool]]:
    """
    Optional for `DATABASE` type. If true, indicates that `UPDATE` change events should include the most current [majority-committed](https://docs.mongodb.com/manual/reference/read-concern-majority/) version of the modified document in the fullDocument field.
    """
    return pulumi.get(self, "config_full_document")

@config_full_document.setter
def config_full_document(self, value: Optional[pulumi.Input[bool]]):
    """Set whether `UPDATE` events include the full current document."""
    pulumi.set(self, "config_full_document", value)
@property
@pulumi.getter(name="configFullDocumentBefore")
def config_full_document_before(self) -> Optional[pulumi.Input[bool]]:
    # NOTE(review): undocumented upstream; presumably the pre-image
    # counterpart of `config_full_document` -- confirm with provider docs.
    return pulumi.get(self, "config_full_document_before")

@config_full_document_before.setter
def config_full_document_before(self, value: Optional[pulumi.Input[bool]]):
    """Set `config_full_document_before`."""
    pulumi.set(self, "config_full_document_before", value)
@property
@pulumi.getter(name="configMatch")
def config_match(self) -> Optional[pulumi.Input[str]]:
    """
    Optional for `DATABASE` type. A [$match](https://docs.mongodb.com/manual/reference/operator/aggregation/match/) expression document that MongoDB Realm includes in the underlying change stream pipeline for the trigger. This is useful when you want to filter change events beyond their operation type. The trigger will only fire if the expression evaluates to true for a given change event.
    """
    return pulumi.get(self, "config_match")

@config_match.setter
def config_match(self, value: Optional[pulumi.Input[str]]):
    """Set the `$match` expression document for the change stream."""
    pulumi.set(self, "config_match", value)
@property
@pulumi.getter(name="configOperationType")
def config_operation_type(self) -> Optional[pulumi.Input[str]]:
    """
    Required for `AUTHENTICATION` type. The [authentication operation type](https://docs.mongodb.com/realm/triggers/authentication-triggers/#std-label-authentication-event-operation-types) to listen for. Possible Values: `LOGIN`, `CREATE`, `DELETE`
    """
    return pulumi.get(self, "config_operation_type")

@config_operation_type.setter
def config_operation_type(self, value: Optional[pulumi.Input[str]]):
    """Set the authentication operation type to listen for."""
    pulumi.set(self, "config_operation_type", value)
@property
@pulumi.getter(name="configOperationTypes")
def config_operation_types(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
    """
    Required for `DATABASE` type. The [database event operation types](https://docs.mongodb.com/realm/triggers/database-triggers/#std-label-database-events) to listen for. This must contain at least one value. Possible Values: `INSERT`, `UPDATE`, `REPLACE`, `DELETE`
    """
    return pulumi.get(self, "config_operation_types")

@config_operation_types.setter
def config_operation_types(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
    """Set the database event operation types to listen for."""
    pulumi.set(self, "config_operation_types", value)
@property
@pulumi.getter(name="configProject")
def config_project(self) -> Optional[pulumi.Input[str]]:
    """
    Optional for `DATABASE` type. A [$project](https://docs.mongodb.com/manual/reference/operator/aggregation/project/) expression document that Realm uses to filter the fields that appear in change event objects.
    """
    return pulumi.get(self, "config_project")

@config_project.setter
def config_project(self, value: Optional[pulumi.Input[str]]):
    """Set the `$project` expression filtering change-event fields."""
    pulumi.set(self, "config_project", value)
@property
@pulumi.getter(name="configProviders")
def config_providers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
    """
    Required for `AUTHENTICATION` type. A list of one or more [authentication provider](https://docs.mongodb.com/realm/authentication/providers/) id values. The trigger will only listen for authentication events produced by these providers.
    """
    return pulumi.get(self, "config_providers")

@config_providers.setter
def config_providers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
    """Set the authentication provider ids to listen to."""
    pulumi.set(self, "config_providers", value)
@property
@pulumi.getter(name="configSchedule")
def config_schedule(self) -> Optional[pulumi.Input[str]]:
    """
    Required for `SCHEDULED` type. A [cron expression](https://docs.mongodb.com/realm/triggers/cron-expressions/) that defines the trigger schedule.
    """
    return pulumi.get(self, "config_schedule")

@config_schedule.setter
def config_schedule(self, value: Optional[pulumi.Input[str]]):
    """Set the cron expression defining the trigger schedule."""
    pulumi.set(self, "config_schedule", value)
@property
@pulumi.getter(name="configScheduleType")
def config_schedule_type(self) -> Optional[pulumi.Input[str]]:
    # NOTE(review): undocumented upstream; presumably the schedule kind for
    # `SCHEDULED` triggers -- confirm with provider docs.
    return pulumi.get(self, "config_schedule_type")

@config_schedule_type.setter
def config_schedule_type(self, value: Optional[pulumi.Input[str]]):
    """Set `config_schedule_type`."""
    pulumi.set(self, "config_schedule_type", value)
@property
@pulumi.getter(name="configServiceId")
def config_service_id(self) -> Optional[pulumi.Input[str]]:
    """
    Required for `DATABASE` type. The ID of the MongoDB Service associated with the trigger.
    """
    return pulumi.get(self, "config_service_id")

@config_service_id.setter
def config_service_id(self, value: Optional[pulumi.Input[str]]):
    """Set the ID of the MongoDB Service associated with the trigger."""
    pulumi.set(self, "config_service_id", value)
@property
@pulumi.getter
def disabled(self) -> Optional[pulumi.Input[bool]]:
    """
    Default: `false` If `true`, the trigger is disabled.
    """
    return pulumi.get(self, "disabled")

@disabled.setter
def disabled(self, value: Optional[pulumi.Input[bool]]):
    """Set whether the trigger is disabled."""
    pulumi.set(self, "disabled", value)
@property
@pulumi.getter(name="eventProcessors")
def event_processors(self) -> Optional[pulumi.Input['EventTriggerEventProcessorsArgs']]:
"""
An object where each field name is an event processor ID and each value is an object that configures its corresponding event processor. The following event processors are supported: `AWS_EVENTBRIDGE` For an example configuration object, see [Send Trigger Events to AWS EventBridge](https://docs.mongodb.com/realm/triggers/eventbridge/#std-label-event_processor_example).
* `event_processors.0.aws_eventbridge.config_account_id` - | |
# <gh_stars>1-10
"""
# hdl21 Elaboration
Defines the primary `elaborate` method used to flesh out an in-memory `Module` or `Generator`.
Internally defines and uses a number of hierarchical visitor-classes which traverse the hardware hierarchy,
performing one or more transformation-passes.
"""
# Std-Lib Imports
import copy
from enum import Enum
from types import SimpleNamespace
from typing import Union, Any, Dict, List, Optional, Tuple
# PyPi
from pydantic.dataclasses import dataclass
# Local imports
from .connect import connectable
from .module import Module, ExternalModuleCall
from .instance import InstArray, Instance, PortRef
from .primitives import R, PrimitiveCall
from .bundle import AnonymousBundle, Bundle, BundleInstance, _check_compatible
from .signal import PortDir, Signal, Visibility, Slice, Concat, Sliceable, NoConn
from .generator import Generator, GeneratorCall
from .params import _unique_name
from .instantiable import Instantiable
class Context:
    """Elaboration Context"""

    # Currently carries no state; elaborators store it and pass it to
    # generator functions that declare `usecontext`.
    ...  # To be continued!
# Type short-hand for elaborate-able types
Elabable = Union[Module, GeneratorCall]
# (Plural Version)
Elabables = Union[Elabable, List[Elabable], SimpleNamespace]
def elabable(obj: Any) -> bool:
    """Return True if `obj` is an elaborate-able type."""
    # Function to test this, since `isinstance` doesn't work for `Union`.
    # NOTE(review): accepts `Generator` although the `Elabable` alias above
    # does not include it -- confirm which is intended.
    return isinstance(obj, (Module, Generator, GeneratorCall))
class _Elaborator:
    """ Base Elaborator Class

    One elaboration pass. Sub-classes override the `elaborate_*` hooks they
    care about; the defaults below either pass objects through unchanged or
    raise for node-types a pass must handle explicitly. """

    @classmethod
    def elaborate(cls, top, ctx):
        """ Elaboration entry-point. Elaborate the top-level object. """
        return cls(top, ctx).elaborate_top()

    def __init__(self, top: Elabable, ctx: Context):
        self.top = top  # Top-level object being elaborated
        self.ctx = ctx  # Elaboration Context

    def elaborate_top(self):
        """ Elaborate our top node """
        # Base passes only accept a Module at top level;
        # GeneratorElaborator overrides this to also accept GeneratorCalls.
        if not isinstance(self.top, Module):
            raise TypeError
        return self.elaborate_module(self.top)

    def elaborate_generator_call(self, call: GeneratorCall) -> Module:
        """ Elaborate a GeneratorCall """
        # Only the generator-elaborator can handle generator calls; default it to error on others.
        raise RuntimeError(f"Invalid call to elaborate GeneratorCall by {self}")

    def elaborate_module(self, module: Module) -> Module:
        """ Elaborate a Module """
        # Required for all passes. Defaults to `NotImplementedError`.
        raise NotImplementedError

    def elaborate_external_module(self, call: ExternalModuleCall) -> ExternalModuleCall:
        """ Elaborate an ExternalModuleCall """
        # Default: nothing to see here, carry on
        return call

    def elaborate_primitive_call(self, call: PrimitiveCall) -> PrimitiveCall:
        """ Elaborate a PrimitiveCall """
        # Default: nothing to see here, carry on
        return call

    def elaborate_bundle_instance(self, inst: BundleInstance) -> None:
        """ Elaborate an BundleInstance """
        # Annotate each BundleInstance so that its pre-elaboration `PortRef` magic is disabled.
        inst._elaborated = True

    def elaborate_bundle(self, bundle: Bundle) -> Bundle:
        """ Elaborate an Bundle """
        # Default: nothing to see here, carry on
        return bundle

    def elaborate_instance_array(self, array: InstArray) -> Instantiable:
        """ Elaborate an InstArray """
        # Turn off `PortRef` magic
        array._elaborated = True
        # And visit the Instance's target
        return self.elaborate_instantiable(array._resolved)

    def elaborate_instance(self, inst: Instance) -> Instantiable:
        """ Elaborate a Module Instance. """
        # This version of `elaborate_instantiable` is the "post-generators" version used by *most* passes.
        # The Generator-elaborator is different, and overrides it.
        # Turn off `PortRef` magic
        inst._elaborated = True
        # And visit the Instance's target
        return self.elaborate_instantiable(inst._resolved)

    def elaborate_instantiable(self, of: Instantiable) -> Instantiable:
        """ Elaborate an Instantiable: dispatch on its concrete type. """
        # This version of `elaborate_instantiable` is the "post-generators" version used by *most* passes.
        # The Generator-elaborator is different, and overrides it.
        if not of:
            raise RuntimeError(f"Error elaborating undefined Instance-target {of}")
        if isinstance(of, Module):
            return self.elaborate_module(of)
        if isinstance(of, PrimitiveCall):
            return self.elaborate_primitive_call(of)
        if isinstance(of, ExternalModuleCall):
            return self.elaborate_external_module(of)
        raise TypeError

    @staticmethod
    def flatname(
        segments: List[str], *, avoid: Optional[Dict] = None, maxlen: int = 511
    ) -> str:
        """ Create an attribute-name merging string-list `segments`, while avoiding all keys in dictionary `avoid`.
        Commonly re-used while flattening nested objects and while creating explicit attributes from implicit ones.
        Raises a `RuntimeError` if no such name can be found of length less than `maxlen`.
        The default max-length is 511 characters, a value representative of typical limits in target EDA formats. """
        if avoid is None:
            avoid = {}
        # The default format and result is of the form "seg0_seg1".
        # If that is in the avoid-keys, append underscores until it's not, or fails.
        name = "_".join(segments)
        while True:
            if len(name) > maxlen:
                msg = f"Could not generate a flattened name for {segments}: (trying {name})"
                raise RuntimeError(msg)
            if name not in avoid:  # Done!
                break
            name += "_"  # Collision; append underscore
        return name
class GeneratorElaborator(_Elaborator):
""" Hierarchical Generator Elaborator
Walks a hierarchy from `top` calling Generators. """
def __init__(self, *args, **kwargs):
    super().__init__(*args, **kwargs)
    # Per-run lookup tables (fixes the copy-pasted comment on the last one):
    self.generator_calls = dict()  # GeneratorCalls to their (Module) results
    self.modules = dict()  # Module ids to references
    self.primitive_calls = dict()  # PrimitiveCall ids to references
    self.ext_module_calls = dict()  # ExternalModuleCall ids to references
def elaborate_top(self):
""" Elaborate our top node """
if isinstance(self.top, Module):
return self.elaborate_module(self.top)
if isinstance(self.top, GeneratorCall):
return self.elaborate_generator_call(self.top)
msg = f"Invalid Elaboration top-level {self.top}, must be a Module or Generator"
raise TypeError(msg)
    def elaborate_generator_call(self, call: GeneratorCall) -> Module:
        """ Elaborate Generator-function-call `call`. Returns the generated Module.
        Results are cached per-call: equal-valued `GeneratorCall`s share one generated Module. """
        # First check our cache
        if call in self.generator_calls: # Already done!
            # Give the `call` a reference to its result.
            # Note this *has not* necessarily already happened, as the `self.generator_calls` key may be an equally-valued (but distinct) `GeneratorCall`.
            result = self.generator_calls[call]
            call.result = result
            return result
        # The main event: Run the generator-function
        # `usecontext` generators take a second `Context` argument; others just the params.
        if call.gen.usecontext:
            m = call.gen.func(call.arg, self.ctx)
        else:
            m = call.gen.func(call.arg)
        # Type-check the result
        # Generators may return other (potentially nested) generator-calls; unwind any of them
        while isinstance(m, GeneratorCall):
            # Note this should hit Python's recursive stack-check if it doesn't terminate
            m = self.elaborate_generator_call(m)
        # Ultimately they've gotta resolve to Modules, or they fail.
        if not isinstance(m, Module):
            msg = f"Generator {call.gen.func.__name__} returned {type(m)}, must return Module."
            raise TypeError(msg)
        # Give the GeneratorCall a reference to its result, and store it in our local dict
        call.result = m
        self.generator_calls[call] = m
        # Create a unique name
        # If the Module that comes back is anonymous, start by giving it a name equal to the Generator's
        if m.name is None:
            m.name = call.gen.func.__name__
        # Then add a unique suffix per its parameter-values, e.g. "MyGen(w=2)".
        # NOTE(review): this suffix is appended even to Modules the generator explicitly named.
        m.name += "(" + _unique_name(call.arg) + ")"
        # Update the Module's `pymodule`, which generally at this point is `hdl21.generator`
        m._pymodule = call.gen.pymodule
        # And elaborate the module
        return self.elaborate_module(m)
def elaborate_module(self, module: Module) -> Module:
""" Elaborate Module `module`.
Primarily performs flattening of Instance Arrays,
and re-connecting to the resultant flattened instances """
if id(module) in self.modules: # Already done!
return module
if not module.name:
msg = f"Anonymous Module {module} cannot be elaborated (did you forget to name it?)"
raise RuntimeError(msg)
# Flatten Instance arrays
while module.instarrays:
name, array = module.instarrays.popitem()
module.namespace.pop(name)
# Visit the array's target
target = self.elaborate_instance_array(array)
# And do the real work: flattening it.
if array.n < 1:
raise RuntimeError(f"Invalid InstArray {array} with size {array.n}")
# Create the new, flat Instances
new_insts = []
for k in range(array.n):
name = self.flatname(
segments=[array.name, str(k)], avoid=module.namespace
)
inst = module.add(Instance(of=target, name=name))
new_insts.append(inst)
# And connect them
for portname, conn in array.conns.items():
if isinstance(conn, BundleInstance):
# All new instances get the same BundleInstance
for inst in new_insts:
inst.connect(portname, conn)
elif isinstance(conn, (Signal, Slice, Concat)):
# Get the target-module port, particularly for its width
port = target.get(portname)
if not isinstance(port, Signal):
msg = f"Invalid port connection of `{portname}` {port} to {conn} in InstArray {array}"
raise RuntimeError(msg)
if port.width == conn.width:
# All new instances get the same signal
for inst in new_insts:
inst.connect(portname, conn)
elif port.width * array.n == conn.width:
# Each new instance gets a slice, equal to its own width
for k, inst in enumerate(new_insts):
slize = conn[k * port.width : (k + 1) * port.width]
if slize.width != port.width:
msg = f"Width mismatch connecting {slize} to {port}"
raise RuntimeError(msg)
inst.connect(portname, slize)
else: # All other width-values are invalid
msg = f"Invalid connection of {conn} of width {conn.width} to port {portname} on Array {array.name} of width {port.width}. "
msg += f"Valid widths are either {port.width} (broadcasting across instances) and {port.width * array.n} (individually wiring to each)."
raise RuntimeError(msg)
else:
msg = f"Invalid connection to {conn} in InstArray {array}"
raise TypeError(msg)
# Depth-first traverse instances, ensuring their targets are defined
for inst in module.instances.values():
self.elaborate_instance(inst)
# Also visit bundle instances, turning off their pre-elab magic
for bundle in module.bundles.values():
self.elaborate_bundle_instance(bundle)
# Store a reference to the now-expanded Module in our cache, and return it
self.modules[id(module)] = | |
#
# amber_test_generation.py
# Author: <NAME> #
import sys
import re
from configuration import Configuration
# Configuration object to be used in the Amber test generation
# Notes: saturation_level describes the heuristic of running many
# instances of the same test in the same kernel. Each instance of the
# test operate on distinct memory. There are 3 options for saturation
# that dictate how threads are mapped to test instances. NOTE THAT
# SATURATION IS NOT CURRENTLY SUPPORTED FOR INTRA-WORKGROUP
# AND INTRA-SUBGROUP TESTS:
# The saturation_level can be set to:
# 0 - no saturation
# 1 - Round Robin: threads are assigned to test instances in a round robin fashion
# 2 - Chunking: threads are assigned to test instances in a coarse
# grained chunk (first N threads are assigned to testing thread 0,
# second N threads are assigned to testing thread 1, etc.
# Workgroups is the max supported number across a variety of GPUs
# (65532). Timeout is how many milliseconds to wait until killing the
# kernel (20 seconds). threads_per_workgroup is set to 1 so that each
# test goes across workgroups. Increasing this will allow
# intra-workgroup behavior to be tested, but can mess up saturation
# heuristics. subgroup will include the GLSL subgroup extension and
# ensure that testing threads are in different subgroups.
default_config = Configuration(timeout=20000, workgroups=65532, threads_per_workgroup=1, saturation_level=0, subgroup=0)
# write the necessary "boiler plate" code to generate an Amber test, along with Shader
# Storage Buffer Object(s), workgroup size, and global variable to
# assign thread IDs. output is the file being written to, timeout determines (in ms) when the
# program will terminate, num_testing_threads is the number of threads being tested, and saturation_level is the
# type of saturation (if any)
def write_metal_prologue(output, timeout, threads_per_workgroup, workgroups, num_testing_threads, saturation_level,
                         subgroup_setting):
    """Write the Metal kernel prologue: headers, the kernel signature, the `pc`
    program counter, and (for saturated runs) the per-thread `index` computation.

    output: writable text stream for the generated Metal source.
    timeout: kept for interface compatibility; not used in the Metal prologue.
    threads_per_workgroup, workgroups: dispatch geometry (used for chunked saturation).
    num_testing_threads: number of threads participating in the litmus test.
    saturation_level: 0 = none, 1 = round-robin, 2 = chunking (see module header).
    subgroup_setting: kept for interface compatibility with the GLSL writer.
    """
    output.write("#include <metal_stdlib>\n")
    output.write("using namespace metal;\n")
    # FIX: this block previously contained unresolved git merge-conflict markers
    # (<<<<<<< / ======= / >>>>>>>), which made the module a syntax error. The
    # retained signature is the one declaring `total_threads`, `lane`, `simd_width`
    # and `sid_x`, since the round-robin path below emits `total_threads` and
    # write_metal_thread_program's subgroup path reads `sid_x` and `lane`.
    output.write("kernel void testKernel(device atomic_uint * x, device atomic_uint * y, device atomic_int* count, uint gid_x [[thread_position_in_grid]], uint total_threads [[threads_per_grid]], uint lane [[ thread_index_in_simdgroup ]], uint simd_width [[ threads_per_simdgroup ]], uint sid_x [[simdgroup_index_in_threadgroup]]) {\n")
    output.write("\tint pc = 0;\n")
    # "round robin" saturation: each thread's SSBO index is gid / num_testing_threads
    if saturation_level == 1:
        output.write("\n")
        output.write("\tint total_num_threads = total_threads;\n")
        output.write("\tint num_testing_threads = " + str(num_testing_threads) + ";\n")
        output.write("\tuint index = gid_x / num_testing_threads;\n")
    # "chunking" saturation: compute the chunk size and each thread's index within it
    elif saturation_level == 2:
        total_threads = workgroups * threads_per_workgroup
        output.write("\n")
        output.write("\tint total_num_threads = " + str(total_threads) + ";\n")
        output.write("\tint num_testing_threads = " + str(num_testing_threads) + ";\n")
        output.write("\tint chunk_size = total_num_threads / num_testing_threads;\n")
        output.write("\tuint index = gid_x % chunk_size;\n")
    output.write("\n")
# write the appropriate content to set up each thread by using the thread_instructions, the thread_number,
# the total number of threads (number_of_testing_threads), the number_of_testing_threads, and saturation_level
def write_metal_thread_program(output, thread_instructions, thread_number, number_of_testing_threads, saturation_level,
                               subgroup_set):
    """Emit one testing thread's program: a guard selecting the thread (per the
    saturation/subgroup mapping), then a while/switch interpreter over `pc` with
    one `case` per instruction and a final terminating case."""
    emit = output.write
    # Guard: only the chosen thread runs this program.
    if saturation_level == 0:
        if subgroup_set == 0:
            emit("\tif (gid_x == " + str(thread_number) + ") { \n")
        else:
            # Subgroup mode: select by simdgroup id, first lane only.
            emit("\tif (sid_x == " + str(thread_number) + " && lane == 0) { \n")
    elif saturation_level == 1:
        emit("\tif (gid_x % num_testing_threads == " + str(thread_number) + ") { \n")
    elif saturation_level == 2:
        emit("\tif (gid_x / chunk_size == " + str(thread_number) + ") { \n")
    else:
        print("Saturation level can only be 0, 1, or 2", file=sys.stderr)
        exit(1)
    emit("\t int terminate = 0;\n")
    emit("\n")
    emit("\twhile (true) {\n")
    emit("\t if (terminate == 1) {\n")
    emit("\t break;\n")
    emit("\t}\n")
    emit("\tswitch(pc) {\n")
    emit("\n")
    # One case per instruction; jumping to `program_end` terminates the loop.
    program_end = len(thread_instructions)
    for instruc_id, instruction in enumerate(thread_instructions):
        write_metal_thread_instruction(output, instruction, instruc_id, number_of_testing_threads, saturation_level,
                                       program_end)
    emit("\t case " + str(program_end) + ":\n")
    emit("\t\tterminate = 1;\n")
    emit("\t\tbreak;\n")
    emit("\n")
    emit("\t }\n")
    emit("\t }\n")
    emit("\t}\n")
    emit("\n")
# write the appropriate test cases for each instruction based off of the instruction id, number of instructions, and
# type of saturation
def write_metal_thread_instruction(output, instruction, instruc_id, num_of_testing_threads, saturation_level,
                                   program_end):
    """Write the `case` body for one pseudo-instruction, dispatching on its
    mnemonic to the matching handler. The instruction's parenthesized,
    comma-separated arguments always begin with the memory location (0 = x, 1 = y)."""
    # Extract "a,b,..." from "mnemonic(a,b,...)"; fall back to a single blank arg.
    parsed = re.search("\\((.+?)\\)", instruction)
    raw_args = parsed.group(1) if parsed else " "
    args = raw_args.split(",")
    memory_location = args[0]
    output.write("\t case " + str(instruc_id) + ": \n")
    if instruction.startswith("atomic_exch_branch"):
        # args: location, check value, exchange value, branch target
        handle_atomic_exchange_branch(output, args[1], args[2], args[3],
                                      saturation_level, memory_location, program_end)
    elif instruction.startswith("atomic_chk_branch"):
        # args: location, check value, branch target
        handle_amber_check_branch(output, args[1], args[2], saturation_level,
                                  memory_location, program_end)
    elif instruction.startswith("atomic_store"):
        # args: location, value to store
        handle_atomic_store(output, args[1], saturation_level, memory_location)
# write the amber test code for an atomic exchange branch instruction
def handle_atomic_exchange_branch(output, check_value, exchange_value, instruction_address, saturation_level,
                                  memory_location, program_end):
    """Write the Metal code for an atomic exchange-and-branch instruction:
    atomically exchange `exchange_value` into x or y (indexed when saturated);
    if the old value equals `check_value`, jump to `instruction_address`
    ("END" means jump past the last instruction), otherwise fall through to pc+1."""
    # Perform the operation either at a single memory location or an indexed one,
    # depending on the saturation level.
    if saturation_level == 0:
        # memory_location selects between locations x (0) and y (non-zero)
        if int(memory_location) == 0:
            output.write("\t\tif (atomic_exchange_explicit(x, " + exchange_value + ", memory_order_relaxed) == " + check_value + ") { \n")
        else:
            output.write("\t\tif (atomic_exchange_explicit(y, " + exchange_value + ", memory_order_relaxed) == " + check_value + ") { \n")
    elif saturation_level == 1 or saturation_level == 2:
        # Saturated runs address per-instance slots x[index] / y[index]
        if int(memory_location) == 0:
            output.write("\t\tif (atomic_exchange_explicit(x + index, " + exchange_value + ", memory_order_relaxed) == " + check_value +
                         ") { \n")
        else:
            output.write("\t\tif (atomic_exchange_explicit(y + index, " + exchange_value + ", memory_order_relaxed) == " + check_value +
                         ") { \n")
    # FIX: the original `if == "END" / elif != "END" / else` made the error branch
    # unreachable (its message also named the wrong function); a plain if/else is
    # equivalent and honest about there being no error case.
    if instruction_address == "END":
        output.write("\t\t pc = " + str(program_end) + ";\n")
    else:
        output.write("\t\t pc = " + instruction_address + ";\n")
    output.write("\t\t}\n")
    output.write("\t\telse {\n")
    output.write("\t\t pc = pc + 1;\n")
    output.write("\t\t}\n")
    output.write("\t\tbreak;\n")
    output.write("\n")
# write the amber test code for an atomic check branch instruction
def handle_amber_check_branch(output, check_value, instruction_address, saturation_level, memory_location, program_end):
# perform the operation either at a single memory location or indexed memory location, depending on saturation level
if saturation_level == 0:
# determine whether to write to memory location x or memory location y
if int(memory_location) == 0:
#output.write("\t\tif (atomicAdd(test.x, 0) == " + check_value + " ) { \n")
output.write("\t\tif (atomic_fetch_add_explicit(x, 0, memory_order_relaxed) == " + check_value + " ) { \n")
else:
output.write("\t\tif (atomic_fetch_add_explicit(y, 0, memory_order_relaxed) == " + check_value + " ) { \n")
#output.write("\t\tif (atomicAdd(test.y, 0) == " + check_value + " ) { \n")
elif saturation_level == 1 or saturation_level == 2:
# determine whether to write to memory location x[] or memory location y[]
if int(memory_location) == 0:
output.write("\t\tif (atomic_fetch_add_explicit(x+index, 0, memory_order_relaxed) == " + check_value + " ) { \n")
#output.write("\t\tif (atomicAdd(out_buf1.x[index], 0) == " + check_value + " ) { \n")
else:
output.write("\t\tif (atomic_fetch_add_explicit(y+index, 0, memory_order_relaxed) == " + check_value + " ) { \n")
#output.write("\t\tif (atomicAdd(out_buf2.y[index], 0) == " + check_value + " ) { \n")
if instruction_address == "END":
output.write("\t\t pc = " + str(program_end) + ";\n")
elif instruction_address != "END":
output.write("\t\t pc = " + instruction_address + ";\n")
else:
print("Incorrect instruction_address in handle_amber_check_branch", file=sys.stderr)
| |
= x.index_select(axis, x_index)
self.assertEqual(
y, np.take(data.reshape(flatten_shape), index, axis=axes[0]))
    # Each test below builds a reference ndarray, wraps it with the module-level
    # `new_tensor` helper, applies the tensor op, and compares against the NumPy
    # equivalent via self.assertEqual (an array-aware comparison in this suite).
    # `uniform`/`arange` are project fixture helpers — presumably random/sequential
    # array factories; confirm against their definitions.
    def test_isfinite(self):
        # nan/inf probe values for element-wise finiteness
        data = np.array([0., float('nan'), float('inf')])
        x = new_tensor(data)
        self.assertEqual(x.isfinite(), np.isfinite(data))
    def test_isinf(self):
        data = np.array([0., 1., float('inf')])
        x = new_tensor(data)
        self.assertEqual(x.isinf(), np.isinf(data))
    def test_isnan(self):
        data = np.array([0., 1., float('nan')])
        x = new_tensor(data)
        self.assertEqual(x.isnan(), np.isnan(data))
    def test_less(self):
        # `<` operator across broadcastable shape pairs
        for a_shape, b_shape in self.binary_test_shapes:
            data1, data2 = uniform(a_shape), uniform(b_shape)
            a, b = new_tensor(data1, False), new_tensor(data2, False)
            self.assertEqual(a < b, np.less(data1, data2))
    def test_less_equal(self):
        for a_shape, b_shape in self.binary_test_shapes:
            data1, data2 = uniform(a_shape), uniform(b_shape)
            a, b = new_tensor(data1, False), new_tensor(data2, False)
            self.assertEqual(a <= b, np.less_equal(data1, data2))
    def test_log(self):
        data = np.array([1., 2., 3.], 'float32')
        x = new_tensor(data)
        self.assertEqual(x.log(), np.log(data))
        # in-place variant mutates x
        x.log_()
        self.assertEqual(x, np.log(data))
    def test_logical_and(self):
        for a_shape, b_shape in self.binary_test_shapes:
            data1, data2 = arange(a_shape), arange(b_shape, 1)
            a, b = new_tensor(data1, False), new_tensor(data2, False)
            self.assertEqual(a.logical_and(b), np.logical_and(data1, data2))
    def test_logical_not(self):
        for shape in self.unary_test_shapes:
            data = arange(shape)
            x = new_tensor(data)
            self.assertEqual(x.logical_not(), np.logical_not(data))
    def test_logical_or(self):
        for a_shape, b_shape in self.binary_test_shapes:
            data1, data2 = arange(a_shape), arange(b_shape, 1)
            a, b = new_tensor(data1, False), new_tensor(data2, False)
            self.assertEqual(a.logical_or(b), np.logical_or(data1, data2))
    def test_logical_xor(self):
        for a_shape, b_shape in self.binary_test_shapes:
            data1, data2 = arange(a_shape), arange(b_shape, 1)
            a, b = new_tensor(data1, False), new_tensor(data2, False)
            self.assertEqual(a.logical_xor(b), np.logical_xor(data1, data2))
    def test_log_sum_exp(self):
        # logsumexp along axis 0 of a 1-D tensor equals log(sum(exp(.)))
        data = np.array([1., 2., 3.], 'float32')
        x = new_tensor(data)
        self.assertEqual(x.logsumexp(0), np.log(np.sum(np.exp(data))))
    def test_masked_fill(self):
        # Both the out-of-place and in-place fills must match NumPy boolean assignment.
        data = arange((2, 3))
        x = new_tensor(data)
        mask = x > 2
        y = x.masked_fill(mask, 0)
        x.masked_fill_(mask, 0)
        data[data > 2] = 0
        self.assertEqual(x, data)
        self.assertEqual(y, data)
    def test_matmul(self):
        # Shape pairs cover vector-vector, vector-matrix, batched and broadcast cases.
        test_shapes = [((2,), (2,)),
                       ((2,), (2, 3)),
                       ((2, 3), (3,)),
                       ((2, 3), (3, 4)),
                       ((2,), (4, 2, 3)),
                       ((4, 2, 3), (3,)),
                       ((1, 2, 3), (2, 3, 4)),
                       ((2, 2, 3), (1, 3, 4)),
                       ((2, 2, 3), (2, 3, 4)),
                       ((2, 1, 2, 3), (2, 3, 4)),
                       ((1, 2, 3), (2, 2, 3, 4)),
                       ((2, 1, 2, 3), (1, 2, 3, 4))]
        for a_shape, b_shape in test_shapes:
            data1, data2 = arange(a_shape), arange(b_shape, 1)
            a, b = new_tensor(data1, False), new_tensor(data2, False)
            self.assertEqual(a.__matmul__(b), np.matmul(data1, data2))
    def test_max(self):
        # (axis, keepdims) combinations, including multi-axis reduction
        entries = [(0, True), (0, False),
                   (1, True), (1, False),
                   ((0, 1), True), ((0, 1), False)]
        for axis, keepdims in entries:
            data = arange((2, 3))
            x = new_tensor(data)
            y = x.max(axis, keepdim=keepdims)
            result = np.max(data, axis, keepdims=keepdims)
            self.assertEqual(y, result)
    def test_maximum(self):
        for a_shape, b_shape in self.binary_test_shapes:
            data1, data2 = uniform(a_shape), uniform(b_shape)
            a, b = new_tensor(data1), new_tensor(data2)
            y = a.maximum(b)
            self.assertEqual(y, np.maximum(data1, data2))
    def test_mean(self):
        entries = [(0, True), (0, False),
                   (1, True), (1, False),
                   ((0, 1), True), ((0, 1), False)]
        for axis, keepdims in entries:
            data = arange((2, 3))
            x = new_tensor(data)
            y = x.mean(axis, keepdim=keepdims)
            result = np.mean(data, axis, keepdims=keepdims)
            self.assertEqual(y, result)
    def test_min(self):
        entries = [(0, True), (0, False),
                   (1, True), (1, False),
                   ((0, 1), True), ((0, 1), False)]
        for axis, keepdims in entries:
            data = arange((2, 3))
            x = new_tensor(data)
            y = x.min(axis, keepdim=keepdims)
            result = np.min(data, axis, keepdims=keepdims)
            self.assertEqual(y, result)
    def test_minimum(self):
        for a_shape, b_shape in self.binary_test_shapes:
            data1, data2 = uniform(a_shape), uniform(b_shape)
            a, b = new_tensor(data1), new_tensor(data2)
            y = a.minimum(b)
            self.assertEqual(y, np.minimum(data1, data2))
    def test_mm(self):
        # 2-D matrix multiply only (mm), unlike matmul's batched cases
        entries = [((2, 3), (3, 4))]
        for a_shape, b_shape in entries:
            data1, data2 = arange(a_shape), arange(b_shape)
            a, b = new_tensor(data1), new_tensor(data2)
            y = a.mm(b)
            self.assertEqual(y, np.matmul(data1, data2))
    def test_mul(self):
        # Both `*` and in-place `*=` forms
        for a_shape, b_shape in self.binary_test_shapes:
            data1, data2 = arange(a_shape), arange(b_shape, 1)
            a, b = new_tensor(data1, False), new_tensor(data2, False)
            self.assertEqual(a * b, data1 * data2)
            a *= b
            self.assertEqual(a, data1 * data2)
    def test_multinomial(self):
        # Sampling is random, so only the output shape is checked
        data = np.array([[0.1, 0.2, 0.3, 0.4], [0.4, 0.3, 0.2, 0.1]])
        x = new_tensor(data)
        y = x.multinomial(2)
        self.assertEqual(y.shape, (2, 2))
    def test_narrow(self):
        # narrow(dim, start, length) is a slicing view
        data = arange((2, 3))
        x = new_tensor(data)
        self.assertEqual(x.narrow(0, 1, 1), data[1:2, :])
    def test_not_equal(self):
        for a_shape, b_shape in self.binary_test_shapes:
            data1 = uniform(a_shape)
            # dropout zeroes ~half the entries, guaranteeing some inequality
            data2 = dropout(data1, drop_ratio=0.5)
            a, b = new_tensor(data1, False), new_tensor(data2, False)
            self.assertEqual(a != b, np.not_equal(data1, data2))
    def test_neg(self):
        data = np.array([-1., 0., 1.], 'float32')
        x = new_tensor(data)
        self.assertEqual(-x, -data)
        x.neg_()
        self.assertEqual(x, -data)
    def test_non_zero(self):
        # nonzero() returns coordinates stacked column-wise, one row per hit
        data = arange((2, 3))
        x = new_tensor(data)
        self.assertEqual((x > 2).nonzero(), np.stack(np.nonzero(data > 2), axis=1))
    def test_norm(self):
        entries = [(0, True), (0, False),
                   (1, True), (1, False),
                   ((0, 1), True), ((0, 1), False)]
        for axis, keepdims in entries:
            for ord in (1, 2, 'fro', None):
                data = arange((2, 3))
                x = new_tensor(data)
                y = x.norm(ord, axis, keepdim=keepdims)
                # Reference: L1 and L2/'fro' computed by hand; everything else
                # deferred to np.linalg.norm.
                if ord == 1:
                    result = np.sum(np.abs(data), axis=axis, keepdims=keepdims)
                elif ord == 2 or ord == 'fro':
                    result = np.sum(np.square(data), axis=axis, keepdims=keepdims)
                    result = np.sqrt(result)
                else:
                    result = np.linalg.norm(data, ord, axis, keepdims=keepdims)
                self.assertEqual(y, result)
    def test_normal(self):
        # Smoke test only: in-place normal_() is random, so no value assertion
        data = arange((2, 3))
        x = new_tensor(data)
        x.normal_()
    def test_permute(self):
        # Explicit permutation and the no-arg (full reverse) form, plus in-place variants
        entries = [(0, 2, 1), None]
        for perm in entries:
            data = arange((2, 3, 4))
            x = new_tensor(data)
            if perm is None:
                self.assertEqual(x.permute(), np.transpose(data))
                self.assertEqual(x.T, data.T)
                x.permute_()
                self.assertEqual(x, np.transpose(data))
            else:
                self.assertEqual(x.permute(*perm), np.transpose(data, perm))
                x.permute_(*perm)
                self.assertEqual(x, np.transpose(data, perm))
        # transpose(dim0, dim1) swaps exactly two axes
        entries = [(0, 1), (0, 2), (1, 2)]
        for dim0, dim1 in entries:
            data = arange((2, 3, 4))
            x = new_tensor(data)
            perm = list(range(len(data.shape)))
            perm[dim0], perm[dim1] = perm[dim1], perm[dim0]
            self.assertEqual(x.transpose(dim0, dim1), np.transpose(data, perm))
            x.transpose_(dim0, dim1)
            self.assertEqual(x, np.transpose(data, perm))
    def test_pow(self):
        for a_shape, b_shape in self.binary_test_shapes:
            # base starts at 1 to avoid 0**0 ambiguity
            data1, data2 = arange(a_shape, 1), arange(b_shape)
            a, b = new_tensor(data1, False), new_tensor(data2, False)
            self.assertEqual(a.pow(b), np.power(data1, data2))
    def test_reciprocal(self):
        data = np.array([1., 2., 3.], 'float32')
        x = new_tensor(data)
        self.assertEqual(x.reciprocal(), np.reciprocal(data))
        x.reciprocal_()
        self.assertEqual(x, np.reciprocal(data))
    def test_repeat(self):
        entries = [(2,), (1, 1), (1, 2), (2, 1), (2, 2)]
        for repeats in entries:
            data = arange((2, 2))
            x = new_tensor(data)
            y = x.repeat(repeats)
            # np.tile requires repeats padded to the data's rank on the left
            repeats = (1,) * (len(data.shape) - len(repeats)) + repeats
            self.assertEqual(y, np.tile(data, repeats))
    def test_reshape(self):
        # 0 keeps the original dimension; -1 infers it
        entries = [(0, 0), (0, -1)]
        for shape in entries:
            data = arange((2, 3))
            x = new_tensor(data)
            y = x.reshape(shape)
            self.assertEqual(y, data.reshape(y.shape))
            x.reshape_(shape)
            self.assertEqual(x, data.reshape(y.shape))
            # view/view_/view_as are reshape aliases
            self.assertEqual(x.view(data.shape), data)
            x.view_(data.shape)
            self.assertEqual(x, data)
            self.assertEqual(x.view_as(x), data)
    def test_roll(self):
        # (shift, axis) pairs, including multi-axis and flattened (axis=None) rolls
        entries = [(0, 0), ((0, 0), (0, 1)), ((-1, 1), (0, 1)), (1, None)]
        for shift, axis in entries:
            data = arange((2, 3))
            x = new_tensor(data)
            y = x.roll(shift, axis)
            self.assertEqual(y, np.roll(data, shift, axis))
    def test_round(self):
        data = np.array([0.9, 1.4, 1.9], 'float32')
        x = new_tensor(data)
        self.assertEqual(x.round(), np.round(data))
        x.round_()
        self.assertEqual(x, np.round(data))
    def test_rsqrt(self):
        data = np.array([4., 9., 16], 'float32')
        x = new_tensor(data)
        result = 1. / np.sqrt(data)
        self.assertEqual(x.rsqrt(), result)
        x.rsqrt_()
        self.assertEqual(x, result)
    def test_scatter(self):
        # NOTE(review): range(0, 1) only exercises axis 0 even though the
        # reference loop below also handles axis 1 — possibly intended range(0, 2).
        for axis in range(0, 1):
            data1 = arange((4, 4))
            # index is a permutation matrix pattern so every slot is written once
            data2 = np.array([[0, 1, 2, 3], [1, 2, 3, 0],
                              [2, 3, 0, 1], [3, 0, 1, 2]])
            data3 = arange((4, 4), 100)
            x, index = new_tensor(data1), new_tensor(data2)
            v = new_tensor(data3)
            y = x.scatter(axis, index, v)
            # Reference scatter computed element-by-element
            result = data1.copy()
            for i, j in itertools.product(*[range(d) for d in data2.shape]):
                if axis == 0:
                    result[data2[i, j], j] = data3[i, j]
                else:
                    result[i, data2[i, j]] = data3[i, j]
            self.assertEqual(y, result)
            x.scatter_(axis, index, v)
            self.assertEqual(x, result)
    def test_scatter_add(self):
        for axis in range(0, 1):
            data1 = arange((4, 4))
            # all-zero index accumulates several updates into the same slots
            data2 = np.array([[0, 0], [0, 0]])
            data3 = arange((4, 4), 100)
            x, index = new_tensor(data1), new_tensor(data2)
            v = new_tensor(data3)
            y = x.scatter_add(axis, index, v)
            result = data1.copy()
            for i, j in itertools.product(*[range(d) for d in data2.shape]):
                if axis == 0:
                    result[data2[i, j], j] += data3[i, j]
                else:
                    result[i, data2[i, j]] += data3[i, j]
            self.assertEqual(y, result)
            # scatter_ with reduce='add' must match the dedicated scatter_add
            x.scatter_(axis, index, v, reduce='add')
            self.assertEqual(x, result)
    def test_scatter_mul(self):
        for axis in range(0, 1):
            data1 = arange((4, 4))
            data2 = np.array([[0, 1, 2, 3], [1, 2, 3, 0],
                              [2, 3, 0, 1], [3, 0, 1, 2]])
            x, index = new_tensor(data1), new_tensor(data2)
            result = data1.copy()
            for i, j in itertools.product(*[range(d) for d in data2.shape]):
                if axis == 0:
                    result[data2[i, j], j] *= 2.33
                else:
                    result[i, data2[i, j]] *= 2.33
            # scalar source with reduce='multiply' (in-place only)
            x.scatter_(axis, index, 2.33, reduce='multiply')
            self.assertEqual(x, result)
    def test_setitem(self):
        data = arange((2, 3))
        x = new_tensor(data)
        # boolean-mask assignment
        x[x > 2] = 0
        data[data > 2] = 0
        self.assertEqual(x, data)
        # assorted index forms; unsupported ones are allowed to raise and are skipped
        entries = [0,
                   slice(None, None, None),
                   slice(0, None, None),
                   slice(0, 0, None),
                   slice(0, 1, None),
                   slice(0, 1, 1),
                   data,
                   (data, data)]
        for item in entries:
            try:
                x.__setitem__(item, 0)
                data.__setitem__(item, 0)
                self.assertEqual(x, data)
            except (NotImplementedError, ValueError, TypeError):
                pass
def test_sign(self):
data = np.array([-1., 0., 1.], 'float32')
x = | |
' + str(nts.shape) + '}' for nts_id_name, nts in
_f.outputs.items()]))
if f_name not in nodes:
nodes[f_name] = (node_x, node_y, node_text)
for label_to, (label_from, node_from) in _f.inputs.items():
to_x, to_y = ind.node2X[_f.id_name], n2d[_f.id_name]
from_x, from_y = ind.node2X[node_from], n2d[node_from]
intermediat_edges = pseudo_edges[(node_from, _f.id_name)]
if len(intermediat_edges) <= 1:
m_x, m_y = (to_x + from_x) / 2, (to_y + from_y) / 2
edge_x, edge_y = edges.get(label_to, ([], []))
edge_x.extend([m_x, to_x, None])
edge_y.extend([m_y, to_y, None])
if label_to not in edges:
edges[label_to] = (edge_x, edge_y)
edge_x, edge_y = edges.get(label_from, ([], []))
edge_x.extend([from_x, m_x, None])
edge_y.extend([from_y, m_y, None])
if label_from not in edges:
edges[label_from] = (edge_x, edge_y)
else:
edge_x, edge_y = edges.get(label_from, ([], []))
m_id = intermediat_edges[0][1]
m_x, m_y = ind.node2X[m_id], n2d[m_id]
edge_x.extend([from_x, m_x, None])
edge_y.extend([from_y, m_y, None])
if label_from not in edges:
edges[label_from] = (edge_x, edge_y)
print(intermediat_edges)
for i in range(1, len(intermediat_edges) - 1):
# print(intermediat_edges[i])
m_id = intermediat_edges[i][0]
from_x, from_y = ind.node2X[m_id], n2d[m_id]
m_id = intermediat_edges[i][1]
m_x, m_y = ind.node2X[m_id], n2d[m_id]
edge_x, edge_y = edges.get(dashVis.blank_label, ([], []))
edge_x.extend([from_x, m_x, None])
edge_y.extend([from_y, m_y, None])
if dashVis.blank_label not in edges:
edges[dashVis.blank_label] = (edge_x, edge_y)
m_id = intermediat_edges[-1][0]
m_x, m_y = ind.node2X[m_id], n2d[m_id]
edge_x, edge_y = edges.get(label_to, ([], []))
edge_x.extend([m_x, to_x, None])
edge_y.extend([m_y, to_y, None])
if label_to not in edges:
edges[label_to] = (edge_x, edge_y)
return nodes, edges, \
(min(ind.depth2nodes.keys()) - 1, max(ind.depth2nodes.keys()) + 1), \
(min(ind.node2X.values()) - 1, max(ind.node2X.values()) + 1)
def NetworkLayout(nn: NeuralNetwork, datasets=None):
    """Compute a plotly-ready layout of `nn`'s dataflow graph.

    nn: the network whose `functions` and `inputs` define the graph.
    datasets: optional list of dataset objects; those whose `id_name` matches a
        network input contribute hover-text describing their outputs.

    Returns (nodes, edges, depth):
        nodes -- dict mapping display-name -> ([x], [y], [hover text]) traces
        edges -- dict mapping connection label -> ([x], [y]) line traces
        depth -- number of layers (max y coordinate + 1)
    """
    # FIX: original signature used a mutable default argument (`datasets=[]`).
    if datasets is None:
        datasets = []
    # --- Assign a depth (y coordinate) to every function via a dependency-driven
    # --- work stack: a function is placed one level below its deepest predecessor.
    stack = list(nn.functions)
    df_name2obj = dict([(_f.id_name, _f) for _f in stack])
    inputs = set([id_name for _, id_name in nn.inputs.values()])
    pos_y = dict([(id_name, 0) for id_name in inputs])
    # NOTE(review): this dict-comprehension keeps only ONE input at depth 0
    # (later keys overwrite earlier ones) — looks like it should be {0: list(inputs)};
    # confirm before changing, as later x-shifting iterates y_pos rows.
    y_pos = dict([(0, [id_name]) for id_name in inputs])
    while stack:
        _f = stack.pop()
        y_coord = 0
        all_found = True
        for predecessor in [id_name for _, id_name in _f.inputs.values()]:
            if (predecessor not in pos_y
                    and predecessor not in inputs):
                # Predecessor not yet placed: requeue it ahead of _f and retry later.
                predecessor = df_name2obj.get(predecessor)
                stack.append(_f)
                try:
                    stack.remove(predecessor)
                    stack.append(predecessor)
                except ValueError:
                    pass
                all_found = False
                break
            else:
                y_coord = max(pos_y.get(predecessor) + 1, y_coord)
        if all_found:
            pos_y[_f.id_name] = y_coord
            y_pos[y_coord] = y_pos.get(y_coord, []) + [_f]
    # --- Assign x coordinates layer by layer, shifting siblings right on collision.
    pos_x = dict([(_id, x) for x, _id in enumerate(inputs)])
    y_x = {0: len(inputs)}
    for y in range(1, max(y_pos.keys()) + 1):
        for _f in y_pos.get(y, []):
            predecessors = list([id_name for _, id_name in _f.inputs.values()])
            x_pos = set()
            pred_x = 0
            for pred in predecessors:
                x = pos_x[pred]
                if x in x_pos:
                    # Collision: bump this x and shift every node at the
                    # predecessor's row that sits at or beyond it.
                    x += 1
                    for n in y_pos[pos_y[pred]]:
                        if isinstance(n, str):
                            _x = pos_x[n]
                            pos_x[n] = _x + 1 if _x >= x else _x
                        else:
                            _x = pos_x[n.id_name]
                            pos_x[n.id_name] = _x + 1 if _x >= x else _x
                x_pos.add(x)
                pred_x += x
            _y_x = 0 if y_x.get(y) is None else y_x[y] + 1
            y_x[y] = _y_x + pred_x
            # Center the node over its predecessors, but never left of the row cursor.
            pred_x = max(pred_x * 1.0 / (len(predecessors) if len(predecessors) > 0 else 1), _y_x)
            pos_x[_f.id_name] = pred_x
    # --- Build node traces: one per input name, one per function class name.
    nodes = dict()
    for _in in inputs:
        node_x, node_y, node_text = nodes.get(_in, ([], [], []))
        node_x.append(pos_x[_in])
        node_y.append(pos_y[_in])
        node_text.append(',<br />'.join(
            ['{' + nts_id_name + ': ' + nts.dtype.__str__() + ', ' + str(nts.shape) + '}'
             for d in datasets if d.id_name == _in for nts_id_name, nts in d.outputs.items()]))
        if _in not in nodes:
            nodes[_in] = (node_x, node_y, node_text)
    for _f in nn.functions:
        f_name = _f.__class__.__name__
        node_x, node_y, node_text = nodes.get(f_name, ([], [], []))
        node_x.append(pos_x[_f.id_name])
        node_y.append(pos_y[_f.id_name])
        node_text.append(',<br />'.join(
            ['{' + nts_id_name + ': ' + nts.dtype.__str__() + ', ' + str(nts.shape) + '}' for nts_id_name, nts in
             _f.outputs.items()]))
        if f_name not in nodes:
            nodes[f_name] = (node_x, node_y, node_text)
    # --- Build edge traces: each connection is split at its midpoint so the two
    # --- halves can carry the "from" and "to" labels separately.
    edges = dict()
    for _f in nn.functions:
        for label_to, (label_from, node_from) in _f.inputs.items():
            to_x, to_y = pos_x[_f.id_name], pos_y[_f.id_name]
            from_x, from_y = pos_x[node_from], pos_y[node_from]
            m_x, m_y = (to_x + from_x) / 2, (to_y + from_y) / 2
            edge_x, edge_y = edges.get(label_to, ([], []))
            edge_x.extend([m_x, to_x, None])
            edge_y.extend([m_y, to_y, None])
            if label_to not in edges:
                edges[label_to] = (edge_x, edge_y)
            edge_x, edge_y = edges.get(label_from, ([], []))
            edge_x.extend([from_x, m_x, None])
            edge_y.extend([from_y, m_y, None])
            if label_from not in edges:
                edges[label_from] = (edge_x, edge_y)
    return nodes, edges, max(y_pos.keys()) + 1
@self.app.callback(
[Output(component_id='network-structure', component_property='figure'),
Output(component_id='searchSpaceAncestry', component_property='figure')],
[Input(component_id='searchSpaceProjection', component_property='clickData')]
)
def update_searchSpace_info(input_data):
if input_data is None:
return [{'layout': ss_info_ancestry_layout}, {}]
id_name = input_data['points'][0]['text']
dataSaver = DSSqlite3(**db_config)
individual = dataSaver.get_individual_by_name(id_name)
# TODO: got exception in next line
_, ancestry = dataSaver.get_ancestry_for_ind(id_name)
if ancestry is not None:
levelOneAnc = [dataSaver.get_ancestry_for_ind(ind)[1] for ind in ancestry.ancestors]
del dataSaver
nodes, edges, y_range, x_range = NetworkLayout2(individual.network)
adapted_layout = dict(ss_info_ancestry_layout)
adapted_layout['height'] = y_range[1] * 30 + 200
adapted_layout['yaxis'] = dict(adapted_layout['yaxis'])
adapted_layout['yaxis']['range'] = [y_range[0], y_range[1]]
adapted_layout['xaxis'] = dict(adapted_layout['xaxis'])
adapted_layout['xaxis']['range'] = [x_range[0], x_range[1]]
nodes = [{'x': node_x,
'y': node_y,
'text': node_text,
'mode': 'markers',
'marker': {'size': 10,
'symbol': 'circle',
'color': getStructureColor(name)},
'hoverinfo': 'text',
# 'textposition': 'center right',
'showlegend': True,
'name': name
} for name, (node_x, node_y, node_text) in nodes.items()]
edges = [{'x': edge_x,
'y': edge_y,
'mode': 'lines',
'hoverinfo': 'none',
'name': name,
'showlegend': name != dashVis.blank_label,
'line': {'color': getStructureColor(name)}
} for name, (edge_x, edge_y) in edges.items()]
structure_fig = {
'data': edges + nodes,
'layout': adapted_layout,
}
# ==================
nodes = list()
edges = dict()
nodes.append({
'x': [0],
'y': [0],
'mode': 'markers',
'hoverinfo': 'text',
'text': id_name,
'name': 'selected',
'showlegend': False,
'marker': {'size': 10,
'symbol': 'dot',
'color': '#a55'}
})
if ancestry is not None:
tmp = -(len(ancestry.ancestors) - 1) * .5
offsets = [tmp + i for i in range(len(ancestry.ancestors))]
xs, ys = edges.get(ancestry.method, ([], []))
xs.extend([x for offset in offsets for x in [0, offset, None]])
ys.extend([y for _ in offsets for y in [0, 1, None]])
edges[ancestry.method] = xs, ys
nodes.append({
'x': [offset for offset in offsets],
'y': [1 for _ in offsets],
'mode': 'markers',
'hoverinfo': 'text',
'text': ancestry.ancestors,
'name': 'ancestors 0',
'showlegend': False,
'marker': {'size': 10,
'symbol': 'dot',
'color': [getAncColor(c)
for c in ancestry.ancestors]}
})
# TODO: fix this
# for anc, mid in zip(levelOneAnc, offsets):
# if anc is not None:
# anc_l = len(anc.ancestors)
# tmp = -(anc_l - 1) / anc_l * .8 + mid
# _offsets = [tmp + i * .8 / (anc_l - 1) for i in range(anc_l)]
# xs, ys = edges.get(anc.method, ([], []))
# xs.extend([x for offset in _offsets for x in [mid, offset, None]])
# ys.extend([y for _ in _offsets for y in [1, 2, None]])
# edges[anc.method] = xs, ys
# nodes.append({
# 'x': [offset for offset in _offsets],
# 'y': [2 for _ in _offsets],
# 'mode': 'markers',
# 'hoverinfo': 'text',
# 'text': anc.ancestors,
# 'name': 'ancestors 1',
# 'showlegend': False,
# 'marker': {'size': 10,
# 'symbol': 'dot',
# 'color': [getAncColor(c)
# for c in anc.ancestors]}
# })
edges = [{
'x': xs,
'y': ys,
'mode': 'lines',
'hoverinfo': 'none',
'name': method,
'showleged': method != dashVis.blank_label,
'line': {'color': getAncColor(method)}
} for method, (xs, ys) in edges.items()]
adapted_layout = dict(ss_info_ancestry_layout)
adapted_layout['yaxis'] = dict(adapted_layout['yaxis'])
adapted_layout['yaxis']['range'] = [-.5, 2.5]
ancestry_fig = {
'data': edges + nodes,
'layout': adapted_layout,
}
return [structure_fig, ancestry_fig]
@self.app.callback(
[Output(component_id='auto-update-projection', component_property='interval'),
Output(component_id='auto-update-ancestry', component_property='interval'),
Output(component_id='auto-update-metrics', component_property='interval')],
[Input(component_id='update-interval', component_property='value')]
)
def change_update_interval(input_data):
interval = int(input_data) * 1000
return interval, interval, interval
@self.app.callback(
Output(component_id='searchSpaceProjection', component_property='figure')
, [Input(component_id='auto-update-projection', component_property='n_intervals')]
)
def auto_update_projection(data):
print('=============== begin projection ===============')
dataSaver = DSSqlite3(**db_config)
abstract_time_stamps = sorted(dataSaver.get_abstract_time_stamps())
base_ind = [set(dataSaver.get_individual_functions(name)) for name in
dataSaver.get_individual_names_by_abstract_time_stamp(abstract_time_stamps[0])]
function_vectors = dict()
projection_texts = dict()
# for time_stamp in abstract_time_stamps[15:]:
for name in dataSaver.get_individual_names():
print(name)
ind_f = set(dataSaver.get_individual_functions(name))
v = [len(i.union(ind_f))-len(i.intersection(ind_f)) for i in base_ind]
function_vectors[name] = v
projection_texts[name] = dataSaver.get_individual_metrics(name).get('ACC', 0)
#next(iter(ind.metrics.values())) if ind.metrics else 0)
xs, ys = zip(*self.manifold.fit_transform(list(function_vectors.values())))
del dataSaver
# all_ind_names = set(dataSaver.get_individual_names())
# set_names = set(projection_texts.keys())
# new_ind_names = [n for n in all_ind_names if n not in set_names]
# all_ind_names = set_names.union(new_ind_names)
#
# idx_m = len(new_ind_names)
# print(new_ind_names)
# for i, ind0 in enumerate(new_ind_names):
# # ind0_ = dataSaver.get_individual_by_name(ind0)
# ind0_functions = set(dataSaver.get_individual_functions(ind0))
# # projection_texts[ind0] = (next(iter(ind0_.metrics.values())) if ind0_.metrics else 0)
# projection_texts[ind0] = dataSaver.get_individual_metrics(ind0).get('ACC', 0)
# # print(ind0, projection_texts[ind0])
# for ind1 in set_names:
# if (ind0, ind1) not in projection_distances:
# # dist = ind0_.norm(dataSaver.get_individual_by_name(ind1))
# ind1_functions = set(dataSaver.get_individual_functions(ind1))
# dist = len(ind0_functions.union(ind1_functions))-len(ind0_functions.intersection(ind1_functions))
# projection_distances[ind0, ind1] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.