# NOTE: the two lines below are dataset-export residue (a markdown table
# header), not Python code; kept as comments so this file parses.
# code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
# |---|---|---|
'''
Created on 18.03.2015
@author: <NAME>
'''
import matplotlib
import matplotlib.dates
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from docutils.languages.af import labels
from matplotlib.patches import Polygon
from pandas import Series, DataFrame, MultiIndex
# import HistoQhObs as HistoQhObs
# import HistoQhObs_Together as HistoQhObs_Together
# import plotDiurnalValidateNew as plotDiurnalValidateNew
# import plotWAT as plotWAT
# Global plot configuration: one uniform text size for all plot elements.
sizeText = 10
params = {
    'backend': 'wxAgg',
    'lines.markersize': 6,
    'axes.labelsize': sizeText,
    'mathtext.default': 'regular',
    # BUGFIX: 'text.fontsize' was a deprecated alias removed from matplotlib;
    # passing an unknown key makes rcParams.update() raise KeyError.
    # 'font.size' is the supported equivalent (base font size).
    'font.size': sizeText,
    'axes.titlesize': sizeText,
    'legend.fontsize': sizeText,
    'xtick.labelsize': sizeText,
    'ytick.labelsize': sizeText,
}
plt.rcParams.update(params)
# Module-wide defaults for figure annotations; read by create_Standardfigure().
fontsize_XLabel = 14
fontsize_YLabel = 14
fontsize_title = 14
fontsize_XTicks = 14
fontsize_YTicks = 14
fontsize_Legend = 14
WithLegendFrame = False
def create_Standardfigure():
    """Create a standard 8x5-inch figure with a single axes.

    The axes is fixed to y-limits [0, 1.1], shrunk vertically and shifted
    upward to leave room for a legend anchored below the plot, and the
    x-axis major ticks are formatted as 12-hour clock times (callers are
    expected to plot date-number x data).

    Returns:
        tuple: ``(fig, ax)`` -- the matplotlib Figure and Axes objects.
    """
    # The former local copies of the module-level fontsize_*/WithLegendFrame
    # constants were unused shadows and have been removed; the module-level
    # values (all identical) are used instead.
    fig = plt.figure(figsize=(8, 5))
    fig.subplots_adjust(left=0.15)
    gs1 = gridspec.GridSpec(1, 1)
    ax = plt.subplot(gs1[0, :])
    ax.set_ylim(0, 1.1)
    # Shrink the axes to 70% height and raise it so the legend placed below
    # the axes does not overlap the x tick labels.
    box = ax.get_position()
    ax.set_position([box.x0, box.y0 + box.height * 0.3,
                     box.width, box.height * 0.7])
    # Format x ticks like "01:30 PM"; assumes date-number x values.
    ax.xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%I:%M %p'))
    # NOTE(review): no artists exist yet, so this emits a "no handles"
    # warning and draws nothing; callers must call ax.legend(...) again
    # after plotting. Kept for backward compatibility with existing callers.
    ax.legend(loc='upper center', bbox_to_anchor=(0.5, -0.2),
              frameon=WithLegendFrame, ncol=2, fontsize=fontsize_Legend)
    return fig, ax
def Histogram_AT():
    """Load 2012 outdoor air-temperature (AT) records, bin each sample into
    five categories (A-E) by temperature range, and print category counts,
    counts of category transitions between consecutive samples, and a
    weekday/weekend split.

    Reads 'AT2012.csv' from a hard-coded local folder (semicolon-separated,
    4-level column MultiIndex, datetime index). Prints results to stdout
    and returns None.
    """
    recFolder = 'D:/ghi-mbe/Daten Auswertung/records/AT/'
    # Category boundaries (presumably degrees Celsius -- TODO confirm units).
    t_1 = 5.0
    t_2 = 11.0
    t_3 = 14.0
    t_4 = 18.0
    # Human-readable range labels, immediately overwritten below by the
    # single-letter labels actually used.
    n_0 = "<5" # "A"
    n_1 = "5>11" # "B"
    n_2 = "11>14" # "C"
    n_3 = "14>18" # "D"
    n_4 = ">18" # "E"
    n_0 = "A"
    n_1 = "B"
    n_2 = "C"
    n_3 = "D"
    n_4 = "E"
    def func_AT(row):
        # Map the raw air temperature ("AT") of one row to category A-E.
        if row["Weather","-","-","AT"] <= t_1:
            return n_0
        elif t_1 < row["Weather","-","-","AT"] <= t_2:
            return n_1
        elif t_2 < row["Weather","-","-","AT"] <= t_3:
            return n_2
        elif t_3 < row["Weather","-","-","AT"] <= t_4:
            return n_3
        else:
            return n_4
    def func_rAT(row):
        # Same mapping as func_AT, but on the rounded temperature ("rAT").
        if row["Weather","-","-","rAT"] <= t_1:
            return n_0
        elif t_1 < row["Weather","-","-","rAT"] <= t_2:
            return n_1
        elif t_2 < row["Weather","-","-","rAT"] <= t_3:
            return n_2
        elif t_3 < row["Weather","-","-","rAT"] <= t_4:
            return n_3
        else:
            return n_4
    df1=pd.read_csv(recFolder+'AT2012.csv',index_col=0,sep=';', header=[0,1,2,3],low_memory=False,parse_dates=True)
    # NOTE(review): df1.apply(pd.Series.round) rounds EVERY column and yields
    # a whole DataFrame, which is then assigned to a single column -- this
    # looks like it was meant to round only the "AT" column; confirm intent.
    df1["Weather","-","-","rAT"] = df1.apply(pd.Series.round)
    df1["Weather","-","-","Kategorie_AT"] = df1.apply(func_AT, axis=1)
    df1["Weather","-","-","Kategorie_rAT"] = df1.apply(func_rAT, axis=1)
    # Select the rows of each category (raw and rounded temperature).
    Kategorie_A = df1[df1["Weather","-","-","Kategorie_AT"]=="A"]
    Kategorie_B = df1[df1["Weather","-","-","Kategorie_AT"]=="B"]
    Kategorie_C = df1[df1["Weather","-","-","Kategorie_AT"]=="C"]
    Kategorie_D = df1[df1["Weather","-","-","Kategorie_AT"]=="D"]
    Kategorie_E = df1[df1["Weather","-","-","Kategorie_AT"]=="E"]
    Kategorie_rA = df1[df1["Weather","-","-","Kategorie_rAT"]=="A"]
    Kategorie_rB = df1[df1["Weather","-","-","Kategorie_rAT"]=="B"]
    Kategorie_rC = df1[df1["Weather","-","-","Kategorie_rAT"]=="C"]
    Kategorie_rD = df1[df1["Weather","-","-","Kategorie_rAT"]=="D"]
    Kategorie_rE = df1[df1["Weather","-","-","Kategorie_rAT"]=="E"]
    # Print overall category counts and their sums.
    print ("Kategorie A:", len(Kategorie_A), "Kategorie rA:", len(Kategorie_rA))
    print ("Kategorie B:", len(Kategorie_B), "Kategorie rB:", len(Kategorie_rB))
    print ("Kategorie C:", len(Kategorie_C), "Kategorie rC:", len(Kategorie_rC))
    print ("Kategorie D:", len(Kategorie_D), "Kategorie rD:", len(Kategorie_rD))
    print ("Kategorie E:", len(Kategorie_E), "Kategorie rE:", len(Kategorie_rE))
    print ("Summe Kategorie A-E:", len(Kategorie_A)+len(Kategorie_B)+len(Kategorie_C)+len(Kategorie_D)+len(Kategorie_E))
    print ("Summe Kategorie rA-rE:", len(Kategorie_rA)+len(Kategorie_rB)+len(Kategorie_rC)+len(Kategorie_rD)+len(Kategorie_rE))
    # Count category transitions between consecutive samples (one step up:
    # A->B, B->C, C->D, D->E only; downward transitions are not counted).
    Wechsel_A_B = 0
    Wechsel_B_C = 0
    Wechsel_C_D = 0
    Wechsel_D_E = 0
    for index, line in enumerate(df1.iterrows()):
        if index == len(df1.index)-1:
            # Last row has no successor; nothing to compare.
            print ("no")
        else:
            # NOTE(review): integer lookup `series[index]` on a datetime-
            # indexed Series falls back to positional indexing, which is
            # deprecated in newer pandas -- verify with .iloc if upgrading.
            if df1["Weather","-","-","Kategorie_AT"][index] == "A" and df1["Weather","-","-","Kategorie_AT"][index+1] == "B":
                Wechsel_A_B = Wechsel_A_B + 1
            if df1["Weather","-","-","Kategorie_AT"][index] == "B" and df1["Weather","-","-","Kategorie_AT"][index+1] == "C":
                Wechsel_B_C = Wechsel_B_C + 1
            if df1["Weather","-","-","Kategorie_AT"][index] == "C" and df1["Weather","-","-","Kategorie_AT"][index+1] == "D":
                Wechsel_C_D = Wechsel_C_D + 1
            if df1["Weather","-","-","Kategorie_AT"][index] == "D" and df1["Weather","-","-","Kategorie_AT"][index+1] == "E":
                Wechsel_D_E = Wechsel_D_E + 1
    # Split into weekdays (Mon-Fri, weekday < 5) and weekend (Sat/Sun).
    df1['dayNumber'] = df1.index.weekday
    onlyWeekdays = df1[df1['dayNumber']<5]
    onlyWeekend = df1[df1['dayNumber']>=5]
    print ("Histogram_AT done")
def Select_ColorsAndMarkers(Level0="", Level2="",Level3="", Level4="", Level5=""):
markEntr_Alt1 = True
print ("Start SelectAnalysisFunction")
# ColorList Level0
colorsTemperature=["LimeGreen",'Indigo','RoyalBlue','DeepSkyBlue','Orange','Red']
markersTemperature=['^','o','s','*','d','v']
# ColorList Level2
colorsEntrances=["LimeGreen","ForestGreen","DarkGreen","LightSkyBlue","CornflowerBlue","DarkSlateBlue"]
if markEntr_Alt1:
markersEntrances=['^','o','s','*','d','v'] # alternative 1
else:
markersEntrances2=['^','o','s','^','o','s'] # alternative 2
markersEntrances = markersEntrances2
# ColorList Level3
colorsAps=["Sienna","FireBrick","Red","OrangeRed","Tomato","DeepPink","Fuchsia","Magenta","MediumVioletRed","Crimson","LimeGreen"]
markersAps=["s",'^','o','h','+','x','s','p','*','d',None]
# ColorList Level4
colorRooms=["LimeGreen",'Crimson','GoldenRod','CornflowerBlue',"DarkGreen",'MidnightBlue']
markersRooms=[None,'^','o','s','*','d']
# Checklisten
CheckTemperatures = ["T1","T2","T3","T4","T5"]
CheckTemperatures = ["T0","T1","T2","T3","T4","T5"]
CheckEntrances = ["B2E1","B2E2","B2E3","B3E1","B3E2","B3E3"]
CheckApartments = ["A01","A02","A03","A04","A05","A06","A07","A08","A09","A10",'-']
CheckRooms = ['-', "Room_Bath","Room_Children","Room_Kitchen","Room_Living","Room_Sleeping",]
if Level0 == "T0":
#print "Nur eine Linie, also alle Temperaturbereiche zusammen"
if Level2 == None:
#print "Alle Eingaenge"
if Level3 == "-":
#print "mean von allen Apartments"
if Level4 == "-":
#print "mean von allen Rooms"
if Level5 == "WP1":
#print Level5
colorList = colorsEntrances
markerList = markersEntrances
title = ["T0","allBuildings","meanApartments","meanRooms",Level5]
labels = CheckEntrances
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP2":
#print Level5
colorList = colorsEntrances
markerList = markersEntrances
title = ["T0","allBuildings","meanApartments","meanRooms",Level5]
labels = CheckEntrances
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP1+2":
#print Level5
colorList = colorsEntrances
markerList = markersEntrances
title = ["T0","allBuildings","meanApartments","meanRooms",Level5]
labels = CheckEntrances
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP":
#print Level5
colorList = colorsEntrances
markerList = markersEntrances
title = ["T0","allBuildings","meanApartments","meanRooms",Level5]
labels = CheckEntrances
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WPD":
#print Level5
colorList = colorsEntrances
markerList = markersEntrances
title = ["T0","allBuildings","meanApartments","meanRooms",Level5]
labels = CheckEntrances
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WPS":
#print Level5
colorList = colorsEntrances
markerList = markersEntrances
title = ["T0","allBuildings","meanApartments","meanRooms",Level5]
labels = CheckEntrances
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
else:
print ("ERROR: Auswahl Level5 nicht korrekt")
#-----------------------------------------------------------------
#-----------------------------------------------------------------
elif Level4 in CheckRooms:
#print Level4
if Level5 == "WP1":
#print Level5
colorList = colorsEntrances
markerList = markersEntrances
title = ["T0","allBuildings","meanApartments",Level4,Level5]
labels = CheckEntrances
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP2":
#print Level5
colorList = colorsEntrances
markerList = markersEntrances
title = ["T0","allBuildings","meanApartments",Level4,Level5]
labels = CheckEntrances
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP1+2":
#print Level5
colorList = colorsEntrances
markerList = markersEntrances
title = ["T0","allBuildings","meanApartments",Level4,Level5]
labels = CheckEntrances
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP":
#print Level5
colorList = colorsEntrances
markerList = markersEntrances
title = ["T0","allBuildings","meanApartments",Level4,Level5]
labels = CheckEntrances
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WPD":
#print Level5
colorList = colorsEntrances
markerList = markersEntrances
title = ["T0","allBuildings","meanApartments",Level4,Level5]
labels = CheckEntrances
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WPS":
#print Level5
colorList = colorsEntrances
markerList = markersEntrances
title = ["T0","allBuildings","meanApartments",Level4,Level5]
labels = CheckEntrances
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
else:
print ("ERROR: Auswahl Level5 nicht korrekt")
#-----------------------------------------------------------------
#-----------------------------------------------------------------
else:
print ("ERROR: Auswahl Level4 nicht korrekt")
elif Level3 in CheckApartments:
#print Level3
if Level4 == "-":
print ("mean von allen Rooms")
if Level5 == "WP1":
#print Level5
colorList = colorsEntrances
markerList = markersEntrances
title = ["T0","allBuildings",Level3,"meanRooms",Level5]
labels = CheckEntrances
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP2":
#print Level5
colorList = colorsEntrances
markerList = markersEntrances
title = ["T0","allBuildings",Level3,"meanRooms",Level5]
labels = CheckEntrances
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP1+2":
#print Level5
colorList = colorsEntrances
markerList = markersEntrances
title = ["T0","allBuildings",Level3,"meanRooms",Level5]
labels = CheckEntrances
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP":
#print Level5
colorList = colorsEntrances
markerList = markersEntrances
title = ["T0","allBuildings",Level3,"meanRooms",Level5]
labels = CheckEntrances
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WPD":
#print Level5
colorList = colorsEntrances
markerList = markersEntrances
title = ["T0","allBuildings",Level3,"meanRooms",Level5]
labels = CheckEntrances
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WPS":
#print Level5
colorList = colorsEntrances
markerList = markersEntrances
title = ["T0","allBuildings",Level3,"meanRooms",Level5]
labels = CheckEntrances
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
else:
print ("ERROR: Auswahl Level5 nicht korrekt")
#-----------------------------------------------------------------
#-----------------------------------------------------------------
elif Level4 in CheckRooms:
#print Level4
if Level5 == "WP1":
#print Level5
colorList = colorsEntrances
markerList = markersEntrances
title = ["T0","allBuildings",Level3,Level4,Level5]
labels = CheckEntrances
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP2":
#print Level5
colorList = colorsEntrances
markerList = markersEntrances
title = ["T0","allBuildings",Level3,Level4,Level5]
labels = CheckEntrances
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP1+2":
#print Level5
colorList = colorsEntrances
markerList = markersEntrances
title = ["T0","allBuildings",Level3,Level4,Level5]
labels = CheckEntrances
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP":
#print Level5
colorList = colorsEntrances
markerList = markersEntrances
title = ["T0","allBuildings",Level3,Level4,Level5]
labels = CheckEntrances
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WPD":
#print Level5
colorList = colorsEntrances
markerList = markersEntrances
title = ["T0","allBuildings",Level3,Level4,Level5]
labels = CheckEntrances
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WPS":
#print Level5
colorList = colorsEntrances
markerList = markersEntrances
title = ["T0","allBuildings",Level3,Level4,Level5]
labels = CheckEntrances
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
else:
print ("ERROR: Auswahl Level5 nicht korrekt")
#-----------------------------------------------------------------
#-----------------------------------------------------------------
else:
print ("ERROR: Auswahl Level4 nicht korrekt")
else:
print ("ERROR: Auswahl Level3 nicht korrekt")
elif Level2 in CheckEntrances:
#print Level2
if Level3 == "-":
#print "mean von allen Apartments"
if Level4 == "-":
#print "mean von allen Rooms"
if Level5 == "WP1":
#print Level5
colorList = colorsEntrances[CheckEntrances.index(Level2)]
markerList = markersEntrances[CheckEntrances.index(Level2)]
title = ["T0", Level2,"meanApartments","meanRooms",Level5]
labels = [""]
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP2":
#print Level5
colorList = colorsEntrances[CheckEntrances.index(Level2)]
markerList = markersEntrances[CheckEntrances.index(Level2)]
title = ["T0", Level2,"meanApartments","meanRooms",Level5]
labels = [""]
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP1+2":
#print Level5
colorList = colorsEntrances[CheckEntrances.index(Level2)]
markerList = markersEntrances[CheckEntrances.index(Level2)]
title = ["T0", Level2,"meanApartments","meanRooms",Level5]
labels = [""]
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP":
#print Level5
colorList = colorsEntrances[CheckEntrances.index(Level2)]
markerList = markersEntrances[CheckEntrances.index(Level2)]
title = ["T0", Level2,"meanApartments","meanRooms",Level5]
labels = [""]
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WPD":
#print Level5
colorList = colorsEntrances[CheckEntrances.index(Level2)]
markerList = markersEntrances[CheckEntrances.index(Level2)]
title = ["T0", Level2,"meanApartments","meanRooms",Level5]
labels = [""]
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WPS":
#print Level5
colorList = colorsEntrances[CheckEntrances.index(Level2)]
markerList = markersEntrances[CheckEntrances.index(Level2)]
title = ["T0", Level2,"meanApartments","meanRooms",Level5]
labels = [""]
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
else:
print ("ERROR: Auswahl Level5 nicht korrekt")
elif Level4 == None:
print ("Alle Rooms")
if Level5 == "WP1":
#print Level5
colorList = colorRooms
markerList = markersRooms
title = ["T0", Level2,"meanApartments","allRooms",Level5]
labels = CheckRooms
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP2":
#print Level5
colorList = colorRooms
markerList = markersRooms
title = ["T0", Level2,"meanApartments","allRooms",Level5]
labels = CheckRooms
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP1+2":
#print Level5
colorList = colorRooms
markerList = markersRooms
title = ["T0", Level2,"meanApartments","allRooms",Level5]
labels = CheckRooms
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP":
#print Level5
colorList = colorRooms
markerList = markersRooms
title = ["T0", Level2,"meanApartments","allRooms",Level5]
labels = CheckRooms
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WPD":
#print Level5
colorList = colorRooms
markerList = markersRooms
title = ["T0", Level2,"meanApartments","allRooms",Level5]
labels = CheckRooms
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WPS":
#print Level5
colorList = colorRooms
markerList = markersRooms
title = ["T0", Level2,"meanApartments","allRooms",Level5]
labels = CheckRooms
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
else:
print ("ERROR: Auswahl Level5 nicht korrekt")
elif Level4 in CheckRooms:
#print Level4
if Level5 == "WP1":
#print Level5
colorList = colorRooms[CheckRooms.index(Level4)]
markerList = markersRooms[CheckRooms.index(Level4)]
title = ["T0", Level2,"meanApartments",Level4,Level5]
labels = [""]
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP2":
#print Level5
colorList = colorRooms[CheckRooms.index(Level4)]
markerList = markersRooms[CheckRooms.index(Level4)]
title = ["T0", Level2,"meanApartments",Level4,Level5]
labels = [""]
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP1+2":
#print Level5
colorList = colorRooms[CheckRooms.index(Level4)]
markerList = markersRooms[CheckRooms.index(Level4)]
title = ["T0", Level2,"meanApartments",Level4,Level5]
labels = [""]
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP":
#print Level5
colorList = colorRooms[CheckRooms.index(Level4)]
markerList = markersRooms[CheckRooms.index(Level4)]
title = ["T0", Level2,"meanApartments",Level4,Level5]
labels = [""]
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WPD":
#print Level5
colorList = colorRooms[CheckRooms.index(Level4)]
markerList = markersRooms[CheckRooms.index(Level4)]
title = ["T0", Level2,"meanApartments",Level4,Level5]
labels = [""]
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WPS":
#print Level5
colorList = colorRooms[CheckRooms.index(Level4)]
markerList = markersRooms[CheckRooms.index(Level4)]
title = ["T0", Level2,"meanApartments",Level4,Level5]
labels = [""]
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
else:
print ("ERROR: Auswahl Level5 nicht korrekt")
else:
print ("ERROR: Auswahl Level4 nicht korrekt")
elif Level3 == None:
#print "Alle Apartments"
if Level4 == "-":
#print "mean von allen Rooms"
if Level5 == "WP1":
#print Level5
colorList = colorsAps
markerList = markersAps
title = ["T0", Level2,"allApartments","meanRooms",Level5]
labels = CheckApartments
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP2":
#print Level5
colorList = colorsAps
markerList = markersAps
title = ["T0", Level2,"allApartments","meanRooms",Level5]
labels = CheckApartments
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP1+2":
#print Level5
colorList = colorsAps
markerList = markersAps
title = ["T0", Level2,"allApartments","meanRooms",Level5]
labels = CheckApartments
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP":
#print Level5
colorList = colorsAps
markerList = markersAps
title = ["T0", Level2,"allApartments","meanRooms",Level5]
labels = CheckApartments
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WPD":
#print Level5
colorList = colorsAps
markerList = markersAps
title = ["T0", Level2,"allApartments","meanRooms",Level5]
labels = CheckApartments
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WPS":
#print Level5
colorList = colorsAps
markerList = markersAps
title = ["T0", Level2,"allApartments","meanRooms",Level5]
labels = CheckApartments
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
else:
print ("ERROR: Auswahl Level5 nicht korrekt")
elif Level4 in CheckRooms:
#print Level4
if Level5 == "WP1":
#print Level5
colorList = colorsAps
markerList = markersAps
title = ["T0", Level2,"allApartments",Level4,Level5]
labels = CheckApartments
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP2":
#print Level5
colorList = colorsAps
markerList = markersAps
title = ["T0", Level2,"allApartments",Level4,Level5]
labels = CheckApartments
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP1+2":
#print Level5
colorList = colorsAps
markerList = markersAps
title = ["T0", Level2,"allApartments",Level4,Level5]
labels = CheckApartments
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP":
#print Level5
colorList = colorsAps
markerList = markersAps
title = ["T0", Level2,"allApartments",Level4,Level5]
labels = CheckApartments
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WPD":
#print Level5
colorList = colorsAps
markerList = markersAps
title = ["T0", Level2,"allApartments",Level4,Level5]
labels = CheckApartments
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WPS":
#print Level5
colorList = colorsAps
markerList = markersAps
title = ["T0", Level2,"allApartments",Level4,Level5]
labels = CheckApartments
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
else:
print ("ERROR: Auswahl Level5 nicht korrekt")
else:
print ("ERROR: Auswahl Level4 nicht korrekt")
elif Level3 in CheckApartments:
#print Level3
if Level4 == "-":
#print "mean von allen Rooms"
if Level5 == "WP1":
#print Level5
colorList = colorsAps[CheckApartments.index(Level3)]
markerList = markersAps[CheckApartments.index(Level3)]
title = ["T0", Level2,Level3,"meanRooms",Level5]
labels = [""]
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP2":
#print Level5
colorList = colorsAps[CheckApartments.index(Level3)]
markerList = markersAps[CheckApartments.index(Level3)]
title = ["T0", Level2,Level3,"meanRooms",Level5]
labels = [""]
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP1+2":
#print Level5
colorList = colorsAps[CheckApartments.index(Level3)]
markerList = markersAps[CheckApartments.index(Level3)]
title = ["T0", Level2,Level3,"meanRooms",Level5]
labels = [""]
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP":
#print Level5
colorList = colorsAps[CheckApartments.index(Level3)]
markerList = markersAps[CheckApartments.index(Level3)]
title = ["T0", Level2,Level3,"meanRooms",Level5]
labels = [""]
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WPD":
#print Level5
colorList = colorsAps[CheckApartments.index(Level3)]
markerList = markersAps[CheckApartments.index(Level3)]
title = ["T0", Level2,Level3,"meanRooms",Level5]
labels = [""]
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WPS":
#print Level5
colorList = colorsAps[CheckApartments.index(Level3)]
markerList = markersAps[CheckApartments.index(Level3)]
title = ["T0", Level2,Level3,"meanRooms",Level5]
labels = [""]
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
else:
print ("ERROR: Auswahl Level5 nicht korrekt")
elif Level4 == None:
#print "Alle Rooms"
if Level5 == "WP1":
#print Level5
colorList = colorRooms
markerList = markersRooms
title = ["T0", Level2,Level3,"allRooms",Level5]
labels = CheckRooms
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP2":
#print Level5
colorList = colorRooms
markerList = markersRooms
title = ["T0", Level2,Level3,"allRooms",Level5]
labels = CheckRooms
return colorList, markerList, title, labels
#-----------------------------------------------------------------
elif Level5 == "WP1+2":
#print Level5
colorList = colorRooms
markerList = markersRooms
title = ["T0", Level2,Level3,"allRooms",Level5]
labels = CheckRooms
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP":
#print Level5
colorList = colorRooms
markerList = markersRooms
title = ["T0", Level2,Level3,"allRooms",Level5]
labels = CheckRooms
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WPD":
#print Level5
colorList = colorRooms
markerList = markersRooms
title = ["T0", Level2,Level3,"allRooms",Level5]
labels = CheckRooms
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WPS":
#print Level5
colorList = colorRooms
markerList = markersRooms
title = ["T0", Level2,Level3,"allRooms",Level5]
labels = CheckRooms
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
else:
print ("ERROR: Auswahl Level5 nicht korrekt")
elif Level4 in CheckRooms:
#print Level4
if Level5 == "WP1":
#print Level5
colorList = colorRooms[CheckRooms.index(Level4)]
markerList = markersRooms[CheckRooms.index(Level4)]
title = ["T0", Level2,Level3,Level4,Level5]
labels = [""]
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP2":
#print Level5
colorList = colorRooms[CheckRooms.index(Level4)]
markerList = markersRooms[CheckRooms.index(Level4)]
title = ["T0", Level2,Level3,Level4,Level5]
labels = [""]
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP1+2":
#print Level5
colorList = colorRooms[CheckRooms.index(Level4)]
markerList = markersRooms[CheckRooms.index(Level4)]
title = ["T0", Level2,Level3,Level4,Level5]
labels = [""]
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP":
#print Level5
colorList = colorRooms[CheckRooms.index(Level4)]
markerList = markersRooms[CheckRooms.index(Level4)]
title = ["T0", Level2,Level3,Level4,Level5]
labels = [""]
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WPD":
#print Level5
colorList = colorRooms[CheckRooms.index(Level4)]
markerList = markersRooms[CheckRooms.index(Level4)]
title = ["T0", Level2,Level3,Level4,Level5]
labels = [""]
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WPS":
#print Level5
colorList = colorRooms[CheckRooms.index(Level4)]
markerList = markersRooms[CheckRooms.index(Level4)]
title = ["T0", Level2,Level3,Level4,Level5]
labels = [""]
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
else:
print ("ERROR: Auswahl Level5 nicht korrekt")
else:
print ("ERROR: Auswahl Level4 nicht korrekt")
else:
print ("ERROR: Auswahl Level3 nicht korrekt")
else:
print ("ERROR: Auswahl Level2 nicht eindeutig")
#-----------------------------------------------------------------
#-----------------------------------------------------------------
#-----------------------------------------------------------------
#-----------------------------------------------------------------
elif Level0 == None:
print ("Alle Linien, also T0 ..... T5")
if Level2 in CheckEntrances:
#print Level2
if Level3 == "-":
#print "mean alle Apartments"
if Level4 == '-':
#print "mean alle Rooms"
if Level5 == "WP1":
#print Level5
colorList = colorsTemperature
markerList = markersTemperature
title = [Level2,"meanApartments", "meanRooms", Level5]
labels = CheckTemperatures
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP2":
#print Level5
colorList = colorsTemperature
markerList = markersTemperature
title = [Level2,"meanApartments", "meanRooms", Level5]
labels = CheckTemperatures
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP1+2":
#print Level5
colorList = colorsTemperature
markerList = markersTemperature
title = [Level2,"meanApartments", "meanRooms", Level5]
labels = CheckTemperatures
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP":
#print Level5
colorList = colorsTemperature
markerList = markersTemperature
title = [Level2,"meanApartments", "meanRooms", Level5]
labels = CheckTemperatures
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WPD":
#print Level5
colorList = colorsTemperature
markerList = markersTemperature
title = [Level2,"meanApartments", "meanRooms", Level5]
labels = CheckTemperatures
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WPS":
#print Level5
colorList = colorsTemperature
markerList = markersTemperature
title = [Level2,"meanApartments", "meanRooms", Level5]
labels = CheckTemperatures
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
else:
print ("ERROR: Auswahl Level5 nicht korrekt")
elif Level4 in CheckRooms:
#print Level4
if Level5 == "WP1":
#print Level5
colorList = colorsTemperature
markerList = markersTemperature
title = [Level2,"meanApartments", Level4, Level5]
labels = CheckTemperatures
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP2":
#print Level5
colorList = colorsTemperature
markerList = markersTemperature
title = [Level2,"meanApartments", Level4, Level5]
labels = CheckTemperatures
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP1+2":
#print Level5
colorList = colorsTemperature
markerList = markersTemperature
title = [Level2,"meanApartments", Level4, Level5]
labels = CheckTemperatures
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP":
#print Level5
colorList = colorsTemperature
markerList = markersTemperature
title = [Level2,"meanApartments", Level4, Level5]
labels = CheckTemperatures
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WPD":
#print Level5
colorList = colorsTemperature
markerList = markersTemperature
title = [Level2,"meanApartments", Level4, Level5]
labels = CheckTemperatures
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WPS":
#print Level5
colorList = colorsTemperature
markerList = markersTemperature
title = [Level2,"meanApartments", Level4, Level5]
labels = CheckTemperatures
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
else:
print ("ERROR: Auswahl Level5 nicht korrekt")
else:
print ("ERROR: Auswahl Level4 nicht korrekt")
elif Level3 in CheckApartments:
#print Level3
if Level4 == '-':
#print "mean alle Rooms"
if Level5 == "WP1":
#print Level5
colorList = colorsTemperature
markerList = markersTemperature
title = [Level2,Level3, "meanRooms", Level5]
labels = CheckTemperatures
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP2":
#print Level5
colorList = colorsTemperature
markerList = markersTemperature
title = [Level2,Level3, "meanRooms", Level5]
labels = CheckTemperatures
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP1+2":
#print Level5
colorList = colorsTemperature
markerList = markersTemperature
title = [Level2,Level3, "meanRooms", Level5]
labels = CheckTemperatures
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP":
#print Level5
colorList = colorsTemperature
markerList = markersTemperature
title = [Level2,Level3, "meanRooms", Level5]
labels = CheckTemperatures
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WPD":
#print Level5
colorList = colorsTemperature
markerList = markersTemperature
title = [Level2,Level3, "meanRooms", Level5]
labels = CheckTemperatures
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WPS":
#print Level5
colorList = colorsTemperature
markerList = markersTemperature
title = [Level2,Level3, "meanRooms", Level5]
labels = CheckTemperatures
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
else:
print ("ERROR: Auswahl Level5 nicht korrekt")
elif Level4 in CheckRooms:
#print Level4
if Level5 == "WP1":
#print Level5
colorList = colorsTemperature
markerList = markersTemperature
title = [Level2,Level3,Level4, Level5]
labels = CheckTemperatures
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP2":
#print Level5
colorList = colorsTemperature
markerList = markersTemperature
title = [Level2,Level3,Level4, Level5]
labels = CheckTemperatures
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP1+2":
#print Level5
colorList = colorsTemperature
markerList = markersTemperature
title = [Level2,Level3,Level4, Level5]
labels = CheckTemperatures
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP":
#print Level5
colorList = colorsTemperature
markerList = markersTemperature
title = [Level2,Level3,Level4, Level5]
labels = CheckTemperatures
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WPD":
#print Level5
colorList = colorsTemperature
markerList = markersTemperature
title = [Level2,Level3,Level4, Level5]
labels = CheckTemperatures
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WPS":
##print Level5
colorList = colorsTemperature
markerList = markersTemperature
title = [Level2,Level3,Level4, Level5]
labels = CheckTemperatures
selctionStrings = [Level0,Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
else:
print ("ERROR: Auswahl Level5 nicht korrekt")
else:
print ("ERROR: Auswahl Level4 nicht korrekt")
else:
print ("ERROR: Auswahl Level3 nicht korrekt")
else:
print ("ERROR: Auswahl Level2 nicht eindeutig")
#-----------------------------------------------------------------
#-----------------------------------------------------------------
#-----------------------------------------------------------------
#-----------------------------------------------------------------
elif Level0 in ["T1","T2","T3","T4","T5"]:
if Level2 in CheckEntrances:
if Level3 == "-":
if Level4 == "-":
if Level5 == "WP1":
colorList = [colorsTemperature[0]] + [colorsTemperature[CheckTemperatures.index(Level0)]]
markerList = [markersTemperature[0]] + [markersTemperature[CheckTemperatures.index(Level0)]]
title = [Level2,"meanApartments","meanRooms",Level5]
labels = ["T0",Level0]
selctionStrings = [["T0",Level0],Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP2":
colorList = [colorsTemperature[0]] + [colorsTemperature[CheckTemperatures.index(Level0)]]
markerList = [markersTemperature[0]] + [markersTemperature[CheckTemperatures.index(Level0)]]
title = [Level2,"meanApartments","meanRooms",Level5]
labels = ["T0",Level0]
selctionStrings = [["T0",Level0],Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP1+2":
colorList = [colorsTemperature[0]] + [colorsTemperature[CheckTemperatures.index(Level0)]]
markerList = [markersTemperature[0]] + [markersTemperature[CheckTemperatures.index(Level0)]]
title = [Level2,"meanApartments","meanRooms",Level5]
labels = ["T0",Level0]
selctionStrings = [["T0",Level0],Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP":
colorList = [colorsTemperature[0]] + [colorsTemperature[CheckTemperatures.index(Level0)]]
markerList = [markersTemperature[0]] + [markersTemperature[CheckTemperatures.index(Level0)]]
title = [Level2,"meanApartments","meanRooms",Level5]
labels = ["T0",Level0]
selctionStrings = [["T0",Level0],Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WPD":
colorList = [colorsTemperature[0]] + [colorsTemperature[CheckTemperatures.index(Level0)]]
markerList = [markersTemperature[0]] + [markersTemperature[CheckTemperatures.index(Level0)]]
title = [Level2,"meanApartments","meanRooms",Level5]
labels = ["T0",Level0]
selctionStrings = [["T0",Level0],Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WPS":
colorList = [colorsTemperature[0]] + [colorsTemperature[CheckTemperatures.index(Level0)]]
markerList = [markersTemperature[0]] + [markersTemperature[CheckTemperatures.index(Level0)]]
title = [Level2,"meanApartments","meanRooms",Level5]
labels = ["T0",Level0]
selctionStrings = [["T0",Level0],Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
else:
print ("ERROR: Auswahl Level5 nicht korrekt")
elif Level4 in CheckRooms:
if Level5 == "WP1":
colorList = [colorsTemperature[0]] + [colorsTemperature[CheckTemperatures.index(Level0)]]
markerList = [markersTemperature[0]] + [markersTemperature[CheckTemperatures.index(Level0)]]
title = [Level2,"meanApartments",Level4,Level5]
labels = ["T0",Level0]
selctionStrings = [["T0",Level0],Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP2":
colorList = [colorsTemperature[0]] + [colorsTemperature[CheckTemperatures.index(Level0)]]
markerList = [markersTemperature[0]] + [markersTemperature[CheckTemperatures.index(Level0)]]
title = [Level2,"meanApartments",Level4,Level5]
labels = ["T0",Level0]
selctionStrings = [["T0",Level0],Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP1+2":
colorList = [colorsTemperature[0]] + [colorsTemperature[CheckTemperatures.index(Level0)]]
markerList = [markersTemperature[0]] + [markersTemperature[CheckTemperatures.index(Level0)]]
title = [Level2,"meanApartments",Level4,Level5]
labels = ["T0",Level0]
selctionStrings = [["T0",Level0],Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP":
colorList = [colorsTemperature[0]] + [colorsTemperature[CheckTemperatures.index(Level0)]]
markerList = [markersTemperature[0]] + [markersTemperature[CheckTemperatures.index(Level0)]]
title = [Level2,"meanApartments",Level4,Level5]
labels = ["T0",Level0]
selctionStrings = [["T0",Level0],Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WPD":
colorList = [colorsTemperature[0]] + [colorsTemperature[CheckTemperatures.index(Level0)]]
markerList = [markersTemperature[0]] + [markersTemperature[CheckTemperatures.index(Level0)]]
title = [Level2,"meanApartments",Level4,Level5]
labels = ["T0",Level0]
selctionStrings = [["T0",Level0],Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WPS":
colorList = [colorsTemperature[0]] + [colorsTemperature[CheckTemperatures.index(Level0)]]
markerList = [markersTemperature[0]] + [markersTemperature[CheckTemperatures.index(Level0)]]
title = [Level2,"meanApartments",Level4,Level5]
labels = ["T0",Level0]
selctionStrings = [["T0",Level0],Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
else:
print ("ERROR: Auswahl Level5 nicht korrekt")
else:
print ("ERROR: Auswahl Level4 nicht korrekt")
elif Level3 in CheckApartments:
if Level4 == "-":
if Level5 == "WP1":
colorList = [colorsTemperature[0]] + [colorsTemperature[CheckTemperatures.index(Level0)]]
markerList = [markersTemperature[0]] + [markersTemperature[CheckTemperatures.index(Level0)]]
title = [Level2,Level3,"meanRooms",Level5]
labels = ["T0",Level0]
selctionStrings = [["T0",Level0],Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP2":
colorList = [colorsTemperature[0]] + [colorsTemperature[CheckTemperatures.index(Level0)]]
markerList = [markersTemperature[0]] + [markersTemperature[CheckTemperatures.index(Level0)]]
title = [Level2,Level3,"meanRooms",Level5]
labels = ["T0",Level0]
selctionStrings = [["T0",Level0],Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP1+2":
colorList = [colorsTemperature[0]] + [colorsTemperature[CheckTemperatures.index(Level0)]]
markerList = [markersTemperature[0]] + [markersTemperature[CheckTemperatures.index(Level0)]]
title = [Level2,Level3,"meanRooms",Level5]
labels = ["T0",Level0]
selctionStrings = [["T0",Level0],Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP":
colorList = [colorsTemperature[0]] + [colorsTemperature[CheckTemperatures.index(Level0)]]
markerList = [markersTemperature[0]] + [markersTemperature[CheckTemperatures.index(Level0)]]
title = [Level2,Level3,"meanRooms",Level5]
labels = ["T0",Level0]
selctionStrings = [["T0",Level0],Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WPD":
colorList = [colorsTemperature[0]] + [colorsTemperature[CheckTemperatures.index(Level0)]]
markerList = [markersTemperature[0]] + [markersTemperature[CheckTemperatures.index(Level0)]]
title = [Level2,Level3,"meanRooms",Level5]
labels = ["T0",Level0]
selctionStrings = [["T0",Level0],Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WPS":
colorList = [colorsTemperature[0]] + [colorsTemperature[CheckTemperatures.index(Level0)]]
markerList = [markersTemperature[0]] + [markersTemperature[CheckTemperatures.index(Level0)]]
title = [Level2,Level3,"meanRooms",Level5]
labels = ["T0",Level0]
selctionStrings = [["T0",Level0],Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
else:
print ("ERROR: Auswahl Level5 nicht korrekt")
elif Level4 in CheckRooms:
if Level5 == "WP1":
colorList = [colorsTemperature[0]] + [colorsTemperature[CheckTemperatures.index(Level0)]]
markerList = [markersTemperature[0]] + [markersTemperature[CheckTemperatures.index(Level0)]]
title = [Level2,Level3,Level4,Level5]
labels = ["T0",Level0]
selctionStrings = [["T0",Level0],Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP2":
colorList = [colorsTemperature[0]] + [colorsTemperature[CheckTemperatures.index(Level0)]]
markerList = [markersTemperature[0]] + [markersTemperature[CheckTemperatures.index(Level0)]]
title = [Level2,Level3,Level4,Level5]
labels = ["T0",Level0]
selctionStrings = [["T0",Level0],Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP1+2":
colorList = [colorsTemperature[0]] + [colorsTemperature[CheckTemperatures.index(Level0)]]
markerList = [markersTemperature[0]] + [markersTemperature[CheckTemperatures.index(Level0)]]
title = [Level2,Level3,Level4,Level5]
labels = ["T0",Level0]
selctionStrings = [["T0",Level0],Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WP":
colorList = [colorsTemperature[0]] + [colorsTemperature[CheckTemperatures.index(Level0)]]
markerList = [markersTemperature[0]] + [markersTemperature[CheckTemperatures.index(Level0)]]
title = [Level2,Level3,Level4,Level5]
labels = ["T0",Level0]
selctionStrings = [["T0",Level0],Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WPD":
colorList = [colorsTemperature[0]] + [colorsTemperature[CheckTemperatures.index(Level0)]]
markerList = [markersTemperature[0]] + [markersTemperature[CheckTemperatures.index(Level0)]]
title = [Level2,Level3,Level4,Level5]
labels = ["T0",Level0]
selctionStrings = [["T0",Level0],Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
elif Level5 == "WPS":
colorList = [colorsTemperature[0]] + [colorsTemperature[CheckTemperatures.index(Level0)]]
markerList = [markersTemperature[0]] + [markersTemperature[CheckTemperatures.index(Level0)]]
title = [Level2,Level3,Level4,Level5]
labels = ["T0",Level0]
selctionStrings = [["T0",Level0],Level2,Level3,Level4,Level5]
return colorList, markerList, title, labels, selctionStrings
#-----------------------------------------------------------------
else:
print ("ERROR: Auswahl Level5 nicht korrekt")
else:
print ("ERROR: Auswahl Level4 nicht korrekt")
else:
print ("ERROR: Auswahl Level3 nicht korrekt")
else:
print ("ERROR: Auswahl Level2 nicht eindeutig")
else:
print ("ERROR: Auswahl Level0[0] nicht eindeutig")
print ("Ende SelectAnalysisFunction")
def english2German(titleList, labelList):
    """Translate English title/label components into German display strings.

    Components without an entry in the translation table are passed through
    unchanged, so arbitrary codes can be mixed in safely.

    Parameters
    ----------
    titleList : iterable of str
        Title components (building/entrance codes, room names, "T0".."T5", ...).
    labelList : iterable of str
        Legend/label components, translated with the same table.

    Returns
    -------
    tuple of (list, list)
        (translated title components, translated label components).
    """
    # FIX: the original dict listed the key "B3E2" twice, so "B3E2" silently
    # mapped to "R3E3" and "B3E3" had no translation at all. The second entry
    # is now the intended "B3E3".
    translateDictonary = {"B2E1": "R2E1",
                          "B2E2": "R2E2",
                          "B2E3": "R2E3",
                          "B3E1": "R3E1",
                          "B3E2": "R3E2",
                          "B3E3": "R3E3",
                          "allBuildings": "Gebaeude",
                          "meanApartment": "Durchschnitt Wohnung",
                          "allApartments": "Wohnung",
                          "Room_Sleeping": "Schlafzimmer",
                          "Room_Kitchen": u"Kueche",
                          "Room_Children": "Kinderzimmer",
                          "Room_Living": "Wohnzimmer",
                          "Room_Bath": "Badezimmer",
                          "allRooms": "Zimmer",
                          "meanRooms": "Durchschnitt Zimmer",
                          "T0": "ATR",
                          "T1": 'DAT $\leq$ 5',
                          "T2": "5 $\leq$ DAT $\leq$ 11",
                          "T3": "11 $\leq$ DAT $\leq$ 14",
                          "T4": "14 $\leq$ DAT $\leq$ 18",
                          "T5": "DAT $\geq$ 18",
                          "-": "Durschnitt"}
    # dict.get(c, c) keeps unknown components unchanged (same behavior as the
    # original membership-test loop, without the double lookup).
    new_titleList = [translateDictonary.get(component, component)
                     for component in titleList]
    new_labelList = [translateDictonary.get(component, component)
                     for component in labelList]
    return new_titleList, new_labelList
def codifyL1(codeList):
    """Replace a temperature code ("T0".."T5") in codeList[0] with its label.

    Mutates ``codeList`` in place (element 0 only), prints the translated
    value, and returns the list. If element 0 is None the list is returned
    untouched. Element 0 may be a single string or a list of strings; only
    the first translated entry is written back (original behavior).

    Parameters
    ----------
    codeList : list
        Selection codes; element 0 holds the temperature code(s).

    Returns
    -------
    list
        The same list, with element 0 translated where possible.
    """
    if isinstance(codeList[0], type(None)):
        return codeList
    codeListZ = codeList[0]
    translateDictonary = {'T0': 'ATR',
                          # NOTE(review): this reads like T1 should be the
                          # lower band ('AT Daily Average <= 5') per the
                          # T2..T5 scheme - confirm before changing the label.
                          'T1': '5 < AT Daily Average',
                          'T2': '5 < AT Daily Average <= 11',
                          # FIX: original said '1 <'; the band boundary is 11
                          # (consistent with the T2 upper / T4 lower bounds).
                          'T3': '11 < AT Daily Average <= 14',
                          'T4': '14 < AT Daily Average <= 18',
                          'T5': '18 < AT Daily Average'}
    # FIX: `basestring` only exists in Python 2 and raised NameError here on
    # Python 3; `str` is the correct check for this code base.
    if isinstance(codeListZ, str):
        codeListZ = [codeListZ]
    new_codeList = [translateDictonary.get(component, component)
                    for component in codeListZ]
    codeList[0] = new_codeList[0]
    print(new_codeList[0])
    return codeList
def english2English(titleList, labelList):
    """Expand English title/label codes into readable English display strings.

    Components without an entry in the table are passed through unchanged.
    Counterpart of ``english2German`` for English plot output.

    Parameters
    ----------
    titleList : iterable of str
        Title components (building/entrance codes, room names, "T0".."T5", ...).
    labelList : iterable of str
        Legend/label components, translated with the same table.

    Returns
    -------
    tuple of (list, list)
        (expanded title components, expanded label components).
    """
    # FIX: the original dict listed the key "B3E2" twice, so "B3E2" silently
    # mapped to "B3E3" and "B3E3" had no entry. The second entry is now "B3E3".
    translateDictonary = {"B2E1": "B2E1",
                          "B2E2": "B2E2",
                          "B2E3": "B2E3",
                          "B3E1": "B3E1",
                          "B3E2": "B3E2",
                          "B3E3": "B3E3",
                          "allBuildings": "all buildings",
                          "meanApartments": "Mean Apartment",
                          "allApartments": "all Apartments",
                          "Room_Sleeping": "Sleeping room",
                          "Room_Kitchen": "Kitchen",
                          "Room_Children": "Children room",
                          "Room_Living": "Living room",
                          "Room_Bath": "Bathroom",
                          "allRooms": "all Rooms",
                          # FIX: typo "Mean roooms" in the displayed label
                          "meanRooms": "Mean rooms",
                          "T0": "ATR",
                          "T1": 'DAT $\leq$ 5',
                          "T2": "5 $\leq$ DAT $\leq$ 11",
                          "T3": "11 $\leq$ DAT $\leq$ 14",
                          "T4": "14 $\leq$ DAT $\leq$ 18",
                          "T5": "DAT $\geq$ 18",
                          "-": "Average"}
    # dict.get(c, c) keeps unknown components unchanged (same behavior as the
    # original membership-test loop, without the double lookup).
    new_titleList = [translateDictonary.get(component, component)
                     for component in titleList]
    new_labelList = [translateDictonary.get(component, component)
                     for component in labelList]
    return new_titleList, new_labelList
def readDF(df1=None, df2=None, df3=None, df4=None, df5=None, df6=None,
           level0='ATR', level1='Standard Diurnal', level2='MD',
           level3='B2E1', level4='A01', level5='Room_Living', level6="WP1"):
    """Filter up to six DataFrames with a 7-level column MultiIndex.

    For each non-None level value, only columns whose MultiIndex entry at
    that position matches the value are kept. A level value of None keeps
    all columns on that level. Frames that are not supplied (or are empty)
    are returned as-is.

    Parameters
    ----------
    df1..df6 : pandas.DataFrame or None
        DataFrames with a 7-level column MultiIndex. Defaults were
        ``pd.DataFrame()`` in the original; None is used here to avoid
        mutable-default evaluation at import time (behavior is identical).
    level0..level6 : str or None
        Selection values for column-index levels 0..6.

    Returns
    -------
    tuple of six pandas.DataFrame
        The filtered frames, in argument order.
        (FIX: the original returned None, discarding all the filtering work;
        callers that ignored the return value still work unchanged.)
    """
    levels = [level0, level1, level2, level3, level4, level5, level6]
    print(levels)

    def _select(df):
        # Keep only columns matching every non-None level value.
        if df is None:
            return pd.DataFrame()
        if df.empty:
            return df
        for levelNr, level in enumerate(levels):
            if level is not None:
                df = df.iloc[:, df.columns.get_level_values(levelNr) == level]
        return df

    df1, df2, df3, df4, df5, df6 = (
        _select(d) for d in (df1, df2, df3, df4, df5, df6))
    print("COls: {}".format(df1.columns))
    print("Ende readDF")
    return df1, df2, df3, df4, df5, df6
def plotDiurnal(df,df2, labels=[],levels=[],timeType='Standard Diurnal',dataType='MD',title=None,colors=None):
    """Plot observed (df) vs. simulated (df2) diurnal window-opening profiles.

    Both frames carry a 7-level column MultiIndex which is narrowed with the
    entries of *levels* (index levels 0, 3, 4, 5, 6) and with *timeType* /
    *dataType* (index levels 1 / 2); a selector of None keeps all columns on
    that level.  One line per remaining column is drawn, styled by *colors*
    and labelled by *labels*.  The figure is shown interactively.
    """
    # Narrow both frames column-wise: (MultiIndex level, selector value).
    # This replaces the seven duplicated if-blocks of the original.
    for lvl, value in ((0, levels[0]), (1, timeType), (2, dataType),
                       (3, levels[1]), (4, levels[2]), (5, levels[3]),
                       (6, levels[4])):
        if value is not None:
            df = df.iloc[:, df.columns.get_level_values(lvl) == value]
            df2 = df2.iloc[:, df2.columns.get_level_values(lvl) == value]
    fig = plt.figure(figsize=(16. / 2.54, 10 / 2.54))
    fig.subplots_adjust(left=0.1)
    ax = plt.axes([0.1, 0.1, .85, .8])
    # Observed profiles: solid transparent lines.
    for index, column in enumerate(df.columns.values):
        if index != 10:
            ax.plot(df.index, df[column], colors[index], linewidth=2.0,
                    label=labels[index], alpha=0.4)
    # Simulated profiles: thin lines with sparse x-markers.
    for index, column in enumerate(df2.columns.values):
        if index != 10:
            ax.plot(df.index, df2[column], colors[index], marker="x",
                    linewidth=0.7, markevery=60, mfc='None',
                    mec=colors[index], label=labels[index] + ' Sim')
    ax.set_ylabel("Proportion of windows open")
    ax.set_xlabel("Time of the day")
    ax.set_ylim(0, 1)
    plt.title(title, y=1.05)
    # Shrink the axes upward to make room for the legend below the plot.
    box = ax.get_position()
    ax.set_position([box.x0, box.y0 + box.height * 0.32,
                     box.width, box.height * 0.68])
    ax.xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%H:%M'))
    ax.legend(loc='upper center', bbox_to_anchor=(0.475, -0.2), frameon=False, ncol=3)
    plt.show()
def plotBoxes(df,df2, labels=[],levels=[],title=None,colors=None, savingFolder="", extraName=""):
    """Draw a boxplot of the residuals (df2 - df) per column.

    Each box is filled with the matching entry of *colors*, the sample mean
    is over-plotted as a circle, and the x-ticks are labelled with *labels*.
    The figure is saved to *savingFolder* as both PNG and PDF, named from a
    sanitised *title*.  *levels* and *extraName* are accepted for interface
    compatibility but unused.
    """
    fig2 = plt.figure(figsize=(16. / 2.54, 8 / 2.54))
    fig2.subplots_adjust(left=0.1)
    ax2 = fig2.add_axes([0.13, 0.355, .85, .55])
    residuals = df2.values - df.values
    bp = ax2.boxplot(residuals, sym='-', vert=True, whis=1.5)
    boxColors = colors
    meanValues = DataFrame(residuals).mean(axis=0).values
    # Derive the box count from the boxplot itself (was hard-coded to 6).
    for i in range(len(bp['boxes'])):
        box = bp['boxes'][i]
        # Fill the box; zip(...) must be materialised because Polygon needs
        # a sequence (a zip iterator breaks under Python 3).
        boxCoords = list(zip(box.get_xdata()[:5], box.get_ydata()[:5]))
        ax2.add_patch(Polygon(boxCoords, facecolor=boxColors[i], alpha=0.1, zorder=1))
        # Redraw the median line over the fill.
        med = bp['medians'][i]
        medianX = [med.get_xdata()[j] for j in range(2)]
        medianY = [med.get_ydata()[j] for j in range(2)]
        plt.plot(medianX, medianY, boxColors[i], linewidth=2)
        # Overplot the sample mean, horizontally centred on the box.
        plt.plot([np.average(med.get_xdata())], meanValues[i],
                 color='None', marker='o', markeredgecolor=boxColors[i], markersize=7, zorder=0)
        plt.plot([np.average(med.get_xdata())], meanValues[i],
                 color=boxColors[i], marker='o', markeredgecolor=boxColors[i], markersize=7, alpha=0.2, zorder=3)
        plt.setp(bp['medians'][i], color=colors[i])
        plt.setp(bp['boxes'][i], color='DarkSlateGray')
    for whisker in bp['whiskers']:
        plt.setp(whisker, color='DarkSlateGray')
    for cap in bp['caps']:
        plt.setp(cap, color='DarkSlateGray')
    plt.setp(bp['fliers'], color='Gainsboro')
    plt.setp(bp['whiskers'], linestyle='solid')
    ax2.set_ylabel("Simulated-Observed WP profile")
    ax2.yaxis.set_label_coords(-0.09, 0.5)
    ax2.set_ylim(-0.02, 0.02)
    ax2.yaxis.set_ticks_position('left')
    ax2.xaxis.set_ticks_position('bottom')
    xtickNames = plt.setp(ax2, xticklabels=labels)
    plt.setp(xtickNames, rotation=30)
    ax2.yaxis.grid(True, zorder=0, color="Gainsboro", ls="-")
    ax2.xaxis.grid(False)
    ax2.set_axisbelow(True)
    # Sanitise the title for use as a file name.
    title = str(np.char.replace(title, " ", '_'))
    title = str(np.char.replace(title, "Apartment", 'Ap'))
    # Save via the figure object (plt.savefig has no 'figure' kwarg).
    fig2.savefig(savingFolder + title + '_BP.png', format='png')
    fig2.savefig(savingFolder + title + '_BP.pdf', format='pdf')
    # Close the figure so repeated calls in a loop do not leak memory.
    plt.close(fig2)
def plotDiurnalandBoxes(df,df2, labels=[],levels=[],timeType='Standard Diurnal',dataType='MD',title=None,colors=None, savingFolder="", extraName=""):
    """Plot observed vs. simulated diurnal profiles and a residual boxplot.

    *df* holds the observed and *df2* the simulated diurnal profiles; both
    carry a 7-level column MultiIndex that is narrowed with *levels* (index
    levels 0, 3, 4, 5, 6), *timeType* (level 1) and *dataType* (level 2).
    Two PDF figures are written to *savingFolder*: the diurnal profiles and
    a boxplot of the per-column residuals (simulated - observed).

    Returns (meanValues, str(levels), meanAbsResiduals): per-column mean and
    mean-absolute residuals, or NaN placeholders for the excluded apartment.
    """
    print(levels)
    if levels[1] == "B2E3" and levels[2] == 'A03' and levels[3] == 'Room_Kitchen':
        # Excluded apartment: return NaNs with the SAME arity as the normal
        # path (the original returned only two items here, breaking uniform
        # unpacking at the call sites).
        nan_row = np.full(6, np.nan)
        return nan_row, str(levels), nan_row
    oldtitle = title
    title = desmountTitle(title, extraName)
    name = buildName(oldtitle, extraName)
    if timeType != 'Standard Diurnal':
        title = timeType + ' - ' + title
        name = timeType + ' - ' + name
    # Narrow both frames column-wise: (MultiIndex level, selector value).
    for lvl, value in ((0, levels[0]), (1, timeType), (2, dataType),
                       (3, levels[1]), (4, levels[2]), (5, levels[3]),
                       (6, levels[4])):
        if value is not None:
            df = df.iloc[:, df.columns.get_level_values(lvl) == value]
            df2 = df2.iloc[:, df2.columns.get_level_values(lvl) == value]
    print("WE", df.columns)
    print('We', df2.columns)
    # --- figure 1: diurnal profiles -------------------------------------
    fig = plt.figure(figsize=(16. / 2.54, 10 / 2.54))
    fig.subplots_adjust(left=0.1)
    ax = fig.add_axes([0.13, 0.355, .85, .55])
    for index, column in enumerate(df.columns.values):
        if index != 10:
            ax.plot(df.index, df[column], colors[index], linewidth=2.0,
                    label=labels[index], alpha=0.4)
    for index, column in enumerate(df2.columns.values):
        if index != 10:
            ax.plot(df.index, df2[column], colors[index], marker="x",
                    linewidth=0.7, markevery=60, mfc='None',
                    mec=colors[index], label=labels[index] + ' Sim')
    ax.set_ylabel("Proportion of window open")
    ax.yaxis.set_label_coords(-0.09, 0.5)
    ax.set_xlabel("Time of the day")
    ax.set_ylim(0, 1)
    plt.title(title, y=1.05)
    ax.xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%H:%M'))
    ax.legend(loc='upper center', bbox_to_anchor=(0.475, -0.2), frameon=False, ncol=3)
    # Save via the figure object (plt.savefig has no 'figure' kwarg).
    fig.savefig(savingFolder + name + '.pdf', format='pdf')
    # --- figure 2: residual boxplot -------------------------------------
    fig2 = plt.figure(figsize=(16. / 2.54, 10 / 2.54))
    fig2.subplots_adjust(left=0.1)
    ax2 = fig2.add_axes([0.13, 0.355, .85, .55])
    plt.title(title, y=1.05)
    residuals = df2.values - df.values
    bp = ax2.boxplot(residuals, sym='-', vert=True, whis=1.5)
    boxColors = colors
    meanValues = DataFrame(residuals).mean(axis=0).values
    meanAbsResiduals = DataFrame(abs(residuals)).mean(axis=0).values
    # Derive the box count from the boxplot itself (was hard-coded to 6).
    for i in range(len(bp['boxes'])):
        box = bp['boxes'][i]
        # Fill the box; zip(...) must be materialised because Polygon needs
        # a sequence (a zip iterator breaks under Python 3).
        boxCoords = list(zip(box.get_xdata()[:5], box.get_ydata()[:5]))
        ax2.add_patch(Polygon(boxCoords, facecolor=boxColors[i], alpha=0.1, zorder=1))
        # Redraw the median line over the fill.
        med = bp['medians'][i]
        medianX = [med.get_xdata()[j] for j in range(2)]
        medianY = [med.get_ydata()[j] for j in range(2)]
        plt.plot(medianX, medianY, boxColors[i], linewidth=2)
        # Overplot the sample mean, horizontally centred on the box.
        plt.plot([np.average(med.get_xdata())], meanValues[i],
                 color='None', marker='o', markeredgecolor=boxColors[i], markersize=7, zorder=0)
        plt.plot([np.average(med.get_xdata())], meanValues[i],
                 color=boxColors[i], marker='o', markeredgecolor=boxColors[i], markersize=7, alpha=0.2, zorder=3)
        plt.setp(bp['medians'][i], color=colors[i])
        plt.setp(bp['boxes'][i], color='DarkSlateGray')
    for whisker in bp['whiskers']:
        plt.setp(whisker, color='DarkSlateGray')
    for cap in bp['caps']:
        plt.setp(cap, color='DarkSlateGray')
    plt.setp(bp['fliers'], color='Gainsboro')
    plt.setp(bp['whiskers'], linestyle='solid')
    ax2.set_ylabel("Simulated-Observed WP profile")
    ax2.yaxis.set_label_coords(-0.09, 0.5)
    ax2.set_ylim(-0.1, 0.1)
    ax2.yaxis.set_ticks_position('left')
    ax2.xaxis.set_ticks_position('bottom')
    xtickNames = plt.setp(ax2, xticklabels=labels)
    plt.setp(xtickNames, rotation=30)
    ax2.yaxis.grid(True, zorder=0, color="Gainsboro", ls="-")
    ax2.xaxis.grid(False)
    ax2.set_axisbelow(True)
    fig2.savefig(savingFolder + title + '_BP.pdf', format='pdf')
    # Close both figures so repeated calls in a loop do not leak memory.
    plt.close(fig)
    plt.close(fig2)
    return meanValues, str(levels), meanAbsResiduals
def plotDiurnalandBoxesBeta(df,df2, labels=[],levels=[],timeType='Standard Diurnal',dataType='MD',title=None,colors=None, savingFolder="", extraName=""):
    """Variant of plotDiurnalandBoxes with compact figures and WS labels.

    Narrows the 7-level column MultiIndex of *df* (observed) and *df2*
    (simulated) with *levels*, *timeType* and *dataType*, then saves the
    diurnal-profile figure and the residual boxplot as PDFs in
    *savingFolder* (file names derived from a sanitised *title*).

    Returns (meanValues, str(levels), meanAbsResiduals): per-column mean and
    mean-absolute residuals, or NaN placeholders for the excluded apartment.
    """
    print(levels)
    if levels[1] == "B2E3" and levels[2] == 'A03' and levels[3] == 'Room_Kitchen':
        # Excluded apartment: NaN placeholders with the same arity as the
        # normal return (the original returned only two items here).
        nan_row = np.full(6, np.nan)
        return nan_row, str(levels), nan_row
    oldtitle = title
    title = desmountTitle(title, extraName)
    name = buildName(oldtitle, extraName)
    if timeType != 'Standard Diurnal':
        title = timeType + ' - ' + title
        name = timeType + ' - ' + name
    # Narrow both frames column-wise: (MultiIndex level, selector value).
    for lvl, value in ((0, levels[0]), (1, timeType), (2, dataType),
                       (3, levels[1]), (4, levels[2]), (5, levels[3]),
                       (6, levels[4])):
        if value is not None:
            df = df.iloc[:, df.columns.get_level_values(lvl) == value]
            df2 = df2.iloc[:, df2.columns.get_level_values(lvl) == value]
    # --- figure 1: diurnal profiles -------------------------------------
    fig = plt.figure(figsize=(16. / 2.54, 9 / 2.54))
    fig.subplots_adjust(left=0.1)
    ax = fig.add_axes([0.13, 0.4, .85, .5])
    for index, column in enumerate(df.columns.values):
        if index != 10:
            ax.plot(df.index, df[column], colors[index], linewidth=2.0,
                    label=labels[index], alpha=0.4)
    for index, column in enumerate(df2.columns.values):
        if index != 10:
            ax.plot(df.index, df2[column], colors[index], marker="x",
                    linewidth=0.7, markevery=60, mfc='None',
                    mec=colors[index], label=labels[index] + ' Sim')
    if timeType == 'Standard Diurnal':
        ax.set_ylabel("SD - Aver. WS, " + str(title.split(", ")[1]))
    if timeType == 'Week End':
        ax.set_ylabel("WE - Aver. WS, " + str(title.split(", ")[1]))
    if timeType == 'Week':
        ax.set_ylabel("WD - Aver. WS, " + str(title.split(", ")[1]))
    ax.yaxis.set_label_coords(-0.09, 0.5)
    ax.set_xlabel("Time of the day")
    ax.set_ylim(0, 1)
    ax.xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%H:%M'))
    ax.legend(loc='upper center', bbox_to_anchor=(0.475, -0.25), frameon=False, ncol=3)
    # Sanitise the title for use as a file name.
    titleb = str(np.char.replace(title, " ", ''))
    titleb = str(np.char.replace(titleb, ",", '_'))
    # Save via the figure object (plt.savefig has no 'figure' kwarg).
    fig.savefig(savingFolder + titleb + '.pdf', format='pdf')
    # --- figure 2: residual boxplot -------------------------------------
    fig2 = plt.figure(figsize=(16. / 2.54, 9 / 2.54))
    fig2.subplots_adjust(left=0.1)
    ax2 = fig2.add_axes([0.13, 0.4, .85, .5])
    print('start')
    print(df2.head(1))
    print('break')
    print('stop')
    residuals = df2.values - df.values
    bp = ax2.boxplot(residuals, sym='-', vert=True, whis=1.5)
    boxColors = colors
    meanValues = DataFrame(residuals).mean(axis=0).values
    meanAbsResiduals = DataFrame(abs(residuals)).mean(axis=0).values
    # Derive the box count from the boxplot itself (was hard-coded to 6).
    for i in range(len(bp['boxes'])):
        box = bp['boxes'][i]
        # Fill the box; zip(...) must be materialised because Polygon needs
        # a sequence (a zip iterator breaks under Python 3).
        boxCoords = list(zip(box.get_xdata()[:5], box.get_ydata()[:5]))
        ax2.add_patch(Polygon(boxCoords, facecolor=boxColors[i], alpha=0.1, zorder=1))
        # Redraw the median line over the fill.
        med = bp['medians'][i]
        medianX = [med.get_xdata()[j] for j in range(2)]
        medianY = [med.get_ydata()[j] for j in range(2)]
        plt.plot(medianX, medianY, boxColors[i], linewidth=2)
        # Overplot the sample mean, horizontally centred on the box.
        plt.plot([np.average(med.get_xdata())], meanValues[i],
                 color='None', marker='o', markeredgecolor=boxColors[i], markersize=7, zorder=0)
        plt.plot([np.average(med.get_xdata())], meanValues[i],
                 color=boxColors[i], marker='o', markeredgecolor=boxColors[i], markersize=7, alpha=0.2, zorder=3)
        plt.setp(bp['medians'][i], color=colors[i])
        plt.setp(bp['boxes'][i], color='DarkSlateGray')
    for whisker in bp['whiskers']:
        plt.setp(whisker, color='DarkSlateGray')
    for cap in bp['caps']:
        plt.setp(cap, color='DarkSlateGray')
    plt.setp(bp['fliers'], color='Gainsboro')
    plt.setp(bp['whiskers'], linestyle='solid')
    # NOTE(review): the three conditional labels below are immediately
    # overwritten by the unconditional set_ylabel that follows; kept as in
    # the original — confirm which label is actually intended.
    if timeType == 'Standard Diurnal':
        ax2.set_ylabel("SD - Sim.-Obs. WS, " + str(title.split(", ")[1]))
    if timeType == 'Week End':
        ax2.set_ylabel("WE - Sim.-Obs. WS, " + str(title.split(", ")[1]))
    if timeType == 'Week':
        ax2.set_ylabel("WD - Sim.-Obs. WS, " + str(title.split(", ")[1]))
    ax2.set_ylabel("Sim.-Obs. WS, " + str(title.split(", ")[1]))
    ax2.yaxis.set_label_coords(-0.09, 0.5)
    ax2.set_ylim(-0.1, 0.1)
    ax2.yaxis.set_ticks_position('left')
    ax2.xaxis.set_ticks_position('bottom')
    xtickNames = plt.setp(ax2, xticklabels=labels)
    plt.setp(xtickNames, rotation=30)
    ax2.yaxis.grid(True, zorder=0, color="Gainsboro", ls="-")
    ax2.xaxis.grid(False)
    ax2.set_axisbelow(True)
    title = str(np.char.replace(title, " ", ''))
    title = str(np.char.replace(title, ",", '_'))
    fig2.savefig(savingFolder + title + '_BP.pdf', format='pdf')
    # Close both figures so repeated calls in a loop do not leak memory.
    plt.close(fig)
    plt.close(fig2)
    return meanValues, str(levels), meanAbsResiduals
def desmountTitle(title, startTitle):
    """Build a display title from *startTitle* and the parts of *title*.

    The first part of *title* is dropped; the remaining parts are joined
    with ', ' and appended to *startTitle* after a ' - ' separator.
    A single-part *title* is appended directly; an empty one returns
    *startTitle* unchanged.
    """
    if len(title) == 0:
        return startTitle
    if len(title) == 1:
        return startTitle + str(title[0])
    tail = ', '.join(str(part) for part in title[1:])
    return startTitle + ' - ' + tail
def buildName(title, startTitle):
    """Build a file-name stem from *startTitle* and the parts of *title*.

    Same scheme as desmountTitle() but with '_' as the only separator:
    the first part of *title* is dropped and the rest are joined with '_'
    onto *startTitle*.  A single-part *title* is appended directly; an
    empty one returns *startTitle* unchanged.
    """
    if len(title) == 0:
        return startTitle
    if len(title) == 1:
        return startTitle + str(title[0])
    tail = '_'.join(str(part) for part in title[1:])
    return startTitle + '_' + tail
if __name__ == '__main__':
    # Validation driver: compare observed vs. simulated diurnal window
    # profiles per entrance/apartment/room and collect the mean residuals.
    print("Start main")
    recordFolder = 'D:/dc224615_Ddiss/Documento/Pictures/MCValidation/B2E1/'
    recFolder = 'D:/EBC0018_PTJ_Volkswohnung_tos/HDF-Programming/pd4hdf/MarkovChain/MC4Windows/records/'
    # Observed (df1) and simulated (df2) profiles, 7-level column MultiIndex.
    df1 = pd.read_csv(recFolder + 'diurnals/B2E1_20121_201212diurnals.csv', index_col=0, sep=';',
                      header=[0, 1, 2, 4, 5, 6, 7], skiprows=[8], parse_dates=True, low_memory=False)
    df2 = pd.read_csv(recFolder + 'validationM3_B2E1/proSet_100_B2E1_CDPL.csv', index_col=0, sep=';',
                      header=[0, 1, 2, 4, 5, 6, 7], skiprows=[8], parse_dates=True, low_memory=False)
    roomsWP1 = ['Room_Kitchen', 'Room_Bath', 'Room_Living']
    roomsWP = ['Room_Children', 'Room_Sleeping']
    entrances = ["B2E1"]  # ,"B2E2","B2E3","B3E1","B3E2","B3E3"
    apartments = ["A01", "A02", "A03", "A04", "A05", "A06", "A07", "A08", "A09", "A10"]
    results = []
    indicis = []
    columns4Results = []
    for entrance in entrances:
        for apartment in apartments:
            for room in roomsWP1:
                colors, markers, title, labels, keys = Select_ColorsAndMarkers(Level0=None, Level2=entrance, Level3=apartment, Level4=room, Level5="WP1")
                title, labels = english2English(title, labels)
                keys = codifyL1(keys)
                # plotDiurnalandBoxes returns at least (meanValues, levelKey);
                # index the result instead of 2-name unpacking so a trailing
                # meanAbsResiduals element does not raise ValueError (the
                # original unpacking broke on the 3-tuple return).
                res = plotDiurnalandBoxes(df1, df2, levels=keys, labels=labels, title=title, colors=colors, savingFolder=recordFolder, extraName='2012')
                results.append(res[0])
                indicis.append(res[1])
            for room in roomsWP:
                print(entrance, apartment, room)
                colors, markers, title, labels, keys = Select_ColorsAndMarkers(Level0=None, Level2=entrance, Level3=apartment, Level4=room, Level5="WP")
                title, labels = english2English(title, labels)
                keys = codifyL1(keys)
                res = plotDiurnalandBoxes(df1, df2, levels=keys, labels=labels, title=title, colors=colors, savingFolder=recordFolder, extraName='2012')
                results.append(res[0])
                indicis.append(res[1])
    # NOTE(review): columns4Results keeps the labels of the LAST room only,
    # as in the original — confirm all rooms share the same label set.
    columns4Results = labels
    print(results)
    resultDF = DataFrame(results, index=indicis, columns=columns4Results)
    resultDF.to_csv(recordFolder + "results.csv", sep=';')
print ("end main") | [
"pandas.DataFrame",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"pandas.read_csv",
"matplotlib.pyplot.axes",
"numpy.empty",
"matplotlib.pyplot.setp",
"matplotlib.patches.Polygon",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.rcP... | [((777, 804), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (['params'], {}), '(params)\n', (796, 804), True, 'import matplotlib.pyplot as plt\n'), ((1252, 1278), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 5)'}), '(figsize=(8, 5))\n', (1262, 1278), True, 'import matplotlib.pyplot as plt\n'), ((1324, 1347), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(1)', '(1)'], {}), '(1, 1)\n', (1341, 1347), True, 'import matplotlib.gridspec as gridspec\n'), ((1357, 1379), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs1[0, :]'], {}), '(gs1[0, :])\n', (1368, 1379), True, 'import matplotlib.pyplot as plt\n'), ((3085, 3205), 'pandas.read_csv', 'pd.read_csv', (["(recFolder + 'AT2012.csv')"], {'index_col': '(0)', 'sep': '""";"""', 'header': '[0, 1, 2, 3]', 'low_memory': '(False)', 'parse_dates': '(True)'}), "(recFolder + 'AT2012.csv', index_col=0, sep=';', header=[0, 1, 2,\n 3], low_memory=False, parse_dates=True)\n", (3096, 3205), True, 'import pandas as pd\n'), ((92475, 92489), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (92487, 92489), True, 'import pandas as pd\n'), ((92494, 92508), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (92506, 92508), True, 'import pandas as pd\n'), ((92513, 92527), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (92525, 92527), True, 'import pandas as pd\n'), ((92532, 92546), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (92544, 92546), True, 'import pandas as pd\n'), ((92551, 92565), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (92563, 92565), True, 'import pandas as pd\n'), ((92570, 92584), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (92582, 92584), True, 'import pandas as pd\n'), ((97984, 98028), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16.0 / 2.54, 10 / 2.54)'}), '(figsize=(16.0 / 2.54, 10 / 2.54))\n', (97994, 98028), True, 'import matplotlib.pyplot as plt\n'), ((98068, 98091), 
'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(1)', '(1)'], {}), '(1, 1)\n', (98085, 98091), True, 'import matplotlib.gridspec as gridspec\n'), ((98138, 98169), 'matplotlib.pyplot.axes', 'plt.axes', (['[0.1, 0.1, 0.85, 0.8]'], {}), '([0.1, 0.1, 0.85, 0.8])\n', (98146, 98169), True, 'import matplotlib.pyplot as plt\n'), ((98705, 98729), 'matplotlib.pyplot.title', 'plt.title', (['title'], {'y': '(1.05)'}), '(title, y=1.05)\n', (98714, 98729), True, 'import matplotlib.pyplot as plt\n'), ((99027, 99037), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (99035, 99037), True, 'import matplotlib.pyplot as plt\n'), ((99160, 99203), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16.0 / 2.54, 8 / 2.54)'}), '(figsize=(16.0 / 2.54, 8 / 2.54))\n', (99170, 99203), True, 'import matplotlib.pyplot as plt\n'), ((101461, 101502), 'matplotlib.pyplot.setp', 'plt.setp', (["bp['fliers']"], {'color': '"""Gainsboro"""'}), "(bp['fliers'], color='Gainsboro')\n", (101469, 101502), True, 'import matplotlib.pyplot as plt\n'), ((101511, 101554), 'matplotlib.pyplot.setp', 'plt.setp', (["bp['whiskers']"], {'linestyle': '"""solid"""'}), "(bp['whiskers'], linestyle='solid')\n", (101519, 101554), True, 'import matplotlib.pyplot as plt\n'), ((102069, 102102), 'matplotlib.pyplot.setp', 'plt.setp', (['ax2'], {'xticklabels': 'labels'}), '(ax2, xticklabels=labels)\n', (102077, 102102), True, 'import matplotlib.pyplot as plt\n'), ((102112, 102145), 'matplotlib.pyplot.setp', 'plt.setp', (['xtickNames'], {'rotation': '(30)'}), '(xtickNames, rotation=30)\n', (102120, 102145), True, 'import matplotlib.pyplot as plt\n'), ((102405, 102477), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(savingFolder + title + '_BP.png')"], {'figure': 'fig2', 'format': '"""png"""'}), "(savingFolder + title + '_BP.png', figure=fig2, format='png')\n", (102416, 102477), True, 'import matplotlib.pyplot as plt\n'), ((102482, 102554), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(savingFolder + title 
+ '_BP.pdf')"], {'figure': 'fig2', 'format': '"""pdf"""'}), "(savingFolder + title + '_BP.pdf', figure=fig2, format='pdf')\n", (102493, 102554), True, 'import matplotlib.pyplot as plt\n'), ((117225, 117402), 'pandas.read_csv', 'pd.read_csv', (["(recFolder + 'diurnals/B2E1_20121_201212diurnals.csv')"], {'index_col': '(0)', 'sep': '""";"""', 'header': '[0, 1, 2, 4, 5, 6, 7]', 'skiprows': '[8]', 'parse_dates': '(True)', 'low_memory': '(False)'}), "(recFolder + 'diurnals/B2E1_20121_201212diurnals.csv', index_col\n =0, sep=';', header=[0, 1, 2, 4, 5, 6, 7], skiprows=[8], parse_dates=\n True, low_memory=False)\n", (117236, 117402), True, 'import pandas as pd\n'), ((117564, 117743), 'pandas.read_csv', 'pd.read_csv', (["(recFolder + 'validationM3_B2E1/proSet_100_B2E1_CDPL.csv')"], {'index_col': '(0)', 'sep': '""";"""', 'header': '[0, 1, 2, 4, 5, 6, 7]', 'skiprows': '[8]', 'parse_dates': '(True)', 'low_memory': '(False)'}), "(recFolder + 'validationM3_B2E1/proSet_100_B2E1_CDPL.csv',\n index_col=0, sep=';', header=[0, 1, 2, 4, 5, 6, 7], skiprows=[8],\n parse_dates=True, low_memory=False)\n", (117575, 117743), True, 'import pandas as pd\n'), ((119367, 119425), 'pandas.DataFrame', 'DataFrame', (['results'], {'index': 'indicis', 'columns': 'columns4Results'}), '(results, index=indicis, columns=columns4Results)\n', (119376, 119425), False, 'from pandas import Series, DataFrame, MultiIndex\n'), ((1776, 1818), 'matplotlib.dates.DateFormatter', 'matplotlib.dates.DateFormatter', (['"""%I:%M %p"""'], {}), "('%I:%M %p')\n", (1806, 1818), False, 'import matplotlib\n'), ((98896, 98935), 'matplotlib.dates.DateFormatter', 'matplotlib.dates.DateFormatter', (['"""%H:%M"""'], {}), "('%H:%M')\n", (98926, 98935), False, 'import matplotlib\n'), ((100189, 100252), 'matplotlib.patches.Polygon', 'Polygon', (['boxCoords'], {'facecolor': 'boxColors[i]', 'alpha': '(0.1)', 'zorder': '(1)'}), '(boxCoords, facecolor=boxColors[i], alpha=0.1, zorder=1)\n', (100196, 100252), False, 'from matplotlib.patches 
import Polygon\n'), ((101165, 101208), 'matplotlib.pyplot.setp', 'plt.setp', (["bp['medians'][i]"], {'color': 'colors[i]'}), "(bp['medians'][i], color=colors[i])\n", (101173, 101208), True, 'import matplotlib.pyplot as plt\n'), ((101238, 101285), 'matplotlib.pyplot.setp', 'plt.setp', (["bp['boxes'][i]"], {'color': '"""DarkSlateGray"""'}), "(bp['boxes'][i], color='DarkSlateGray')\n", (101246, 101285), True, 'import matplotlib.pyplot as plt\n'), ((101343, 101393), 'matplotlib.pyplot.setp', 'plt.setp', (["bp['whiskers'][i]"], {'color': '"""DarkSlateGray"""'}), "(bp['whiskers'][i], color='DarkSlateGray')\n", (101351, 101393), True, 'import matplotlib.pyplot as plt\n'), ((101406, 101452), 'matplotlib.pyplot.setp', 'plt.setp', (["bp['caps'][i]"], {'color': '"""DarkSlateGray"""'}), "(bp['caps'][i], color='DarkSlateGray')\n", (101414, 101452), True, 'import matplotlib.pyplot as plt\n'), ((102303, 102335), 'numpy.char.replace', 'np.char.replace', (['title', '""" """', '"""_"""'], {}), "(title, ' ', '_')\n", (102318, 102335), True, 'import numpy as np\n'), ((102354, 102395), 'numpy.char.replace', 'np.char.replace', (['title', '"""Apartment"""', '"""Ap"""'], {}), "(title, 'Apartment', 'Ap')\n", (102369, 102395), True, 'import numpy as np\n'), ((104385, 104429), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16.0 / 2.54, 10 / 2.54)'}), '(figsize=(16.0 / 2.54, 10 / 2.54))\n', (104395, 104429), True, 'import matplotlib.pyplot as plt\n'), ((105272, 105296), 'matplotlib.pyplot.title', 'plt.title', (['title'], {'y': '(1.05)'}), '(title, y=1.05)\n', (105281, 105296), True, 'import matplotlib.pyplot as plt\n'), ((105715, 105782), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(savingFolder + name + '.pdf')"], {'figure': 'fig', 'format': '"""pdf"""'}), "(savingFolder + name + '.pdf', figure=fig, format='pdf')\n", (105726, 105782), True, 'import matplotlib.pyplot as plt\n'), ((105793, 105837), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16.0 / 2.54, 10 / 
2.54)'}), '(figsize=(16.0 / 2.54, 10 / 2.54))\n', (105803, 105837), True, 'import matplotlib.pyplot as plt\n'), ((106019, 106043), 'matplotlib.pyplot.title', 'plt.title', (['title'], {'y': '(1.05)'}), '(title, y=1.05)\n', (106028, 106043), True, 'import matplotlib.pyplot as plt\n'), ((108094, 108135), 'matplotlib.pyplot.setp', 'plt.setp', (["bp['fliers']"], {'color': '"""Gainsboro"""'}), "(bp['fliers'], color='Gainsboro')\n", (108102, 108135), True, 'import matplotlib.pyplot as plt\n'), ((108144, 108187), 'matplotlib.pyplot.setp', 'plt.setp', (["bp['whiskers']"], {'linestyle': '"""solid"""'}), "(bp['whiskers'], linestyle='solid')\n", (108152, 108187), True, 'import matplotlib.pyplot as plt\n'), ((108651, 108684), 'matplotlib.pyplot.setp', 'plt.setp', (['ax2'], {'xticklabels': 'labels'}), '(ax2, xticklabels=labels)\n', (108659, 108684), True, 'import matplotlib.pyplot as plt\n'), ((108694, 108727), 'matplotlib.pyplot.setp', 'plt.setp', (['xtickNames'], {'rotation': '(30)'}), '(xtickNames, rotation=30)\n', (108702, 108727), True, 'import matplotlib.pyplot as plt\n'), ((108876, 108948), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(savingFolder + title + '_BP.pdf')"], {'figure': 'fig2', 'format': '"""pdf"""'}), "(savingFolder + title + '_BP.pdf', figure=fig2, format='pdf')\n", (108887, 108948), True, 'import matplotlib.pyplot as plt\n'), ((110764, 110807), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16.0 / 2.54, 9 / 2.54)'}), '(figsize=(16.0 / 2.54, 9 / 2.54))\n', (110774, 110807), True, 'import matplotlib.pyplot as plt\n'), ((112449, 112518), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(savingFolder + titleb + '.pdf')"], {'figure': 'fig', 'format': '"""pdf"""'}), "(savingFolder + titleb + '.pdf', figure=fig, format='pdf')\n", (112460, 112518), True, 'import matplotlib.pyplot as plt\n'), ((112529, 112572), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16.0 / 2.54, 9 / 2.54)'}), '(figsize=(16.0 / 2.54, 9 / 2.54))\n', (112539, 
112572), True, 'import matplotlib.pyplot as plt\n'), ((114946, 114987), 'matplotlib.pyplot.setp', 'plt.setp', (["bp['fliers']"], {'color': '"""Gainsboro"""'}), "(bp['fliers'], color='Gainsboro')\n", (114954, 114987), True, 'import matplotlib.pyplot as plt\n'), ((114996, 115039), 'matplotlib.pyplot.setp', 'plt.setp', (["bp['whiskers']"], {'linestyle': '"""solid"""'}), "(bp['whiskers'], linestyle='solid')\n", (115004, 115039), True, 'import matplotlib.pyplot as plt\n'), ((115836, 115869), 'matplotlib.pyplot.setp', 'plt.setp', (['ax2'], {'xticklabels': 'labels'}), '(ax2, xticklabels=labels)\n', (115844, 115869), True, 'import matplotlib.pyplot as plt\n'), ((115879, 115912), 'matplotlib.pyplot.setp', 'plt.setp', (['xtickNames'], {'rotation': '(30)'}), '(xtickNames, rotation=30)\n', (115887, 115912), True, 'import matplotlib.pyplot as plt\n'), ((116181, 116253), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(savingFolder + title + '_BP.pdf')"], {'figure': 'fig2', 'format': '"""pdf"""'}), "(savingFolder + title + '_BP.pdf', figure=fig2, format='pdf')\n", (116192, 116253), True, 'import matplotlib.pyplot as plt\n'), ((100597, 100650), 'matplotlib.pyplot.plot', 'plt.plot', (['medianX', 'medianY', 'boxColors[i]'], {'linewidth': '(2)'}), '(medianX, medianY, boxColors[i], linewidth=2)\n', (100605, 100650), True, 'import matplotlib.pyplot as plt\n'), ((105481, 105520), 'matplotlib.dates.DateFormatter', 'matplotlib.dates.DateFormatter', (['"""%H:%M"""'], {}), "('%H:%M')\n", (105511, 105520), False, 'import matplotlib\n'), ((106822, 106885), 'matplotlib.patches.Polygon', 'Polygon', (['boxCoords'], {'facecolor': 'boxColors[i]', 'alpha': '(0.1)', 'zorder': '(1)'}), '(boxCoords, facecolor=boxColors[i], alpha=0.1, zorder=1)\n', (106829, 106885), False, 'from matplotlib.patches import Polygon\n'), ((107798, 107841), 'matplotlib.pyplot.setp', 'plt.setp', (["bp['medians'][i]"], {'color': 'colors[i]'}), "(bp['medians'][i], color=colors[i])\n", (107806, 107841), True, 'import 
matplotlib.pyplot as plt\n'), ((107871, 107918), 'matplotlib.pyplot.setp', 'plt.setp', (["bp['boxes'][i]"], {'color': '"""DarkSlateGray"""'}), "(bp['boxes'][i], color='DarkSlateGray')\n", (107879, 107918), True, 'import matplotlib.pyplot as plt\n'), ((107976, 108026), 'matplotlib.pyplot.setp', 'plt.setp', (["bp['whiskers'][i]"], {'color': '"""DarkSlateGray"""'}), "(bp['whiskers'][i], color='DarkSlateGray')\n", (107984, 108026), True, 'import matplotlib.pyplot as plt\n'), ((108039, 108085), 'matplotlib.pyplot.setp', 'plt.setp', (["bp['caps'][i]"], {'color': '"""DarkSlateGray"""'}), "(bp['caps'][i], color='DarkSlateGray')\n", (108047, 108085), True, 'import matplotlib.pyplot as plt\n'), ((112110, 112149), 'matplotlib.dates.DateFormatter', 'matplotlib.dates.DateFormatter', (['"""%H:%M"""'], {}), "('%H:%M')\n", (112140, 112149), False, 'import matplotlib\n'), ((112356, 112387), 'numpy.char.replace', 'np.char.replace', (['title', '""" """', '""""""'], {}), "(title, ' ', '')\n", (112371, 112387), True, 'import numpy as np\n'), ((112407, 112440), 'numpy.char.replace', 'np.char.replace', (['titleb', '""","""', '"""_"""'], {}), "(titleb, ',', '_')\n", (112422, 112440), True, 'import numpy as np\n'), ((113674, 113737), 'matplotlib.patches.Polygon', 'Polygon', (['boxCoords'], {'facecolor': 'boxColors[i]', 'alpha': '(0.1)', 'zorder': '(1)'}), '(boxCoords, facecolor=boxColors[i], alpha=0.1, zorder=1)\n', (113681, 113737), False, 'from matplotlib.patches import Polygon\n'), ((114650, 114693), 'matplotlib.pyplot.setp', 'plt.setp', (["bp['medians'][i]"], {'color': 'colors[i]'}), "(bp['medians'][i], color=colors[i])\n", (114658, 114693), True, 'import matplotlib.pyplot as plt\n'), ((114723, 114770), 'matplotlib.pyplot.setp', 'plt.setp', (["bp['boxes'][i]"], {'color': '"""DarkSlateGray"""'}), "(bp['boxes'][i], color='DarkSlateGray')\n", (114731, 114770), True, 'import matplotlib.pyplot as plt\n'), ((114828, 114878), 'matplotlib.pyplot.setp', 'plt.setp', (["bp['whiskers'][i]"], 
{'color': '"""DarkSlateGray"""'}), "(bp['whiskers'][i], color='DarkSlateGray')\n", (114836, 114878), True, 'import matplotlib.pyplot as plt\n'), ((114891, 114937), 'matplotlib.pyplot.setp', 'plt.setp', (["bp['caps'][i]"], {'color': '"""DarkSlateGray"""'}), "(bp['caps'][i], color='DarkSlateGray')\n", (114899, 114937), True, 'import matplotlib.pyplot as plt\n'), ((116070, 116101), 'numpy.char.replace', 'np.char.replace', (['title', '""" """', '""""""'], {}), "(title, ' ', '')\n", (116085, 116101), True, 'import numpy as np\n'), ((116120, 116152), 'numpy.char.replace', 'np.char.replace', (['title', '""","""', '"""_"""'], {}), "(title, ',', '_')\n", (116135, 116152), True, 'import numpy as np\n'), ((99748, 99781), 'pandas.DataFrame', 'DataFrame', (['(df2.values - df.values)'], {}), '(df2.values - df.values)\n', (99757, 99781), False, 'from pandas import Series, DataFrame, MultiIndex\n'), ((102848, 102859), 'numpy.empty', 'np.empty', (['(6)'], {}), '(6)\n', (102856, 102859), True, 'import numpy as np\n'), ((107230, 107283), 'matplotlib.pyplot.plot', 'plt.plot', (['medianX', 'medianY', 'boxColors[i]'], {'linewidth': '(2)'}), '(medianX, medianY, boxColors[i], linewidth=2)\n', (107238, 107283), True, 'import matplotlib.pyplot as plt\n'), ((109293, 109304), 'numpy.empty', 'np.empty', (['(6)'], {}), '(6)\n', (109301, 109304), True, 'import numpy as np\n'), ((114082, 114135), 'matplotlib.pyplot.plot', 'plt.plot', (['medianX', 'medianY', 'boxColors[i]'], {'linewidth': '(2)'}), '(medianX, medianY, boxColors[i], linewidth=2)\n', (114090, 114135), True, 'import matplotlib.pyplot as plt\n'), ((106381, 106414), 'pandas.DataFrame', 'DataFrame', (['(df2.values - df.values)'], {}), '(df2.values - df.values)\n', (106390, 106414), False, 'from pandas import Series, DataFrame, MultiIndex\n'), ((113233, 113266), 'pandas.DataFrame', 'DataFrame', (['(df2.values - df.values)'], {}), '(df2.values - df.values)\n', (113242, 113266), False, 'from pandas import Series, DataFrame, MultiIndex\n')] |
import bz2
import time
import array
import logging
from typing import Dict, Tuple, Optional, Any
import numpy
from cephlib import sensors_rpc_plugin
from cephlib.units import b2ssize
from cephlib.wally_storage import WallyDB
from . import utils
from .test_run_class import TestRun
from .result_classes import DataSource
from .stage import Stage, StepOrder
plugin_fname = sensors_rpc_plugin.__file__.rsplit(".", 1)[0] + ".py"
SENSORS_PLUGIN_CODE = open(plugin_fname, "rb").read() # type: bytes
logger = logging.getLogger("wally")
# TODO(koder): in case if node has more than one role sensor settings might be incorrect
class StartSensorsStage(Stage):
priority = StepOrder.START_SENSORS
config_block = 'sensors'
def run(self, ctx: TestRun) -> None:
if array.array('L').itemsize != 8:
message = "Python array.array('L') items should be 8 bytes in size, not {}." + \
" Can't provide sensors on this platform. Disable sensors in config and retry"
logger.critical(message.format(array.array('L').itemsize))
raise utils.StopTestError()
# TODO: need carefully fix this
# sensors config is:
# role:
# sensor: [str]
# or
# role:
# sensor:
# allowed: [str]
# dissallowed: [str]
# params: Any
per_role_config = {} # type: Dict[str, Dict[str, str]]
for name, val in ctx.config.sensors.roles_mapping.raw().items():
if isinstance(val, str):
val = {vl.strip(): (".*" if vl.strip() != 'ceph' else {}) for vl in val.split(",")}
elif isinstance(val, list):
val = {vl: (".*" if vl != 'ceph' else {}) for vl in val}
per_role_config[name] = val
if 'all' in per_role_config:
all_vl = per_role_config.pop('all')
all_roles = set(per_role_config)
for node in ctx.nodes:
all_roles.update(node.info.roles) # type: ignore
for name, vals in list(per_role_config.items()):
new_vals = all_vl.copy()
new_vals.update(vals)
per_role_config[name] = new_vals
for node in ctx.nodes:
node_cfg = {} # type: Dict[str, Dict[str, str]]
for role in node.info.roles:
node_cfg.update(per_role_config.get(role, {})) # type: ignore
nid = node.node_id
if node_cfg:
# ceph requires additional settings
if 'ceph' in node_cfg:
node_cfg['ceph'].update(node.info.params['ceph'])
node_cfg['ceph']['osds'] = [osd['id'] for osd in node.info.params['ceph-osds']] # type: ignore
logger.debug("Setting up sensors RPC plugin for node %s", nid)
node.upload_plugin("sensors", SENSORS_PLUGIN_CODE)
ctx.sensors_run_on.add(nid)
logger.debug("Start monitoring node %s", nid)
node.conn.sensors.start(node_cfg)
else:
logger.debug("Skip monitoring node %s, as no sensors selected", nid)
def stop_sensors(ctx: TestRun):
for node in ctx.nodes:
node_id = node.node_id
if node_id in ctx.sensors_run_on:
node.conn.sensors.stop()
def collect_sensors_data(ctx: TestRun, before_test: bool = False):
logger.info("Start loading sensors")
total_sz = 0
# ceph pg and pool data collected separatelly
cluster_metrics = getattr(ctx.config.sensors, 'cluster', [])
pgs_io = 'ceph-pgs-io' in cluster_metrics
pools_io = 'ceph-pools-io' in cluster_metrics
if pgs_io or pools_io:
assert ctx.ceph_master_node is not None
def collect() -> Tuple[Optional[Any], Optional[Any]]:
pg_dump = ctx.ceph_master_node.run(f"ceph {ctx.ceph_extra_args} pg dump --format json") if pgs_io else None
pools_dump = ctx.ceph_master_node.run(f"rados {ctx.ceph_extra_args} df --format json") if pools_io else None
return pg_dump, pools_dump
future = ctx.get_pool().submit(collect)
else:
future = None
ctime = int(time.time())
if not before_test:
for node in ctx.nodes:
node_id = node.node_id
if node_id in ctx.sensors_run_on:
offset_map, compressed_blob, compressed_collected_at_b = node.conn.sensors.get_updates()
total_sz += len(compressed_blob) + len(compressed_collected_at_b) + sum(map(len, offset_map)) + \
16 * len(offset_map)
data_tpl = (offset_map, compressed_blob, compressed_collected_at_b)
for path, value, is_array, units in sensors_rpc_plugin.unpack_rpc_updates(data_tpl):
if path == 'collected_at':
ds = DataSource(node_id=node_id, metric='collected_at', tag='csv')
ctx.rstorage.append_sensor(numpy.array(value), ds, units)
else:
sensor, dev, metric = path.split(".")
ds = DataSource(node_id=node_id, metric=metric, dev=dev, sensor=sensor, tag='csv')
if is_array:
ctx.rstorage.append_sensor(numpy.array(value), ds, units)
else:
if metric == 'historic':
value = bz2.compress(value)
tag = 'bz2'
else:
assert metric == 'perf_dump'
tag = 'txt'
ctx.storage.put_raw(value, WallyDB.ceph_metric.format(node_id=node_id,
metric=metric,
time=ctime,
tag=tag))
if future:
pgs_info, pools_info = future.result()
if pgs_info:
total_sz += len(pgs_info)
ctx.storage.put_raw(bz2.compress(pgs_info.encode('utf8')), WallyDB.pgs_io.format(time=ctime))
if pools_info:
total_sz += len(pools_info)
ctx.storage.put_raw(bz2.compress(pools_info.encode('utf8')), WallyDB.pools_io.format(time=ctime))
logger.info("Download %sB of sensors data", b2ssize(total_sz))
class CollectSensorsStage(Stage):
priority = StepOrder.COLLECT_SENSORS
config_block = 'sensors'
def run(self, ctx: TestRun) -> None:
collect_sensors_data(ctx, False)
class StopSensorsStage(Stage):
priority = StepOrder.STOP_SENSORS
config_block = 'sensors'
def run(self, ctx: TestRun) -> None:
stop_sensors(ctx)
| [
"cephlib.units.b2ssize",
"cephlib.sensors_rpc_plugin.__file__.rsplit",
"time.time",
"bz2.compress",
"cephlib.sensors_rpc_plugin.unpack_rpc_updates",
"cephlib.wally_storage.WallyDB.pgs_io.format",
"array.array",
"cephlib.wally_storage.WallyDB.pools_io.format",
"numpy.array",
"cephlib.wally_storage.... | [((510, 536), 'logging.getLogger', 'logging.getLogger', (['"""wally"""'], {}), "('wally')\n", (527, 536), False, 'import logging\n'), ((376, 418), 'cephlib.sensors_rpc_plugin.__file__.rsplit', 'sensors_rpc_plugin.__file__.rsplit', (['"""."""', '(1)'], {}), "('.', 1)\n", (410, 418), False, 'from cephlib import sensors_rpc_plugin\n'), ((4209, 4220), 'time.time', 'time.time', ([], {}), '()\n', (4218, 4220), False, 'import time\n'), ((6503, 6520), 'cephlib.units.b2ssize', 'b2ssize', (['total_sz'], {}), '(total_sz)\n', (6510, 6520), False, 'from cephlib.units import b2ssize\n'), ((782, 798), 'array.array', 'array.array', (['"""L"""'], {}), "('L')\n", (793, 798), False, 'import array\n'), ((4757, 4804), 'cephlib.sensors_rpc_plugin.unpack_rpc_updates', 'sensors_rpc_plugin.unpack_rpc_updates', (['data_tpl'], {}), '(data_tpl)\n', (4794, 4804), False, 'from cephlib import sensors_rpc_plugin\n'), ((6245, 6278), 'cephlib.wally_storage.WallyDB.pgs_io.format', 'WallyDB.pgs_io.format', ([], {'time': 'ctime'}), '(time=ctime)\n', (6266, 6278), False, 'from cephlib.wally_storage import WallyDB\n'), ((6417, 6452), 'cephlib.wally_storage.WallyDB.pools_io.format', 'WallyDB.pools_io.format', ([], {'time': 'ctime'}), '(time=ctime)\n', (6440, 6452), False, 'from cephlib.wally_storage import WallyDB\n'), ((1045, 1061), 'array.array', 'array.array', (['"""L"""'], {}), "('L')\n", (1056, 1061), False, 'import array\n'), ((4995, 5013), 'numpy.array', 'numpy.array', (['value'], {}), '(value)\n', (5006, 5013), False, 'import numpy\n'), ((5313, 5331), 'numpy.array', 'numpy.array', (['value'], {}), '(value)\n', (5324, 5331), False, 'import numpy\n'), ((5467, 5486), 'bz2.compress', 'bz2.compress', (['value'], {}), '(value)\n', (5479, 5486), False, 'import bz2\n'), ((5725, 5804), 'cephlib.wally_storage.WallyDB.ceph_metric.format', 'WallyDB.ceph_metric.format', ([], {'node_id': 'node_id', 'metric': 'metric', 'time': 'ctime', 'tag': 'tag'}), '(node_id=node_id, 
metric=metric, time=ctime, tag=tag)\n', (5751, 5804), False, 'from cephlib.wally_storage import WallyDB\n')] |
# -*- coding: utf-8 -*-
import numpy as np
import pudb
from easyAI import TwoPlayersGame, DictTT
black_pieces = [[0,3],[0,4],[0,5],[0,6],[0,7],[1,5],[3,0],[3,10],[4,0],[4,10],[5,0],
[5,1],[5,9],[5,10],[6,0],[6,10],[7,0],[7,10],[9,5],[10,3],[10,4],
[10,5],[10,6],[10,7]]
white_pieces = [[3,5],[4,4],[4,5],[4,6],[5,3],[5,4],[5,5],[5,6],[5,7],[6,4],[6,5],[6,6],[7,5]]
# black_pieces_2 = [[5,1],[5,9],[5,10],[6,0],[6,10],[7,0],[7,10]]
# white_pieces_2 = [[5,5],[6,5],[6,6],[7,5]]
black_pieces_2 = [[5,3],[5,2],[5,7],[5,9],[2,3],[2,2],[2,7],[2,9]]
white_pieces_2 = [[5,5],[6,5],[6,6],[7,5]]
king = [5,5]
throne = np.array([[5,5]])
corners = np.array([[0,0],[0,10],[10,0],[10,10]])
pieces = [black_pieces, white_pieces + [king]]
BLACK = 1
WHITE = 2
class Game(TwoPlayersGame):
"""
"""
def __init__(self, players, board_size = (11, 11)):
self.players = players
self.board_size = board_size
self.board = np.zeros(board_size, dtype = int)
for piece in black_pieces_2:
self.board[piece[0]][piece[1]] = 1
for piece in white_pieces_2:
self.board[piece[0]][piece[1]] = 2
self.king = np.array([5,5])
self.nplayer = 1 # player 1 starts.
def possible_moves_for_piece(self, piece):
v_moves = []
# pudb.set_trace()
v_mask = np.ma.masked_where(self.board[:, piece[1]] != 0, self.board[:, piece[1]])
v_slices = np.ma.notmasked_contiguous(v_mask)
try:
v_slice = [slice for slice in v_slices if slice.start <= piece[0]+1 and piece[0]-1 <= slice.stop][0]
except TypeError:
v_slice = v_slices
except IndexError:
v_slice = None
if v_slice is not None:
v_moves = range(v_slice.start, v_slice.stop)
if piece[0] in v_moves:
v_moves.remove(piece[0])
v_moves = [[val, piece[1]] for val in v_moves]
h_moves = []
h_mask = np.ma.masked_where(self.board[piece[0]] != 0, self.board[piece[0]])
h_slices = np.ma.notmasked_contiguous(h_mask)
try:
h_slice = [slice for slice in h_slices if slice.start <= piece[1]+1 and piece[1]-1 <= slice.stop][0]
except TypeError:
h_slice = h_slices
except IndexError:
h_slice = None
if h_slice is not None:
h_moves = range(h_slice.start, h_slice.stop)
if piece[1] in h_moves:
h_moves.remove(piece[1])
h_moves = [[piece[0], val] for val in h_moves]
return [(piece, move) for move in h_moves + v_moves if move not in corners + throne]
def get_piece(self, coord):
try:
return self.board[coord[0]][coord[1]]
except:
return None
def get_pieces(self, player):
pieces = np.where(self.board == player)
return np.dstack(pieces)[0]
def capture(self, move):
directions = [np.array([0,1]),np.array([0,-1]),np.array([1,0]),np.array([-1,0])]
for direction in directions:
target = direction + move
if self.get_piece(target) == self.nopponent:
if self.get_piece(direction + target) == self.nplayer or \
(direction + target) in corners or \
(direction + target) in throne:
self.board[target[0]][target[1]] = 0
def possible_moves(self):
moves = []
pieces = self.get_pieces(self.nplayer)
if self.nmove % 3:
pieces = pieces[::-1]
for piece in pieces:
moves.extend(self.possible_moves_for_piece(piece))
if len(moves) == 0:
pudb.set_trace()
return moves
def make_move(self, move):
current_pos = move[0]
next_pos = move[1]
self.board[current_pos[0]][current_pos[1]] = 0
self.board[next_pos[0]][next_pos[1]] = self.nplayer
if (self.king == current_pos).all():
self.king = next_pos
self.capture(next_pos)
def show(self):
print('\n' + '\n'.join([' 1 2 3 4 5 6 7 8 9 10 11'] +
['ABCDEFGHIJK'[k] +
' ' + ' '.join([['∙', '⚫️', '⚪️', '👑'][self.board[k, i]]
for i in range(self.board_size[0])])
for k in range(self.board_size[1])] + ['']))
def lose(self):
if self.nplayer == BLACK:
self.has_lost = (self.king == corners).any()
else:
self.has_lost = self.get_pieces(WHITE).size == 0
# if not (self.king == self.get_pieces(WHITE)).any():
# return True
return self.has_lost
def scoring(self):
if not self.has_lost:
material = len(self.get_pieces(BLACK))/2./len(self.get_pieces(WHITE))
# king_to_corner = min([np.linalg.norm(np.array(self.king)-corner) for corner in corners])
# attackers_to_king = np.array([np.linalg.norm(np.array(self.king)-piece) for piece in self.get_pieces(BLACK)]).mean()
# return king_to_corner + material**10 - attackers_to_king
# return -attackers_to_king + king_to_corner
return -(material**10)
# return material
else:
return -100
def is_over(self):
return self.lose()
def ttentry(self):
return "".join([".0X"[i] for i in self.board.flatten()])
if __name__ == "__main__":
from easyAI import AI_Player, Negamax
#ai_algo = Negamax(3, None , tt = DictTT())
ai_algo = Negamax(5, None)
game = Game([AI_Player(ai_algo), AI_Player(ai_algo)])
game.play()
print("player %d loses" % (game.nplayer))
| [
"numpy.dstack",
"easyAI.AI_Player",
"numpy.ma.masked_where",
"pudb.set_trace",
"numpy.zeros",
"numpy.where",
"numpy.array",
"easyAI.Negamax",
"numpy.ma.notmasked_contiguous"
] | [((641, 659), 'numpy.array', 'np.array', (['[[5, 5]]'], {}), '([[5, 5]])\n', (649, 659), True, 'import numpy as np\n'), ((669, 715), 'numpy.array', 'np.array', (['[[0, 0], [0, 10], [10, 0], [10, 10]]'], {}), '([[0, 0], [0, 10], [10, 0], [10, 10]])\n', (677, 715), True, 'import numpy as np\n'), ((5545, 5561), 'easyAI.Negamax', 'Negamax', (['(5)', 'None'], {}), '(5, None)\n', (5552, 5561), False, 'from easyAI import AI_Player, Negamax\n'), ((967, 998), 'numpy.zeros', 'np.zeros', (['board_size'], {'dtype': 'int'}), '(board_size, dtype=int)\n', (975, 998), True, 'import numpy as np\n'), ((1189, 1205), 'numpy.array', 'np.array', (['[5, 5]'], {}), '([5, 5])\n', (1197, 1205), True, 'import numpy as np\n'), ((1362, 1435), 'numpy.ma.masked_where', 'np.ma.masked_where', (['(self.board[:, piece[1]] != 0)', 'self.board[:, piece[1]]'], {}), '(self.board[:, piece[1]] != 0, self.board[:, piece[1]])\n', (1380, 1435), True, 'import numpy as np\n'), ((1455, 1489), 'numpy.ma.notmasked_contiguous', 'np.ma.notmasked_contiguous', (['v_mask'], {}), '(v_mask)\n', (1481, 1489), True, 'import numpy as np\n'), ((1991, 2058), 'numpy.ma.masked_where', 'np.ma.masked_where', (['(self.board[piece[0]] != 0)', 'self.board[piece[0]]'], {}), '(self.board[piece[0]] != 0, self.board[piece[0]])\n', (2009, 2058), True, 'import numpy as np\n'), ((2078, 2112), 'numpy.ma.notmasked_contiguous', 'np.ma.notmasked_contiguous', (['h_mask'], {}), '(h_mask)\n', (2104, 2112), True, 'import numpy as np\n'), ((2856, 2886), 'numpy.where', 'np.where', (['(self.board == player)'], {}), '(self.board == player)\n', (2864, 2886), True, 'import numpy as np\n'), ((2902, 2919), 'numpy.dstack', 'np.dstack', (['pieces'], {}), '(pieces)\n', (2911, 2919), True, 'import numpy as np\n'), ((2975, 2991), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (2983, 2991), True, 'import numpy as np\n'), ((2991, 3008), 'numpy.array', 'np.array', (['[0, -1]'], {}), '([0, -1])\n', (2999, 3008), True, 'import numpy as np\n'), 
((3008, 3024), 'numpy.array', 'np.array', (['[1, 0]'], {}), '([1, 0])\n', (3016, 3024), True, 'import numpy as np\n'), ((3024, 3041), 'numpy.array', 'np.array', (['[-1, 0]'], {}), '([-1, 0])\n', (3032, 3041), True, 'import numpy as np\n'), ((3713, 3729), 'pudb.set_trace', 'pudb.set_trace', ([], {}), '()\n', (3727, 3729), False, 'import pudb\n'), ((5579, 5597), 'easyAI.AI_Player', 'AI_Player', (['ai_algo'], {}), '(ai_algo)\n', (5588, 5597), False, 'from easyAI import AI_Player, Negamax\n'), ((5599, 5617), 'easyAI.AI_Player', 'AI_Player', (['ai_algo'], {}), '(ai_algo)\n', (5608, 5617), False, 'from easyAI import AI_Player, Negamax\n')] |
import matplotlib.pyplot as plt
import numpy as np
import emcee
import corner
import random
import math
import subprocess
from astropy.io import ascii
import pickle
from matplotlib.ticker import MaxNLocator
import sys
import idlsave
from scipy.stats.kde import gaussian_kde
import scipy.stats as stats
import matplotlib.mlab as mlab
import tables
from scipy.interpolate import interp1d
from chainconsumer import ChainConsumer
from multiprocessing import Pool
import os
import time
# -------------------------------------------------------------------------#
## load local modules
from settle import settle
from burstrain import *
from run_model import runmodel
from get_data import get_obs
from mrprior import mr_prior
from get_data import *
from initialise import init
## Now we define the functions that emcee requires
# define likelihood as a function of theta, x, y and yerr as this is what emcee expects as the inputs
ndim, nwalkers, nsteps, run_id, theta, x, y, yerr, tref, bstart, pflux, pfluxe, tobs, numburstssim, numburstsobs, bc, ref_ind, gti_checking, fluen, restart = init()
def lnlike(theta, x, y, yerr):
# define y = "data" parameters
for x, i in zip(
[x for x in range(0, len(bstart) - 1) if x != ref_ind],
[i for i in range(0, len(bstart) - 1) if i != ref_ind],
):
globals()["t%s" % i] = y[x]
for x, i in zip(
range(len(bstart) - 1, len(fluen) + len(bstart) - 1), range(0, len(bstart))
):
globals()["Eb%s" % i] = y[x]
for x, i in zip(
range(len(fluen) + len(bstart) - 1, len(y)), range(0, len(bstart - 1))
):
globals()["a%s" % i] = y[x]
# define yerr as variance terms (errors) for our data parameters (listed in same order as for y)
# *note that we have to enter three time errors for the code to work however in reality the error should be the same for all of them (st0, st2 and st3 are really dummy parameters)
for x, i in zip(
[x for x in range(0, len(bstart) - 1) if x != ref_ind],
[i for i in range(0, len(bstart) - 1) if i != ref_ind],
):
globals()["st%s" % i] = yerr[x]
for x, i in zip(
range(len(bstart) - 1, len(fluen) + len(bstart) - 1), range(0, len(bstart))
):
globals()["sEb%s" % i] = yerr[x]
for x, i in zip(
range(len(fluen) + len(bstart) - 1, len(y)), range(0, len(bstart - 1))
):
globals()["sa%s" % i] = yerr[x]
# define theta = model parameters, which we define priors for
X, Z, Q_b, f_a, f_E, r1, r2, r3, mass, radius = theta
# Instead of treating s_t as a parameter, we just hardwire it here
s_t = 10.0 / 1440.0
# call model from IDL code defined as modeldata(base, z, x, r1, r2 ,r3)
if gti_checking == 1:
model, valid = runmodel(
theta, y, tref, bstart, pflux, pfluxe, tobs, numburstssim, ref_ind, gti_checking,gti_start=st, gti_end=et
)
else:
model, valid = runmodel(
theta, y, tref, bstart, pflux, pfluxe, tobs, numburstssim, ref_ind, gti_checking
)
if not valid:
return -np.inf, model
# multiplying by scaling factors to match with the data
model[len(bstart) - 1 : len(fluen) + len(bstart) - 1] *= r3
model[len(fluen) + len(bstart) - 1 : len(y)] *= r2
# To simplify final likelihood expression we define inv_sigma2 for each data parameter that describe the error.
# The variance (eg sEb0) is underestimated by some fractional amount, f, for each set of parameters.
sEb = yerr[len(bstart) - 1 : len(fluen) + len(bstart) - 1]
sa = yerr[len(fluen) + len(bstart) - 1 : len(yerr)]
inv_sigma2 = []
for i in range(0, len(bstart) - 1):
inv_sigma2.append(1.0 / (s_t ** 2))
for i in range(0, len(bstart)):
inv_sigma2.append(1.0 / ((sEb[i] * f_E) ** 2))
for i in range(0, len(bstart) - 1):
inv_sigma2.append(1.0 / ((sa[i] * f_a) ** 2))
# Final likelihood expression
cpts = (y - (model)) ** 2 * inv_sigma2 - (np.log(inv_sigma2))
# Test if the result string is defined here. It is, so we return the selected elements of result
# instead of the downselection in model
base = Q_b
z = Z
x = X
r1 = r1
r2 = r2
r3 = r3
mass = mass
radius = radius
model2 = generate_burst_train(
base,
z,
x,
r1,
r2,
r3,
mass,
radius,
bstart,
pflux,
pfluxe,
tobs,
numburstssim,
run=run,
double=double,
debug=debug,
)
#model2 = np.string_(model2, dtype='S1000')
model2 = str(model2).encode('ASCII')
# Now also return the model
return -0.5 * np.sum(cpts), model2
# -------------------------------------------------------------------------#
# This is the expression for the prior on the model parameters.
# Define priors for theta. mr prior function is located in mrprior.py
def lnZprior(z):
# This beta function for the metallicity prior is from <NAME> and is an approximation of the metallicity of a mock galaxy
# at 2.5-4.5 kpc for the location of 1808. Assuming ZCNO = 0.01 is average value.
from scipy import stats
import numpy as np
beta = stats.beta
ZCNO = 0.01
return np.log(
beta(10, 3).pdf((np.log10(z / ZCNO) + 3) / 3.75) / (3.75 * np.log(10) * z)
)
def lnprior(theta):
import numpy as np
X, Z, Q_b, f_a, f_E, r1, r2, r3, mass, radius = theta
if (
0.00001 < X < 0.76
and 0.00001 < Z < 0.056
and 0.000001 <= Q_b < 5.0
and 0 < f_a < 100
and 0 < f_E < 100
and 0.005 < r1 < 1.0
and 0.005 < r2 < 3.0
and 0 < r3 * 1e3 < 1000
and 1.15 < mass < 2.5
and 9 < radius < 17
): # upper bound and lower bounds of each parameter defined here. Bounds were found by considering an estimated value for each parameter then giving reasonable limits.
return 0.0 + lnZprior(Z) + mr_prior(mass, radius)
return -np.inf
# -------------------------------------------------------------------------#
# Finally we combine the likelihood and prior into the overall lnprob function, called by emcee
# define lnprob, which is the full log-probability function
def lnprob(theta, x, y, yerr):
import numpy as np
lp = lnprior(theta)
# Now also returns the model, to accumulate along with the likelihoods
like, model = lnlike(theta, x, y, yerr)
if (not np.isfinite(lp)) or (not np.isfinite(like)):
return -np.inf, -np.inf, model
# we return the logprobability as well as the theta parameters at this point so we can extract results later
return lp + like, lp, model
# -------------------------------------------------------------- # | [
"numpy.sum",
"numpy.log",
"mrprior.mr_prior",
"numpy.isfinite",
"run_model.runmodel",
"numpy.log10",
"initialise.init"
] | [((1085, 1091), 'initialise.init', 'init', ([], {}), '()\n', (1089, 1091), False, 'from initialise import init\n'), ((2777, 2897), 'run_model.runmodel', 'runmodel', (['theta', 'y', 'tref', 'bstart', 'pflux', 'pfluxe', 'tobs', 'numburstssim', 'ref_ind', 'gti_checking'], {'gti_start': 'st', 'gti_end': 'et'}), '(theta, y, tref, bstart, pflux, pfluxe, tobs, numburstssim, ref_ind,\n gti_checking, gti_start=st, gti_end=et)\n', (2785, 2897), False, 'from run_model import runmodel\n'), ((2944, 3038), 'run_model.runmodel', 'runmodel', (['theta', 'y', 'tref', 'bstart', 'pflux', 'pfluxe', 'tobs', 'numburstssim', 'ref_ind', 'gti_checking'], {}), '(theta, y, tref, bstart, pflux, pfluxe, tobs, numburstssim, ref_ind,\n gti_checking)\n', (2952, 3038), False, 'from run_model import runmodel\n'), ((3999, 4017), 'numpy.log', 'np.log', (['inv_sigma2'], {}), '(inv_sigma2)\n', (4005, 4017), True, 'import numpy as np\n'), ((4703, 4715), 'numpy.sum', 'np.sum', (['cpts'], {}), '(cpts)\n', (4709, 4715), True, 'import numpy as np\n'), ((5983, 6005), 'mrprior.mr_prior', 'mr_prior', (['mass', 'radius'], {}), '(mass, radius)\n', (5991, 6005), False, 'from mrprior import mr_prior\n'), ((6474, 6489), 'numpy.isfinite', 'np.isfinite', (['lp'], {}), '(lp)\n', (6485, 6489), True, 'import numpy as np\n'), ((6499, 6516), 'numpy.isfinite', 'np.isfinite', (['like'], {}), '(like)\n', (6510, 6516), True, 'import numpy as np\n'), ((5346, 5356), 'numpy.log', 'np.log', (['(10)'], {}), '(10)\n', (5352, 5356), True, 'import numpy as np\n'), ((5304, 5322), 'numpy.log10', 'np.log10', (['(z / ZCNO)'], {}), '(z / ZCNO)\n', (5312, 5322), True, 'import numpy as np\n')] |
import os
import six
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
import ibis
import ibis.expr.operations as ops
from ibis.compat import Path, parse_version
from ibis.impala.tests.conftest import IbisTestEnv as ImpalaEnv
class RoundAwayFromZero(object):
def round(self, series, decimals=0):
if not decimals:
return (-np.sign(series) * np.ceil(-series.abs() - 0.5)).astype(
'int64'
)
return series.round(decimals=decimals)
class RoundHalfToEven(object):
def round(self, series, decimals=0):
result = series.round(decimals=decimals)
if not decimals:
return result.astype('int64')
return result
class Backend(object):
check_dtype = True
check_names = True
supports_arrays = True
supports_arrays_outside_of_select = supports_arrays
supports_window_operations = True
additional_skipped_operations = frozenset()
supports_divide_by_zero = False
returned_timestamp_unit = 'us'
supported_to_timestamp_units = {'s', 'ms', 'us'}
supports_floating_modulus = True
def __init__(self, data_directory):
try:
# check that the backend is available
getattr(ibis, self.name)
except AttributeError:
pytest.skip('Backend {} cannot be imported'.format(self.name))
else:
self.connection = self.connect(data_directory)
@property
def name(self):
return str(self).lower()
def __str__(self):
return self.__class__.__name__
def connect(self, data_directory):
raise NotImplementedError
def assert_series_equal(self, *args, **kwargs):
kwargs.setdefault('check_dtype', self.check_dtype)
kwargs.setdefault('check_names', self.check_names)
tm.assert_series_equal(*args, **kwargs)
def assert_frame_equal(self, left, right, *args, **kwargs):
left = left.reset_index(drop=True)
right = right.reset_index(drop=True)
tm.assert_frame_equal(left, right, *args, **kwargs)
def default_series_rename(self, series, name='tmp'):
return series.rename(name)
def greatest(self, f, *args):
return f(*args)
def least(self, f, *args):
return f(*args)
@property
def db(self):
return self.connection.database()
def functional_alltypes(self):
return self.db.functional_alltypes
def batting(self):
return self.db.batting
def awards_players(self):
return self.db.awards_players
@classmethod
def make_context(cls, params=None):
module_name = cls.__name__.lower()
module = getattr(ibis, module_name, None)
if module is None:
pytest.skip('Unable to import backend {!r}'.format(module_name))
return module.dialect.make_context(params=params)
class UnorderedComparator(object):
def assert_series_equal(self, left, right, *args, **kwargs):
left = left.sort_values().reset_index(drop=True)
right = right.sort_values().reset_index(drop=True)
return super(UnorderedComparator, self).assert_series_equal(
left, right, *args, **kwargs)
def assert_frame_equal(self, left, right, *args, **kwargs):
columns = list(set(left.columns) & set(right.columns))
left = left.sort_values(by=columns)
right = right.sort_values(by=columns)
return super(UnorderedComparator, self).assert_frame_equal(
left, right, *args, **kwargs)
class Pandas(Backend, RoundHalfToEven):
check_names = False
additional_skipped_operations = frozenset({ops.StringSQLLike})
supports_divide_by_zero = True
returned_timestamp_unit = 'ns'
def connect(self, data_directory):
return ibis.pandas.connect({
'functional_alltypes': pd.read_csv(
str(data_directory / 'functional_alltypes.csv'),
index_col=None,
dtype={'bool_col': bool, 'string_col': six.text_type},
parse_dates=['timestamp_col'],
encoding='utf-8'
),
'batting': pd.read_csv(str(data_directory / 'batting.csv')),
'awards_players': pd.read_csv(
str(data_directory / 'awards_players.csv')
),
})
def round(self, series, decimals=0):
result = series.round(decimals=decimals)
if not decimals:
return result.astype('int64')
return result
class Csv(Pandas):
check_names = False
supports_divide_by_zero = True
returned_timestamp_unit = 'ns'
def connect(self, data_directory):
filename = data_directory / 'functional_alltypes.csv'
if not filename.exists():
pytest.skip('test data set {} not found'.format(filename))
return ibis.csv.connect(data_directory)
def functional_alltypes(self):
schema = ibis.schema([
('bool_col', 'boolean'),
('string_col', 'string'),
('timestamp_col', 'timestamp')
])
return self.connection.table('functional_alltypes', schema=schema)
class Parquet(Pandas):
check_names = False
supports_divide_by_zero = True
returned_timestamp_unit = 'ns'
def connect(self, data_directory):
filename = data_directory / 'functional_alltypes.parquet'
if not filename.exists():
pytest.skip('test data set {} not found'.format(filename))
return ibis.parquet.connect(data_directory)
class SQLite(Backend, RoundAwayFromZero):
supports_arrays = False
supports_arrays_outside_of_select = supports_arrays
supports_window_operations = False
check_dtype = False
returned_timestamp_unit = 's'
def connect(self, data_directory):
path = os.environ.get('IBIS_TEST_SQLITE_DATABASE',
data_directory / 'ibis_testing.db')
path = Path(path)
if not path.exists():
pytest.skip('SQLite testing db {} does not exist'.format(path))
return ibis.sqlite.connect(str(path))
def functional_alltypes(self):
t = self.connection.database().functional_alltypes
return t.mutate(timestamp_col=t.timestamp_col.cast('timestamp'))
class Postgres(Backend, RoundHalfToEven):
# postgres rounds half to even for double precision and half away from zero
# for numeric and decimal
returned_timestamp_unit = 's'
def connect(self, data_directory):
user = os.environ.get('IBIS_TEST_POSTGRES_USER',
os.environ.get('PGUSER', 'postgres'))
password = os.environ.get('IBIS_TEST_POSTGRES_PASSWORD',
os.environ.get('PGPASS', '<PASSWORD>'))
host = os.environ.get('IBIS_TEST_POSTGRES_HOST',
os.environ.get('PGHOST', 'localhost'))
database = os.environ.get('IBIS_TEST_POSTGRES_DATABASE',
os.environ.get('PGDATABASE', 'ibis_testing'))
return ibis.postgres.connect(host=host, user=user, password=password,
database=database)
class MapD(Backend):
check_dtype = False
check_names = False
supports_window_operations = False
supports_divide_by_zero = False
supports_floating_modulus = False
returned_timestamp_unit = 's'
# Exception: Non-empty LogicalValues not supported yet
additional_skipped_operations = frozenset({
ops.Abs, ops.Round, ops.Ceil, ops.Floor, ops.Exp, ops.Sign, ops.Sqrt,
ops.Ln, ops.Log10, ops.Modulus
})
def connect(self, data_directory):
user = os.environ.get('IBIS_TEST_MAPD_USER', 'mapd')
password = os.environ.get(
'IBIS_TEST_MAPD_PASSWORD', '<PASSWORD>')
host = os.environ.get('IBIS_TEST_MAPD_HOST', 'localhost')
database = os.environ.get('IBIS_TEST_MAPD_DATABASE', 'ibis_testing')
return ibis.mapd.connect(
host=host, user=user, password=password, database=database
)
class MySQL(Backend, RoundHalfToEven):
# mysql has the same rounding behavior as postgres
check_dtype = False
supports_window_operations = False
returned_timestamp_unit = 's'
def connect(self, data_directory):
user = os.environ.get('IBIS_TEST_MYSQL_USER', 'ibis')
password = os.environ.get('IBIS_TEST_MYSQL_PASSWORD', '<PASSWORD>')
host = os.environ.get('IBIS_TEST_MYSQL_HOST', 'localhost')
database = os.environ.get('IBIS_TEST_MYSQL_DATABASE', 'ibis_testing')
con = ibis.mysql.connect(host=host, user=user, password=password,
database=database)
# mariadb supports window operations after version 10.2
# but the sqlalchemy version string looks like:
# 5.5.5.10.2.12.MariaDB.10.2.12+maria~jessie
if 'MariaDB' in str(con.version):
# we might move this parsing step to the mysql client
version = tuple(map(int, str(con.version).split('.')[7:9]))
if version >= (10, 2):
self.supports_window_operations = True
elif con.version >= parse_version('8.0'):
# mysql supports window operations after version 8
self.supports_window_operations = True
return con
def functional_alltypes(self):
# BOOLEAN <-> TINYINT(1)
t = self.connection.database().functional_alltypes
return t.mutate(bool_col=t.bool_col == 1)
class Clickhouse(Backend, RoundHalfToEven):
check_dtype = False
supports_window_operations = False
returned_timestamp_unit = 's'
supported_to_timestamp_units = {'s'}
supports_floating_modulus = False
def connect(self, data_directory):
host = os.environ.get('IBIS_TEST_CLICKHOUSE_HOST', 'localhost')
port = int(os.environ.get('IBIS_TEST_CLICKHOUSE_PORT', 9000))
user = os.environ.get('IBIS_TEST_CLICKHOUSE_USER', 'default')
password = os.environ.get('IBIS_TEST_CLICKHOUSE_PASSWORD', '')
database = os.environ.get('IBIS_TEST_CLICKHOUSE_DATABASE',
'ibis_testing')
return ibis.clickhouse.connect(host=host, port=port, password=password,
database=database, user=user)
def functional_alltypes(self):
t = self.connection.database().functional_alltypes
return t.mutate(bool_col=t.bool_col == 1)
def greatest(self, f, *args):
if len(args) > 2:
raise NotImplementedError(
'Clickhouse does not support more than 2 arguments to greatest'
)
return super(Clickhouse, self).least(f, *args)
def least(self, f, *args):
if len(args) > 2:
raise NotImplementedError(
'Clickhouse does not support more than 2 arguments to least'
)
return super(Clickhouse, self).least(f, *args)
class BigQuery(UnorderedComparator, Backend, RoundAwayFromZero):
    """Test-backend configuration for Google BigQuery."""

    supports_divide_by_zero = True
    supports_floating_modulus = False
    returned_timestamp_unit = 'us'

    def connect(self, data_directory):
        """Connect to the ``testing`` dataset of the configured project.

        Skips the test when google.auth is missing, when
        GOOGLE_BIGQUERY_PROJECT_ID is unset/empty, or when no default
        credentials are available. ``data_directory`` is unused.
        """
        google_auth = pytest.importorskip('google.auth')
        project_id = os.environ.get('GOOGLE_BIGQUERY_PROJECT_ID')
        if project_id is None:
            pytest.skip(
                'Environment variable GOOGLE_BIGQUERY_PROJECT_ID not defined')
        elif not project_id:
            pytest.skip(
                'Environment variable GOOGLE_BIGQUERY_PROJECT_ID is empty')
        try:
            return ibis.bigquery.connect(project_id, 'testing')
        except google_auth.exceptions.DefaultCredentialsError:
            pytest.skip('no bigquery credentials found')
class Impala(UnorderedComparator, Backend, RoundAwayFromZero):
    """Test-backend configuration for Impala (backed by an HDFS client)."""

    supports_arrays = True
    supports_arrays_outside_of_select = False
    check_dtype = False
    supports_divide_by_zero = True
    returned_timestamp_unit = 's'

    @classmethod
    def connect(cls, data_directory):
        """Connect to the ``ibis_testing`` Impala database.

        Builds the WebHDFS client first; TLS verification is disabled for
        GSSAPI/LDAP auth mechanisms (a warning is printed in that case).
        ``data_directory`` is unused but kept for interface parity.
        """
        env = ImpalaEnv()
        hdfs_client = ibis.hdfs_connect(
            host=env.nn_host,
            port=env.webhdfs_port,
            auth_mechanism=env.auth_mechanism,
            verify=env.auth_mechanism not in ['GSSAPI', 'LDAP'],
            user=env.webhdfs_user,
        )
        if env.auth_mechanism in ('GSSAPI', 'LDAP'):
            print("Warning: ignoring invalid Certificate Authority errors")
        return ibis.impala.connect(
            host=env.impala_host,
            port=env.impala_port,
            auth_mechanism=env.auth_mechanism,
            hdfs_client=hdfs_client,
            database='ibis_testing',
        )
| [
"ibis.parquet.connect",
"ibis.mysql.connect",
"ibis.hdfs_connect",
"ibis.postgres.connect",
"ibis.clickhouse.connect",
"ibis.impala.tests.conftest.IbisTestEnv",
"ibis.csv.connect",
"ibis.compat.Path",
"pandas.util.testing.assert_frame_equal",
"ibis.impala.connect",
"ibis.bigquery.connect",
"pa... | [((1843, 1882), 'pandas.util.testing.assert_series_equal', 'tm.assert_series_equal', (['*args'], {}), '(*args, **kwargs)\n', (1865, 1882), True, 'import pandas.util.testing as tm\n'), ((2044, 2095), 'pandas.util.testing.assert_frame_equal', 'tm.assert_frame_equal', (['left', 'right', '*args'], {}), '(left, right, *args, **kwargs)\n', (2065, 2095), True, 'import pandas.util.testing as tm\n'), ((4863, 4895), 'ibis.csv.connect', 'ibis.csv.connect', (['data_directory'], {}), '(data_directory)\n', (4879, 4895), False, 'import ibis\n'), ((4949, 5050), 'ibis.schema', 'ibis.schema', (["[('bool_col', 'boolean'), ('string_col', 'string'), ('timestamp_col',\n 'timestamp')]"], {}), "([('bool_col', 'boolean'), ('string_col', 'string'), (\n 'timestamp_col', 'timestamp')])\n", (4960, 5050), False, 'import ibis\n'), ((5512, 5548), 'ibis.parquet.connect', 'ibis.parquet.connect', (['data_directory'], {}), '(data_directory)\n', (5532, 5548), False, 'import ibis\n'), ((5829, 5908), 'os.environ.get', 'os.environ.get', (['"""IBIS_TEST_SQLITE_DATABASE"""', "(data_directory / 'ibis_testing.db')"], {}), "('IBIS_TEST_SQLITE_DATABASE', data_directory / 'ibis_testing.db')\n", (5843, 5908), False, 'import os\n'), ((5954, 5964), 'ibis.compat.Path', 'Path', (['path'], {}), '(path)\n', (5958, 5964), False, 'from ibis.compat import Path, parse_version\n'), ((7064, 7150), 'ibis.postgres.connect', 'ibis.postgres.connect', ([], {'host': 'host', 'user': 'user', 'password': 'password', 'database': 'database'}), '(host=host, user=user, password=password, database=\n database)\n', (7085, 7150), False, 'import ibis\n'), ((7687, 7732), 'os.environ.get', 'os.environ.get', (['"""IBIS_TEST_MAPD_USER"""', '"""mapd"""'], {}), "('IBIS_TEST_MAPD_USER', 'mapd')\n", (7701, 7732), False, 'import os\n'), ((7752, 7807), 'os.environ.get', 'os.environ.get', (['"""IBIS_TEST_MAPD_PASSWORD"""', '"""<PASSWORD>"""'], {}), "('IBIS_TEST_MAPD_PASSWORD', '<PASSWORD>')\n", (7766, 7807), False, 'import os\n'), ((7836, 
7886), 'os.environ.get', 'os.environ.get', (['"""IBIS_TEST_MAPD_HOST"""', '"""localhost"""'], {}), "('IBIS_TEST_MAPD_HOST', 'localhost')\n", (7850, 7886), False, 'import os\n'), ((7906, 7963), 'os.environ.get', 'os.environ.get', (['"""IBIS_TEST_MAPD_DATABASE"""', '"""ibis_testing"""'], {}), "('IBIS_TEST_MAPD_DATABASE', 'ibis_testing')\n", (7920, 7963), False, 'import os\n'), ((7979, 8056), 'ibis.mapd.connect', 'ibis.mapd.connect', ([], {'host': 'host', 'user': 'user', 'password': 'password', 'database': 'database'}), '(host=host, user=user, password=password, database=database)\n', (7996, 8056), False, 'import ibis\n'), ((8327, 8373), 'os.environ.get', 'os.environ.get', (['"""IBIS_TEST_MYSQL_USER"""', '"""ibis"""'], {}), "('IBIS_TEST_MYSQL_USER', 'ibis')\n", (8341, 8373), False, 'import os\n'), ((8393, 8449), 'os.environ.get', 'os.environ.get', (['"""IBIS_TEST_MYSQL_PASSWORD"""', '"""<PASSWORD>"""'], {}), "('IBIS_TEST_MYSQL_PASSWORD', '<PASSWORD>')\n", (8407, 8449), False, 'import os\n'), ((8465, 8516), 'os.environ.get', 'os.environ.get', (['"""IBIS_TEST_MYSQL_HOST"""', '"""localhost"""'], {}), "('IBIS_TEST_MYSQL_HOST', 'localhost')\n", (8479, 8516), False, 'import os\n'), ((8536, 8594), 'os.environ.get', 'os.environ.get', (['"""IBIS_TEST_MYSQL_DATABASE"""', '"""ibis_testing"""'], {}), "('IBIS_TEST_MYSQL_DATABASE', 'ibis_testing')\n", (8550, 8594), False, 'import os\n'), ((8609, 8687), 'ibis.mysql.connect', 'ibis.mysql.connect', ([], {'host': 'host', 'user': 'user', 'password': 'password', 'database': 'database'}), '(host=host, user=user, password=password, database=database)\n', (8627, 8687), False, 'import ibis\n'), ((9804, 9860), 'os.environ.get', 'os.environ.get', (['"""IBIS_TEST_CLICKHOUSE_HOST"""', '"""localhost"""'], {}), "('IBIS_TEST_CLICKHOUSE_HOST', 'localhost')\n", (9818, 9860), False, 'import os\n'), ((9946, 10000), 'os.environ.get', 'os.environ.get', (['"""IBIS_TEST_CLICKHOUSE_USER"""', '"""default"""'], {}), "('IBIS_TEST_CLICKHOUSE_USER', 
'default')\n", (9960, 10000), False, 'import os\n'), ((10020, 10071), 'os.environ.get', 'os.environ.get', (['"""IBIS_TEST_CLICKHOUSE_PASSWORD"""', '""""""'], {}), "('IBIS_TEST_CLICKHOUSE_PASSWORD', '')\n", (10034, 10071), False, 'import os\n'), ((10091, 10154), 'os.environ.get', 'os.environ.get', (['"""IBIS_TEST_CLICKHOUSE_DATABASE"""', '"""ibis_testing"""'], {}), "('IBIS_TEST_CLICKHOUSE_DATABASE', 'ibis_testing')\n", (10105, 10154), False, 'import os\n'), ((10204, 10303), 'ibis.clickhouse.connect', 'ibis.clickhouse.connect', ([], {'host': 'host', 'port': 'port', 'password': 'password', 'database': 'database', 'user': 'user'}), '(host=host, port=port, password=password, database=\n database, user=user)\n', (10227, 10303), False, 'import ibis\n'), ((11203, 11237), 'pytest.importorskip', 'pytest.importorskip', (['"""google.auth"""'], {}), "('google.auth')\n", (11222, 11237), False, 'import pytest\n'), ((11260, 11304), 'os.environ.get', 'os.environ.get', (['"""GOOGLE_BIGQUERY_PROJECT_ID"""'], {}), "('GOOGLE_BIGQUERY_PROJECT_ID')\n", (11274, 11304), False, 'import os\n'), ((12112, 12123), 'ibis.impala.tests.conftest.IbisTestEnv', 'ImpalaEnv', ([], {}), '()\n', (12121, 12123), True, 'from ibis.impala.tests.conftest import IbisTestEnv as ImpalaEnv\n'), ((12146, 12324), 'ibis.hdfs_connect', 'ibis.hdfs_connect', ([], {'host': 'env.nn_host', 'port': 'env.webhdfs_port', 'auth_mechanism': 'env.auth_mechanism', 'verify': "(env.auth_mechanism not in ['GSSAPI', 'LDAP'])", 'user': 'env.webhdfs_user'}), "(host=env.nn_host, port=env.webhdfs_port, auth_mechanism=\n env.auth_mechanism, verify=env.auth_mechanism not in ['GSSAPI', 'LDAP'],\n user=env.webhdfs_user)\n", (12163, 12324), False, 'import ibis\n'), ((12588, 12745), 'ibis.impala.connect', 'ibis.impala.connect', ([], {'host': 'env.impala_host', 'port': 'env.impala_port', 'auth_mechanism': 'env.auth_mechanism', 'hdfs_client': 'hdfs_client', 'database': '"""ibis_testing"""'}), "(host=env.impala_host, port=env.impala_port,\n 
auth_mechanism=env.auth_mechanism, hdfs_client=hdfs_client, database=\n 'ibis_testing')\n", (12607, 12745), False, 'import ibis\n'), ((6601, 6637), 'os.environ.get', 'os.environ.get', (['"""PGUSER"""', '"""postgres"""'], {}), "('PGUSER', 'postgres')\n", (6615, 6637), False, 'import os\n'), ((6738, 6776), 'os.environ.get', 'os.environ.get', (['"""PGPASS"""', '"""<PASSWORD>"""'], {}), "('PGPASS', '<PASSWORD>')\n", (6752, 6776), False, 'import os\n'), ((6865, 6902), 'os.environ.get', 'os.environ.get', (['"""PGHOST"""', '"""localhost"""'], {}), "('PGHOST', 'localhost')\n", (6879, 6902), False, 'import os\n'), ((7003, 7047), 'os.environ.get', 'os.environ.get', (['"""PGDATABASE"""', '"""ibis_testing"""'], {}), "('PGDATABASE', 'ibis_testing')\n", (7017, 7047), False, 'import os\n'), ((9880, 9929), 'os.environ.get', 'os.environ.get', (['"""IBIS_TEST_CLICKHOUSE_PORT"""', '(9000)'], {}), "('IBIS_TEST_CLICKHOUSE_PORT', 9000)\n", (9894, 9929), False, 'import os\n'), ((11348, 11422), 'pytest.skip', 'pytest.skip', (['"""Environment variable GOOGLE_BIGQUERY_PROJECT_ID not defined"""'], {}), "('Environment variable GOOGLE_BIGQUERY_PROJECT_ID not defined')\n", (11359, 11422), False, 'import pytest\n'), ((11654, 11699), 'ibis.bigquery.connect', 'ibis.bigquery.connect', (['project_id', 'dataset_id'], {}), '(project_id, dataset_id)\n', (11675, 11699), False, 'import ibis\n'), ((9193, 9213), 'ibis.compat.parse_version', 'parse_version', (['"""8.0"""'], {}), "('8.0')\n", (9206, 9213), False, 'from ibis.compat import Path, parse_version\n'), ((11491, 11562), 'pytest.skip', 'pytest.skip', (['"""Environment variable GOOGLE_BIGQUERY_PROJECT_ID is empty"""'], {}), "('Environment variable GOOGLE_BIGQUERY_PROJECT_ID is empty')\n", (11502, 11562), False, 'import pytest\n'), ((11766, 11810), 'pytest.skip', 'pytest.skip', (['"""no bigquery credentials found"""'], {}), "('no bigquery credentials found')\n", (11777, 11810), False, 'import pytest\n'), ((385, 400), 'numpy.sign', 'np.sign', 
(['series'], {}), '(series)\n', (392, 400), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
from geometry import *
def collectFaceComponents(facial_points):
    """Group the 81 dlib landmark points into named polygons/polylines.

    Parameters
    ----------
    facial_points : np.ndarray of shape (81, 2)
        (x, y) coordinates of the dlib 81-point face landmarks.

    Returns
    -------
    dict
        Mapping of component name -> np.ndarray of point coordinates.
    """
    # Jaw line (0-16) joined with the forehead landmarks and closed back
    # at landmark 0 to form the full face outline.
    faceShape = np.concatenate(
        (facial_points[0:17],
         facial_points[[78, 74, 79, 73, 80, 71, 70, 69, 76, 75, 77, 0]]))
    # Eyes: six landmarks each (36-41 left, 42-47 right), closed by
    # repeating the first corner point.
    leftEye = np.concatenate((facial_points[36:42],
                             np.array([facial_points[36]])))
    # BUG FIX: the right-eye slice previously stopped at 47 ([42:47]),
    # dropping landmark 47 and leaving the polygon one point short of
    # the left eye's.
    rightEye = np.concatenate((facial_points[42:48],
                              np.array([facial_points[42]])))
    leftIBrow = facial_points[17:22]
    rightIBrow = facial_points[22:27]
    noseLine = facial_points[27:31]
    noseArc = facial_points[31:36]
    # Lips: closed polygons over the mouth landmarks.
    upperLip = facial_points[[50, 51, 52, 63, 62, 61, 50]]
    lowerLip = facial_points[[67, 66, 65, 56, 57, 58, 67]]
    faceComponents = {
        "face_shape": faceShape,
        "left_eye": leftEye,
        "right_eye": rightEye,
        "left_i_brow": leftIBrow,
        "right_i_brow": rightIBrow,
        "nose_line": noseLine,
        "nose_arc": noseArc,
        "upper_lip": upperLip,
        "lower_lip": lowerLip
    }
    return faceComponents
def face_parts_imgs(image, landmarks_points, options):
    """Crop facial-feature sub-images from *image* using 81 dlib landmarks.

    Crops are taken as ``image[y:y2, x:x2]``, i.e. the image is indexed
    [row, col] and each landmark is assumed to be an (x, y) pixel
    coordinate -- TODO confirm against the caller.

    Parameters
    ----------
    image : ndarray
        Image to process.
    landmarks_points : ndarray of shape (81, 2)
        Coordinates of the 81 dlib landmarks.
    options : list of str
        Names of the features to extract; the literal 'all' expands
        in place to every supported feature name.

    Returns
    -------
    dict
        {'feature_name': cropped image array} for each requested feature.
    """
    # Facial feature extraction
    # Input:
    #      Image to process
    #      landmarks coordinates of the 81 landmark dlib
    #      options: array of strings, the features to be extracted
    #               mentioned below
    # Output:
    #       dictionary, {'feature_name': array(image)}
    # Get & Initialize face components
    faceComponents = collectFaceComponents(landmarks_points)
    face_shape = faceComponents["face_shape"]
    leftEye, rightEye = faceComponents["left_eye"], faceComponents["right_eye"]
    left_ibrow, right_ibrow = faceComponents["left_i_brow"], faceComponents["right_i_brow"]
    nose_line, nose_arc = faceComponents["nose_line"], faceComponents["nose_arc"]
    upper_lip = faceComponents["upper_lip"]
    if 'all' in options:
        # NOTE: mutates the caller's list in place.
        options.extend(['forehead', 'left_eyebrow', 'right_eyebrow',
                        'both_eyebrow', 'clear_eyebrow', 'left_eye',
                        'right_eye', 'both_eye', 'clear_eye',
                        'left_eye_eyebrow', 'right_eye_eyebrow',
                        'both_eye_eyebrow', 'clear_eye_eyebrow',
                        'nose', 'mouth', 'eye_nose_mouth_eyebrow'
                        ])
    # Initialize response
    features = {}
    # Detect the clear face side (Better capture for eye+brows))
    # distance between nose bottom-point & eyes angle-point
    lefteyeside = leftEye[3]      # inner corner of the left eye
    righteyeside = rightEye[0]    # inner corner of the right eye
    noseTip = nose_line[nose_line.shape[0]-1]  # last nose-line landmark
    if righteyeside[0] - noseTip[0] < 0:
        # (in your perspective), person is looking to right direction -> so Left eye is clear
        clear_eye = leftEye
        clear_ibrow = left_ibrow
        clearer_left_side = True
    elif noseTip[0] - lefteyeside[0] < 0:
        # Person is looking to left direction -> so right eye is clear
        clear_eye = rightEye
        clear_ibrow = right_ibrow
        clearer_left_side = False
    else:
        # Decide which side is clearer (person is slightly looking to right or left)
        nose_eye_diff = abs(noseTip[0]-lefteyeside[0]) - abs(noseTip[0]-righteyeside[0])
        ibrow_position = "right" if nose_eye_diff <= 1 else "left"
        clear_eye = faceComponents[ibrow_position+"_eye"]
        clear_ibrow = faceComponents[ibrow_position+"_i_brow"]
        clearer_left_side = True if ibrow_position == 'left' else False
    ##### Forehead #####
    if 'forehead' in options:
        x, y, x2, y2 = np.min(face_shape[:, 0]), np.min(face_shape[:, 1]), np.max(face_shape[:, 0]), np.min(clear_ibrow[:, 1])
        forehead_img = image[y:y2, x:x2] # Best resolution (224, 64)
        features['forehead'] = forehead_img
    ##### Left eyebrow #####
    if 'left_eyebrow' in options:
        # x = between left eyebrow and left side faceshape landmark [index 0 of 81]
        # x2 = nose top landmark (between eyebrows)
        # y = top eyebrow landmark, y2 = bottom eyebrow landmark
        x = int((left_ibrow[0, 0] + face_shape[0, 0]) / 2)
        x2 = nose_line[0, 0]
        y, y2 = np.min(left_ibrow[:, 1]), np.max(left_ibrow[:, 1])
        left_ibrow_img = image[y:y2, x:x2]
        features['left_eyebrow'] = left_ibrow_img
    ##### Right eyebrow #####
    if 'right_eyebrow' in options:
        # x = nose top landmark (between eyebrows)
        # x2 = between right eyebrow and right side faceshape landmark [index 16 of 81]
        # y = top eyebrow landmark, y2 = bottom eyebrow landmark
        # y2 = eyebrow bottom landmark
        x = nose_line[0,0]
        x2 = int((right_ibrow[4, 0] + face_shape[16, 0]) / 2)
        y, y2 = np.min(right_ibrow[:, 1]), np.max(right_ibrow[:, 1])
        right_ibrow_img = image[y:y2, x:x2]
        features['right_eyebrow'] = right_ibrow_img
    ##### Left eye #####
    if 'left_eye' in options:
        # x = between left eye and left side faceshape landmark [index 0 of 81]
        # x2 = top landmark of nose (between eyes)
        # y = between eye top landmark & eyebrow top landmark
        # y2 = second top nose landmark
        x = int((leftEye[0, 0] + face_shape[0, 0]) / 2)
        x2 = nose_line[0, 0]
        y = int((np.min(left_ibrow[:, 1]) + np.min(leftEye[:, 1])) / 2)
        y2 = nose_line[1, 1]
        leftEye_img = image[y:y2, x:x2]
        features['left_eye'] = leftEye_img
    ##### Right eye #####
    if 'right_eye' in options:
        # x = top landmark of nose (between eyes)
        # x2 = between right eye and right side faceshape landmark [index 16 of 81]
        # y = between eye top landmark & eyebrow top landmark
        # y2 = second top nose landmark
        x = nose_line[0, 0]
        x2 = int((rightEye[4, 0] + face_shape[16, 0]) / 2)
        y = int((np.min(right_ibrow[:, 1]) + np.min(rightEye[:, 1])) / 2)
        y2 = nose_line[1, 1]
        rightEye_img = image[y:y2, x:x2]
        features['right_eye'] = rightEye_img
    ##### Both eyebrows #####
    if 'both_eyebrow' in options:
        # x = between left eyebrow and left side faceshape landmark [index 0 of 81]
        # x2 = between right eyebrow and right side faceshape landmark [index 16 of 81]
        # y = top landmark of left/right eyebrow (maximum top is selected)
        # y2 = bottom landmark of left/right eyebrow (maximum bottom is selected)
        x = int((left_ibrow[0, 0] + face_shape[0, 0]) / 2)
        x2 = int((right_ibrow[4, 0] + face_shape[16, 0]) / 2)
        y = min(np.min(left_ibrow[:, 1]), np.min(right_ibrow[:, 1]))
        y2 = max(np.max(left_ibrow[:, 1]), np.max(right_ibrow[:, 1]))
        both_eyebrows_img = image[y:y2, x:x2]
        features['both_eyebrow'] = both_eyebrows_img
    ##### Both eyes #####
    if 'both_eye' in options:
        # x = between left eye and left side faceshape landmark [index 0 of 81]
        # x2 = between right eye and right side faceshape landmark [index 16 of 81]
        # y = between clear eyebrow & clear eye
        # y2 = second top nose landmark
        x = int((leftEye[0, 0] + face_shape[0, 0]) / 2)
        x2 = int((rightEye[4, 0] + face_shape[16, 0]) / 2)
        y = int((np.min(clear_ibrow[:, 1]) + np.min(clear_eye[:, 1])) / 2)
        y2 = nose_line[1, 1]
        both_eyes_img = image[y:y2, x:x2]
        features['both_eye'] = both_eyes_img
    ##### Eye and Eyebrow LEFT #####
    if 'left_eye_eyebrow' in options:
        # x = between left eyebrow and left side faceshape landmark [index 0 of 81]
        # x2 = nose top landmark (between eyebrows)
        # y = top left eyebrow landmark
        # y2 = second top nose landmark
        x = int((left_ibrow[0, 0] + face_shape[0, 0]) / 2)
        x2 = nose_line[0, 0]
        y = np.min(left_ibrow[:, 1])
        y2 = nose_line[1, 1]
        eye_eyebrow_left = image[y:y2, x:x2]
        features['left_eye_eyebrow'] = eye_eyebrow_left
    ##### Eye and Eyebrow RIGHT #####
    if 'right_eye_eyebrow' in options:
        # x = top landmark of nose (between eyes)
        # x2 = between right eyebrow and right side faceshape landmark [index 16 of 81]
        # y = top right eyebrow landmark
        # y2 = second top nose landmark
        x = nose_line[0, 0]
        x2 = int((right_ibrow[4, 0] + face_shape[16, 0]) / 2)
        y = np.min(right_ibrow[:, 1])
        y2 = nose_line[1, 1]
        eye_eyebrow_right = image[y:y2, x:x2]
        features['right_eye_eyebrow'] = eye_eyebrow_right
    ##### Eye and Eyebrow LEFT & RIGHT #####
    if 'both_eye_eyebrow' in options:
        # x = between left eyebrow and left side faceshape landmark [index 0 of 81]
        # x2 = between right eyebrow and right side faceshape landmark [index 16 of 81]
        # y = top eyebrow landmark
        # y2 = second top nose landmark
        x = int((left_ibrow[0, 0] + face_shape[0, 0]) / 2)
        x2 = int((right_ibrow[4, 0] + face_shape[16, 0]) / 2)
        y = min(np.min(left_ibrow[:, 1]), np.min(right_ibrow[:, 1]))
        y2 = nose_line[1, 1]
        eye_eyebrow_all = image[y:y2, x:x2]
        features['both_eye_eyebrow'] = eye_eyebrow_all
    ##### Clear Eyebrow #####
    if 'clear_eyebrow' in options:
        # x = left face side OR nose top landmark (between eyebrows)
        # x2 = between clearer eyebrow and clearer side faceshape landmark [index 16 of 81]
        # y = top eyebrow landmark
        # y2 = eyebrow bottom landmark
        x = int((left_ibrow[0, 0] + face_shape[0, 0]) / 2) if clearer_left_side else nose_line[0,0]
        x2 = nose_line[0,0] if clearer_left_side else int((right_ibrow[4, 0] + face_shape[16, 0]) / 2)
        y, y2 = np.min(clear_ibrow[:, 1]), np.max(clear_ibrow[:, 1])
        clear_ibrow_img = image[y:y2, x:x2]
        features['clear_eyebrow'] = clear_ibrow_img
    ##### Clear Eye #####
    if 'clear_eye' in options:
        # x = leftEye.x OR rightEye.x
        # x2 = leftEye.x2 OR rightEye.x2
        # y = between clear eyebrow and clear eye
        # y2 = second top nose landmark
        x = int((leftEye[0, 0] + face_shape[0, 0]) / 2) if clearer_left_side else nose_line[0, 0]
        x2 = nose_line[0, 0] if clearer_left_side else int((rightEye[4, 0] + face_shape[16, 0]) / 2)
        y = int((np.min(clear_ibrow[:, 1]) + np.min(clear_eye[:, 1])) / 2)
        y2 = nose_line[1,1]
        clear_eye_img = image[y:y2, x:x2]
        features['clear_eye'] = clear_eye_img
    ##### Clear Eye and Eyebrow #####
    if 'clear_eye_eyebrow' in options:
        # x = left face side OR nose top landmark (between eyebrows)
        # x2 = between clearer eyebrow and clearer side faceshape landmark [index 16 of 81]
        # y = top eyebrow landmark
        # y2 = second top nose landmark
        x = int((left_ibrow[0, 0] + face_shape[0, 0]) / 2) if clearer_left_side else nose_line[0,0]
        x2 = nose_line[0,0] if clearer_left_side else int((right_ibrow[4, 0] + face_shape[16, 0]) / 2)
        y = np.min(clear_ibrow[:, 1])
        y2 = nose_line[1,1]
        clear_eye_eyebrow = image[y:y2, x:x2]
        features['clear_eye_eyebrow'] = clear_eye_eyebrow
    ##### Nose #####
    if 'nose' in options:
        # x = the most right landmark of left eye || nose bottom landmark if it's more left
        # x2 = the most left landmark of right eye|| nose bottom landmark if it's more right
        # y = average point on Y-axis of eyebrow
        # y2 = upper lip top landmark
        x = min(leftEye[3,0], nose_arc[0, 0])
        x2 = max(rightEye[0,0], nose_arc[4, 0])
        y = int(np.average(clear_ibrow[:, 1]))
        y2 = upper_lip[2, 1]
        nose = image[y:y2, x:x2]
        features['nose'] = nose
    ##### Mouth #####
    if 'mouth' in options:
        # x = left cheek [index 5 of 81]
        # x2 = right cheek [index 11 of 81]
        # y = nose bottom landmark
        # y2 = point between chin bottom landmark and lower lip [index 68 of 81]
        x = face_shape[5,0]
        x2 = face_shape[11,0]
        y = nose_arc[2, 1]
        y2 = landmarks_points[8,1] - int((landmarks_points[8,1]-landmarks_points[57,1]) / 2)
        mouth = image[y:y2, x:x2]
        features['mouth'] = mouth
    ##### Eyebrow Eye Nose Mouth #####
    if 'eye_nose_mouth_eyebrow' in options:
        # x = between left eyebrow and left side faceshape landmark [index 0 of 81]
        # x2 = between right eyebrow and right side faceshape landmark [index 16 of 81]
        # y = top eyebrow landmark
        # y2 =point between chin bottom landmark and lower lip [index 68 of 81]
        x = int((left_ibrow[0, 0] + face_shape[0, 0]) / 2)
        x2 = int((right_ibrow[4, 0] + face_shape[16, 0]) / 2)
        y = min(np.min(left_ibrow[:, 1]), np.min(right_ibrow[:, 1]))
        y2 = landmarks_points[8,1] - int((landmarks_points[8,1]-landmarks_points[57,1]) / 2)
        general_features = image[y:y2, x:x2]
        features['eye_nose_mouth_eyebrow'] = general_features
    return features
| [
"numpy.average",
"numpy.min",
"numpy.max",
"numpy.array",
"numpy.concatenate"
] | [((193, 298), 'numpy.concatenate', 'np.concatenate', (['(facial_points[0:17], facial_points[[78, 74, 79, 73, 80, 71, 70, 69, 76, 75,\n 77, 0]])'], {}), '((facial_points[0:17], facial_points[[78, 74, 79, 73, 80, 71,\n 70, 69, 76, 75, 77, 0]]))\n', (207, 298), True, 'import numpy as np\n'), ((7813, 7837), 'numpy.min', 'np.min', (['left_ibrow[:, 1]'], {}), '(left_ibrow[:, 1])\n', (7819, 7837), True, 'import numpy as np\n'), ((8372, 8397), 'numpy.min', 'np.min', (['right_ibrow[:, 1]'], {}), '(right_ibrow[:, 1])\n', (8378, 8397), True, 'import numpy as np\n'), ((11034, 11059), 'numpy.min', 'np.min', (['clear_ibrow[:, 1]'], {}), '(clear_ibrow[:, 1])\n', (11040, 11059), True, 'import numpy as np\n'), ((335, 364), 'numpy.array', 'np.array', (['[facial_points[36]]'], {}), '([facial_points[36]])\n', (343, 364), True, 'import numpy as np\n'), ((419, 448), 'numpy.array', 'np.array', (['[facial_points[42]]'], {}), '([facial_points[42]])\n', (427, 448), True, 'import numpy as np\n'), ((3581, 3605), 'numpy.min', 'np.min', (['face_shape[:, 0]'], {}), '(face_shape[:, 0])\n', (3587, 3605), True, 'import numpy as np\n'), ((3607, 3631), 'numpy.min', 'np.min', (['face_shape[:, 1]'], {}), '(face_shape[:, 1])\n', (3613, 3631), True, 'import numpy as np\n'), ((3633, 3657), 'numpy.max', 'np.max', (['face_shape[:, 0]'], {}), '(face_shape[:, 0])\n', (3639, 3657), True, 'import numpy as np\n'), ((3659, 3684), 'numpy.min', 'np.min', (['clear_ibrow[:, 1]'], {}), '(clear_ibrow[:, 1])\n', (3665, 3684), True, 'import numpy as np\n'), ((4183, 4207), 'numpy.min', 'np.min', (['left_ibrow[:, 1]'], {}), '(left_ibrow[:, 1])\n', (4189, 4207), True, 'import numpy as np\n'), ((4209, 4233), 'numpy.max', 'np.max', (['left_ibrow[:, 1]'], {}), '(left_ibrow[:, 1])\n', (4215, 4233), True, 'import numpy as np\n'), ((4748, 4773), 'numpy.min', 'np.min', (['right_ibrow[:, 1]'], {}), '(right_ibrow[:, 1])\n', (4754, 4773), True, 'import numpy as np\n'), ((4775, 4800), 'numpy.max', 'np.max', (['right_ibrow[:, 1]'], 
{}), '(right_ibrow[:, 1])\n', (4781, 4800), True, 'import numpy as np\n'), ((6575, 6599), 'numpy.min', 'np.min', (['left_ibrow[:, 1]'], {}), '(left_ibrow[:, 1])\n', (6581, 6599), True, 'import numpy as np\n'), ((6601, 6626), 'numpy.min', 'np.min', (['right_ibrow[:, 1]'], {}), '(right_ibrow[:, 1])\n', (6607, 6626), True, 'import numpy as np\n'), ((6645, 6669), 'numpy.max', 'np.max', (['left_ibrow[:, 1]'], {}), '(left_ibrow[:, 1])\n', (6651, 6669), True, 'import numpy as np\n'), ((6671, 6696), 'numpy.max', 'np.max', (['right_ibrow[:, 1]'], {}), '(right_ibrow[:, 1])\n', (6677, 6696), True, 'import numpy as np\n'), ((9003, 9027), 'numpy.min', 'np.min', (['left_ibrow[:, 1]'], {}), '(left_ibrow[:, 1])\n', (9009, 9027), True, 'import numpy as np\n'), ((9029, 9054), 'numpy.min', 'np.min', (['right_ibrow[:, 1]'], {}), '(right_ibrow[:, 1])\n', (9035, 9054), True, 'import numpy as np\n'), ((9714, 9739), 'numpy.min', 'np.min', (['clear_ibrow[:, 1]'], {}), '(clear_ibrow[:, 1])\n', (9720, 9739), True, 'import numpy as np\n'), ((9741, 9766), 'numpy.max', 'np.max', (['clear_ibrow[:, 1]'], {}), '(clear_ibrow[:, 1])\n', (9747, 9766), True, 'import numpy as np\n'), ((11635, 11664), 'numpy.average', 'np.average', (['clear_ibrow[:, 1]'], {}), '(clear_ibrow[:, 1])\n', (11645, 11664), True, 'import numpy as np\n'), ((12779, 12803), 'numpy.min', 'np.min', (['left_ibrow[:, 1]'], {}), '(left_ibrow[:, 1])\n', (12785, 12803), True, 'import numpy as np\n'), ((12805, 12830), 'numpy.min', 'np.min', (['right_ibrow[:, 1]'], {}), '(right_ibrow[:, 1])\n', (12811, 12830), True, 'import numpy as np\n'), ((5299, 5323), 'numpy.min', 'np.min', (['left_ibrow[:, 1]'], {}), '(left_ibrow[:, 1])\n', (5305, 5323), True, 'import numpy as np\n'), ((5326, 5347), 'numpy.min', 'np.min', (['leftEye[:, 1]'], {}), '(leftEye[:, 1])\n', (5332, 5347), True, 'import numpy as np\n'), ((5868, 5893), 'numpy.min', 'np.min', (['right_ibrow[:, 1]'], {}), '(right_ibrow[:, 1])\n', (5874, 5893), True, 'import numpy as np\n'), 
((5896, 5918), 'numpy.min', 'np.min', (['rightEye[:, 1]'], {}), '(rightEye[:, 1])\n', (5902, 5918), True, 'import numpy as np\n'), ((7242, 7267), 'numpy.min', 'np.min', (['clear_ibrow[:, 1]'], {}), '(clear_ibrow[:, 1])\n', (7248, 7267), True, 'import numpy as np\n'), ((7270, 7293), 'numpy.min', 'np.min', (['clear_eye[:, 1]'], {}), '(clear_eye[:, 1])\n', (7276, 7293), True, 'import numpy as np\n'), ((10317, 10342), 'numpy.min', 'np.min', (['clear_ibrow[:, 1]'], {}), '(clear_ibrow[:, 1])\n', (10323, 10342), True, 'import numpy as np\n'), ((10345, 10368), 'numpy.min', 'np.min', (['clear_eye[:, 1]'], {}), '(clear_eye[:, 1])\n', (10351, 10368), True, 'import numpy as np\n')] |
"""
Test for PyBullet cartpole environment
"""
import time
import tigercontrol
import numpy as np
import jax.random as random
from tigercontrol.utils import generate_key
import pybullet as pybullet
import os.path
from os import path
from pathlib import Path
# Compute control input
def compute_control(y, K):
    """Map a vector of depth measurements through the gain matrix ``K``.

    The control input is ``K`` applied to the element-wise inverse
    depths; intermediate values are echoed to stdout for debugging.
    """
    print("y : " + str(y))
    print("K: " + str(K))
    inverse_depths = 1./y
    print("1/y: " + str(inverse_depths))
    control_input = np.matmul(inverse_depths, K)
    print("u_diff: " + str(control_input))
    return control_input
def compute_fgm_angle(y_bad, thetas_nominal_bad, state):
    """Follow-the-gap method: choose a steering angle toward the best gap.

    Parameters
    ----------
    y_bad : 2-D array-like; ``y_bad[0]`` holds the 401 ray distance
        readings in reversed order (they are flipped below).
    thetas_nominal_bad : sequence of 1-element sequences; nominal ray
        angles, one per ray.
    state : [x, y, theta] robot pose.

    Returns
    -------
    float
        Steering angle of the lowest-cost valid gap; when no gap is wide
        enough, decays the current heading (``0.95 * theta``) instead.
    """
    print(" ---------------- compute_fgm_angle ----------------")
    # Work with inverse distances: larger value == closer obstacle == costlier.
    y = 1.0 / np.flip(y_bad[0])
    thetas_nominal = [t[0] for t in thetas_nominal_bad]
    x_coord, y_coord, curr_angle = state[0], state[1], state[2]
    car_width = 1.2 * (2 * 0.27)  # safety threshold * width of car
    d_min = 3.0
    threshold = 1.0 / d_min  # inverse of distance
    theta_delta = (2.0 * np.pi / 3) / 401  # 401 rays of observation
    # Minimum angular width a gap must span for the car to fit at d_min.
    theta_min = 2 * np.tan(car_width / (2 * d_min))
    ind_min_width = 2.0 * int(np.ceil(theta_min / theta_delta))  # increase min width
    half_ind_min = int(ind_min_width / 2)
    valid_gaps = []
    gap_costs = []
    # BUG FIX: the window mean was previously computed twice into two
    # locals (one unused, one misleadingly named ``max_cost``); the
    # unused locals ``left`` and ``curr_gap`` are removed as well.
    # NOTE(review): the old name suggests np.max may have been intended;
    # behavior (np.mean) is preserved here -- confirm intent.
    for i in range(half_ind_min, len(y) - half_ind_min):
        cost = np.mean(y[i - half_ind_min:i + half_ind_min])
        if cost < threshold:
            valid_gaps.append(i)
            gap_costs.append(cost)
    if valid_gaps == []:
        return 0.95 * curr_angle  # slowly moves toward center
    target_angle = x_coord / 10.0  # 10 is width of field
    angles = [thetas_nominal[i] for i in valid_gaps]
    angle_diff = [np.abs(ang - target_angle) for ang in angles]
    curr_angle_diff = [np.abs(ang - curr_angle) for ang in angles]
    # Weights normalize each cost term by its mean magnitude (learned parameter).
    a, b, c = 10 / np.mean(gap_costs), 2 / np.mean(angle_diff), 4 / np.mean(curr_angle_diff)
    total_cost = [a * d + b * t + c * p
                  for d, t, p in zip(gap_costs, angle_diff, curr_angle_diff)]
    opt_angle = angles[np.argmin(np.array(total_cost))]
    print("\ncosts: " + str(gap_costs))
    print("\naction: " + str(opt_angle))
    return opt_angle
# Precompute costs for different environments and controllers
def precompute_environment_costs(numEnvs, K, L, params, husky, sphere, GUI, seed, environment, obsUid):
    """Simulate L controllers in numEnvs environments and record a cost each.

    Parameters
    ----------
    numEnvs : int, number of environments to simulate.
    K : list of controller gain matrices. NOTE(review): unused in the
        loop below -- the FGM controller (``compute_fgm_angle``) is used
        instead; see the commented-out ``compute_control`` call.
    L : int, number of controllers.
    params : dict with keys 'numRays', 'senseRadius', 'robotRadius',
        'robotHeight', 'thetas_nominal', 'T_horizon'.
    husky, sphere : pybullet body ids used for visualization updates.
    GUI : bool; when True, updates the debug camera and slows the loop.
    seed : int, numpy random seed for reproducibility.
    environment : simulation object providing ``getDistances`` and
        ``step_fgm``.
    obsUid : pybullet body id of the obstacles; removed before returning.

    Returns
    -------
    np.ndarray of shape (numEnvs, L) with one cost in [0, 1] per run.
    """
    # Parameters (senseRadius/robotRadius are read but unused below)
    numRays = params['numRays']
    senseRadius = params['senseRadius']
    robotRadius = params['robotRadius']
    robotHeight = params['robotHeight']
    thetas_nominal = params['thetas_nominal']
    T_horizon = params['T_horizon']
    # Fix random seed for consistency of results
    np.random.seed(seed)
    # Initialize costs for the different environments and different controllers
    costs = np.zeros((numEnvs, L))
    for env in range(0,numEnvs):
        # Print progress every 10 environments
        if (env%10 == 0):
            print(env, "out of", numEnvs)
        # Sample environment
        # heightObs = 20*robotHeight
        # obsUid = generate_obstacles(pybullet, heightObs, robotRadius)
        for l in range(0,L):
            # Initialize position of robot
            state = [0.0, 1.0, 0.0] # [x, y, theta]
            quat = pybullet.getQuaternionFromEuler([0.0, 0.0, state[2]+np.pi/2]) # pi/2 since Husky visualization is rotated by pi/2
            pybullet.resetBasePositionAndOrientation(husky, [state[0], state[1], 0.0], quat)
            pybullet.resetBasePositionAndOrientation(sphere, [state[0], state[1], robotHeight], [0,0,0,1])
            # Cost for this particular controller (lth controller) in this environment
            cost_env_l = 0.0
            all_angles = []
            for t in range(0, T_horizon):
                # Get sensor measurement
                y = environment.getDistances(pybullet)
                # Compute control input
                # u = compute_control(y, K[l])
                angle = compute_fgm_angle(y, thetas_nominal, state)
                all_angles.append(angle)
                # Update state
                # state = robot_update_state(state, u)
                # step_fgm also reports the collision cost for this step
                state, cost_env_l, done, _ = environment.step_fgm(angle)
                if cost_env_l == 1.0:
                    print(state)
                    print("\nwoops\n")
                # Update position of pybullet object
                # quat = pybullet.getQuaternionFromEuler([0.0, 0.0, state[2]+np.pi/2]) # pi/2 since Husky visualization is rotated by pi/2
                # pybullet.resetBasePositionAndOrientation(husky, [state[0], state[1], 0.0], quat)
                # pybullet.resetBasePositionAndOrientation(sphere, [state[0], state[1], robotHeight], [0,0,0,1])
                if (GUI):
                    pybullet.resetDebugVisualizerCamera(cameraDistance=5.0, cameraYaw=0.0, cameraPitch=-45.0, cameraTargetPosition=[state[0], state[1], 2*robotHeight])
                    time.sleep(0.025)
                # Check if the robot is in collision. If so, cost = 1.0.
                # Get closest points. Note: Last argument is distance threshold. Since it's set to 0, the function will only return points if the distance is less than zero. So, closestPoints is non-empty iff there is a collision.
                # closestPoints = pybullet.getClosestPoints(sphere, obsUid, 0.0)
                # See if the robot is in collision. If so, cost = 1.0.
                '''
                if closestPoints: # Check if closestPoints is non-empty
                    cost_env_l = 1.0
                    break # break out of simulation for this environment
                '''
                # Terminate this rollout early on collision
                if cost_env_l == 1.0:
                    break;
            # Check that cost is between 0 and 1 (for sanity)
            if (cost_env_l > 1.0):
                raise ValueError("Cost is greater than 1!")
            if (cost_env_l < 0.0):
                raise ValueError("Cost is less than 0!")
            # Record cost for this environment and this controller
            costs[env][l] = cost_env_l
    # Remove obstacles
    pybullet.removeBody(obsUid)
    # NOTE(review): ``all_angles`` is the last rollout's list; this raises
    # NameError/ZeroDivisionError if numEnvs, L, or T_horizon is 0.
    print("mean angle = " + str(sum(all_angles)/len(all_angles)))
    return costs
# cartpole test
def test_obstacles(verbose=False):
    """Smoke-test the PyBullet-Obstacles environment with one controller.

    Builds an L = 1 family of piecewise-linear gain vectors K over the
    nominal ray angles, runs ``precompute_environment_costs`` on m = 1
    environment, and prints the resulting cost matrix.

    Parameters
    ----------
    verbose : bool, forwarded to ``environment.reset(render=...)``.
    """
    environment = tigercontrol.environment("PyBullet-Obstacles")
    # obs = environment.reset(render=verbose)
    # controller = tigercontrol.controllers("CartPoleNN")
    # controller.initialize(environment.get_observation_space(), environment.get_action_space())
    # Initial setup
    # Flag that sets if things are visualized
    # GUI = True; # Only for debugging purposes
    GUI = True
    random_seed =5
    m = 1  # number of environments to evaluate
    state, params, husky, sphere, obsUid = environment.reset(render=verbose)
    numRays = params['numRays']
    thetas_nominal = params['thetas_nominal']
    # Controller and optimization setup
    # Choose L controllers
    num_x_intercepts = 1
    num_y_intercepts = 1
    L = num_x_intercepts*num_y_intercepts
    x_intercepts = np.linspace(0.1, 5.0, num_x_intercepts)
    y_intercepts = np.linspace(0.0, 10.0, num_y_intercepts)
    print("x_intercepts: " + str(x_intercepts))
    print("y_intercepts: " + str(y_intercepts))
    print("thetas_nominal: " + str(thetas_nominal))
    print("numRays: " + str(numRays))
    K = L*[None]
    # Each K is a per-ray gain that decays linearly from y_intercept at
    # theta = 0 to zero at theta = +/- x_intercept.
    for i in range(num_x_intercepts):
        for j in range(num_y_intercepts):
            K[i*num_y_intercepts + j] = np.zeros((numRays,1))
            for r in range(numRays):
                if (thetas_nominal[r] > 0):
                    K[i*num_y_intercepts + j][r] = y_intercepts[j]*(x_intercepts[i] - thetas_nominal[r])/x_intercepts[i]
                else:
                    K[i*num_y_intercepts + j][r] = y_intercepts[j]*(-x_intercepts[i] - thetas_nominal[r])/x_intercepts[i]
    print("First K = " + str(K))
    costs_precomputed = precompute_environment_costs(m, K, L, params, husky, sphere, GUI, random_seed, environment, obsUid)
    print("costs_precomputed: " + str(costs_precomputed))
    print("test_obstacles passed")
if __name__ == "__main__":
    # Entry point intentionally disabled: running the test opens a
    # PyBullet session; uncomment to run interactively.
    #test_obstacles(verbose=True)
    pass
| [
"pybullet.getQuaternionFromEuler",
"numpy.random.seed",
"numpy.flip",
"numpy.abs",
"numpy.ceil",
"numpy.zeros",
"pybullet.resetDebugVisualizerCamera",
"time.sleep",
"pybullet.removeBody",
"numpy.tan",
"numpy.mean",
"numpy.array",
"numpy.linspace",
"numpy.matmul",
"tigercontrol.environmen... | [((451, 472), 'numpy.matmul', 'np.matmul', (['(1.0 / y)', 'K'], {}), '(1.0 / y, K)\n', (460, 472), True, 'import numpy as np\n'), ((2760, 2780), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (2774, 2780), True, 'import numpy as np\n'), ((2878, 2900), 'numpy.zeros', 'np.zeros', (['(numEnvs, L)'], {}), '((numEnvs, L))\n', (2886, 2900), True, 'import numpy as np\n'), ((6445, 6491), 'tigercontrol.environment', 'tigercontrol.environment', (['"""PyBullet-Obstacles"""'], {}), "('PyBullet-Obstacles')\n", (6469, 6491), False, 'import tigercontrol\n'), ((7191, 7230), 'numpy.linspace', 'np.linspace', (['(0.1)', '(5.0)', 'num_x_intercepts'], {}), '(0.1, 5.0, num_x_intercepts)\n', (7202, 7230), True, 'import numpy as np\n'), ((7250, 7290), 'numpy.linspace', 'np.linspace', (['(0.0)', '(10.0)', 'num_y_intercepts'], {}), '(0.0, 10.0, num_y_intercepts)\n', (7261, 7290), True, 'import numpy as np\n'), ((690, 707), 'numpy.flip', 'np.flip', (['y_bad[0]'], {}), '(y_bad[0])\n', (697, 707), True, 'import numpy as np\n'), ((1050, 1081), 'numpy.tan', 'np.tan', (['(car_width / (2 * d_min))'], {}), '(car_width / (2 * d_min))\n', (1056, 1081), True, 'import numpy as np\n'), ((1352, 1397), 'numpy.mean', 'np.mean', (['y[i - half_ind_min:i + half_ind_min]'], {}), '(y[i - half_ind_min:i + half_ind_min])\n', (1359, 1397), True, 'import numpy as np\n'), ((1413, 1458), 'numpy.mean', 'np.mean', (['y[i - half_ind_min:i + half_ind_min]'], {}), '(y[i - half_ind_min:i + half_ind_min])\n', (1420, 1458), True, 'import numpy as np\n'), ((1793, 1819), 'numpy.abs', 'np.abs', (['(ang - target_angle)'], {}), '(ang - target_angle)\n', (1799, 1819), True, 'import numpy as np\n'), ((1862, 1886), 'numpy.abs', 'np.abs', (['(ang - curr_angle)'], {}), '(ang - curr_angle)\n', (1868, 1886), True, 'import numpy as np\n'), ((6255, 6282), 'pybullet.removeBody', 'pybullet.removeBody', (['obsUid'], {}), '(obsUid)\n', (6274, 6282), True, 'import pybullet as pybullet\n'), 
((1112, 1144), 'numpy.ceil', 'np.ceil', (['(theta_min / theta_delta)'], {}), '(theta_min / theta_delta)\n', (1119, 1144), True, 'import numpy as np\n'), ((1924, 1942), 'numpy.mean', 'np.mean', (['gap_costs'], {}), '(gap_costs)\n', (1931, 1942), True, 'import numpy as np\n'), ((1948, 1967), 'numpy.mean', 'np.mean', (['angle_diff'], {}), '(angle_diff)\n', (1955, 1967), True, 'import numpy as np\n'), ((1973, 1997), 'numpy.mean', 'np.mean', (['curr_angle_diff'], {}), '(curr_angle_diff)\n', (1980, 1997), True, 'import numpy as np\n'), ((2150, 2170), 'numpy.array', 'np.array', (['total_cost'], {}), '(total_cost)\n', (2158, 2170), True, 'import numpy as np\n'), ((3342, 3407), 'pybullet.getQuaternionFromEuler', 'pybullet.getQuaternionFromEuler', (['[0.0, 0.0, state[2] + np.pi / 2]'], {}), '([0.0, 0.0, state[2] + np.pi / 2])\n', (3373, 3407), True, 'import pybullet as pybullet\n'), ((3469, 3554), 'pybullet.resetBasePositionAndOrientation', 'pybullet.resetBasePositionAndOrientation', (['husky', '[state[0], state[1], 0.0]', 'quat'], {}), '(husky, [state[0], state[1], 0.0], quat\n )\n', (3509, 3554), True, 'import pybullet as pybullet\n'), ((3562, 3663), 'pybullet.resetBasePositionAndOrientation', 'pybullet.resetBasePositionAndOrientation', (['sphere', '[state[0], state[1], robotHeight]', '[0, 0, 0, 1]'], {}), '(sphere, [state[0], state[1],\n robotHeight], [0, 0, 0, 1])\n', (3602, 3663), True, 'import pybullet as pybullet\n'), ((7616, 7638), 'numpy.zeros', 'np.zeros', (['(numRays, 1)'], {}), '((numRays, 1))\n', (7624, 7638), True, 'import numpy as np\n'), ((4866, 5023), 'pybullet.resetDebugVisualizerCamera', 'pybullet.resetDebugVisualizerCamera', ([], {'cameraDistance': '(5.0)', 'cameraYaw': '(0.0)', 'cameraPitch': '(-45.0)', 'cameraTargetPosition': '[state[0], state[1], 2 * robotHeight]'}), '(cameraDistance=5.0, cameraYaw=0.0,\n cameraPitch=-45.0, cameraTargetPosition=[state[0], state[1], 2 *\n robotHeight])\n', (4901, 5023), True, 'import pybullet as pybullet\n'), ((5035, 
5052), 'time.sleep', 'time.sleep', (['(0.025)'], {}), '(0.025)\n', (5045, 5052), False, 'import time\n')] |
import os
from os.path import exists as pexists
from os.path import join as pjoin
import numpy as np
import torch
from pytorch_lightning.callbacks import ModelCheckpoint
from .base import EpochBaseLightningModel
from .. import models
from ..data.imagenet_datasets import MyConcatDataset
from ..data.imagenet_datasets import MyImageFolder, MyImagenetBoundingBoxFolder
from ..data.in9_datasets import IN9Dataset
from ..inpainting.Baseline import TileInpainter
class IN9LightningModel(EpochBaseLightningModel):
    """Lightning module for the Backgrounds Challenge (IN-9) classification task.

    Fine-tunes a Big Transfer (BiT) backbone on an IN-9 training split and
    validates on the original plus the mixed-background test splits.
    """
    # Name of the training split; subclasses override (see IN9LLightningModel).
    train_dataset = 'original'
    # Epochs at which the learning rate is decayed by 10x.
    milestones = [6, 12, 18]
    max_epochs = 25
    def init_setup(self):
        """Build the BiT model and, if requested, load pretrained weights."""
        # Backfill defaults for hparams that older runs/checkpoints may lack.
        if 'arch' not in self.hparams:
            self.hparams.arch = 'BiT-S-R50x1'
        if 'data_ratio' not in self.hparams:
            self.hparams.data_ratio = 1.
        if 'bbox_noise' not in self.hparams:
            self.hparams.bbox_noise = 0.
        # NOTE(review): 'channels*' counterfactual modes appear to use a doubled
        # (2 x 9 classes) head — confirm against the cf handling elsewhere.
        head_size = 18 if self.hparams.cf.startswith('channels') else 9
        self.model = models.KNOWN_MODELS[self.hparams.arch](
            head_size=head_size,
            zero_head=False)
        if self.hparams.finetune:
            # Download the official BiT checkpoint on first use (network side
            # effect), then initialize the backbone from it.
            if not pexists(f"models/{self.hparams.arch}.npz"):
                os.system(f'wget -O models/{self.hparams.arch}.npz '
                          f'https://storage.googleapis.com/bit_models/{self.hparams.arch}.npz')
            self.my_logger.info("Fine-tuning from BiT")
            self.model.load_from(np.load(f"models/{self.hparams.arch}.npz"))
    def configure_optimizers(self):
        """Return SGD with momentum plus a per-epoch multi-step LR schedule."""
        optim = torch.optim.SGD(
            self.model.parameters(), lr=self.hparams.base_lr,
            momentum=0.9, weight_decay=1e-4)
        scheduler = {
            # NOTE(review): stale comment said "Total 50 epochs" but
            # max_epochs is 25 here (20 in IN9LLightningModel) — verify.
            'scheduler': torch.optim.lr_scheduler.MultiStepLR(
                optim, milestones=self.milestones, gamma=0.1),
            'interval': 'epoch',
        }
        return [optim], [scheduler]
    def get_inpainting_model(self, inpaint):
        """Return the inpainter for *inpaint*; cagan uses precomputed images."""
        if inpaint == 'cagan':
            # cagan counterfactuals are precomputed on disk, so no model here.
            return None
        if inpaint == 'tile' and self.hparams.mask == 'seg':
            return TileInpainter(use_bbox_to_mask=True)
        return super().get_inpainting_model(inpaint)
    def _make_train_val_dataset(self):
        """Assemble the training dataset and the list of validation datasets.

        Returns (train_dataset, val_datasets, val_set_names).
        """
        # Directory of precomputed cagan counterfactual images, if used.
        cf_inpaint_dir = None
        if self.hparams.inpaint == 'cagan':
            if self.hparams.bbox_noise == 0.:
                cf_inpaint_dir = './datasets/bg_challenge/train/original_bbox_cf_cagan/train/'
            else:
                cf_inpaint_dir = './datasets/bg_challenge/train/cf_cagan_bbox_noise_%s/train/' \
                     % self.hparams.bbox_noise
        if self.hparams.mask == 'bbox':
            # Bounding-box masks come from the ImageNet localization CSV and
            # do not support bbox-noise experiments.
            assert self.hparams.bbox_noise == 0.
            train_d = MyImagenetBoundingBoxFolder(
                './datasets/bg_challenge/train/%s/train/' % self.train_dataset,
                './datasets/imagenet/LOC_train_solution.csv',
                cf_inpaint_dir=cf_inpaint_dir,
                transform=MyImagenetBoundingBoxFolder.get_train_transform(
                    self.hparams.test_run))
        else:
            # Segmentation masks: foreground-removed images live in no_fg.
            train_d = IN9Dataset(
                './datasets/bg_challenge/train/%s/train/' % self.train_dataset,
                no_fg_dir='./datasets/bg_challenge/train/no_fg/train/',
                cf_inpaint_dir=cf_inpaint_dir,
                bbox_noise=self.hparams.bbox_noise,
                transform=IN9Dataset.get_train_transform(self.hparams.test_run)
            )
        if self.hparams.data_ratio == 1.:
            pass
        elif 0. < self.hparams.data_ratio < 1.:
            # Sub-sample the training set to the requested fraction.
            num_data = int(len(train_d) * self.hparams.data_ratio)
            train_d, _ = self.sub_dataset(train_d, num_data)
        elif (self.hparams.data_ratio > 1. or self.hparams.data_ratio == -1) \
                and self.train_dataset == 'original':
            # Augment 'original' with extra in9l images that are NOT already
            # part of the original split (-1 means "use all of in9l").
            orig_filenames = set()
            with open('./datasets/bg_challenge/train/original/train_filenames') as fp:
                for line in fp:
                    orig_filenames.add(line.strip())
            def is_valid_file(path):
                # Keep only in9l files absent from the original split.
                return os.path.basename(path) not in orig_filenames
            more_train_d = MyImageFolder(
                './datasets/bg_challenge/train/in9l/train/',
                is_valid_file=is_valid_file,
                transform=MyImageFolder.get_train_transform(self.hparams.test_run))
            if self.hparams.data_ratio > 1.:
                more_data = self.hparams.data_ratio - 1.
                num_data = int(len(train_d) * more_data)
                if num_data < len(more_train_d):
                    more_train_d, _ = self.sub_dataset(more_train_d, num_data)
            train_d = MyConcatDataset([train_d, more_train_d])
        else:
            if self.hparams.data_ratio != 1.:
                raise NotImplementedError(
                    'Data ratio is wronly specified: ' + str(self.hparams.data_ratio))
        # Validation: held-out split of the training data plus the four
        # Backgrounds Challenge evaluation sets.
        val_d = MyImageFolder(
            './datasets/bg_challenge/train/%s/val/' % self.train_dataset,
            transform=MyImageFolder.get_val_transform(self.hparams.test_run))
        orig_test_d = MyImageFolder(
            './datasets/bg_challenge/test/original/val/',
            transform=MyImageFolder.get_val_transform(self.hparams.test_run))
        mixed_same_test_d = MyImageFolder(
            './datasets/bg_challenge/test/mixed_same/val/',
            transform=MyImageFolder.get_val_transform(self.hparams.test_run))
        mixed_rand_test_d = MyImageFolder(
            './datasets/bg_challenge/test/mixed_rand/val/',
            transform=MyImageFolder.get_val_transform(self.hparams.test_run))
        mixed_next_test_d = MyImageFolder(
            './datasets/bg_challenge/test/mixed_next/val/',
            transform=MyImageFolder.get_val_transform(self.hparams.test_run))
        val_ds = [
            val_d, orig_test_d, mixed_same_test_d,
            mixed_rand_test_d, mixed_next_test_d]
        val_sets_names = [
            'val', 'orig', 'mixed_same', 'mixed_rand', 'mixed_next'
        ]
        return train_d, val_ds, val_sets_names
    def is_data_ratio_exp(self):
        """True when this run varies the amount of training data."""
        return self.hparams.data_ratio != 1. or '_dr' in self.hparams.name
    def is_bbox_noise_exp(self):
        """True when this run perturbs the foreground masks."""
        return self.hparams.bbox_noise > 0. or '_bn' in self.hparams.name
    @classmethod
    def add_model_specific_args(cls, parser):
        """Register this model's command-line arguments on *parser*."""
        # Whether the foreground mask is a bounding box or a segmentation.
        parser.add_argument("--mask", type=str, default='seg',
                            choices=['bbox', 'seg'])
        parser.add_argument("--arch", type=str, default='BiT-S-R50x1')
        parser.add_argument("--max_epochs", type=int, default=cls.max_epochs)
        parser.add_argument("--batch", type=int, default=32,
                            help="Batch size.")
        parser.add_argument("--val_batch", type=int, default=256,
                            help="Batch size.")
        parser.add_argument("--batch_split", type=int, default=1,
                            help="Number of batches to compute gradient on before updating weights.")
        parser.add_argument("--base_lr", type=float, default=0.05)
        parser.add_argument("--pl_model", type=str, default=cls.__name__)
        parser.add_argument("--reg_anneal", type=float, default=0.)
        parser.add_argument("--data_ratio", type=float, default=1.,
                            help='Specifies how many data to use. '
                                 'Default is 1: it means just using the original dataset.'
                                 'If bigger than 1, e.g. 2, then it adds 1x more data from'
                                 'in9l dataset. If it is -1, then it uses all data in9l.')
        parser.add_argument("--bbox_noise", type=float, default=0.,
                            help='If bigger than 0, we randomly shuffle the foreground mask')
        return parser
    def pl_trainer_args(self):
        """Build kwargs for the Lightning Trainer: checkpointing and resume."""
        checkpoint_callback = ModelCheckpoint(
            filepath=pjoin(self.hparams.logdir, self.hparams.name, '{epoch}'),
            save_top_k=1,
            save_last=True,
            verbose=True,
            mode='max',
            monitor='val_acc1',
        )
        args = dict()
        args['max_epochs'] = self.hparams.max_epochs
        args['checkpoint_callback'] = checkpoint_callback
        # Resume automatically when a previous run left a last.ckpt behind.
        last_ckpt = pjoin(self.hparams.logdir, self.hparams.name, 'last.ckpt')
        if pexists(last_ckpt):
            args['resume_from_checkpoint'] = last_ckpt
        return args
    def get_grad_cam_layer(self):
        """Layer of the classification head used as the Grad-CAM target."""
        return self.model.head[1]
class IN9LLightningModel(IN9LightningModel):
    """Variant trained on the larger 'in9l' split with a shorter LR schedule."""
    train_dataset = 'in9l'
    milestones = [5, 10, 15]
    max_epochs = 20
| [
"numpy.load",
"os.path.basename",
"os.path.exists",
"os.system",
"os.path.join",
"torch.optim.lr_scheduler.MultiStepLR"
] | [((8344, 8402), 'os.path.join', 'pjoin', (['self.hparams.logdir', 'self.hparams.name', '"""last.ckpt"""'], {}), "(self.hparams.logdir, self.hparams.name, 'last.ckpt')\n", (8349, 8402), True, 'from os.path import join as pjoin\n'), ((8414, 8432), 'os.path.exists', 'pexists', (['last_ckpt'], {}), '(last_ckpt)\n', (8421, 8432), True, 'from os.path import exists as pexists\n'), ((1742, 1828), 'torch.optim.lr_scheduler.MultiStepLR', 'torch.optim.lr_scheduler.MultiStepLR', (['optim'], {'milestones': 'self.milestones', 'gamma': '(0.1)'}), '(optim, milestones=self.milestones,\n gamma=0.1)\n', (1778, 1828), False, 'import torch\n'), ((1145, 1187), 'os.path.exists', 'pexists', (['f"""models/{self.hparams.arch}.npz"""'], {}), "(f'models/{self.hparams.arch}.npz')\n", (1152, 1187), True, 'from os.path import exists as pexists\n'), ((1205, 1333), 'os.system', 'os.system', (['f"""wget -O models/{self.hparams.arch}.npz https://storage.googleapis.com/bit_models/{self.hparams.arch}.npz"""'], {}), "(\n f'wget -O models/{self.hparams.arch}.npz https://storage.googleapis.com/bit_models/{self.hparams.arch}.npz'\n )\n", (1214, 1333), False, 'import os\n'), ((1444, 1486), 'numpy.load', 'np.load', (['f"""models/{self.hparams.arch}.npz"""'], {}), "(f'models/{self.hparams.arch}.npz')\n", (1451, 1486), True, 'import numpy as np\n'), ((7986, 8042), 'os.path.join', 'pjoin', (['self.hparams.logdir', 'self.hparams.name', '"""{epoch}"""'], {}), "(self.hparams.logdir, self.hparams.name, '{epoch}')\n", (7991, 8042), True, 'from os.path import join as pjoin\n'), ((4101, 4123), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (4117, 4123), False, 'import os\n')] |
import numpy as np
import matplotlib.pyplot as plt
import tensorflow.compat.v1 as tf
# This model builds an explicit TF1 graph/session, so disable TF2 eager mode.
tf.disable_v2_behavior()
class SVR(object):
    """Linear support vector regression trained by gradient descent (TF1).

    Minimizes ||W||/2 plus the mean epsilon-insensitive absolute error.
    """
    def __init__(self, epsilon=0.5):
        # Half-width of the penalty-free tube around the regression line.
        self.epsilon = epsilon
    def fit(self, X, y, epochs=100, learning_rate=0.1):
        """Fit W, b on (X, y); 1-D inputs are reshaped to column vectors.

        Returns self so calls can be chained.
        """
        self.sess = tf.Session()
        feature_len = X.shape[-1] if len(X.shape) > 1 else 1
        if len(X.shape) == 1:
            X = X.reshape(-1, 1)
        if len(y.shape) == 1:
            y = y.reshape(-1, 1)
        self.X = tf.placeholder(dtype=tf.float32, shape=(None, feature_len))
        self.y = tf.placeholder(dtype=tf.float32, shape=(None, 1))
        self.W = tf.Variable(tf.random_normal(shape=(feature_len, 1)))
        self.b = tf.Variable(tf.random_normal(shape=(1,)))
        self.y_pred = tf.matmul(self.X, self.W) + self.b
        # Epsilon-insensitive loss: only the part of |error| that exceeds
        # epsilon is penalized; errors inside the tube cost nothing.
        self.loss = tf.norm(self.W) / 2 + tf.reduce_mean(tf.maximum(0.00, tf.abs(self.y_pred - self.y) - self.epsilon))
        opt = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
        opt_op = opt.minimize(self.loss)
        self.sess.run(tf.global_variables_initializer())
        for i in range(epochs):
            # Report the loss before each update (carriage return keeps the
            # progress on one console line).
            loss = self.sess.run(self.loss, { self.X: X, self.y: y })
            print(f"\r{i+1}/{epochs}: loss:{loss}", end="")
            self.sess.run(opt_op, { self.X:X, self.y:y })
        return self
    def predict(self, X, y=None):
        """Return predictions for X (shape (n, 1)); *y* is unused."""
        if len(X.shape) == 1:
            X = X.reshape(-1, 1)
        y_pred = self.sess.run(self.y_pred, { self.X: X })
        return y_pred
if __name__ == "__main__":
    # Demo: fit the SVR to a noisy line y = 2x + 1 and plot fit vs. data.
    inputs = np.linspace(start=0, stop=5, num=20)
    slope = 2
    intercept = 1
    targets = slope * inputs + intercept
    targets += np.random.normal(size=(len(targets),))
    model = SVR(epsilon=0.2)
    model.fit(inputs, targets)
    plt.plot(inputs, targets, "x", inputs, model.predict(inputs), "-")
    plt.legend(["actual", "prediction"])
    plt.show()
| [
"matplotlib.pyplot.show",
"tensorflow.compat.v1.placeholder",
"matplotlib.pyplot.legend",
"tensorflow.compat.v1.train.GradientDescentOptimizer",
"tensorflow.compat.v1.matmul",
"tensorflow.compat.v1.Session",
"tensorflow.compat.v1.abs",
"tensorflow.compat.v1.random_normal",
"numpy.linspace",
"tenso... | [((85, 109), 'tensorflow.compat.v1.disable_v2_behavior', 'tf.disable_v2_behavior', ([], {}), '()\n', (107, 109), True, 'import tensorflow.compat.v1 as tf\n'), ((1748, 1784), 'numpy.linspace', 'np.linspace', ([], {'start': '(0)', 'stop': '(5)', 'num': '(20)'}), '(start=0, stop=5, num=20)\n', (1759, 1784), True, 'import numpy as np\n'), ((1970, 2006), 'matplotlib.pyplot.legend', 'plt.legend', (["['actual', 'prediction']"], {}), "(['actual', 'prediction'])\n", (1980, 2006), True, 'import matplotlib.pyplot as plt\n'), ((2011, 2021), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2019, 2021), True, 'import matplotlib.pyplot as plt\n'), ((279, 291), 'tensorflow.compat.v1.Session', 'tf.Session', ([], {}), '()\n', (289, 291), True, 'import tensorflow.compat.v1 as tf\n'), ((508, 567), 'tensorflow.compat.v1.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.float32', 'shape': '(None, feature_len)'}), '(dtype=tf.float32, shape=(None, feature_len))\n', (522, 567), True, 'import tensorflow.compat.v1 as tf\n'), ((585, 634), 'tensorflow.compat.v1.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.float32', 'shape': '(None, 1)'}), '(dtype=tf.float32, shape=(None, 1))\n', (599, 634), True, 'import tensorflow.compat.v1 as tf\n'), ((1128, 1190), 'tensorflow.compat.v1.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', ([], {'learning_rate': 'learning_rate'}), '(learning_rate=learning_rate)\n', (1161, 1190), True, 'import tensorflow.compat.v1 as tf\n'), ((665, 705), 'tensorflow.compat.v1.random_normal', 'tf.random_normal', ([], {'shape': '(feature_len, 1)'}), '(shape=(feature_len, 1))\n', (681, 705), True, 'import tensorflow.compat.v1 as tf\n'), ((736, 764), 'tensorflow.compat.v1.random_normal', 'tf.random_normal', ([], {'shape': '(1,)'}), '(shape=(1,))\n', (752, 764), True, 'import tensorflow.compat.v1 as tf\n'), ((789, 814), 'tensorflow.compat.v1.matmul', 'tf.matmul', (['self.X', 'self.W'], {}), '(self.X, self.W)\n', (798, 814), True, 'import 
tensorflow.compat.v1 as tf\n'), ((1254, 1287), 'tensorflow.compat.v1.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (1285, 1287), True, 'import tensorflow.compat.v1 as tf\n'), ((1013, 1028), 'tensorflow.compat.v1.norm', 'tf.norm', (['self.W'], {}), '(self.W)\n', (1020, 1028), True, 'import tensorflow.compat.v1 as tf\n'), ((1067, 1095), 'tensorflow.compat.v1.abs', 'tf.abs', (['(self.y_pred - self.y)'], {}), '(self.y_pred - self.y)\n', (1073, 1095), True, 'import tensorflow.compat.v1 as tf\n')] |
import numpy
from .agent_base import AgentBase
class RandomAgent(AgentBase):
    """Agent whose target is re-drawn uniformly at random from the world."""

    def reset_target(self):
        """Sample a fresh 2-D target inside the world's (x, y) boundary."""
        bounds = self.world.get_boundary()
        # bounds[axis] holds (low, high); draw x before y so the random
        # stream is consumed in the same order as before.
        self.target = numpy.array([
            numpy.random.uniform(bounds[axis][0], bounds[axis][1])
            for axis in (0, 1)
        ])
| [
"numpy.random.uniform"
] | [((202, 254), 'numpy.random.uniform', 'numpy.random.uniform', (['boundary[0][0]', 'boundary[0][1]'], {}), '(boundary[0][0], boundary[0][1])\n', (222, 254), False, 'import numpy\n'), ((268, 320), 'numpy.random.uniform', 'numpy.random.uniform', (['boundary[1][0]', 'boundary[1][1]'], {}), '(boundary[1][0], boundary[1][1])\n', (288, 320), False, 'import numpy\n')] |
from numpy.core.numeric import roll
from libs.effects.cube_effect import CubeEffect
import numpy as np
import random
import time
class CubeEffectA(CubeEffect):
    """Audio-reactive LED-cube effect: four colored equalizer walls.

    Each side wall of the cube shows mel-band magnitudes as vertical bars:
    front red, right blue, left green, back yellow.
    """
    def run(self):
        """Render one frame from the current audio buffer and queue it."""
        # Config/device reads kept for parity with the other effects; only
        # the audio pipeline below actually drives this frame.
        effect_config = self.get_effect_config("effect_power")
        led_count = self._device.device_config["led_count"]
        n_fft_bins = self._config["general_settings"]["n_fft_bins"]
        led_mid = self._device.device_config["led_mid"]
        audio_data = self.get_audio_data()
        y = self.get_mel(audio_data)
        if y is None:
            # No audio available this tick; skip the frame.
            # (A second identical check later in the original was
            # unreachable and has been removed.)
            return
        # Scale the mel bands to integer bar heights in [0, 8].
        y_max = np.max(y)
        if y_max > 0:
            y_d = y * 8 / y_max
        else:
            y_d = y
        self.no_voice = (y_max == 0)
        y_d = y_d.astype(int)
        output = np.zeros((3, self.kx * self.ky * self.kz))
        for band in range(0, 7):
            for i in range(0, y_d[band]):
                # One voxel per wall at height i for this band.
                front = self.get_position(band, 0, i)
                right = self.get_position(0, 7 - band, i)
                left = self.get_position(7, band, i)
                back = self.get_position(7 - band, 7, i)
                # RGB per wall: front=red, right=blue, left=green, back=yellow.
                output[0][front] = 255
                output[1][front] = 0
                output[2][front] = 0
                output[0][right] = 0
                output[1][right] = 0
                output[2][right] = 255
                output[0][left] = 0
                output[1][left] = 255
                output[2][left] = 0
                output[0][back] = 255
                output[1][back] = 255
                output[2][back] = 0
        self.queue_output_array_noneblocking(output)
"numpy.max",
"numpy.zeros"
] | [((569, 578), 'numpy.max', 'np.max', (['y'], {}), '(y)\n', (575, 578), True, 'import numpy as np\n'), ((802, 844), 'numpy.zeros', 'np.zeros', (['(3, self.kx * self.ky * self.kz)'], {}), '((3, self.kx * self.ky * self.kz))\n', (810, 844), True, 'import numpy as np\n')] |
"""
Plot results of a particle tracking experiment, specific to experiments about
vertical mixing of particles.
e.g. from:
python tracker.py -exp vmix -3d True -clb True -no_advection True
"""
# setup
import os, sys
sys.path.append(os.path.abspath('../alpha'))
import Lfun
sys.path.append(os.path.abspath('../plotting'))
import pfun
import matplotlib.pyplot as plt
import netCDF4 as nc4
import numpy as np
import seawater as sw
# Initialize LiveOcean path bookkeeping (Ldir maps names to directories).
Ldir = Lfun.Lstart()
if True:
    # Interactive mode: let the user pick an experiment and release file.
    # Choose an experiment to plot from.
    indir0 = Ldir['LOo'] + 'tracks/'
    indir_list_raw = os.listdir(indir0)
    indir_list = []
    for d in indir_list_raw:
        if os.path.isdir(indir0 + d):
            indir_list.append(d)
    indir_list.sort()
    # Only vertical-mixing experiments are offered.
    indir_list = [item for item in indir_list if 'vmix' in item]
    Npt = len(indir_list)
    print('\n%s\n' % '** Choose Experiment to plot **')
    for npt in range(Npt):
        print(str(npt) + ': ' + indir_list[npt])
    my_npt = input('-- Experiment number (return = 0) --')
    if len(my_npt)==0:
        my_npt = 0
    indir = indir_list[int(my_npt)] + '/'
    # Choose a release from this experiment.
    rel_list = [rel for rel in os.listdir(indir0 + indir) if 'release' in rel]
    rel_list.sort()
    Nrl = len(rel_list)
    print('\n%s\n' % '** Choose Release file to plot **')
    for nrl in range(Nrl):
        print(str(nrl) + ': ' + rel_list[nrl])
    my_nrl = input('-- Release number (return = 0) -- ')
    if len(my_nrl)==0:
        my_nrl = 0
    rel = rel_list[int(my_nrl)]
else:
    # Hard-coded fallback: a specific release Dataset for quick re-runs.
    indir0 = Ldir['LOo'] + 'tracks/'
    indir = 'vmix_ndiv12_3d_nadv/'
    rel = 'release_2019.07.04.nc'
dsr = nc4.Dataset(indir0 + indir + rel)
# Track arrays are packed [time, particle #].
NT, NP = dsr['lon'].shape
# get a list of datetimes (dt_list is currently unused downstream)
ot_vec = dsr['ot'][:]
dt_list = [Lfun.modtime_to_datetime(ot) for ot in ot_vec]
# Elapsed time in hours since the start of the release.
t = (ot_vec - ot_vec[0])/3600
# Gather particle data
# packed [time, particle #]
lon = dsr['lon'][:]
lat = dsr['lat'][:]
z = dsr['z'][:]
h = dsr['h'][:]
salt = dsr['salt'][:]
temp = dsr['temp'][:]
cs = dsr['cs'][:]
zeta = dsr['zeta'][:]
dsr.close()
# Rescale z to remove tides: depth from fractional sigma coordinate cs
# times bottom depth h. NOTE(review): zeta is loaded but not used here.
ZZ = cs*(h)
if False:
    # Monte-Carlo envelope: histogram counts of uniformly random samples,
    # giving min/max reference curves per bin (disabled; see else branch).
    aa = np.nan * np.ones((28,4000))
    abins = np.linspace(0,1,29)
    for ii in range(4000):
        a = np.random.random(4000)
        aa[:,ii], aobins = np.histogram(a, bins=abins)
    amin = aa.min(axis=1)
    amax = aa.max(axis=1)
else:
    # Precomputed envelope values; presumably derived from the Monte-Carlo
    # branch above — verify before changing bin counts.
    amin = 102.5 * np.ones(28)
    amax = 187.3 * np.ones(28)
# PLOTTING
#plt.close('all')
fs = 14
plt.rc('font', size=fs)
fig = plt.figure(figsize=(20,10))
# One histogram panel per region; particles are split into thirds.
title_list = ['Slope', '<NAME>', '<NAME>']
for jj in [1,2,3]:
    NN = int(lon.shape[1]/3)
    zz = ZZ[:,NN*(jj-1):NN*jj - 1]
    zmin = zz.min()
    # Scale depths so the deepest particle maps to -1.
    zs = zz/(-zmin)
    ax = fig.add_subplot(1,3,jj)
    bins=np.linspace(-1, 0, 29)
    for ii in range(NT):
        counts, obins = np.histogram(zs[ii,:], bins=bins)
        ax.plot(counts, bins[:-1],'-o', label='Hour = %d' % (t[ii]))
    # Gray reference envelope of a well-mixed (uniform) distribution.
    ax.plot(amin, bins[:-1],'-k',lw=3, alpha=.3)
    ax.plot(amax, bins[:-1],'-k',lw=3, alpha=.3)
    ax.set_xlim(0,300)
    ax.set_xlabel('Counts')
    ax.set_ylabel('Scaled Z')
    ax.set_title(title_list[jj-1])
plt.show()
plt.rcdefaults()
| [
"netCDF4.Dataset",
"os.path.abspath",
"matplotlib.pyplot.show",
"Lfun.modtime_to_datetime",
"os.path.isdir",
"numpy.ones",
"matplotlib.pyplot.rcdefaults",
"matplotlib.pyplot.figure",
"numpy.random.random",
"numpy.histogram",
"matplotlib.pyplot.rc",
"Lfun.Lstart",
"numpy.linspace",
"os.list... | [((439, 452), 'Lfun.Lstart', 'Lfun.Lstart', ([], {}), '()\n', (450, 452), False, 'import Lfun\n'), ((1666, 1699), 'netCDF4.Dataset', 'nc4.Dataset', (['(indir0 + indir + rel)'], {}), '(indir0 + indir + rel)\n', (1677, 1699), True, 'import netCDF4 as nc4\n'), ((2511, 2534), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'size': 'fs'}), "('font', size=fs)\n", (2517, 2534), True, 'import matplotlib.pyplot as plt\n'), ((2541, 2569), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 10)'}), '(figsize=(20, 10))\n', (2551, 2569), True, 'import matplotlib.pyplot as plt\n'), ((3186, 3196), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3194, 3196), True, 'import matplotlib.pyplot as plt\n'), ((3197, 3213), 'matplotlib.pyplot.rcdefaults', 'plt.rcdefaults', ([], {}), '()\n', (3211, 3213), True, 'import matplotlib.pyplot as plt\n'), ((234, 261), 'os.path.abspath', 'os.path.abspath', (['"""../alpha"""'], {}), "('../alpha')\n", (249, 261), False, 'import os, sys\n'), ((291, 321), 'os.path.abspath', 'os.path.abspath', (['"""../plotting"""'], {}), "('../plotting')\n", (306, 321), False, 'import os, sys\n'), ((562, 580), 'os.listdir', 'os.listdir', (['indir0'], {}), '(indir0)\n', (572, 580), False, 'import os, sys\n'), ((1787, 1815), 'Lfun.modtime_to_datetime', 'Lfun.modtime_to_datetime', (['ot'], {}), '(ot)\n', (1811, 1815), False, 'import Lfun\n'), ((2215, 2236), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(29)'], {}), '(0, 1, 29)\n', (2226, 2236), True, 'import numpy as np\n'), ((2796, 2818), 'numpy.linspace', 'np.linspace', (['(-1)', '(0)', '(29)'], {}), '(-1, 0, 29)\n', (2807, 2818), True, 'import numpy as np\n'), ((641, 666), 'os.path.isdir', 'os.path.isdir', (['(indir0 + d)'], {}), '(indir0 + d)\n', (654, 666), False, 'import os, sys\n'), ((2184, 2203), 'numpy.ones', 'np.ones', (['(28, 4000)'], {}), '((28, 4000))\n', (2191, 2203), True, 'import numpy as np\n'), ((2274, 2296), 'numpy.random.random', 'np.random.random', 
(['(4000)'], {}), '(4000)\n', (2290, 2296), True, 'import numpy as np\n'), ((2325, 2352), 'numpy.histogram', 'np.histogram', (['a'], {'bins': 'abins'}), '(a, bins=abins)\n', (2337, 2352), True, 'import numpy as np\n'), ((2430, 2441), 'numpy.ones', 'np.ones', (['(28)'], {}), '(28)\n', (2437, 2441), True, 'import numpy as np\n'), ((2461, 2472), 'numpy.ones', 'np.ones', (['(28)'], {}), '(28)\n', (2468, 2472), True, 'import numpy as np\n'), ((2868, 2902), 'numpy.histogram', 'np.histogram', (['zs[ii, :]'], {'bins': 'bins'}), '(zs[ii, :], bins=bins)\n', (2880, 2902), True, 'import numpy as np\n'), ((1167, 1193), 'os.listdir', 'os.listdir', (['(indir0 + indir)'], {}), '(indir0 + indir)\n', (1177, 1193), False, 'import os, sys\n')] |
from Cython.Build import cythonize
import os
from os.path import join as pjoin
import numpy as np
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
def find_in_path(name, path):
    """Return the absolute path of *name* found in *path*, or None.

    *path* is an os.pathsep-separated list of directories searched in order.
    """
    candidates = (pjoin(directory, name) for directory in path.split(os.pathsep))
    return next(
        (os.path.abspath(candidate)
         for candidate in candidates if os.path.exists(candidate)),
        None,
    )
# Locate NumPy's C headers; get_numpy_include() is the pre-1.0 fallback name.
try:
    numpy_include = np.get_include()
except AttributeError:
    numpy_include = np.get_numpy_include()
def customize_compiler_for_nvcc(self):
    """Patch a compiler instance so '.cu' sources are accepted.

    Wraps the instance's low-level ``_compile`` hook so the per-compiler
    ``extra_postargs`` dict is reduced to the plain 'gcc' flag list that
    distutils expects, and restores ``compiler_so`` after each compile.

    Fixes vs. the previous version: drops a leftover debug print, avoids a
    NameError when the instance has no ``compiler_so`` attribute, and no
    longer shadows the ``super`` builtin.
    """
    self.src_extensions.append('.cu')
    # getattr default keeps the closure variable bound even when the
    # compiler object has no compiler_so attribute.
    default_compiler_so = getattr(self, 'compiler_so', None)
    original_compile = self._compile
    def _compile(obj, src, ext, cc_args, extra_postargs, pp_opts):
        # Select the gcc-specific flags out of the per-compiler dict.
        postargs = extra_postargs['gcc']
        original_compile(obj, src, ext, cc_args, postargs, pp_opts)
        # Reset compiler_so, which a cuda compile step may have changed.
        self.compiler_so = default_compiler_so
    # Inject the redefined _compile method into this instance.
    self._compile = _compile
class custom_build_ext(build_ext):
    """build_ext variant that installs the nvcc-aware compiler hook first."""

    def build_extensions(self):
        # Patch the compiler instance, then delegate to the stock builder.
        customize_compiler_for_nvcc(self.compiler)
        super().build_extensions()
# NOTE(review): extra_compile_args is a dict keyed by compiler name; this
# only works because custom_build_ext's patched _compile unpacks the 'gcc'
# entry — stock distutils expects a flat list here.
ext_modules = [
    Extension(
        "utils.bbox",
        ["bbox.pyx"],
        extra_compile_args={'gcc': ["-Wno-cpp", "-Wno-unused-function"]},
        include_dirs = [numpy_include]
    ),
    Extension(
        "utils.cython_nms",
        ["cython_nms.pyx"],
        extra_compile_args={'gcc': ["-Wno-cpp", "-Wno-unused-function"]},
        include_dirs = [numpy_include]
    ),
]
# Build the Cython extensions with the nvcc-aware build_ext subclass.
setup(
    ext_modules=ext_modules,
    cmdclass={'build_ext': custom_build_ext},
)
| [
"os.path.abspath",
"distutils.core.setup",
"numpy.get_numpy_include",
"os.path.exists",
"distutils.extension.Extension",
"numpy.get_include",
"Cython.Distutils.build_ext.build_extensions",
"os.path.join"
] | [((1768, 1840), 'distutils.core.setup', 'setup', ([], {'ext_modules': 'ext_modules', 'cmdclass': "{'build_ext': custom_build_ext}"}), "(ext_modules=ext_modules, cmdclass={'build_ext': custom_build_ext})\n", (1773, 1840), False, 'from distutils.core import setup\n'), ((439, 455), 'numpy.get_include', 'np.get_include', ([], {}), '()\n', (453, 455), True, 'import numpy as np\n'), ((1399, 1537), 'distutils.extension.Extension', 'Extension', (['"""utils.bbox"""', "['bbox.pyx']"], {'extra_compile_args': "{'gcc': ['-Wno-cpp', '-Wno-unused-function']}", 'include_dirs': '[numpy_include]'}), "('utils.bbox', ['bbox.pyx'], extra_compile_args={'gcc': [\n '-Wno-cpp', '-Wno-unused-function']}, include_dirs=[numpy_include])\n", (1408, 1537), False, 'from distutils.extension import Extension\n'), ((1578, 1727), 'distutils.extension.Extension', 'Extension', (['"""utils.cython_nms"""', "['cython_nms.pyx']"], {'extra_compile_args': "{'gcc': ['-Wno-cpp', '-Wno-unused-function']}", 'include_dirs': '[numpy_include]'}), "('utils.cython_nms', ['cython_nms.pyx'], extra_compile_args={'gcc':\n ['-Wno-cpp', '-Wno-unused-function']}, include_dirs=[numpy_include])\n", (1587, 1727), False, 'from distutils.extension import Extension\n'), ((300, 316), 'os.path.join', 'pjoin', (['dir', 'name'], {}), '(dir, name)\n', (305, 316), True, 'from os.path import join as pjoin\n'), ((328, 351), 'os.path.exists', 'os.path.exists', (['binpath'], {}), '(binpath)\n', (342, 351), False, 'import os\n'), ((499, 521), 'numpy.get_numpy_include', 'np.get_numpy_include', ([], {}), '()\n', (519, 521), True, 'import numpy as np\n'), ((1345, 1377), 'Cython.Distutils.build_ext.build_extensions', 'build_ext.build_extensions', (['self'], {}), '(self)\n', (1371, 1377), False, 'from Cython.Distutils import build_ext\n'), ((372, 396), 'os.path.abspath', 'os.path.abspath', (['binpath'], {}), '(binpath)\n', (387, 396), False, 'import os\n')] |
import numpy as np
class Sudoku:
    """A Sudoku grid (0 marks an empty cell) with a naive one-pass solver."""

    def __init__(self, main, size):
        """Build a size x size grid from a {(row, col): value} dict (1-based keys)."""
        self.cell = np.zeros([size, size])
        for i in main:
            self.cell[i[0]-1, i[1]-1] = main[i]

    def p(self, guess=False):
        """Print the current grid (*guess* is unused, kept for API parity)."""
        print(self.cell)

    def solve(self, i, j):
        """Fill cell (i, j) (1-based) if its row, column and 3x3 box already
        pin down eight distinct digits; otherwise leave it unchanged."""
        if self.cell[i-1, j-1] != 0:
            return
        # Values already present in the cell's row and column.
        value_set = set(np.append(self.cell[i-1, :], self.cell[:, j-1]))
        # Add the values of the enclosing 3x3 box.
        (m, n) = ((i-1)//3, (j-1)//3)
        for row in range(3):
            for col in range(3):
                value_set.add(self.cell[m*3+row, n*3+col])
        # Nine distinct entries including 0 means eight digits are taken:
        # the cell must hold the single missing digit.
        if len(value_set) == 9 and 0 in value_set:
            self.cell[i-1, j-1] = list(set(range(1, 10)) - value_set)[0]

    def solver(self):
        """Run one solve() pass over every cell of a 9x9 grid."""
        for i in range(1, 10):
            for j in range(1, 10):
                self.solve(i, j)
# Clue set for an example puzzle, keyed by (row, col) with 1-based indices.
main={(1,1):5,(1,2):3,(2,1):6,(3,2):9,(3,3):8,(1,5):7,(2,4):1,
(2,5):9,(2,6):5,(3,8):6,(4,1):8,(5,1):4,(6,1):7,(5,4):8,(4,5):6,
(6,5):2,(5,6):3,(4,9):3,(5,9):1,(6,9):6,(7,2):6,(8,4):4,(8,5):1,
(8,6):9,(9,5):8,(7,7):2,(7,8):8,(8,9):5,(9,8):7,(9,9):9}
game=Sudoku(main,9)
game.p()
# Run five full solver passes, printing the grid after each one.
for _ in range(5):
    game.solver()
    game.p()
    print('-'*30)
| [
"numpy.append",
"numpy.zeros"
] | [((86, 108), 'numpy.zeros', 'np.zeros', (['[size, size]'], {}), '([size, size])\n', (94, 108), True, 'import numpy as np\n'), ((410, 461), 'numpy.append', 'np.append', (['self.cell[i - 1, :]', 'self.cell[:, j - 1]'], {}), '(self.cell[i - 1, :], self.cell[:, j - 1])\n', (419, 461), True, 'import numpy as np\n')] |
import io, sys
import numpy as np
from heapq import *
def load_vectors(filename):
    """Load fastText-style word vectors from *filename*.

    The first line holds "<vocab_size> <dim>"; every following line is a
    word followed by its space-separated vector components.

    Returns a dict mapping each word to a 1-D numpy float array.
    """
    data = {}
    # Context manager guarantees the file is closed (previously leaked).
    with io.open(filename, 'r', encoding='utf-8', newline='\n') as fin:
        # Consume the header; n and d themselves are not needed here.
        n, d = map(int, fin.readline().split())
        for line in fin:
            tokens = line.rstrip().split(' ')
            data[tokens[0]] = np.asarray(list(map(float, tokens[1:])))
    return data
## Cosine similarity between vectors u and v.
def cosine(u, v):
    """Return the cosine similarity of numpy vectors u and v.

    Note: yields nan/inf if either vector has zero norm.
    """
    return u.dot(v)/(np.linalg.norm(u)* np.linalg.norm(v))

## Nearest-neighbor search in the embedding space.
def nearest_neighbor(x, word_vectors, exclude_words=None):
    """Return the word whose vector is most cosine-similar to x.

    Words in *exclude_words* are skipped (useful for leaving out the query
    word itself). The default is None instead of the previous mutable list.
    """
    excluded = frozenset(exclude_words) if exclude_words else frozenset()
    best_score = -1.0
    best_word = None
    for word, vector in word_vectors.items():
        if word in excluded:
            continue
        score = cosine(vector, x)
        if score > best_score:
            best_score = score
            best_word = word
    return best_word
## This function return the words corresponding to the
## K nearest neighbors of vector x.
## You can use the functions heappush and heappop.
def knn(x, vectors, k):
    """Return (score, word) pairs for neighbors of x.

    NOTE(review): `heap` is filled with plain list.append — already in
    decreasing-score order, since nearest_neighbor is queried with a growing
    exclude list — and then drained with heappop, which assumes a min-heap
    invariant the list does not satisfy. Combined with the final
    reverse-and-drop-last slicing this happens to discard the single best
    match (normally the query word itself) for small k, but the ordering is
    not guaranteed in general — verify before relying on it.
    """
    heap = []
    exclude = []
    for ki in range(k+1):
        b_word = nearest_neighbor(x, vectors, exclude)
        score = cosine(x, vectors[b_word])
        heap.append((score, b_word))
        exclude.append(b_word)
    return [heappop(heap) for i in range(len(heap))][::-1][:-1] # reverse and don't take the last element
## This function returns the word d such that a:b and c:d
## verify the same relation.
def analogy(a, b, c, word_vectors):
    """Return the word d completing the analogy a : b :: c : d.

    All vectors are L2-normalized before computing the offset
    x_c + x_b - x_a, and candidate words whose spelling contains a, b or c
    as a substring are skipped. Unlike the previous version, the caller's
    word_vectors dict is left unmodified.
    """
    a, b, c = a.lower(), b.lower(), c.lower()

    def unit(v):
        # Normalize locally: never write back into word_vectors.
        return v / np.linalg.norm(v)

    target = unit(word_vectors[c]) + unit(word_vectors[b]) - unit(word_vectors[a])
    best_anal = -np.inf
    best_anal_word = ''
    for word, vector in word_vectors.items():
        if a in word or b in word or c in word:
            continue
        anal = target.dot(unit(vector))
        if anal > best_anal:
            best_anal = anal
            best_anal_word = word
    return best_anal_word
## Compute the association strength between:
## - a word w
## - two sets of attributes A and B
def association_strength(w, A, B, vectors):
    """Mean cosine similarity of w with set A minus that with set B."""
    w_vec = vectors[w]
    mean_a = sum(cosine(w_vec, vectors[a]) for a in A) / len(A)
    mean_b = sum(cosine(w_vec, vectors[b]) for b in B) / len(B)
    return mean_a - mean_b
## Perform the word embedding association test between:
## - two sets of words X and Y
## - two sets of attributes A and B
def weat(X, Y, A, B, vectors):
    """WEAT score between target sets X, Y w.r.t. attribute sets A, B
    (Caliskan, Bryson, Narayanan, 2017)."""
    total_x = sum(association_strength(x, A, B, vectors) for x in X)
    total_y = sum(association_strength(y, A, B, vectors) for y in Y)
    return total_x - total_y
######## MAIN ########
print('')
print(' ** Word vectors ** ')
print('')
# Load embeddings from the file given as the first CLI argument.
word_vectors = load_vectors(sys.argv[1])
# Sanity-check similarities: related words should score higher.
print('similarity(apple, apples) = %.3f' %
      cosine(word_vectors['apple'], word_vectors['apples']))
print('similarity(apple, banana) = %.3f' %
      cosine(word_vectors['apple'], word_vectors['banana']))
print('similarity(apple, tiger) = %.3f' %
      cosine(word_vectors['apple'], word_vectors['tiger']))
print('')
print('The nearest neighbor of cat is: ' +
      nearest_neighbor(word_vectors['cat'], word_vectors))
# NOTE(review): knn_cat is computed here but never used -- the loop below
# recomputes the same call.
knn_cat = knn(word_vectors['cat'], word_vectors, 5)
print('')
print('cat')
print('--------------')
for score, word in knn(word_vectors['cat'], word_vectors, 5):
    print (word + '\t%.3f' % score)
print('')
# Word analogy demo: paris is to france as rome is to ?
print('france - paris + rome = ' + analogy('paris', 'france', 'rome', word_vectors))
## A word about biases in word vectors:
print('')
print('similarity(genius, man) = %.3f' %
      cosine(word_vectors['man'], word_vectors['genius']))
print('similarity(genius, woman) = %.3f' %
      cosine(word_vectors['woman'], word_vectors['genius']))
## Replicate one of the experiments from:
##
## Semantics derived automatically from language corpora contain human-like biases
## Caliskan, Bryson, Narayanan (2017)
# Target sets (career vs family) and attribute sets (male vs female names)
# used by the WEAT bias experiment below.
career = ['executive', 'management', 'professional', 'corporation',
          'salary', 'office', 'business', 'career']
family = ['home', 'parents', 'children', 'family',
          'cousins', 'marriage', 'wedding', 'relatives']
male = ['john', 'paul', 'mike', 'kevin', 'steve', 'greg', 'jeff', 'bill']
female = ['amy', 'joan', 'lisa', 'sarah', 'diana', 'kate', 'ann', 'donna']
print('')
print('Word embedding association test: %.3f' %
      weat(career, family, male, female, word_vectors))
| [
"numpy.linalg.norm",
"io.open"
] | [((93, 147), 'io.open', 'io.open', (['filename', '"""r"""'], {'encoding': '"""utf-8"""', 'newline': '"""\n"""'}), "(filename, 'r', encoding='utf-8', newline='\\n')\n", (100, 147), False, 'import io, sys\n'), ((1871, 1902), 'numpy.linalg.norm', 'np.linalg.norm', (['word_vectors[a]'], {}), '(word_vectors[a])\n', (1885, 1902), True, 'import numpy as np\n'), ((1929, 1960), 'numpy.linalg.norm', 'np.linalg.norm', (['word_vectors[b]'], {}), '(word_vectors[b])\n', (1943, 1960), True, 'import numpy as np\n'), ((1987, 2018), 'numpy.linalg.norm', 'np.linalg.norm', (['word_vectors[c]'], {}), '(word_vectors[c])\n', (2001, 2018), True, 'import numpy as np\n'), ((482, 499), 'numpy.linalg.norm', 'np.linalg.norm', (['u'], {}), '(u)\n', (496, 499), True, 'import numpy as np\n'), ((501, 518), 'numpy.linalg.norm', 'np.linalg.norm', (['v'], {}), '(v)\n', (515, 518), True, 'import numpy as np\n'), ((2204, 2238), 'numpy.linalg.norm', 'np.linalg.norm', (['word_vectors[word]'], {}), '(word_vectors[word])\n', (2218, 2238), True, 'import numpy as np\n')] |
# Copyright 2018 SAS Project Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Reference data model.
This defines the common objects used by the reference models, and
the utility routines for creating them for example from FAD objects.
"""
from collections import namedtuple
import enum
import numpy as np
from reference_models.geo import drive
from sas_test_harness import generateCbsdReferenceId
class ProtectedEntityType(enum.Enum):
  """The protected entity type.

  Enumerates the kinds of protected entities handled by the reference
  models: area-based zones (GWPZ, PPA), FSS earth stations (co-channel
  and blocking modes), ESC sensors and DPAs.
  """
  GWPZ_AREA = 1
  PPA_AREA = 2
  FSS_CO_CHANNEL = 3
  FSS_BLOCKING = 4
  ESC = 5
  DPA = 6
# A protection constraint: one protected point plus the frequency segment
# and the type of entity being protected there.
ProtectionConstraint = namedtuple(
    'ProtectionConstraint',
    ['latitude', 'longitude', 'low_frequency', 'high_frequency',
     'entity_type'])
# A CBSD Grant.
class CbsdGrantInfo(namedtuple('CbsdGrantInfo',
                               [# Installation params
                                'latitude', 'longitude', 'height_agl',
                                'indoor_deployment', 'cbsd_category',
                                'antenna_azimuth', 'antenna_gain',
                                'antenna_beamwidth',
                                # Grant params
                                'max_eirp',
                                'low_frequency', 'high_frequency',
                                'is_managed_grant'])):
  """CbsdGrantInfo.

  Immutable record combining the installation parameters of a CBSD with
  the parameters of one of its grants.  Being a (hashable) namedtuple, it
  can serve as a key in dictionaries and sets.

  Attributes:
    latitude: The CBSD latitude (degrees).
    longitude: The CBSD longitude (degrees).
    height_agl: The height above ground level (meters).
    indoor_deployment: True if indoor, False if outdoor.
    cbsd_category: Either 'A' for Cat A or 'B' for Cat B CBSD.
    antenna_azimuth: The antenna pointing azimuth relative to true north,
      defined clockwise.
    antenna_gain: The antenna nominal gain (dBi).
    antenna_beamwidth: The 3dB antenna beamwidth (degrees).
    max_eirp: The maximum EIRP of the CBSD (dBm per MHz).
    low_frequency: The grant min frequency (Hz).
    high_frequency: The grant max frequency (Hz).
    is_managed_grant: True iff the grant belongs to the managing SAS.
  """
  __slots__ = ()

  def uniqueCbsdKey(self):
    """Returns unique CBSD key: the 8 installation fields only."""
    return self[:8]
# FSS antenna information: height (AGL), nominal max gain and the antenna
# pointing direction (azimuth and elevation).
FssInformation = namedtuple(
    'FssInformation',
    ['height_agl', 'max_gain_dbi', 'pointing_azimuth',
     'pointing_elevation'])

# ESC sensor information: antenna height, azimuth and the 360-entry
# azimuth gain pattern.
EscInformation = namedtuple(
    'EscInformation',
    ['antenna_height', 'antenna_azimuth', 'antenna_gain_pattern'])
def getFssInfo(fss_record):
  """Extracts FSS information from a FSS record.

  Args:
    fss_record: A FSS record (dict).
  Returns:
    A tuple of:
      fss_point: A (longitude, latitude) tuple.
      fss_info: A |FssInformation| tuple.
      fss_freq_range: A (freq_min, freq_max) tuple.
  """
  deployment = fss_record['record']['deploymentParam'][0]
  install = deployment['installationParam']
  latitude = install['latitude']
  longitude = install['longitude']
  # Convert the height to AGL if it was AMSL type.
  # TODO(sbdt): verify if FSS can really be AMSL ??
  height_agl = install['height']
  if install['heightType'] == 'AMSL':
    height_agl -= drive.terrain_driver.GetTerrainElevation(latitude, longitude)
  fss_info = FssInformation(
      height_agl=height_agl,
      max_gain_dbi=install['antennaGain'],
      pointing_azimuth=install['antennaAzimuth'],
      # Elevation is the negated downtilt.
      pointing_elevation=-install['antennaDowntilt'])
  # Frequency range of the FSS.
  freq_range = deployment['operationParam']['operationFrequencyRange']
  fss_freq_range = (freq_range['lowFrequency'], freq_range['highFrequency'])
  return (longitude, latitude), fss_info, fss_freq_range
def getEscInfo(esc_record):
  """Extracts ESC information from a ESC record.

  Args:
    esc_record: A ESC record (dict of schema |EscSensorRecord|)).
  Returns:
    A tuple of:
      esc_point: A (longitude, latitude) tuple.
      esc_info: A |EscInformation| tuple.
  Raises:
    ValueError: if the azimuth pattern does not cover exactly 0..359 deg.
  """
  install = esc_record['installationParam']
  pattern = sorted(
      (entry['angle'], entry['gain'])
      for entry in install['azimuthRadiationPattern'])
  angles, gains = zip(*pattern)
  # One gain entry is required for every integer degree.
  if angles != tuple(range(360)):
    raise ValueError('ESC pattern inconsistent')
  esc_info = EscInformation(
      antenna_height=install['height'],
      antenna_azimuth=install['antennaAzimuth'],
      antenna_gain_pattern=np.array(gains))
  return (install['longitude'], install['latitude']), esc_info
def constructCbsdGrantInfo(reg_request, grant_request, is_managing_sas=True):
  """Constructs a |CbsdGrantInfo| tuple from the given data.

  Args:
    reg_request: a CBSD registration request dict (uses 'installationParam'
      and 'cbsdCategory').
    grant_request: a grant request dict (uses 'operationParam'), or None,
      in which case the grant fields are set to None.
    is_managing_sas: True iff the record comes from the managing SAS.
  """
  install = reg_request['installationParam']
  height = install['height']
  if install['heightType'] == 'AMSL':
    # TODO(sbdt): move the feature of AMSL support within the prop models.
    height -= drive.terrain_driver.GetTerrainElevation(
        install['latitude'], install['longitude'])
  if grant_request is None:
    max_eirp = low_frequency = high_frequency = None
  else:
    op_param = grant_request['operationParam']
    max_eirp = op_param['maxEirp']
    low_frequency = op_param['operationFrequencyRange']['lowFrequency']
    high_frequency = op_param['operationFrequencyRange']['highFrequency']
  return CbsdGrantInfo(
      # Installation parameters from the registration request.
      latitude=install['latitude'],
      longitude=install['longitude'],
      height_agl=height,
      indoor_deployment=install['indoorDeployment'],
      antenna_azimuth=install['antennaAzimuth'],
      antenna_gain=install['antennaGain'],
      antenna_beamwidth=install['antennaBeamwidth'],
      cbsd_category=reg_request['cbsdCategory'],
      max_eirp=max_eirp,
      low_frequency=low_frequency,
      high_frequency=high_frequency,
      is_managed_grant=is_managing_sas)
def getCbsdsNotPartOfPpaCluster(cbsds, ppa_record):
  """Returns the CBSDs that are not part of a PPA cluster list.

  Args:
    cbsds : List of CBSDData objects.
    ppa_record : A PPA record dictionary.
  Returns:
    A list of CBSDs that are not part of the PPA cluster list.
  """
  cluster_ids = ppa_record['ppaInfo']['cbsdReferenceId']
  # Keep only the CBSDs whose reference id is absent from the cluster list.
  return [cbsd for cbsd in cbsds if cbsd['id'] not in cluster_ids]
def getAllGrantInfoFromCbsdDataDump(cbsd_data_records, is_managing_sas=True,
                                    ppa_record=None):
  """Returns a list of |CbsdGrantInfo| from FAD object.

  Args:
    cbsd_data_records: A list of |CbsdData| objects retrieved from FAD records.
    is_managing_sas: Flag indicating if the `cbsd_data_record` from the
      managing SAS (True) or a peer SAS (False).
    ppa_record: A PPA record dictionary. If None, ignored. If set, the
      returned grants are not part of the PPA cluster list.
  """
  records = cbsd_data_records
  if ppa_record is not None:
    records = getCbsdsNotPartOfPpaCluster(records, ppa_record)
  # One |CbsdGrantInfo| per (CBSD, grant) pair.
  return [
      constructCbsdGrantInfo(record['registration'], grant,
                             is_managing_sas=is_managing_sas)
      for record in records
      for grant in record['grants']]
def getGrantObjectsFromFAD(sas_uut_fad_object, sas_th_fad_objects,
                           ppa_record=None):
  """Returns a list of |CbsdGrantInfo| for SAS UUT and peer SAS TH.

  Args:
    sas_uut_fad_object: FAD object from SAS UUT
    sas_th_fad_objects: a list of FAD objects from SAS Test Harness
    ppa_record: A PPA record dictionary. If None, ignored. If set, the
      returned grants are not part of the PPA cluster list.
  """
  # UUT grants are flagged as managed; test-harness grants are not.
  grants = getAllGrantInfoFromCbsdDataDump(
      sas_uut_fad_object.getCbsdRecords(), True, ppa_record)
  for th_fad in sas_th_fad_objects:
    grants.extend(getAllGrantInfoFromCbsdDataDump(
        th_fad.getCbsdRecords(), False, ppa_record))
  return grants
def getGrantsFromRequests(registration_requests, grant_requests, is_managing_sas=True):
  """Returns a list of |CbsdGrantInfo| from some registration/grant requests.

  Args:
    registration_requests: A list of CBSD registration requests, each one a
      dict containing the CBSD registration information
      (field 'installationParam').
    grant_requests: A list of CBSD grant requests matching
      `registration_requests` pairwise, each one a dict containing the grant
      information (field 'operationParam').
    is_managing_sas: Flag indicating if the records come from the managing
      SAS (True) or a peer SAS (False).
  """
  # Pair each registration with its grant request positionally.
  return [
      constructCbsdGrantInfo(reg_request, grant_request, is_managing_sas)
      for reg_request, grant_request
      in zip(registration_requests, grant_requests)]
def getAuthorizedGrantsFromDomainProxies(domain_proxies, ppa_record=None):
  """Returns a list of |CbsdGrantInfo| from some Domain Proxy objects.

  Args:
    domain_proxies: A list of DomainProxy objects to build |CbsdGrantInfo|
      from.
    ppa_record: Optional. A PPA record dictionary. Iff set, the returned
      grant info is not part of the PPA cluster list.
  Returns:
    A list of |CbsdGrantInfo| for each authorized grant in the given Domain
    Proxies.
  """
  grants = []
  for proxy in domain_proxies:
    for cbsd in proxy.getCbsdsWithAtLeastOneAuthorizedGrant():
      # Skip CBSDs belonging to the PPA cluster list, when one is given.
      if ppa_record and cbsd.getCbsdId() in ppa_record['ppaInfo']['cbsdReferenceId']:
        continue
      reg_request = cbsd.getRegistrationRequest()
      for grant in cbsd.getAuthorizedGrants():
        grants.append(
            constructCbsdGrantInfo(reg_request, grant.getGrantRequest()))
  return grants
| [
"reference_models.geo.drive.terrain_driver.GetTerrainElevation",
"numpy.array",
"collections.namedtuple"
] | [((1181, 1296), 'collections.namedtuple', 'namedtuple', (['"""ProtectionConstraint"""', "['latitude', 'longitude', 'low_frequency', 'high_frequency', 'entity_type']"], {}), "('ProtectionConstraint', ['latitude', 'longitude',\n 'low_frequency', 'high_frequency', 'entity_type'])\n", (1191, 1296), False, 'from collections import namedtuple\n'), ((1434, 1675), 'collections.namedtuple', 'namedtuple', (['"""CbsdGrantInfo"""', "['latitude', 'longitude', 'height_agl', 'indoor_deployment',\n 'cbsd_category', 'antenna_azimuth', 'antenna_gain', 'antenna_beamwidth',\n 'max_eirp', 'low_frequency', 'high_frequency', 'is_managed_grant']"], {}), "('CbsdGrantInfo', ['latitude', 'longitude', 'height_agl',\n 'indoor_deployment', 'cbsd_category', 'antenna_azimuth', 'antenna_gain',\n 'antenna_beamwidth', 'max_eirp', 'low_frequency', 'high_frequency',\n 'is_managed_grant'])\n", (1444, 1675), False, 'from collections import namedtuple\n'), ((3138, 3244), 'collections.namedtuple', 'namedtuple', (['"""FssInformation"""', "['height_agl', 'max_gain_dbi', 'pointing_azimuth', 'pointing_elevation']"], {}), "('FssInformation', ['height_agl', 'max_gain_dbi',\n 'pointing_azimuth', 'pointing_elevation'])\n", (3148, 3244), False, 'from collections import namedtuple\n'), ((3439, 3534), 'collections.namedtuple', 'namedtuple', (['"""EscInformation"""', "['antenna_height', 'antenna_azimuth', 'antenna_gain_pattern']"], {}), "('EscInformation', ['antenna_height', 'antenna_azimuth',\n 'antenna_gain_pattern'])\n", (3449, 3534), False, 'from collections import namedtuple\n'), ((6066, 6081), 'numpy.array', 'np.array', (['gains'], {}), '(gains)\n', (6074, 6081), True, 'import numpy as np\n'), ((4554, 4623), 'reference_models.geo.drive.terrain_driver.GetTerrainElevation', 'drive.terrain_driver.GetTerrainElevation', (['fss_latitude', 'fss_longitude'], {}), '(fss_latitude, fss_longitude)\n', (4594, 4623), False, 'from reference_models.geo import drive\n'), ((6812, 6872), 
'reference_models.geo.drive.terrain_driver.GetTerrainElevation', 'drive.terrain_driver.GetTerrainElevation', (['lat_cbsd', 'lon_cbsd'], {}), '(lat_cbsd, lon_cbsd)\n', (6852, 6872), False, 'from reference_models.geo import drive\n')] |
__author__ = 'HarperMain'
from scipy.stats import norm
import numpy as np
# class EuropeanLookbackGreeks():
#
# def __init__(self, spot, strike, rate, dividend, sigma, expiry, t):
#
# self.spot = spot
# self.strike = strike
# self.rate = rate
# self.dividend = dividend
# self.sigma = sigma
# self.expiry = expiry
# self.t = t
#
# self.tau = tau = self.expiry-self.t
# self.d1 = (np.log(spot/strike) + (rate - dividend + 1/2 * sigma**2)*tau) / (np.sqrt(tau))
# self.d2 = self.d1 - sigma * np.sqrt(tau)
#
# self.Delta = self.EuroDelta()
# self.Gamma = self.EuroGamma()
# self.Theta = self.EuroTheta(rate, strike, tau, sigma, spot)
# self.Rho = self.EuroRho(tau, strike, rate)
# self.Vega = self.EuroVega(tau, spot)
def EuroDelta(d1):
    """Black-Scholes Delta (price sensitivity to spot): N(d1)."""
    return norm.cdf(d1)
def EuroGamma(d1, spot, sigma, tau):
    """Black-Scholes Gamma: second-order sensitivity of price to spot.

    Gamma = N'(d1) / (spot * sigma * sqrt(tau)), where N' is the standard
    normal density.  The original used norm.ppf(norm.cdf(d1)), which is
    the identity (it just returns d1) -- the density norm.pdf(d1) was
    intended.
    """
    return norm.pdf(d1) / (spot * sigma * np.sqrt(tau))
def EuroTheta(d1, d2, rate, strike, tau, sigma, spot):
    """Black-Scholes Theta (time decay) for a European call.

    Theta = -spot*sigma*N'(d1)/(2*sqrt(tau)) - rate*strike*exp(-rate*tau)*N(d2).

    Fixes two defects in the original: norm.ppf(norm.cdf(d1)) is the
    identity (the density N'(d1) was intended), and the first term's
    denominator was 2*tau instead of 2*sqrt(tau).
    """
    time_decay = sigma * spot * norm.pdf(d1) / (2.0 * np.sqrt(tau))
    discounted_tail = rate * strike * np.exp(-rate * tau) * norm.cdf(d2)
    return -time_decay - discounted_tail
def EuroRho(d2, tau, strike, rate):
    """Black-Scholes Rho (sensitivity to the risk-free rate):
    tau * strike * exp(-rate*tau) * N(d2)."""
    discount = np.exp(-rate * tau)
    return tau * strike * discount * norm.cdf(d2)
def EuroVega(d1, tau, spot):
    """Black-Scholes Vega: sensitivity of the option price to volatility.

    Vega = spot * sqrt(tau) * N'(d1).  The original multiplied by the
    cumulative N(d1) instead of the density N'(d1).
    """
    return np.sqrt(tau) * spot * norm.pdf(d1)
def EuroD1(spot, strike, rate, dividend, sigma, tau):
    """Black-Scholes d1 term.

    d1 = (ln(spot/strike) + (rate - dividend + sigma^2/2) * tau)
         / (sigma * sqrt(tau)).

    The original divided by sqrt(tau) only, omitting sigma from the
    denominator.
    """
    numerator = np.log(spot / strike) + (rate - dividend + 0.5 * sigma ** 2) * tau
    return numerator / (sigma * np.sqrt(tau))
def EuroD2(d1, sigma, tau):
    """Black-Scholes d2 term: d1 shifted down by sigma * sqrt(tau)."""
    vol_shift = sigma * np.sqrt(tau)
    return d1 - vol_shift
# def ReturnGreeks(self):
# EuroGreeks = {}
# EuroGreeks['Delta'] = self.Delta
# EuroGreeks['Gamma'] = self.Gamma
# EuroGreeks['Theta'] = self.Theta
# EuroGreeks['Rho'] = self.Rho
# EuroGreeks['Vega'] = self.Vega
#
# return EuroGreeks | [
"scipy.stats.norm.cdf",
"numpy.log",
"numpy.exp",
"numpy.sqrt"
] | [((909, 921), 'scipy.stats.norm.cdf', 'norm.cdf', (['d1'], {}), '(d1)\n', (917, 921), False, 'from scipy.stats import norm\n'), ((1458, 1470), 'scipy.stats.norm.cdf', 'norm.cdf', (['d2'], {}), '(d2)\n', (1466, 1470), False, 'from scipy.stats import norm\n'), ((1584, 1596), 'scipy.stats.norm.cdf', 'norm.cdf', (['d1'], {}), '(d1)\n', (1592, 1596), False, 'from scipy.stats import norm\n'), ((1777, 1789), 'numpy.sqrt', 'np.sqrt', (['tau'], {}), '(tau)\n', (1784, 1789), True, 'import numpy as np\n'), ((1051, 1063), 'scipy.stats.norm.cdf', 'norm.cdf', (['d1'], {}), '(d1)\n', (1059, 1063), False, 'from scipy.stats import norm\n'), ((1079, 1091), 'numpy.sqrt', 'np.sqrt', (['tau'], {}), '(tau)\n', (1086, 1091), True, 'import numpy as np\n'), ((1247, 1259), 'scipy.stats.norm.cdf', 'norm.cdf', (['d2'], {}), '(d2)\n', (1255, 1259), False, 'from scipy.stats import norm\n'), ((1438, 1457), 'numpy.exp', 'np.exp', (['(-rate * tau)'], {}), '(-rate * tau)\n', (1444, 1457), True, 'import numpy as np\n'), ((1566, 1578), 'numpy.sqrt', 'np.sqrt', (['tau'], {}), '(tau)\n', (1573, 1578), True, 'import numpy as np\n'), ((1679, 1700), 'numpy.log', 'np.log', (['(spot / strike)'], {}), '(spot / strike)\n', (1685, 1700), True, 'import numpy as np\n'), ((1856, 1868), 'numpy.sqrt', 'np.sqrt', (['tau'], {}), '(tau)\n', (1863, 1868), True, 'import numpy as np\n'), ((1227, 1246), 'numpy.exp', 'np.exp', (['(-rate * tau)'], {}), '(-rate * tau)\n', (1233, 1246), True, 'import numpy as np\n'), ((1297, 1309), 'scipy.stats.norm.cdf', 'norm.cdf', (['d1'], {}), '(d1)\n', (1305, 1309), False, 'from scipy.stats import norm\n')] |
import os
import pandas as pd
import numpy as np
import itertools
import pickle
class PickleStore(object):
    """Storage object to read and write very large dataset as pickle file.

    The dataset is written as a sequence of independent pickle records of
    at most `chunksize` rows each, so neither write() nor load() needs the
    whole dataset materialized in a single pickle at once.
    """
    def __init__(self, fname):
        # fname: path of the pickle file backing this store.
        self.fname = fname

    def write(self, dataset, chunksize=500):
        """Write dataset in chunks to pickle"""
        total = len(dataset)
        step = chunksize
        n = total // chunksize
        # Infinite counter of chunk start offsets: 0, step, 2*step, ...
        iterator = (itertools.count(start = 0, step = step))
        with open(self.fname, 'wb') as f:
            print(f'Saving {self.fname} ...')
            # n+1 chunks are written; when total is an exact multiple of
            # chunksize the final chunk is empty but still dumped.
            for i in (next(iterator) for _ in range(n+1)):
                end = i + step
                if end > total:
                    end = total
                if isinstance(dataset, (list, np.ndarray)):
                    pickle.dump(dataset[i:end], f, pickle.HIGHEST_PROTOCOL)
                elif isinstance(dataset, pd.DataFrame):
                    pickle.dump(dataset.iloc[i:end], f, pickle.HIGHEST_PROTOCOL)
                else:
                    raise TypeError('Only list, numpy array, and pandas DataFrame are supported')
                print(f'\t... {end} records', end='\r')
            print('\nDone!!')

    def load(self, asPandasDF=False, columns=None):
        """Load pickle in chunks"""
        results = []
        with open(self.fname, 'rb') as f:
            print(f'Loading {self.fname} ...')
            # Read successive pickle records until EOF is hit.
            while True:
                try:
                    results.extend(pickle.load(f))
                    print(f'\t... {len(results)} records', end='\r')
                except EOFError:
                    print('\nDone!!')
                    break
        if asPandasDF:
            if columns is None:
                raise TypeError('Columns can not be None')
            return pd.DataFrame.from_records(results, columns=columns)
        return results
class NpyStore(object):
    """Storage object to read and write very large dataset as npy file.

    write() appends one .npy record per chunk to the file; load() reads the
    records back until the physical end of file and concatenates them.
    NOTE(review): the file is opened in append mode ('ab'), so repeated
    write() calls accumulate records instead of replacing them -- verify
    that this is the intended behavior.
    """
    def __init__(self, fname):
        # fname: path of the .npy file backing this store.
        self.fname = fname

    def write(self, dataset, chunksize=500):
        """Append `dataset` to the file as chunked .npy records."""
        total = len(dataset)
        step = chunksize
        n = total // chunksize
        # Infinite counter of chunk start offsets: 0, step, 2*step, ...
        iterator = (itertools.count(start = 0, step = step))
        with open(self.fname, 'ab') as f:
            print(f'Saving {self.fname} ...')
            for i in (next(iterator) for _ in range(n+1)):
                end = i + step
                if end > total:
                    end = total
                if isinstance(dataset, (list, np.ndarray)):
                    np.save(f, dataset[i:end])
                else:
                    raise TypeError('Only list and numpy array are supported')
                print(f'\t... {end} records', end='\r')
            print('\nDone!!')

    def load(self, axis=0):
        """Read back and concatenate all stored records along `axis`."""
        results = []
        with open(self.fname, "rb") as f:
            print(f'Loading {self.fname} ...')
            # Total file size; f.tell() < fsz means more records remain.
            fsz = os.fstat(f.fileno()).st_size
            results = np.load(f, allow_pickle=True)
            while f.tell() < fsz:
                results = np.concatenate((results, np.load(f, allow_pickle=True)), axis=axis)
                print(f'\t... {len(results)} records', end='\r')
            print('\nDone!!')
        return results;

    @property
    def header(self):
        """Read the header of the npy file"""
        with open(self.fname, 'rb') as f:
            version = np.lib.format.read_magic(f)
            # NOTE(review): _read_array_header is a private numpy API and may
            # change between numpy versions.
            shape, fortran, dtype = np.lib.format._read_array_header(f, version)
        return version, {'descr': dtype,
                         'fortran_order' : fortran,
                         'shape' : shape}
| [
"numpy.load",
"pickle.dump",
"numpy.save",
"numpy.lib.format.read_magic",
"itertools.count",
"pickle.load",
"pandas.DataFrame.from_records",
"numpy.lib.format._read_array_header"
] | [((461, 496), 'itertools.count', 'itertools.count', ([], {'start': '(0)', 'step': 'step'}), '(start=0, step=step)\n', (476, 496), False, 'import itertools\n'), ((2346, 2381), 'itertools.count', 'itertools.count', ([], {'start': '(0)', 'step': 'step'}), '(start=0, step=step)\n', (2361, 2381), False, 'import itertools\n'), ((1917, 1968), 'pandas.DataFrame.from_records', 'pd.DataFrame.from_records', (['results'], {'columns': 'columns'}), '(results, columns=columns)\n', (1942, 1968), True, 'import pandas as pd\n'), ((3206, 3235), 'numpy.load', 'np.load', (['f'], {'allow_pickle': '(True)'}), '(f, allow_pickle=True)\n', (3213, 3235), True, 'import numpy as np\n'), ((3653, 3680), 'numpy.lib.format.read_magic', 'np.lib.format.read_magic', (['f'], {}), '(f)\n', (3677, 3680), True, 'import numpy as np\n'), ((3717, 3761), 'numpy.lib.format._read_array_header', 'np.lib.format._read_array_header', (['f', 'version'], {}), '(f, version)\n', (3749, 3761), True, 'import numpy as np\n'), ((847, 902), 'pickle.dump', 'pickle.dump', (['dataset[i:end]', 'f', 'pickle.HIGHEST_PROTOCOL'], {}), '(dataset[i:end], f, pickle.HIGHEST_PROTOCOL)\n', (858, 902), False, 'import pickle\n'), ((2732, 2758), 'numpy.save', 'np.save', (['f', 'dataset[i:end]'], {}), '(f, dataset[i:end])\n', (2739, 2758), True, 'import numpy as np\n'), ((979, 1039), 'pickle.dump', 'pickle.dump', (['dataset.iloc[i:end]', 'f', 'pickle.HIGHEST_PROTOCOL'], {}), '(dataset.iloc[i:end], f, pickle.HIGHEST_PROTOCOL)\n', (990, 1039), False, 'import pickle\n'), ((1580, 1594), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1591, 1594), False, 'import pickle\n'), ((3321, 3350), 'numpy.load', 'np.load', (['f'], {'allow_pickle': '(True)'}), '(f, allow_pickle=True)\n', (3328, 3350), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys # NOQA isort:skip
sys.path.insert(0, 'datasets') # NOQA isort:skip
import argparse
import glob
import json
import os
import zipfile
from PIL import Image
import chainer
from chainer import serializers
from chainercv import evaluations
import matplotlib.pyplot as plt
import numpy as np
from tqdm import tqdm
import train_segnet
from zipped_cityscapes_road_dataset import ZippedCityscapesRoadDataset
chainer.config.train = False
def save_labels(param_dir, iteration, gpu, img_zip_fn, label_zip_fn, out_dir,
                start_index, end_index, soft_label, eval_shape,
                save_each=False):
    """Run a trained SegNet over a zipped Cityscapes road dataset slice and
    save per-image predictions, score maps, comparison figures and metrics.

    Args:
        param_dir: training output dir containing args.txt and snapshot_* files.
        iteration: snapshot iteration number to load ('iter_{iteration}').
        gpu: CUDA device id, or negative for CPU.
        img_zip_fn: zip file with the input images.
        label_zip_fn: zip file with the ground-truth label images.
        out_dir: destination directory (created if missing).
        start_index, end_index: half-open index range of dataset items.
        soft_label: recorded into result.json; not otherwise used here.
        eval_shape: (H, W) shape predictions and scores must have.
        save_each: if True, save each prediction/score as its own .npy file;
            otherwise collect them in a dict that is returned.

    Returns:
        dict mapping save paths to prediction/score arrays when save_each
        is False, else None.
    """
    train_args = json.load(open(os.path.join(param_dir, 'args.txt')))
    if not os.path.exists(out_dir):
        try:
            os.makedirs(out_dir)
        except Exception:
            # Presumably tolerates a concurrent-creation race -- verify.
            pass
    # Find the target snapshot
    snapshots = sorted(glob.glob(os.path.join(param_dir, 'snapshot_*')))
    for snapshot in snapshots:
        if 'iter_{}'.format(iteration) in snapshot:
            break
    # NOTE(review): if no snapshot matches, the last one is silently used.
    # Create model
    if train_args['model'] == 'basic':
        model = train_segnet.SegNetBasic(n_class=2, pred_shape=eval_shape)
    elif train_args['model'] == 'normal':
        model = train_segnet.SegNet(n_class=2)
    # Load model parameters
    serializers.load_npz(
        snapshot, model, path='updater/model:main/predictor/')
    if gpu >= 0:
        chainer.cuda.get_device_from_id(gpu).use()
        model.to_gpu(gpu)
    # Create dataset
    d = ZippedCityscapesRoadDataset(
        img_zip_fn, label_zip_fn, train_args['input_shape'])
    if end_index > len(d):
        raise ValueError(
            'end_index option should be less than the length of dataset '
            '{} but {} was given.'.format(len(d), end_index))
    if not save_each:
        pred_and_scores = {}
    for i in tqdm(range(start_index, end_index)):
        img, label = d[i]
        pred, score = model.predict([img], True)[0]
        # Predictions must already be at evaluation resolution.
        assert pred.ndim == 2, pred.ndim
        assert pred.shape == tuple(eval_shape), \
            'pred:{} but eval_shape:{}'.format(pred.shape, eval_shape)
        assert score.ndim == 3, score.ndim
        assert score.shape[1:] == tuple(eval_shape), \
            'score[1:]:{} but eval_shape: {}'.format(
                score.shape[1:], eval_shape)
        # Evaluate prediction
        ret = evaluations.calc_semantic_segmentation_confusion([pred], [label])
        # Confusion counts for class 1 (written below as the 'road' class).
        TP = int(ret[1, 1])
        FP = int(ret[0, 1])
        FN = int(ret[1, 0])
        precision = float(TP / (TP + FP)) if TP + FP > 0 else None
        recall = float(TP / (TP + FN)) if TP + FN > 0 else None
        iou = evaluations.calc_semantic_segmentation_iou(ret)
        pred = pred.astype(np.bool)
        score = score.astype(np.float32)
        fn_base = os.path.splitext(os.path.basename(d.img_fns[i]))[0]
        save_fn = os.path.join(out_dir, fn_base)
        if save_each:
            np.save(save_fn, pred)
            # NOTE(review): this saves `pred` under the '_scores' name too;
            # `score` looks intended here -- verify.
            np.save(save_fn + '_scores', pred)
        else:
            pred_and_scores[save_fn] = pred
            pred_and_scores[save_fn + '_scores'] = score
        plt.clf()
        fig, axes = plt.subplots(1, 3)
        fig.set_dpi(300)
        axes[0].axis('off')
        axes[1].axis('off')
        axes[2].axis('off')
        # Show result
        img = np.array(Image.open(d.img_fns[i]), dtype=np.uint8)
        axes[0].imshow(img)
        axes[0].imshow(pred, alpha=0.4, cmap=plt.cm.Set1_r)
        axes[0].set_title('Estimated road mask (input image overlayed)',
                          fontsize=4)
        # Show labels
        axes[1].imshow(label == 1)
        axes[1].set_title('Ground truth road mask', fontsize=4)
        # Show road estimation
        axes[2].imshow(pred)
        axes[2].set_title('Estimated road mask', fontsize=4)
        plt.savefig(os.path.join(out_dir, os.path.basename(d.img_fns[i])),
                    bbox_inches='tight')
        plt.close()
        # Append one JSON line per image with metrics and run settings.
        with open(os.path.join(out_dir, 'result.json'), 'a') as fp:
            result_info = {
                'img_fn': d.img_fns[i],
                'label_fn': d.label_fns[i],
                'road_iou': iou[1],
                'non_road_iou': iou[0],
                'precision': precision,
                'recall': recall,
                'TP': TP,
                'FP': FP,
                'FN': FN
            }
            result_info.update({
                'param_dir': param_dir,
                'iteration': iteration,
                'gpu': gpu,
                'img_zip_fn': img_zip_fn,
                'label_zip_fn': label_zip_fn,
                'out_dir': out_dir,
                'start_index': start_index,
                'end_index': end_index,
                'soft_label': soft_label,
                'eval_shape': eval_shape,
                'save_each': save_each,
            })
            result_info.update({'train_args': train_args})
            print(json.dumps(result_info), file=fp)
    # Release GPU memory held by the allocator pool before returning.
    chainer.cuda.memory_pool.free_all_blocks()
    del model
    if not save_each:
        return pred_and_scores
if __name__ == '__main__':
    # Command-line entry point: forward the CLI options to save_labels.
    parser = argparse.ArgumentParser()
    parser.add_argument('--param_dir', type=str)
    parser.add_argument('--iteration', type=int)
    # Negative selects CPU; >= 0 selects the CUDA device with that id.
    parser.add_argument('--gpu', type=int, default=-1)
    parser.add_argument('--img_zip_fn', type=str)
    parser.add_argument('--label_zip_fn', type=str)
    parser.add_argument('--out_dir', type=str)
    parser.add_argument('--start_index', type=int)
    parser.add_argument('--end_index', type=int)
    parser.add_argument('--soft_label', action='store_true', default=False)
    # Evaluation resolution (H, W) predictions must match.
    parser.add_argument(
        '--eval_shape', type=int, nargs=2, default=[1024, 2048])
    args = parser.parse_args()
    # save_each is forced to True: one .npy per image.
    save_labels(
        args.param_dir, args.iteration, args.gpu, args.img_zip_fn,
        args.label_zip_fn, args.out_dir, args.start_index, args.end_index,
        args.soft_label, args.eval_shape, True)
| [
"argparse.ArgumentParser",
"matplotlib.pyplot.clf",
"json.dumps",
"zipped_cityscapes_road_dataset.ZippedCityscapesRoadDataset",
"chainercv.evaluations.calc_semantic_segmentation_iou",
"os.path.join",
"chainer.serializers.load_npz",
"matplotlib.pyplot.close",
"os.path.exists",
"matplotlib.pyplot.su... | [((77, 107), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""datasets"""'], {}), "(0, 'datasets')\n", (92, 107), False, 'import sys\n'), ((1327, 1402), 'chainer.serializers.load_npz', 'serializers.load_npz', (['snapshot', 'model'], {'path': '"""updater/model:main/predictor/"""'}), "(snapshot, model, path='updater/model:main/predictor/')\n", (1347, 1402), False, 'from chainer import serializers\n'), ((1536, 1621), 'zipped_cityscapes_road_dataset.ZippedCityscapesRoadDataset', 'ZippedCityscapesRoadDataset', (['img_zip_fn', 'label_zip_fn', "train_args['input_shape']"], {}), "(img_zip_fn, label_zip_fn, train_args['input_shape']\n )\n", (1563, 1621), False, 'from zipped_cityscapes_road_dataset import ZippedCityscapesRoadDataset\n'), ((5017, 5059), 'chainer.cuda.memory_pool.free_all_blocks', 'chainer.cuda.memory_pool.free_all_blocks', ([], {}), '()\n', (5057, 5059), False, 'import chainer\n'), ((5170, 5195), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (5193, 5195), False, 'import argparse\n'), ((751, 774), 'os.path.exists', 'os.path.exists', (['out_dir'], {}), '(out_dir)\n', (765, 774), False, 'import os\n'), ((1146, 1204), 'train_segnet.SegNetBasic', 'train_segnet.SegNetBasic', ([], {'n_class': '(2)', 'pred_shape': 'eval_shape'}), '(n_class=2, pred_shape=eval_shape)\n', (1170, 1204), False, 'import train_segnet\n'), ((2399, 2464), 'chainercv.evaluations.calc_semantic_segmentation_confusion', 'evaluations.calc_semantic_segmentation_confusion', (['[pred]', '[label]'], {}), '([pred], [label])\n', (2447, 2464), False, 'from chainercv import evaluations\n'), ((2694, 2741), 'chainercv.evaluations.calc_semantic_segmentation_iou', 'evaluations.calc_semantic_segmentation_iou', (['ret'], {}), '(ret)\n', (2736, 2741), False, 'from chainercv import evaluations\n'), ((2908, 2938), 'os.path.join', 'os.path.join', (['out_dir', 'fn_base'], {}), '(out_dir, fn_base)\n', (2920, 2938), False, 'import os\n'), ((3167, 3176), 
'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (3174, 3176), True, 'import matplotlib.pyplot as plt\n'), ((3197, 3215), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(3)'], {}), '(1, 3)\n', (3209, 3215), True, 'import matplotlib.pyplot as plt\n'), ((3979, 3990), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3988, 3990), True, 'import matplotlib.pyplot as plt\n'), ((701, 736), 'os.path.join', 'os.path.join', (['param_dir', '"""args.txt"""'], {}), "(param_dir, 'args.txt')\n", (713, 736), False, 'import os\n'), ((801, 821), 'os.makedirs', 'os.makedirs', (['out_dir'], {}), '(out_dir)\n', (812, 821), False, 'import os\n'), ((930, 967), 'os.path.join', 'os.path.join', (['param_dir', '"""snapshot_*"""'], {}), "(param_dir, 'snapshot_*')\n", (942, 967), False, 'import os\n'), ((1263, 1293), 'train_segnet.SegNet', 'train_segnet.SegNet', ([], {'n_class': '(2)'}), '(n_class=2)\n', (1282, 1293), False, 'import train_segnet\n'), ((2973, 2995), 'numpy.save', 'np.save', (['save_fn', 'pred'], {}), '(save_fn, pred)\n', (2980, 2995), True, 'import numpy as np\n'), ((3008, 3042), 'numpy.save', 'np.save', (["(save_fn + '_scores')", 'pred'], {}), "(save_fn + '_scores', pred)\n", (3015, 3042), True, 'import numpy as np\n'), ((3371, 3395), 'PIL.Image.open', 'Image.open', (['d.img_fns[i]'], {}), '(d.img_fns[i])\n', (3381, 3395), False, 'from PIL import Image\n'), ((1437, 1473), 'chainer.cuda.get_device_from_id', 'chainer.cuda.get_device_from_id', (['gpu'], {}), '(gpu)\n', (1468, 1473), False, 'import chainer\n'), ((2855, 2885), 'os.path.basename', 'os.path.basename', (['d.img_fns[i]'], {}), '(d.img_fns[i])\n', (2871, 2885), False, 'import os\n'), ((3897, 3927), 'os.path.basename', 'os.path.basename', (['d.img_fns[i]'], {}), '(d.img_fns[i])\n', (3913, 3927), False, 'import os\n'), ((4010, 4046), 'os.path.join', 'os.path.join', (['out_dir', '"""result.json"""'], {}), "(out_dir, 'result.json')\n", (4022, 4046), False, 'import os\n'), ((4978, 5001), 'json.dumps', 
'json.dumps', (['result_info'], {}), '(result_info)\n', (4988, 5001), False, 'import json\n')] |
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 25 18:01:21 2020
@author: Connor
Preprocessing the Atari environment to fit our model, following the findings found in the paper
"""
#Importing Libraries
import numpy as np
import collections
import cv2
import gym
# Repeats action and takes max of last 2 frames
class RepeatActionMaxFrame(gym.Wrapper):
    """Repeat each action `repeat` frames and return the pixel-wise max of
    the last two frames (removes Atari sprite flicker).

    Optionally clips rewards to [-1, 1], performs random no-op steps on
    reset, and presses FIRE first when the game requires it.
    """
    def __init__(self, env = None, repeat = 4, clip_reward = False, no_ops = 0, fire_first = False):
        super(RepeatActionMaxFrame, self).__init__(env)
        self.repeat = repeat
        self.shape = env.observation_space.low.shape
        # BUG FIX: np.zeros_like((2, self.shape)) tried to build an array
        # from the ragged tuple (2, shape) — legacy NumPy produced an object
        # array and NumPy >= 1.24 raises ValueError.  Allocate a real
        # (2, *obs_shape) buffer for the two most recent frames instead.
        self.frame_buffer = np.zeros((2, *self.shape))
        self.clip_reward = clip_reward
        self.no_ops = no_ops
        self.fire_first = fire_first
    def step(self, action):
        """Apply `action` for up to `repeat` frames; return the max of the
        last two observations, the summed (optionally clipped) reward,
        the done flag and the final info dict."""
        total_reward = 0.0
        done = False
        for i in range(self.repeat):
            obs, reward, done, info = self.env.step(action)
            if self.clip_reward:
                reward = np.clip(np.array([reward]), -1, 1)[0]
            total_reward += reward
            # Alternate between the two buffer slots.
            index = i % 2
            self.frame_buffer[index] = obs
            if done:
                break
        max_frame = np.maximum(self.frame_buffer[0], self.frame_buffer[1])
        return max_frame, total_reward, done, info
    def reset(self):
        """Reset the env, apply random no-ops / FIRE, clear the buffer."""
        obs = self.env.reset()
        no_ops = np.random.randint(self.no_ops) + 1 if self.no_ops > 0 else 0
        for _ in range(no_ops):
            _, _, done, _ = self.env.step(0)
            if done:
                self.env.reset()
        if self.fire_first:
            assert self.env.unwrapped.get_action_meanings()[1] == 'Fire'
            obs, _, _, _ = self.env.step(1)
        # Same fix as in __init__ (see note there).
        self.frame_buffer = np.zeros((2, *self.shape))
        self.frame_buffer[0] = obs
        return obs
# Converts observation to grayscale, changes shape for pytorch syntax, resizes and downscales frame for faster processing
class PreProcessFrame(gym.ObservationWrapper):
    """Grayscale, resize and rescale observations; output is channel-first."""
    def __init__(self, shape, env = None):
        super(PreProcessFrame, self).__init__(env)
        # Reorder (H, W, C) -> (C, H, W) as expected by PyTorch conv layers.
        self.shape = (shape[2], shape[0], shape[1])
        self.observation_space = gym.spaces.Box(
            low=0.0, high=1.0, shape=self.shape, dtype=np.float32)
    def observation(self, obs):
        """Convert one RGB frame to a [0, 1]-scaled grayscale (C, H, W) array."""
        gray = cv2.cvtColor(obs, cv2.COLOR_RGB2GRAY)
        # NOTE(review): cv2.resize expects dsize as (width, height); with the
        # default square 84x84 shape this is equivalent — confirm for
        # non-square shapes.
        shrunk = cv2.resize(gray, self.shape[1:], interpolation=cv2.INTER_AREA)
        framed = np.array(shrunk, dtype=np.uint8).reshape(self.shape)
        # Normalise pixel intensities to [0, 1].
        return framed / 255.0
# Stacks frame necessary for observations
class StackFrame(gym.ObservationWrapper):
    """Maintain a rolling stack of the last `repeat` observations."""
    def __init__(self, env, repeat):
        super(StackFrame, self).__init__(env)
        low = env.observation_space.low.repeat(repeat, axis=0)
        high = env.observation_space.high.repeat(repeat, axis=0)
        self.observation_space = gym.spaces.Box(low, high, dtype=np.float32)
        self.stack = collections.deque(maxlen=repeat)
    def reset(self):
        """Reset the env and fill the whole stack with the first frame."""
        self.stack.clear()
        first = self.env.reset()
        for _ in range(self.stack.maxlen):
            self.stack.append(first)
        return self._stacked()
    def observation(self, observation):
        """Push the newest frame and return the stacked observation."""
        self.stack.append(observation)
        return self._stacked()
    def _stacked(self):
        # Flatten the deque into the shape advertised by observation_space.
        return np.array(self.stack).reshape(self.observation_space.low.shape)
# Creates the environment with all above preprocesing
# Added reward clipping, no ops and fire first for testing purposes
def make_env(env_name, shape = (84, 84, 1), repeat = 4, clip_rewards = False, no_ops = 0, fire_first = False):
    """Build a fully preprocessed Atari environment.

    Wraps `env_name` with, in order: action repeat + flicker max,
    grayscale resize/rescale, and frame stacking.
    """
    base = gym.make(env_name)
    repeated = RepeatActionMaxFrame(base, repeat, clip_rewards, no_ops, fire_first)
    processed = PreProcessFrame(shape, repeated)
    return StackFrame(processed, repeat)
| [
"numpy.zeros_like",
"numpy.maximum",
"gym.make",
"cv2.cvtColor",
"collections.deque",
"numpy.random.randint",
"numpy.array",
"gym.spaces.Box",
"cv2.resize"
] | [((3803, 3821), 'gym.make', 'gym.make', (['env_name'], {}), '(env_name)\n', (3811, 3821), False, 'import gym\n'), ((620, 650), 'numpy.zeros_like', 'np.zeros_like', (['(2, self.shape)'], {}), '((2, self.shape))\n', (633, 650), True, 'import numpy as np\n'), ((1210, 1264), 'numpy.maximum', 'np.maximum', (['self.frame_buffer[0]', 'self.frame_buffer[1]'], {}), '(self.frame_buffer[0], self.frame_buffer[1])\n', (1220, 1264), True, 'import numpy as np\n'), ((1764, 1794), 'numpy.zeros_like', 'np.zeros_like', (['(2, self.shape)'], {}), '((2, self.shape))\n', (1777, 1794), True, 'import numpy as np\n'), ((2207, 2276), 'gym.spaces.Box', 'gym.spaces.Box', ([], {'low': '(0.0)', 'high': '(1.0)', 'shape': 'self.shape', 'dtype': 'np.float32'}), '(low=0.0, high=1.0, shape=self.shape, dtype=np.float32)\n', (2221, 2276), False, 'import gym\n'), ((2342, 2379), 'cv2.cvtColor', 'cv2.cvtColor', (['obs', 'cv2.COLOR_RGB2GRAY'], {}), '(obs, cv2.COLOR_RGB2GRAY)\n', (2354, 2379), False, 'import cv2\n'), ((2405, 2472), 'cv2.resize', 'cv2.resize', (['new_frame', 'self.shape[1:]'], {'interpolation': 'cv2.INTER_AREA'}), '(new_frame, self.shape[1:], interpolation=cv2.INTER_AREA)\n', (2415, 2472), False, 'import cv2\n'), ((3083, 3115), 'collections.deque', 'collections.deque', ([], {'maxlen': 'repeat'}), '(maxlen=repeat)\n', (3100, 3115), False, 'import collections\n'), ((1399, 1429), 'numpy.random.randint', 'np.random.randint', (['self.no_ops'], {}), '(self.no_ops)\n', (1416, 1429), True, 'import numpy as np\n'), ((2493, 2533), 'numpy.array', 'np.array', (['resized_screen'], {'dtype': 'np.uint8'}), '(resized_screen, dtype=np.uint8)\n', (2501, 2533), True, 'import numpy as np\n'), ((3320, 3340), 'numpy.array', 'np.array', (['self.stack'], {}), '(self.stack)\n', (3328, 3340), True, 'import numpy as np\n'), ((3491, 3511), 'numpy.array', 'np.array', (['self.stack'], {}), '(self.stack)\n', (3499, 3511), True, 'import numpy as np\n'), ((1004, 1022), 'numpy.array', 'np.array', (['[reward]'], {}), 
'([reward])\n', (1012, 1022), True, 'import numpy as np\n')] |
import sys
import numpy as np
from ares.util import ParameterBundle
from scipy.interpolate import interp1d
from ares.populations import GalaxyPopulation
try:
import h5py
except ImportError:
pass
try:
import genmassfct as gmf
from classy import Class
except ImportError:
sys.exit()
# Grab a vanilla Sheth-Tormen mass function from ARES. dndm has shape (z, M).
pop = GalaxyPopulation()
# Reference halo-mass-function tables: redshift grid, mass grid, and
# dn/dm on that (z, M) grid.
tab_z, tab_M, tab_dndm = pop.halos.tab_z, pop.halos.tab_M, pop.halos.tab_dndm
# Redshift-grid bounds and size, reused to configure genmassfct in
# hmf_wrapper below.
zmin = tab_z.min()
zmax = tab_z.max()
Nz = tab_z.size
def get_ps(**kwargs):
    """Compute the linear matter power spectrum with CLASS.

    Cosmological parameters are read from ``kwargs`` using ARES names:
    ``omega_m_0``, ``omega_b_0``, ``sigma_8``, ``hubble_0``,
    ``primordial_index``.  ``hmf_extra_par1`` is treated as a thermal-relic
    WDM particle mass; a non-finite value reverts the setup to plain CDM.

    Returns
    -------
    tuple
        ``(k, P(k))`` — wavenumbers and power, with the Hubble factors
        applied on return (``k / h0`` and ``P * h0**3``).
    """
    Om = kwargs['omega_m_0']
    Ob = kwargs['omega_b_0']
    s8 = kwargs['sigma_8']  # NOTE(review): unused while 'sigma8' is commented out below
    h0 = kwargs['hubble_0']
    ns = kwargs['primordial_index']
    Omega_ncdm = Om - Ob
    #m_ncdm = 77319.85488
    mTH = kwargs['hmf_extra_par1']
    # Bozek et al. 2015 scaling: thermal WDM mass -> sterile-neutrino-like
    # mass (presumably in eV, hence the trailing *1000) — TODO confirm.
    msn = 3.9*(mTH)**1.294*(0.1198/0.1225)**(-0.33333)*1000
    # CLASS input parameters; the ncdm entries configure the non-cold
    # (warm) dark-matter species.
    params = {
        'h': h0,
        'T_cmb': 2.726,
        'Omega_b': Ob,
        'Omega_cdm': 1e-10,
        'Omega_ncdm': Omega_ncdm,
        'N_eff': 3.04,
        'N_ncdm': 1,
        'm_ncdm': msn,
        #'m_ncdm_in_eV': msn * 1e3,
        'T_ncdm': 0.715985, # ?
        'n_s': ns,
        #'sigma8': s8,
        'A_s': 2.02539e-09,
        'P_k_max_h/Mpc': 500., # Shouldn't need to be more than ~500?
        'k_per_decade_for_pk': 20, # Shouldn't need to be more than ~20?
        'output': 'mPk',
        'ncdm_fluid_approximation': 3,
        'use_ncdm_psd_files': 0,
        # tolerances
        'tol_ncdm_bg': 1e-3,
        #'tol_ncdm': 1e-3,
        'tol_perturb_integration': 1e-6,
        'perturb_sampling_stepsize': 0.01,
        'format': 'camb',
    }
    # Revert to CDM: disable the ncdm species and move all the dark
    # matter back into Omega_cdm.
    if not np.isfinite(mTH):
        params['N_ncdm'] = 0
        params['Omega_ncdm'] = 0
        params['Omega_cdm'] = Om - Ob
        # Drop every ncdm-specific key so CLASS does not complain.
        ncdm_pars = ['tol_ncdm_bg', 'Omega_ncdm', 'use_ncdm_psd_files',
            'm_ncdm', 'T_ncdm']
        for par in ncdm_pars:
            del params[par]
    classinst = Class()
    classinst.set(params)
    classinst.compute()
    # Sample P(k) on a fixed logarithmic k-grid at z = 0.
    k_bin = np.logspace(-5, 2.5, 200)
    pk_bin = np.array([classinst.pk_lin(k_bin[i],0) for i in range(len(k_bin))])
    return k_bin / h0, pk_bin * h0**3
def hmf_wrapper(**kwargs):
    """
    Compute a (W)DM halo mass function on the ARES (z, M) grid.

    Renames parameters so ARES understands them:

    * ``hmf_extra_par0`` — either an hdf5 file with a pre-computed grid of
      mass functions over WDM mass (interpolated and returned directly),
      a text file with a matter power spectrum for genmassfct, or a
      non-string value meaning "generate the PS on-the-fly with CLASS".
    * ``hmf_extra_par1`` — the WDM particle mass.

    Returns dn/dm with shape (z, M), interpolated onto ``tab_z``/``tab_M``.
    """
    assert 'hmf_extra_par0' in kwargs
    # Use Aurel's code (genmassfct) to compute dndm
    par = gmf.par()
    # 0 = filename supplied directly
    # 1 = WDM mass supplied, PS generated on-the-fly
    par0 = kwargs['hmf_extra_par0']
    par1 = kwargs['hmf_extra_par1']
    if type(par0) is str:
        # Interpolate from a grid
        if par0.endswith('hdf5'):
            f = h5py.File(par0, 'r')
            zvals = np.array(f[('tab_z')])
            mxvals = np.array(f[('m_x')])
            dndm_all = np.array(f[('tab_dndm')])
            f.close()
            # Linearly interpolate between the two bracketing m_x values
            i1 = np.argmin(np.abs(mxvals - par1))
            if mxvals[i1] > par1:
                i1 -= 1
            i2 = i1 + 1
            # Exact hit: no interpolation needed.
            if mxvals[i1] == par1:
                return dndm_all[i1]
            dndm_lo = dndm_all[i1]
            dndm_hi = dndm_all[i2]
            func = interp1d(mxvals[i1:i1+2], np.array([dndm_lo, dndm_hi]),
                axis=0)
            return func(par1)
        else:
            fn = par0
    else:
        # Will create this file then read it in
        fn = 'wdm_model.txt'
        # Generate matter PS using CLASS, save to disk so gmf can read it in.
        k, ps = get_ps(**kwargs)
        np.savetxt(fn, np.array([k, ps]).T)
    # Configure genmassfct: PS source, window function, radial binning,
    # and the redshift grid matching the ARES tables (module globals).
    par.file.psfct = fn
    par.code.window = kwargs['hmf_window']
    par.code.Nrbin = 100
    par.code.rmin = 0.002
    par.code.rmax = 25.
    par.cosmo.zmin = zmin
    par.cosmo.zmax = zmax
    par.cosmo.Nz = Nz
    #par.rhoc =
    par.Om = kwargs['omega_m_0']
    par.Ob = kwargs['omega_b_0']
    par.s8 = kwargs['sigma_8']
    par.h0 = kwargs['hubble_0']
    par.ns = kwargs['primordial_index']
    # Sheth-Tormen q only for the top-hat window.
    par.mf.q = 0.707 if par.code.window == 'tophat' else 1
    # NOTE(review): par.h0 is set above but h is read from par.cosmo.h0 —
    # confirm these refer to the same parameter inside genmassfct.
    h = par.cosmo.h0
    m, _dndlnm = gmf.dndlnm(par)
    dndlnm = np.array(_dndlnm)
    dndm = dndlnm / m
    # Need to interpolate back to the ARES mass range (done in log-log;
    # out-of-range masses get -inf, i.e. dndm = 0 after exponentiation).
    new_dndm = np.zeros_like(tab_dndm)
    for i, z in enumerate(tab_z):
        new_dndm[i,:] = np.interp(np.log10(tab_M), np.log10(m / h),
            np.log10(dndm[i,:] * h**4), left=-np.inf, right=-np.inf)
    return 10**new_dndm
# PopIII UV source population: fcoll-based star formation with a UV spectrum.
popIII_uv = ParameterBundle('pop:fcoll') \
          + ParameterBundle('sed:uv')
popIII_uv['pop_Tmin'] = 500. # Restrict PopIII to molecular cooling halos
popIII_uv['pop_Tmax'] = 1e4  # Restrict PopIII to molecular cooling halos
popIII_uv['pop_fstar'] = 1e-4 # Will be free parameter
popIII_uv.num = 2             # Population #2
# Add PopIII X-ray emission. Assume "MCD" spectrum and evolve the X-ray
# background properly. Attach ID number 3.
popIII_xr = ParameterBundle('pop:fcoll') \
          + ParameterBundle('sed:mcd') \
          + ParameterBundle('physics:xrb')
# X-ray SFRD is tied to the UV population's SFRD (population #2).
popIII_xr['pop_sfr_model'] = 'link:sfrd:2'
popIII_xr.num = 3
# Combined two-population (UV + X-ray) PopIII parameter set.
popIII_basic = popIII_uv + popIII_xr
| [
"genmassfct.dndlnm",
"numpy.zeros_like",
"h5py.File",
"numpy.abs",
"numpy.logspace",
"genmassfct.par",
"ares.util.ParameterBundle",
"numpy.isfinite",
"ares.populations.GalaxyPopulation",
"numpy.array",
"classy.Class",
"numpy.log10",
"sys.exit"
] | [((368, 386), 'ares.populations.GalaxyPopulation', 'GalaxyPopulation', ([], {}), '()\n', (384, 386), False, 'from ares.populations import GalaxyPopulation\n'), ((2070, 2077), 'classy.Class', 'Class', ([], {}), '()\n', (2075, 2077), False, 'from classy import Class\n'), ((2149, 2174), 'numpy.logspace', 'np.logspace', (['(-5)', '(2.5)', '(200)'], {}), '(-5, 2.5, 200)\n', (2160, 2174), True, 'import numpy as np\n'), ((2783, 2792), 'genmassfct.par', 'gmf.par', ([], {}), '()\n', (2790, 2792), True, 'import genmassfct as gmf\n'), ((4673, 4688), 'genmassfct.dndlnm', 'gmf.dndlnm', (['par'], {}), '(par)\n', (4683, 4688), True, 'import genmassfct as gmf\n'), ((4702, 4719), 'numpy.array', 'np.array', (['_dndlnm'], {}), '(_dndlnm)\n', (4710, 4719), True, 'import numpy as np\n'), ((4817, 4840), 'numpy.zeros_like', 'np.zeros_like', (['tab_dndm'], {}), '(tab_dndm)\n', (4830, 4840), True, 'import numpy as np\n'), ((5068, 5096), 'ares.util.ParameterBundle', 'ParameterBundle', (['"""pop:fcoll"""'], {}), "('pop:fcoll')\n", (5083, 5096), False, 'from ares.util import ParameterBundle\n'), ((5111, 5136), 'ares.util.ParameterBundle', 'ParameterBundle', (['"""sed:uv"""'], {}), "('sed:uv')\n", (5126, 5136), False, 'from ares.util import ParameterBundle\n'), ((5612, 5642), 'ares.util.ParameterBundle', 'ParameterBundle', (['"""physics:xrb"""'], {}), "('physics:xrb')\n", (5627, 5642), False, 'from ares.util import ParameterBundle\n'), ((292, 302), 'sys.exit', 'sys.exit', ([], {}), '()\n', (300, 302), False, 'import sys\n'), ((1772, 1788), 'numpy.isfinite', 'np.isfinite', (['mTH'], {}), '(mTH)\n', (1783, 1788), True, 'import numpy as np\n'), ((5528, 5556), 'ares.util.ParameterBundle', 'ParameterBundle', (['"""pop:fcoll"""'], {}), "('pop:fcoll')\n", (5543, 5556), False, 'from ares.util import ParameterBundle\n'), ((5571, 5597), 'ares.util.ParameterBundle', 'ParameterBundle', (['"""sed:mcd"""'], {}), "('sed:mcd')\n", (5586, 5597), False, 'from ares.util import ParameterBundle\n'), ((3083, 
3103), 'h5py.File', 'h5py.File', (['par0', '"""r"""'], {}), "(par0, 'r')\n", (3092, 3103), False, 'import h5py\n'), ((3124, 3144), 'numpy.array', 'np.array', (["f['tab_z']"], {}), "(f['tab_z'])\n", (3132, 3144), True, 'import numpy as np\n'), ((3168, 3186), 'numpy.array', 'np.array', (["f['m_x']"], {}), "(f['m_x'])\n", (3176, 3186), True, 'import numpy as np\n'), ((3212, 3235), 'numpy.array', 'np.array', (["f['tab_dndm']"], {}), "(f['tab_dndm'])\n", (3220, 3235), True, 'import numpy as np\n'), ((4072, 4089), 'numpy.array', 'np.array', (['[k, ps]'], {}), '([k, ps])\n', (4080, 4089), True, 'import numpy as np\n'), ((4909, 4924), 'numpy.log10', 'np.log10', (['tab_M'], {}), '(tab_M)\n', (4917, 4924), True, 'import numpy as np\n'), ((4926, 4941), 'numpy.log10', 'np.log10', (['(m / h)'], {}), '(m / h)\n', (4934, 4941), True, 'import numpy as np\n'), ((4956, 4985), 'numpy.log10', 'np.log10', (['(dndm[i, :] * h ** 4)'], {}), '(dndm[i, :] * h ** 4)\n', (4964, 4985), True, 'import numpy as np\n'), ((3354, 3375), 'numpy.abs', 'np.abs', (['(mxvals - par1)'], {}), '(mxvals - par1)\n', (3360, 3375), True, 'import numpy as np\n'), ((3708, 3736), 'numpy.array', 'np.array', (['[dndm_lo, dndm_hi]'], {}), '([dndm_lo, dndm_hi])\n', (3716, 3736), True, 'import numpy as np\n')] |
import numpy as np
from matplotlib import pyplot as plt
from sklearn.cluster import KMeans, MiniBatchKMeans
from sklearn.preprocessing import RobustScaler, MinMaxScaler
from sklearn.model_selection import train_test_split
from tqdm import tqdm
from . import (
info_message,
warning_message,
debug_message
)
# from mlbmi import SentinelAOI
def kmeans_spatial_cluster(
        image, n_clusters=5, quantile_range=(1, 99),
        verbose=False, verbose_plot=False,
        scene_id=None, res=None, date=None):
    """Cluster the grey levels of a single image with K-Means.

    Args:
        image (np.array): input grayscale image
        n_clusters (int, optional): number of grey-level clusters.
            Defaults to 5.
        quantile_range (tuple, optional): RobustScaler outlier-rejection
            quantiles. Defaults to (1, 99).

    Returns:
        sklearn.cluster._kmeans.KMeans: fitted K-Means instance
    """
    # Flatten the image to one sample per pixel and robust-scale it.
    scaler = RobustScaler(quantile_range=quantile_range)
    scaled_pixels = scaler.fit_transform(image.reshape(-1, 1))

    # Fit the clustering model on the scaled pixel values.
    model = KMeans(
        n_clusters=n_clusters,
        n_init=10,
        max_iter=300,
        tol=0.0001,
        verbose=0,
        random_state=None,
        copy_x=True,
        algorithm='auto',
    )
    model.fit(scaled_pixels)

    if verbose_plot:
        # Visual sanity check: show the per-cluster masks.
        sanity_check_spatial_kmeans(
            model, image, quantile_range=quantile_range,
            scene_id=scene_id, res=res, date=date
        )

    return model
def kmeans_temporal_cluster(
        image_stack, n_clusters=5, quantile_range=(1, 99),
        verbose=False, verbose_plot=False,
        scene_id=None, res=None):
    """Cluster pixels of an image stack by their temporal behaviour.

    Args:
        image_stack (np.array): stack of images (time along axis 0)
        n_clusters (int, optional): number of clusters. Defaults to 5.
        quantile_range (tuple, optional): RobustScaler outlier-rejection
            quantiles. Defaults to (1, 99).

    Returns:
        sklearn.cluster._kmeans.KMeans: fitted K-Means instance
    """
    # One sample per pixel: each row is that pixel's time series.
    pixel_series = image_stack.reshape(image_stack.shape[0], -1).T
    # Drop pixels that are zero at every time step (outside the mask).
    is_zero = pixel_series.sum(axis=1) == 0
    pixel_series = pixel_series[~is_zero]

    # Robust-scale the time series before clustering.
    scaler = RobustScaler(quantile_range=quantile_range)
    scaled_series = scaler.fit_transform(pixel_series)

    # Fit the clustering model on the scaled time series.
    model = KMeans(
        n_clusters=n_clusters,
        n_init=10,
        max_iter=300,
        tol=0.0001,
        verbose=0,
        random_state=None,
        copy_x=True,
        algorithm='auto',
    )
    model.fit(scaled_series)

    if verbose_plot:
        # Visual sanity check: show the per-cluster masks.
        sanity_check_temporal_kmeans(
            model, image_stack, quantile_range=quantile_range,
            scene_id=scene_id, res=res
        )

    return model
return kmeans
def sanity_check_spatial_kmeans(kmeans, image, quantile_range=(1, 99),
                                scene_id=None, res=None, date=None,
                                plot_now=False):
    """Plot imshow of clustering solution as sanity check

    Args:
        kmeans (sklearn.cluster._kmeans.KMeans): object storing kmeans solution
        image (np.array): image with which kmeans was trained
        quantile_range (tuple, optional): RobustScaler outlier rejection
            threshold. Defaults to (1, 99).
        scene_id, res, date: labels used only in the figure title.
        plot_now (bool, optional): call plt.show() immediately.
    """
    # Preprocess image data exactly as in kmeans_spatial_cluster so the
    # predictions line up with the training samples.
    sclr = RobustScaler(quantile_range=quantile_range)
    pixel_scaled = sclr.fit_transform(image.reshape(-1, 1))

    # Predict each cluster value per pixel
    cluster_pred = kmeans.predict(pixel_scaled)

    base_fig_size = 5  # Each sub figure will be base_fig_size x base_fig_size
    fig, axs = plt.subplots(
        ncols=kmeans.n_clusters + 1,
        figsize=(base_fig_size * (kmeans.n_clusters + 1), base_fig_size)
    )

    # Plot the entire cluster_pred image
    axs[0].imshow(cluster_pred.reshape(image.shape), interpolation='None')

    # Cycle through and plot each cluster_pred image per 'class'
    for k in range(kmeans.n_clusters):
        axs[k + 1].imshow(
            (cluster_pred == k).reshape(image.shape),
            interpolation='None'
        )

    # Remove grid and ticks from every axis.  (FIX: plain loop instead of
    # side-effect list comprehensions, which built throwaway lists.)
    for ax in axs.ravel():
        ax.grid(False)
        ax.xaxis.set_ticks([])
        ax.yaxis.set_ticks([])

    # Adjust figure to maximize use of gui box
    plt.subplots_adjust(
        left=0,
        right=1,
        bottom=0,
        top=0.9,
        wspace=1e-2
    )

    # Set title for entire figure
    fig.suptitle(
        f"Spatial K-Means Reconstruction: {scene_id} - {res} - {date}",
        fontsize=20
    )

    if plot_now:
        # User can override default behaviour and plot on-the-fly
        plt.show()
def sanity_check_temporal_kmeans(
        kmeans, image_stack, quantile_range=(1, 99),
        scene_id=None, res=None, plot_now=False):
    """Plot imshow of clustering solution as sanity check

    Args:
        kmeans (sklearn.cluster._kmeans.KMeans): object storing kmeans solution
        image_stack (np.array): image_stack with which kmeans was trained
        quantile_range (tuple, optional): RobustScaler outlier rejection
            threshold. Defaults to (1, 99).
        scene_id, res: labels used only in the figure title.
        plot_now (bool, optional): call plt.show() immediately.
    """
    # Rebuild the per-pixel time series exactly as in
    # kmeans_temporal_cluster (all-zero pixels were excluded from training).
    samples_ = image_stack.reshape(image_stack.shape[0], -1).T
    where_zero = samples_.sum(axis=1) == 0
    samples_notzero = samples_[~where_zero]

    # Scale the input image data after reformatting 2D image as 1D vector and
    # rejecting the outliers using the RobustScaler algorithm
    sclr = RobustScaler(quantile_range=quantile_range)
    samples_scaled = sclr.fit_transform(samples_notzero)

    # Predict each cluster value per pixel
    cluster_pred = kmeans.predict(samples_scaled)

    # Embed the predictions in a zero array to re-constitute zeros for the
    # out-of-mask pixels; class 0 therefore means "outside the mask".
    cluster_image = np.zeros(samples_.shape[0])
    cluster_image[~where_zero] = cluster_pred + 1
    # Reshape 1D array into 2D image of the original image shape
    img_shape = image_stack.shape[1:]
    cluster_image = cluster_image.reshape(img_shape)

    base_fig_size = 5  # Each sub figure will be base_fig_size x base_fig_size
    fig, axs = plt.subplots(
        ncols=kmeans.n_clusters + 2,
        figsize=(base_fig_size * (kmeans.n_clusters + 1), base_fig_size)
    )

    # Plot the entire cluster_pred image
    axs[0].imshow(cluster_image, interpolation='None')
    # Plot the pixels outside the mask, which were not clustered
    axs[1].imshow(cluster_image == 0, interpolation='None')
    # Cycle through and plot each cluster_pred image per 'class'
    for k in range(kmeans.n_clusters):
        axs[k + 2].imshow((cluster_image == (k + 1)), interpolation='None')

    # Remove grid and ticks from every axis.  (FIX: plain loop instead of
    # side-effect list comprehensions, which built throwaway lists.)
    for ax in axs.ravel():
        ax.grid(False)
        ax.xaxis.set_ticks([])
        ax.yaxis.set_ticks([])

    # Adjust figure to maximize use of gui box
    plt.subplots_adjust(
        left=0,
        right=1,
        bottom=0,
        top=0.9,
        wspace=1e-2
    )

    # Set title for entire figure
    fig.suptitle(
        f"Temporal K-Means Reconstruction: {scene_id} - {res}",
        fontsize=20
    )

    if plot_now:
        # User can override default behaviour and plot on-the-fly
        plt.show()
| [
"matplotlib.pyplot.show",
"sklearn.preprocessing.RobustScaler",
"sklearn.cluster.KMeans",
"numpy.zeros",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.subplots"
] | [((1098, 1141), 'sklearn.preprocessing.RobustScaler', 'RobustScaler', ([], {'quantile_range': 'quantile_range'}), '(quantile_range=quantile_range)\n', (1110, 1141), False, 'from sklearn.preprocessing import RobustScaler, MinMaxScaler\n'), ((1259, 1391), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'n_clusters', 'n_init': '(10)', 'max_iter': '(300)', 'tol': '(0.0001)', 'verbose': '(0)', 'random_state': 'None', 'copy_x': '(True)', 'algorithm': '"""auto"""'}), "(n_clusters=n_clusters, n_init=10, max_iter=300, tol=0.0001, verbose=\n 0, random_state=None, copy_x=True, algorithm='auto')\n", (1265, 1391), False, 'from sklearn.cluster import KMeans, MiniBatchKMeans\n'), ((2732, 2775), 'sklearn.preprocessing.RobustScaler', 'RobustScaler', ([], {'quantile_range': 'quantile_range'}), '(quantile_range=quantile_range)\n', (2744, 2775), False, 'from sklearn.preprocessing import RobustScaler, MinMaxScaler\n'), ((2883, 3015), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'n_clusters', 'n_init': '(10)', 'max_iter': '(300)', 'tol': '(0.0001)', 'verbose': '(0)', 'random_state': 'None', 'copy_x': '(True)', 'algorithm': '"""auto"""'}), "(n_clusters=n_clusters, n_init=10, max_iter=300, tol=0.0001, verbose=\n 0, random_state=None, copy_x=True, algorithm='auto')\n", (2889, 3015), False, 'from sklearn.cluster import KMeans, MiniBatchKMeans\n'), ((3960, 4003), 'sklearn.preprocessing.RobustScaler', 'RobustScaler', ([], {'quantile_range': 'quantile_range'}), '(quantile_range=quantile_range)\n', (3972, 4003), False, 'from sklearn.preprocessing import RobustScaler, MinMaxScaler\n'), ((4251, 4363), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'ncols': '(kmeans.n_clusters + 1)', 'figsize': '(base_fig_size * (kmeans.n_clusters + 1), base_fig_size)'}), '(ncols=kmeans.n_clusters + 1, figsize=(base_fig_size * (kmeans.\n n_clusters + 1), base_fig_size))\n', (4263, 4363), True, 'from matplotlib import pyplot as plt\n'), ((5034, 5102), 'matplotlib.pyplot.subplots_adjust', 
'plt.subplots_adjust', ([], {'left': '(0)', 'right': '(1)', 'bottom': '(0)', 'top': '(0.9)', 'wspace': '(0.01)'}), '(left=0, right=1, bottom=0, top=0.9, wspace=0.01)\n', (5053, 5102), True, 'from matplotlib import pyplot as plt\n'), ((6192, 6235), 'sklearn.preprocessing.RobustScaler', 'RobustScaler', ([], {'quantile_range': 'quantile_range'}), '(quantile_range=quantile_range)\n', (6204, 6235), False, 'from sklearn.preprocessing import RobustScaler, MinMaxScaler\n'), ((6506, 6533), 'numpy.zeros', 'np.zeros', (['samples_.shape[0]'], {}), '(samples_.shape[0])\n', (6514, 6533), True, 'import numpy as np\n'), ((6910, 7022), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'ncols': '(kmeans.n_clusters + 2)', 'figsize': '(base_fig_size * (kmeans.n_clusters + 1), base_fig_size)'}), '(ncols=kmeans.n_clusters + 2, figsize=(base_fig_size * (kmeans.\n n_clusters + 1), base_fig_size))\n', (6922, 7022), True, 'from matplotlib import pyplot as plt\n'), ((7751, 7819), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0)', 'right': '(1)', 'bottom': '(0)', 'top': '(0.9)', 'wspace': '(0.01)'}), '(left=0, right=1, bottom=0, top=0.9, wspace=0.01)\n', (7770, 7819), True, 'from matplotlib import pyplot as plt\n'), ((5392, 5402), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5400, 5402), True, 'from matplotlib import pyplot as plt\n'), ((8101, 8111), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8109, 8111), True, 'from matplotlib import pyplot as plt\n')] |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from wordcloud import WordCloud, STOPWORDS
from fpdf import FPDF
import sys
import io
def main():
    """Build 'Summary_report.pdf' from reviews.csv.

    For every product in the CSV a rating histogram and a review word
    cloud are rendered to PNG files and embedded in the PDF together with
    summary statistics (review count, price, mean rating).
    """
    df = pd.read_csv('reviews.csv')
    products = list(df.Item.value_counts().keys())

    def cloudmaker(df, product):
        # Render a word cloud of all reviews for `product` to
        # '{product}_cloud.png'.
        comment_words = ''
        stopwords = set(STOPWORDS)
        for val in df['Review']:
            val = str(val)
            # split the value
            tokens = val.split()
            # Converts each token into lowercase
            for i in range(len(tokens)):
                tokens[i] = tokens[i].lower()
            comment_words += " ".join(tokens) + " "
        wordcloud = WordCloud(width=1000, height=1000, max_words=100,
                              stopwords=stopwords,
                              min_font_size=10).generate(comment_words)
        plt.figure(figsize=(15, 15), facecolor=None)
        plt.imshow(wordcloud)
        plt.axis("off")
        plt.tight_layout(pad=0)
        plt.title(f'{product}')
        plt.savefig(f'{product}_cloud' + '.png')
        # FIX: close the figure after saving — otherwise one figure per
        # product stays open and matplotlib accumulates memory/warnings.
        plt.close()

    def review_pro(df, product):
        # Render a rating histogram for `product` to '{product}_review.png'.
        rates = df['Rating'].value_counts()
        plt.figure(figsize=(10, 10))
        plt.bar(rates.index, rates.values, width=0.3)
        plt.title(f'Rating from users for {product}')
        plt.ylabel('Number of users', fontsize=12)
        plt.xlabel('Rating', fontsize=12)
        # Annotate each bar with its count.
        for i, rate in enumerate(list(rates.values)):
            plt.text(rates.index[i] - 0.10, rates.values[i] + 5, str(rate), color='blue')
        plt.savefig(f"{product}_review.png")
        plt.close()  # FIX: see cloudmaker

    pdf = FPDF()
    pdf.add_page()
    pdf.set_font("Times", size=25)
    pdf.cell(200, 10, txt='*'*40, ln=1, align='C')
    pdf.cell(200, 10, txt='"Summary Report"', ln=1, align='C')
    pdf.cell(200, 10, txt='*'*40, ln=1, align='C')
    pdf.set_font("Arial", size=15)

    for product in products:
        sub_df = df[df['Item'] == product]
        # Short, filesystem-friendly name from the first three words.
        name = product.split()[:3]
        name = "_".join(name)
        mark = '='*50
        pdf.cell(200, 10, txt=mark, ln=1, align='C')
        product = f'Product Name: {name}'
        pdf.cell(200, 10, txt=product, ln=1, align='C')
        review = f'Number of Reviews: {sub_df.shape[0]}'
        pdf.cell(200, 10, txt=review, ln=1, align='C')
        price = sub_df['Price'][:1].values[0]
        p = f'Price of {name} Rs.: {price}'
        pdf.cell(200, 10, txt=p, ln=1, align='C')
        rating = f'Average Rating :' + str(round(np.mean(sub_df['Rating']), 2))
        pdf.cell(200, 10, txt=rating, ln=1, align='C')
        review_pro(sub_df, name)
        pdf.image(f'{name}_review.png', w=190, h=190, x=0)
        cloudmaker(sub_df, name)
        pdf.image(f'{name}_cloud.png', w=190, h=190)
        mark = '='*50
        pdf.cell(200, 10, txt=mark, ln=1, align='C')

    pdf.output("Summary_report.pdf")


main()
| [
"matplotlib.pyplot.title",
"pandas.read_csv",
"fpdf.FPDF",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.bar",
"wordcloud.WordCloud",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.figure",
"numpy.mean",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.tight_layout",
... | [((181, 207), 'pandas.read_csv', 'pd.read_csv', (['"""reviews.csv"""'], {}), "('reviews.csv')\n", (192, 207), True, 'import pandas as pd\n'), ((1611, 1617), 'fpdf.FPDF', 'FPDF', ([], {}), '()\n', (1615, 1617), False, 'from fpdf import FPDF\n'), ((862, 906), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 15)', 'facecolor': 'None'}), '(figsize=(15, 15), facecolor=None)\n', (872, 906), True, 'import matplotlib.pyplot as plt\n'), ((920, 941), 'matplotlib.pyplot.imshow', 'plt.imshow', (['wordcloud'], {}), '(wordcloud)\n', (930, 941), True, 'import matplotlib.pyplot as plt\n'), ((951, 966), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (959, 966), True, 'import matplotlib.pyplot as plt\n'), ((976, 999), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {'pad': '(0)'}), '(pad=0)\n', (992, 999), True, 'import matplotlib.pyplot as plt\n'), ((1011, 1034), 'matplotlib.pyplot.title', 'plt.title', (['f"""{product}"""'], {}), "(f'{product}')\n", (1020, 1034), True, 'import matplotlib.pyplot as plt\n'), ((1043, 1083), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(f'{product}_cloud' + '.png')"], {}), "(f'{product}_cloud' + '.png')\n", (1054, 1083), True, 'import matplotlib.pyplot as plt\n'), ((1167, 1195), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (1177, 1195), True, 'import matplotlib.pyplot as plt\n'), ((1203, 1248), 'matplotlib.pyplot.bar', 'plt.bar', (['rates.index', 'rates.values'], {'width': '(0.3)'}), '(rates.index, rates.values, width=0.3)\n', (1210, 1248), True, 'import matplotlib.pyplot as plt\n'), ((1257, 1302), 'matplotlib.pyplot.title', 'plt.title', (['f"""Rating from users for {product}"""'], {}), "(f'Rating from users for {product}')\n", (1266, 1302), True, 'import matplotlib.pyplot as plt\n'), ((1311, 1353), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Number of users"""'], {'fontsize': '(12)'}), "('Number of users', fontsize=12)\n", (1321, 1353), 
True, 'import matplotlib.pyplot as plt\n'), ((1362, 1395), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Rating"""'], {'fontsize': '(12)'}), "('Rating', fontsize=12)\n", (1372, 1395), True, 'import matplotlib.pyplot as plt\n'), ((1561, 1597), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""{product}_review.png"""'], {}), "(f'{product}_review.png')\n", (1572, 1597), True, 'import matplotlib.pyplot as plt\n'), ((691, 783), 'wordcloud.WordCloud', 'WordCloud', ([], {'width': '(1000)', 'height': '(1000)', 'max_words': '(100)', 'stopwords': 'stopwords', 'min_font_size': '(10)'}), '(width=1000, height=1000, max_words=100, stopwords=stopwords,\n min_font_size=10)\n', (700, 783), False, 'from wordcloud import WordCloud, STOPWORDS\n'), ((2522, 2547), 'numpy.mean', 'np.mean', (["sub_df['Rating']"], {}), "(sub_df['Rating'])\n", (2529, 2547), True, 'import numpy as np\n')] |
# Copyright (c) 2020, MeteoSwiss
# Authors: <NAME> <<EMAIL>>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
DAP debug server
================
A simple flask web service application for debugging the protocol.
"""
import logging
# Verbose logging — this module is explicitly a protocol *debug* server
# (see module docstring above).
logging.basicConfig(level=logging.DEBUG)
import urllib
import numpy as np
from flask import Flask, Response, jsonify, request
import opendap_protocol as dap
# Flask application that serves the DAP test datasets below.
app = Flask(__name__)
class BaseDataset(object):
    """Common interface for the DAP test datasets.

    Subclasses provide a ``dataset`` attribute/property; the DDS, DAS and
    DODS responses are delegated to that dataset object.
    """

    def dds(self, constraint=''):
        """Return the dataset's DDS (structure) response."""
        return self.dataset.dds(constraint=constraint)

    def das(self, constraint=''):
        """Return the dataset's DAS (attributes) response."""
        return self.dataset.das(constraint=constraint)

    def dods(self, constraint=''):
        """Return the dataset's DODS (binary data) response."""
        return self.dataset.dods(constraint=constraint)

    @classmethod
    def subclasses(cls):
        """Map test-case class names to their classes (used for routing)."""
        # Dict comprehension instead of dict() over a list of pairs (C402).
        return {sc.__name__: sc for sc in cls.__subclasses__()}
class Test2DGrid(BaseDataset):
    """Test case: a 2x2 grid variable ``p`` over axes ``x`` and ``y``."""

    @property
    def dataset(self):
        # Build the 2-D test dataset: coordinate arrays x, y and a grid p
        # with two attributes (units, size).
        dataset = dap.Dataset(name='test')
        x = dap.Array(name='x', data=np.array([0, 1]), dtype=dap.Int16)
        y = dap.Array(name='y', data=np.array([10, 11]), dtype=dap.Int16)
        p = dap.Grid(
            name='p',
            data=np.array([[0, 0], [0, 0]]),
            dtype=dap.Int32,
            dimensions=[x, y])
        p_attr = [
            dap.Attribute(name='units', value='second', dtype=dap.String),
            dap.Attribute(name='size', value=4, dtype=dap.Float64),
        ]
        p.append(*p_attr)
        dataset.append(x, y, p)
        return dataset
    # NOTE(review): the leading underscore means this does NOT override
    # BaseDataset.dods — the inherited dods() is what actually runs.
    # Presumably kept around for A/B debugging against the hard-coded,
    # known-good DODS reply below; rename to dods() to re-enable it.
    def _dods(self, constraint=''):
        if constraint == 'x,y,p.p':
            logging.debug(
                '2D Grid: Returning fake DODS response (which is known to work).'
            )
            # Hard-coded, known-good DODS response for this exact constraint.
            return b'Dataset {\n    Int16 x[x = 2];\n    Int16 y[y = 2];\n    Grid {\n      Array:\n        Int32 p[x = 2][y = 2];\n      Maps:\n        Int16 x[x = 2];\n        Int16 y[y = 2];\n    } p;\n} test;\n\nData:\r\n\x00\x00\x00\x02\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x02\x00\x00\x00\x02\x00\x00\x00\n\x00\x00\x00\x0b\x00\x00\x00\x04\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x02\x00\x00\x00\x02\x00\x00\x00\n\x00\x00\x00\x0b'
        else:
            logging.debug('2D Grid: Returning normal response.')
            return self.dataset.dods(constraint=constraint)
class Test3DGrid(BaseDataset):
    """Test case exposing a 3D grid variable ``p`` over axes x, y and z."""

    @property
    def dataset(self):
        # Assemble a fresh DAP dataset on every access: three Int16 axis
        # arrays and a 2x2x2 Int32 grid carrying two attributes.
        ds = dap.Dataset(name='test')
        axes = [
            dap.Array(name='x', data=np.array([0, 1]), dtype=dap.Int16),
            dap.Array(name='y', data=np.array([10, 11]), dtype=dap.Int16),
            dap.Array(name='z', data=np.array([20, 21]), dtype=dap.Int16),
        ]
        grid = dap.Grid(
            name='p',
            data=np.array([[[0, 0], [0, 0]], [[1, 1], [1, 1]]]),
            dtype=dap.Int32,
            dimensions=axes)
        grid.append(
            dap.Attribute(name='units', value='second', dtype=dap.String),
            dap.Attribute(name='size', value=8, dtype=dap.Float64),
        )
        ds.append(*axes, grid)
        return ds
@app.route('/', methods=['GET'])
def index():
    """Return the names of all available test datasets as a JSON array."""
    # Only the subclass names are needed, so iterate the registry's keys
    # directly instead of unpacking (and discarding) values from .items().
    return jsonify(list(BaseDataset.subclasses()))
@app.route('/<testcase>.dds', methods=['GET'])
def dds(testcase):
    """Serve the DDS (structure) response for the requested test case."""
    # The DAP constraint expression travels in the URL's query string.
    constraint = urllib.parse.urlsplit(request.url).query
    testcase_cls = BaseDataset.subclasses()[testcase]
    return Response(testcase_cls().dds(constraint=constraint),
                    mimetype='text/plain')
@app.route('/<testcase>.das')
def das(testcase):
    """Serve the DAS (attributes) response for the requested test case."""
    # The DAP constraint expression travels in the URL's query string.
    constraint = urllib.parse.urlsplit(request.url).query
    testcase_cls = BaseDataset.subclasses()[testcase]
    return Response(testcase_cls().das(constraint=constraint),
                    mimetype='text/plain')
@app.route('/<testcase>.dods')
def dods(testcase):
    """Serve the DODS (binary data) response for the requested test case."""
    # The DAP constraint expression travels in the URL's query string.
    constraint = urllib.parse.urlsplit(request.url).query
    testcase_cls = BaseDataset.subclasses()[testcase]
    return Response(testcase_cls().dods(constraint=constraint),
                    mimetype='application/octet-stream')
if __name__ == '__main__':
    # Serve on a fixed port so debugging DAP clients know where to connect.
    app.run(port=32111, debug=True)
| [
"opendap_protocol.Attribute",
"logging.debug",
"logging.basicConfig",
"flask.Flask",
"opendap_protocol.Dataset",
"numpy.array",
"urllib.parse.urlsplit"
] | [((1659, 1699), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), '(level=logging.DEBUG)\n', (1678, 1699), False, 'import logging\n'), ((1826, 1841), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (1831, 1841), False, 'from flask import Flask, Response, jsonify, request\n'), ((2345, 2369), 'opendap_protocol.Dataset', 'dap.Dataset', ([], {'name': '"""test"""'}), "(name='test')\n", (2356, 2369), True, 'import opendap_protocol as dap\n'), ((3922, 3946), 'opendap_protocol.Dataset', 'dap.Dataset', ([], {'name': '"""test"""'}), "(name='test')\n", (3933, 3946), True, 'import opendap_protocol as dap\n'), ((4802, 4836), 'urllib.parse.urlsplit', 'urllib.parse.urlsplit', (['request.url'], {}), '(request.url)\n', (4823, 4836), False, 'import urllib\n'), ((5033, 5067), 'urllib.parse.urlsplit', 'urllib.parse.urlsplit', (['request.url'], {}), '(request.url)\n', (5054, 5067), False, 'import urllib\n'), ((5266, 5300), 'urllib.parse.urlsplit', 'urllib.parse.urlsplit', (['request.url'], {}), '(request.url)\n', (5287, 5300), False, 'import urllib\n'), ((2699, 2760), 'opendap_protocol.Attribute', 'dap.Attribute', ([], {'name': '"""units"""', 'value': '"""second"""', 'dtype': 'dap.String'}), "(name='units', value='second', dtype=dap.String)\n", (2712, 2760), True, 'import opendap_protocol as dap\n'), ((2774, 2828), 'opendap_protocol.Attribute', 'dap.Attribute', ([], {'name': '"""size"""', 'value': '(4)', 'dtype': 'dap.Float64'}), "(name='size', value=4, dtype=dap.Float64)\n", (2787, 2828), True, 'import opendap_protocol as dap\n'), ((3008, 3093), 'logging.debug', 'logging.debug', (['"""2D Grid: Returning fake DODS response (which is known to work)."""'], {}), "('2D Grid: Returning fake DODS response (which is known to work).'\n )\n", (3021, 3093), False, 'import logging\n'), ((3721, 3773), 'logging.debug', 'logging.debug', (['"""2D Grid: Returning normal response."""'], {}), "('2D Grid: Returning normal response.')\n", (3734, 3773), False, 
'import logging\n'), ((4373, 4434), 'opendap_protocol.Attribute', 'dap.Attribute', ([], {'name': '"""units"""', 'value': '"""second"""', 'dtype': 'dap.String'}), "(name='units', value='second', dtype=dap.String)\n", (4386, 4434), True, 'import opendap_protocol as dap\n'), ((4448, 4502), 'opendap_protocol.Attribute', 'dap.Attribute', ([], {'name': '"""size"""', 'value': '(8)', 'dtype': 'dap.Float64'}), "(name='size', value=8, dtype=dap.Float64)\n", (4461, 4502), True, 'import opendap_protocol as dap\n'), ((2408, 2424), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (2416, 2424), True, 'import numpy as np\n'), ((2480, 2498), 'numpy.array', 'np.array', (['[10, 11]'], {}), '([10, 11])\n', (2488, 2498), True, 'import numpy as np\n'), ((2579, 2605), 'numpy.array', 'np.array', (['[[0, 0], [0, 0]]'], {}), '([[0, 0], [0, 0]])\n', (2587, 2605), True, 'import numpy as np\n'), ((3985, 4001), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (3993, 4001), True, 'import numpy as np\n'), ((4057, 4075), 'numpy.array', 'np.array', (['[10, 11]'], {}), '([10, 11])\n', (4065, 4075), True, 'import numpy as np\n'), ((4131, 4149), 'numpy.array', 'np.array', (['[20, 21]'], {}), '([20, 21])\n', (4139, 4149), True, 'import numpy as np\n'), ((4230, 4276), 'numpy.array', 'np.array', (['[[[0, 0], [0, 0]], [[1, 1], [1, 1]]]'], {}), '([[[0, 0], [0, 0]], [[1, 1], [1, 1]]])\n', (4238, 4276), True, 'import numpy as np\n')] |
#!/usr/bin/env python
from __future__ import print_function
import numpy as np
import pynbody as pn
from tqdm import tqdm
import h5py
from . import constants as c
def weight(snapshot, qty, weight_type="volume"):
    """
    Weights the quantity 'qty' in the simulation.

    Parameters
    ----------
    snapshot : pynbody.snapshot
        The snapshot that will have its quantity weighted.
    qty : pynbody.array.SimArray
        The quantity to calculate the weighted mean of. This is usually
        specified by using something similar to: s.g['rho']
    weight_type : {'mass', 'volume', None}, optional
        Which type of weighting to perform on the quantity. At the moment
        mass weighting and no weighting are the same. This is correct for
        simulations where all the particles are the same mass.
        Default: 'volume'

    Returns
    -------
    weighted_quant : pynbody.array.SimArray
        The weighted quantity

    Raises
    ------
    ValueError
        If `weight_type` is not one of 'mass', 'volume' or None.
    """
    if weight_type == "mass" or weight_type is None:
        pmass = snapshot.g["Mass"].in_units("m_p")
        total_mass = np.sum(pmass)
        weighted_quant = np.sum(qty * pmass / total_mass)
    elif weight_type == "volume":
        pmass = snapshot.g["Mass"].in_units("m_p")
        # Per-particle volume follows from mass / density.
        pvol = pmass / snapshot.g["Density"].in_units("m_p cm**-3")
        total_vol = np.sum(pvol)
        weighted_quant = np.sum(qty * pvol / total_vol)
    else:
        # Previously an unknown weight_type fell through silently and the
        # function crashed on the return; fail fast with a clear message.
        raise ValueError("Unknown weight_type: {0!r}".format(weight_type))
    # Bug fix: the function previously returned the undefined name
    # 'weighted_qty', raising NameError on every successful call.
    return weighted_quant
def ion_mean(snapshot_list, ion="HI", weighting=None, verbose=False, **kwargs):
    """
    Calculate the weighted mean ion fraction as a function of redshift.

    Parameters
    ----------
    snapshot_list : list of strings
        A list containing the paths for each snapshot.
    ion : string, optional
        The ion to calculate the weighted mean abundance.
        Options: HI, HeI, HeII (Default: HI)
    weighting : {'mass', 'volume', None}, optional
        The weighting scheme of the particles. Default: None
    verbose : boolean, optional
        If True, print progress information.
        (Default: False)

    Returns
    -------
    redshift : numpy.ndarray
        A numpy array containing the redshifts of each snapshot.
    weighted_mean : numpy.ndarray
        The mean ion fraction at each of the redshifts.
    """
    weighted_mean = []
    redshift = []
    # Bug fix: the loop previously iterated over the undefined name
    # 'snaplist' instead of the 'snapshot_list' parameter.
    for snap in tqdm(snapshot_list, desc=ion, disable=not verbose):
        # Snapshot files are laid out as <snap_dir>/snap_<suffix>, where
        # the suffix is the final underscore-delimited token of the path.
        snap_suffix = snap.split("_")[-1]
        snap_file = "{0}/snap_{1}".format(snap, snap_suffix)
        s = pn.load(snap_file)
        # Ion fraction arrays are stored under the 'ap<ion>' key.
        apion = "ap{0}".format(ion)
        weighted_mean.append(weight(s, s.g[apion], weight_type=weighting))
        redshift.append(s.properties["Redshift"])
    return np.array(redshift), np.array(weighted_mean)
def calc_DM(ray):
    """
    Compute the dispersion measure (DM) along a ray stored in an HDF5 file.

    Parameters
    ----------
    ray : str
        Path to an HDF5 file containing a 'grid' group with a path-length
        dataset ('dl') and ion number-density datasets.

    Returns
    -------
    DM : float
        The dispersion measure, i.e. the electron column density
        integrated along the ray.
    """
    # Resource fix: use a context manager so the HDF5 file handle is
    # always released (the original never closed it).
    with h5py.File(ray, "r") as data:
        grid = data["grid"]
        dl = np.array(grid["dl"]) * c.CM_TO_PC
        # 1 electron from H II and He II and 2 electrons from He III
        ne = (np.array(grid["H_p1_number_density"]) +
              np.array(grid["He_p1_number_density"]) +
              2 * np.array(grid["He_p2_number_density"]))
        DM = np.sum(ne * dl)
    return DM
| [
"tqdm.tqdm",
"h5py.File",
"numpy.sum",
"pynbody.load",
"numpy.array"
] | [((2332, 2377), 'tqdm.tqdm', 'tqdm', (['snaplist'], {'desc': 'ion', 'disable': '(not verbose)'}), '(snaplist, desc=ion, disable=not verbose)\n', (2336, 2377), False, 'from tqdm import tqdm\n'), ((2761, 2780), 'h5py.File', 'h5py.File', (['ray', '"""r"""'], {}), "(ray, 'r')\n", (2770, 2780), False, 'import h5py\n'), ((3088, 3103), 'numpy.sum', 'np.sum', (['(ne * dl)'], {}), '(ne * dl)\n', (3094, 3103), True, 'import numpy as np\n'), ((1087, 1100), 'numpy.sum', 'np.sum', (['pmass'], {}), '(pmass)\n', (1093, 1100), True, 'import numpy as np\n'), ((1126, 1158), 'numpy.sum', 'np.sum', (['(qty * pmass / total_mass)'], {}), '(qty * pmass / total_mass)\n', (1132, 1158), True, 'import numpy as np\n'), ((2494, 2512), 'pynbody.load', 'pn.load', (['snap_file'], {}), '(snap_file)\n', (2501, 2512), True, 'import pynbody as pn\n'), ((2686, 2704), 'numpy.array', 'np.array', (['redshift'], {}), '(redshift)\n', (2694, 2704), True, 'import numpy as np\n'), ((2706, 2729), 'numpy.array', 'np.array', (['weighted_mean'], {}), '(weighted_mean)\n', (2714, 2729), True, 'import numpy as np\n'), ((2790, 2818), 'numpy.array', 'np.array', (["data['grid']['dl']"], {}), "(data['grid']['dl'])\n", (2798, 2818), True, 'import numpy as np\n'), ((1333, 1345), 'numpy.sum', 'np.sum', (['pvol'], {}), '(pvol)\n', (1339, 1345), True, 'import numpy as np\n'), ((1371, 1401), 'numpy.sum', 'np.sum', (['(qty * pvol / total_vol)'], {}), '(qty * pvol / total_vol)\n', (1377, 1401), True, 'import numpy as np\n'), ((2909, 2954), 'numpy.array', 'np.array', (["data['grid']['H_p1_number_density']"], {}), "(data['grid']['H_p1_number_density'])\n", (2917, 2954), True, 'import numpy as np\n'), ((2967, 3013), 'numpy.array', 'np.array', (["data['grid']['He_p1_number_density']"], {}), "(data['grid']['He_p1_number_density'])\n", (2975, 3013), True, 'import numpy as np\n'), ((3030, 3076), 'numpy.array', 'np.array', (["data['grid']['He_p2_number_density']"], {}), "(data['grid']['He_p2_number_density'])\n", (3038, 3076), 
True, 'import numpy as np\n')] |
"""Test nimare.dataset (Dataset IO/transformations)."""
import copy
import json
import os.path as op
import warnings
import nibabel as nib
import numpy as np
import pytest
import nimare
from nimare import dataset
from nimare.tests.utils import get_test_data_path
def test_dataset_smoke():
    """Smoke test for nimare.dataset.Dataset initialization and get methods."""
    data_path = get_test_data_path()
    dset = dataset.Dataset(op.join(data_path, "neurosynth_dset.json"))
    dset.update_path(data_path)
    assert isinstance(dset, nimare.dataset.Dataset)
    # The Dataset's masker must be portable (not backed by a file proxy).
    assert not nib.is_proxy(dset.masker.mask_img_.dataobj)
    for getter in (dset.get_images, dset.get_labels,
                   dset.get_metadata, dset.get_texts):
        assert isinstance(getter(), list)
        assert isinstance(getter(ids=dset.ids[:5]), list)
        assert isinstance(getter(ids=dset.ids[0]), list)
    assert isinstance(dset.get_images(imtype="beta"), list)
    assert isinstance(dset.get_metadata(field="sample_sizes"), list)
    assert isinstance(dset.get_studies_by_label("cogat_cognitive_control"), list)
    assert isinstance(dset.get_studies_by_coordinate(np.array([[20, 20, 20]])), list)
    # Requesting a label that is not in the Dataset must raise ValueError.
    with pytest.raises(ValueError):
        dset.get_studies_by_label("dog")
    mask_arr = np.zeros(dset.masker.mask_img.shape, int)
    mask_arr[40, 40, 40] = 1
    single_voxel_mask = nib.Nifti1Image(mask_arr, dset.masker.mask_img.affine)
    assert isinstance(dset.get_studies_by_mask(single_voxel_mask), list)
    first_half = dset.slice(dset.ids[:5])
    second_half = dset.slice(dset.ids[5:])
    assert isinstance(first_half, dataset.Dataset)
    recombined = first_half.merge(second_half)
    assert isinstance(recombined, dataset.Dataset)
def test_empty_dset():
    """Smoke test for initialization with an empty Dataset."""
    # A dictionary with the required nesting but no study information.
    bare_input = {"study-0": {"contrasts": {"1": {}}}}
    dataset.Dataset(bare_input)
def test_posneg_warning():
    """Smoke test for nimare.dataset.Dataset initialization with positive and negative z_stat."""
    db_file = op.join(get_test_data_path(), "neurosynth_dset.json")
    with open(db_file, "r") as f_obj:
        data = json.load(f_obj)
    data_pos_zstats = copy.deepcopy(data)
    data_neg_zstats = copy.deepcopy(data)
    # Replace every z_stat array with random values: mixed-sign in `data`,
    # all-positive in `data_pos_zstats`, all-negative in `data_neg_zstats`.
    for pid, study in data.items():
        for expid, exp in study["contrasts"].items():
            if "coords" not in exp:
                continue
            coords = exp["coords"]
            if "z_stat" not in coords:
                continue
            mixed = np.random.randn(len(coords["z_stat"]))
            positive = np.abs(mixed)
            negative = np.abs(mixed) * -1
            coords["z_stat"] = mixed.tolist()
            data_neg_zstats[pid]["contrasts"][expid]["coords"]["z_stat"] = negative.tolist()
            data_pos_zstats[pid]["contrasts"][expid]["coords"]["z_stat"] = positive.tolist()
    # Mixed-sign z-stats must trigger the dedicated warning.
    with pytest.warns(UserWarning, match=r"positive and negative z_stats"):
        dset_posneg = dataset.Dataset(data)
    # Uniform-sign z-stats must raise no warning at all.
    with warnings.catch_warnings():
        warnings.simplefilter("error")
        dset_pos = dataset.Dataset(data_pos_zstats)
        dset_neg = dataset.Dataset(data_neg_zstats)
    assert isinstance(dset_posneg, nimare.dataset.Dataset)
    assert isinstance(dset_pos, nimare.dataset.Dataset)
    assert isinstance(dset_neg, nimare.dataset.Dataset)
| [
"nibabel.Nifti1Image",
"copy.deepcopy",
"json.load",
"numpy.abs",
"warnings.simplefilter",
"nimare.tests.utils.get_test_data_path",
"nibabel.is_proxy",
"pytest.warns",
"numpy.random.randn",
"numpy.zeros",
"pytest.raises",
"warnings.catch_warnings",
"numpy.array",
"nimare.dataset.Dataset"
] | [((452, 476), 'nimare.dataset.Dataset', 'dataset.Dataset', (['db_file'], {}), '(db_file)\n', (467, 476), False, 'from nimare import dataset\n'), ((1386, 1427), 'numpy.zeros', 'np.zeros', (['dset.masker.mask_img.shape', 'int'], {}), '(dset.masker.mask_img.shape, int)\n', (1394, 1427), True, 'import numpy as np\n'), ((1473, 1528), 'nibabel.Nifti1Image', 'nib.Nifti1Image', (['mask_data', 'dset.masker.mask_img.affine'], {}), '(mask_data, dset.masker.mask_img.affine)\n', (1488, 1528), True, 'import nibabel as nib\n'), ((1989, 2018), 'nimare.dataset.Dataset', 'dataset.Dataset', (['minimal_dict'], {}), '(minimal_dict)\n', (2004, 2018), False, 'from nimare import dataset\n'), ((2307, 2326), 'copy.deepcopy', 'copy.deepcopy', (['data'], {}), '(data)\n', (2320, 2326), False, 'import copy\n'), ((2349, 2368), 'copy.deepcopy', 'copy.deepcopy', (['data'], {}), '(data)\n', (2362, 2368), False, 'import copy\n'), ((395, 415), 'nimare.tests.utils.get_test_data_path', 'get_test_data_path', ([], {}), '()\n', (413, 415), False, 'from nimare.tests.utils import get_test_data_path\n'), ((498, 518), 'nimare.tests.utils.get_test_data_path', 'get_test_data_path', ([], {}), '()\n', (516, 518), False, 'from nimare.tests.utils import get_test_data_path\n'), ((630, 673), 'nibabel.is_proxy', 'nib.is_proxy', (['dset.masker.mask_img_.dataobj'], {}), '(dset.masker.mask_img_.dataobj)\n', (642, 673), True, 'import nibabel as nib\n'), ((1301, 1326), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1314, 1326), False, 'import pytest\n'), ((2168, 2188), 'nimare.tests.utils.get_test_data_path', 'get_test_data_path', ([], {}), '()\n', (2186, 2188), False, 'from nimare.tests.utils import get_test_data_path\n'), ((2267, 2283), 'json.load', 'json.load', (['f_obj'], {}), '(f_obj)\n', (2276, 2283), False, 'import json\n'), ((3198, 3262), 'pytest.warns', 'pytest.warns', (['UserWarning'], {'match': '"""positive and negative z_stats"""'}), "(UserWarning, match='positive and negative 
z_stats')\n", (3210, 3262), False, 'import pytest\n'), ((3287, 3308), 'nimare.dataset.Dataset', 'dataset.Dataset', (['data'], {}), '(data)\n', (3302, 3308), False, 'from nimare import dataset\n'), ((3398, 3423), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (3421, 3423), False, 'import warnings\n'), ((3433, 3463), 'warnings.simplefilter', 'warnings.simplefilter', (['"""error"""'], {}), "('error')\n", (3454, 3463), False, 'import warnings\n'), ((3483, 3515), 'nimare.dataset.Dataset', 'dataset.Dataset', (['data_pos_zstats'], {}), '(data_pos_zstats)\n', (3498, 3515), False, 'from nimare import dataset\n'), ((3535, 3567), 'nimare.dataset.Dataset', 'dataset.Dataset', (['data_neg_zstats'], {}), '(data_neg_zstats)\n', (3550, 3567), False, 'from nimare import dataset\n'), ((1208, 1232), 'numpy.array', 'np.array', (['[[20, 20, 20]]'], {}), '([[20, 20, 20]])\n', (1216, 1232), True, 'import numpy as np\n'), ((2721, 2746), 'numpy.random.randn', 'np.random.randn', (['n_zstats'], {}), '(n_zstats)\n', (2736, 2746), True, 'import numpy as np\n'), ((2774, 2790), 'numpy.abs', 'np.abs', (['rand_arr'], {}), '(rand_arr)\n', (2780, 2790), True, 'import numpy as np\n'), ((2818, 2834), 'numpy.abs', 'np.abs', (['rand_arr'], {}), '(rand_arr)\n', (2824, 2834), True, 'import numpy as np\n')] |
############################TESTS ON POTENTIALS################################
from __future__ import print_function, division
import os
import sys
PY3= sys.version > '3'
import pytest
import numpy
from scipy import optimize
try:
import pynbody
_PYNBODY_LOADED= True
except ImportError:
_PYNBODY_LOADED= False
from galpy import potential
from galpy.util import coords
_TRAVIS= bool(os.getenv('TRAVIS'))
#Test whether the normalization of the potential works
def test_normalize_potential():
    """Check that normalize(a) makes Rforce(1,0) == -a and vcirc(1)**2 == a
    for every potential that supports normalization."""
    #Grab all of the potentials
    pots= [p for p in dir(potential)
           if ('Potential' in p and not 'plot' in p and not 'RZTo' in p
               and not 'FullTo' in p and not 'toPlanar' in p
               and not 'evaluate' in p and not 'Wrapper' in p
               and not 'toVertical' in p)]
    # Mock/special potentials defined elsewhere in this test module.
    pots.append('specialTwoPowerSphericalPotential')
    pots.append('DehnenTwoPowerSphericalPotential')
    pots.append('DehnenCoreTwoPowerSphericalPotential')
    pots.append('HernquistTwoPowerSphericalPotential')
    pots.append('JaffeTwoPowerSphericalPotential')
    pots.append('NFWTwoPowerSphericalPotential')
    pots.append('specialMiyamotoNagaiPotential')
    pots.append('specialPowerSphericalPotential')
    pots.append('specialFlattenedPowerPotential')
    pots.append('specialMN3ExponentialDiskPotentialPD')
    pots.append('specialMN3ExponentialDiskPotentialSECH')
    # Names picked up from dir(potential) that are not concrete potentials.
    rmpots= ['Potential','MWPotential','MWPotential2014',
             'MovingObjectPotential',
             'interpRZPotential', 'linearPotential', 'planarAxiPotential',
             'planarPotential', 'verticalPotential','PotentialError',
             'SnapshotRZPotential','InterpSnapshotRZPotential',
             'EllipsoidalPotential','NumericalPotentialDerivativesMixin',
             'SphericalPotential','interpSphericalPotential']
    if False: #_TRAVIS: #travis CI
        rmpots.append('DoubleExponentialDiskPotential')
        rmpots.append('RazorThinExponentialDiskPotential')
    for p in rmpots:
        pots.remove(p)
    for p in pots:
        #if not 'NFW' in p: continue #For testing the test
        #Setup instance of potential
        try:
            tclass= getattr(potential,p)
        except AttributeError:
            # Mock potentials live in this test module, not in galpy.potential.
            tclass= getattr(sys.modules[__name__],p)
        tp= tclass()
        if hasattr(tp,'isNonAxi') and tp.isNonAxi:
            continue # skip, bc vcirc not well defined
        if not hasattr(tp,'normalize'): continue
        # Normalizing to 1 must give unit radial force and circular velocity at R=1.
        tp.normalize(1.)
        assert (tp.Rforce(1.,0.)+1.)**2. < 10.**-16., \
            "Normalization of %s potential fails" % p
        assert (tp.vcirc(1.)**2.-1.)**2. < 10.**-16., \
            "Normalization of %s potential fails" % p
        # Re-normalizing to 0.5 must rescale both accordingly.
        tp.normalize(.5)
        if hasattr(tp,'toPlanar'):
            ptp= tp.toPlanar()
        else:
            ptp= tp
        assert (ptp.Rforce(1.,0.)+.5)**2. < 10.**-16., \
            "Normalization of %s potential fails" % p
        assert (ptp.vcirc(1.)**2.-0.5)**2. < 10.**-16., \
            "Normalization of %s potential fails" % p
    # Also test SphericalShell and RingPotential's setup, bc not done elsewhere
    tp= potential.SphericalShellPotential(normalize=1.)
    assert (tp.Rforce(1.,0.)+1.)**2. < 10.**-16., \
        "Normalization of %s potential fails" % p
    assert (tp.vcirc(1.)**2.-1.)**2. < 10.**-16., \
        "Normalization of %s potential fails" % p
    tp= potential.RingPotential(normalize=0.5)
    assert (tp.Rforce(1.,0.)+0.5)**2. < 10.**-16., \
        "Normalization of %s potential fails" % p
    assert (tp.vcirc(1.)**2.-0.5)**2. < 10.**-16., \
        "Normalization of %s potential fails" % p
    return None
#Test whether the derivative of the potential is minus the force
def test_forceAsDeriv_potential():
    """Check that the radial, azimuthal and vertical forces of every
    potential equal minus the numerical derivative of the potential
    along the corresponding coordinate."""
    #Grab all of the potentials
    pots= [p for p in dir(potential)
           if ('Potential' in p and not 'plot' in p and not 'RZTo' in p
               and not 'FullTo' in p and not 'toPlanar' in p
               and not 'evaluate' in p and not 'Wrapper' in p
               and not 'toVertical' in p)]
    # Mock/special potentials defined elsewhere in this test module.
    pots.append('specialTwoPowerSphericalPotential')
    pots.append('DehnenTwoPowerSphericalPotential')
    pots.append('DehnenCoreTwoPowerSphericalPotential')
    pots.append('HernquistTwoPowerSphericalPotential')
    pots.append('JaffeTwoPowerSphericalPotential')
    pots.append('NFWTwoPowerSphericalPotential')
    pots.append('specialMiyamotoNagaiPotential')
    pots.append('specialMN3ExponentialDiskPotentialPD')
    pots.append('specialMN3ExponentialDiskPotentialSECH')
    pots.append('specialPowerSphericalPotential')
    pots.append('specialFlattenedPowerPotential')
    pots.append('testMWPotential')
    pots.append('testplanarMWPotential')
    pots.append('testlinearMWPotential')
    pots.append('mockInterpRZPotential')
    if _PYNBODY_LOADED:
        pots.append('mockSnapshotRZPotential')
        pots.append('mockInterpSnapshotRZPotential')
    pots.append('mockCosmphiDiskPotentialnegcp')
    pots.append('mockCosmphiDiskPotentialnegp')
    pots.append('mockDehnenBarPotentialT1')
    pots.append('mockDehnenBarPotentialTm1')
    pots.append('mockDehnenBarPotentialTm5')
    pots.append('mockEllipticalDiskPotentialT1')
    pots.append('mockEllipticalDiskPotentialTm1')
    pots.append('mockEllipticalDiskPotentialTm5')
    pots.append('mockSteadyLogSpiralPotentialT1')
    pots.append('mockSteadyLogSpiralPotentialTm1')
    pots.append('mockSteadyLogSpiralPotentialTm5')
    pots.append('mockTransientLogSpiralPotential')
    pots.append('mockFlatEllipticalDiskPotential') #for evaluate w/ nonaxi lists
    pots.append('mockMovingObjectPotential')
    pots.append('mockMovingObjectPotentialExplPlummer')
    pots.append('oblateHernquistPotential')
    pots.append('oblateNFWPotential')
    pots.append('oblatenoGLNFWPotential')
    pots.append('oblateJaffePotential')
    pots.append('prolateHernquistPotential')
    pots.append('prolateNFWPotential')
    pots.append('prolateJaffePotential')
    pots.append('triaxialHernquistPotential')
    pots.append('triaxialNFWPotential')
    pots.append('triaxialJaffePotential')
    pots.append('zRotatedTriaxialNFWPotential')
    pots.append('yRotatedTriaxialNFWPotential')
    pots.append('fullyRotatedTriaxialNFWPotential')
    pots.append('fullyRotatednoGLTriaxialNFWPotential')
    pots.append('HernquistTwoPowerTriaxialPotential')
    pots.append('NFWTwoPowerTriaxialPotential')
    pots.append('JaffeTwoPowerTriaxialPotential')
    pots.append('mockSCFZeeuwPotential')
    pots.append('mockSCFNFWPotential')
    pots.append('mockSCFAxiDensity1Potential')
    pots.append('mockSCFAxiDensity2Potential')
    pots.append('mockSCFDensityPotential')
    pots.append('mockAxisymmetricFerrersPotential')
    pots.append('sech2DiskSCFPotential')
    pots.append('expwholeDiskSCFPotential')
    pots.append('nonaxiDiskSCFPotential')
    pots.append('rotatingSpiralArmsPotential')
    pots.append('specialSpiralArmsPotential')
    pots.append('DehnenSmoothDehnenBarPotential')
    pots.append('mockDehnenSmoothBarPotentialT1')
    pots.append('mockDehnenSmoothBarPotentialTm1')
    pots.append('mockDehnenSmoothBarPotentialTm5')
    pots.append('mockDehnenSmoothBarPotentialDecay')
    pots.append('SolidBodyRotationSpiralArmsPotential')
    pots.append('triaxialLogarithmicHaloPotential')
    pots.append('CorotatingRotationSpiralArmsPotential')
    pots.append('GaussianAmplitudeDehnenBarPotential')
    pots.append('nestedListPotential')
    pots.append('mockInterpSphericalPotential')
    pots.append('mockInterpSphericalPotentialwForce')
    # Names picked up from dir(potential) that are not concrete potentials.
    rmpots= ['Potential','MWPotential','MWPotential2014',
             'MovingObjectPotential',
             'interpRZPotential', 'linearPotential', 'planarAxiPotential',
             'planarPotential', 'verticalPotential','PotentialError',
             'SnapshotRZPotential','InterpSnapshotRZPotential',
             'EllipsoidalPotential','NumericalPotentialDerivativesMixin',
             'SphericalPotential','interpSphericalPotential']
    if False: #_TRAVIS: #travis CI
        rmpots.append('DoubleExponentialDiskPotential')
        rmpots.append('RazorThinExponentialDiskPotential')
    for p in rmpots:
        pots.remove(p)
    # Grid of test points in R, Z and phi.
    Rs= numpy.array([0.5,1.,2.])
    Zs= numpy.array([0.,.125,-.125,0.25,-0.25])
    phis= numpy.array([0.,0.5,-0.5,1.,-1.,
                      numpy.pi,0.5+numpy.pi,
                      1.+numpy.pi])
    #tolerances in log10
    tol= {}
    tol['default']= -8.
    tol['DoubleExponentialDiskPotential']= -6. #these are more difficult
    tol['RazorThinExponentialDiskPotential']= -6.
    tol['AnyAxisymmetricRazorThinDiskPotential']= -4.9
    tol['mockInterpRZPotential']= -4.
    tol['FerrersPotential']= -7.
    for p in pots:
        #if not 'NFW' in p: continue #For testing the test
        #Setup instance of potential
        try:
            tclass= getattr(potential,p)
        except AttributeError:
            # Mock potentials live in this test module, not in galpy.potential.
            tclass= getattr(sys.modules[__name__],p)
        tp= tclass()
        if hasattr(tp,'normalize'): tp.normalize(1.)
        #Set tolerance
        if p in list(tol.keys()): ttol= tol[p]
        else: ttol= tol['default']
        #Radial force
        for ii in range(len(Rs)):
            for jj in range(len(Zs)):
                # One-sided finite difference with a representable step size.
                dr= 10.**-8.
                newR= Rs[ii]+dr
                dr= newR-Rs[ii] #Representable number
                if isinstance(tp,potential.linearPotential):
                    mpotderivR= (potential.evaluatelinearPotentials(tp,Rs[ii])
                                 -potential.evaluatelinearPotentials(tp,Rs[ii]+dr))/dr
                    tRforce= potential.evaluatelinearForces(tp,Rs[ii])
                elif isinstance(tp,potential.planarPotential):
                    mpotderivR= (potential.evaluateplanarPotentials(tp,Rs[ii],phi=Zs[jj])-potential.evaluateplanarPotentials(tp,Rs[ii]+dr,phi=Zs[jj]))/dr
                    tRforce= potential.evaluateplanarRforces(tp,Rs[ii],
                                                            phi=Zs[jj])
                else:
                    mpotderivR= (potential.evaluatePotentials(tp,Rs[ii],Zs[jj],phi=1.)
                                 -potential.evaluatePotentials(tp,Rs[ii]+dr,Zs[jj],phi=1.))/dr
                    tRforce= potential.evaluateRforces(tp,Rs[ii],Zs[jj],phi=1.)
                # Compare absolutely when the force is ~0, else relatively.
                if tRforce**2. < 10.**ttol:
                    assert mpotderivR**2. < 10.**ttol, \
                        "Calculation of the Radial force as the Radial derivative of the %s potential fails at (R,Z) = (%.3f,%.3f); diff = %e, rel. diff = %e" % (p,Rs[ii],Zs[jj],numpy.fabs(tRforce-mpotderivR), numpy.fabs((tRforce-mpotderivR)/tRforce))
                else:
                    assert (tRforce-mpotderivR)**2./tRforce**2. < 10.**ttol, \
                        "Calculation of the Radial force as the Radial derivative of the %s potential fails at (R,Z) = (%.3f,%.3f); diff = %e, rel. diff = %e" % (p,Rs[ii],Zs[jj],numpy.fabs(tRforce-mpotderivR), numpy.fabs((tRforce-mpotderivR)/tRforce))
        #Azimuthal force, if it exists
        if isinstance(tp,potential.linearPotential): continue
        for ii in range(len(Rs)):
            for jj in range(len(phis)):
                # One-sided finite difference with a representable step size.
                dphi= 10.**-8.
                newphi= phis[jj]+dphi
                dphi= newphi-phis[jj] #Representable number
                if isinstance(tp,potential.planarPotential):
                    mpotderivphi= (tp(Rs[ii],phi=phis[jj])-tp(Rs[ii],phi=phis[jj]+dphi))/dphi
                    tphiforce= potential.evaluateplanarphiforces(tp,Rs[ii],
                                                                 phi=phis[jj])
                else:
                    mpotderivphi= (tp(Rs[ii],0.05,phi=phis[jj])-tp(Rs[ii],0.05,phi=phis[jj]+dphi))/dphi
                    tphiforce= potential.evaluatephiforces(tp,Rs[ii],0.05,
                                                           phi=phis[jj])
                try:
                    # Compare absolutely when the force is ~0, else relatively.
                    if tphiforce**2. < 10.**ttol:
                        assert(mpotderivphi**2. < 10.**ttol)
                    else:
                        assert((tphiforce-mpotderivphi)**2./tphiforce**2. < 10.**ttol)
                except AssertionError:
                    # Re-raise with an informative, coordinate-specific message.
                    if isinstance(tp,potential.planarPotential):
                        raise AssertionError("Calculation of the azimuthal force as the azimuthal derivative of the %s potential fails at (R,phi) = (%.3f,%.3f); diff = %e, rel. diff = %e" % (p,Rs[ii],phis[jj],numpy.fabs(tphiforce-mpotderivphi),numpy.fabs((tphiforce-mpotderivphi)/tphiforce)))
                    else:
                        raise AssertionError("Calculation of the azimuthal force as the azimuthal derivative of the %s potential fails at (R,Z,phi) = (%.3f,0.05,%.3f); diff = %e, rel. diff = %e" % (p,Rs[ii],phis[jj],numpy.fabs(tphiforce-mpotderivphi),numpy.fabs((tphiforce-mpotderivphi)/tphiforce)))
        #Vertical force, if it exists
        if isinstance(tp,potential.planarPotential) \
                or isinstance(tp,potential.linearPotential): continue

        for ii in range(len(Rs)):
            for jj in range(len(Zs)):
                ##Excluding KuzminDiskPotential when z = 0
                if Zs[jj]==0 and isinstance(tp,potential.KuzminDiskPotential):
                    continue
                # One-sided finite difference with a representable step size.
                dz= 10.**-8.
                newZ= Zs[jj]+dz
                dz= newZ-Zs[jj] #Representable number
                mpotderivz= (tp(Rs[ii],Zs[jj],phi=1.)-tp(Rs[ii],Zs[jj]+dz,phi=1.))/dz
                tzforce= potential.evaluatezforces(tp,Rs[ii],Zs[jj],phi=1.)
                # Compare absolutely when the force is ~0, else relatively.
                if tzforce**2. < 10.**ttol:
                    assert mpotderivz**2. < 10.**ttol, \
                        "Calculation of the vertical force as the vertical derivative of the %s potential fails at (R,Z) = (%.3f,%.3f); diff = %e, rel. diff = %e" % (p,Rs[ii],Zs[jj],numpy.fabs(mpotderivz),numpy.fabs((tzforce-mpotderivz)/tzforce))
                else:
                    assert (tzforce-mpotderivz)**2./tzforce**2. < 10.**ttol, \
                        "Calculation of the vertical force as the vertical derivative of the %s potential fails at (R,Z) = (%.3f,%.3f); diff = %e, rel. diff = %e" % (p,Rs[ii],Zs[jj],numpy.fabs(mpotderivz),numpy.fabs((tzforce-mpotderivz)/tzforce))
#Test whether the second derivative of the potential is minus the derivative of the force
def test_2ndDeriv_potential():
#Grab all of the potentials
pots= [p for p in dir(potential)
if ('Potential' in p and not 'plot' in p and not 'RZTo' in p
and not 'FullTo' in p and not 'toPlanar' in p
and not 'evaluate' in p and not 'Wrapper' in p
and not 'toVertical' in p)]
pots.append('specialTwoPowerSphericalPotential')
pots.append('DehnenTwoPowerSphericalPotential')
pots.append('DehnenCoreTwoPowerSphericalPotential')
pots.append('HernquistTwoPowerSphericalPotential')
pots.append('JaffeTwoPowerSphericalPotential')
pots.append('NFWTwoPowerSphericalPotential')
pots.append('specialMiyamotoNagaiPotential')
pots.append('specialMN3ExponentialDiskPotentialPD')
pots.append('specialMN3ExponentialDiskPotentialSECH')
pots.append('specialPowerSphericalPotential')
pots.append('specialFlattenedPowerPotential')
pots.append('testMWPotential')
pots.append('testplanarMWPotential')
pots.append('testlinearMWPotential')
pots.append('mockInterpRZPotential')
pots.append('mockCosmphiDiskPotentialnegcp')
pots.append('mockCosmphiDiskPotentialnegp')
pots.append('mockDehnenBarPotentialT1')
pots.append('mockDehnenBarPotentialTm1')
pots.append('mockDehnenBarPotentialTm5')
pots.append('mockEllipticalDiskPotentialT1')
pots.append('mockEllipticalDiskPotentialTm1')
pots.append('mockEllipticalDiskPotentialTm5')
pots.append('mockSteadyLogSpiralPotentialT1')
pots.append('mockSteadyLogSpiralPotentialTm1')
pots.append('mockSteadyLogSpiralPotentialTm5')
pots.append('mockTransientLogSpiralPotential')
pots.append('mockFlatEllipticalDiskPotential') #for evaluate w/ nonaxi lists
pots.append('oblateHernquistPotential') # in case these are ever implemented
pots.append('oblateNFWPotential')
pots.append('oblatenoGLNFWPotential')
pots.append('oblateJaffePotential')
pots.append('prolateHernquistPotential')
pots.append('prolateNFWPotential')
pots.append('prolateJaffePotential')
pots.append('triaxialHernquistPotential')
pots.append('triaxialNFWPotential')
pots.append('triaxialJaffePotential')
pots.append('HernquistTwoPowerTriaxialPotential')
pots.append('NFWTwoPowerTriaxialPotential')
pots.append('JaffeTwoPowerTriaxialPotential')
pots.append('mockAxisymmetricFerrersPotential')
pots.append('rotatingSpiralArmsPotential')
pots.append('specialSpiralArmsPotential')
pots.append('DehnenSmoothDehnenBarPotential')
pots.append('mockDehnenSmoothBarPotentialT1')
pots.append('mockDehnenSmoothBarPotentialTm1')
pots.append('mockDehnenSmoothBarPotentialTm5')
pots.append('mockDehnenSmoothBarPotentialDecay')
pots.append('SolidBodyRotationSpiralArmsPotential')
pots.append('triaxialLogarithmicHaloPotential')
pots.append('CorotatingRotationSpiralArmsPotential')
pots.append('GaussianAmplitudeDehnenBarPotential')
pots.append('nestedListPotential')
pots.append('mockInterpSphericalPotential')
pots.append('mockInterpSphericalPotentialwForce')
rmpots= ['Potential','MWPotential','MWPotential2014',
'MovingObjectPotential',
'interpRZPotential', 'linearPotential', 'planarAxiPotential',
'planarPotential', 'verticalPotential','PotentialError',
'SnapshotRZPotential','InterpSnapshotRZPotential',
'EllipsoidalPotential','NumericalPotentialDerivativesMixin',
'SphericalPotential','interpSphericalPotential']
if False: #_TRAVIS: #travis CI
rmpots.append('DoubleExponentialDiskPotential')
rmpots.append('RazorThinExponentialDiskPotential')
for p in rmpots:
pots.remove(p)
Rs= numpy.array([0.5,1.,2.])
Zs= numpy.array([0.,.125,-.125,0.25,-0.25])
phis= numpy.array([0.,0.5,-0.5,1.,-1.,
numpy.pi,0.5+numpy.pi,
1.+numpy.pi])
#tolerances in log10
tol= {}
tol['default']= -8.
tol['DoubleExponentialDiskPotential']= -3. #these are more difficult
tol['RazorThinExponentialDiskPotential']= -6.
tol['AnyAxisymmetricRazorThinDiskPotential']= -4.5
tol['mockInterpRZPotential']= -4.
tol['DehnenBarPotential']= -7.
for p in pots:
#if not 'NFW' in p: continue #For testing the test
#Setup instance of potential
try:
tclass= getattr(potential,p)
except AttributeError:
tclass= getattr(sys.modules[__name__],p)
tp= tclass()
if hasattr(tp,'normalize'): tp.normalize(1.)
#Set tolerance
if p in list(tol.keys()): ttol= tol[p]
else: ttol= tol['default']
#2nd radial
if hasattr(tp,'_R2deriv'):
for ii in range(len(Rs)):
for jj in range(len(Zs)):
if p == 'RazorThinExponentialDiskPotential' and numpy.fabs(Zs[jj]) > 0.: continue #Not implemented
dr= 10.**-8.
newR= Rs[ii]+dr
dr= newR-Rs[ii] #Representable number
if isinstance(tp,potential.linearPotential):
mRforcederivR= (tp.Rforce(Rs[ii])-tp.Rforce(Rs[ii]+dr))/dr
tR2deriv= tp.R2deriv(Rs[ii])
elif isinstance(tp,potential.planarPotential):
mRforcederivR= (tp.Rforce(Rs[ii],Zs[jj])-tp.Rforce(Rs[ii]+dr,Zs[jj]))/dr
tR2deriv= potential.evaluateplanarR2derivs(tp,Rs[ii],
phi=Zs[jj])
else:
mRforcederivR= (tp.Rforce(Rs[ii],Zs[jj],phi=1.)-tp.Rforce(Rs[ii]+dr,Zs[jj],phi=1.))/dr
tR2deriv= potential.evaluateR2derivs(tp,Rs[ii],Zs[jj],phi=1.)
if tR2deriv**2. < 10.**ttol:
assert mRforcederivR**2. < 10.**ttol, \
"Calculation of the second Radial derivative of the potential as the Radial derivative of the %s Radial force fails at (R,Z) = (%.3f,%.3f); diff = %e, rel. diff = %e" % (p,Rs[ii],Zs[jj],numpy.fabs(tR2deriv-mRforcederivR), numpy.fabs((tR2deriv-mRforcederivR)/tR2deriv))
else:
assert (tR2deriv-mRforcederivR)**2./tR2deriv**2. < 10.**ttol, \
"Calculation of the second Radial derivative of the potential as the Radial derivative of the %s Radial force fails at (R,Z) = (%.3f,%.3f); diff = %e, rel. diff = %e" % (p,Rs[ii],Zs[jj],numpy.fabs(tR2deriv-mRforcederivR), numpy.fabs((tR2deriv-mRforcederivR)/tR2deriv))
#2nd azimuthal
if not isinstance(tp,potential.linearPotential) \
and hasattr(tp,'_phi2deriv'):
for ii in range(len(Rs)):
for jj in range(len(phis)):
dphi= 10.**-8.
newphi= phis[jj]+dphi
dphi= newphi-phis[jj] #Representable number
if isinstance(tp,potential.planarPotential):
mphiforcederivphi= (tp.phiforce(Rs[ii],phi=phis[jj])-tp.phiforce(Rs[ii],phi=phis[jj]+dphi))/dphi
tphi2deriv= tp.phi2deriv(Rs[ii],phi=phis[jj])
else:
mphiforcederivphi= (tp.phiforce(Rs[ii],0.05,phi=phis[jj])-tp.phiforce(Rs[ii],0.05,phi=phis[jj]+dphi))/dphi
tphi2deriv= potential.evaluatephi2derivs(tp,Rs[ii],0.05,phi=phis[jj])
try:
if tphi2deriv**2. < 10.**ttol:
assert(mphiforcederivphi**2. < 10.**ttol)
else:
assert((tphi2deriv-mphiforcederivphi)**2./tphi2deriv**2. < 10.**ttol)
except AssertionError:
if isinstance(tp,potential.planarPotential):
raise AssertionError("Calculation of the second azimuthal derivative of the potential as the azimuthal derivative of the %s azimuthal force fails at (R,phi) = (%.3f,%.3f); diff = %e, rel. diff = %e" % (p,Rs[ii],phis[jj],numpy.fabs(tphi2deriv-mphiforcederivphi), numpy.fabs((tphi2deriv-mphiforcederivphi)/tphi2deriv)))
else:
raise AssertionError("Calculation of the second azimuthal derivative of the potential as the azimuthal derivative of the %s azimuthal force fails at (R,Z,phi) = (%.3f,0.05,%.3f); diff = %e, rel. diff = %e" % (p,Rs[ii],phis[jj],numpy.fabs(tphi2deriv-mphiforcederivphi), numpy.fabs((tphi2deriv-mphiforcederivphi)/tphi2deriv)))
#mixed radial azimuthal: Isn't this the same as what's below??
if not isinstance(tp,potential.linearPotential) \
and hasattr(tp,'_Rphideriv'):
for ii in range(len(Rs)):
for jj in range(len(phis)):
dphi= 10.**-8.
newphi= phis[jj]+dphi
dphi= newphi-phis[jj] #Representable number
if isinstance(tp,potential.planarPotential):
mRforcederivphi= (tp.Rforce(Rs[ii],phi=phis[jj])-tp.Rforce(Rs[ii],phi=phis[jj]+dphi))/dphi
tRphideriv= tp.Rphideriv(Rs[ii],phi=phis[jj])
else:
mRforcederivphi= (tp.Rforce(Rs[ii],0.05,phi=phis[jj])-tp.Rforce(Rs[ii],0.05,phi=phis[jj]+dphi))/dphi
tRphideriv= potential.evaluateRphiderivs(tp,Rs[ii],0.05,phi=phis[jj])
try:
if tRphideriv**2. < 10.**ttol:
assert(mRforcederivphi**2. < 10.**ttol)
else:
assert((tRphideriv-mRforcederivphi)**2./tRphideriv**2. < 10.**ttol)
except AssertionError:
if isinstance(tp,potential.planarPotential):
raise AssertionError("Calculation of the mixed radial, azimuthal derivative of the potential as the azimuthal derivative of the %s Radial force fails at (R,phi) = (%.3f,%.3f); diff = %e, rel. diff = %e" % (p,Rs[ii],phis[jj],numpy.fabs(tRphideriv-mRforcederivphi), numpy.fabs((tRphideriv-mRforcederivphi)/tRphideriv)))
else:
raise AssertionError("Calculation of the mixed radial, azimuthal derivative of the potential as the azimuthal derivative of the %s azimuthal force fails at (R,Z,phi) = (%.3f,0.05,%.3f); diff = %e, rel. diff = %e" % (p,Rs[ii],phis[jj],numpy.fabs(tRphideriv-mRforcederivphi), numpy.fabs((tRphideriv-mRforcederivphi)/tRphideriv)))
#2nd vertical
if not isinstance(tp,potential.planarPotential) \
and not isinstance(tp,potential.linearPotential) \
and hasattr(tp,'_z2deriv'):
for ii in range(len(Rs)):
for jj in range(len(Zs)):
if p == 'RazorThinExponentialDiskPotential': continue #Not implemented, or badly defined
if p == 'TwoPowerSphericalPotential': continue #Not implemented, or badly defined
if p == 'specialTwoPowerSphericalPotential': continue #Not implemented, or badly defined
if p == 'DehnenTwoPowerSphericalPotential': continue # Not implemented, or badly defined
if p == 'DehnenCoreTwoPowerSphericalPotential': continue # Not implemented, or badly defined
if p == 'HernquistTwoPowerSphericalPotential': continue #Not implemented, or badly defined
if p == 'JaffeTwoPowerSphericalPotential': continue #Not implemented, or badly defined
if p == 'NFWTwoPowerSphericalPotential': continue #Not implemented, or badly defined
#Excluding KuzminDiskPotential at z = 0
if p == 'KuzminDiskPotential' and Zs[jj] == 0: continue
dz= 10.**-8.
newz= Zs[jj]+dz
dz= newz-Zs[jj] #Representable number
mzforcederivz= (tp.zforce(Rs[ii],Zs[jj],phi=1.)-tp.zforce(Rs[ii],Zs[jj]+dz,phi=1.))/dz
tz2deriv= potential.evaluatez2derivs(tp,Rs[ii],Zs[jj],phi=1.)
if tz2deriv**2. < 10.**ttol:
assert mzforcederivz**2. < 10.**ttol, \
"Calculation of the second vertical derivative of the potential as the vertical derivative of the %s vertical force fails at (R,Z) = (%.3f,%.3f); diff = %e, rel. diff = %e" % (p,Rs[ii],Zs[jj],numpy.fabs(tz2deriv-mzforcederivz), numpy.fabs((tz2deriv-mzforcederivz)/tz2deriv))
else:
assert (tz2deriv-mzforcederivz)**2./tz2deriv**2. < 10.**ttol, \
"Calculation of the second vertical derivative of the potential as the vertical derivative of the %s vertical force fails at (R,Z) = (%.3f,%.3f); diff = %e, rel. diff = %e" % (p,Rs[ii],Zs[jj],numpy.fabs(tz2deriv-mzforcederivz), numpy.fabs((tz2deriv-mzforcederivz)/tz2deriv))
#mixed radial vertical
if not isinstance(tp,potential.planarPotential) \
and not isinstance(tp,potential.linearPotential) \
and hasattr(tp,'_Rzderiv'):
for ii in range(len(Rs)):
for jj in range(len(Zs)):
#Excluding KuzminDiskPotential at z = 0
if p == 'KuzminDiskPotential' and Zs[jj] == 0: continue
# if p == 'RazorThinExponentialDiskPotential': continue #Not implemented, or badly defined
dz= 10.**-8.
newz= Zs[jj]+dz
dz= newz-Zs[jj] #Representable number
mRforcederivz= (tp.Rforce(Rs[ii],Zs[jj],phi=1.)-tp.Rforce(Rs[ii],Zs[jj]+dz,phi=1.))/dz
tRzderiv= potential.evaluateRzderivs(tp,Rs[ii],Zs[jj],phi=1.)
if tRzderiv**2. < 10.**ttol:
assert mRforcederivz**2. < 10.**ttol, \
"Calculation of the mixed radial vertical derivative of the potential as the vertical derivative of the %s radial force fails at (R,Z) = (%.3f,%.3f); diff = %e, rel. diff = %e" % (p,Rs[ii],Zs[jj],numpy.fabs(tRzderiv-mRforcederivz), numpy.fabs((tRzderiv-mRforcederivz)/tRzderiv))
else:
assert (tRzderiv-mRforcederivz)**2./tRzderiv**2. < 10.**ttol, \
"Calculation of the mixed radial vertical derivative of the potential as the vertical derivative of the %s radial force fails at (R,Z) = (%.3f,%.3f); diff = %e, rel. diff = %e" % (p,Rs[ii],Zs[jj],numpy.fabs(tRzderiv-mRforcederivz), numpy.fabs((tRzderiv-mRforcederivz)/tRzderiv))
#mixed radial, azimuthal
if not isinstance(tp,potential.linearPotential) \
and hasattr(tp,'_Rphideriv'):
for ii in range(len(Rs)):
for jj in range(len(phis)):
# if p == 'RazorThinExponentialDiskPotential': continue #Not implemented, or badly defined
dphi= 10.**-8.
newphi= phis[jj]+dphi
dphi= newphi-phis[jj] #Representable number
if isinstance(tp,potential.planarPotential):
mRforcederivphi= (tp.Rforce(Rs[ii],phi=phis[jj])\
-tp.Rforce(Rs[ii],phi=phis[jj]+dphi))/dphi
tRphideriv= potential.evaluateplanarPotentials(tp,Rs[ii],
phi=phis[jj],dR=1,dphi=1)
else:
mRforcederivphi= (tp.Rforce(Rs[ii],0.1,phi=phis[jj])\
-tp.Rforce(Rs[ii],0.1,phi=phis[jj]+dphi))/dphi
tRphideriv= potential.evaluatePotentials(tp,Rs[ii],0.1,
phi=phis[jj],dR=1,dphi=1)
if tRphideriv**2. < 10.**ttol:
assert mRforcederivphi**2. < 10.**ttol, \
"Calculation of the mixed radial azimuthal derivative of the potential as the azimuthal derivative of the %s radial force fails at (R,phi) = (%.3f,%.3f); diff = %e, rel. diff = %e" % (p,Rs[ii],phis[jj],numpy.fabs(tRphideriv-mRforcederivphi), numpy.fabs((tRphideriv-mRforcederivphi)/tRphideriv))
else:
assert (tRphideriv-mRforcederivphi)**2./tRphideriv**2. < 10.**ttol, \
"Calculation of the mixed radial azimuthal derivative of the potential as the azimuthal derivative of the %s radial force fails at (R,phi) = (%.3f,%.3f); diff = %e, rel. diff = %e" % (p,Rs[ii],phis[jj],numpy.fabs(tRphideriv-mRforcederivphi), numpy.fabs((tRphideriv-mRforcederivphi)/tRphideriv))
#Test whether the Poisson equation is satisfied if _dens and the relevant second derivatives are implemented
def test_poisson_potential():
    """Verify the Poisson equation for every potential that implements a
    density: the density recovered from the Laplacian of the potential
    (``forcepoisson=True``) must agree with the directly implemented density
    on a grid of (R, Z, phi) points.
    """
    # Candidate class names from the galpy.potential namespace
    skip_substrings= ('plot','RZTo','FullTo','toPlanar',
                      'evaluate','Wrapper','toVertical')
    pots= [p for p in dir(potential)
           if 'Potential' in p
           and not any(s in p for s in skip_substrings)]
    # Mock/special potentials defined in this test module
    pots.extend(['specialTwoPowerSphericalPotential',
                 'DehnenTwoPowerSphericalPotential',
                 'DehnenCoreTwoPowerSphericalPotential',
                 'HernquistTwoPowerSphericalPotential',
                 'JaffeTwoPowerSphericalPotential',
                 'NFWTwoPowerSphericalPotential',
                 'specialMiyamotoNagaiPotential',
                 'specialMN3ExponentialDiskPotentialPD',
                 'specialMN3ExponentialDiskPotentialSECH',
                 'specialFlattenedPowerPotential',
                 'specialPowerSphericalPotential',
                 'testMWPotential',
                 'testplanarMWPotential',
                 'testlinearMWPotential',
                 'oblateHernquistPotential', # in case these are ever implemented
                 'oblateNFWPotential',
                 'oblateJaffePotential',
                 'prolateHernquistPotential',
                 'prolateNFWPotential',
                 'prolateJaffePotential',
                 'triaxialHernquistPotential',
                 'triaxialNFWPotential',
                 'triaxialJaffePotential',
                 'HernquistTwoPowerTriaxialPotential',
                 'NFWTwoPowerTriaxialPotential',
                 'JaffeTwoPowerTriaxialPotential',
                 'rotatingSpiralArmsPotential',
                 'specialSpiralArmsPotential',
                 'DehnenSmoothDehnenBarPotential',
                 'SolidBodyRotationSpiralArmsPotential',
                 'triaxialLogarithmicHaloPotential',
                 'CorotatingRotationSpiralArmsPotential',
                 'GaussianAmplitudeDehnenBarPotential',
                 'nestedListPotential',
                 'mockInterpSphericalPotential',
                 'mockInterpSphericalPotentialwForce'])
    # Names that are abstract bases/helpers or cannot be instantiated bare
    rmpots= ['Potential','MWPotential','MWPotential2014',
             'MovingObjectPotential',
             'interpRZPotential', 'linearPotential', 'planarAxiPotential',
             'planarPotential', 'verticalPotential','PotentialError',
             'SnapshotRZPotential','InterpSnapshotRZPotential',
             'EllipsoidalPotential','NumericalPotentialDerivativesMixin',
             'SphericalPotential','interpSphericalPotential']
    if False: #_TRAVIS: #travis CI
        rmpots.append('DoubleExponentialDiskPotential')
        rmpots.append('RazorThinExponentialDiskPotential')
    for name in rmpots:
        pots.remove(name)
    # Grid of evaluation points
    Rs= numpy.array([0.5,1.,2.])
    Zs= numpy.array([0.,.125,-.125,0.25,-0.25])
    phis= numpy.array([0.,0.5,-0.5,1.,-1.,
                       numpy.pi,0.5+numpy.pi,
                       1.+numpy.pi])
    # Per-potential tolerances, in log10
    tol= {'default': -8.,
          'DoubleExponentialDiskPotential': -3., #these are more difficult
          'SpiralArmsPotential': -3, #these are more difficult
          'rotatingSpiralArmsPotential': -3,
          'specialSpiralArmsPotential': -4,
          'SolidBodyRotationSpiralArmsPotential': -2.9, #these are more difficult
          'nestedListPotential': -3} #these are more difficult
    for p in pots:
        # Instantiate; mock classes live in this module, not galpy.potential
        try:
            tclass= getattr(potential,p)
        except AttributeError:
            tclass= getattr(sys.modules[__name__],p)
        tp= tclass()
        if hasattr(tp,'normalize'):
            tp.normalize(1.)
        ttol= tol.get(p,tol['default'])
        # Only test potentials implementing a density and the second
        # derivatives that enter the Laplacian
        # NOTE(review): 'phi2deriv' is the public method name, while the other
        # checks use the private '_'-prefixed attributes -- confirm intended
        if not hasattr(tp,'_dens') or not hasattr(tp,'_R2deriv') \
                or not hasattr(tp,'_Rforce') or not hasattr(tp,'phi2deriv') \
                or not hasattr(tp,'_z2deriv'):
            continue
        for tR in Rs:
            for tZ in Zs:
                for tphi in phis:
                    tpoissondens= tp.dens(tR,tZ,phi=tphi,
                                          forcepoisson=True)
                    tdens= potential.evaluateDensities(tp,tR,tZ,
                                                       phi=tphi,
                                                       forcepoisson=False)
                    # Absolute tolerance near zero density, relative otherwise
                    if tdens**2. < 10.**ttol:
                        assert tpoissondens**2. < 10.**ttol, \
                            "Poisson equation relation between the derivatives of the potential and the implemented density is not satisfied for the %s potential at (R,Z,phi) = (%.3f,%.3f,%.3f); diff = %e, rel. diff = %e" % (p,tR,tZ,tphi,numpy.fabs(tdens-tpoissondens), numpy.fabs((tdens-tpoissondens)/tdens))
                    else:
                        assert (tpoissondens-tdens)**2./tdens**2. < 10.**ttol, \
                            "Poisson equation relation between the derivatives of the potential and the implemented density is not satisfied for the %s potential at (R,Z,phi) = (%.3f,%.3f,%.3f); diff = %e, rel. diff = %e" % (p,tR,tZ,tphi,numpy.fabs(tdens-tpoissondens), numpy.fabs((tdens-tpoissondens)/tdens))
    return None
#Test whether the (integrated) Poisson equation is satisfied if _surfdens and the relevant second derivatives are implemented
def test_poisson_surfdens_potential():
    """Check the integrated (vertical) Poisson equation for potentials that
    explicitly implement a surface density: the surface density obtained from
    the potential's derivatives (``forcepoisson=True``) must agree with the
    directly implemented one on a grid of (R, Z, phi) points.
    """
    #Grab all of the potentials
    pots= [p for p in dir(potential)
           if ('Potential' in p and not 'plot' in p and not 'RZTo' in p
               and not 'FullTo' in p and not 'toPlanar' in p
               and not 'evaluate' in p and not 'Wrapper' in p
               and not 'toVertical' in p)]
    pots.append('testMWPotential')
    # The bare triple-quoted string below is a block of disabled additions;
    # it is evaluated and discarded, so it has no runtime effect.
    """
    pots.append('specialTwoPowerSphericalPotential')
    pots.append('DehnenTwoPowerSphericalPotential')
    pots.append('DehnenCoreTwoPowerSphericalPotential')
    pots.append('HernquistTwoPowerSphericalPotential')
    pots.append('JaffeTwoPowerSphericalPotential')
    pots.append('NFWTwoPowerSphericalPotential')
    pots.append('specialMiyamotoNagaiPotential')
    pots.append('specialMN3ExponentialDiskPotentialPD')
    pots.append('specialMN3ExponentialDiskPotentialSECH')
    pots.append('specialFlattenedPowerPotential')
    pots.append('specialPowerSphericalPotential')
    pots.append('testplanarMWPotential')
    pots.append('testlinearMWPotential')
    pots.append('oblateHernquistPotential') # in cae these are ever implemented
    pots.append('oblateNFWPotential')
    pots.append('oblateJaffePotential')
    pots.append('prolateHernquistPotential')
    pots.append('prolateNFWPotential')
    pots.append('prolateJaffePotential')
    pots.append('triaxialHernquistPotential')
    pots.append('triaxialNFWPotential')
    pots.append('triaxialJaffePotential')
    pots.append('HernquistTwoPowerTriaxialPotential')
    pots.append('NFWTwoPowerTriaxialPotential')
    pots.append('JaffeTwoPowerTriaxialPotential')
    pots.append('rotatingSpiralArmsPotential')
    pots.append('specialSpiralArmsPotential')
    pots.append('DehnenSmoothDehnenBarPotential')
    pots.append('SolidBodyRotationSpiralArmsPotential')
    pots.append('triaxialLogarithmicHaloPotential')
    pots.append('CorotatingRotationSpiralArmsPotential')
    pots.append('GaussianAmplitudeDehnenBarPotential')
    pots.append('nestedListPotential')
    """
    pots.append('mockInterpSphericalPotential')
    pots.append('mockInterpSphericalPotentialwForce')
    # Abstract bases / helpers that cannot be instantiated without arguments
    rmpots= ['Potential','MWPotential','MWPotential2014',
             'MovingObjectPotential',
             'interpRZPotential', 'linearPotential', 'planarAxiPotential',
             'planarPotential', 'verticalPotential','PotentialError',
             'SnapshotRZPotential','InterpSnapshotRZPotential',
             'EllipsoidalPotential','NumericalPotentialDerivativesMixin',
             'SphericalPotential','interpSphericalPotential']
    if False: #_TRAVIS: #travis CI
        rmpots.append('DoubleExponentialDiskPotential')
        rmpots.append('RazorThinExponentialDiskPotential')
    # NOTE: this removal is unconditional -- it sits outside the disabled
    # 'if False' block above.
    rmpots.append('RazorThinExponentialDiskPotential') # R2deriv not implemented for |Z| > 0
    for p in rmpots:
        pots.remove(p)
    # Evaluation grid; Z is the height to which the surface density is
    # integrated (no Z=0 and no negative Z here, unlike the density test)
    Rs= numpy.array([0.5,1.,2.])
    Zs= numpy.array([.125,0.25,1.,10.])
    phis= numpy.array([0.,0.5,-0.5,1.,-1.,
                       numpy.pi,0.5+numpy.pi,
                       1.+numpy.pi])
    #tolerances in log10
    tol= {}
    tol['default']= -8.
    tol['DoubleExponentialDiskPotential']= -3. #these are more difficult
    tol['SphericalShellPotential']= -0 # Direct integration fails to deal with delta function!
    #tol['SpiralArmsPotential']= -3 #these are more difficult
    #tol['rotatingSpiralArmsPotential']= -3
    #tol['specialSpiralArmsPotential']= -4
    #tol['SolidBodyRotationSpiralArmsPotential']= -2.9 #these are more difficult
    #tol['nestedListPotential']= -3 #these are more difficult
    #tol['RazorThinExponentialDiskPotential']= -6.
    for p in pots:
        #if not 'NFW' in p: continue #For testing the test
        #if 'Isochrone' in p: continue #For testing the test
        #Setup instance of potential
        try:
            tclass= getattr(potential,p)
        except AttributeError:
            # Mock potentials are defined in this test module
            tclass= getattr(sys.modules[__name__],p)
        tp= tclass()
        if hasattr(tp,'normalize'): tp.normalize(1.)
        #Set tolerance
        if p in list(tol.keys()): ttol= tol[p]
        else: ttol= tol['default']
        # Skip potentials that lack the ingredients of the integrated Poisson
        # check; the final clause skips classes that merely inherit the
        # generic Potential._surfdens (FlattenedPowerPotential excepted).
        # NOTE(review): 'phi2deriv' is the public method name, unlike the
        # other (private, '_'-prefixed) attribute checks -- confirm intended.
        if not hasattr(tp,'_surfdens') or not hasattr(tp,'_R2deriv') \
                or not hasattr(tp,'_Rforce') or not hasattr(tp,'phi2deriv') \
                or not hasattr(tp,'_zforce') \
                or (tclass._surfdens == potential.Potential._surfdens and not p == 'FlattenedPowerPotential'): # make sure _surfdens is explicitly implemented
            continue
        for ii in range(len(Rs)):
            for jj in range(len(Zs)):
                for kk in range(len(phis)):
                    # Surface density via the potential derivatives vs. the
                    # directly implemented surface density
                    tpoissondens= tp.surfdens(Rs[ii],Zs[jj],phi=phis[kk],
                                             forcepoisson=True)
                    tdens= potential.evaluateSurfaceDensities(tp,Rs[ii],Zs[jj],
                                                              phi=phis[kk],
                                                              forcepoisson=False)
                    # Absolute tolerance near zero, relative otherwise
                    if tdens**2. < 10.**ttol:
                        assert tpoissondens**2. < 10.**ttol, \
                            "Poisson equation relation between the derivatives of the potential and the implemented surface density is not satisfied for the %s potential at (R,Z,phi) = (%.3f,%.3f,%.3f); diff = %e, rel. diff = %e" % (p,Rs[ii],Zs[jj],phis[kk],numpy.fabs(tdens-tpoissondens), numpy.fabs((tdens-tpoissondens)/tdens))
                    else:
                        assert (tpoissondens-tdens)**2./tdens**2. < 10.**ttol, \
                            "Poisson equation relation between the derivatives of the potential and the implemented surface density is not satisfied for the %s potential at (R,Z,phi) = (%.3f,%.3f,%.3f); diff = %e, rel. diff = %e" % (p,Rs[ii],Zs[jj],phis[kk],numpy.fabs(tdens-tpoissondens), numpy.fabs((tdens-tpoissondens)/tdens))
    return None
#Test whether the _evaluate function is correctly implemented in specifying derivatives
def test_evaluateAndDerivs_potential():
    """Check that derivatives requested through a potential's ``__call__``
    (the ``dR=`` / ``dphi=`` keywords) are consistent with the dedicated
    Rforce/R2deriv/phiforce/phi2deriv methods, and that unsupported
    higher-order derivative requests raise NotImplementedError.
    """
    #Grab all of the potentials
    pots= [p for p in dir(potential)
           if ('Potential' in p and not 'plot' in p and not 'RZTo' in p
               and not 'FullTo' in p and not 'toPlanar' in p
               and not 'evaluate' in p and not 'Wrapper' in p
               and not 'toVertical' in p)]
    pots.append('specialTwoPowerSphericalPotential')
    pots.append('DehnenTwoPowerSphericalPotential')
    pots.append('DehnenCoreTwoPowerSphericalPotential')
    pots.append('HernquistTwoPowerSphericalPotential')
    pots.append('JaffeTwoPowerSphericalPotential')
    pots.append('NFWTwoPowerSphericalPotential')
    pots.append('specialMiyamotoNagaiPotential')
    pots.append('specialMN3ExponentialDiskPotentialPD')
    pots.append('specialMN3ExponentialDiskPotentialSECH')
    pots.append('specialFlattenedPowerPotential')
    pots.append('specialPowerSphericalPotential')
    pots.append('mockCosmphiDiskPotentialnegcp')
    pots.append('mockCosmphiDiskPotentialnegp')
    pots.append('mockDehnenBarPotentialT1')
    pots.append('mockDehnenBarPotentialTm1')
    pots.append('mockDehnenBarPotentialTm5')
    pots.append('mockEllipticalDiskPotentialT1')
    pots.append('mockEllipticalDiskPotentialTm1')
    pots.append('mockEllipticalDiskPotentialTm5')
    pots.append('mockSteadyLogSpiralPotentialT1')
    pots.append('mockSteadyLogSpiralPotentialTm1')
    pots.append('mockSteadyLogSpiralPotentialTm5')
    pots.append('mockTransientLogSpiralPotential')
    pots.append('mockMovingObjectPotential')
    pots.append('oblateHernquistPotential') # in case these are ever implemented
    pots.append('oblateNFWPotential')
    pots.append('oblateJaffePotential')
    pots.append('prolateHernquistPotential')
    pots.append('prolateNFWPotential')
    pots.append('prolateJaffePotential')
    pots.append('triaxialHernquistPotential')
    pots.append('triaxialNFWPotential')
    pots.append('triaxialJaffePotential')
    pots.append('mockSCFZeeuwPotential')
    pots.append('mockSCFNFWPotential')
    pots.append('mockSCFAxiDensity1Potential')
    pots.append('mockSCFAxiDensity2Potential')
    pots.append('mockSCFDensityPotential')
    pots.append('sech2DiskSCFPotential')
    pots.append('expwholeDiskSCFPotential')
    pots.append('nonaxiDiskSCFPotential')
    pots.append('rotatingSpiralArmsPotential')
    pots.append('specialSpiralArmsPotential')
    pots.append('SolidBodyRotationSpiralArmsPotential')
    pots.append('DehnenSmoothDehnenBarPotential')
    pots.append('mockDehnenSmoothBarPotentialT1')
    pots.append('mockDehnenSmoothBarPotentialTm1')
    pots.append('mockDehnenSmoothBarPotentialTm5')
    pots.append('mockDehnenSmoothBarPotentialDecay')
    pots.append('triaxialLogarithmicHaloPotential')
    pots.append('CorotatingRotationSpiralArmsPotential')
    pots.append('GaussianAmplitudeDehnenBarPotential')
    pots.append('nestedListPotential')
    pots.append('mockInterpSphericalPotential')
    pots.append('mockInterpSphericalPotentialwForce')
    # Abstract bases / helpers that cannot be instantiated bare
    rmpots= ['Potential','MWPotential','MWPotential2014',
             'MovingObjectPotential',
             'interpRZPotential', 'linearPotential', 'planarAxiPotential',
             'planarPotential', 'verticalPotential','PotentialError',
             'SnapshotRZPotential','InterpSnapshotRZPotential',
             'EllipsoidalPotential','NumericalPotentialDerivativesMixin',
             'SphericalPotential','interpSphericalPotential']
    if False: #_TRAVIS: #travis CI
        rmpots.append('DoubleExponentialDiskPotential')
        rmpots.append('RazorThinExponentialDiskPotential')
    for p in rmpots:
        pots.remove(p)
    #tolerances in log10
    tol= {}
    tol['default']= -12.
    #tol['DoubleExponentialDiskPotential']= -3. #these are more difficult
    #tol['RazorThinExponentialDiskPotential']= -6.
    for p in pots:
        #if 'Isochrone' in p: continue #For testing the test
        #Setup instance of potential; mocks live in this test module
        try:
            tclass= getattr(potential,p)
        except AttributeError:
            tclass= getattr(sys.modules[__name__],p)
        tp= tclass()
        if hasattr(tp,'normalize'): tp.normalize(1.)
        #Set tolerance
        if p in list(tol.keys()): ttol= tol[p]
        else: ttol= tol['default']
        #1st radial: Phi(...,dR=1) should equal -Rforce
        if isinstance(tp,potential.linearPotential):
            continue
        elif isinstance(tp,potential.planarPotential):
            tevaldr= tp(1.2,phi=0.1,dR=1)
            trforce= tp.Rforce(1.2,phi=0.1)
        else:
            tevaldr= tp(1.2,0.1,phi=0.1,dR=1)
            trforce= tp.Rforce(1.2,0.1,phi=0.1)
        if not tevaldr is None:
            if tevaldr**2. < 10.**ttol:
                assert trforce**2. < 10.**ttol, \
                    "Calculation of radial derivative through _evaluate and Rforce inconsistent for the %s potential" % p
            else:
                assert (tevaldr+trforce)**2./tevaldr**2. < 10.**ttol, \
                    "Calculation of radial derivative through _evaluate and Rforce inconsistent for the %s potential" % p
        #2nd radial
        hasR2= True
        from galpy.potential import PotentialError
        # Razor-thin disks only implement R2deriv in the plane
        if 'RazorThin' in p: R2z= 0.
        else: R2z= 0.1
        try:
            if isinstance(tp,potential.planarPotential):
                tp.R2deriv(1.2)
            else:
                tp.R2deriv(1.2,R2z)
        except PotentialError:
            hasR2= False
        if hasR2:
            if isinstance(tp,potential.planarPotential):
                tevaldr2= tp(1.2,phi=0.1,dR=2)
                tr2deriv= tp.R2deriv(1.2,phi=0.1)
            else:
                tevaldr2= tp(1.2,R2z,phi=0.1,dR=2)
                tr2deriv= tp.R2deriv(1.2,R2z,phi=0.1)
            if not tevaldr2 is None:
                if tevaldr2**2. < 10.**ttol:
                    # BUG FIX: was 'tr2deriv*2.' (doubling); squaring is what
                    # every analogous tolerance check in this file uses
                    assert tr2deriv**2. < 10.**ttol, \
                        "Calculation of 2nd radial derivative through _evaluate and R2deriv inconsistent for the %s potential" % p
                else:
                    assert (tevaldr2-tr2deriv)**2./tevaldr2**2. < 10.**ttol, \
                        "Calculation of 2nd radial derivative through _evaluate and R2deriv inconsistent for the %s potential" % p
        #1st phi: Phi(...,dphi=1) should equal -phiforce
        if isinstance(tp,potential.planarPotential):
            tevaldphi= tp(1.2,phi=0.1,dphi=1)
            tphiforce= tp.phiforce(1.2,phi=0.1)
        else:
            tevaldphi= tp(1.2,0.1,phi=0.1,dphi=1)
            tphiforce= tp.phiforce(1.2,0.1,phi=0.1)
        if not tevaldphi is None:
            if tevaldphi**2. < 10.**ttol:
                assert tphiforce**2. < 10.**ttol, \
                    "Calculation of azimuthal derivative through _evaluate and phiforce inconsistent for the %s potential" % p
            else:
                assert (tevaldphi+tphiforce)**2./tevaldphi**2. < 10.**ttol, \
                    "Calculation of azimuthal derivative through _evaluate and phiforce inconsistent for the %s potential" % p
        #2nd phi
        hasphi2= True
        try:
            if isinstance(tp,potential.planarPotential):
                tp.phi2deriv(1.2,phi=0.1)
            else:
                tp.phi2deriv(1.2,0.1,phi=0.1)
        except (PotentialError,AttributeError):
            hasphi2= False
        if hasphi2 and hasattr(tp,'_phi2deriv'):
            if isinstance(tp,potential.planarPotential):
                tevaldphi2= tp(1.2,phi=0.1,dphi=2)
                tphi2deriv= tp.phi2deriv(1.2,phi=0.1)
            else:
                tevaldphi2= tp(1.2,0.1,phi=0.1,dphi=2)
                tphi2deriv= tp.phi2deriv(1.2,0.1,phi=0.1)
            if not tevaldphi2 is None:
                if tevaldphi2**2. < 10.**ttol:
                    # BUG FIX: was 'tphi2deriv*2.'; see 2nd-radial check above
                    assert tphi2deriv**2. < 10.**ttol, \
                        "Calculation of 2nd azimuthal derivative through _evaluate and phi2deriv inconsistent for the %s potential" % p
                else:
                    assert (tevaldphi2-tphi2deriv)**2./tevaldphi2**2. < 10.**ttol, \
                        "Calculation of 2nd azimuthal derivative through _evaluate and phi2deriv inconsistent for the %s potential" % p
        # Test that much higher derivatives are not implemented
        try: tp(1.2,0.1,dR=4,dphi=10)
        except NotImplementedError: pass
        else: raise AssertionError('Higher-order derivative request in potential __call__ does not raise NotImplementedError for %s' % p)
        continue
        # NOTE(review): everything below is unreachable because of the
        # 'continue' above -- the mixed radial,vertical check is currently
        # disabled (its planar branch also duplicates the 3D one, and planar
        # potentials do not provide Rzderiv). Kept for reference, with the
        # same '*2.' -> '**2.' typo fixed, pending a decision to re-enable.
        #mixed radial,vertical
        if isinstance(tp,potential.planarPotential):
            tevaldrz= tp(1.2,0.1,phi=0.1,dR=1,dz=1)
            trzderiv= tp.Rzderiv(1.2,0.1,phi=0.1)
        else:
            tevaldrz= tp(1.2,0.1,phi=0.1,dR=1,dz=1)
            trzderiv= tp.Rzderiv(1.2,0.1,phi=0.1)
        if not tevaldrz is None:
            if tevaldrz**2. < 10.**ttol:
                assert trzderiv**2. < 10.**ttol, \
                    "Calculation of mixed radial,vertical derivative through _evaluate and z2deriv inconsistent for the %s potential" % p
            else:
                assert (tevaldrz-trzderiv)**2./tevaldrz**2. < 10.**ttol, \
                    "Calculation of mixed radial,vertical derivative through _evaluate and z2deriv inconsistent for the %s potential" % p
    return None
#Test that potentials can be multiplied or divided by a number
def test_amp_mult_divide():
    """Check that multiplying or dividing a potential by a number scales its
    amplitude: (num*pot)(x) == num*pot(x), (pot*num)(x) == pot(x)*num, and
    (pot/num)(x) == pot(x)/num, for linear, planar, and full 3D potentials.
    """
    #Grab all of the potentials
    pots= [p for p in dir(potential)
           if ('Potential' in p and not 'plot' in p and not 'RZTo' in p
               and not 'FullTo' in p and not 'toPlanar' in p
               and not 'evaluate' in p and not 'Wrapper' in p
               and not 'toVertical' in p)]
    pots.append('specialTwoPowerSphericalPotential')
    pots.append('DehnenTwoPowerSphericalPotential')
    pots.append('DehnenCoreTwoPowerSphericalPotential')
    pots.append('HernquistTwoPowerSphericalPotential')
    pots.append('JaffeTwoPowerSphericalPotential')
    pots.append('NFWTwoPowerSphericalPotential')
    pots.append('specialMiyamotoNagaiPotential')
    pots.append('specialMN3ExponentialDiskPotentialPD')
    pots.append('specialMN3ExponentialDiskPotentialSECH')
    pots.append('specialPowerSphericalPotential')
    pots.append('specialFlattenedPowerPotential')
    pots.append('testMWPotential')
    pots.append('testplanarMWPotential')
    pots.append('testlinearMWPotential')
    pots.append('mockInterpRZPotential')
    if _PYNBODY_LOADED:
        pots.append('mockSnapshotRZPotential')
        pots.append('mockInterpSnapshotRZPotential')
    pots.append('mockCosmphiDiskPotentialnegcp')
    pots.append('mockCosmphiDiskPotentialnegp')
    pots.append('mockDehnenBarPotentialT1')
    pots.append('mockDehnenBarPotentialTm1')
    pots.append('mockDehnenBarPotentialTm5')
    pots.append('mockEllipticalDiskPotentialT1')
    pots.append('mockEllipticalDiskPotentialTm1')
    pots.append('mockEllipticalDiskPotentialTm5')
    pots.append('mockSteadyLogSpiralPotentialT1')
    pots.append('mockSteadyLogSpiralPotentialTm1')
    pots.append('mockSteadyLogSpiralPotentialTm5')
    pots.append('mockTransientLogSpiralPotential')
    pots.append('mockFlatEllipticalDiskPotential') #for evaluate w/ nonaxi lists
    pots.append('mockMovingObjectPotential')
    pots.append('mockMovingObjectPotentialExplPlummer')
    pots.append('oblateHernquistPotential')
    pots.append('oblateNFWPotential')
    pots.append('oblatenoGLNFWPotential')
    pots.append('oblateJaffePotential')
    pots.append('prolateHernquistPotential')
    pots.append('prolateNFWPotential')
    pots.append('prolateJaffePotential')
    pots.append('triaxialHernquistPotential')
    pots.append('triaxialNFWPotential')
    pots.append('triaxialJaffePotential')
    pots.append('zRotatedTriaxialNFWPotential')
    pots.append('yRotatedTriaxialNFWPotential')
    pots.append('fullyRotatedTriaxialNFWPotential')
    pots.append('fullyRotatednoGLTriaxialNFWPotential')
    pots.append('HernquistTwoPowerTriaxialPotential')
    pots.append('NFWTwoPowerTriaxialPotential')
    pots.append('JaffeTwoPowerTriaxialPotential')
    pots.append('mockSCFZeeuwPotential')
    pots.append('mockSCFNFWPotential')
    pots.append('mockSCFAxiDensity1Potential')
    pots.append('mockSCFAxiDensity2Potential')
    pots.append('mockSCFDensityPotential')
    pots.append('mockAxisymmetricFerrersPotential')
    pots.append('sech2DiskSCFPotential')
    pots.append('expwholeDiskSCFPotential')
    pots.append('nonaxiDiskSCFPotential')
    pots.append('rotatingSpiralArmsPotential')
    pots.append('specialSpiralArmsPotential')
    pots.append('DehnenSmoothDehnenBarPotential')
    pots.append('mockDehnenSmoothBarPotentialT1')
    pots.append('mockDehnenSmoothBarPotentialTm1')
    pots.append('mockDehnenSmoothBarPotentialTm5')
    pots.append('mockDehnenSmoothBarPotentialDecay')
    pots.append('SolidBodyRotationSpiralArmsPotential')
    pots.append('triaxialLogarithmicHaloPotential')
    pots.append('CorotatingRotationSpiralArmsPotential')
    pots.append('GaussianAmplitudeDehnenBarPotential')
    pots.append('nestedListPotential')
    pots.append('mockInterpSphericalPotential')
    pots.append('mockInterpSphericalPotentialwForce')
    # Abstract bases / helpers that cannot be instantiated bare
    rmpots= ['Potential','MWPotential','MWPotential2014',
             'MovingObjectPotential',
             'interpRZPotential', 'linearPotential', 'planarAxiPotential',
             'planarPotential', 'verticalPotential','PotentialError',
             'SnapshotRZPotential','InterpSnapshotRZPotential',
             'EllipsoidalPotential','NumericalPotentialDerivativesMixin',
             'SphericalPotential','interpSphericalPotential']
    if False: #_TRAVIS: #travis CI
        rmpots.append('DoubleExponentialDiskPotential')
        rmpots.append('RazorThinExponentialDiskPotential')
    for p in rmpots:
        pots.remove(p)
    # Single evaluation point
    R,Z,phi= 0.75,0.2,1.76
    # Random amplitude factor per potential, drawn from [0.5,2.) rather than
    # [0,1.): the division checks below would otherwise divide by (near-)zero
    # values, which amplifies round-off past the absolute 1e-10 tolerance and
    # can even raise ZeroDivisionError (BUG FIX: was uniform(size=len(pots)));
    # the wider range also exercises amplitude factors > 1
    nums= numpy.random.uniform(0.5,2.,size=len(pots))
    for num,p in zip(nums,pots):
        #Setup instance of potential; mocks live in this test module
        try:
            tclass= getattr(potential,p)
        except AttributeError:
            tclass= getattr(sys.modules[__name__],p)
        tp= tclass()
        if hasattr(tp,'normalize'): tp.normalize(1.)
        if isinstance(tp,potential.linearPotential):
            assert numpy.fabs(tp(R)*num-(num*tp)(R)) < 1e-10, "Multiplying a linearPotential with a number does not behave as expected"
            # Other way...
            assert numpy.fabs(tp(R)*num-(tp*num)(R)) < 1e-10, "Multiplying a linearPotential with a number does not behave as expected"
            assert numpy.fabs(tp(R)/num-(tp/num)(R)) < 1e-10, "Dividing a linearPotential with a number does not behave as expected"
        elif isinstance(tp,potential.planarPotential):
            assert numpy.fabs(tp(R,phi=phi)*num-(num*tp)(R,phi=phi)) < 1e-10, "Multiplying a planarPotential with a number does not behave as expected"
            # Other way...
            assert numpy.fabs(tp(R,phi=phi)*num-(tp*num)(R,phi=phi)) < 1e-10, "Multiplying a planarPotential with a number does not behave as expected"
            assert numpy.fabs(tp(R,phi=phi)/num-(tp/num)(R,phi=phi)) < 1e-10, "Dividing a planarPotential with a number does not behave as expected"
        else:
            assert numpy.fabs(tp(R,Z,phi=phi)*num-(num*tp)(R,Z,phi=phi)) < 1e-10, "Multiplying a Potential with a number does not behave as expected"
            # Other way...
            assert numpy.fabs(tp(R,Z,phi=phi)*num-(tp*num)(R,Z,phi=phi)) < 1e-10, "Multiplying a Potential with a number does not behave as expected"
            assert numpy.fabs(tp(R,Z,phi=phi)/num-(tp/num)(R,Z,phi=phi)) < 1e-10, "Dividing a Potential with a number does not behave as expected"
    return None
#Test whether potentials that support array input do so correctly
def test_potential_array_input():
    """Test that potentials that support array input evaluate arrays
    consistently with a loop over point-by-point scalar evaluations, for
    the potential itself, all first and second derivatives, and the
    density."""
    #Grab all of the potentials
    pots= [p for p in dir(potential)
           if ('Potential' in p and not 'plot' in p and not 'RZTo' in p
               and not 'FullTo' in p and not 'toPlanar' in p
               and not 'evaluate' in p and not 'Wrapper' in p
               and not 'toVertical' in p)]
    pots.append('mockInterpSphericalPotential')
    pots.append('mockInterpSphericalPotentialwForce')
    rmpots= ['Potential','MWPotential','MWPotential2014',
             'interpRZPotential', 'linearPotential', 'planarAxiPotential',
             'planarPotential', 'verticalPotential','PotentialError',
             'EllipsoidalPotential','NumericalPotentialDerivativesMixin',
             'SphericalPotential','interpSphericalPotential']
    # Potentials that do not support array input
    rmpots.append('FerrersPotential')
    rmpots.append('PerfectEllipsoidPotential')
    rmpots.append('TriaxialHernquistPotential')
    rmpots.append('TriaxialJaffePotential')
    rmpots.append('TriaxialNFWPotential')
    rmpots.append('TwoPowerTriaxialPotential')
    rmpots.append('DoubleExponentialDiskPotential')
    rmpots.append('RazorThinExponentialDiskPotential')
    rmpots.append('AnyAxisymmetricRazorThinDiskPotential')
    rmpots.append('AnySphericalPotential')
    rmpots.append('SphericalShellPotential')
    rmpots.append('HomogeneousSpherePotential')
    rmpots.append('TriaxialGaussianPotential')
    # These cannot be setup without arguments
    rmpots.append('MovingObjectPotential')
    rmpots.append('SnapshotRZPotential')
    rmpots.append('InterpSnapshotRZPotential')
    # 2D ones that cannot use this test
    rmpots.append('CosmphiDiskPotential')
    rmpots.append('EllipticalDiskPotential')
    rmpots.append('LopsidedDiskPotential')
    rmpots.append('HenonHeilesPotential')
    rmpots.append('TransientLogSpiralPotential')
    rmpots.append('SteadyLogSpiralPotential')
    # 1D ones that cannot use this test
    rmpots.append('IsothermalDiskPotential')
    rmpots.append('KGPotential')
    for p in rmpots:
        pots.remove(p)
    # Grids of (R,z,phi,t) points at which to compare array vs. scalar input
    rs= numpy.linspace(0.1,2.,11)
    zs= numpy.linspace(-2.,2.,11)
    phis= numpy.linspace(0.,numpy.pi,11)
    ts= numpy.linspace(0.,10.,11)
    for p in pots:
        #if not 'NFW' in p: continue #For testing the test
        #Setup instance of potential
        try:
            tclass= getattr(potential,p)
        except AttributeError:
            tclass= getattr(sys.modules[__name__],p)
        tp= tclass()
        #Potential itself
        tpevals= numpy.array([tp(r,z,phi=phi,t=t) for (r,z,phi,t) in zip(rs,zs,phis,ts)])
        assert numpy.all(numpy.fabs(tp(rs,zs,phi=phis,t=ts)-tpevals) < 10.**-10.), \
            '{} evaluation does not work as expected for array inputs'.format(p)
        #Rforce
        tpevals= numpy.array([tp.Rforce(r,z,phi=phi,t=t) for (r,z,phi,t) in zip(rs,zs,phis,ts)])
        assert numpy.all(numpy.fabs(tp.Rforce(rs,zs,phi=phis,t=ts)-tpevals) < 10.**-10.), \
            '{} Rforce evaluation does not work as expected for array inputs'.format(p)
        #zforce
        tpevals= numpy.array([tp.zforce(r,z,phi=phi,t=t) for (r,z,phi,t) in zip(rs,zs,phis,ts)])
        assert numpy.all(numpy.fabs(tp.zforce(rs,zs,phi=phis,t=ts)-tpevals) < 10.**-10.), \
            '{} zforce evaluation does not work as expected for array inputs'.format(p)
        #phiforce
        tpevals= numpy.array([tp.phiforce(r,z,phi=phi,t=t) for (r,z,phi,t) in zip(rs,zs,phis,ts)])
        assert numpy.all(numpy.fabs(tp.phiforce(rs,zs,phi=phis,t=ts)-tpevals) < 10.**-10.), \
            '{} phiforce evaluation does not work as expected for array inputs'.format(p)
        #R2deriv
        if hasattr(tp,'_R2deriv'):
            tpevals= numpy.array([tp.R2deriv(r,z,phi=phi,t=t) for (r,z,phi,t) in zip(rs,zs,phis,ts)])
            assert numpy.all(numpy.fabs(tp.R2deriv(rs,zs,phi=phis,t=ts)-tpevals) < 10.**-10.), \
                '{} R2deriv evaluation does not work as expected for array inputs'.format(p)
        #z2deriv
        if hasattr(tp,'_z2deriv') \
                and not p == 'TwoPowerSphericalPotential': # latter bc done through R2deriv
            tpevals= numpy.array([tp.z2deriv(r,z,phi=phi,t=t) for (r,z,phi,t) in zip(rs,zs,phis,ts)])
            assert numpy.all(numpy.fabs(tp.z2deriv(rs,zs,phi=phis,t=ts)-tpevals) < 10.**-10.), \
                '{} z2deriv evaluation does not work as expected for array inputs'.format(p)
        #phi2deriv
        # NOTE(review): this gates on '_R2deriv' rather than '_phi2deriv' --
        # presumably potentials implementing R2deriv also support phi2deriv;
        # confirm whether this is intentional
        if hasattr(tp,'_R2deriv'):
            tpevals= numpy.array([tp.phi2deriv(r,z,phi=phi,t=t) for (r,z,phi,t) in zip(rs,zs,phis,ts)])
            assert numpy.all(numpy.fabs(tp.phi2deriv(rs,zs,phi=phis,t=ts)-tpevals) < 10.**-10.), \
                '{} phi2deriv evaluation does not work as expected for array inputs'.format(p)
        #Rzderiv
        if hasattr(tp,'_Rzderiv'):
            tpevals= numpy.array([tp.Rzderiv(r,z,phi=phi,t=t) for (r,z,phi,t) in zip(rs,zs,phis,ts)])
            assert numpy.all(numpy.fabs(tp.Rzderiv(rs,zs,phi=phis,t=ts)-tpevals) < 10.**-10.), \
                '{} Rzderiv evaluation does not work as expected for array inputs'.format(p)
        #Rphideriv
        if hasattr(tp,'_Rphideriv'):
            tpevals= numpy.array([tp.Rphideriv(r,z,phi=phi,t=t) for (r,z,phi,t) in zip(rs,zs,phis,ts)])
            assert numpy.all(numpy.fabs(tp.Rphideriv(rs,zs,phi=phis,t=ts)-tpevals) < 10.**-10.), \
                '{} Rphideriv evaluation does not work as expected for array inputs'.format(p)
        #dens
        tpevals= numpy.array([tp.dens(r,z,phi=phi,t=t) for (r,z,phi,t) in zip(rs,zs,phis,ts)])
        assert numpy.all(numpy.fabs(tp.dens(rs,zs,phi=phis,t=ts)-tpevals) < 10.**-10.), \
            '{} dens evaluation does not work as expected for array inputs'.format(p)
    return None
# Test that 1D potentials created using toVertical can handle array input if
# their 3D versions can
def test_toVertical_array():
    """Test that 1D potentials created using toVerticalPotential handle
    array input when their 3D parent potentials do, and that the
    module-level evaluatelinearPotentials/evaluatelinearForces functions
    also accept arrays."""
    #Grab all of the potentials
    pots= [p for p in dir(potential)
           if ('Potential' in p and not 'plot' in p and not 'RZTo' in p
               and not 'FullTo' in p and not 'toPlanar' in p
               and not 'evaluate' in p and not 'Wrapper' in p
               and not 'toVertical' in p)]
    pots.append('mockInterpSphericalPotential')
    pots.append('mockInterpSphericalPotentialwForce')
    rmpots= ['Potential','MWPotential','MWPotential2014',
             'interpRZPotential', 'linearPotential', 'planarAxiPotential',
             'planarPotential', 'verticalPotential','PotentialError',
             'EllipsoidalPotential','NumericalPotentialDerivativesMixin',
             'SphericalPotential','interpSphericalPotential']
    # Potentials that do not support array input
    rmpots.append('FerrersPotential')
    rmpots.append('PerfectEllipsoidPotential')
    rmpots.append('TriaxialHernquistPotential')
    rmpots.append('TriaxialJaffePotential')
    rmpots.append('TriaxialNFWPotential')
    rmpots.append('TwoPowerTriaxialPotential')
    rmpots.append('DoubleExponentialDiskPotential')
    rmpots.append('RazorThinExponentialDiskPotential')
    rmpots.append('AnyAxisymmetricRazorThinDiskPotential')
    rmpots.append('AnySphericalPotential')
    rmpots.append('SphericalShellPotential')
    rmpots.append('HomogeneousSpherePotential')
    rmpots.append('TriaxialGaussianPotential')
    # These cannot be setup without arguments
    rmpots.append('MovingObjectPotential')
    rmpots.append('SnapshotRZPotential')
    rmpots.append('InterpSnapshotRZPotential')
    for p in rmpots:
        pots.remove(p)
    # Grids of (x,t) points at which to compare array vs. scalar input
    xs= numpy.linspace(-2.,2.,11)
    ts= numpy.linspace(0.,10.,11)
    for p in pots:
        #if not 'NFW' in p: continue #For testing the test
        #Setup instance of potential
        try:
            tclass= getattr(potential,p)
        except AttributeError:
            tclass= getattr(sys.modules[__name__],p)
        tp= tclass()
        # Only do 3D --> 1D potentials
        if not isinstance(tp,potential.Potential): continue
        tp= potential.toVerticalPotential(tp,0.8,phi=0.2)
        #Potential itself
        tpevals= numpy.array([tp(x,t=t) for (x,t) in zip(xs,ts)])
        assert numpy.all(numpy.fabs(tp(xs,t=ts)-tpevals) < 10.**-10.), \
            '{} evaluation does not work as expected for array inputs for toVerticalPotential potentials'.format(p)
        #force
        tpevals= numpy.array([tp.force(x,t=t) for (x,t) in zip(xs,ts)])
        assert numpy.all(numpy.fabs(tp.force(xs,t=ts)-tpevals) < 10.**-10.), \
            '{} force evaluation does not work as expected for array inputs for toVerticalPotential'.format(p)
    # Also test Morgan's example
    pot= potential.toVerticalPotential(potential.MWPotential2014,1.)
    #Potential itself
    # (messages below previously formatted the stale loop variable `p`;
    # this check is about MWPotential2014)
    tpevals= numpy.array([potential.evaluatelinearPotentials(pot,x,t=t) for (x,t) in zip(xs,ts)])
    assert numpy.all(numpy.fabs(potential.evaluatelinearPotentials(pot,xs,t=ts)-tpevals) < 10.**-10.), \
        'MWPotential2014 evaluation does not work as expected for array inputs for toVerticalPotential potentials'
    #Rforce
    tpevals= numpy.array([potential.evaluatelinearForces(pot,x,t=t) for (x,t) in zip(xs,ts)])
    assert numpy.all(numpy.fabs(potential.evaluatelinearForces(pot,xs,t=ts)-tpevals) < 10.**-10.), \
        'MWPotential2014 force evaluation does not work as expected for array inputs for toVerticalPotential'
    return None
#Test that all potentials can be evaluated at zero
def test_potential_at_zero():
    """Check that every supported potential can be evaluated at the origin
    (R,z)=(0,0) without producing NaN, for both scalar and array input."""
    # Candidate potential class names from the potential module
    pots= [p for p in dir(potential)
           if ('Potential' in p and not 'plot' in p and not 'RZTo' in p
               and not 'FullTo' in p and not 'toPlanar' in p
               and not 'evaluate' in p and not 'Wrapper' in p
               and not 'toVertical' in p)]
    # Mock/special potentials defined in this test module
    pots.extend(['specialMiyamotoNagaiPotential',
                 'specialMN3ExponentialDiskPotentialPD',
                 'specialMN3ExponentialDiskPotentialSECH',
                 'specialPowerSphericalPotential',
                 'specialFlattenedPowerPotential',
                 'testMWPotential',
                 'mockInterpRZPotential'])
    if _PYNBODY_LOADED:
        pots.extend(['mockSnapshotRZPotential',
                     'mockInterpSnapshotRZPotential'])
    pots.extend(['oblateHernquistPotential',
                 'oblateNFWPotential',
                 'oblatenoGLNFWPotential',
                 'oblateJaffePotential',
                 'prolateHernquistPotential',
                 'prolateNFWPotential',
                 'prolateJaffePotential',
                 'triaxialHernquistPotential',
                 'triaxialNFWPotential',
                 'triaxialJaffePotential',
                 'zRotatedTriaxialNFWPotential', # Difficult bc of rotation
                 'yRotatedTriaxialNFWPotential', # Difficult bc of rotation
                 'fullyRotatedTriaxialNFWPotential', # Difficult bc of rotation
                 'fullyRotatednoGLTriaxialNFWPotential', # Difficult bc of rotation
                 'HernquistTwoPowerTriaxialPotential',
                 'NFWTwoPowerTriaxialPotential',
                 'mockSCFZeeuwPotential',
                 'mockSCFNFWPotential',
                 'mockSCFAxiDensity1Potential',
                 'mockSCFAxiDensity2Potential',
                 'mockSCFDensityPotential',
                 'sech2DiskSCFPotential',
                 'expwholeDiskSCFPotential',
                 'nonaxiDiskSCFPotential',
                 'mockInterpSphericalPotential',
                 'mockInterpSphericalPotentialwForce'])
    # Names to exclude: abstract bases, aggregates, potentials that need
    # constructor arguments, and potentials not supported at zero (yet)
    skip_names= ['Potential','MWPotential','MWPotential2014',
                 'MovingObjectPotential',
                 'interpRZPotential','linearPotential','planarAxiPotential',
                 'planarPotential','verticalPotential','PotentialError',
                 'SnapshotRZPotential','InterpSnapshotRZPotential',
                 'EllipsoidalPotential','NumericalPotentialDerivativesMixin',
                 'SphericalPotential','interpSphericalPotential',
                 # Not yet supported at zero, TO DO
                 'BurkertPotential',
                 'RazorThinExponentialDiskPotential',
                 'RingPotential', # Easy, but who cares?
                 'SpiralArmsPotential',
                 'TwoPowerSphericalPotential',
                 # 2D ones that cannot use this test
                 'CosmphiDiskPotential','EllipticalDiskPotential',
                 'LopsidedDiskPotential','HenonHeilesPotential',
                 'TransientLogSpiralPotential','SteadyLogSpiralPotential',
                 # 1D ones that cannot use this test
                 'IsothermalDiskPotential','KGPotential']
    for name in skip_names:
        pots.remove(name)
    for p in pots:
        # Instantiate: class lives either in the potential module or here
        try:
            tclass= getattr(potential,p)
        except AttributeError:
            tclass= getattr(sys.modules[__name__],p)
        tp= tclass()
        if hasattr(tp,'normalize'):
            tp.normalize(1.)
        # Scalar evaluation at the origin
        assert not numpy.isnan(potential.evaluatePotentials(tp,0,0,phi=0.,t=0.)), 'Potential {} evaluated at zero gave NaN'.format(p)
        # Array evaluation; skip potentials that don't support array input
        if p in ('FerrersPotential','HomogeneousSpherePotential',
                 'PerfectEllipsoidPotential','SphericalShellPotential',
                 'AnyAxisymmetricRazorThinDiskPotential',
                 'AnySphericalPotential') \
                or 'riaxial' in p or 'oblate' in p or 'prolate' in p:
            continue
        assert not numpy.any(numpy.isnan(potential.evaluatePotentials(tp,numpy.zeros(4),numpy.zeros(4),phi=0.,t=0.))), 'Potential {} evaluated at zero gave NaN'.format(p)
    return None
#Test that all potentials can be evaluated with large numbers and with infinity
def test_potential_at_infinity():
    """Check that every supported potential can be evaluated at very large
    radii and at infinity without producing NaN.

    One of the main reasons for this test is the implementation of vesc,
    which uses the potential at infinity."""
    # Import what vesc uses for infinity
    from galpy.potential.plotEscapecurve import _INF
    # Candidate potential class names from the potential module
    pots= [p for p in dir(potential)
           if ('Potential' in p and not 'plot' in p and not 'RZTo' in p
               and not 'FullTo' in p and not 'toPlanar' in p
               and not 'evaluate' in p and not 'Wrapper' in p
               and not 'toVertical' in p)]
    # Mock/special potentials defined in this test module
    pots.extend(['DehnenTwoPowerSphericalPotential',
                 'DehnenCoreTwoPowerSphericalPotential',
                 'HernquistTwoPowerSphericalPotential',
                 'JaffeTwoPowerSphericalPotential',
                 'specialMiyamotoNagaiPotential',
                 'specialMN3ExponentialDiskPotentialPD',
                 'specialMN3ExponentialDiskPotentialSECH',
                 'specialPowerSphericalPotential',
                 'specialFlattenedPowerPotential',
                 'testMWPotential',
                 'mockInterpRZPotential',
                 'oblateHernquistPotential',
                 'oblateNFWPotential',
                 'oblatenoGLNFWPotential',
                 'oblateJaffePotential',
                 'prolateHernquistPotential',
                 'prolateNFWPotential',
                 'prolateJaffePotential',
                 'triaxialHernquistPotential',
                 'triaxialNFWPotential',
                 'triaxialJaffePotential',
                 'mockSCFZeeuwPotential',
                 'mockSCFNFWPotential',
                 'mockSCFAxiDensity1Potential',
                 'mockSCFAxiDensity2Potential',
                 'mockSCFDensityPotential',
                 'sech2DiskSCFPotential',
                 'expwholeDiskSCFPotential',
                 'nonaxiDiskSCFPotential',
                 'mockInterpSphericalPotential',
                 'mockInterpSphericalPotentialwForce'])
    # Names to exclude: abstract bases, aggregates, potentials that need
    # constructor arguments, and potentials not supported at infinity (yet)
    skip_names= ['Potential','MWPotential','MWPotential2014',
                 'MovingObjectPotential',
                 'interpRZPotential','linearPotential','planarAxiPotential',
                 'planarPotential','verticalPotential','PotentialError',
                 'SnapshotRZPotential','InterpSnapshotRZPotential',
                 'EllipsoidalPotential','NumericalPotentialDerivativesMixin',
                 'SphericalPotential','interpSphericalPotential',
                 # Not yet supported at infinity, TO DO
                 'FerrersPotential',
                 'KuzminKutuzovStaeckelPotential',
                 'RazorThinExponentialDiskPotential',
                 'SoftenedNeedleBarPotential', # Not that hard, but haven't done it
                 'SpiralArmsPotential', # Need to have 0 x cos = 0
                 'TwoPowerTriaxialPotential',
                 # 2D ones that cannot use this test
                 'CosmphiDiskPotential','EllipticalDiskPotential',
                 'LopsidedDiskPotential','HenonHeilesPotential',
                 'TransientLogSpiralPotential','SteadyLogSpiralPotential',
                 # 1D ones that cannot use this test
                 'IsothermalDiskPotential','KGPotential']
    for name in skip_names:
        pots.remove(name)
    for p in pots:
        # Instantiate: class lives either in the potential module or here
        try:
            tclass= getattr(potential,p)
        except AttributeError:
            tclass= getattr(sys.modules[__name__],p)
        tp= tclass()
        if hasattr(tp,'normalize'):
            tp.normalize(1.)
        # Scalar evaluation at numpy.inf and at the large-but-finite _INF
        assert not numpy.isnan(potential.evaluatePotentials(tp,numpy.inf,0,phi=0.,t=0.)), 'Potential {} evaluated at infinity gave NaN'.format(p)
        assert not numpy.isnan(potential.evaluatePotentials(tp,_INF,0,phi=0.,t=0.)), 'Potential {} evaluated at vesc _INF gave NaN'.format(p)
        # Array evaluation; skip potentials that don't support array input
        if p in ('HomogeneousSpherePotential','PerfectEllipsoidPotential',
                 'SphericalShellPotential',
                 'AnyAxisymmetricRazorThinDiskPotential',
                 'AnySphericalPotential') \
                or 'riaxial' in p or 'oblate' in p or 'prolate' in p:
            continue
        assert not numpy.any(numpy.isnan(potential.evaluatePotentials(tp,numpy.inf*numpy.ones(4),numpy.zeros(4),phi=0.,t=0.))), 'Potential {} evaluated at infinity gave NaN'.format(p)
        assert not numpy.any(numpy.isnan(potential.evaluatePotentials(tp,_INF*numpy.ones(4),numpy.zeros(4),phi=0.,t=0.))), 'Potential {} evaluated at vesc _INF gave NaN'.format(p)
    return None
# Test that the amplitude for potentials with a finite mass and amp=mass is
# correct through the relation -r^2 F_r =~ GM at large r
def test_finitemass_amp():
    """Test that for potentials with a finite total mass and amp=mass, the
    amplitude is indeed the total mass, through the relation
    -r^2 F_r ~ GM at large r.

    Checks Kepler, Isochrone, Plummer, SphericalShell, Ring, KuzminDisk,
    and MiyamotoNagai potentials."""
    mass= 3.
    def _rforce_mass(pot,r_large):
        # -r^2 F_r evaluated along the R=z diagonal; approaches GM
        # (= mass in internal units) for r much larger than the scale
        return -r_large**2.*pot.rforce(r_large/numpy.sqrt(2.),
                                       r_large/numpy.sqrt(2.))
    # KeplerPotential
    kp= potential.KeplerPotential(amp=mass)
    assert numpy.fabs(mass-_rforce_mass(kp,10000.)) < 1e-8, 'Mass amp parameter of KeplerPotential does not equal total mass'
    # IsochronePotential; needs a much larger radius to converge
    ip= potential.IsochronePotential(amp=mass,b=0.4)
    assert numpy.fabs(mass-_rforce_mass(ip,1000000000.)) < 1e-8, 'Mass amp parameter of IsochronePotential does not equal total mass'
    # PlummerPotential
    pp= potential.PlummerPotential(amp=mass,b=0.4)
    assert numpy.fabs(mass-_rforce_mass(pp,10000.)) < 1e-8, 'Mass amp parameter of PlummerPotential does not equal total mass'
    # SphericalShellPotential
    sp= potential.SphericalShellPotential(amp=mass,a=0.4)
    assert numpy.fabs(mass-_rforce_mass(sp,10000.)) < 1e-8, 'Mass amp parameter of SphericalShellPotential does not equal total mass'
    # RingPotential
    rp= potential.RingPotential(amp=mass,a=0.4)
    assert numpy.fabs(mass-_rforce_mass(rp,10000.)) < 1e-8, 'Mass amp parameter of RingPotential does not equal total mass'
    # KuzminDiskPotential; needs a much larger radius to converge
    kdp= potential.KuzminDiskPotential(amp=mass,a=0.4)
    assert numpy.fabs(mass-_rforce_mass(kdp,1000000000.)) < 1e-8, 'Mass amp parameter of KuzminDiskPotential does not equal total mass'
    # MiyamotoNagaiPotential; needs a much larger radius to converge
    mp= potential.MiyamotoNagaiPotential(amp=mass,a=0.4)
    assert numpy.fabs(mass-_rforce_mass(mp,1000000000.)) < 1e-8, 'Mass amp parameter of MiyamotoNagaiPotential does not equal total mass'
    return None
# Test that the spherically radial force is correct
def test_rforce():
    """Check the spherical radial force of a spherical potential against
    its cylindrical components: Rforce = rforce x R / r."""
    plum= potential.PlummerPotential(amp=2.,b=2.)
    R, z= 1.3, 0.4
    r= numpy.sqrt(R*R+z*z)
    # Check the instance method...
    instance_diff= plum.Rforce(R,z)*r/R-plum.rforce(R,z)
    assert numpy.fabs(instance_diff) < 10.**-10., 'rforce does not behave as expected for spherical potentials'
    # ...and the module-level evaluation functions
    module_diff= potential.evaluateRforces(plum,R,z)*r/R \
        -potential.evaluaterforces(plum,R,z)
    assert numpy.fabs(module_diff) < 10.**-10., 'evaluaterforces does not behave as expected for spherical potentials'
    return None
def test_rforce_dissipative():
    """Check rforce for dissipative forces: dynamical friction along a
    radial orbit at z=0 is effectively spherical, so Rforce x r / R must
    equal rforce."""
    plum= potential.PlummerPotential(amp=1.12,b=2.)
    fric= potential.ChandrasekharDynamicalFrictionForce(\
        GMs=0.01,const_lnLambda=8.,
        dens=plum,sigmar=lambda r: 1./numpy.sqrt(2.))
    R, z, phi= 1.3, 0., 1.1
    v= [0.1,0.,0.]
    r= numpy.sqrt(R*R+z*z)
    # Instance method of the dissipative force itself
    assert numpy.fabs(fric.Rforce(R,z,phi=phi,v=v)*r/R-fric.rforce(R,z,phi=phi,v=v)) < 10.**-10., 'rforce does not behave as expected for spherical potentials for dissipative forces'
    # Module-level evaluation of a mixed [potential,force] list
    combo= [plum,fric]
    assert numpy.fabs(potential.evaluateRforces(combo,R,z,phi=phi,v=v)*r/R-potential.evaluaterforces(combo,R,z,phi=phi,v=v)) < 10.**-10., 'evaluaterforces does not behave as expected for spherical potentials for dissipative forces'
    # Module-level evaluation of the bare dissipative force
    assert numpy.fabs(potential.evaluateRforces(fric,R,z,phi=phi,v=v)*r/R-potential.evaluaterforces(fric,R,z,phi=phi,v=v)) < 10.**-10., 'evaluaterforces does not behave as expected for spherical potentials for dissipative forces'
    return None
# Test that the spherically second radial derivative is correct
def test_r2deriv():
    """Test that the spherical second radial derivative r2deriv is
    consistent with the cylindrical second derivatives for a spherical
    potential, both through instance methods and through the module-level
    evaluate* functions."""
    # For spherical potentials: Rforce = rforce x R / r; zforce = rforce x z / r
    # and R2deriv  = r2deriv x (R/r)^2 - rforce x z^2/r^3
    # and z2deriv  = r2deriv x (z/r)^2 - rforce x R^2/r^3
    # and Rzderiv  = r2deriv x Rz/r^2  + rforce x Rz/r^3
    pp= potential.PlummerPotential(amp=2.,b=2.)
    R,z= 1.3, 0.4
    r= numpy.sqrt(R*R+z*z)  # spherical radius of the test point
    # Instance-method checks of the three identities above
    assert numpy.fabs(pp.R2deriv(R,z)-pp.r2deriv(R,z)*(R/r)**2.+pp.rforce(R,z)*z**2./r**3.) < 10.**-10., 'r2deriv does not behave as expected for spherical potentials'
    assert numpy.fabs(pp.z2deriv(R,z)-pp.r2deriv(R,z)*(z/r)**2.+pp.rforce(R,z)*R**2./r**3.) < 10.**-10., 'r2deriv does not behave as expected for spherical potentials'
    assert numpy.fabs(pp.Rzderiv(R,z)-pp.r2deriv(R,z)*R*z/r**2.-pp.rforce(R,z)*R*z/r**3.) < 10.**-10., 'r2deriv does not behave as expected for spherical potentials'
    # Same three identities through the module-level evaluation functions
    assert numpy.fabs(potential.evaluateR2derivs([pp],R,z)-potential.evaluater2derivs([pp],R,z)*(R/r)**2.+potential.evaluaterforces([pp],R,z)*z**2./r**3.) < 10.**-10., 'r2deriv does not behave as expected for spherical potentials'
    assert numpy.fabs(potential.evaluatez2derivs([pp],R,z)-potential.evaluater2derivs([pp],R,z)*(z/r)**2.+potential.evaluaterforces([pp],R,z)*R**2./r**3.) < 10.**-10., 'r2deriv does not behave as expected for spherical potentials'
    assert numpy.fabs(potential.evaluateRzderivs([pp],R,z)-potential.evaluater2derivs([pp],R,z)*R*z/r**2.-potential.evaluaterforces([pp],R,z)*R*z/r**3.) < 10.**-10., 'r2deriv does not behave as expected for spherical potentials'
    return None
# Check that the masses are calculated correctly for spherical potentials
def test_mass_spher():
    """Check that enclosed masses are calculated correctly for spherical
    potentials: power-law, power-law with cut-off, Jaffe, Hernquist, NFW
    (against analytic limits), and Burkert (against the general
    SphericalPotential implementation)."""
    #PowerPotential close to Kepler should be very steep
    pp= potential.PowerSphericalPotential(amp=2.,alpha=2.999)
    kp= potential.KeplerPotential(amp=2.)
    assert numpy.fabs((((3.-2.999)/(4.*numpy.pi)*pp.mass(10.)-kp.mass(10.)))/kp.mass(10.)) < 10.**-2., "Mass for PowerSphericalPotential close to KeplerPotential is not close to KeplerPotential's mass"
    pp= potential.PowerSphericalPotential(amp=2.)
    #mass = amp x r^(3-alpha)
    tR= 1.
    assert numpy.fabs(potential.mass(pp,tR,forceint=True)-pp._amp*tR**(3.-pp.alpha)) < 10.**-10., 'Mass for PowerSphericalPotential not as expected'
    tR= 2.
    assert numpy.fabs(potential.mass([pp],tR,forceint=True)-pp._amp*tR**(3.-pp.alpha)) < 10.**-10., 'Mass for PowerSphericalPotential not as expected'
    tR= 20.
    assert numpy.fabs(pp.mass(tR,forceint=True)-pp._amp*tR**(3.-pp.alpha)) < 10.**-9., 'Mass for PowerSphericalPotential not as expected'
    #Test that for a cut-off potential, the mass far beyond the cut-off is
    # 2pi rc^(3-alpha) gamma(1.5-alpha/2)
    pp= potential.PowerSphericalPotentialwCutoff(amp=2.)
    from scipy import special
    expecMass= 2.*pp._amp*numpy.pi*pp.rc**(3.-pp.alpha)*special.gamma(1.5-pp.alpha/2.)
    tR= 5.
    assert numpy.fabs((pp.mass(tR,forceint=True)-expecMass)/expecMass) < 10.**-6., 'Mass of PowerSphericalPotentialwCutoff far beyond the cut-off not as expected'
    tR= 15.
    assert numpy.fabs((pp.mass(tR,forceint=True)-expecMass)/expecMass) < 10.**-6., 'Mass of PowerSphericalPotentialwCutoff far beyond the cut-off not as expected'
    tR= 50.
    assert numpy.fabs((pp.mass(tR,forceint=True)-expecMass)/expecMass) < 10.**-6., 'Mass of PowerSphericalPotentialwCutoff far beyond the cut-off not as expected'
    #Jaffe and Hernquist both have finite masses, NFW diverges logarithmically
    jp= potential.JaffePotential(amp=2.,a=0.1)
    hp= potential.HernquistPotential(amp=2.,a=0.1)
    np= potential.NFWPotential(amp=2.,a=0.1)
    tR= 10.
    # Limiting behavior
    jaffemass= jp._amp*(1.-jp.a/tR)
    hernmass= hp._amp/2.*(1.-2.*hp.a/tR)
    nfwmass= np._amp*(numpy.log(tR/np.a)-1.+np.a/tR)
    assert numpy.fabs((jp.mass(tR,forceint=True)-jaffemass)/jaffemass) < 10.**-3., 'Limit mass for Jaffe potential not as expected'
    assert numpy.fabs((hp.mass(tR,forceint=True)-hernmass)/hernmass) < 10.**-3., 'Limit mass for Hernquist potential not as expected'
    assert numpy.fabs((np.mass(tR,forceint=True)-nfwmass)/nfwmass) < 10.**-2., 'Limit mass for NFW potential not as expected'
    tR= 200.
    # Limiting behavior at larger radius, where the limits are more accurate
    jaffemass= jp._amp*(1.-jp.a/tR)
    hernmass= hp._amp/2.*(1.-2.*hp.a/tR)
    nfwmass= np._amp*(numpy.log(tR/np.a)-1.+np.a/tR)
    assert numpy.fabs((jp.mass(tR,forceint=True)-jaffemass)/jaffemass) < 10.**-6., 'Limit mass for Jaffe potential not as expected'
    assert numpy.fabs((hp.mass(tR,forceint=True)-hernmass)/hernmass) < 10.**-6., 'Limit mass for Hernquist potential not as expected'
    assert numpy.fabs((np.mass(tR,forceint=True)-nfwmass)/nfwmass) < 10.**-4., 'Limit mass for NFW potential not as expected'
    # Burkert as an example of a SphericalPotential
    bp= potential.BurkertPotential(amp=2.,a=3.)
    assert numpy.fabs(bp.mass(4.2,forceint=True)-bp.mass(4.2)) < 1e-6, "Mass computed with SphericalPotential's general implementation incorrect"
    return None
# Check that the masses are implemented correctly for spherical potentials
def test_mass_spher_analytic():
    """Check that the explicitly-implemented masses of spherical potentials
    agree with a direct numerical integral of the density."""
    tR= 2.
    # (potential instance, failure message) pairs for all TwoPowerSpherical
    # family members with explicit mass implementations, plus Plummer
    cases= [
        (potential.DehnenCoreSphericalPotential(amp=2.),
         'Explicit mass does not agree with integral of the density for Dehnen Core potential'),
        (potential.JaffePotential(amp=2.),
         'Explicit mass does not agree with integral of the density for Jaffe potential'),
        (potential.HernquistPotential(amp=2.),
         'Explicit mass does not agree with integral of the density for Hernquist potential'),
        (potential.NFWPotential(amp=2.),
         'Explicit mass does not agree with integral of the density for NFW potential'),
        (potential.TwoPowerSphericalPotential(amp=2.),
         'Explicit mass does not agree with integral of the density for TwoPowerSpherical potential'),
        (potential.DehnenSphericalPotential(amp=2.),
         'Explicit mass does not agree with integral of the density for DehnenSphericalPotential potential, for not z is None'),
        (potential.PlummerPotential(amp=2.,b=1.3),
         'Explicit mass does not agree with integral of the density for Plummer potential'),
        ]
    for pot,msg in cases:
        # forceint=True forces the numerical integral of the density
        assert numpy.fabs(pot.mass(tR,forceint=True)-pot.mass(tR)) < 10.**-10., msg
    return None
# Check that the masses are calculated correctly for axisymmetric potentials
def test_mass_axi():
    """Check that enclosed masses are calculated correctly for axisymmetric
    potentials (Miyamoto-Nagai, double-exponential disk, razor-thin
    exponential disk, Kuzmin disk) and that non-axisymmetric potentials
    raise NotImplementedError."""
    #For Miyamoto-Nagai, we know that mass integrated over everything should be equal to amp, so
    mp= potential.MiyamotoNagaiPotential(amp=1.)
    assert numpy.fabs(mp.mass(200.,20.)-1.) < 0.01, 'Total mass of Miyamoto-Nagai potential w/ amp=1 is not equal to 1'
    # Also spherical
    assert numpy.fabs(mp.mass(200.)-1.) < 0.01, 'Total mass of Miyamoto-Nagai potential w/ amp=1 is not equal to 1'
    #For a double-exponential disk potential, the
    # mass(R,z) = amp x hR^2 x hz x (1-(1+R/hR)xe^(-R/hR)) x (1-e^(-Z/hz)
    dp= potential.DoubleExponentialDiskPotential(amp=2.)
    def dblexpmass(r,z,dp):
        # Analytic mass enclosed in the cylinder (R<r, |Z|<z) from the formula above
        return 4.*numpy.pi*dp._amp*dp._hr**2.*dp._hz*(1.-(1.+r/dp._hr)*numpy.exp(-r/dp._hr))*(1.-numpy.exp(-z/dp._hz))
    # Compare at a range of (R,z), with absolute tolerances near the center
    # and relative tolerances at larger radii
    tR,tz= 0.01,0.01
    assert numpy.fabs((dp.mass(tR,tz,forceint=True)-dblexpmass(tR,tz,dp))) < 5e-8, 'Mass for DoubleExponentialDiskPotential incorrect'
    tR,tz= 0.1,0.05
    assert numpy.fabs((dp.mass(tR,tz,forceint=True)-dblexpmass(tR,tz,dp))) < 3e-7, 'Mass for DoubleExponentialDiskPotential incorrect'
    tR,tz= 1.,0.1
    assert numpy.fabs((dp.mass(tR,tz,forceint=True)-dblexpmass(tR,tz,dp))) < 1e-6, 'Mass for DoubleExponentialDiskPotential incorrect'
    tR,tz= 5.,0.1
    assert numpy.fabs((dp.mass(tR,tz,forceint=True)-dblexpmass(tR,tz,dp))/dblexpmass(tR,tz,dp)) < 10.**-5., 'Mass for DoubleExponentialDiskPotential incorrect'
    tR,tz= 5.,1.
    assert numpy.fabs((dp.mass(tR,tz,forceint=True)-dblexpmass(tR,tz,dp))/dblexpmass(tR,tz,dp)) < 10.**-5., 'Mass for DoubleExponentialDiskPotential incorrect'
    # Razor thin disk
    rp= potential.RazorThinExponentialDiskPotential(amp=2.)
    def razexpmass(r,z,dp):
        # Analytic mass enclosed in R<r for a razor-thin exponential disk
        # (z-independent since all mass is in the plane)
        return 2.*numpy.pi*rp._amp*rp._hr**2.*(1.-(1.+r/rp._hr)*numpy.exp(-r/rp._hr))
    tR,tz= 0.01,0.01
    assert numpy.fabs((rp.mass(tR,tz)-razexpmass(tR,tz,rp))/razexpmass(tR,tz,rp)) < 10.**-10., 'Mass for RazorThinExponentialDiskPotential incorrect'
    tR,tz= 0.1,0.05
    assert numpy.fabs((rp.mass(tR,tz)-razexpmass(tR,tz,rp))/razexpmass(tR,tz,rp)) < 10.**-10., 'Mass for RazorThinExponentialDiskPotential incorrect'
    tR,tz= 1.,0.1
    assert numpy.fabs((rp.mass(tR,tz)-razexpmass(tR,tz,rp))/razexpmass(tR,tz,rp)) < 10.**-10., 'Mass for RazorThinExponentialDiskPotential incorrect'
    tR,tz= 5.,0.1
    assert numpy.fabs((rp.mass(tR,tz)-razexpmass(tR,tz,rp))/razexpmass(tR,tz,rp)) < 10.**-10., 'Mass for RazorThinExponentialDiskPotential incorrect'
    tR,tz= 5.,1.
    assert numpy.fabs((rp.mass(tR,tz)-razexpmass(tR,tz,rp))/razexpmass(tR,tz,rp)) < 10.**-10., 'Mass for RazorThinExponentialDiskPotential incorrect'
    # Kuzmin disk, amp = mass
    kp= potential.KuzminDiskPotential(amp=2.,a=3.)
    assert numpy.fabs(kp.mass(1000.,20.)-2.) < 1e-2, 'Mass for KuzminDiskPotential incorrect'
    assert numpy.fabs(kp.mass(1000.)-2.) < 1e-2, 'Mass for KuzminDiskPotential incorrect'
    #Test that nonAxi raises error
    from galpy.orbit import Orbit
    mop= potential.MovingObjectPotential(Orbit([1.,0.1,1.1,0.1,0.,0.]))
    with pytest.raises(NotImplementedError) as excinfo:
        mop.mass(1.,0.)
    # also for lists
    with pytest.raises(NotImplementedError) as excinfo:
        potential.mass(mop,1.,0.)
    with pytest.raises(NotImplementedError) as excinfo:
        potential.mass([mop],1.,0.)
    return None
# Check that the masses are calculated correctly for spheroidal potentials
def test_mass_spheroidal():
# PerfectEllipsoidPotential: total mass is amp, no matter what the axis ratio
pep= potential.PerfectEllipsoidPotential(amp=2.,a=3.,b=1.3,c=1.9)
assert numpy.fabs(pep.mass(1000.)-2.) < 1e-2, 'Total mass for PerfectEllipsoidPotential is incorrect'
pep= potential.PerfectEllipsoidPotential(amp=2.,a=3.,b=1.,c=1.9)
assert numpy.fabs(pep.mass(1000.)-2.) < 1e-2, 'Total mass for PerfectEllipsoidPotential is incorrect'
pep= potential.PerfectEllipsoidPotential(amp=2.,a=3.,b=1.,c=1.)
assert numpy.fabs(pep.mass(1000.)-2.) < 1e-2, 'Total mass for PerfectEllipsoidPotential is incorrect'
pep= potential.PerfectEllipsoidPotential(amp=2.,a=3.,b=.7,c=.5)
assert numpy.fabs(pep.mass(1000.)-2.) < 1e-2, 'Total mass for PerfectEllipsoidPotential is incorrect'
# For TwoPowerTriaxial, the masses should be bxc times that for the spherical version
b= 0.7
c= 0.5
tpp= potential.TriaxialJaffePotential(amp=2.,a=3.,b=b,c=c)
sp= potential.JaffePotential(amp=2.,a=3.)
assert numpy.fabs(tpp.mass(1.3)/b/c-sp.mass(1.3)) < 1e-6, 'TwoPowerTriaxialPotential mass incorrect'
tpp= potential.TriaxialHernquistPotential(amp=2.,a=3.,b=b,c=c)
sp= potential.HernquistPotential(amp=2.,a=3.)
assert numpy.fabs(tpp.mass(1.3)/b/c-sp.mass(1.3)) < 1e-6, 'TwoPowerTriaxialPotential mass incorrect'
tpp= potential.TriaxialNFWPotential(amp=2.,a=3.,b=b,c=c)
sp= potential.NFWPotential(amp=2.,a=3.)
assert numpy.fabs(tpp.mass(1.3)/b/c-sp.mass(1.3)) < 1e-6, 'TwoPowerTriaxialPotential mass incorrect'
tpp= potential.TwoPowerTriaxialPotential(amp=2.,a=3.,b=b,c=c,alpha=1.1,beta=4.1)
sp= potential.TwoPowerSphericalPotential(amp=2.,a=3.,alpha=1.1,beta=4.1)
assert numpy.fabs(tpp.mass(1.3)/b/c-sp.mass(1.3)) < 1e-6, 'TwoPowerTriaxialPotential mass incorrect'
# For TriaxialGaussianPotential, total mass is amp, no matter b/c
pep= potential.TriaxialGaussianPotential(amp=2.,sigma=3.,b=1.3,c=1.9)
assert numpy.fabs(pep.mass(1000.)-2.) < 1e-2, 'Total mass for TriaxialGaussianPotential is incorrect'
pep= potential.TriaxialGaussianPotential(amp=2.,sigma=3.,b=1.,c=1.9)
assert numpy.fabs(pep.mass(1000.)-2.) < 1e-2, 'Total mass for TriaxialGaussianPotential is incorrect'
pep= potential.TriaxialGaussianPotential(amp=2.,sigma=3.,b=1.,c=1.)
assert numpy.fabs(pep.mass(1000.)-2.) < 1e-2, 'Total mass for TriaxialGaussianPotential is incorrect'
pep= potential.TriaxialGaussianPotential(amp=2.,sigma=3.,b=.7,c=.5)
assert numpy.fabs(pep.mass(1000.)-2.) < 1e-2, 'Total mass for TriaxialGaussianPotential is incorrect'
# Dummy EllipsoidalPotential for testing the general approach
from galpy.potential.EllipsoidalPotential import EllipsoidalPotential
    class dummy(EllipsoidalPotential):
        """Minimal concrete EllipsoidalPotential with density profile
        rho(m) = m^-2, used only to exercise the general
        EllipsoidalPotential mass machinery."""
        def __init__(self,amp=1.,b=1.,c=1.,
                     zvec=None,pa=None,glorder=50,
                     normalize=False,ro=None,vo=None):
            # Forward everything to the base class; note that 'normalize'
            # is accepted but deliberately not passed on (this test never
            # normalizes the dummy potential)
            EllipsoidalPotential.__init__(self,amp=amp,b=b,c=c,
                                          zvec=zvec,pa=pa,
                                          glorder=glorder,
                                          ro=ro,vo=vo)
            return None
        def _mdens(self,m):
            # Ellipsoidal density as a function of the ellipsoidal radius m
            return m**-2.
b= 1.2
c= 1.7
dp= dummy(amp=2.,b=b,c=c)
r= 1.9
assert numpy.fabs(dp.mass(r)/b/c-4.*numpy.pi*2.*r) < 1e-6, 'General potential.EllipsoidalPotential mass incorrect'
r= 3.9
assert numpy.fabs(dp.mass(r)/b/c-4.*numpy.pi*2.*r) < 1e-6, 'General potential.EllipsoidalPotential mass incorrect'
return None
# Check that toVertical and toPlanar work
def test_toVertical_toPlanar():
    """Check that toPlanar and toVertical of every 3D potential produce a
    planarPotential and a linearPotential, respectively."""
    # Collect all potential class names, excluding helpers and wrappers
    excluded_tokens= ('plot','RZTo','FullTo','toPlanar',
                      'evaluate','Wrapper','toVertical')
    pots= [p for p in dir(potential)
           if 'Potential' in p
           and not any(tok in p for tok in excluded_tokens)]
    pots.extend(['mockInterpSphericalPotential',
                 'mockInterpSphericalPotentialwForce'])
    rmpots= ['Potential','MWPotential','MWPotential2014',
             'MovingObjectPotential',
             'interpRZPotential', 'linearPotential', 'planarAxiPotential',
             'planarPotential', 'verticalPotential','PotentialError',
             'SnapshotRZPotential','InterpSnapshotRZPotential',
             'EllipsoidalPotential','NumericalPotentialDerivativesMixin',
             'SphericalPotential','interpSphericalPotential']
    if False: #_TRAVIS: #travis CI
        rmpots.append('DoubleExponentialDiskPotential')
        rmpots.append('RazorThinExponentialDiskPotential')
    for name in rmpots:
        pots.remove(name)
    for name in pots:
        # Instantiate: look in galpy.potential first, then in this module
        # (the mock classes live here)
        try:
            cls= getattr(potential,name)
        except AttributeError:
            cls= getattr(sys.modules[__name__],name)
        inst= cls()
        if not hasattr(inst,'normalize'): continue #skip these
        inst.normalize(1.)
        # 1D/2D potentials have no toPlanar/toVertical
        if isinstance(inst,(potential.linearPotential,
                            potential.planarPotential)):
            continue
        assert isinstance(inst.toPlanar(),potential.planarPotential), \
            "Conversion into planar potential of potential %s fails" % name
        assert isinstance(inst.toVertical(1.,phi=2.),
                          potential.linearPotential), \
            "Conversion into linear potential of potential %s fails" % name
def test_RZToplanarPotential():
    """RZToplanarPotential must map axisymmetric 3D (and already-planar)
    input to planarPotentials and reject everything else."""
    axipot= potential.LogarithmicHaloPotential(normalize=1.)
    planar1= potential.RZToplanarPotential(axipot)
    assert isinstance(planar1,potential.planarPotential), 'Running an RZPotential through RZToplanarPotential does not produce a planarPotential'
    #Check that a planarPotential through RZToplanarPotential is still planar
    planar2= potential.RZToplanarPotential(planar1)
    assert isinstance(planar2,potential.planarPotential), 'Running a planarPotential through RZToplanarPotential does not produce a planarPotential'
    #Check that a list with a mix of planar and 3D potentials produces list of planar
    mixed= potential.RZToplanarPotential([axipot,planar1])
    for entry in mixed:
        assert isinstance(entry,potential.planarPotential), 'Running a list with a mix of planar and 3D potentials through RZToPlanarPotential does not produce a list of planar potentials'
    # Check that giving an object that is not a list or Potential instance produces an error
    with pytest.raises(potential.PotentialError):
        potential.RZToplanarPotential('something else')
    # Check that given a list of objects that are not a Potential instances gives an error
    with pytest.raises(potential.PotentialError):
        potential.RZToplanarPotential([3,4,45])
    with pytest.raises(potential.PotentialError):
        potential.RZToplanarPotential([axipot,3,4,45])
    # Check that using a non-axisymmetric potential gives an error
    nonaxi= potential.LogarithmicHaloPotential(normalize=1.,q=0.9,b=0.8)
    with pytest.raises(potential.PotentialError):
        potential.RZToplanarPotential(nonaxi)
    with pytest.raises(potential.PotentialError):
        potential.RZToplanarPotential([nonaxi])
    # Check that giving potential.ChandrasekharDynamicalFrictionForce
    # gives an error
    plum= potential.PlummerPotential(amp=1.12,b=2.)
    fric= potential.ChandrasekharDynamicalFrictionForce(
        GMs=0.01,const_lnLambda=8.,
        dens=plum,sigmar=lambda r: 1./numpy.sqrt(2.))
    with pytest.raises(NotImplementedError):
        potential.RZToplanarPotential([plum,fric])
    with pytest.raises(NotImplementedError):
        potential.RZToplanarPotential(fric)
    return None
def test_toPlanarPotential():
    """toPlanarPotential must turn 3D (including non-axisymmetric)
    potentials into planarPotentials and reject invalid input."""
    triax= potential.TriaxialNFWPotential(normalize=1.,b=0.5)
    planar= potential.toPlanarPotential(triax)
    assert isinstance(planar,potential.planarPotential), 'Running a non-axisymmetric Potential through toPlanarPotential does not produce a planarPotential'
    # Also for list
    planarlist= potential.toPlanarPotential([triax])
    assert isinstance(planarlist[0],potential.planarPotential), 'Running a non-axisymmetric Potential through toPlanarPotential does not produce a planarPotential'
    #Check that a planarPotential through toPlanarPotential is still planar
    planaragain= potential.toPlanarPotential(triax)
    assert isinstance(planaragain,potential.planarPotential), 'Running a planarPotential through toPlanarPotential does not produce a planarPotential'
    # A string is neither a Potential nor a planarPotential
    try:
        potential.toPlanarPotential('something else')
    except potential.PotentialError:
        pass
    else:
        raise AssertionError('Using toPlanarPotential with a string rather than an Potential or a planarPotential did not raise PotentialError')
    # Check that list of objects that are not potentials gives error
    with pytest.raises(potential.PotentialError):
        potential.toPlanarPotential([3,4,45])
    # Check that giving potential.ChandrasekharDynamicalFrictionForce
    # gives an error
    plum= potential.PlummerPotential(amp=1.12,b=2.)
    fric= potential.ChandrasekharDynamicalFrictionForce(
        GMs=0.01,const_lnLambda=8.,
        dens=plum,sigmar=lambda r: 1./numpy.sqrt(2.))
    with pytest.raises(NotImplementedError):
        potential.toPlanarPotential([plum,fric])
    return None
def test_RZToverticalPotential():
    """RZToverticalPotential must produce linearPotentials at a given R and
    reject planar, non-axisymmetric, and non-potential input."""
    axipot= potential.LogarithmicHaloPotential(normalize=1.)
    vert1= potential.RZToverticalPotential(axipot,1.2)
    assert isinstance(vert1,potential.linearPotential), 'Running an RZPotential through RZToverticalPotential does not produce a linearPotential'
    #Check that a verticalPotential through RZToverticalPotential is still vertical
    vert2= potential.RZToverticalPotential(vert1,1.2)
    assert isinstance(vert2,potential.linearPotential), 'Running a linearPotential through RZToverticalPotential does not produce a linearPotential'
    # Also for list
    vertlist= potential.RZToverticalPotential([vert1],1.2)
    assert isinstance(vertlist[0],potential.linearPotential), 'Running a linearPotential through RZToverticalPotential does not produce a linearPotential'
    # Check that giving an object that is not a list or Potential instance produces an error
    with pytest.raises(potential.PotentialError):
        potential.RZToverticalPotential('something else',1.2)
    # Check that given a list of objects that are not a Potential instances gives an error
    with pytest.raises(potential.PotentialError):
        potential.RZToverticalPotential([3,4,45],1.2)
    with pytest.raises(potential.PotentialError):
        potential.RZToverticalPotential([axipot,3,4,45],1.2)
    # Check that giving a planarPotential gives an error
    with pytest.raises(potential.PotentialError):
        potential.RZToverticalPotential(axipot.toPlanar(),1.2)
    # Check that giving a list of planarPotential gives an error
    with pytest.raises(potential.PotentialError):
        potential.RZToverticalPotential([axipot.toPlanar()],1.2)
    # Check that using a non-axisymmetric potential gives an error
    nonaxi= potential.LogarithmicHaloPotential(normalize=1.,q=0.9,b=0.8)
    with pytest.raises(potential.PotentialError):
        potential.RZToverticalPotential(nonaxi,1.2)
    with pytest.raises(potential.PotentialError):
        potential.RZToverticalPotential([nonaxi],1.2)
    # Check that giving potential.ChandrasekharDynamicalFrictionForce
    # gives an error
    plum= potential.PlummerPotential(amp=1.12,b=2.)
    fric= potential.ChandrasekharDynamicalFrictionForce(
        GMs=0.01,const_lnLambda=8.,
        dens=plum,sigmar=lambda r: 1./numpy.sqrt(2.))
    with pytest.raises(NotImplementedError):
        potential.RZToverticalPotential([plum,fric],1.2)
    with pytest.raises(NotImplementedError):
        potential.RZToverticalPotential(fric,1.2)
    return None
def test_toVerticalPotential():
    """Test toVerticalPotential: conversion of 3D potentials (including
    non-axisymmetric ones, given phi=) to 1D vertical potentials at a
    given (R,phi), plus error handling for strings, planarPotentials,
    non-potentials, and non-axisymmetric potentials without phi=."""
    tnp= potential.TriaxialNFWPotential(normalize=1.,b=0.5)
    ptnp= potential.toVerticalPotential(tnp,1.2,phi=0.8)
    assert isinstance(ptnp,potential.linearPotential), 'Running a non-axisymmetric Potential through toVerticalPotential does not produce a linearPotential'
    # Also for list
    ptnp= potential.toVerticalPotential([tnp],1.2,phi=0.8)
    assert isinstance(ptnp[0],potential.linearPotential), 'Running a non-axisymmetric Potential through toVerticalPotential does not produce a linearPotential'
    #Check that a linearPotential through toVerticalPotential is still vertical
    ptnp= potential.toVerticalPotential(tnp,1.2,phi=0.8)
    pptnp= potential.toVerticalPotential(ptnp,1.2,phi=0.8)
    assert isinstance(pptnp,potential.linearPotential), 'Running a linearPotential through toVerticalPotential does not produce a linearPotential'
    # also for list
    pptnp= potential.toVerticalPotential([ptnp],1.2,phi=0.8)
    assert isinstance(pptnp[0],potential.linearPotential), 'Running a linearPotential through toVerticalPotential does not produce a linearPotential'
    # A string is neither a Potential nor a linearPotential
    try:
        ptnp= potential.toVerticalPotential('something else',1.2,phi=0.8)
    except potential.PotentialError:
        pass
    else:
        raise AssertionError('Using toVerticalPotential with a string rather than an Potential or a linearPotential did not raise PotentialError')
    # Check that giving a planarPotential gives an error
    with pytest.raises(potential.PotentialError) as excinfo:
        plp= potential.toVerticalPotential(tnp.toPlanar(),1.2,phi=0.8)
    # Check that giving a list of planarPotential gives an error
    with pytest.raises(potential.PotentialError) as excinfo:
        plp= potential.toVerticalPotential([tnp.toPlanar()],1.2,phi=0.8)
    # Check that giving a list of non-potentials gives error
    with pytest.raises(potential.PotentialError) as excinfo:
        plp= potential.toVerticalPotential([3,4,45],1.2)
    # Check that giving potential.ChandrasekharDynamicalFrictionForce
    # gives an error
    pp= potential.PlummerPotential(amp=1.12,b=2.)
    cdfc= potential.ChandrasekharDynamicalFrictionForce(\
        GMs=0.01,const_lnLambda=8.,
        dens=pp,sigmar=lambda r: 1./numpy.sqrt(2.))
    with pytest.raises(NotImplementedError) as excinfo:
        plp= potential.toVerticalPotential([pp,cdfc],1.2,phi=0.8)
    with pytest.raises(NotImplementedError) as excinfo:
        plp= potential.toVerticalPotential(cdfc,1.2,phi=0.8)
    # Check that running a non-axisymmetric potential through toVertical w/o
    # phi gives an error
    with pytest.raises(potential.PotentialError) as excinfo:
        ptnp= potential.toVerticalPotential(tnp,1.2)
    return None
# Sanity check the derivative of the rotation curve and the frequencies in the plane
def test_dvcircdR_omegac_epifreq_rl_vesc():
    """Sanity-check dvcircdR, omegac, epifreq, rl, and vesc against
    closed-form results for simple potentials: flat rotation curve
    (LogarithmicHaloPotential), Kepler, and power-law spheres; also check
    agreement between the method and function-based interfaces and the
    planar versions."""
    #Derivative of rotation curve
    #LogarithmicHaloPotential: rotation everywhere flat
    lp= potential.LogarithmicHaloPotential(normalize=1.)
    assert lp.dvcircdR(1.)**2. < 10.**-16., \
        "LogarithmicHaloPotential's rotation curve is not flat at R=1"
    assert lp.dvcircdR(0.5)**2. < 10.**-16., \
        "LogarithmicHaloPotential's rotation curve is not flat at R=0.5"
    assert lp.dvcircdR(2.)**2. < 10.**-16., \
        "LogarithmicHaloPotential's rotation curve is not flat at R=2"
    #Kepler potential, vc = vc_0(R/R0)^-0.5 -> dvcdR= -0.5 vc_0 (R/R0)**-1.5
    kp= potential.KeplerPotential(normalize=1.)
    assert (kp.dvcircdR(1.)+0.5)**2. < 10.**-16., \
        "KeplerPotential's rotation curve is not what it should be at R=1"
    assert (kp.dvcircdR(0.5)+0.5**-0.5)**2. < 10.**-16., \
        "KeplerPotential's rotation curve is not what it should be at R=0.5"
    assert (kp.dvcircdR(2.)+0.5**2.5)**2. < 10.**-16., \
        "KeplerPotential's rotation curve is not what it should be at R=2"
    #Rotational frequency: Omega = vc/R = 1/R for the normalized flat curve
    assert (lp.omegac(1.)-1.)**2. < 10.**-16., \
        "LogarithmicHalo's rotational frequency is off at R=1"
    assert (lp.omegac(0.5)-2.)**2. < 10.**-16., \
        "LogarithmicHalo's rotational frequency is off at R=0.5"
    assert (lp.omegac(2.)-0.5)**2. < 10.**-16., \
        "LogarithmicHalo's rotational frequency is off at R=2"
    assert (lp.toPlanar().omegac(2.)-0.5)**2. < 10.**-16., \
        "LogarithmicHalo's rotational frequency is off at R=2 through planarPotential"
    #Epicycle frequency, flat rotation curve: kappa = sqrt(2) Omega
    assert (lp.epifreq(1.)-numpy.sqrt(2.)*lp.omegac(1.))**2. < 10.**-16., \
        "LogarithmicHalo's epicycle and rotational frequency are inconsistent with kappa = sqrt(2) Omega at R=1"
    assert (lp.epifreq(0.5)-numpy.sqrt(2.)*lp.omegac(0.5))**2. < 10.**-16., \
        "LogarithmicHalo's epicycle and rotational frequency are inconsistent with kappa = sqrt(2) Omega at R=0.5"
    assert (lp.epifreq(2.0)-numpy.sqrt(2.)*lp.omegac(2.0))**2. < 10.**-16., \
        "LogarithmicHalo's epicycle and rotational frequency are inconsistent with kappa = sqrt(2) Omega at R=2"
    assert (lp.toPlanar().epifreq(2.0)-numpy.sqrt(2.)*lp.omegac(2.0))**2. < 10.**-16., \
        "LogarithmicHalo's epicycle and rotational frequency are inconsistent with kappa = sqrt(2) Omega at R=, through planar2"
    #Epicycle frequency, Kepler: kappa = Omega
    assert (kp.epifreq(1.)-kp.omegac(1.))**2. < 10.**-16., \
        "KeplerPotential's epicycle and rotational frequency are inconsistent with kappa = Omega at R=1"
    assert (kp.epifreq(0.5)-kp.omegac(0.5))**2. < 10.**-16., \
        "KeplerPotential's epicycle and rotational frequency are inconsistent with kappa = Omega at R=0.5"
    assert (kp.epifreq(2.)-kp.omegac(2.))**2. < 10.**-16., \
        "KeplerPotential's epicycle and rotational frequency are inconsistent with kappa = Omega at R=2"
    #Check radius of circular orbit, Kepler: rl = Lz^2 for vc_0 = R0 = 1
    assert (kp.rl(1.)-1.)**2. < 10.**-16., \
        "KeplerPotential's radius of a circular orbit is wrong at Lz=1."
    assert (kp.rl(0.5)-1./4.)**2. < 10.**-16., \
        "KeplerPotential's radius of a circular orbit is wrong at Lz=0.5"
    assert (kp.rl(2.)-4.)**2. < 10.**-16., \
        "KeplerPotential's radius of a circular orbit is wrong at Lz=2."
    #Check radius of circular orbit, PowerSphericalPotential with close-to-flat rotation curve
    pp= potential.PowerSphericalPotential(alpha=1.8,normalize=1.)
    assert (pp.rl(1.)-1.)**2. < 10.**-16., \
        "PowerSphericalPotential's radius of a circular orbit is wrong at Lz=1."
    assert (pp.rl(0.5)-0.5**(10./11.))**2. < 10.**-16., \
        "PowerSphericalPotential's radius of a circular orbit is wrong at Lz=0.5"
    assert (pp.rl(2.)-2.**(10./11.))**2. < 10.**-16., \
        "PowerSphericalPotential's radius of a circular orbit is wrong at Lz=2."
    #Check radius of circular orbit, PowerSphericalPotential with steeper rotation curve
    pp= potential.PowerSphericalPotential(alpha=0.5,normalize=1.)
    assert (pp.rl(1.)-1.)**2. < 10.**-16., \
        "PowerSphericalPotential's radius of a circular orbit is wrong at Lz=1."
    assert (pp.rl(0.0625)-0.0625**(4./7.))**2. < 10.**-16., \
        "PowerSphericalPotential's radius of a circular orbit is wrong at Lz=0.0625"
    assert (pp.rl(16.)-16.**(4./7.))**2. < 10.**-16., \
        "PowerSphericalPotential's radius of a circular orbit is wrong at Lz=16."
    #Check radius in MWPotential2014 at very small lz, to test small lz behavior
    lz= 0.000001
    assert numpy.fabs(potential.vcirc(potential.MWPotential2014,potential.rl(potential.MWPotential2014,lz))*potential.rl(potential.MWPotential2014,lz)-lz) < 1e-12, 'Radius of circular orbit at small Lz in MWPotential2014 does not work as expected'
    #Escape velocity of Kepler potential: vesc^2 = 2 vc^2
    assert (kp.vesc(1.)**2.-2.)**2. < 10.**-16., \
        "KeplerPotential's escape velocity is wrong at R=1"
    assert (kp.vesc(0.5)**2.-2.*kp.vcirc(0.5)**2.)**2. < 10.**-16., \
        "KeplerPotential's escape velocity is wrong at R=0.5"
    assert (kp.vesc(2.)**2.-2.*kp.vcirc(2.)**2.)**2. < 10.**-16., \
        "KeplerPotential's escape velocity is wrong at R=2"
    assert (kp.toPlanar().vesc(2.)**2.-2.*kp.vcirc(2.)**2.)**2. < 10.**-16., \
        "KeplerPotential's escape velocity is wrong at R=2, through planar"
    # W/ different interface: method vs. module-level function
    assert (kp.vcirc(1.)-potential.vcirc(kp,1.))**2. < 10.**-16., \
        "KeplerPotential's circular velocity does not agree between kp.vcirc and vcirc(kp)"
    assert (kp.vcirc(1.)-potential.vcirc(kp.toPlanar(),1.))**2. < 10.**-16., \
        "KeplerPotential's circular velocity does not agree between kp.vcirc and vcirc(kp.toPlanar)"
    assert (kp.vesc(1.)-potential.vesc(kp,1.))**2. < 10.**-16., \
        "KeplerPotential's escape velocity does not agree between kp.vesc and vesc(kp)"
    assert (kp.vesc(1.)-potential.vesc(kp.toPlanar(),1.))**2. < 10.**-16., \
        "KeplerPotential's escape velocity does not agree between kp.vesc and vesc(kp.toPlanar)"
    return None
def test_vcirc_phi_axi():
    """Giving phi= to vcirc of an axisymmetric potential must not change the
    answer."""
    kep= potential.KeplerPotential(normalize=1.)
    angles= numpy.linspace(0.,numpy.pi,101)
    # At R=1 the normalized circular velocity is exactly 1
    devs= numpy.array([kep.vcirc(1.,angle) for angle in angles])-1.
    assert numpy.all(numpy.fabs(devs) < 10.**-8.), 'Setting phi= in vcirc for axisymmetric potential gives different answers for different phi'
    # Repeat at another radius, comparing against the phi-less call
    R= 0.5
    devs= numpy.array([kep.vcirc(R,angle) for angle in angles])-kep.vcirc(R)
    assert numpy.all(numpy.fabs(devs) < 10.**-8.), 'Setting phi= in vcirc for axisymmetric potential gives different answers for different phi'
    return None
def test_vcirc_phi_nonaxi():
    """Check that vcirc does depend on phi for a non-axisymmetric potential.

    Uses a flattened (b=0.4) TriaxialNFWPotential and verifies that the
    circular velocity varies by more than 1% over a range of azimuths,
    both at R=1 and at R=0.5.
    """
    # Test that giving phi to vcirc for a non-axisymmetric potential does
    # affect the answer
    tnp= potential.TriaxialNFWPotential(b=0.4,normalize=1.)
    # limited phi range
    phis= numpy.linspace(numpy.pi/5.,numpy.pi/2.,5)
    vcs= numpy.array([tnp.vcirc(1.,phi) for phi in phis])
    # BUG FIX: the assertion messages previously said 'axisymmetric', but
    # this test deliberately uses a non-axisymmetric potential
    assert numpy.all(numpy.fabs(vcs-1.) > 0.01), 'Setting phi= in vcirc for non-axisymmetric potential does not give different answers for different phi'
    # One at a different radius
    R= 0.5
    vcs= numpy.array([tnp.vcirc(R,phi) for phi in phis])
    assert numpy.all(numpy.fabs(vcs-tnp.vcirc(R,phi=0.)) > 0.01), 'Setting phi= in vcirc for non-axisymmetric potential does not give different answers for different phi'
    return None
def test_vcirc_vesc_special():
    """Special cases of vcirc/vesc: plotting routines must reject
    non-axisymmetric potentials, and calcRotcurve/calcEscapecurve must
    agree with vcirc/vesc."""
    ellip= potential.EllipticalDiskPotential()
    try:
        potential.plotRotcurve([ellip])
    except (AttributeError,potential.PotentialError): #should be raised
        pass
    else:
        raise AssertionError("plotRotcurve for non-axisymmetric potential should have raised AttributeError, but didn't")
    try:
        potential.plotEscapecurve([ellip])
    except AttributeError: #should be raised
        pass
    else:
        raise AssertionError("plotEscapecurve for non-axisymmetric potential should have raised AttributeError, but didn't")
    loghalo= potential.LogarithmicHaloPotential(normalize=1.)
    assert numpy.fabs(potential.calcRotcurve(loghalo,0.8)-loghalo.vcirc(0.8)) < 10.**-16., 'Circular velocity calculated with calcRotcurve not the same as that calculated with vcirc'
    assert numpy.fabs(potential.calcEscapecurve(loghalo,0.8)-loghalo.vesc(0.8)) < 10.**-16., 'Escape velocity calculated with calcEscapecurve not the same as that calculated with vcirc'
    return None
def test_lindbladR():
    """Test the location of the Lindblad resonances (lindbladR) for the
    flat-rotation-curve LogarithmicHaloPotential, for which
    kappa = sqrt(2) Omega so the m=+-2 resonance satisfies
    Omega(R) = OmegaP/(1 -+ sqrt(2)/2); also test non-existent resonances
    (return None) and the IOError for an unknown resonance label."""
    lp= potential.LogarithmicHaloPotential(normalize=1.)
    # Corotation: Omega(R) = OmegaP = 0.5 -> R = 2 for Omega = 1/R
    assert numpy.fabs(lp.lindbladR(0.5,'corotation')-2.) < 10.**-10., 'Location of co-rotation resonance is wrong for LogarithmicHaloPotential'
    assert numpy.fabs(lp.omegac(lp.lindbladR(0.5,2))-2./(2.-numpy.sqrt(2.))*0.5) < 10.**-14., 'Location of m=2 resonance is wrong for LogarithmicHaloPotential'
    assert numpy.fabs(lp.omegac(lp.lindbladR(0.5,-2))+2./(-2.-numpy.sqrt(2.))*0.5) < 10.**-14., 'Location of m=-2 resonance is wrong for LogarithmicHaloPotential'
    #Also through general interface
    assert numpy.fabs(lp.omegac(potential.lindbladR(lp,0.5,-2))+2./(-2.-numpy.sqrt(2.))*0.5) < 10.**-14., 'Location of m=-2 resonance is wrong for LogarithmicHaloPotential'
    #Also for planar
    assert numpy.fabs(lp.omegac(lp.toPlanar().lindbladR(0.5,-2))+2./(-2.-numpy.sqrt(2.))*0.5) < 10.**-14., 'Location of m=-2 resonance is wrong for LogarithmicHaloPotential'
    #Test non-existent ones
    mp= potential.MiyamotoNagaiPotential(normalize=1.,a=0.3)
    assert mp.lindbladR(3.,2) is None, 'MiyamotoNagai w/ OmegaP=3 should not have a inner m=2 LindbladR'
    assert mp.lindbladR(6.,'corotation') is None, 'MiyamotoNagai w/ OmegaP=6 should not have a inner m=2 LindbladR'
    #Test error
    try:
        lp.lindbladR(0.5,'wrong resonance')
    except IOError:
        pass
    else:
        raise AssertionError("lindbladR w/ wrong m input should have raised IOError, but didn't")
    return None
def test_vterm():
    """Test the terminal velocity vterm(l) = R sin(l) (Omega(R sin l) - 1)
    for a flat rotation curve, in degrees and radians, through both the
    method and the module-level function interface."""
    lp= potential.LogarithmicHaloPotential(normalize=1.)
    # sin(30 deg) = 0.5
    assert numpy.fabs(lp.vterm(30.,deg=True)-0.5*(lp.omegac(0.5)-1.)) < 10.**-10., 'vterm for LogarithmicHaloPotential at l=30 is incorrect'
    # sin(pi/3) = sqrt(3)/2
    assert numpy.fabs(lp.vterm(numpy.pi/3.,deg=False)-numpy.sqrt(3.)/2.*(lp.omegac(numpy.sqrt(3.)/2.)-1.)) < 10.**-10., 'vterm for LogarithmicHaloPotential at l=60 in rad is incorrect'
    #Also using general interface
    assert numpy.fabs(potential.vterm(lp,30.,deg=True)-0.5*(lp.omegac(0.5)-1.)) < 10.**-10., 'vterm for LogarithmicHaloPotential at l=30 is incorrect'
    assert numpy.fabs(potential.vterm(lp,numpy.pi/3.,deg=False)-numpy.sqrt(3.)/2.*(lp.omegac(numpy.sqrt(3.)/2.)-1.)) < 10.**-10., 'vterm for LogarithmicHaloPotential at l=60 in rad is incorrect'
    return None
def test_flattening():
    """Test the potential flattening measure: equal to q for a flattened
    LogarithmicHaloPotential, equal to 1 for spherical potentials, and
    <= 1 (oblate) everywhere for a disk potential."""
    #Simple tests: LogarithmicHalo
    qs= [0.75,1.,1.25]
    for q in qs:
        lp= potential.LogarithmicHaloPotential(normalize=1.,q=q)
        assert (lp.flattening(1.,0.001)-q)**2. < 10.**-16., \
            "Flattening of LogarithmicHaloPotential w/ q= %f is not equal to q  at (R,z) = (1.,0.001)" % q
        assert (lp.flattening(1.,0.1)-q)**2. < 10.**-16., \
            "Flattening of LogarithmicHaloPotential w/ q= %f is not equal to q  at (R,z) = (1.,0.1)" % q
        assert (lp.flattening(0.5,0.001)-q)**2. < 10.**-16., \
            "Flattening of LogarithmicHaloPotential w/ q= %f is not equal to q  at (R,z) = (0.5,0.001)" % q
        assert (lp.flattening(0.5,0.1)-q)**2. < 10.**-16., \
            "Flattening of LogarithmicHaloPotential w/ q= %f is not equal to q  at (R,z) = (0.5,0.1)" % q
        #One test with the general interface
        assert (potential.flattening(lp,0.5,0.1)-q)**2. < 10.**-16., \
            "Flattening of LogarithmicHaloPotential w/ q= %f is not equal to q  at (R,z) = (0.5,0.1), through potential.flattening" % q
    #Check some spherical potentials
    kp= potential.KeplerPotential(normalize=1.)
    assert (kp.flattening(1.,0.02)-1.)**2. < 10.**-16., \
        "Flattening of KeplerPotential is not equal to 1 at (R,z) = (1.,0.02)"
    np= potential.NFWPotential(normalize=1.,a=5.)
    assert (np.flattening(1.,0.02)-1.)**2. < 10.**-16., \
        "Flattening of NFWPotential is not equal to 1 at (R,z) = (1.,0.02)"
    hp= potential.HernquistPotential(normalize=1.,a=5.)
    assert (hp.flattening(1.,0.02)-1.)**2. < 10.**-16., \
        "Flattening of HernquistPotential is not equal to 1 at (R,z) = (1.,0.02)"
    #Disk potentials should be oblate everywhere
    mp= potential.MiyamotoNagaiPotential(normalize=1.,a=0.5,b=0.05)
    assert mp.flattening(1.,0.1) <= 1., \
        "Flattening of MiyamotoNagaiPotential w/ a=0.5, b=0.05 is > 1 at (R,z) = (1.,0.1)"
    assert mp.flattening(1.,2.) <= 1., \
        "Flattening of MiyamotoNagaiPotential w/ a=0.5, b=0.05 is > 1 at (R,z) = (1.,2.)"
    assert mp.flattening(3.,3.) <= 1., \
        "Flattening of MiyamotoNagaiPotential w/ a=0.5, b=0.05 is > 1 at (R,z) = (3.,3.)"
    return None
def test_verticalfreq():
    """Test the vertical frequency nu: for spherical potentials nu must
    equal the rotational frequency Omega; for a double-exponential disk,
    kappa^2 + nu^2 - 2 Omega^2 must be ~0 far from the center and must
    approach 4 pi G rho (the Poisson equation) closer in."""
    #For spherical potentials, vertical freq should be equal to rotational freq
    lp= potential.LogarithmicHaloPotential(normalize=1.,q=1.)
    kp= potential.KeplerPotential(normalize=1.)
    np= potential.NFWPotential(normalize=1.)
    bp= potential.BurkertPotential(normalize=1.)
    rs= numpy.linspace(0.2,2.,21)
    for r in rs:
        assert numpy.fabs(lp.verticalfreq(r)-lp.omegac(r)) < 10.**-10., \
            'Verticalfreq for spherical potential does not equal rotational freq'
        assert numpy.fabs(kp.verticalfreq(r)-kp.omegac(r)) < 10.**-10., \
            'Verticalfreq for spherical potential does not equal rotational freq'
        #Through general interface
        assert numpy.fabs(potential.verticalfreq(np,r)-np.omegac(r)) < 10.**-10., \
            'Verticalfreq for spherical potential does not equal rotational freq'
        assert numpy.fabs(potential.verticalfreq([bp],r)-bp.omegac(r)) < 10.**-10., \
            'Verticalfreq for spherical potential does not equal rotational freq'
    #For Double-exponential disk potential, epi^2+vert^2-2*rot^2 =~ 0 at very large distances (no longer explicitly, because we don't use a Kepler potential anylonger)
    if True: #not _TRAVIS:
        dp= potential.DoubleExponentialDiskPotential(normalize=1.,hr=0.05,hz=0.01)
        assert numpy.fabs(dp.epifreq(1.)**2.+dp.verticalfreq(1.)**2.-2.*dp.omegac(1.)**2.) < 10.**-4., 'epi^2+vert^2-2*rot^2 !=~ 0 for dblexp potential, very far from center'
        #Closer to the center, this becomes the Poisson eqn.
        assert numpy.fabs(dp.epifreq(.125)**2.+dp.verticalfreq(.125)**2.-2.*dp.omegac(.125)**2.-4.*numpy.pi*dp.dens(0.125,0.))/4./numpy.pi/dp.dens(0.125,0.) < 10.**-3., 'epi^2+vert^2-2*rot^2 !=~ dens for dblexp potential'
    return None
def test_planar_nonaxi():
    """All planar evaluation routines must raise a PotentialError when
    called on a non-axisymmetric potential without specifying phi."""
    ellip= potential.EllipticalDiskPotential()
    # (routine, name used in the failure message)
    checks= (
        (potential.evaluateplanarPotentials,'evaluateplanarPotentials'),
        (potential.evaluateplanarRforces,'evaluateplanarRforces'),
        (potential.evaluateplanarphiforces,'evaluateplanarphiforces'),
        (potential.evaluateplanarR2derivs,'evaluateplanarR2derivs'),
    )
    for func, funcname in checks:
        try:
            func(ellip,1.)
        except potential.PotentialError:
            pass
        else:
            raise AssertionError('%s for non-axisymmetric potential w/o specifying phi did not raise PotentialError' % funcname)
    return None
def test_ExpDisk_special():
    """Test special cases for the exponential-disk potentials.

    Checks that DoubleExponentialDiskPotential accepts array input for
    evaluation of the potential itself, and that
    RazorThinExponentialDiskPotential.R2deriv raises a PotentialError for
    z != 0 input.
    """
    #Test that array input works
    dp= potential.DoubleExponentialDiskPotential(normalize=1.)
    rs= numpy.linspace(0.1,2.11)
    zs= numpy.ones_like(rs)*0.1
    #Potential itself
    dpevals= numpy.array([dp(r,z) for (r,z) in zip(rs,zs)])
    # BUG FIX: the message previously misspelled the class name as
    # 'DoubleExppnentialDiskPotential'
    assert numpy.all(numpy.fabs(dp(rs,zs)-dpevals) < 10.**-10.), \
        'DoubleExponentialDiskPotential evaluation does not work as expected for array inputs'
    # NOTE(review): array-input checks for Rforce/zforce/R2deriv/z2deriv/
    # Rzderiv were previously present but commented out; re-enable them once
    # array evaluation is supported for those methods (see git history)
    #Check the PotentialError for z=/=0 evaluation of R2deriv of RazorThinDiskPotential
    rp= potential.RazorThinExponentialDiskPotential(normalize=1.)
    try: rp.R2deriv(1.,0.1)
    except potential.PotentialError: pass
    # BUG FIX: the message said 'AttributeError', but a PotentialError is
    # what this branch checks for (and what the code raises)
    else: raise AssertionError("RazorThinExponentialDiskPotential's R2deriv did not raise PotentialError for z=/= 0 input")
    return None
def test_DehnenBar_special():
#Test some special cases for the DehnenBar potentials
#if _TRAVIS: return None
#Test that array input works
dp= potential.DehnenBarPotential()
#Test frmo rs < rb through to rs > rb
rs= numpy.linspace(0.1*dp._rb,2.11*dp._rb)
zs= numpy.ones_like(rs)*0.1
phis=numpy.ones_like(rs)*0.1
#Potential itself
dpevals= numpy.array([dp(r,z,phi) for (r,z,phi) in zip(rs,zs,phis)])
assert numpy.all(numpy.fabs(dp(rs,zs,phis)-dpevals) < 10.**-10.), \
'DehnenBarPotential evaluation does not work as expected for array inputs'
# R array, z not an array
dpevals= numpy.array([dp(r,zs[0],phi) for (r,phi) in zip(rs,phis)])
assert numpy.all(numpy.fabs(dp(rs,zs[0],phis)-dpevals) < 10.**-10.), \
'DehnenBarPotential evaluation does not work as expected for array inputs'
# z array, R not an array
dpevals= numpy.array([dp(rs[0],z,phi) for (z,phi) in zip(zs,phis)])
assert numpy.all(numpy.fabs(dp(rs[0],zs,phis)-dpevals) < 10.**-10.), \
'DehnenBarPotential evaluation does not work as expected for array inputs'
#Rforce
dpevals= numpy.array([dp.Rforce(r,z,phi) for (r,z,phi) in zip(rs,zs,phis)])
assert numpy.all(numpy.fabs(dp.Rforce(rs,zs,phis)-dpevals) < 10.**-10.), \
'DehnenBarPotential Rforce evaluation does not work as expected for array inputs'
# R array, z not an array
dpevals= numpy.array([dp.Rforce(r,zs[0],phi) for (r,phi) in zip(rs,phis)])
assert numpy.all(numpy.fabs(dp.Rforce(rs,zs[0],phis)-dpevals) < 10.**-10.), \
'DehnenBarPotential Rforce does not work as expected for array inputs'
# z array, R not an array
dpevals= numpy.array([dp.Rforce(rs[0],z,phi) for (z,phi) in zip(zs,phis)])
assert numpy.all(numpy.fabs(dp.Rforce(rs[0],zs,phis)-dpevals) < 10.**-10.), \
'DehnenBarPotential Rforce does not work as expected for array inputs'
#zforce
dpevals= numpy.array([dp.zforce(r,z,phi) for (r,z,phi) in zip(rs,zs,phis)])
assert numpy.all(numpy.fabs(dp.zforce(rs,zs,phis)-dpevals) < 10.**-10.), \
'DehnenBarPotential zforce evaluation does not work as expected for array inputs'
# R array, z not an array
dpevals= numpy.array([dp.zforce(r,zs[0],phi) for (r,phi) in zip(rs,phis)])
assert numpy.all(numpy.fabs(dp.zforce(rs,zs[0],phis)-dpevals) < 10.**-10.), \
'DehnenBarPotential zforce does not work as expected for array inputs'
# z array, R not an array
dpevals= numpy.array([dp.zforce(rs[0],z,phi) for (z,phi) in zip(zs,phis)])
assert numpy.all(numpy.fabs(dp.zforce(rs[0],zs,phis)-dpevals) < 10.**-10.), \
'DehnenBarPotential zforce does not work as expected for array inputs'
#phiforce
dpevals= numpy.array([dp.phiforce(r,z,phi) for (r,z,phi) in zip(rs,zs,phis)])
assert numpy.all(numpy.fabs(dp.phiforce(rs,zs,phis)-dpevals) < 10.**-10.), \
'DehnenBarPotential zforce evaluation does not work as expected for array inputs'
# R array, z not an array
dpevals= numpy.array([dp.phiforce(r,zs[0],phi) for (r,phi) in zip(rs,phis)])
assert numpy.all(numpy.fabs(dp.phiforce(rs,zs[0],phis)-dpevals) < 10.**-10.), \
'DehnenBarPotential phiforce does not work as expected for array inputs'
# z array, R not an array
dpevals= numpy.array([dp.phiforce(rs[0],z,phi) for (z,phi) in zip(zs,phis)])
assert numpy.all(numpy.fabs(dp.phiforce(rs[0],zs,phis)-dpevals) < 10.**-10.), \
'DehnenBarPotential phiforce does not work as expected for array inputs'
#R2deriv
dpevals= numpy.array([dp.R2deriv(r,z,phi) for (r,z,phi) in zip(rs,zs,phis)])
assert numpy.all(numpy.fabs(dp.R2deriv(rs,zs,phis)-dpevals) < 10.**-10.), \
'DehnenBarPotential R2deriv evaluation does not work as expected for array inputs'
# R array, z not an array
dpevals= numpy.array([dp.R2deriv(r,zs[0],phi) for (r,phi) in zip(rs,phis)])
assert numpy.all(numpy.fabs(dp.R2deriv(rs,zs[0],phis)-dpevals) < 10.**-10.), \
'DehnenBarPotential R2deriv does not work as expected for array inputs'
# z array, R not an array
dpevals= numpy.array([dp.R2deriv(rs[0],z,phi) for (z,phi) in zip(zs,phis)])
assert numpy.all(numpy.fabs(dp.R2deriv(rs[0],zs,phis)-dpevals) < 10.**-10.), \
'DehnenBarPotential R2deriv does not work as expected for array inputs'
#z2deriv
dpevals= numpy.array([dp.z2deriv(r,z,phi) for (r,z,phi) in zip(rs,zs,phis)])
assert numpy.all(numpy.fabs(dp.z2deriv(rs,zs,phis)-dpevals) < 10.**-10.), \
'DehnenBarPotential z2deriv evaluation does not work as expected for array inputs'
# R array, z not an array
dpevals= numpy.array([dp.z2deriv(r,zs[0],phi) for (r,phi) in zip(rs,phis)])
assert numpy.all(numpy.fabs(dp.z2deriv(rs,zs[0],phis)-dpevals) < 10.**-10.), \
'DehnenBarPotential z2deriv does not work as expected for array inputs'
# z array, R not an array
dpevals= numpy.array([dp.z2deriv(rs[0],z,phi) for (z,phi) in zip(zs,phis)])
assert numpy.all(numpy.fabs(dp.z2deriv(rs[0],zs,phis)-dpevals) < 10.**-10.), \
'DehnenBarPotential z2deriv does not work as expected for array inputs'
#phi2deriv
dpevals= numpy.array([dp.phi2deriv(r,z,phi) for (r,z,phi) in zip(rs,zs,phis)])
assert numpy.all(numpy.fabs(dp.phi2deriv(rs,zs,phis)-dpevals) < 10.**-10.), \
'DehnenBarPotential z2deriv evaluation does not work as expected for array inputs'
# R array, z not an array
dpevals= numpy.array([dp.phi2deriv(r,zs[0],phi) for (r,phi) in zip(rs,phis)])
assert numpy.all(numpy.fabs(dp.phi2deriv(rs,zs[0],phis)-dpevals) < 10.**-10.), \
'DehnenBarPotential phi2deriv does not work as expected for array inputs'
# z array, R not an array
dpevals= numpy.array([dp.phi2deriv(rs[0],z,phi) for (z,phi) in zip(zs,phis)])
assert numpy.all(numpy.fabs(dp.phi2deriv(rs[0],zs,phis)-dpevals) < 10.**-10.), \
'DehnenBarPotential phi2deriv does not work as expected for array inputs'
#Rzderiv
dpevals= numpy.array([dp.Rzderiv(r,z,phi) for (r,z,phi) in zip(rs,zs,phis)])
assert numpy.all(numpy.fabs(dp.Rzderiv(rs,zs,phis)-dpevals) < 10.**-10.), \
'DehnenBarPotential Rzderiv evaluation does not work as expected for array inputs'
# R array, z not an array
dpevals= numpy.array([dp.Rzderiv(r,zs[0],phi) for (r,phi) in zip(rs,phis)])
assert numpy.all(numpy.fabs(dp.Rzderiv(rs,zs[0],phis)-dpevals) < 10.**-10.), \
'DehnenBarPotential Rzderiv does not work as expected for array inputs'
# z array, R not an array
dpevals= numpy.array([dp.Rzderiv(rs[0],z,phi) for (z,phi) in zip(zs,phis)])
assert numpy.all(numpy.fabs(dp.Rzderiv(rs[0],zs,phis)-dpevals) < 10.**-10.), \
'DehnenBarPotential Rzderiv does not work as expected for array inputs'
#Rphideriv
dpevals= numpy.array([dp.Rphideriv(r,z,phi) for (r,z,phi) in zip(rs,zs,phis)])
assert numpy.all(numpy.fabs(dp.Rphideriv(rs,zs,phis)-dpevals) < 10.**-10.), \
'DehnenBarPotential Rzderiv evaluation does not work as expected for array inputs'
# R array, z not an array
dpevals= numpy.array([dp.Rphideriv(r,zs[0],phi) for (r,phi) in zip(rs,phis)])
assert numpy.all(numpy.fabs(dp.Rphideriv(rs,zs[0],phis)-dpevals) < 10.**-10.), \
'DehnenBarPotential Rphideriv does not work as expected for array inputs'
# z array, R not an array
dpevals= numpy.array([dp.Rphideriv(rs[0],z,phi) for (z,phi) in zip(zs,phis)])
assert numpy.all(numpy.fabs(dp.Rphideriv(rs[0],zs,phis)-dpevals) < 10.**-10.), \
'DehnenBarPotential Rphideriv does not work as expected for array inputs'
return None
def test_SpiralArm_special():
    """Test that SpiralArmsPotential handles array inputs: evaluating the
    potential, forces, second derivatives, and density on arrays of
    (R, z, phi) must agree element-wise with the scalar evaluations.

    Fixes copy-pasted assertion messages that named the wrong method
    (phiforce/phi2deriv/Rphideriv/dens previously reported as
    zforce/z2deriv/Rzderiv).
    """
    #Test that array input works
    dp= potential.SpiralArmsPotential()
    rs= numpy.linspace(0.1,2.,11)
    zs= numpy.ones_like(rs)*0.1
    phis=numpy.ones_like(rs)*0.1
    #Potential itself
    dpevals= numpy.array([dp(r,z,phi) for (r,z,phi) in zip(rs,zs,phis)])
    assert numpy.all(numpy.fabs(dp(rs,zs,phis)-dpevals) < 10.**-10.), \
        'SpiralArmsPotential evaluation does not work as expected for array inputs'
    #Rforce
    dpevals= numpy.array([dp.Rforce(r,z,phi) for (r,z,phi) in zip(rs,zs,phis)])
    assert numpy.all(numpy.fabs(dp.Rforce(rs,zs,phis)-dpevals) < 10.**-10.), \
        'SpiralArmsPotential Rforce evaluation does not work as expected for array inputs'
    #zforce
    dpevals= numpy.array([dp.zforce(r,z,phi) for (r,z,phi) in zip(rs,zs,phis)])
    assert numpy.all(numpy.fabs(dp.zforce(rs,zs,phis)-dpevals) < 10.**-10.), \
        'SpiralArmsPotential zforce evaluation does not work as expected for array inputs'
    #phiforce
    dpevals= numpy.array([dp.phiforce(r,z,phi) for (r,z,phi) in zip(rs,zs,phis)])
    assert numpy.all(numpy.fabs(dp.phiforce(rs,zs,phis)-dpevals) < 10.**-10.), \
        'SpiralArmsPotential phiforce evaluation does not work as expected for array inputs'
    #R2deriv
    dpevals= numpy.array([dp.R2deriv(r,z,phi) for (r,z,phi) in zip(rs,zs,phis)])
    assert numpy.all(numpy.fabs(dp.R2deriv(rs,zs,phis)-dpevals) < 10.**-10.), \
        'SpiralArmsPotential R2deriv evaluation does not work as expected for array inputs'
    #z2deriv
    dpevals= numpy.array([dp.z2deriv(r,z,phi) for (r,z,phi) in zip(rs,zs,phis)])
    assert numpy.all(numpy.fabs(dp.z2deriv(rs,zs,phis)-dpevals) < 10.**-10.), \
        'SpiralArmsPotential z2deriv evaluation does not work as expected for array inputs'
    #phi2deriv
    dpevals= numpy.array([dp.phi2deriv(r,z,phi) for (r,z,phi) in zip(rs,zs,phis)])
    assert numpy.all(numpy.fabs(dp.phi2deriv(rs,zs,phis)-dpevals) < 10.**-10.), \
        'SpiralArmsPotential phi2deriv evaluation does not work as expected for array inputs'
    #Rzderiv
    dpevals= numpy.array([dp.Rzderiv(r,z,phi) for (r,z,phi) in zip(rs,zs,phis)])
    assert numpy.all(numpy.fabs(dp.Rzderiv(rs,zs,phis)-dpevals) < 10.**-10.), \
        'SpiralArmsPotential Rzderiv evaluation does not work as expected for array inputs'
    #Rphideriv
    dpevals= numpy.array([dp.Rphideriv(r,z,phi) for (r,z,phi) in zip(rs,zs,phis)])
    assert numpy.all(numpy.fabs(dp.Rphideriv(rs,zs,phis)-dpevals) < 10.**-10.), \
        'SpiralArmsPotential Rphideriv evaluation does not work as expected for array inputs'
    #dens
    dpevals= numpy.array([dp.dens(r,z,phi) for (r,z,phi) in zip(rs,zs,phis)])
    assert numpy.all(numpy.fabs(dp.dens(rs,zs,phis)-dpevals) < 10.**-10.), \
        'SpiralArmsPotential dens evaluation does not work as expected for array inputs'
    return None
def test_MovingObject_density():
    """Check that a MovingObject's density is essentially zero far from it."""
    moving_pot= mockMovingObjectPotential()
    # Evaluate the density well away from the object's position
    far_density= moving_pot.dens(5.,0.)
    assert numpy.fabs(far_density) < 10.**-8., 'Density far away from MovingObject is not close to zero'
    return None
# test specialSelf for TwoPowerSphericalPotential
def test_TwoPowerSphericalPotentialSpecialSelf():
    """Check that TwoPowerSphericalPotential dispatches to / agrees exactly
    with the dedicated special-case potentials for the (alpha, beta) pairs
    (0,4) Dehnen core, (1,4) Hernquist, (2,4) Jaffe, and (1,3) NFW.

    Equality (==, not approximate) is required at a few (R, z) points.
    """
    # TODO replace manual additions with an automatic method
    # that checks the signatures all methods in all potentials
    kw = dict(amp=1.,a=1.,normalize=False,ro=None,vo=None)
    Rs= numpy.array([0.5,1.,2.])
    Zs= numpy.array([0.,.125,-.125])
    # (alpha,beta)=(0,4): cored Dehnen sphere
    pot = potential.TwoPowerSphericalPotential(alpha=0, beta=4,**kw)
    comp = potential.DehnenCoreSphericalPotential(**kw)
    assert all(pot._evaluate(Rs, Zs) == comp._evaluate(Rs, Zs))
    assert all(pot._Rforce(Rs, Zs) == comp._Rforce(Rs, Zs))
    assert all(pot._zforce(Rs, Zs) == comp._zforce(Rs, Zs))
    # (alpha,beta)=(1,4): Hernquist profile
    pot = potential.TwoPowerSphericalPotential(alpha=1, beta=4,**kw)
    comp = potential.HernquistPotential(**kw)
    assert all(pot._evaluate(Rs, Zs) == comp._evaluate(Rs, Zs))
    assert all(pot._Rforce(Rs, Zs) == comp._Rforce(Rs, Zs))
    assert all(pot._zforce(Rs, Zs) == comp._zforce(Rs, Zs))
    # (alpha,beta)=(2,4): Jaffe profile
    pot = potential.TwoPowerSphericalPotential(alpha=2, beta=4,**kw)
    comp = potential.JaffePotential(**kw)
    assert all(pot._evaluate(Rs, Zs) == comp._evaluate(Rs, Zs))
    assert all(pot._Rforce(Rs, Zs) == comp._Rforce(Rs, Zs))
    assert all(pot._zforce(Rs, Zs) == comp._zforce(Rs, Zs))
    # (alpha,beta)=(1,3): NFW profile
    pot = potential.TwoPowerSphericalPotential(alpha=1, beta=3,**kw)
    comp = potential.NFWPotential(**kw)
    assert all(pot._evaluate(Rs, Zs) == comp._evaluate(Rs, Zs))
    assert all(pot._Rforce(Rs, Zs) == comp._Rforce(Rs, Zs))
    assert all(pot._zforce(Rs, Zs) == comp._zforce(Rs, Zs))
    return None
def test_DehnenSphericalPotentialSpecialSelf():
    """Check that DehnenSphericalPotential agrees exactly with the dedicated
    special-case potentials for alpha = 0 (Dehnen core), 1 (Hernquist), and
    2 (Jaffe) at a few (R, z) points.
    """
    # TODO replace manual additions with an automatic method
    # that checks the signatures all methods in all potentials
    kw = dict(amp=1.,a=1.,normalize=False,ro=None,vo=None)
    Rs= numpy.array([0.5,1.,2.])
    Zs= numpy.array([0.,.125,-.125])
    # alpha=0: cored Dehnen sphere (also compares second derivatives)
    pot = potential.DehnenSphericalPotential(alpha=0,**kw)
    comp = potential.DehnenCoreSphericalPotential(**kw)
    assert all(pot._evaluate(Rs, Zs) == comp._evaluate(Rs, Zs))
    assert all(pot._Rforce(Rs, Zs) == comp._Rforce(Rs, Zs))
    assert all(pot._zforce(Rs, Zs) == comp._zforce(Rs, Zs))
    assert all(pot._R2deriv(Rs, Zs) == comp._R2deriv(Rs, Zs))
    assert all(pot._Rzderiv(Rs, Zs) == comp._Rzderiv(Rs, Zs))
    # alpha=1: Hernquist profile
    pot = potential.DehnenSphericalPotential(alpha=1,**kw)
    comp = potential.HernquistPotential(**kw)
    assert all(pot._evaluate(Rs, Zs) == comp._evaluate(Rs, Zs))
    assert all(pot._Rforce(Rs, Zs) == comp._Rforce(Rs, Zs))
    assert all(pot._zforce(Rs, Zs) == comp._zforce(Rs, Zs))
    # alpha=2: Jaffe profile
    pot = potential.DehnenSphericalPotential(alpha=2,**kw)
    comp = potential.JaffePotential(**kw)
    assert all(pot._evaluate(Rs, Zs) == comp._evaluate(Rs, Zs))
    assert all(pot._Rforce(Rs, Zs) == comp._Rforce(Rs, Zs))
    assert all(pot._zforce(Rs, Zs) == comp._zforce(Rs, Zs))
    return None
# Test that MWPotential2014 is what it's supposed to be
def test_MWPotential2014():
    """Check the component parameters and amplitudes of MWPotential2014
    (bulge cut-off power law, Miyamoto-Nagai disk, NFW halo) against the
    values it is defined with (scaled by V0=220 km/s, R0=8 kpc).
    """
    pot= potential.MWPotential2014
    V0, R0= 220., 8.
    #Check the parameters of the bulge
    assert pot[0].rc == 1.9/R0, "MWPotential2014's bulge cut-off radius is incorrect"
    assert pot[0].alpha == 1.8, "MWPotential2014's bulge power-law exponent is incorrect"
    assert numpy.fabs(pot[0].Rforce(1.,0.)+0.05) < 10.**-14., "MWPotential2014's bulge amplitude is incorrect"
    #Check the parameters of the disk
    assert numpy.fabs(pot[1]._a-3./R0) < 10.**-14., "MWPotential2014's disk scale length is incorrect"
    assert numpy.fabs(pot[1]._b-0.28/R0) < 10.**-14., "MWPotential2014's disk scale heigth is incorrect"
    assert numpy.fabs(pot[1].Rforce(1.,0.)+0.60) < 10.**-14., "MWPotential2014's disk amplitude is incorrect"
    #Check the parameters of the halo
    assert numpy.fabs(pot[2].a-16./R0) < 10.**-14., "MWPotential2014's halo scale radius is incorrect"
    assert numpy.fabs(pot[2].Rforce(1.,0.)+0.35) < 10.**-14., "MWPotential2014's halo amplitude is incorrect"
    return None
# Test that the McMillan17 potential is what it's supposed to be
def test_McMillan17():
    """Check the McMillan (2017) Milky-Way potential against numbers from
    the paper: vertical force at the Sun, halo density and concentration,
    and enclosed masses within 50 kpc.

    Fixes the typo 'Stellar massi n' in the last assertion message.
    """
    from galpy.potential.mwpotentials import McMillan17
    from galpy.util import conversion
    ro,vo= McMillan17[0]._ro, McMillan17[0]._vo
    # Check some numbers from Table 3 of McMillan17: vertical force at the Sun
    assert numpy.fabs(-potential.evaluatezforces(McMillan17,1.,1.1/8.21,
                                                 use_physical=False)
                      *conversion.force_in_2piGmsolpc2(vo,ro)-73.9) < 0.2, 'Vertical force at the Sun in McMillan17 does not agree with what it should be'
    # Halo density at the Sun
    assert numpy.fabs(potential.evaluateDensities(McMillan17[1],1.,0.,
                                                  use_physical=False)
                      *conversion.dens_in_msolpc3(vo,ro)-0.0101) < 1e-4, 'Halo density at the Sun in McMillan17 does not agree with what it should be'
    # Halo concentration
    assert numpy.fabs(McMillan17[1].conc(overdens=94.,wrtcrit=True,H=70.4)-15.4) < 1e-1, 'Halo concentration in McMillan17 does not agree with what it is supposed to be'
    # Let's compute the mass of the NFWPotenial and add the paper's number for the mass in stars and gas. The following is the total mass in units of $10^11\,M_\odot$:
    assert numpy.fabs((McMillan17[1].mass(50./8.21,quantity=False))/10.**11.+0.543+0.122-5.1) < 1e-1, 'Mass within 50 kpc in McMillan17 does not agree with what it is supposed to be'
    # Mass of the bulge is slightly off
    assert numpy.fabs((McMillan17[2].mass(50./8.21,quantity=False))/10.**9.-9.23) < 4e-1, 'Bulge mass in McMillan17 does not agree with what it is supposed to be'
    # Mass in stars, compute bulge+disk and subtract what's supposed to be gas
    assert numpy.fabs((McMillan17[0].mass(50./8.21,quantity=False)+McMillan17[2].mass(50./8.21,quantity=False))/10.**10.-1.22-5.43) < 1e-1, 'Stellar mass in McMillan17 does not agree with what it is supposed to be'
    return None
# Test that the Irrgang13 potentials are what they are supposed to be
def test_Irrgang13():
    """Check the three Irrgang et al. (2013) Milky-Way models against paper
    values: circular and escape velocity at the Sun, component masses, and
    the Oort constants A and B (computed as A = 0.5*(vc - dvc/dR)*vo/ro and
    B = -0.5*(vc + dvc/dR)*vo/ro in physical units).
    """
    from galpy.potential.mwpotentials import Irrgang13I, Irrgang13II, \
        Irrgang13III
    # Model I
    ro,vo= Irrgang13I[0]._ro, Irrgang13I[0]._vo
    # Check some numbers from Table 1 of Irrgang13: circular velocity at the Sun
    assert numpy.fabs(potential.vcirc(Irrgang13I,1.,quantity=False)-242.) < 1e-2, 'Circular velocity at the Sun in Irrgang13I does not agree with what it should be'
    # Mass of the bulge
    assert numpy.fabs(Irrgang13I[0].mass(100.,quantity=False)/1e9-9.5) < 1e-2, 'Mass of the bulge in Irrgang13I does not agree with what it should be'
    # Mass of the disk
    assert numpy.fabs(Irrgang13I[1].mass(100.,10.,quantity=False)/1e10-6.6) < 1e-2, 'Mass of the disk in Irrgang13I does not agree with what it should be'
    # Mass of the halo (go to edge in Irrgang13I)
    assert numpy.fabs(Irrgang13I[2].mass(200./ro,quantity=False)/1e12-1.8) < 1e-1, 'Mass of the halo in Irrgang13I does not agree with what it should be'
    # Escape velocity at the Sun
    assert numpy.fabs(potential.vesc(Irrgang13I,1.,quantity=False)-616.4) < 1e0, 'Escape velocity at the Sun in Irrgang13I does not agree with what it should be'
    # Oort A
    assert numpy.fabs(0.5*(potential.vcirc(Irrgang13I,1.,use_physical=False)-potential.dvcircdR(Irrgang13I,1.,use_physical=False))*vo/ro-15.06) < 1e-1, 'Oort A in Irrgang13I does not agree with what it should be'
    # Oort B
    assert numpy.fabs(-0.5*(potential.vcirc(Irrgang13I,1.,use_physical=False)+potential.dvcircdR(Irrgang13I,1.,use_physical=False))*vo/ro+13.74) < 1e-1, 'Oort B in Irrgang13I does not agree with what it should be'
    # Model II
    ro,vo= Irrgang13II[0]._ro, Irrgang13II[0]._vo
    # Check some numbers from Table 2 of Irrgang13: circular velocity at the Sun
    assert numpy.fabs(potential.vcirc(Irrgang13II,1.,quantity=False)-240.4) < 3e-2, 'Circular velocity at the Sun in Irrgang13II does not agree with what it should be'
    # Mass of the bulge
    assert numpy.fabs(Irrgang13II[0].mass(100.,quantity=False)/1e9-4.1) < 1e-1, 'Mass of the bulge in Irrgang13II does not agree with what it should be'
    # Mass of the disk
    assert numpy.fabs(Irrgang13II[1].mass(100.,10.,quantity=False)/1e10-6.6) < 1e-1, 'Mass of the disk in Irrgang13II does not agree with what it should be'
    # Mass of the halo (go to edge in Irrgang13II)
    assert numpy.fabs(Irrgang13II[2].mass(100.,quantity=False)/1e12-1.6) < 1e-1, 'Mass of the halo in Irrgang13II does not agree with what it should be'
    # Escape velocity at the Sun
    assert numpy.fabs(potential.vesc(Irrgang13II,1.,quantity=False)-575.9) < 1e0, 'Escape velocity at the Sun in Irrgang13II does not agree with what it should be'
    # Oort A
    assert numpy.fabs(0.5*(potential.vcirc(Irrgang13II,1.,use_physical=False)-potential.dvcircdR(Irrgang13II,1.,use_physical=False))*vo/ro-15.11) < 1e-1, 'Oort A in Irrgang13II does not agree with what it should be'
    # Oort B
    assert numpy.fabs(-0.5*(potential.vcirc(Irrgang13II,1.,use_physical=False)+potential.dvcircdR(Irrgang13II,1.,use_physical=False))*vo/ro+13.68) < 1e-1, 'Oort B in Irrgang13II does not agree with what it should be'
    # Model III
    ro,vo= Irrgang13III[0]._ro, Irrgang13III[0]._vo
    # Check some numbers from Table 3 of Irrgang13: circular velocity at the Sun
    assert numpy.fabs(potential.vcirc(Irrgang13III,1.,quantity=False)-239.7) < 3e-2, 'Circular velocity at the Sun in Irrgang13III does not agree with what it should be'
    # Mass of the bulge
    assert numpy.fabs(Irrgang13III[0].mass(100.,quantity=False)/1e9-10.2) < 1e-1, 'Mass of the bulge in Irrgang13III does not agree with what it should be'
    # Mass of the disk
    assert numpy.fabs(Irrgang13III[1].mass(100.,10.,quantity=False)/1e10-7.2) < 1e-1, 'Mass of the disk in Irrgang13III does not agree with what it should be'
    # Escape velocity at the Sun
    assert numpy.fabs(potential.vesc(Irrgang13III,1.,quantity=False)-811.5) < 1e0, 'Escape velocity at the Sun in Irrgang13III does not agree with what it should be'
    # Oort A
    assert numpy.fabs(0.5*(potential.vcirc(Irrgang13III,1.,use_physical=False)-potential.dvcircdR(Irrgang13III,1.,use_physical=False))*vo/ro-14.70) < 1e-1, 'Oort A in Irrgang13III does not agree with what it should be'
    # Oort B
    assert numpy.fabs(-0.5*(potential.vcirc(Irrgang13III,1.,use_physical=False)+potential.dvcircdR(Irrgang13III,1.,use_physical=False))*vo/ro+14.08) < 1e-1, 'Oort B in Irrgang13III does not agree with what it should be'
    return None
# Test that the Dehnen & Binney (1998) models are what they are supposed to be
def test_DehnenBinney98():
    """Check all four Dehnen & Binney (1998) models against paper values."""
    from galpy.potential.mwpotentials import DehnenBinney98I, \
        DehnenBinney98II, DehnenBinney98III, DehnenBinney98IV
    # Run the shared checker over each model in order
    all_models= [DehnenBinney98I,DehnenBinney98II,
                 DehnenBinney98III,DehnenBinney98IV]
    for model_indx,model_pot in enumerate(all_models):
        check_DehnenBinney98_model(model_pot,
                                   model='model {}'.format(model_indx+1))
    return None
def check_DehnenBinney98_model(pot,model='model 1'):
    """Check one Dehnen & Binney (1998) model against the paper's values.

    Parameters
    ----------
    pot : galpy (list of) Potential
        One of the DehnenBinney98 models from galpy.potential.mwpotentials.
    model : str
        Key ('model 1' .. 'model 4') selecting the reference values.

    Checks the disk surface density at R0, the circular velocity at R0, the
    vertical force at |z| = 1.1 kpc, and the Oort constants A and B.
    Fixes the last assertion's message, which said 'Oort A' while testing
    Oort B.
    """
    from galpy.util import conversion
    # Reference values from the paper for each model
    truth= {'model 1':
            {'SigmaR0':43.3,
             'vc':222.,
             'Fz':68.,
             'A':14.4,
             'B':-13.3},
            'model 2':
            {'SigmaR0':52.1,
             'vc':217.,
             'Fz':72.2,
             'A':14.3,
             'B':-12.9},
            'model 3':
            {'SigmaR0':52.7,
             'vc':217.,
             'Fz':72.5,
             'A':14.1,
             'B':-13.1},
            'model 4':
            {'SigmaR0':50.7,
             'vc':220.,
             'Fz':72.1,
             'A':13.8,
             'B':-13.6}
            }
    phys_kwargs= conversion.get_physical(pot)
    ro= phys_kwargs.get('ro')
    vo= phys_kwargs.get('vo')
    assert numpy.fabs(pot[1].surfdens(1.,10./ro)-truth[model]['SigmaR0']) < 0.2, 'Surface density at R0 in Dehnen & Binney (1998) {} does not agree with paper value'.format(model)
    assert numpy.fabs(potential.vcirc(pot,1.)-truth[model]['vc']) < 0.5, 'Circular velocity at R0 in Dehnen & Binney (1998) {} does not agree with paper value'.format(model)
    assert numpy.fabs(-potential.evaluatezforces(pot,1.,1.1/ro,use_physical=False)*conversion.force_in_2piGmsolpc2(vo,ro)-truth[model]['Fz']) < 0.2, 'Vertical force at R0 in Dehnen & Binney (1998) {} does not agree with paper value'.format(model)
    assert numpy.fabs(0.5*(potential.vcirc(pot,1.,use_physical=False)-potential.dvcircdR(pot,1.,use_physical=False))*vo/ro-truth[model]['A']) < 0.05, 'Oort A in Dehnen & Binney (1998) {} does not agree with paper value'.format(model)
    assert numpy.fabs(-0.5*(potential.vcirc(pot,1.,use_physical=False)+potential.dvcircdR(pot,1.,use_physical=False))*vo/ro-truth[model]['B']) < 0.05, 'Oort B in Dehnen & Binney (1998) {} does not agree with paper value'.format(model)
    return None
# Test that the virial setup of NFW works
def test_NFW_virialsetup_wrtmeanmatter():
    """Round-trip test of the NFW virial setup w.r.t. the mean matter
    density: conc() and mvir() must recover the setup values."""
    H, Om, overdens, wrtcrit= 71., 0.32, 201., False
    ro, vo= 220., 8.
    conc, mvir= 12., 1.1
    nfw= potential.NFWPotential(conc=conc,mvir=mvir,vo=vo,ro=ro,
                                H=H,Om=Om,overdens=overdens,
                                wrtcrit=wrtcrit)
    # Recover the concentration with the same cosmology parameters
    recovered_conc= nfw.conc(H=H,Om=Om,overdens=overdens,wrtcrit=wrtcrit)
    assert numpy.fabs(conc-recovered_conc) < 10.**-6., "NFWPotential virial setup's concentration does not work"
    # Recover the virial mass (mvir() returns solar masses; setup is in 10^12)
    recovered_mvir= nfw.mvir(H=H,Om=Om,overdens=overdens,wrtcrit=wrtcrit)/10.**12.
    assert numpy.fabs(mvir-recovered_mvir) < 10.**-6., "NFWPotential virial setup's virial mass does not work"
    return None
def test_NFW_virialsetup_wrtcrit():
    """Round-trip test of the NFW virial setup w.r.t. the critical density:
    conc() and mvir() must recover the values used at construction."""
    H, Om, overdens, wrtcrit= 71., 0.32, 201., True
    # NOTE(review): 'ro, vo= 220., 8.' looks swapped relative to the usual
    # ro~8 kpc, vo~220 km/s convention; the round-trip check is
    # self-consistent either way — confirm intent before changing.
    ro, vo= 220., 8.
    conc, mvir= 12., 1.1
    np= potential.NFWPotential(conc=conc,mvir=mvir,vo=vo,ro=ro,
                               H=H,Om=Om,overdens=overdens,
                               wrtcrit=wrtcrit)
    assert numpy.fabs(conc-np.conc(H=H,Om=Om,overdens=overdens,
                                   wrtcrit=wrtcrit)) < 10.**-6., "NFWPotential virial setup's concentration does not work"
    assert numpy.fabs(mvir-np.mvir(H=H,Om=Om,overdens=overdens,
                                   wrtcrit=wrtcrit)/10.**12.) < 10.**-6., "NFWPotential virial setup's virial mass does not work"
    return None
def test_TriaxialNFW_virialsetup_wrtmeanmatter():
    """Check that TriaxialNFWPotential's virial setup (w.r.t. mean matter
    density) matches an equivalent spherical NFWPotential: same scale radius
    and consistent amplitude (triaxial amp differs by a 4*pi*a^3 factor)."""
    H, Om, overdens, wrtcrit= 71., 0.32, 201., False
    ro, vo= 220., 8.
    conc, mvir= 12., 1.1
    np= potential.NFWPotential(conc=conc,mvir=mvir,vo=vo,ro=ro,
                               H=H,Om=Om,overdens=overdens,
                               wrtcrit=wrtcrit)
    tnp= potential.TriaxialNFWPotential(b=0.3,c=0.7,
                                        conc=conc,mvir=mvir,vo=vo,ro=ro,
                                        H=H,Om=Om,overdens=overdens,
                                        wrtcrit=wrtcrit)
    assert numpy.fabs(np.a-tnp.a) < 10.**-10., "TriaxialNFWPotential virial setup's concentration does not work"
    assert numpy.fabs(np._amp-tnp._amp*4.*numpy.pi*tnp.a**3) < 10.**-6., "TriaxialNFWPotential virial setup's virial mass does not work"
    return None
def test_TriaxialNFW_virialsetup_wrtcrit():
    """Check that TriaxialNFWPotential's virial setup (w.r.t. the critical
    density) matches an equivalent spherical NFWPotential: same scale radius
    and consistent amplitude (triaxial amp differs by a 4*pi*a^3 factor)."""
    H, Om, overdens, wrtcrit= 71., 0.32, 201., True
    ro, vo= 220., 8.
    conc, mvir= 12., 1.1
    np= potential.NFWPotential(conc=conc,mvir=mvir,vo=vo,ro=ro,
                               H=H,Om=Om,overdens=overdens,
                               wrtcrit=wrtcrit)
    tnp= potential.TriaxialNFWPotential(b=0.3,c=0.7,
                                        conc=conc,mvir=mvir,vo=vo,ro=ro,
                                        H=H,Om=Om,overdens=overdens,
                                        wrtcrit=wrtcrit)
    assert numpy.fabs(np.a-tnp.a) < 10.**-10., "TriaxialNFWPotential virial setup's concentration does not work"
    assert numpy.fabs(np._amp-tnp._amp*4.*numpy.pi*tnp.a**3) < 10.**-6., "TriaxialNFWPotential virial setup's virial mass does not work"
    return None
# Test that setting up an NFW potential with rmax,vmax works as expected
def test_NFW_rmaxvmaxsetup():
    """Check that an NFWPotential built from (rmax, vmax) reproduces both."""
    rmax, vmax= 1.2, 3.23
    nfw= potential.NFWPotential(rmax=rmax,vmax=vmax)
    # Both setup values must round-trip through the corresponding methods
    for computed,setup_value in ((nfw.rmax(),rmax),(nfw.vmax(),vmax)):
        assert numpy.fabs(computed-setup_value) < 10.**-10., 'NFWPotential setup with rmax,vmax does not work as expected'
    return None
def test_conc_attributeerror():
    """PowerSphericalPotential has no scale, so conc() must raise
    AttributeError."""
    pp= potential.PowerSphericalPotential(normalize=1.)
    raised= False
    try:
        pp.conc(220.,8.)
    except AttributeError:
        raised= True
    if not raised:
        raise AssertionError('conc function for potential w/o scale did not raise AttributeError')
    return None
def test_mvir_attributeerror():
    """MiyamotoNagaiPotential has no virial radius, so mvir() must raise
    AttributeError."""
    mp= potential.MiyamotoNagaiPotential(normalize=1.)
    raised= False
    try:
        mp.mvir(220.,8.)
    except AttributeError:
        raised= True
    if not raised:
        raise AssertionError('mvir function for potential w/o rvir did not raise AttributeError')
    return None
# Test that virial quantities are correctly computed when specifying a different (ro,vo) pair from Potential setup (see issue #290)
def test_NFW_virialquantities_diffrovo():
    """Check that NFW virial quantities (mvir, rvir, conc) are computed
    correctly when a different (ro, vo) pair is given at call time than at
    potential setup (regression test for issue #290): the mean overdensity
    inside rvir must equal the requested 'overdens' in both physical and
    internal units.
    """
    from galpy.util import conversion
    H, Om, overdens, wrtcrit= 71., 0.32, 201., False
    ro_setup, vo_setup= 220., 8.
    ros= [7.,8.,9.]
    vos= [220.,230.,240.]
    for ro,vo in zip(ros,vos):
        np= potential.NFWPotential(amp=2.,a=3.,
                                   ro=ro_setup,vo=vo_setup)
        # Computing the overdensity in physical units
        # (mean density inside rvir, converted to units of the mean matter density)
        od= (np.mvir(ro=ro,vo=vo,H=H,Om=Om,overdens=overdens,wrtcrit=wrtcrit)\
                 /4./numpy.pi*3.\
                 /np.rvir(ro=ro,vo=vo,H=H,Om=Om,overdens=overdens,wrtcrit=wrtcrit)**3.)\
                 *(10.**6./H**2.*8.*numpy.pi/3./Om*(4.302*10.**-6.))
        assert numpy.fabs(od-overdens) < 0.1, "NFWPotential's virial quantities computed in physical units with different (ro,vo) from setup are incorrect"
        # Same check in internal units, using the conversion-module factor
        od= (np.mvir(ro=ro,vo=vo,H=H,Om=Om,overdens=overdens,wrtcrit=wrtcrit,use_physical=False)\
                 /4./numpy.pi*3.\
                 /np.rvir(ro=ro,vo=vo,H=H,Om=Om,overdens=overdens,wrtcrit=wrtcrit,use_physical=False)**3.)\
                 *conversion.dens_in_meanmatterdens(vo,ro,H=H,Om=Om)
        assert numpy.fabs(od-overdens) < 0.01, "NFWPotential's virial quantities computed in internal units with different (ro,vo) from setup are incorrect"
        # Also test concentration
        assert numpy.fabs(np.conc(ro=ro,vo=vo,H=H,Om=Om,overdens=overdens,wrtcrit=wrtcrit)\
                              -np.rvir(ro=ro,vo=vo,H=H,Om=Om,overdens=overdens,wrtcrit=wrtcrit)/np._scale/ro) < 0.01, "NFWPotential's concentration computed for different (ro,vo) from setup is incorrect"
    return None
# Test that rmax and vmax are correctly determined for an NFW potential
def test_NFW_rmaxvmax():
    """Check rmax() and vmax() of NFWPotential against a numerical
    determination of the circular-velocity peak (minimizing -vcirc)."""
    # Setup with rmax,vmax
    rmax, vmax= 1.2, 3.23
    np= potential.NFWPotential(rmax=rmax,vmax=vmax)
    # Now determine rmax and vmax numerically
    rmax_opt= optimize.minimize_scalar(lambda r: -np.vcirc(r),
                                       bracket=[0.01,100.])['x']
    assert numpy.fabs(rmax_opt-rmax) < 10.**-7., \
        'NFW rmax() function does not behave as expected'
    assert numpy.fabs(np.vcirc(rmax_opt)-vmax) < 10.**-8., \
        'NFW rmax() function does not behave as expected'
    assert numpy.fabs(np.vcirc(rmax_opt)-np.vmax()) < 10.**-8., \
        'NFW vmax() function does not behave as expected'
    return None
def test_LinShuReductionFactor():
    """Check LinShuReductionFactor against Figure 1 of Lin & Shu (1966),
    both with explicit (k, m, OmegaP) and with a nonaxiPot= input, and check
    that calling it without either raises IOError."""
    #Test that the LinShuReductionFactor is implemented correctly, by comparing to figure 1 in Lin & Shu (1966)
    from galpy.potential import LinShuReductionFactor, \
        LogarithmicHaloPotential, omegac, epifreq
    lp= LogarithmicHaloPotential(normalize=1.) #work in flat rotation curve
    #nu^2 = 0.2, x=4 for m=2,sigmar=0.1
    # w/ nu = m(OmegaP-omegac)/epifreq, x=sr^2*k^2/epifreq^2
    R,m,sr = 0.9,2.,0.1
    tepi, tomegac= epifreq(lp,R), omegac(lp,R)
    OmegaP= tepi*numpy.sqrt(0.2)/m+tomegac #leads to nu^2 = 0.2
    k= numpy.sqrt(4.)*tepi/sr
    assert numpy.fabs(LinShuReductionFactor(lp,R,sr,m=m,k=k,OmegaP=OmegaP)-0.18) < 0.01, 'LinShuReductionFactor does not agree w/ Figure 1 from Lin & Shu (1966)'
    #nu^2 = 0.8, x=10
    OmegaP= tepi*numpy.sqrt(0.8)/m+tomegac #leads to nu^2 = 0.8
    k= numpy.sqrt(10.)*tepi/sr
    assert numpy.fabs(LinShuReductionFactor(lp,R,sr,m=m,k=k,OmegaP=OmegaP)-0.04) < 0.01, 'LinShuReductionFactor does not agree w/ Figure 1 from Lin & Shu (1966)'
    #Similar test, but using a nonaxiPot= input
    from galpy.potential import SteadyLogSpiralPotential
    sp= SteadyLogSpiralPotential(m=2.,omegas=OmegaP,alpha=k*R)
    assert numpy.fabs(LinShuReductionFactor(lp,R,sr,nonaxiPot=sp)-0.04) < 0.01, 'LinShuReductionFactor does not agree w/ Figure 1 from Lin & Shu (1966)'
    #Test exception (neither nonaxiPot nor k=,m=,OmegaP= supplied)
    try:
        LinShuReductionFactor(lp,R,sr)
    except IOError: pass
    else: raise AssertionError("LinShuReductionFactor w/o nonaxiPot set or k=,m=,OmegaP= set did not raise IOError")
    return None
def test_nemoaccname():
    """Pin the NEMO accname strings of the potentials that support NEMO
    output, to guard against accidental changes.

    Fixes the 'cuto-ff' typo in one assertion message.
    """
    #There is no real good way to test this (I think), so I'm just testing to
    #what I think is the correct output now to make sure this isn't
    #accidentally changed
    # Log
    lp= potential.LogarithmicHaloPotential(normalize=1.)
    assert lp.nemo_accname() == 'LogPot', "Logarithmic potential's NEMO name incorrect"
    # NFW
    np= potential.NFWPotential(normalize=1.)
    assert np.nemo_accname() == 'NFW', "NFW's NEMO name incorrect"
    # Miyamoto-Nagai
    mp= potential.MiyamotoNagaiPotential(normalize=1.)
    assert mp.nemo_accname() == 'MiyamotoNagai', "MiyamotoNagai's NEMO name incorrect"
    # Power-spherical w/ cut-off
    pp= potential.PowerSphericalPotentialwCutoff(normalize=1.)
    assert pp.nemo_accname() == 'PowSphwCut', "Power-spherical potential w/ cut-off's NEMO name incorrect"
    # MN3ExponentialDiskPotential
    mp= potential.MN3ExponentialDiskPotential(normalize=1.)
    assert mp.nemo_accname() == 'MiyamotoNagai+MiyamotoNagai+MiyamotoNagai', "MN3ExponentialDiskPotential's NEMO name incorrect"
    # Plummer
    pp= potential.PlummerPotential(normalize=1.)
    assert pp.nemo_accname() == 'Plummer', "PlummerPotential's NEMO name incorrect"
    # Hernquist
    hp= potential.HernquistPotential(normalize=1.)
    assert hp.nemo_accname() == 'Dehnen', "HernquistPotential's NEMO name incorrect"
    return None
def test_nemoaccnamepars_attributeerror():
    """BurkertPotential has no NEMO analogue: both nemo_accname() and
    nemo_accpars() must raise AttributeError."""
    # Use BurkertPotential (unlikely that I would implement that one in NEMO soon)
    bp= potential.BurkertPotential(normalize=1.)
    checks= [(lambda: bp.nemo_accname(),
              'nemo_accname for potential w/o accname does not raise AttributeError'),
             (lambda: bp.nemo_accpars(220.,8.),
              'nemo_accpars for potential w/o accname does not raise AttributeError')]
    for call,failure_msg in checks:
        try:
            call()
        except AttributeError:
            pass
        else:
            raise AssertionError(failure_msg)
    return None
def test_nemoaccnames():
    """Check the module-level potential.nemo_accname for a composite
    potential (MWPotential2014) and for a single potential."""
    # MWPotential2014: component names joined by '+'
    combined_name= potential.nemo_accname(potential.MWPotential2014)
    assert combined_name == 'PowSphwCut+MiyamotoNagai+NFW', "MWPotential2014's NEMO name is incorrect"
    # Single potential: power-spherical w/ cut-off
    pp= potential.PowerSphericalPotentialwCutoff(normalize=1.)
    assert potential.nemo_accname(pp) == 'PowSphwCut', "Power-spherical potential w/ cut-off's NEMO name incorrect"
    return None
def test_nemoaccpars():
    """Check the NEMO accpars strings of individual potentials, including
    the (vo, ro) unit scalings (the 1.0227... factor converts km/s to
    kpc/Gyr as used by NEMO).

    Fixes the stray space in the "MN3ExponentialDiskPotential 's" assertion
    messages.
    """
    # Log
    lp= potential.LogarithmicHaloPotential(amp=2.,core=3.,q=27.) #completely ridiculous, but tests scalings
    vo, ro= 2., 3.
    vo/= 1.0227121655399913
    ap= lp.nemo_accpars(vo,ro).split(',')
    assert numpy.fabs(float(ap[0])-0) < 10.**-8., "Logarithmic potential's NEMO accpars incorrect"
    assert numpy.fabs(float(ap[1])-8.0) < 10.**-8., "Logarithmic potential's NEMO accpars incorrect"
    assert numpy.fabs(float(ap[2])-729.0) < 10.**-8., "Logarithmic potential's NEMO accpars incorrect"
    assert numpy.fabs(float(ap[3])-1.0) < 10.**-8., "Logarithmic potential's NEMO accpars incorrect"
    assert numpy.fabs(float(ap[4])-27.0) < 10.**-8., "Logarithmic potential's NEMO accpars incorrect"
    # Miyamoto-Nagai
    mp= potential.MiyamotoNagaiPotential(amp=3.,a=2.,b=5.)
    vo, ro= 7., 9.
    vo/= 1.0227121655399913
    ap= mp.nemo_accpars(vo,ro).split(',')
    assert numpy.fabs(float(ap[0])-0) < 10.**-8., "MiyamotoNagai's NEMO accpars incorrect"
    assert numpy.fabs(float(ap[1])-1323.0) < 10.**-5., "MiyamotoNagai's NEMO accpars incorrect"
    assert numpy.fabs(float(ap[2])-18.0) < 10.**-8., "MiyamotoNagai's NEMO accpars incorrect"
    assert numpy.fabs(float(ap[3])-45.0) < 10.**-8., "MiyamotoNagai's NEMO accpars incorrect"
    # Power-spherical w/ cut-off
    pp= potential.PowerSphericalPotentialwCutoff(amp=3.,alpha=4.,rc=5.)
    vo, ro= 7., 9.
    vo/= 1.0227121655399913
    ap= pp.nemo_accpars(vo,ro).split(',')
    assert numpy.fabs(float(ap[0])-0) < 10.**-8., "Power-spherical potential w/ cut-off's NEMO accpars incorrect"
    assert numpy.fabs(float(ap[1])-11907.0) < 10.**-4., "Power-spherical potential w/ cut-off's NEMO accpars incorrect"
    assert numpy.fabs(float(ap[2])-4.0) < 10.**-8., "Power-spherical potential w/ cut-off's NEMO accpars incorrect"
    assert numpy.fabs(float(ap[3])-45.0) < 10.**-8., "Power-spherical potential w/ cut-off's NEMO accpars incorrect"
    # NFW
    np= potential.NFWPotential(amp=1./0.2162165954,a=1./16)
    vo, ro= 3., 4.
    vo/= 1.0227121655399913
    ap= np.nemo_accpars(vo,ro).split(',')
    assert numpy.fabs(float(ap[0])-0) < 10.**-8., "NFW's NEMO accpars incorrect"
    assert numpy.fabs(float(ap[1])-0.25) < 10.**-8., "NFW's NEMO accpars incorrect"
    assert numpy.fabs(float(ap[2])-12.0) < 10.**-8., "NFW's NEMO accpars incorrect"
    # MN3ExponentialDiskPotential: three MiyamotoNagai components joined by '#'
    mn= potential.MN3ExponentialDiskPotential(normalize=1.,hr=2.,hz=0.5)
    vo, ro= 3., 4.
    ap= mn.nemo_accpars(vo,ro).replace('#',',').split(',')
    assert numpy.fabs(float(ap[0])-0) < 10.**-8., "MN3ExponentialDiskPotential's NEMO accpars incorrect"
    assert numpy.fabs(float(ap[4])-0) < 10.**-8., "MN3ExponentialDiskPotential's NEMO accpars incorrect"
    assert numpy.fabs(float(ap[8])-0) < 10.**-8., "MN3ExponentialDiskPotential's NEMO accpars incorrect"
    # Test ratios between the three components' amplitudes and scale lengths
    assert numpy.fabs(float(ap[1])/float(ap[5])-mn._mn3[0]._amp/mn._mn3[1]._amp) < 10.**-8., "MN3ExponentialDiskPotential's NEMO accpars incorrect"
    assert numpy.fabs(float(ap[1])/float(ap[9])-mn._mn3[0]._amp/mn._mn3[2]._amp) < 10.**-8., "MN3ExponentialDiskPotential's NEMO accpars incorrect"
    assert numpy.fabs(float(ap[2])/float(ap[6])-mn._mn3[0]._a/mn._mn3[1]._a) < 10.**-8., "MN3ExponentialDiskPotential's NEMO accpars incorrect"
    assert numpy.fabs(float(ap[2])/float(ap[10])-mn._mn3[0]._a/mn._mn3[2]._a) < 10.**-8., "MN3ExponentialDiskPotential's NEMO accpars incorrect"
    assert numpy.fabs(float(ap[3])/float(ap[7])-1.) < 10.**-8., "MN3ExponentialDiskPotential's NEMO accpars incorrect"
    assert numpy.fabs(float(ap[3])/float(ap[11])-1.) < 10.**-8., "MN3ExponentialDiskPotential's NEMO accpars incorrect"
    # Plummer
    pp= potential.PlummerPotential(amp=3.,b=5.)
    vo, ro= 7., 9.
    vo/= 1.0227121655399913
    ap= pp.nemo_accpars(vo,ro).split(',')
    assert numpy.fabs(float(ap[0])-0) < 10.**-8., "Plummer's NEMO accpars incorrect"
    assert numpy.fabs(float(ap[1])-1323.0) < 10.**-5., "Plummer's NEMO accpars incorrect"
    assert numpy.fabs(float(ap[2])-45.0) < 10.**-8., "Plummer's NEMO accpars incorrect"
    # Hernquist
    hp= potential.HernquistPotential(amp=2.,a=1./4.)
    vo, ro= 3., 4.
    vo/= 1.0227121655399913
    ap= hp.nemo_accpars(vo,ro).split(',')
    assert numpy.fabs(float(ap[0])-0) < 10.**-8., "Hernquist's NEMO accpars incorrect"
    assert numpy.fabs(float(ap[1])-1.) < 10.**-8., "Hernquist's NEMO accpars incorrect"
    assert numpy.fabs(float(ap[2])-9.*4) < 10.**-7., "Hernquist's NEMO accpars incorrect"
    assert numpy.fabs(float(ap[3])-1.0) < 10.**-8., "Hernquist's NEMO accpars incorrect"
    return None
def test_nemoaccparss():
    """Check the module-level potential.nemo_accpars for single potentials
    and for a list of potentials (components separated by '#')."""
    # Just combine a few of the above ones
    # Miyamoto + PowerSpherwCut
    mp= potential.MiyamotoNagaiPotential(amp=3.,a=2.,b=5.)
    pp= potential.PowerSphericalPotentialwCutoff(amp=3.,alpha=4.,rc=5.)
    vo, ro= 7., 9.
    # Convert km/s to kpc/Gyr as used by NEMO
    vo/= 1.0227121655399913
    ap= potential.nemo_accpars(mp,vo,ro).split(',')
    assert numpy.fabs(float(ap[0])-0) < 10.**-8., "MiyamotoNagai's NEMO accpars incorrect"
    assert numpy.fabs(float(ap[1])-1323.0) < 10.**-5., "MiyamotoNagai's NEMO accpars incorrect"
    assert numpy.fabs(float(ap[2])-18.0) < 10.**-8., "MiyamotoNagai's NEMO accpars incorrect"
    assert numpy.fabs(float(ap[3])-45.0) < 10.**-8., "MiyamotoNagai's NEMO accpars incorrect"
    # PowSpherwCut
    ap= potential.nemo_accpars(pp,vo,ro).split(',')
    assert numpy.fabs(float(ap[0])-0) < 10.**-8., "Power-spherical potential w/ cut-off's NEMO accpars incorrect"
    assert numpy.fabs(float(ap[1])-11907.0) < 10.**-4., "Power-spherical potential w/ cut-off's NEMO accpars incorrect"
    assert numpy.fabs(float(ap[2])-4.0) < 10.**-8., "Power-spherical potential w/ cut-off's NEMO accpars incorrect"
    assert numpy.fabs(float(ap[3])-45.0) < 10.**-8., "Power-spherical potential w/ cut-off's NEMO accpars incorrect"
    # Combined
    apc= potential.nemo_accpars([mp,pp],vo,ro).split('#')
    ap= apc[0].split(',') # should be MN
    assert numpy.fabs(float(ap[0])-0) < 10.**-8., "Miyamoto+Power-spherical potential w/ cut-off's NEMO accpars incorrect"
    assert numpy.fabs(float(ap[1])-1323.0) < 10.**-5., "Miyamoto+Power-spherical potential w/ cut-off's NEMO accpars incorrect"
    assert numpy.fabs(float(ap[2])-18.0) < 10.**-8., "Miyamoto+Power-spherical potential w/ cut-off's NEMO accpars incorrect"
    assert numpy.fabs(float(ap[3])-45.0) < 10.**-8., "Miyamoto+Power-spherical potential w/ cut-off's NEMO accpars incorrect"
    ap= apc[1].split(',') # should be PP
    assert numpy.fabs(float(ap[0])-0) < 10.**-8., "Miyamoto+Power-spherical potential w/ cut-off's NEMO accpars incorrect"
    assert numpy.fabs(float(ap[1])-11907.0) < 10.**-4., "Miyamoto+Power-spherical potential w/ cut-off's NEMO accpars incorrect"
    assert numpy.fabs(float(ap[2])-4.0) < 10.**-8., "Miyamoto+Power-spherical potential w/ cut-off's NEMO accpars incorrect"
    assert numpy.fabs(float(ap[3])-45.0) < 10.**-8., "Miyamoto+Power-spherical potential w/ cut-off's NEMO accpars incorrect"
    return None
def test_MN3ExponentialDiskPotential_inputs():
    """Test input validation and warnings of MN3ExponentialDiskPotential.

    Checks that (a) an hz so large that the equivalent Miyamoto-Nagai b
    would be negative raises an IOError, and (b) parameter combinations
    with b/Rd > 3 (or b/Rd > 1.35 when posdens=True) emit a galpyWarning.
    """
    # IOError for hz so large that b is negative
    try:
        mn= potential.MN3ExponentialDiskPotential(amp=1.,hz=50.)
    except IOError: pass
    else:
        raise AssertionError("MN3ExponentialDiskPotential with ridiculous hz should have given IOError, but didn't")
    # Warning when b/Rd > 3 or (b/Rd > 1.35 and posdens)
    # Turn warnings into records so we can inspect them
    import warnings
    from galpy.util import galpyWarning
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always",galpyWarning)
        mn= MN3ExponentialDiskPotential(normalize=1.,hz=1.438,hr=1.)
        # Should raise warning bc of MN3ExponentialDiskPotential,
        # might raise others, so scan all recorded warnings
        raisedWarning= any('MN3ExponentialDiskPotential' in str(wa.message)
                           for wa in w)
    assert raisedWarning, "MN3ExponentialDiskPotential w/o posdens, but with b/Rd > 3 did not raise galpyWarning"
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always",galpyWarning)
        mn= MN3ExponentialDiskPotential(normalize=1.,hr=1.,hz=0.7727,
                                        posdens=True)
        raisedWarning= any('MN3ExponentialDiskPotential' in str(wa.message)
                           for wa in w)
    # BUG FIX: message previously said "w/o posdens", but this case sets
    # posdens=True and tests the lower b/Rd > 1.35 threshold
    assert raisedWarning, "MN3ExponentialDiskPotential w/ posdens, but with b/Rd > 1.35 did not raise galpyWarning"
    return None
def test_MN3ExponentialDiskPotential_hz():
    """Test the conversion from scale height hz/Rd to the Miyamoto-Nagai
    b/Rd for both the exponential and the sech^2 vertical profile."""
    # (hr, hz, sech, expected b/Rd, failure message)
    cases= [(1.,1.,False,1.875,
             "b/Rd not computed correctly for exponential profile"),
            (2.,1.,False,0.75,
             "b/Rd not computed correctly for exponential profile"),
            (1.,2.,True,2.1,
             "b/Rd not computed correctly for sech^2 profile"),
            (2.,2.,True,0.9,
             "b/Rd not computed correctly for sech^2 profile")]
    for hr,hz,sech,expected_brd,msg in cases:
        mn= potential.MN3ExponentialDiskPotential(amp=1.,hr=hr,hz=hz,
                                                  sech=sech)
        assert numpy.fabs(mn._brd-expected_brd) < 0.05, msg
    return None
def test_MN3ExponentialDiskPotential_approx():
    """Test that the 3-Miyamoto-Nagai approximation of an exponential disk
    reproduces the enclosed mass of DoubleExponentialDiskPotential to the
    accuracy advertised in Smith et al. (2015): ~0.5-1% at ~4 Rd, a few
    percent at 10 Rd, and somewhat worse when posdens=True.
    """
    # Test that the 3MN approximation works to the advertised level
    # Zero thickness
    mn= potential.MN3ExponentialDiskPotential(amp=1.,hr=1.,hz=0.001,sech=False)
    dp= potential.DoubleExponentialDiskPotential(amp=1.,hr=1.,hz=0.001)
    dpmass= dp.mass(4.,5.*.001)
    assert numpy.fabs(mn.mass(4.,5.*.001)-dpmass)/dpmass < 0.005, "MN3ExponentialDiskPotential does not approximate the enclosed mass as advertised"
    # Finite thickness
    mn= potential.MN3ExponentialDiskPotential(amp=1.,hr=1.,hz=0.62,sech=False)
    dp= potential.DoubleExponentialDiskPotential(amp=1.,hr=1.,hz=0.62)
    dpmass= dp.mass(4.,5.*0.6)
    # MN profile is thicker, so integrate to larger height (10 hz) for mn
    assert numpy.fabs(mn.mass(4.,10.*0.6)-dpmass)/dpmass < 0.01, "MN3ExponentialDiskPotential does not approximate the enclosed mass as advertised"
    # Finite thickness w/ sech
    mn= potential.MN3ExponentialDiskPotential(amp=.5,hr=1.,hz=1.24,sech=True)
    dp= potential.DoubleExponentialDiskPotential(amp=1.,hr=1.,hz=0.62)
    dpmass= dp.mass(4.,5.*0.6)
    assert numpy.fabs(mn.mass(4.,20.*0.6)-dpmass)/dpmass < 0.01, "MN3ExponentialDiskPotential does not approximate the enclosed mass as advertised"
    # At 10 Rd
    # Zero thickness
    mn= potential.MN3ExponentialDiskPotential(amp=1.,hr=1.,hz=0.001,sech=False)
    dp= potential.DoubleExponentialDiskPotential(amp=1.,hr=1.,hz=0.001)
    dpmass= dp.mass(10.,5.*.001)
    assert numpy.fabs(mn.mass(10.,5.*.001)-dpmass)/dpmass < 0.04, "MN3ExponentialDiskPotential does not approximate the enclosed mass as advertised"
    # Finite thickness
    mn= potential.MN3ExponentialDiskPotential(amp=1.,hr=1.,hz=0.62,sech=False)
    dp= potential.DoubleExponentialDiskPotential(amp=1.,hr=1.,hz=0.62)
    dpmass= dp.mass(10.,5.*0.6)
    assert numpy.fabs(mn.mass(10.,10.*0.6)-dpmass)/dpmass < 0.04, "MN3ExponentialDiskPotential does not approximate the enclosed mass as advertised"
    # Finite thickness w/ sech
    mn= potential.MN3ExponentialDiskPotential(amp=0.5,hr=1.,hz=1.24,sech=True)
    dp= potential.DoubleExponentialDiskPotential(amp=1.,hr=1.,hz=0.62)
    dpmass= dp.mass(10.,5.*0.6)
    assert numpy.fabs(mn.mass(10.,20.*0.6)-dpmass)/dpmass < 0.04, "MN3ExponentialDiskPotential does not approximate the enclosed mass as advertised"
    # For posdens the deviations are larger
    # Zero thickness
    mn= potential.MN3ExponentialDiskPotential(amp=1.,hr=1.,hz=0.001,sech=False,
                                              posdens=True)
    dp= potential.DoubleExponentialDiskPotential(amp=1.,hr=1.,hz=0.001)
    dpmass= dp.mass(4.,5.*.001)
    assert numpy.fabs(mn.mass(4.,5.*.001)-dpmass)/dpmass < 0.015, "MN3ExponentialDiskPotential does not approximate the enclosed mass as advertised"
    # Finite thickness
    mn= potential.MN3ExponentialDiskPotential(amp=1.,hr=1.,hz=0.62,sech=False,
                                              posdens=True)
    dp= potential.DoubleExponentialDiskPotential(amp=1.,hr=1.,hz=0.62)
    dpmass= dp.mass(4.,5.*0.6)
    assert numpy.fabs(mn.mass(4.,10.*0.6)-dpmass)/dpmass < 0.015, "MN3ExponentialDiskPotential does not approximate the enclosed mass as advertised"
    # At 10 Rd
    # Zero thickness
    mn= potential.MN3ExponentialDiskPotential(amp=1.,hr=1.,hz=0.001,sech=False,
                                              posdens=True)
    dp= potential.DoubleExponentialDiskPotential(amp=1.,hr=1.,hz=0.001)
    dpmass= dp.mass(10.,5.*.001)
    # Here the deviation is expected to be in the 4-7% band (both bounds
    # checked to confirm the documented accuracy degradation for posdens)
    assert numpy.fabs(mn.mass(10.,5.*.001)-dpmass)/dpmass > 0.04, "MN3ExponentialDiskPotential does not approximate the enclosed mass as advertised"
    assert numpy.fabs(mn.mass(10.,5.*.001)-dpmass)/dpmass < 0.07, "MN3ExponentialDiskPotential does not approximate the enclosed mass as advertised"
    # Finite thickness
    mn= potential.MN3ExponentialDiskPotential(amp=1.,hr=1.,hz=0.62,sech=False,
                                              posdens=True)
    dp= potential.DoubleExponentialDiskPotential(amp=1.,hr=1.,hz=0.62)
    dpmass= dp.mass(10.,5.*0.6)
    assert numpy.fabs(mn.mass(10.,10.*0.6)-dpmass)/dpmass < 0.08, "MN3ExponentialDiskPotential does not approximate the enclosed mass as advertised"
    assert numpy.fabs(mn.mass(10.,10.*0.6)-dpmass)/dpmass > 0.03, "MN3ExponentialDiskPotential does not approximate the enclosed mass as advertised"
    return None
def test_TwoPowerTriaxialPotential_vs_TwoPowerSphericalPotential():
    """Test that TwoPowerTriaxialPotential with spherical axis ratios
    (b=c=1) agrees with TwoPowerSphericalPotential, by comparing Rforce
    (i.e. the circular velocity) over a range of radii; also checks the
    Hernquist, NFW, and Jaffe special cases at tighter tolerance.
    """
    # Test that TwoPowerTriaxialPotential with spherical parameters is the same
    # as TwoPowerSphericalPotential
    tol= -4. # tough general case
    rs= numpy.linspace(0.001,25.,1001)
    tnp= potential.TwoPowerTriaxialPotential(normalize=1.,b=1.,c=1.,a=1.5,
                                             alpha=1.5,beta=3.5)
    # NOTE(review): 'np' here is a local potential instance, not the usual
    # numpy alias (this module uses 'numpy'); confusing but harmless
    np= potential.TwoPowerSphericalPotential(normalize=1.,a=1.5,
                                             alpha=1.5,beta=3.5)
    assert numpy.all(numpy.fabs(numpy.array(\
                [numpy.sqrt(tnp.Rforce(r,0.)/np.Rforce(r,0.)) for r in rs])-1.) < 10.**tol), 'Vcirc not the same for TwoPowerSphericalPotential and spherical version of TwoPowerTriaxialPotential'
    # Also do specific cases
    tol= -8. # much better
    # Hernquist
    tnp= potential.TriaxialHernquistPotential(normalize=1.,b=1.,c=1.,a=1.5)
    np= potential.HernquistPotential(normalize=1.,a=1.5)
    assert numpy.all(numpy.fabs(numpy.array(\
                [numpy.sqrt(tnp.Rforce(r,0.)/np.Rforce(r,0.)) for r in rs])-1.) < 10.**tol), 'Vcirc not the same for Hernquist and spherical version of TriaxialHernquist'
    # NFW
    tnp= potential.TriaxialNFWPotential(normalize=1.,b=1.,c=1.,a=1.5)
    np= potential.NFWPotential(normalize=1.,a=1.5)
    assert numpy.all(numpy.fabs(numpy.array(\
                [numpy.sqrt(tnp.Rforce(r,0.)/np.Rforce(r,0.)) for r in rs])-1.) < 10.**tol), 'Vcirc not the same for NFW and spherical version of TriaxialNFW'
    # Jaffe
    tnp= potential.TriaxialJaffePotential(normalize=1.,b=1.,c=1.,a=1.5)
    np= potential.JaffePotential(normalize=1.,a=1.5)
    assert numpy.all(numpy.fabs(numpy.array(\
                [numpy.sqrt(tnp.Rforce(r,0.)/np.Rforce(r,0.)) for r in rs])-1.) < 10.**tol), 'Vcirc not the same for Jaffe and spherical version of TriaxialJaffe'
    return None
# Test that TwoPowerTriaxial setup raises an error for bad values of alpha
# and beta
def test_TwoPowerTriaxialPotential_alphahigherror():
    """Setting up TwoPowerTriaxialPotential with alpha >= 3 must raise
    an IOError."""
    with pytest.raises(IOError):
        potential.TwoPowerTriaxialPotential(alpha=3.5)
    return None
def test_TwoPowerTriaxialPotential_betalowerror():
    """Setting up TwoPowerTriaxialPotential with beta <= 2 must raise
    an IOError."""
    with pytest.raises(IOError):
        potential.TwoPowerTriaxialPotential(beta=1.)
    return None
# Test that DehnenSphericalPotential setup raises an error for bad values of alpha
def test_DehnenSphericalPotential_alphalowhigherror():
    """DehnenSphericalPotential must raise an IOError for alpha values
    outside the allowed range (checked below and above)."""
    for bad_alpha in (-.5,3.5):
        with pytest.raises(IOError):
            potential.DehnenSphericalPotential(alpha=bad_alpha)
    return None
# Test that FerrersPotential raises a value error for n < 0
def test_FerrersPotential_nNegative():
    """FerrersPotential must raise a ValueError for n < 0."""
    with pytest.raises(ValueError):
        potential.FerrersPotential(n=-1.)
    return None
# Test that SphericalShellPotential raises a value error for normalize=True and a > 1
def test_SphericalShellPotential_normalizer0():
    """SphericalShellPotential must raise a ValueError when normalize=
    is used with shell radius a > 1 (no mass inside r=1 to normalize)."""
    with pytest.raises(ValueError):
        potential.SphericalShellPotential(normalize=1.,a=2.)
    return None
# Test that RingPotential raises a value error for normalize=True and a > 1
def test_RingPotential_normalizer0():
    """RingPotential must raise a ValueError when normalize= is used with
    ring radius a > 1 (no mass inside r=1 to normalize)."""
    with pytest.raises(ValueError):
        potential.RingPotential(normalize=1.,a=2.)
    return None
def test_planeRotatedNFWPotential():
    """Test that rotating a flattened TriaxialNFWPotential around the z
    axis by a position angle pa moves the potential minimum (and density
    maximum) at fixed radius to that angle; checked for a positive and a
    negative position angle."""
    # (position angle, expected angle of the potential minimum in [0,pi))
    for pa,expected_angle in [(30./180.*numpy.pi,30./180.*numpy.pi),
                              (-60./180.*numpy.pi,120./180.*numpy.pi)]:
        tnp= potential.TriaxialNFWPotential(normalize=1.,a=1.5,b=0.5,pa=pa)
        # Scan the potential at a fixed radius; minimum should be at pa
        fixed_radius= 0.8
        phis= numpy.linspace(0.,numpy.pi,1001)
        pot= numpy.array([tnp(fixed_radius,0.,phi=phi) for phi in phis])
        expected_index= numpy.argmin(numpy.fabs(phis-expected_angle))
        assert numpy.argmin(pot) == expected_index, 'Flattened NFW potential rotated around the z axis does not behave as expected'
        # The density should instead peak at the same angle
        dens= numpy.array([tnp.dens(fixed_radius,0.,phi=phi) for phi in phis])
        assert numpy.argmax(dens) == expected_index, 'Flattened NFW potential rotated around the z axis does not behave as expected'
    return None
def test_zaxisRotatedNFWPotential():
    """Test that tilting the symmetry axis of a flattened TriaxialNFWPotential
    via zvec= moves the potential minimum (and density maximum) along a
    circle in the plane containing the tilt to the tilt angle; checked for
    a tilt in the y/z plane and a negative tilt in the x/z plane.
    """
    from galpy.util import coords
    # Test that the rotation according to zvec works as expected
    pa= 30./180.*numpy.pi
    tnp= potential.TriaxialNFWPotential(normalize=1.,a=1.5,c=0.5,
                                        zvec=[0.,-numpy.sin(pa),numpy.cos(pa)])
    # Compute the potential at a fixed radius in the y/z plane,
    # minimum should be at pa!
    Rs= 0.8
    phis= numpy.linspace(0.,numpy.pi,1001)
    xs= numpy.zeros_like(phis)
    ys= Rs*numpy.cos(phis)
    zs= Rs*numpy.sin(phis)
    # Convert the sample circle to cylindrical coordinates for evaluation
    tR,tphi,tz= coords.rect_to_cyl(xs,ys,zs)
    pot= numpy.array([tnp(r,z,phi=phi) for r,z,phi in zip(tR,tz,tphi)])
    minphi= numpy.argmin(pot)
    minphi_pred= numpy.argmin(numpy.fabs(phis-30./180.*numpy.pi))
    assert minphi == minphi_pred, 'Flattened NFW potential with rotated z axis does not behave as expected'
    # Same for density, but max instead
    dens= numpy.array([tnp.dens(r,z,phi=phi) for r,z,phi in zip(tR,tz,tphi)])
    minphi= numpy.argmax(dens)
    minphi_pred= numpy.argmin(numpy.fabs(phis-30./180.*numpy.pi))
    assert minphi == minphi_pred, 'Flattened NFW potential with rotated z axis does not behave as expected'
    # Another one
    pa= -60./180.*numpy.pi
    tnp= potential.TriaxialNFWPotential(normalize=1.,a=1.5,c=0.5,
                                        zvec=[-numpy.sin(pa),0.,numpy.cos(pa)])
    # Compute the potential at a fixed radius in the x/z plane,
    # minimum should be at pa!
    Rs= 0.8
    phis= numpy.linspace(0.,numpy.pi,1001)
    xs= Rs*numpy.cos(phis)
    ys= numpy.zeros_like(phis)
    zs= Rs*numpy.sin(phis)
    tR,tphi,tz= coords.rect_to_cyl(xs,ys,zs)
    pot= numpy.array([tnp(r,z,phi=phi) for r,z,phi in zip(tR,tz,tphi)])
    minphi= numpy.argmin(pot)
    minphi_pred= numpy.argmin(numpy.fabs(phis-120./180.*numpy.pi))
    assert minphi == minphi_pred, 'Flattened NFW potential with rotated z axis does not behave as expected'
    # Same for density, but max instead
    dens= numpy.array([tnp.dens(r,z,phi=phi) for r,z,phi in zip(tR,tz,tphi)])
    minphi= numpy.argmax(dens)
    minphi_pred= numpy.argmin(numpy.fabs(phis-120./180.*numpy.pi))
    assert minphi == minphi_pred, 'Flattened NFW potential with rotated z axis does not behave as expected'
    return None
def test_nonaxierror_function():
    """Test that every evaluateXXX module-level function raises a
    PotentialError when called on a non-axisymmetric potential without
    supplying phi=."""
    tnp= potential.TriaxialNFWPotential(amp=1.,b=0.7,c=0.9)
    # All of these take (pot,R,z) and need phi= for non-axi potentials
    evaluators= [potential.evaluatePotentials,
                 potential.evaluateDensities,
                 potential.evaluateRforces,
                 potential.evaluatezforces,
                 potential.evaluatephiforces,
                 potential.evaluateR2derivs,
                 potential.evaluatez2derivs,
                 potential.evaluateRzderivs,
                 potential.evaluatephi2derivs,
                 potential.evaluateRphiderivs,
                 potential.evaluaterforces,
                 potential.evaluater2derivs]
    for evaluate in evaluators:
        with pytest.raises(potential.PotentialError):
            evaluate(tnp,1.,0.)
    # Surface density is called with a different z argument
    with pytest.raises(potential.PotentialError):
        potential.evaluateSurfaceDensities(tnp,1.,0.1)
    return None
def test_SoftenedNeedleBarPotential_density():
    """Simple checks of the SoftenedNeedleBarPotential density: with a
    spherical softening kernel (b=0) the density is symmetric under
    swapping the in-plane and vertical offsets; with a flattened kernel
    (b>0) the in-plane density is larger.
    """
    # For a spherical softening kernel, density should be symmetric to y/z
    sbp= potential.SoftenedNeedleBarPotential(normalize=1.,a=1.,c=.1,b=0.,
                                              pa=0.)
    # (R,z)=(2,0) at phi=pi/4 vs. (sqrt2,sqrt2) at phi=0: same |y|,|z| swap
    assert numpy.fabs(sbp.dens(2.,0.,phi=numpy.pi/4.)-sbp.dens(numpy.sqrt(2.),numpy.sqrt(2.),phi=0.)) < 10.**-13., 'SoftenedNeedleBarPotential with spherical softening kernel does not appear to have a spherically symmetric density'
    # Another one
    assert numpy.fabs(sbp.dens(4.,0.,phi=numpy.pi/4.)-sbp.dens(2.*numpy.sqrt(2.),2.*numpy.sqrt(2.),phi=0.)) < 10.**-13., 'SoftenedNeedleBarPotential with spherical softening kernel does not appear to have a spherically symmetric density'
    # For a flattened softening kernel, the density at (y,z) should be higher than at (z,y)
    sbp= potential.SoftenedNeedleBarPotential(normalize=1.,a=1.,c=.1,b=0.3,
                                              pa=0.)
    assert sbp.dens(2.,0.,phi=numpy.pi/4.) > sbp.dens(numpy.sqrt(2.),numpy.sqrt(2.),phi=0.), 'SoftenedNeedleBarPotential with flattened softening kernel does not appear to have a consistent'
    # Another one
    assert sbp.dens(4.,0.,phi=numpy.pi/4.) > sbp.dens(2.*numpy.sqrt(2.),2.*numpy.sqrt(2.),phi=0.), 'SoftenedNeedleBarPotential with flattened softening kernel does not appear to have a consistent'
    return None
def test_DiskSCFPotential_SigmaDerivs():
    """Test that the radial derivatives dSigmadR and d2SigmadR2 stored by
    DiskSCFPotential agree with rough finite-difference derivatives of
    Sigma, for both the 'exp' and 'expwhole' surface-density profiles.
    """
    # Test that the derivatives of Sigma are correctly implemented in DiskSCF
    # Very rough finite difference checks
    dscfp= potential.DiskSCFPotential(dens=lambda R,z: 1.,# doesn't matter
                                      Sigma=[{'type':'exp','h':1./3.,'amp':1.},
                                             {'type':'expwhole','h':1./3.,
                                              'amp':1.,'Rhole':0.5}],
                                      hz=[{'type':'exp','h':1./27.},
                                          {'type':'sech2','h':1./27.}],
                                      a=1.,N=2,L=2)
    # Sigma exp
    testRs= numpy.linspace(0.3,1.5,101)
    dR= 10.**-8.
    assert numpy.all(numpy.fabs(((dscfp._Sigma[0](testRs+dR)-dscfp._Sigma[0](testRs))/dR-dscfp._dSigmadR[0](testRs))/dscfp._dSigmadR[0](testRs)) < 10.**-7.), "Derivative dSigmadR does not agree with finite-difference derivative of Sigma for exponential profile in DiskSCFPotential"
    assert numpy.all(numpy.fabs(((dscfp._dSigmadR[0](testRs+dR)-dscfp._dSigmadR[0](testRs))/dR-dscfp._d2SigmadR2[0](testRs))/dscfp._d2SigmadR2[0](testRs)) < 10.**-7.), "Derivative d2SigmadR2 does not agree with finite-difference derivative of dSigmadR for exponential profile in DiskSCFPotential"
    # Sigma expwhole (looser tolerance: profile varies faster near the hole)
    dR= 10.**-8.
    assert numpy.all(numpy.fabs(((dscfp._Sigma[1](testRs+dR)-dscfp._Sigma[1](testRs))/dR-dscfp._dSigmadR[1](testRs))/dscfp._dSigmadR[1](testRs)) < 10.**-4.), "Derivative dSigmadR does not agree with finite-difference derivative of Sigma for exponential-with-hole profile in DiskSCFPotential"
    assert numpy.all(numpy.fabs(((dscfp._dSigmadR[1](testRs+dR)-dscfp._dSigmadR[1](testRs))/dR-dscfp._d2SigmadR2[1](testRs))/dscfp._d2SigmadR2[1](testRs)) < 10.**-4.), "Derivative d2SigmadR2 does not agree with finite-difference derivative of dSigmadR for exponential-with-hole profile in DiskSCFPotential"
    return None
def test_DiskSCFPotential_verticalDerivs():
    """Test that the vertical functions stored by DiskSCFPotential are
    consistent: dHzdz agrees with a finite-difference derivative of Hz,
    and hz agrees with a finite-difference derivative of dHzdz, for both
    the 'exp' and 'sech2' vertical profiles.
    """
    # Test that the vertical derivatives are correctly implemented in DiskSCF
    # Very rough finite difference checks
    dscfp= potential.DiskSCFPotential(dens=lambda R,z: 1.,# doesn't matter
                                      Sigma=[{'type':'exp','h':1./3.,'amp':1.},
                                             {'type':'expwhole','h':1./3.,
                                              'amp':1.,'Rhole':0.5}],
                                      hz=[{'type':'exp','h':1./27.},
                                          {'type':'sech2','h':1./27.}],
                                      a=1.,N=2,L=2)
    # Vertical exp
    testzs= numpy.linspace(0.1/27.,3./27,101)
    dz= 10.**-8.
    assert numpy.all(numpy.fabs(((dscfp._Hz[0](testzs+dz)-dscfp._Hz[0](testzs))/dz-dscfp._dHzdz[0](testzs))/dscfp._dHzdz[0](testzs)) < 10.**-5.5), "Derivative dHzdz does not agree with finite-difference derivative of Hz for exponential profile in DiskSCFPotential"
    assert numpy.all(numpy.fabs(((dscfp._dHzdz[0](testzs+dz)-dscfp._dHzdz[0](testzs))/dz-dscfp._hz[0](testzs))/dscfp._hz[0](testzs)) < 10.**-6.), "Derivative hz does not agree with finite-difference derivative of dHzdz for exponential profile in DiskSCFPotential"
    # Vertical sech^2
    dz= 10.**-8.
    # BUG FIX: these messages previously referred to dSigmadz/Sigma, but the
    # quantities compared are dHzdz and Hz
    assert numpy.all(numpy.fabs(((dscfp._Hz[1](testzs+dz)-dscfp._Hz[1](testzs))/dz-dscfp._dHzdz[1](testzs))/dscfp._dHzdz[1](testzs)) < 10.**-5.5), "Derivative dHzdz does not agree with finite-difference derivative of Hz for sech2 profile in DiskSCFPotential"
    assert numpy.all(numpy.fabs(((dscfp._dHzdz[1](testzs+dz)-dscfp._dHzdz[1](testzs))/dz-dscfp._hz[1](testzs))/dscfp._hz[1](testzs)) < 10.**-6.), "Derivative hz does not agree with finite-difference derivative of dHzdz for sech2 profile in DiskSCFPotential"
    return None
def test_DiskSCFPotential_nhzNeqnsigmaError():
    """DiskSCFPotential must raise a ValueError when the number of hz
    entries (2 here) does not match the number of Sigma entries (1 here).
    """
    with pytest.raises(ValueError) as excinfo:
        dummy= potential.DiskSCFPotential(\
            dens=lambda R,z: numpy.exp(-3.*R)\
                *1./numpy.cosh(z/2.*27.)**2./4.*27.,
            Sigma={'h': 1./3.,
                   'type': 'exp', 'amp': 1.0},
            hz=[{'type':'sech2','h':1./27.},{'type':'sech2','h':1./27.}],
            a=1.,N=5,L=5)
    return None
def test_DiskSCFPotential_againstDoubleExp():
    """Test that the DiskSCFPotential approximation of a double-exponential
    disk (with Sigma and its derivatives given as explicit functions)
    agrees with DoubleExponentialDiskPotential for the potential, Rforce,
    and zforce along cuts in R and in z.
    """
    dp= potential.DoubleExponentialDiskPotential(amp=13.5,hr=1./3.,hz=1./27.)
    dscfp= potential.DiskSCFPotential(dens=lambda R,z: dp.dens(R,z),
                                      Sigma_amp=1.,
                                      Sigma=lambda R: numpy.exp(-3.*R),
                                      dSigmadR=lambda R: -3.*numpy.exp(-3.*R),
                                      d2SigmadR2=lambda R: 9.*numpy.exp(-3.*R),
                                      hz={'type':'exp','h':1./27.},
                                      a=1.,N=10,L=10)
    testRs= numpy.linspace(0.3,1.5,101)
    testzs= numpy.linspace(0.1/27.,3./27,101)
    testR= 0.9*numpy.ones_like(testzs)
    testz= 1.5/27.*numpy.ones_like(testRs)
    # Test potential
    assert numpy.all(numpy.fabs((dp(testRs,testz)-dscfp(testRs,testz))/dscfp(testRs,testz)) < 10.**-2.5), "DiskSCFPotential for double-exponential disk does not agree with DoubleExponentialDiskPotential"
    assert numpy.all(numpy.fabs((dp(testR,testzs)-dscfp(testR,testzs))/dscfp(testRs,testz)) < 10.**-2.5), "DiskSCFPotential for double-exponential disk does not agree with DoubleExponentialDiskPotential"
    # Rforce (dp.Rforce is evaluated point-by-point: it does not broadcast)
    assert numpy.all(numpy.fabs((numpy.array([dp.Rforce(r,z) for (r,z) in zip(testRs,testz)])-dscfp.Rforce(testRs,testz))/dscfp.Rforce(testRs,testz)) < 10.**-2.), "DiskSCFPotential for double-exponential disk does not agree with DoubleExponentialDiskPotential"
    assert numpy.all(numpy.fabs((numpy.array([dp.Rforce(r,z) for (r,z) in zip(testR,testzs)])-dscfp.Rforce(testR,testzs))/dscfp.Rforce(testRs,testz)) < 10.**-2.), "DiskSCFPotential for double-exponential disk does not agree with DoubleExponentialDiskPotential"
    # zforce
    assert numpy.all(numpy.fabs((numpy.array([dp.zforce(r,z) for (r,z) in zip(testRs,testz)])-dscfp.zforce(testRs,testz))/dscfp.zforce(testRs,testz)) < 10.**-1.5), "DiskSCFPotential for double-exponential disk does not agree with DoubleExponentialDiskPotential"
    # Following has rel. large difference at high z
    assert numpy.all(numpy.fabs((numpy.array([dp.zforce(r,z) for (r,z) in zip(testR,testzs)])-dscfp.zforce(testR,testzs))/dscfp.zforce(testRs,testz)) < 10.**-1.), "DiskSCFPotential for double-exponential disk does not agree with DoubleExponentialDiskPotential"
    return None
def test_DiskSCFPotential_againstDoubleExp_dens():
    """Test that the density of the DiskSCFPotential approximation of a
    double-exponential disk (Sigma/hz given as dicts) agrees with
    DoubleExponentialDiskPotential along cuts in R and in z.
    """
    dp= potential.DoubleExponentialDiskPotential(amp=13.5,hr=1./3.,hz=1./27.)
    dscfp= potential.DiskSCFPotential(dens=lambda R,z: dp.dens(R,z),
                                      Sigma={'type':'exp','h':1./3.,'amp':1.},
                                      hz={'type':'exp','h':1./27.},
                                      a=1.,N=10,L=10)
    testRs= numpy.linspace(0.3,1.5,101)
    testzs= numpy.linspace(0.1/27.,3./27,101)
    testR= 0.9*numpy.ones_like(testzs)
    testz= 1.5/27.*numpy.ones_like(testRs)
    # Test density
    assert numpy.all(numpy.fabs((dp.dens(testRs,testz)-dscfp.dens(testRs,testz))/dscfp.dens(testRs,testz)) < 10.**-1.25), "DiskSCFPotential for double-exponential disk does not agree with DoubleExponentialDiskPotential"
    # difficult at high z
    assert numpy.all(numpy.fabs((dp.dens(testR,testzs)-dscfp.dens(testR,testzs))/dscfp.dens(testRs,testz)) < 10.**-1.), "DiskSCFPotential for double-exponential disk does not agree with DoubleExponentialDiskPotential"
    return None
def test_WrapperPotential_dims():
    """Test that WrapperPotentials are assigned to the correct base classes
    (Potential vs. planarPotential, WrapperPotential vs.
    planarWrapperPotential) depending on the dimensionality of the wrapped
    pot= input.
    """
    from galpy.potential.WrapperPotential import parentWrapperPotential, \
        WrapperPotential, planarWrapperPotential
    dp= potential.DehnenBarPotential()
    # 3D pot should be Potential, Wrapper, parentWrapper, not planarX
    dwp= potential.DehnenSmoothWrapperPotential(pot=dp)
    assert isinstance(dwp,potential.Potential), 'WrapperPotential for 3D pot= is not an instance of Potential'
    assert not isinstance(dwp,potential.planarPotential), 'WrapperPotential for 3D pot= is an instance of planarPotential'
    assert isinstance(dwp,parentWrapperPotential), 'WrapperPotential for 3D pot= is not an instance of parentWrapperPotential'
    assert isinstance(dwp,WrapperPotential), 'WrapperPotential for 3D pot= is not an instance of WrapperPotential'
    assert not isinstance(dwp,planarWrapperPotential), 'WrapperPotential for 3D pot= is an instance of planarWrapperPotential'
    # 2D pot should be planarPotential, planarWrapper, parentWrapper, not 3D X
    # BUG FIX: the messages below previously said '3D pot=' for this 2D case
    dwp= potential.DehnenSmoothWrapperPotential(pot=dp.toPlanar())
    assert isinstance(dwp,potential.planarPotential), 'WrapperPotential for 2D pot= is not an instance of planarPotential'
    assert not isinstance(dwp,potential.Potential), 'WrapperPotential for 2D pot= is an instance of Potential'
    assert isinstance(dwp,parentWrapperPotential), 'WrapperPotential for 2D pot= is not an instance of parentWrapperPotential'
    assert isinstance(dwp,planarWrapperPotential), 'WrapperPotential for 2D pot= is not an instance of planarWrapperPotential'
    assert not isinstance(dwp,WrapperPotential), 'WrapperPotential for 2D pot= is an instance of WrapperPotential'
    return None
def test_Wrapper_potinputerror():
    """Setting up a WrapperPotential with pot= that is not a (list of)
    planar/Potentials must raise a ValueError."""
    with pytest.raises(ValueError):
        potential.DehnenSmoothWrapperPotential(pot=1)
    return None
def test_Wrapper_incompatibleunitserror():
    """Setting up a WrapperPotential whose ro/vo disagree with those of the
    wrapped potential must raise an AssertionError; checked for 3D and 2D
    wrapped potentials and for mismatches in ro, vo, and both."""
    ro,vo= 8., 220.
    wrapped_pots= [potential.HernquistPotential(amp=0.55,a=1.3,ro=ro,vo=vo),
                   potential.HernquistPotential(amp=0.55,a=1.3,
                                                ro=ro,vo=vo).toPlanar()]
    # Each tuple is an (ro,vo) pair incompatible with the wrapped potential
    bad_units= [(1.1*ro,vo),(ro,vo*1.1),(1.1*ro,vo*1.1)]
    for hp in wrapped_pots:
        for bad_ro,bad_vo in bad_units:
            with pytest.raises(AssertionError):
                potential.DehnenSmoothWrapperPotential(pot=hp,ro=bad_ro,
                                                       vo=bad_vo)
    return None
def test_WrapperPotential_unittransfer_3d():
    """Test that physical-unit settings (ro/vo and their set-flags) are
    transferred from a wrapped 3D potential to its wrapper, for the cases
    where both, only ro, or only vo are set on the wrapped potential.
    """
    from galpy.util import conversion
    ro,vo= 9., 230.
    # Both ro and vo set
    hp= potential.HernquistPotential(amp=0.55,a=1.3,ro=ro,vo=vo)
    hpw= potential.DehnenSmoothWrapperPotential(pot=hp)
    hpw_phys= conversion.get_physical(hpw,include_set=True)
    assert hpw_phys['roSet'], "ro not set when wrapping a potential with ro set"
    assert hpw_phys['voSet'], "vo not set when wrapping a potential with vo set"
    assert numpy.fabs(hpw_phys['ro']-ro) < 1e-10, "ro not properly transferred to wrapper when wrapping a potential with ro set"
    assert numpy.fabs(hpw_phys['vo']-vo) < 1e-10, "vo not properly transferred to wrapper when wrapping a potential with vo set"
    # Just set ro
    hp= potential.HernquistPotential(amp=0.55,a=1.3,ro=ro)
    hpw= potential.DehnenSmoothWrapperPotential(pot=hp)
    hpw_phys= conversion.get_physical(hpw,include_set=True)
    assert hpw_phys['roSet'], "ro not set when wrapping a potential with ro set"
    # BUG FIX: message previously stated the opposite of the assertion
    assert not hpw_phys['voSet'], "vo set when wrapping a potential without vo set"
    assert numpy.fabs(hpw_phys['ro']-ro) < 1e-10, "ro not properly transferred to wrapper when wrapping a potential with ro set"
    # Just set vo
    hp= potential.HernquistPotential(amp=0.55,a=1.3,vo=vo)
    hpw= potential.DehnenSmoothWrapperPotential(pot=hp)
    hpw_phys= conversion.get_physical(hpw,include_set=True)
    # BUG FIX: message previously stated the opposite of the assertion
    assert not hpw_phys['roSet'], "ro set when wrapping a potential without ro set"
    assert hpw_phys['voSet'], "vo not set when wrapping a potential with vo set"
    assert numpy.fabs(hpw_phys['vo']-vo) < 1e-10, "vo not properly transferred to wrapper when wrapping a potential with vo set"
    return None
def test_WrapperPotential_unittransfer_2d():
    """Test that physical-unit settings (ro/vo and their set-flags) are
    transferred from a wrapped 2D (planar) potential to its wrapper, for
    the cases where both, only ro, or only vo are set.
    """
    from galpy.util import conversion
    ro,vo= 9., 230.
    # Both ro and vo set
    hp= potential.HernquistPotential(amp=0.55,a=1.3,ro=ro,vo=vo).toPlanar()
    hpw= potential.DehnenSmoothWrapperPotential(pot=hp)
    hpw_phys= conversion.get_physical(hpw,include_set=True)
    assert hpw_phys['roSet'], "ro not set when wrapping a potential with ro set"
    assert hpw_phys['voSet'], "vo not set when wrapping a potential with vo set"
    assert numpy.fabs(hpw_phys['ro']-ro) < 1e-10, "ro not properly transferred to wrapper when wrapping a potential with ro set"
    assert numpy.fabs(hpw_phys['vo']-vo) < 1e-10, "vo not properly transferred to wrapper when wrapping a potential with vo set"
    # Just set ro
    hp= potential.HernquistPotential(amp=0.55,a=1.3,ro=ro).toPlanar()
    hpw= potential.DehnenSmoothWrapperPotential(pot=hp)
    hpw_phys= conversion.get_physical(hpw,include_set=True)
    assert hpw_phys['roSet'], "ro not set when wrapping a potential with ro set"
    # BUG FIX: message previously stated the opposite of the assertion
    assert not hpw_phys['voSet'], "vo set when wrapping a potential without vo set"
    assert numpy.fabs(hpw_phys['ro']-ro) < 1e-10, "ro not properly transferred to wrapper when wrapping a potential with ro set"
    # Just set vo
    hp= potential.HernquistPotential(amp=0.55,a=1.3,vo=vo).toPlanar()
    hpw= potential.DehnenSmoothWrapperPotential(pot=hp)
    hpw_phys= conversion.get_physical(hpw,include_set=True)
    # BUG FIX: message previously stated the opposite of the assertion
    assert not hpw_phys['roSet'], "ro set when wrapping a potential without ro set"
    assert hpw_phys['voSet'], "vo not set when wrapping a potential with vo set"
    assert numpy.fabs(hpw_phys['vo']-vo) < 1e-10, "vo not properly transferred to wrapper when wrapping a potential with vo set"
    return None
def test_WrapperPotential_serialization():
    """Test that a WrapperPotential survives a pickle round-trip: the
    deserialized object is still a WrapperPotential and evaluates to the
    same values as the original."""
    import pickle
    from galpy.potential.WrapperPotential import WrapperPotential
    bar= potential.DehnenBarPotential()
    wrapped= potential.DehnenSmoothWrapperPotential(pot=bar)
    roundtripped= pickle.loads(pickle.dumps(wrapped))
    assert isinstance(roundtripped,WrapperPotential), 'Deserialized WrapperPotential is not an instance of WrapperPotential'
    # Compare evaluations on a grid of (R,z,phi,t) points
    testRs= numpy.linspace(0.1,1,100)
    testzs= numpy.linspace(-1,1,100)
    testphis= numpy.linspace(0,2*numpy.pi,100)
    testts= numpy.linspace(0,1,100)
    for R,z,phi,t in zip(testRs,testzs,testphis,testts):
        assert wrapped(R,z,phi,t) == roundtripped(R,z,phi,t), 'Deserialized WrapperPotential does not agree with original WrapperPotential'
def test_WrapperPotential_print():
    """Test that printing a 3D and a 2D wrapper potential works (i.e. does
    not raise; print always returns None)."""
    for wrappee,msg in [(potential.DehnenBarPotential(),
                         'Printing a 3D wrapper potential fails'),
                        (potential.DehnenBarPotential().toPlanar(),
                         'Printing a 2D wrapper potential fails')]:
        dwp= potential.DehnenSmoothWrapperPotential(pot=wrappee)
        assert print(dwp) is None, msg
    return None
def test_dissipative_ignoreInPotentialDensity2ndDerivs():
    """Test that dissipative forces are ignored when they are included in
    lists given to evaluatePotentials, evaluateDensities, and the
    second-derivative evaluators: the list [lp,cdfc] must evaluate to the
    same value as the conservative potential lp alone.
    """
    lp= potential.LogarithmicHaloPotential(normalize=1.,q=0.9,b=0.8)
    cdfc= potential.ChandrasekharDynamicalFrictionForce(\
        GMs=0.01,const_lnLambda=8.,
        dens=lp,sigmar=lambda r: 1./numpy.sqrt(2.))
    R,z= 2.,0.4
    # BUG FIX: each assert previously compared an expression with an
    # identical copy of itself (always 0), making the test vacuous;
    # the second operand must be the conservative potential alone
    assert numpy.fabs(potential.evaluatePotentials([lp,cdfc],R,z,phi=1.)-potential.evaluatePotentials(lp,R,z,phi=1.)) < 1e-10, 'Dissipative forces not ignored in evaluatePotentials'
    assert numpy.fabs(potential.evaluateDensities([lp,cdfc],R,z,phi=1.)-potential.evaluateDensities(lp,R,z,phi=1.)) < 1e-10, 'Dissipative forces not ignored in evaluateDensities'
    assert numpy.fabs(potential.evaluateR2derivs([lp,cdfc],R,z,phi=1.)-potential.evaluateR2derivs(lp,R,z,phi=1.)) < 1e-10, 'Dissipative forces not ignored in evaluateR2derivs'
    assert numpy.fabs(potential.evaluatez2derivs([lp,cdfc],R,z,phi=1.)-potential.evaluatez2derivs(lp,R,z,phi=1.)) < 1e-10, 'Dissipative forces not ignored in evaluatez2derivs'
    assert numpy.fabs(potential.evaluateRzderivs([lp,cdfc],R,z,phi=1.)-potential.evaluateRzderivs(lp,R,z,phi=1.)) < 1e-10, 'Dissipative forces not ignored in evaluateRzderivs'
    assert numpy.fabs(potential.evaluatephi2derivs([lp,cdfc],R,z,phi=1.)-potential.evaluatephi2derivs(lp,R,z,phi=1.)) < 1e-10, 'Dissipative forces not ignored in evaluatephi2derivs'
    assert numpy.fabs(potential.evaluateRphiderivs([lp,cdfc],R,z,phi=1.)-potential.evaluateRphiderivs(lp,R,z,phi=1.)) < 1e-10, 'Dissipative forces not ignored in evaluateRphiderivs'
    assert numpy.fabs(potential.evaluater2derivs([lp,cdfc],R,z,phi=1.)-potential.evaluater2derivs(lp,R,z,phi=1.)) < 1e-10, 'Dissipative forces not ignored in evaluater2derivs'
    return None
def test_dissipative_noVelocityError():
    """Test that calling the evaluateXforces functions on a list containing
    a dissipative force without supplying a velocity raises a
    PotentialError."""
    lp= potential.LogarithmicHaloPotential(normalize=1.,q=0.9,b=0.8)
    cdfc= potential.ChandrasekharDynamicalFrictionForce(\
        GMs=0.01,const_lnLambda=8.,
        dens=lp,sigmar=lambda r: 1./numpy.sqrt(2.))
    R,z,phi= 2.,0.4,1.1
    for evaluate in [potential.evaluateRforces,
                     potential.evaluatephiforces,
                     potential.evaluatezforces,
                     potential.evaluaterforces]:
        with pytest.raises(potential.PotentialError):
            evaluate([lp,cdfc],R,z,phi=phi)
    return None
def test_RingPotential_correctPotentialIntegral():
    """Compare the RingPotential's potential against a direct
    Bessel-function integral solution of the Poisson equation."""
    from scipy import special, integrate
    ring_a= 0.75
    # Direct solution of the Poisson equation for a razor-thin ring
    def direct_pot(R,z,amp=1.,a=ring_a):
        integrand= lambda k: special.jv(0,k*R)*special.jv(0,k*a)\
            *numpy.exp(-k*numpy.fabs(z))
        return -amp*integrate.quad(integrand,0.,numpy.infty)[0]
    rp= potential.RingPotential(amp=3.,a=ring_a)
    # Check a bunch of (R,z); z=0 is excluded because the direct
    # integration does not converge well there
    for R,z in zip([1.2,1.2,0.2,0.2],[0.1,-1.1,-0.1,1.1]):
        assert numpy.fabs(direct_pot(R,z,amp=3.)-rp(R,z)) < 1e-8, 'RingPotential potential evaluation does not agree with direct integration at (R,z) = ({},{})'.format(R,z)
    return None
def test_DehnenSmoothWrapper_decay():
    """A decaying DehnenSmoothWrapperPotential must be the exact complement
    of the growing one: their sum should equal the wrapped potential."""
    lp= potential.LogarithmicHaloPotential(normalize=1.)
    grow= potential.DehnenSmoothWrapperPotential(pot=lp,tform=4.,tsteady=3.)
    decay= potential.DehnenSmoothWrapperPotential(pot=lp,tform=4.,tsteady=3.,
                                                  decay=True)
    times= numpy.linspace(0.,10.,1001)
    # Potential values: grow + decay should reproduce the bare potential
    summed_pot= [grow(2.,0.,t=t)+decay(2.,0.,t=t) for t in times]
    assert numpy.amax(numpy.fabs(lp(2.,0.,times)-summed_pot)) < 1e-10, 'DehnenSmoothWrapper with decay=True is not the opposite of the same with decay=False'
    # Same check for the radial force
    summed_force= [grow.Rforce(2.,0.,t=t)+decay.Rforce(2.,0.,t=t)
                   for t in times]
    assert numpy.amax(numpy.fabs(lp.Rforce(2.,0.,times)-summed_force)) < 1e-10, 'DehnenSmoothWrapper with decay=True is not the opposite of the same with decay=False'
    return None
def test_vtermnegl_issue314():
    """Terminal velocity should be antisymmetric in Galactic longitude l
    (issue #314): vterm(-l) == -vterm(l)."""
    rp= potential.RazorThinExponentialDiskPotential(normalize=1.,hr=3./8.)
    assert numpy.fabs(rp.vterm(0.5)+rp.vterm(-0.5)) < 10.**-8., 'vterm for negative l does not behave as expected'
    return None
def test_Ferrers_Rzderiv_issue319():
    """The analytic Rzderiv of the FerrersPotential should match a
    finite-difference R-derivative of -zforce (issue #319)."""
    from scipy.misc import derivative
    fp= potential.FerrersPotential(normalize=1.)
    analytic= fp.Rzderiv(0.5,0.2,phi=1.)
    finite_diff= derivative(lambda x: -fp.zforce(x,0.2,phi=1.),
                            0.5,dx=10.**-8.)
    assert numpy.fabs(analytic-finite_diff) < 10.**-7., 'Rzderiv for FerrersPotential does not agree with finite-difference calculation'
    return None
def test_rtide():
    """Check the tidal radius rtide against reference values for the
    logarithmic and point-mass (Plummer b=0) potentials, through both the
    method and the function interfaces."""
    tol= 10.**-12.
    lp=potential.LogarithmicHaloPotential()
    pmass=potential.PlummerPotential(b=0.0)
    # Method interface
    assert abs(1.0-lp.rtide(1.,0.,M=1.0)/0.793700525984) < tol,"Calculation of rtide in logaritmic potential fails"
    assert abs(1.0-pmass.rtide(1.,0.,M=1.0)/0.693361274351) < tol, "Calculation of rtide in point-mass potential fails"
    # Function interface
    assert abs(1.0-potential.rtide([lp],1.,0.,M=1.0)/0.793700525984) < tol,"Calculation of rtide in logaritmic potential fails"
    assert abs(1.0-potential.rtide([pmass],1.,0.,M=1.0)/0.693361274351) < tol, "Calculation of rtide in point-mass potential fails"
    return None
def test_rtide_noMError():
    """Calling rtide without the required M= keyword should raise a
    PotentialError for both the method and the function interface."""
    lp=potential.LogarithmicHaloPotential()
    with pytest.raises(potential.PotentialError):
        lp.rtide(1.,0.)
    with pytest.raises(potential.PotentialError):
        potential.rtide([lp],1.,0.)
    return None
def test_ttensor():
    """Tidal tensor of a point mass at R=1 should be diag(2,-1,-1); also
    check the eigenval= option and the symmetry of the off-diagonal
    elements away from the axes."""
    fail_msg= "Calculation of tidal tensor in point-mass potential fails"
    pmass= potential.KeplerPotential(normalize=1.)
    full_expected= numpy.diag([2,-1,-1])
    eig_expected= numpy.array([2,-1,-1])
    # Method interface: full tensor, then eigenvalues
    assert numpy.all(numpy.fabs(pmass.ttensor(1.0,0.0,0.0)
                                -full_expected) < 1e-10), fail_msg
    assert numpy.all(numpy.fabs(pmass.ttensor(1.0,0.0,0.0,eigenval=True)
                                -eig_expected) < 1e-10), fail_msg
    # Function interface: full tensor, then eigenvalues
    assert numpy.all(numpy.fabs(potential.ttensor([pmass],1.0,0.0,0.0)
                                -full_expected) < 1e-10), fail_msg
    assert numpy.all(numpy.fabs(potential.ttensor([pmass],1.0,0.0,0.0,
                                                  eigenval=True)
                                -eig_expected) < 1e-10), fail_msg
    # The tensor should be symmetric when y != 0 and z != 0
    tij= potential.ttensor([pmass],1.0,1.0,1.0)
    for ii,jj in [(0,1),(0,2),(1,2)]:
        assert numpy.all(numpy.fabs(tij[ii][jj]-tij[jj][ii]) < 1e-10), fail_msg
    return None
def test_ttensor_trace():
    """The trace of the tidal tensor T_ij = -d^2 Phi/dx_i dx_j should equal
    -4 pi G rho by the Poisson equation, for a bunch of potentials and for
    a list of potentials (MWPotential2014).

    Fixes two defects in the original test: (1) the asserts had no
    tolerance comparison, so ``assert numpy.fabs(a-b)`` only checked that
    the difference was non-zero (truthy) and could never fail as intended;
    (2) the sign was wrong: with galpy's convention (cf. the diag(2,-1,-1)
    Kepler check in test_ttensor), trace(T) = -4 pi G rho, so trace plus
    4 pi rho must vanish."""
    pots= [potential.KeplerPotential(normalize=1.),
           potential.LogarithmicHaloPotential(normalize=3.,q=0.8),
           potential.MiyamotoNagaiPotential(normalize=0.5,a=3.,b=0.5)]
    R,z,phi= 1.3,-0.2,2.
    for pot in pots:
        assert numpy.fabs(numpy.trace(pot.ttensor(R,z,phi=phi))+4.*numpy.pi*pot.dens(R,z,phi=phi)) < 1e-10, 'Trace of the tidal tensor not equal 4piG density'
    # Also test a list of potentials
    assert numpy.fabs(numpy.trace(potential.ttensor(potential.MWPotential2014,R,z,phi=phi))+4.*numpy.pi*potential.evaluateDensities(potential.MWPotential2014,R,z,phi=phi)) < 1e-10, 'Trace of the tidal tensor not equal 4piG density'
    return None
def test_ttensor_nonaxi():
    """Requesting the tidal tensor of a non-axisymmetric potential should
    raise a PotentialError (method and function interface)."""
    lp= potential.LogarithmicHaloPotential(normalize=1.,b=0.8,q=0.7)
    with pytest.raises(potential.PotentialError):
        lp.ttensor(1.,0.,0.)
    with pytest.raises(potential.PotentialError):
        potential.ttensor(lp,1.,0.,0.)
    return None
# Test that zvc_range returns the radial range over which the zero-velocity
# curve is defined for a given E,Lz
def test_zvc_range():
    """At the returned Rmin and Rmax the effective potential at z=0 must
    equal E, and just outside [Rmin,Rmax] it must exceed E."""
    def check_range(pot_input,E,Lz,Rmin,Rmax):
        # Effective potential in the plane for this Lz
        Phieff= lambda R: potential.evaluatePotentials(pot_input,R,0.)\
            +Lz**2./2./R**2.
        assert numpy.fabs(Phieff(Rmin)-E) < 1e-8, 'zvc_range does not return radius at which Phi_eff(R,0) = E'
        assert numpy.fabs(Phieff(Rmax)-E) < 1e-8, 'zvc_range does not return radius at which Phi_eff(R,0) = E'
        assert Phieff(Rmin-1e-4) > E, 'zvc_range does not give the minimum R for which Phi_eff(R,0) < E'
        assert Phieff(Rmax+1e-4) > E, 'zvc_range does not give the maximum R for which Phi_eff(R,0) < E'
    # Two (E,Lz) combinations for MWPotential2014
    for E,Lz in [(-1.25,0.6),(-2.25,0.2)]:
        Rmin, Rmax= potential.zvc_range(potential.MWPotential2014,E,Lz)
        check_range(potential.MWPotential2014,E,Lz,Rmin,Rmax)
    # Also the method interface on a single potential
    pot= potential.PlummerPotential(normalize=True)
    E, Lz= -1.9, 0.2
    Rmin, Rmax= pot.zvc_range(E,Lz)
    check_range(pot,E,Lz,Rmin,Rmax)
    return None
# zvc_range should return [NaN,NaN] when no orbits exist for the given E,Lz
def test_zvc_range_undefined():
    """Take the energy of a circular orbit at Rc and request an angular
    momentum above Lzmax(E): no orbit exists, so the range is [NaN,NaN]."""
    Rc= 0.6653
    vc= potential.vcirc(potential.MWPotential2014,Rc)
    E= potential.evaluatePotentials(potential.MWPotential2014,Rc,0.)+vc**2./2.
    Lzmax= Rc*vc
    out= potential.zvc_range(potential.MWPotential2014,E,Lzmax+1e-4)
    assert numpy.all(numpy.isnan(out)), 'zvc_range does not return [NaN,NaN] when no orbits exist at this combination of (E,Lz)'
    return None
def test_zvc_at_rminmax():
    """The zero-velocity curve should have zero height at both ends of the
    radial range returned by zvc_range."""
    for E,Lz in [(-1.25,0.6),(-2.25,0.2)]:
        Rmin, Rmax= potential.zvc_range(potential.MWPotential2014,E,Lz)
        assert numpy.fabs(potential.zvc(potential.MWPotential2014,Rmin,E,Lz)) < 1e-8, 'zvc at minimum from zvc_range is not at zero height'
        assert numpy.fabs(potential.zvc(potential.MWPotential2014,Rmax,E,Lz)) < 1e-8, 'zvc at maximum from zvc_range is not at zero height'
    # Also the method interface on a single potential
    pot= potential.PlummerPotential(normalize=True)
    E, Lz= -1.9, 0.2
    Rmin, Rmax= pot.zvc_range(E,Lz)
    assert numpy.fabs(pot.zvc(Rmin,E,Lz)) < 1e-8, 'zvc at minimum from zvc_range is not at zero height'
    assert numpy.fabs(pot.zvc(Rmax,E,Lz)) < 1e-8, 'zvc at maximum from zvc_range is not at zero height'
    return None
def test_zvc():
    """zvc should return the height z at which Phi_eff(R,z) = E, checked at
    three radii inside the [Rmin,Rmax] range from zvc_range."""
    fail_msg= 'zvc does not return the height at which Phi_eff(R,z) = E'
    # Two (E,Lz) combinations for MWPotential2014
    for E,Lz in [(-1.25,0.6),(-2.25,0.2)]:
        Rmin, Rmax= potential.zvc_range(potential.MWPotential2014,E,Lz)
        for Rtrial in [0.5*(Rmin+Rmax),
                       Rmin+0.25*(Rmax-Rmin),
                       Rmin+0.75*(Rmax-Rmin)]:
            ztrial= potential.zvc(potential.MWPotential2014,Rtrial,E,Lz)
            assert numpy.fabs(potential.evaluatePotentials(potential.MWPotential2014,Rtrial,ztrial)+Lz**2./2./Rtrial**2.-E) < 1e-8, fail_msg
    # Also the method interface on a single potential
    pot= potential.PlummerPotential(normalize=True)
    E, Lz= -1.9, 0.2
    Rmin, Rmax= pot.zvc_range(E,Lz)
    for Rtrial in [0.5*(Rmin+Rmax),
                   Rmin+0.25*(Rmax-Rmin),
                   Rmin+0.75*(Rmax-Rmin)]:
        ztrial= pot.zvc(Rtrial,E,Lz)
        assert numpy.fabs(potential.evaluatePotentials(pot,Rtrial,ztrial)+Lz**2./2./Rtrial**2.-E) < 1e-8, fail_msg
    return None
# zvc evaluated outside of the zvc_range interval should be NaN
def test_zvc_undefined():
    """Just outside [Rmin,Rmax] no zero-velocity height exists, so zvc
    should return NaN on both sides."""
    for E,Lz in [(-1.25,0.6),(-2.25,0.2)]:
        Rmin, Rmax= potential.zvc_range(potential.MWPotential2014,E,Lz)
        assert numpy.isnan(potential.zvc(potential.MWPotential2014,Rmin-1e-4,E,Lz)), 'zvc at R < Rmin is not NaN'
        assert numpy.isnan(potential.zvc(potential.MWPotential2014,Rmax+1e-4,E,Lz)), 'zvc at R > Rmax is not NaN'
    return None
# zvc should raise a ValueError when no solution can be bracketed
def test_zvc_valueerror():
    """At an energy far above any bound orbit zvc cannot find a root."""
    E, Lz= -1.25+100, 0.6
    with pytest.raises(ValueError):
        # Note: E is raised by another 100 here, as in the original test
        potential.zvc(potential.MWPotential2014,0.7,E+100,Lz)
    return None
def test_rhalf():
    """Half-mass radii against analytic values for the Hernquist, Dehnen
    spherical, and Plummer potentials."""
    a= numpy.pi
    # Hernquist: r_1/2 = (1+sqrt(2)) a
    hern= potential.HernquistPotential(amp=1.,a=a)
    assert numpy.fabs(hern.rhalf()-(1.+numpy.sqrt(2.))*a) < 1e-10, 'Half-mass radius of the Hernquist potential incorrect'
    # Dehnen spherical: r_1/2 = a/(2^(1/(3-alpha))-1)
    alpha= 1.34
    dehnen= potential.DehnenSphericalPotential(amp=1.,a=a,alpha=alpha)
    assert numpy.fabs(dehnen.rhalf()-a/(2**(1./(3.-alpha))-1.)) < 1e-10, 'Half-mass radius of the DehnenSpherical potential incorrect'
    # Plummer: r_1/2 = b/sqrt(0.5^(-2/3)-1); also exercises the function
    # interface
    plum= potential.PlummerPotential(amp=1.,b=a)
    assert numpy.fabs(potential.rhalf(plum)-a/numpy.sqrt(0.5**(-2./3.)-1.)) < 1e-10, 'Half-mass radius of the Plummer potential incorrect'
    return None
def test_tdyn():
    """For spherical potentials the dynamical time equals 2 pi R / vc
    exactly; for a flattened disk the relation only holds approximately."""
    a= numpy.pi
    R= 1.4
    # Hernquist (method interface)
    hern= potential.HernquistPotential(amp=1.,a=a)
    assert numpy.fabs(hern.tdyn(R)-2.*numpy.pi*R/hern.vcirc(R)) < 1e-10, 'Dynamical time of the Hernquist potential incorrect'
    # Dehnen spherical (function interface)
    alpha= 1.34
    dehnen= potential.DehnenSphericalPotential(amp=1.,a=a,alpha=alpha)
    assert numpy.fabs(potential.tdyn(dehnen,R)-2.*numpy.pi*R/dehnen.vcirc(R)) < 1e-10, 'Dynamical time of the DehnenSpherical potential incorrect'
    # Axisymmetric disk: only approximately 2 pi R / vc
    mn= potential.MiyamotoNagaiPotential(amp=1.,a=a,b=a/5.)
    R= 3.4
    assert numpy.fabs(mn.tdyn(R)/(2.*numpy.pi*R/mn.vcirc(R))-1.) < 0.03, 'Dynamical time of the Miyamoto-Nagai potential incorrect'
    return None
def test_NumericalPotentialDerivativesMixin():
    """Check that the finite-difference forces and second derivatives
    provided by NumericalPotentialDerivativesMixin agree with the analytic
    implementations of a few potentials over a grid of (R,z,phi)."""
    # Test that the NumericalPotentialDerivativesMixin works as expected
    def get_mixin_first_instance(cls,*args,**kwargs):
        # Function to return instance of a class for Potential cls where
        # the NumericalPotentialDerivativesMixin comes first, so all derivs
        # are numerical (should otherwise always be used second!)
        class NumericalPot(potential.NumericalPotentialDerivativesMixin,cls):
            def __init__(self,*args,**kwargs):
                potential.NumericalPotentialDerivativesMixin.__init__(self,
                                                                      kwargs)
                cls.__init__(self,*args,**kwargs)
        return NumericalPot(*args,**kwargs)
    # Function to check all numerical derivatives
    def check_numerical_derivs(Pot,NumPot,tol=1e-6,tol2=1e-5):
        # tol: tolerance for forces, tol2: tolerance for 2nd derivatives
        # Check wide range of R,z,phi
        Rs= numpy.array([0.5,1.,2.])
        Zs= numpy.array([0.,.125,-.125,0.25,-0.25])
        phis= numpy.array([0.,0.5,-0.5,1.,-1.,
                           numpy.pi,0.5+numpy.pi,
                           1.+numpy.pi])
        # NOTE: the `**(Zs[jj] > 0.)` and `**Pot.isNonAxi` exponents below
        # raise the denominator to a boolean power: when the reference value
        # can be ~0 (z<=0 for the z-derivatives, axisymmetric potentials for
        # the phi-derivatives) the denominator becomes 1, i.e. the
        # comparison switches from relative to absolute
        for ii in range(len(Rs)):
            for jj in range(len(Zs)):
                for kk in range(len(phis)):
                    # Forces
                    assert numpy.fabs((Pot.Rforce(Rs[ii],Zs[jj],phi=phis[kk])
                                       -NumPot.Rforce(Rs[ii],Zs[jj],phi=phis[kk]))/Pot.Rforce(Rs[ii],Zs[jj],phi=phis[kk])) < tol, 'NumericalPotentialDerivativesMixin applied to {} Potential does not give the correct Rforce'.format(Pot.__class__.__name__)
                    assert numpy.fabs((Pot.zforce(Rs[ii],Zs[jj],phi=phis[kk])
                                       -NumPot.zforce(Rs[ii],Zs[jj],phi=phis[kk]))/Pot.zforce(Rs[ii],Zs[jj],phi=phis[kk])**(Zs[jj] > 0.)) < tol, 'NumericalPotentialDerivativesMixin applied to {} Potential does not give the correct zforce'.format(Pot.__class__.__name__)
                    assert numpy.fabs((Pot.phiforce(Rs[ii],Zs[jj],phi=phis[kk])
                                       -NumPot.phiforce(Rs[ii],Zs[jj],phi=phis[kk]))/Pot.phiforce(Rs[ii],Zs[jj],phi=phis[kk])**Pot.isNonAxi) < tol, 'NumericalPotentialDerivativesMixin applied to {} Potential does not give the correct phiforce'.format(Pot.__class__.__name__)
                    # Second derivatives
                    assert numpy.fabs((Pot.R2deriv(Rs[ii],Zs[jj],phi=phis[kk])
                                       -NumPot.R2deriv(Rs[ii],Zs[jj],phi=phis[kk]))/Pot.R2deriv(Rs[ii],Zs[jj],phi=phis[kk])) < tol2, 'NumericalPotentialDerivativesMixin applied to {} Potential does not give the correct R2deriv'.format(Pot.__class__.__name__)
                    assert numpy.fabs((Pot.z2deriv(Rs[ii],Zs[jj],phi=phis[kk])
                                       -NumPot.z2deriv(Rs[ii],Zs[jj],phi=phis[kk]))/Pot.z2deriv(Rs[ii],Zs[jj],phi=phis[kk])) < tol2, 'NumericalPotentialDerivativesMixin applied to {} Potential does not give the correct z2deriv'.format(Pot.__class__.__name__)
                    assert numpy.fabs((Pot.phi2deriv(Rs[ii],Zs[jj],phi=phis[kk])
                                       -NumPot.phi2deriv(Rs[ii],Zs[jj],phi=phis[kk]))/Pot.phi2deriv(Rs[ii],Zs[jj],phi=phis[kk])**Pot.isNonAxi) < tol2, 'NumericalPotentialDerivativesMixin applied to {} Potential does not give the correct phi2deriv'.format(Pot.__class__.__name__)
                    assert numpy.fabs((Pot.Rzderiv(Rs[ii],Zs[jj],phi=phis[kk])
                                       -NumPot.Rzderiv(Rs[ii],Zs[jj],phi=phis[kk]))/Pot.Rzderiv(Rs[ii],Zs[jj],phi=phis[kk])**(Zs[jj] > 0.)) < tol2, 'NumericalPotentialDerivativesMixin applied to {} Potential does not give the correct Rzderiv'.format(Pot.__class__.__name__)
                    assert numpy.fabs((Pot.Rphideriv(Rs[ii],Zs[jj],phi=phis[kk])
                                       -NumPot.Rphideriv(Rs[ii],Zs[jj],phi=phis[kk]))/Pot.Rphideriv(Rs[ii],Zs[jj],phi=phis[kk])**Pot.isNonAxi) < tol2, 'NumericalPotentialDerivativesMixin applied to {} Potential does not give the correct Rphideriv'.format(Pot.__class__.__name__)
        return None
    # Now check some potentials
    # potential.MiyamotoNagaiPotential
    mp= potential.MiyamotoNagaiPotential(amp=1.,a=0.5,b=0.05)
    num_mp= get_mixin_first_instance(potential.MiyamotoNagaiPotential,
                                     amp=1.,a=0.5,b=0.05)
    check_numerical_derivs(mp,num_mp)
    # potential.DehnenBarPotential
    dp= potential.DehnenBarPotential()
    num_dp= get_mixin_first_instance(potential.DehnenBarPotential)
    check_numerical_derivs(dp,num_dp)
    return None
# The SCF potential should not trigger numpy's "Using a non-tuple sequence
# for multidimensional indexing is deprecated" FutureWarning; issue #347
def test_scf_tupleindexwarning():
    """Turn FutureWarning into an error and evaluate Rforce for both
    scalar and array input; any such warning would fail the test."""
    import warnings
    # Scalar input
    with warnings.catch_warnings(record=True):
        warnings.simplefilter("error",FutureWarning)
        mockSCFZeeuwPotential().Rforce(1.,0.)
    # Array input (separate failure mode, reported by Nil)
    with warnings.catch_warnings(record=True):
        warnings.simplefilter("error",FutureWarning)
        mockSCFZeeuwPotential().Rforce(numpy.atleast_1d(1.),
                                       numpy.atleast_1d(0.))
    return None
# Multiplying or dividing a potential by anything but a number is an error
def test_mult_divide_error():
    """pot*seq, seq*pot and pot/seq should all raise TypeError, for the
    3D, planar (2D), and vertical (1D) versions of a potential."""
    base= potential.LogarithmicHaloPotential(normalize=1.,q=0.9)
    for pot in [base,base.toPlanar(),base.toVertical(1.1)]:
        with pytest.raises(TypeError):
            pot*[1.,2.]
        with pytest.raises(TypeError):
            [1.,2.]*pot
        with pytest.raises(TypeError):
            pot/[1.,2.]
    return None
# Arithmetically adding potentials should return lists of potentials
def test_add_potentials():
    """Addition of potentials should produce the corresponding list, in
    every associativity pattern, for 3D, 2D, and 1D potentials."""
    assert potential.MWPotential2014 == potential.MWPotential2014[0]+potential.MWPotential2014[1]+potential.MWPotential2014[2], 'Potential addition of components of MWPotential2014 does not give MWPotential2014'
    def check_addition(pot1,pot2,pot3):
        # All groupings must flatten to the same list
        assert pot1+pot2 == [pot1,pot2]
        assert pot1+pot2+pot3 == [pot1,pot2,pot3]
        assert (pot1+pot2)+pot3 == [pot1,pot2,pot3]
        assert pot1+(pot2+pot3) == [pot1,pot2,pot3]
    # 3D
    check_addition(potential.LogarithmicHaloPotential(normalize=1.,q=0.9),
                   potential.MiyamotoNagaiPotential(normalize=0.2,a=0.4,b=0.1),
                   potential.HernquistPotential(normalize=0.4,a=0.1))
    # 2D
    check_addition(potential.LogarithmicHaloPotential(normalize=1.,q=0.9).toPlanar(),
                   potential.MiyamotoNagaiPotential(normalize=0.2,a=0.4,b=0.1).toPlanar(),
                   potential.HernquistPotential(normalize=0.4,a=0.1).toPlanar())
    # 1D
    check_addition(potential.LogarithmicHaloPotential(normalize=1.,q=0.9).toVertical(1.1),
                   potential.MiyamotoNagaiPotential(normalize=0.2,a=0.4,b=0.1).toVertical(1.1),
                   potential.HernquistPotential(normalize=0.4,a=0.1).toVertical(1.1))
    return None
# Adding a potential and a plain number (in either order) should raise a
# TypeError
def test_add_potentials_error():
    """number+pot and pot+number must both fail, for the 3D, planar, and
    vertical versions of a potential."""
    base= potential.LogarithmicHaloPotential(normalize=1.,q=0.9)
    for pot in [base,base.toPlanar(),base.toVertical(1.1)]:
        with pytest.raises(TypeError):
            3+pot
        with pytest.raises(TypeError):
            pot+3
    return None
# Adding potentials with incompatible unit systems raises an AssertionError
def test_add_potentials_unitserror():
    """Build potentials with mismatched ro and/or vo and check that adding
    them (in either order) fails, for the 3D, planar, and vertical cases."""
    ro, vo= 8., 220.
    def make(this_ro,this_vo,convert):
        return convert(potential.LogarithmicHaloPotential(normalize=1.,q=0.9,
                                                          ro=this_ro,
                                                          vo=this_vo))
    # convert maps the 3D potential to itself, its planar, or its vertical
    # version
    for convert in [lambda p: p,
                    lambda p: p.toPlanar(),
                    lambda p: p.toVertical(1.1)]:
        pot= make(ro,vo,convert)
        for other in [make(ro*1.1,vo,convert),
                      make(ro,vo*1.1,convert),
                      make(ro*1.1,vo*1.1,convert)]:
            with pytest.raises(AssertionError):
                pot+other
            with pytest.raises(AssertionError):
                other+pot
    return None
# Unit handling of interpolated spherical potentials
def test_interSphericalPotential_unithandling():
    """Setting up interpSphericalPotential with ro/vo inconsistent with the
    parent potential should raise a RuntimeError; without overrides the
    parent's units must be inherited."""
    pot= potential.HernquistPotential(amp=1.,a=2.,ro=8.3,vo=230.)
    rgrid= numpy.geomspace(0.01,5.,201)
    # Conflicting ro or vo --> RuntimeError
    with pytest.raises(RuntimeError):
        potential.interpSphericalPotential(rforce=pot,rgrid=rgrid,ro=7.5)
    with pytest.raises(RuntimeError):
        potential.interpSphericalPotential(rforce=pot,rgrid=rgrid,vo=210.)
    # Without overrides, the units transfer from the parent
    ipot= potential.interpSphericalPotential(rforce=pot,rgrid=rgrid)
    assert ipot._roSet, 'ro of interpSphericalPotential not set, even though that of parent was set'
    assert ipot._ro == pot._ro, 'ro of interpSphericalPotential does not agree with that of the parent potential'
    assert ipot._voSet, 'vo of interpSphericalPotential not set, even though that of parent was set'
    assert ipot._vo == pot._vo, 'vo of interpSphericalPotential does not agree with that of the parent potential'
    return None
# Test that the amplitude of the isothermal disk potential is set correctly (issue #400)
def test_isodisk_amplitude_issue400():
    """The amp= of IsothermalDiskPotential should be the midplane density.

    There is no dens or 2nd-derivative method for 1D potentials, so the
    density at z=0 is computed manually from the Poisson equation,
    rho(0) = Phi''(0)/(4 pi), via a numerical second derivative.

    Fix: removed a dead assignment (``z= numpy.linspace(-0.1,0.1,10001)``)
    that was immediately overwritten and never used."""
    pot= potential.IsothermalDiskPotential(amp=0.1,sigma=20.5/220.)
    # Small symmetric grid around z=0 for the finite-difference 2nd deriv;
    # index [2] picks the central (z=0) point
    z= numpy.linspace(-2e-4,2e-4,5)
    dens_at_0= 1./(numpy.pi*4)*numpy.gradient(numpy.gradient(pot(z),z),z)[2]
    assert numpy.fabs(dens_at_0-0.1) < 1e-7, 'Density at z=0 for IsothermalDiskPotential is not correct'
    return None
def test_plotting():
    """Smoke tests of the plotting routines (rotation curve, escape curve,
    potential, density, surface density, planar and linear potentials),
    including the save-to-file/reload-from-file code paths; these only
    check that the calls run without raising.

    Fix: removed a leftover debug ``print("Here")`` from the
    effective-potential RuntimeError check."""
    import tempfile
    #Some tests of the plotting routines, to make sure they don't fail
    kp= potential.KeplerPotential(normalize=1.)
    #Plot the rotation curve
    kp.plotRotcurve()
    kp.toPlanar().plotRotcurve() #through planar interface
    kp.plotRotcurve(Rrange=[0.01,10.],
                    grid=101,
                    savefilename=None)
    potential.plotRotcurve([kp])
    potential.plotRotcurve([kp],Rrange=[0.01,10.],
                           grid=101,
                           savefilename=None)
    #Also while saving the result
    savefile, tmp_savefilename= tempfile.mkstemp()
    try:
        os.close(savefile) #Easier this way
        os.remove(tmp_savefilename)
        #First save
        kp.plotRotcurve(Rrange=[0.01,10.],
                        grid=101,
                        savefilename=tmp_savefilename)
        #Then plot using the saved file
        kp.plotRotcurve(Rrange=[0.01,10.],
                        grid=101,
                        savefilename=tmp_savefilename)
    finally:
        os.remove(tmp_savefilename)
    #Plot the escape-velocity curve
    kp.plotEscapecurve()
    kp.toPlanar().plotEscapecurve() #Through planar interface
    kp.plotEscapecurve(Rrange=[0.01,10.],
                       grid=101,
                       savefilename=None)
    potential.plotEscapecurve([kp])
    potential.plotEscapecurve([kp],Rrange=[0.01,10.],
                              grid=101,
                              savefilename=None)
    #Also while saving the result
    savefile, tmp_savefilename= tempfile.mkstemp()
    try:
        os.close(savefile) #Easier this way
        os.remove(tmp_savefilename)
        #First save
        kp.plotEscapecurve(Rrange=[0.01,10.],
                           grid=101,
                           savefilename=tmp_savefilename)
        #Then plot using the saved file
        kp.plotEscapecurve(Rrange=[0.01,10.],
                           grid=101,
                           savefilename=tmp_savefilename)
    finally:
        os.remove(tmp_savefilename)
    #Plot the potential itself
    kp.plot()
    kp.plot(t=1.,rmin=0.01,rmax=1.8,nrs=11,zmin=-0.55,zmax=0.55,nzs=11,
            effective=False,Lz=None,xy=True,
            xrange=[0.01,1.8],yrange=[-0.55,0.55],justcontours=True,
            ncontours=11,savefilename=None)
    #Also while saving the result
    savefile, tmp_savefilename= tempfile.mkstemp()
    try:
        os.close(savefile) #Easier this way
        os.remove(tmp_savefilename)
        #First save
        kp.plot(t=1.,rmin=0.01,rmax=1.8,nrs=11,zmin=-0.55,zmax=0.55,nzs=11,
                effective=False,Lz=None,
                xrange=[0.01,1.8],yrange=[-0.55,0.55],
                ncontours=11,savefilename=tmp_savefilename)
        #Then plot using the saved file
        kp.plot(t=1.,rmin=0.01,rmax=1.8,nrs=11,zmin=-0.55,zmax=0.55,nzs=11,
                effective=False,Lz=None,
                xrange=[0.01,1.8],yrange=[-0.55,0.55],
                ncontours=11,savefilename=tmp_savefilename)
    finally:
        os.remove(tmp_savefilename)
    potential.plotPotentials([kp])
    #Also while saving the result
    savefile, tmp_savefilename= tempfile.mkstemp()
    try:
        os.close(savefile) #Easier this way
        os.remove(tmp_savefilename)
        #First save
        potential.plotPotentials([kp],
                                 rmin=0.01,rmax=1.8,nrs=11,
                                 zmin=-0.55,zmax=0.55,nzs=11,
                                 justcontours=True,xy=True,
                                 ncontours=11,savefilename=tmp_savefilename)
        #Then plot using the saved file
        potential.plotPotentials([kp],t=1.,
                                 rmin=0.01,rmax=1.8,nrs=11,
                                 zmin=-0.55,zmax=0.55,nzs=11,
                                 ncontours=11,savefilename=tmp_savefilename)
    finally:
        os.remove(tmp_savefilename)
    #Plot the effective potential
    kp.plot()
    kp.plot(effective=True,Lz=1.)
    #effective=True without Lz must raise
    try:
        kp.plot(effective=True,Lz=None)
    except RuntimeError:
        pass
    else:
        raise AssertionError("Potential.plot with effective=True, but Lz=None did not return a RuntimeError")
    potential.plotPotentials([kp],effective=True,Lz=1.)
    try:
        potential.plotPotentials([kp],effective=True,Lz=None)
    except RuntimeError:
        pass
    else:
        raise AssertionError("Potential.plot with effective=True, but Lz=None did not return a RuntimeError")
    #Plot the density of a LogarithmicHaloPotential
    lp= potential.LogarithmicHaloPotential(normalize=1.)
    lp.plotDensity()
    lp.plotDensity(t=1.,rmin=0.05,rmax=1.8,nrs=11,zmin=-0.55,zmax=0.55,nzs=11,
                   aspect=1.,log=True,justcontours=True,xy=True,
                   ncontours=11,savefilename=None)
    #Also while saving the result
    savefile, tmp_savefilename= tempfile.mkstemp()
    try:
        os.close(savefile) #Easier this way
        os.remove(tmp_savefilename)
        #First save
        lp.plotDensity(savefilename=tmp_savefilename)
        #Then plot using the saved file
        lp.plotDensity(savefilename=tmp_savefilename)
    finally:
        os.remove(tmp_savefilename)
    potential.plotDensities([lp])
    potential.plotDensities([lp],t=1.,
                            rmin=0.05,rmax=1.8,nrs=11,
                            zmin=-0.55,zmax=0.55,nzs=11,
                            aspect=1.,log=True,xy=True,
                            justcontours=True,
                            ncontours=11,savefilename=None)
    #Plot the surface density of a LogarithmicHaloPotential
    lp= potential.LogarithmicHaloPotential(normalize=1.)
    lp.plotSurfaceDensity()
    lp.plotSurfaceDensity(t=1.,z=2.,xmin=0.05,xmax=1.8,nxs=11,
                          ymin=-0.55,ymax=0.55,nys=11,
                          aspect=1.,log=True,justcontours=True,
                          ncontours=11,savefilename=None)
    #Also while saving the result
    savefile, tmp_savefilename= tempfile.mkstemp()
    try:
        os.close(savefile) #Easier this way
        os.remove(tmp_savefilename)
        #First save
        lp.plotSurfaceDensity(savefilename=tmp_savefilename)
        #Then plot using the saved file
        lp.plotSurfaceDensity(savefilename=tmp_savefilename)
    finally:
        os.remove(tmp_savefilename)
    potential.plotSurfaceDensities([lp])
    potential.plotSurfaceDensities([lp],t=1.,z=2.,
                                   xmin=0.05,xmax=1.8,nxs=11,
                                   ymin=-0.55,ymax=0.55,nys=11,
                                   aspect=1.,log=True,
                                   justcontours=True,
                                   ncontours=11,savefilename=None)
    #Plot the potential itself for a 2D potential
    kp.toPlanar().plot()
    savefile, tmp_savefilename= tempfile.mkstemp()
    try:
        os.close(savefile) #Easier this way
        os.remove(tmp_savefilename)
        #First save
        kp.toPlanar().plot(Rrange=[0.01,1.8],grid=11,
                           savefilename=tmp_savefilename)
        #Then plot using the saved file
        kp.toPlanar().plot(Rrange=[0.01,1.8],grid=11,
                           savefilename=tmp_savefilename)
    finally:
        os.remove(tmp_savefilename)
    dp= potential.EllipticalDiskPotential()
    savefile, tmp_savefilename= tempfile.mkstemp()
    try:
        os.close(savefile) #Easier this way
        os.remove(tmp_savefilename)
        #First save
        dp.plot(xrange=[0.01,1.8],yrange=[0.01,1.8],gridx=11,gridy=11,
                ncontours=11,savefilename=tmp_savefilename)
        #Then plot using the saved file
        dp.plot(xrange=[0.01,1.8],yrange=[0.01,1.8],gridx=11,gridy=11,
                ncontours=11,savefilename=tmp_savefilename)
    finally:
        os.remove(tmp_savefilename)
    potential.plotplanarPotentials([dp],gridx=11,gridy=11)
    #Tests of linearPotential plotting
    lip= potential.RZToverticalPotential(potential.MiyamotoNagaiPotential(normalize=1.),1.)
    lip.plot()
    savefile, tmp_savefilename= tempfile.mkstemp()
    try:
        os.close(savefile) #Easier this way
        os.remove(tmp_savefilename)
        #First save
        lip.plot(t=0.,min=-15.,max=15,ns=21,savefilename=tmp_savefilename)
        #Then plot using the saved file
        lip.plot(t=0.,min=-15.,max=15,ns=21,savefilename=tmp_savefilename)
    finally:
        os.remove(tmp_savefilename)
    savefile, tmp_savefilename= tempfile.mkstemp()
    try:
        os.close(savefile) #Easier this way
        os.remove(tmp_savefilename)
        #First save
        potential.plotlinearPotentials(lip,t=0.,min=-15.,max=15,ns=21,
                                       savefilename=tmp_savefilename)
        #Then plot using the saved file
        potential.plotlinearPotentials(lip,t=0.,min=-15.,max=15,ns=21,
                                       savefilename=tmp_savefilename)
    finally:
        os.remove(tmp_savefilename)
    return None
#Classes for testing Integer TwoSphericalPotential and for testing special
# cases of some other potentials
from galpy.potential import TwoPowerSphericalPotential, \
MiyamotoNagaiPotential, PowerSphericalPotential, interpRZPotential, \
MWPotential, FlattenedPowerPotential,MN3ExponentialDiskPotential, \
TriaxialHernquistPotential, TriaxialNFWPotential, TriaxialJaffePotential, \
TwoPowerTriaxialPotential, BurkertPotential, SoftenedNeedleBarPotential, \
FerrersPotential, DiskSCFPotential, SpiralArmsPotential, \
LogarithmicHaloPotential
class mockSphericalSoftenedNeedleBarPotential(SoftenedNeedleBarPotential):
    """SoftenedNeedleBar degenerated to a near-spherical, non-rotating
    configuration (a ~ 0, b=0, large c, omegab=0), used to test the
    spherical limit of the needle bar."""
    def __init__(self):
        SoftenedNeedleBarPotential.__init__(self,amp=1.,a=0.000001,b=0.,
                                            c=10.,omegab=0.,pa=0.)
        self.normalize(1.)
        # Declare the potential axisymmetric so axi-only test machinery uses it
        self.isNonAxi= False
        return None
    def _evaluate(self,R,z,phi=0.,t=0.):
        # Some evaluators pass phi=None for axisymmetric potentials
        if phi is None: phi= 0.
        x,y,z= self._compute_xyz(R,phi,z,t)
        Tp, Tm= self._compute_TpTm(x,y,z)
        # Closed-form needle-bar potential (same form as the parent class)
        return numpy.log((x-self._a+Tm)/(x+self._a+Tp))/2./self._a
class specialTwoPowerSphericalPotential(TwoPowerSphericalPotential):
    """Two-power spherical profile with a half-integer inner slope (a=5, alpha=1.5, beta=3)."""
    def __init__(self):
        super(specialTwoPowerSphericalPotential, self).__init__(
            amp=1., a=5., alpha=1.5, beta=3.)
class DehnenTwoPowerSphericalPotential(TwoPowerSphericalPotential):
    """Two-power profile in the Dehnen family (outer slope beta=4, alpha=1.5)."""
    def __init__(self):
        super(DehnenTwoPowerSphericalPotential, self).__init__(
            amp=1., a=5., alpha=1.5, beta=4.)
class DehnenCoreTwoPowerSphericalPotential(TwoPowerSphericalPotential):
    """Cored Dehnen-type two-power profile (inner slope alpha=0, beta=4)."""
    def __init__(self):
        super(DehnenCoreTwoPowerSphericalPotential, self).__init__(
            amp=1., a=5., alpha=0, beta=4.)
class HernquistTwoPowerSphericalPotential(TwoPowerSphericalPotential):
    """Hernquist profile (alpha=1, beta=4) expressed through the generic two-power class."""
    def __init__(self):
        super(HernquistTwoPowerSphericalPotential, self).__init__(
            amp=1., a=5., alpha=1., beta=4.)
class JaffeTwoPowerSphericalPotential(TwoPowerSphericalPotential):
    """Jaffe profile (alpha=2, beta=4) expressed through the generic two-power class."""
    def __init__(self):
        super(JaffeTwoPowerSphericalPotential, self).__init__(
            amp=1., a=5., alpha=2., beta=4.)
class NFWTwoPowerSphericalPotential(TwoPowerSphericalPotential):
    """NFW profile (alpha=1, beta=3) expressed through the generic two-power class."""
    def __init__(self):
        super(NFWTwoPowerSphericalPotential, self).__init__(
            amp=1., a=5., alpha=1., beta=3.)
class specialPowerSphericalPotential(PowerSphericalPotential):
    """Power-law spherical potential with the special alpha=2 exponent."""
    def __init__(self):
        super(specialPowerSphericalPotential, self).__init__(amp=1., alpha=2.)
class specialMiyamotoNagaiPotential(MiyamotoNagaiPotential):
    """Miyamoto-Nagai with a=0, i.e., the Plummer limit (b=0.1)."""
    def __init__(self):
        super(specialMiyamotoNagaiPotential, self).__init__(amp=1., a=0., b=0.1)
class specialFlattenedPowerPotential(FlattenedPowerPotential):
    """Flattened power-law potential at the special alpha=0 (logarithmic) value."""
    def __init__(self):
        super(specialFlattenedPowerPotential, self).__init__(alpha=0.)
class specialMN3ExponentialDiskPotentialPD(MN3ExponentialDiskPotential):
    """MN3 exponential disk using the positive-density fitting option."""
    def __init__(self):
        super(specialMN3ExponentialDiskPotentialPD, self).__init__(
            normalize=1., posdens=True)
class specialMN3ExponentialDiskPotentialSECH(MN3ExponentialDiskPotential):
    """MN3 exponential disk using the sech^2 vertical-profile option."""
    def __init__(self):
        super(specialMN3ExponentialDiskPotentialSECH, self).__init__(
            normalize=1., sech=True)
class BurkertPotentialNoC(BurkertPotential):
    """Burkert potential with its C-extension flags switched off, forcing the
    pure-Python code paths to be exercised."""
    def __init__(self):
        super(BurkertPotentialNoC, self).__init__()
        # Pretend the C implementations are unavailable
        self.hasC = False
        self.hasC_dxdv = False
class oblateHernquistPotential(TriaxialHernquistPotential):
    """Oblate (b=1, c=0.2) triaxial Hernquist potential."""
    def __init__(self):
        super(oblateHernquistPotential, self).__init__(normalize=1., b=1., c=.2)
class oblateNFWPotential(TriaxialNFWPotential):
    """Oblate (b=1, c=0.2) triaxial NFW potential."""
    def __init__(self):
        super(oblateNFWPotential, self).__init__(normalize=1., b=1., c=.2)
class oblatenoGLNFWPotential(TriaxialNFWPotential):
    """Oblate triaxial NFW evaluated without Gauss-Legendre integration (glorder=None)."""
    def __init__(self):
        super(oblatenoGLNFWPotential, self).__init__(
            normalize=1., b=1., c=.2, glorder=None)
class oblateJaffePotential(TriaxialJaffePotential):
    """Oblate (b=1, c=0.2) triaxial Jaffe potential."""
    def __init__(self):
        super(oblateJaffePotential, self).__init__(normalize=1., b=1., c=.2)
class prolateHernquistPotential(TriaxialHernquistPotential):
    """Prolate (b=1, c=1.8) triaxial Hernquist potential."""
    def __init__(self):
        super(prolateHernquistPotential, self).__init__(normalize=1., b=1., c=1.8)
class prolateNFWPotential(TriaxialNFWPotential):
    """Prolate (b=1, c=1.8) triaxial NFW potential."""
    def __init__(self):
        super(prolateNFWPotential, self).__init__(normalize=1., b=1., c=1.8)
class prolateJaffePotential(TriaxialJaffePotential):
    """Prolate (b=1, c=1.8) triaxial Jaffe potential."""
    def __init__(self):
        super(prolateJaffePotential, self).__init__(normalize=1., b=1., c=1.8)
class rotatingSpiralArmsPotential(SpiralArmsPotential):
    """Spiral arms with a non-zero pattern speed (omega=1.1)."""
    def __init__(self):
        super(rotatingSpiralArmsPotential, self).__init__(omega=1.1)
class specialSpiralArmsPotential(SpiralArmsPotential):
    """Rotating four-armed spiral with a three-term Cs expansion."""
    def __init__(self):
        super(specialSpiralArmsPotential, self).__init__(
            omega=1.3, N=4., Cs=[8./3./numpy.pi, 1./2., 8./15./numpy.pi])
class triaxialHernquistPotential(TriaxialHernquistPotential):
    """Fully triaxial (b=1.4, c=0.6) Hernquist potential."""
    def __init__(self):
        super(triaxialHernquistPotential, self).__init__(normalize=1., b=1.4, c=0.6)
class triaxialNFWPotential(TriaxialNFWPotential):
    """Fully triaxial (b=0.2, c=1.8) NFW potential."""
    def __init__(self):
        super(triaxialNFWPotential, self).__init__(normalize=1., b=.2, c=1.8)
class triaxialJaffePotential(TriaxialJaffePotential):
    """Fully triaxial (b=0.4, c=0.7) Jaffe potential."""
    def __init__(self):
        super(triaxialJaffePotential, self).__init__(normalize=1., b=0.4, c=0.7)
class zRotatedTriaxialNFWPotential(TriaxialNFWPotential):
    """Triaxial NFW with its symmetry axis tilted via the zvec keyword."""
    def __init__(self):
        super(zRotatedTriaxialNFWPotential, self).__init__(
            normalize=1., b=1.5, c=.2,
            zvec=[numpy.sin(0.5), 0., numpy.cos(0.5)])
class yRotatedTriaxialNFWPotential(TriaxialNFWPotential):
    """Triaxial NFW rotated in the plane via the position-angle keyword (pa=0.2)."""
    def __init__(self):
        super(yRotatedTriaxialNFWPotential, self).__init__(
            normalize=1., b=1.5, c=.2, pa=0.2)
class fullyRotatedTriaxialNFWPotential(TriaxialNFWPotential):
    """Triaxial NFW rotated both by a tilted zvec and a position angle."""
    def __init__(self):
        super(fullyRotatedTriaxialNFWPotential, self).__init__(
            normalize=1., b=1.5, c=.2,
            zvec=[numpy.sin(0.5), 0., numpy.cos(0.5)],
            pa=0.2)
class fullyRotatednoGLTriaxialNFWPotential(TriaxialNFWPotential):
    """Fully rotated triaxial NFW without Gauss-Legendre integration (glorder=None)."""
    def __init__(self):
        super(fullyRotatednoGLTriaxialNFWPotential, self).__init__(
            normalize=1., b=1.5, c=.2,
            zvec=[numpy.sin(0.5), 0., numpy.cos(0.5)],
            pa=0.2, glorder=None)
class triaxialLogarithmicHaloPotential(LogarithmicHaloPotential):
    """Triaxial (b=0.7, q=0.9) logarithmic halo with a core radius of 0.5."""
    def __init__(self):
        super(triaxialLogarithmicHaloPotential, self).__init__(
            normalize=1., b=0.7, q=0.9, core=0.5)
    def OmegaP(self):
        # Non-rotating: zero pattern speed
        return 0.
# Implementations through TwoPowerTriaxialPotential
class HernquistTwoPowerTriaxialPotential(TwoPowerTriaxialPotential):
    """Triaxial Hernquist (alpha=1, beta=4) via the generic two-power triaxial class."""
    def __init__(self):
        super(HernquistTwoPowerTriaxialPotential, self).__init__(
            amp=1., a=5., alpha=1., beta=4., b=0.3, c=1.8)
class NFWTwoPowerTriaxialPotential(TwoPowerTriaxialPotential):
    """Triaxial NFW (alpha=1, beta=3) via the generic two-power triaxial class."""
    def __init__(self):
        super(NFWTwoPowerTriaxialPotential, self).__init__(
            amp=1., a=2., alpha=1., beta=3., b=1.3, c=0.8)
        # Force the non-axisymmetric code path to test planar-from-full
        self.isNonAxi = True
class JaffeTwoPowerTriaxialPotential(TwoPowerTriaxialPotential):
    """Triaxial Jaffe (alpha=2, beta=4) via the generic two-power triaxial class."""
    def __init__(self):
        super(JaffeTwoPowerTriaxialPotential, self).__init__(
            amp=1., a=5., alpha=2., beta=4., b=1.3, c=1.8)
# Other DiskSCFPotentials
class sech2DiskSCFPotential(DiskSCFPotential):
    """DiskSCFPotential with an exponential radial profile (h_R=1/3) and a
    sech^2 vertical profile (h_z=1/27)."""
    def __init__(self):
        DiskSCFPotential.__init__(self,
                                  dens=lambda R,z: numpy.exp(-3.*R)\
                                      *1./numpy.cosh(z/2.*27.)**2./4.*27.,
                                  Sigma={'h': 1./3.,
                                         'type': 'exp', 'amp': 1.0},
                                  hz={'type':'sech2','h':1./27.},
                                  a=1.,N=5,L=5)
        return None
class expwholeDiskSCFPotential(DiskSCFPotential):
    """DiskSCFPotential with an exponential disk that has an inner hole
    (Sigma type 'expwhole'), plus a Hernquist component."""
    def __init__(self):
        # Add a Hernquist potential because otherwise the density near the
        # center is zero
        from galpy.potential import HernquistPotential
        hp= HernquistPotential(normalize=0.5)
        DiskSCFPotential.__init__(self,\
                                  dens=lambda R,z: 13.5*numpy.exp(-0.5/(R+10.**-10.)
                                                                   -3.*R-numpy.fabs(z)*27.)
                                  +hp.dens(R,z),
                                  Sigma={'h': 1./3.,
                                         'type': 'expwhole','amp': 1.0,
                                         'Rhole':0.5},
                                  hz={'type':'exp','h':1./27.},
                                  a=1.,N=5,L=5)
        return None
# Same as above, but specify type as 'exp' and give Rhole, to make sure that
# case is handled correctly
class altExpwholeDiskSCFPotential(DiskSCFPotential):
    """Same density as expwholeDiskSCFPotential, but with Sigma declared as
    type 'exp' with an Rhole parameter, to check that this alternative
    specification is handled correctly."""
    def __init__(self):
        # Add a Hernquist potential because otherwise the density near the
        # center is zero
        from galpy.potential import HernquistPotential
        hp= HernquistPotential(normalize=0.5)
        DiskSCFPotential.__init__(self,\
                                  dens=lambda R,z: 13.5*numpy.exp(-0.5/(R+10.**-10.)
                                                                   -3.*R-numpy.fabs(z)*27.)
                                  +hp.dens(R,z),
                                  Sigma={'h': 1./3.,
                                         'type': 'exp','amp': 1.0,
                                         'Rhole':0.5},
                                  hz={'type':'exp','h':1./27.},
                                  a=1.,N=5,L=5)
        return None
class nonaxiDiskSCFPotential(DiskSCFPotential):
    """DiskSCFPotential with a non-axisymmetric target density (double
    exponential disk plus a triaxial Hernquist component) and with
    Sigma, hz, and their derivatives supplied explicitly as functions."""
    def __init__(self):
        thp= triaxialHernquistPotential()
        DiskSCFPotential.__init__(self,\
            dens= lambda R,z,phi: 13.5*numpy.exp(-3.*R)\
                *numpy.exp(-27.*numpy.fabs(z))
                +thp.dens(R,z,phi=phi),
            Sigma_amp=[0.5,0.5],
            Sigma=[lambda R: numpy.exp(-3.*R),
                   lambda R: numpy.exp(-3.*R)],
            dSigmadR=[lambda R: -3.*numpy.exp(-3.*R),
                      lambda R: -3.*numpy.exp(-3.*R)],
            d2SigmadR2=[lambda R: 9.*numpy.exp(-3.*R),
                        lambda R: 9.*numpy.exp(-3.*R)],
            hz=lambda z: 13.5*numpy.exp(-27.
                                         *numpy.fabs(z)),
            Hz=lambda z: (numpy.exp(-27.*numpy.fabs(z))-1.
                          +27.*numpy.fabs(z))/54.,
            dHzdz=lambda z: 0.5*numpy.sign(z)*\
                (1.-numpy.exp(-27.*numpy.fabs(z))),
            N=5,L=5)
        return None
# An axisymmetric FerrersPotential
class mockAxisymmetricFerrersPotential(FerrersPotential):
    """Ferrers potential in the axisymmetric limit (b=1, c=0.2)."""
    def __init__(self):
        super(mockAxisymmetricFerrersPotential, self).__init__(
            normalize=1., b=1., c=.2)
class mockInterpRZPotential(interpRZPotential):
    """interpRZPotential of MWPotential with all interpolation options enabled."""
    def __init__(self):
        super(mockInterpRZPotential, self).__init__(
            RZPot=MWPotential,
            rgrid=(0.01, 2.1, 101), zgrid=(0., 0.26, 101), logR=True,
            interpPot=True, interpRforce=True,
            interpzforce=True, interpDens=True)
class mockSnapshotRZPotential(potential.SnapshotRZPotential):
    """SnapshotRZPotential built from a single particle chosen so that the
    potential is equivalent to KeplerPotential (vc(1,0)=1)."""
    def __init__(self):
        kepler = potential.KeplerPotential(amp=1.)
        snap = pynbody.new(star=1)
        # Mass such that the magnitude of the radial force at (R,z)=(1,0) is 1
        snap['mass'] = 1./numpy.fabs(kepler.Rforce(1., 0.))
        snap['eps'] = 0.
        super(mockSnapshotRZPotential, self).__init__(snap)
class mockInterpSnapshotRZPotential(potential.InterpSnapshotRZPotential):
    """Interpolated snapshot potential equivalent to KeplerPotential, built
    from a single particle with vc(1,0)=1."""
    def __init__(self):
        kepler = potential.KeplerPotential(amp=1.)
        snap = pynbody.new(star=1)
        # Mass such that the magnitude of the radial force at (R,z)=(1,0) is 1
        snap['mass'] = 1./numpy.fabs(kepler.Rforce(1., 0.))
        snap['eps'] = 0.
        super(mockInterpSnapshotRZPotential, self).__init__(
            snap,
            rgrid=(0.01, 2., 101), zgrid=(0., 0.3, 101),
            logR=False, interpPot=True, zsym=True)
# Some special cases of 2D, non-axisymmetric potentials, to make sure they
# are covered; need 3 to capture all of the transient behavior
from galpy.potential import DehnenBarPotential, CosmphiDiskPotential, \
EllipticalDiskPotential, SteadyLogSpiralPotential, \
TransientLogSpiralPotential, HenonHeilesPotential
class mockDehnenBarPotentialT1(DehnenBarPotential):
    """Dehnen bar with a positive formation time (tform=0.5, tsteady=0.5)."""
    def __init__(self):
        super(mockDehnenBarPotentialT1, self).__init__(
            omegab=1.9, rb=0.4, barphi=25.*numpy.pi/180., beta=0.,
            tform=0.5, tsteady=0.5, alpha=0.01, Af=0.04)
class mockDehnenBarPotentialTm1(DehnenBarPotential):
    """Dehnen bar with formation before t=0 (tform=-1, tsteady=1.01)."""
    def __init__(self):
        super(mockDehnenBarPotentialTm1, self).__init__(
            omegab=1.9, rb=0.6, barphi=25.*numpy.pi/180., beta=0.,
            tform=-1., tsteady=1.01, alpha=0.01, Af=0.04)
class mockDehnenBarPotentialTm5(DehnenBarPotential):
    """Dehnen bar formed well before t=0 (tform=-5, tsteady=2)."""
    def __init__(self):
        super(mockDehnenBarPotentialTm5, self).__init__(
            omegab=1.9, rb=0.4, barphi=25.*numpy.pi/180., beta=0.,
            tform=-5., tsteady=2., alpha=0.01, Af=0.04)
class mockCosmphiDiskPotentialnegcp(CosmphiDiskPotential):
    """Cosmphi disk with a negative cp amplitude (cp=-0.05) and a break radius rb=0.9."""
    def __init__(self):
        super(mockCosmphiDiskPotentialnegcp, self).__init__(
            amp=1., phib=25.*numpy.pi/180.,
            p=1., phio=0.01, m=1., rb=0.9, cp=-0.05, sp=0.05)
class mockCosmphiDiskPotentialnegp(CosmphiDiskPotential):
    """Cosmphi disk with a negative power-law index (p=-1)."""
    def __init__(self):
        super(mockCosmphiDiskPotentialnegp, self).__init__(
            amp=1., phib=25.*numpy.pi/180.,
            p=-1., phio=0.01, m=1., cp=-0.05, sp=0.05)
class mockEllipticalDiskPotentialT1(EllipticalDiskPotential):
    """Elliptical disk with a positive formation time (tform=0.5, tsteady=1)."""
    def __init__(self):
        super(mockEllipticalDiskPotentialT1, self).__init__(
            amp=1., phib=25.*numpy.pi/180.,
            p=1., twophio=0.02, tform=0.5, tsteady=1., cp=0.05, sp=0.05)
class mockEllipticalDiskPotentialTm1(EllipticalDiskPotential):
    """Elliptical disk formed before t=0 (tform=-1) with the default tsteady."""
    def __init__(self):
        super(mockEllipticalDiskPotentialTm1, self).__init__(
            amp=1., phib=25.*numpy.pi/180.,
            p=1., twophio=0.02, tform=-1., tsteady=None, cp=-0.05, sp=0.05)
class mockEllipticalDiskPotentialTm5(EllipticalDiskPotential):
    """Elliptical disk formed well before t=0 (tform=-5, tsteady=-1)."""
    def __init__(self):
        super(mockEllipticalDiskPotentialTm5, self).__init__(
            amp=1., phib=25.*numpy.pi/180.,
            p=1., twophio=0.02, tform=-5., tsteady=-1., cp=-0.05, sp=0.05)
class mockSteadyLogSpiralPotentialT1(SteadyLogSpiralPotential):
    """Steady logarithmic spiral with a positive formation time (tform=0.5, tsteady=1)."""
    def __init__(self):
        super(mockSteadyLogSpiralPotentialT1, self).__init__(
            amp=1., omegas=0.65, A=-0.035,
            m=2, gamma=numpy.pi/4., p=-0.3, tform=0.5, tsteady=1.)
class mockSteadyLogSpiralPotentialTm1(SteadyLogSpiralPotential):
    """Steady logarithmic spiral formed before t=0 (tform=-1, default tsteady)."""
    def __init__(self):
        super(mockSteadyLogSpiralPotentialTm1, self).__init__(
            amp=1., omegas=0.65, A=-0.035,
            m=2, gamma=numpy.pi/4., p=-0.3, tform=-1., tsteady=None)
class mockSteadyLogSpiralPotentialTm5(SteadyLogSpiralPotential):
    """Steady logarithmic spiral with tform=-1 and tsteady=-5."""
    def __init__(self):
        super(mockSteadyLogSpiralPotentialTm5, self).__init__(
            amp=1., omegas=0.65, A=-0.035,
            m=2, gamma=numpy.pi/4., p=-0.3, tform=-1., tsteady=-5.)
class mockTransientLogSpiralPotential(TransientLogSpiralPotential):
    """Transient logarithmic spiral perturbation (m=2, p=-0.3)."""
    def __init__(self):
        super(mockTransientLogSpiralPotential, self).__init__(
            amp=1., omegas=0.65, A=-0.035,
            m=2, gamma=numpy.pi/4., p=-0.3)
##Potentials used for mock SCF
def rho_Zeeuw(R, z=0., phi=0., a=1.):
    """Zeeuw density profile rho(r) = 3 a / (4 pi (a+r)^4), evaluated at
    cylindrical coordinates (R, z, phi)."""
    r, _, _ = coords.cyl_to_spher(R, z, phi)
    return 3./(4*numpy.pi) * numpy.power((a + r), -4.) * a
def axi_density1(R, z=0, phi=0.):
    """Hernquist density modulated by (1 + cos(theta) + cos^2(theta))."""
    _, theta, _ = coords.cyl_to_spher(R, z, phi)
    hern = potential.HernquistPotential()
    return hern.dens(R, z, phi)*(1 + numpy.cos(theta) + numpy.cos(theta)**2.)
def axi_density2(R, z=0, phi=0.):
    """Zeeuw density modulated by (1 + cos(theta) + cos^2(theta))."""
    _, theta, _ = coords.cyl_to_spher(R, z, phi)
    return rho_Zeeuw(R, z, phi)*(1 +numpy.cos(theta) + numpy.cos(theta)**2)
def scf_density(R, z=0, phi=0.):
    """axi_density2 with a small (eps=0.1) azimuthal perturbation."""
    eps = .1
    perturbation = eps*(numpy.cos(phi) + numpy.sin(phi))
    return axi_density2(R, z, phi)*(1 + perturbation)
##Mock SCF class
class mockSCFZeeuwPotential(potential.SCFPotential):
    """Spherical SCF expansion (n<=2) of the Zeeuw density."""
    def __init__(self):
        acos, asin = potential.scf_compute_coeffs_spherical(rho_Zeeuw, 2)
        super(mockSCFZeeuwPotential, self).__init__(amp=1., Acos=acos, Asin=asin)
class mockSCFNFWPotential(potential.SCFPotential):
    """Spherical SCF expansion (n<=10) of the NFW density."""
    def __init__(self):
        nfw = potential.NFWPotential()
        acos, asin = potential.scf_compute_coeffs_spherical(nfw.dens, 10)
        super(mockSCFNFWPotential, self).__init__(amp=1., Acos=acos, Asin=asin)
class mockSCFAxiDensity1Potential(potential.SCFPotential):
    """Axisymmetric SCF expansion (N=10, L=2) of axi_density1."""
    def __init__(self):
        acos, asin = potential.scf_compute_coeffs_axi(axi_density1, 10, 2)
        super(mockSCFAxiDensity1Potential, self).__init__(
            amp=1., Acos=acos, Asin=asin)
class mockSCFAxiDensity2Potential(potential.SCFPotential):
    """Axisymmetric SCF expansion (N=10, L=2) of axi_density2."""
    def __init__(self):
        acos, asin = potential.scf_compute_coeffs_axi(axi_density2, 10, 2)
        super(mockSCFAxiDensity2Potential, self).__init__(
            amp=1., Acos=acos, Asin=asin)
class mockSCFDensityPotential(potential.SCFPotential):
    """Fully general SCF expansion (N=10, L=10, phi_order=30) of scf_density."""
    def __init__(self):
        acos, asin = potential.scf_compute_coeffs(scf_density, 10, 10,
                                                  phi_order=30)
        super(mockSCFDensityPotential, self).__init__(
            amp=1., Acos=acos, Asin=asin)
# Test interpSphericalPotential
class mockInterpSphericalPotential(potential.interpSphericalPotential):
    """interpSphericalPotential built directly from a HomogeneousSpherePotential."""
    def __init__(self):
        sphere = potential.HomogeneousSpherePotential(normalize=1., R=1.1)
        super(mockInterpSphericalPotential, self).__init__(
            rforce=sphere, rgrid=numpy.linspace(0., 1.1, 201))
class mockInterpSphericalPotentialwForce(potential.interpSphericalPotential):
    """interpSphericalPotential built from a radial-force function plus the
    central potential value Phi(0)."""
    def __init__(self):
        sphere = potential.HomogeneousSpherePotential(normalize=1., R=1.1)
        super(mockInterpSphericalPotentialwForce, self).__init__(
            rforce=lambda r: sphere.Rforce(r, 0.),
            Phi0=sphere(0., 0.),
            rgrid=numpy.linspace(0., 1.1, 201))
#Class to test potentials given as lists, so that we can use their methods as a class.
from galpy.potential import Potential, \
evaluatePotentials, evaluateRforces, evaluatezforces, evaluatephiforces, \
evaluateR2derivs, evaluatez2derivs, evaluateRzderivs, \
evaluateDensities, _isNonAxi, evaluateSurfaceDensities
from galpy.potential import planarPotential, \
evaluateplanarPotentials, evaluateplanarRforces, evaluateplanarphiforces, \
evaluateplanarR2derivs
class testMWPotential(Potential):
    """Initialize with potential in natural units.

    Wraps a list of Potentials in a single Potential instance, so that the
    list can be exercised through the regular Potential interface; each
    method delegates to the corresponding evaluate* function on the list."""
    def __init__(self,potlist=MWPotential):
        self._potlist= potlist
        Potential.__init__(self,amp=1.)
        # Non-axisymmetric iff any member of the list is
        self.isNonAxi= _isNonAxi(self._potlist)
        return None
    def _evaluate(self,R,z,phi=0,t=0,dR=0,dphi=0):
        return evaluatePotentials(self._potlist,R,z,phi=phi,t=t,
                                  dR=dR,dphi=dphi)
    def _Rforce(self,R,z,phi=0.,t=0.):
        return evaluateRforces(self._potlist,R,z,phi=phi,t=t)
    def _phiforce(self,R,z,phi=0.,t=0.):
        return evaluatephiforces(self._potlist,R,z,phi=phi,t=t)
    def _zforce(self,R,z,phi=0.,t=0.):
        return evaluatezforces(self._potlist,R,z,phi=phi,t=t)
    def _R2deriv(self,R,z,phi=0.,t=0.):
        return evaluateR2derivs(self._potlist,R,z,phi=phi,t=t)
    def _z2deriv(self,R,z,phi=0.,t=0.):
        return evaluatez2derivs(self._potlist,R,z,phi=phi,t=t)
    def _Rzderiv(self,R,z,phi=0.,t=0.):
        return evaluateRzderivs(self._potlist,R,z,phi=phi,t=t)
    def _phi2deriv(self,R,z,phi=0.,t=0.):
        # Second phi derivative obtained through dphi=2 of the potential
        return evaluatePotentials(self._potlist,R,z,phi=phi,t=t,dphi=2)
    def _Rphideriv(self,R,z,phi=0.,t=0.):
        # Mixed R,phi derivative obtained through dR=1, dphi=1
        return evaluatePotentials(self._potlist,R,z,phi=phi,t=t,dR=1,
                                  dphi=1)
    def _dens(self,R,z,phi=0.,t=0.,forcepoisson=False):
        return evaluateDensities(self._potlist,R,z,phi=phi,t=t,
                                 forcepoisson=forcepoisson)
    def _surfdens(self,R,z,phi=0.,t=0.,forcepoisson=False):
        return evaluateSurfaceDensities(self._potlist,R,z,phi=phi,t=t,
                                        forcepoisson=forcepoisson)
    def vcirc(self,R):
        return potential.vcirc(self._potlist,R)
    def normalize(self,norm,t=0.):
        self._amp= norm
    def OmegaP(self):
        return 1.
#Class to test lists of planarPotentials
class testplanarMWPotential(planarPotential):
    """Initialize with potential in natural units.

    Wraps a list of (planar) potentials in a single planarPotential
    instance; full Potentials in the list are converted with toPlanar()."""
    def __init__(self,potlist=MWPotential):
        # Convert full Potentials to planar ones, keep planarPotentials as-is
        self._potlist= [p.toPlanar() for p in potlist if isinstance(p,Potential)]
        self._potlist.extend([p for p in potlist if isinstance(p,planarPotential)])
        planarPotential.__init__(self,amp=1.)
        self.isNonAxi= _isNonAxi(self._potlist)
        return None
    def _evaluate(self,R,phi=0,t=0,dR=0,dphi=0):
        return evaluateplanarPotentials(self._potlist,R,phi=phi,t=t)
    def _Rforce(self,R,phi=0.,t=0.):
        return evaluateplanarRforces(self._potlist,R,phi=phi,t=t)
    def _phiforce(self,R,phi=0.,t=0.):
        return evaluateplanarphiforces(self._potlist,R,phi=phi,t=t)
    def _R2deriv(self,R,phi=0.,t=0.):
        return evaluateplanarR2derivs(self._potlist,R,phi=phi,t=t)
    def _phi2deriv(self,R,phi=0.,t=0.):
        # Second phi derivative obtained through dphi=2 of the potential
        return evaluateplanarPotentials(self._potlist,R,phi=phi,t=t,dphi=2)
    def _Rphideriv(self,R,phi=0.,t=0.):
        # Mixed R,phi derivative obtained through dR=1, dphi=1
        return evaluateplanarPotentials(self._potlist,R,phi=phi,t=t,dR=1,
                                        dphi=1)
    def vcirc(self,R):
        return potential.vcirc(self._potlist,R)
    def normalize(self,norm,t=0.):
        self._amp= norm
    def OmegaP(self):
        return 1.
class mockFlatEllipticalDiskPotential(testplanarMWPotential):
    """Flat rotation curve plus an always-on elliptical disk perturbation."""
    def __init__(self):
        halo = potential.LogarithmicHaloPotential(normalize=1.)
        disk = potential.EllipticalDiskPotential(phib=numpy.pi/2., p=0.,
                                                 tform=None, tsteady=None,
                                                 twophio=14./220.)
        super(mockFlatEllipticalDiskPotential, self).__init__(
            potlist=[halo, disk])
    def OmegaP(self):
        # Static perturbation
        return 0.
class mockSlowFlatEllipticalDiskPotential(testplanarMWPotential):
    """Flat rotation curve plus an elliptical disk grown very slowly
    (tform=1, tsteady=250)."""
    def __init__(self):
        halo = potential.LogarithmicHaloPotential(normalize=1.)
        disk = potential.EllipticalDiskPotential(phib=numpy.pi/2., p=0.,
                                                 twophio=14./220.,
                                                 tform=1., tsteady=250.)
        super(mockSlowFlatEllipticalDiskPotential, self).__init__(
            potlist=[halo, disk])
    def OmegaP(self):
        # Static perturbation
        return 0.
class mockFlatLopsidedDiskPotential(testplanarMWPotential):
    """Flat rotation curve plus a lopsided (m=1) disk perturbation."""
    def __init__(self):
        halo = potential.LogarithmicHaloPotential(normalize=1.)
        disk = potential.LopsidedDiskPotential(phib=numpy.pi/2., p=0.,
                                               phio=10./220.)
        super(mockFlatLopsidedDiskPotential, self).__init__(
            potlist=[halo, disk])
    def OmegaP(self):
        # Static perturbation
        return 0.
class mockFlatCosmphiDiskPotential(testplanarMWPotential):
    """Flat rotation curve plus a cos(m phi) disk perturbation."""
    def __init__(self):
        halo = potential.LogarithmicHaloPotential(normalize=1.)
        disk = potential.CosmphiDiskPotential(phib=numpy.pi/2., p=0.,
                                              phio=10./220.)
        super(mockFlatCosmphiDiskPotential, self).__init__(
            potlist=[halo, disk])
    def OmegaP(self):
        # Static perturbation
        return 0.
class mockFlatCosmphiDiskwBreakPotential(testplanarMWPotential):
    """Flat rotation curve plus a cos(m phi) disk with a break radius (rb=0.99, m=6)."""
    def __init__(self):
        halo = potential.LogarithmicHaloPotential(normalize=1.)
        disk = potential.CosmphiDiskPotential(phib=numpy.pi/2., p=0.,
                                              phio=10./220., rb=0.99, m=6)
        super(mockFlatCosmphiDiskwBreakPotential, self).__init__(
            potlist=[halo, disk])
    def OmegaP(self):
        # Static perturbation
        return 0.
class mockFlatDehnenBarPotential(testMWPotential):
    """Flat rotation curve plus the default Dehnen bar."""
    def __init__(self):
        super(mockFlatDehnenBarPotential, self).__init__(
            potlist=[potential.LogarithmicHaloPotential(normalize=1.),
                     potential.DehnenBarPotential()])
    def OmegaP(self):
        # Pattern speed of the bar component
        return self._potlist[1].OmegaP()
class mockSlowFlatDehnenBarPotential(testMWPotential):
    """Flat rotation curve plus a Dehnen bar grown very slowly (tform=1, tsteady=250)."""
    def __init__(self):
        super(mockSlowFlatDehnenBarPotential, self).__init__(
            potlist=[potential.LogarithmicHaloPotential(normalize=1.),
                     potential.DehnenBarPotential(tform=1., tsteady=250.,
                                                  rolr=2.5)])
    def OmegaP(self):
        # Pattern speed of the bar component
        return self._potlist[1].OmegaP()
class mockFlatSteadyLogSpiralPotential(testplanarMWPotential):
    """Flat rotation curve plus the default steady logarithmic spiral."""
    def __init__(self):
        super(mockFlatSteadyLogSpiralPotential, self).__init__(
            potlist=[potential.LogarithmicHaloPotential(normalize=1.),
                     potential.SteadyLogSpiralPotential()])
    def OmegaP(self):
        # Pattern speed of the spiral component
        return self._potlist[1].OmegaP()
class mockSlowFlatSteadyLogSpiralPotential(testplanarMWPotential):
    """Flat rotation curve plus a steady log spiral grown slowly (tform=0.1, tsteady=25)."""
    def __init__(self):
        super(mockSlowFlatSteadyLogSpiralPotential, self).__init__(
            potlist=[potential.LogarithmicHaloPotential(normalize=1.),
                     potential.SteadyLogSpiralPotential(tform=.1,
                                                        tsteady=25.)])
    def OmegaP(self):
        # Pattern speed of the spiral component
        return self._potlist[1].OmegaP()
class mockFlatTransientLogSpiralPotential(testplanarMWPotential):
    """Flat rotation curve plus a transient log spiral peaking at to=-10;
    this way, it's basically a steady spiral."""
    def __init__(self):
        super(mockFlatTransientLogSpiralPotential, self).__init__(
            potlist=[potential.LogarithmicHaloPotential(normalize=1.),
                     potential.TransientLogSpiralPotential(to=-10.)])
    def OmegaP(self):
        # Pattern speed of the spiral component
        return self._potlist[1].OmegaP()
class mockFlatSpiralArmsPotential(testMWPotential):
    """Flat rotation curve plus the default spiral arms."""
    def __init__(self):
        super(mockFlatSpiralArmsPotential, self).__init__(
            potlist=[potential.LogarithmicHaloPotential(normalize=1.),
                     potential.SpiralArmsPotential()])
    def OmegaP(self):
        # Pattern speed of the spiral component
        return self._potlist[1].OmegaP()
class mockRotatingFlatSpiralArmsPotential(testMWPotential):
    """Flat rotation curve plus rotating spiral arms (omega=1.3)."""
    def __init__(self):
        super(mockRotatingFlatSpiralArmsPotential, self).__init__(
            potlist=[potential.LogarithmicHaloPotential(normalize=1.),
                     potential.SpiralArmsPotential(omega=1.3)])
    def OmegaP(self):
        # Pattern speed of the spiral component
        return self._potlist[1].OmegaP()
class mockSpecialRotatingFlatSpiralArmsPotential(testMWPotential):
    """Flat rotation curve plus rotating four-armed spiral with a three-term Cs expansion."""
    def __init__(self):
        super(mockSpecialRotatingFlatSpiralArmsPotential, self).__init__(
            potlist=[potential.LogarithmicHaloPotential(normalize=1.),
                     potential.SpiralArmsPotential(
                         omega=1.3, N=4,
                         Cs=[8/3/numpy.pi, 1/2, 8/15/numpy.pi])])
    def OmegaP(self):
        # Pattern speed of the spiral component
        return self._potlist[1].OmegaP()
#Class to test lists of linearPotentials
from galpy.potential import linearPotential, \
evaluatelinearPotentials, evaluatelinearForces, \
RZToverticalPotential
class testlinearMWPotential(linearPotential):
    """Initialize with potential in natural units.

    Wraps a (list of) potential(s), converted to vertical potentials at
    R=1, in a single linearPotential instance."""
    def __init__(self,potlist=MWPotential):
        self._potlist= RZToverticalPotential(potlist,1.)
        linearPotential.__init__(self,amp=1.)
        return None
    def _evaluate(self,R,phi=0,t=0,dR=0,dphi=0):
        # phi, dR, dphi are accepted for interface compatibility but ignored
        return evaluatelinearPotentials(self._potlist,R,t=t)
    def _force(self,R,t=0.):
        return evaluatelinearForces(self._potlist,R,t=t)
    def normalize(self,norm,t=0.):
        self._amp= norm
class mockCombLinearPotential(testlinearMWPotential):
    """Mix of an RZ potential and already-vertical potentials in a single list."""
    def __init__(self):
        super(mockCombLinearPotential, self).__init__(
            potlist=[potential.MWPotential[0],
                     potential.MWPotential[1].toVertical(1.),
                     potential.MWPotential[2].toVertical(1.)])
class mockSimpleLinearPotential(testlinearMWPotential):
    """Single linearPotential (vertical Miyamoto-Nagai at R=1) rather than a list."""
    def __init__(self):
        super(mockSimpleLinearPotential, self).__init__(
            potlist=potential.MiyamotoNagaiPotential(normalize=1.)
                .toVertical(1.))
from galpy.potential import PlummerPotential
class mockMovingObjectPotential(testMWPotential):
    """Two point-like moving objects placed on opposite sides of a circular
    orbit in a logarithmic halo."""
    def __init__(self,rc=0.75,maxt=1.,nt=50):
        from galpy.orbit import Orbit
        self._rc= rc
        # Two orbits at phi=0 and phi=pi with vT=1 at R=rc
        o1= Orbit([self._rc,0.,1.,0.,0.,0.])
        o2= Orbit([self._rc,0.,1.,0.,0.,numpy.pi])
        lp= potential.LogarithmicHaloPotential(normalize=1.)
        times= numpy.linspace(0.,maxt,nt)
        o1.integrate(times,lp,method='dopr54_c')
        o2.integrate(times,lp,method='dopr54_c')
        self._o1p= potential.MovingObjectPotential(o1)
        self._o2p= potential.MovingObjectPotential(o2)
        testMWPotential.__init__(self,[self._o1p,self._o2p])
        self.isNonAxi= True
        return None
    def phi2deriv(self,R,z,phi=0.,t=0.):
        # Not available for this potential
        raise AttributeError
    def OmegaP(self):
        # Pattern speed of the orbit: vT/rc with vT=1
        return 1./self._rc
class mockMovingObjectPotentialExplPlummer(testMWPotential):
    """Same setup as mockMovingObjectPotential, but with an explicit Plummer
    potential for the moving objects."""
    def __init__(self,rc=0.75,maxt=1.,nt=50):
        from galpy.orbit import Orbit
        self._rc= rc
        # Two orbits at phi=0 and phi=pi with vT=1 at R=rc
        o1= Orbit([self._rc,0.,1.,0.,0.,0.])
        o2= Orbit([self._rc,0.,1.,0.,0.,numpy.pi])
        lp= potential.LogarithmicHaloPotential(normalize=1.)
        times= numpy.linspace(0.,maxt,nt)
        o1.integrate(times,lp,method='dopr54_c')
        o2.integrate(times,lp,method='dopr54_c')
        # Each object sources a softened Plummer sphere
        oplum = potential.PlummerPotential(amp=0.06, b=0.01)
        self._o1p= potential.MovingObjectPotential(o1, pot=oplum)
        self._o2p= potential.MovingObjectPotential(o2, pot=oplum)
        testMWPotential.__init__(self,[self._o1p,self._o2p])
        self.isNonAxi= True
        return None
    def phi2deriv(self,R,z,phi=0.,t=0.):
        # Not available for this potential
        raise AttributeError
    def OmegaP(self):
        # Pattern speed of the orbit: vT/rc with vT=1
        return 1./self._rc
class mockMovingObjectLongIntPotential(mockMovingObjectPotential):
    """mockMovingObjectPotential integrated for a long time span
    (t in [0, 15], 3001 steps)."""
    def __init__(self, rc=0.75):
        super(mockMovingObjectLongIntPotential, self).__init__(
            rc=rc, maxt=15., nt=3001)
# Classes to test wrappers
from galpy.potential import DehnenSmoothWrapperPotential, \
SolidBodyRotationWrapperPotential, CorotatingRotationWrapperPotential, \
GaussianAmplitudeWrapperPotential
from galpy.potential.WrapperPotential import parentWrapperPotential
class DehnenSmoothDehnenBarPotential(DehnenSmoothWrapperPotential):
    # This wrapped potential should be the same as the default DehnenBar
    # for t > -99
    #
    # Need to use __new__ because new Wrappers are created using __new__
    def __new__(cls,*args,**kwargs):
        # _init is set when the wrapper machinery re-instantiates the class;
        # in that case defer to the parent wrapper's __new__
        if kwargs.get('_init',False):
            return parentWrapperPotential.__new__(cls,*args,**kwargs)
        dpn= DehnenBarPotential(tform=-100.,tsteady=1.) #on after t=-99
        return DehnenSmoothWrapperPotential.__new__(cls,amp=1.,pot=dpn,\
                                                    tform=-4.*2.*numpy.pi/dpn.OmegaP())
# Additional DehnenSmooth instances to catch all smoothing cases
class mockDehnenSmoothBarPotentialT1(DehnenSmoothWrapperPotential):
    """Dehnen bar smoothed with a positive formation time (tform=0.5, tsteady=0.5)."""
    def __new__(cls,*args,**kwargs):
        # _init is set when the wrapper machinery re-instantiates the class;
        # in that case defer to the parent wrapper's __new__
        if kwargs.get('_init',False):
            return parentWrapperPotential.__new__(cls,*args,**kwargs)
        dpn= DehnenBarPotential(omegab=1.9,rb=0.4,
                                barphi=25.*numpy.pi/180.,beta=0.,
                                alpha=0.01,Af=0.04,
                                tform=-99.,tsteady=1.)
        return DehnenSmoothWrapperPotential.__new__(cls,amp=1.,pot=dpn,\
#                                      tform=-4.*2.*numpy.pi/dpn.OmegaP())
                                                    tform=0.5,tsteady=0.5)
class mockDehnenSmoothBarPotentialTm1(DehnenSmoothWrapperPotential):
    """Dehnen bar smoothed starting before t=0 (tform=-1, tsteady=1.01)."""
    def __new__(cls,*args,**kwargs):
        # _init is set when the wrapper machinery re-instantiates the class;
        # in that case defer to the parent wrapper's __new__
        if kwargs.get('_init',False):
            return parentWrapperPotential.__new__(cls,*args,**kwargs)
        dpn= DehnenBarPotential(omegab=1.9,rb=0.4,
                                barphi=25.*numpy.pi/180.,beta=0.,
                                alpha=0.01,Af=0.04,
                                tform=-99.,tsteady=1.)
        return DehnenSmoothWrapperPotential.__new__(cls,amp=1.,pot=dpn,\
                                                    tform=-1.,tsteady=1.01)
class mockDehnenSmoothBarPotentialTm5(DehnenSmoothWrapperPotential):
    """Dehnen bar smoothed starting well before t=0 (tform=-5, tsteady=2)."""
    def __new__(cls,*args,**kwargs):
        # _init is set when the wrapper machinery re-instantiates the class;
        # in that case defer to the parent wrapper's __new__
        if kwargs.get('_init',False):
            return parentWrapperPotential.__new__(cls,*args,**kwargs)
        dpn= DehnenBarPotential(omegab=1.9,rb=0.4,
                                barphi=25.*numpy.pi/180.,beta=0.,
                                alpha=0.01,Af=0.04,
                                tform=-99.,tsteady=1.)
        return DehnenSmoothWrapperPotential.__new__(cls,amp=1.,pot=dpn,\
                                                    tform=-5.,tsteady=2.)
class mockDehnenSmoothBarPotentialDecay(DehnenSmoothWrapperPotential):
    """Dehnen bar with a decaying smoothing (decay=True, tform=-0.5, tsteady=1)."""
    def __new__(cls,*args,**kwargs):
        # _init is set when the wrapper machinery re-instantiates the class;
        # in that case defer to the parent wrapper's __new__
        if kwargs.get('_init',False):
            return parentWrapperPotential.__new__(cls,*args,**kwargs)
        dpn= DehnenBarPotential(omegab=1.9,rb=0.4,
                                barphi=25.*numpy.pi/180.,beta=0.,
                                alpha=0.01,Af=0.04,
                                tform=-99.,tsteady=1.)
        return DehnenSmoothWrapperPotential.__new__(cls,amp=1.,pot=dpn,\
#                                      tform=-4.*2.*numpy.pi/dpn.OmegaP())
                                                    tform=-0.5,tsteady=1.,decay=True)
class mockFlatDehnenSmoothBarPotential(testMWPotential):
    """Flat rotation curve plus a Dehnen bar grown smoothly over a few bar
    rotation periods."""
    def __init__(self):
        bar = DehnenBarPotential(omegab=1.9, rb=0.4,
                                 barphi=25.*numpy.pi/180., beta=0.,
                                 alpha=0.01, Af=0.04,
                                 tform=-99., tsteady=1.)
        super(mockFlatDehnenSmoothBarPotential, self).__init__(
            potlist=[potential.LogarithmicHaloPotential(normalize=1.),
                     DehnenSmoothWrapperPotential(
                         amp=1., pot=bar,
                         tform=-4.*2.*numpy.pi/bar.OmegaP(),
                         tsteady=2.*2*numpy.pi/bar.OmegaP())])
    def OmegaP(self):
        # Pattern speed of the wrapped bar
        return self._potlist[1]._pot.OmegaP()
class mockSlowFlatDehnenSmoothBarPotential(testMWPotential):
    """Flat rotation curve plus a Dehnen bar grown very slowly
    (tform=0.1, tsteady=500)."""
    def __init__(self):
        bar = DehnenBarPotential(omegab=1.9, rb=0.4,
                                 barphi=25.*numpy.pi/180., beta=0.,
                                 alpha=0.01, Af=0.04,
                                 tform=-99., tsteady=1.)
        super(mockSlowFlatDehnenSmoothBarPotential, self).__init__(
            potlist=[potential.LogarithmicHaloPotential(normalize=1.),
                     DehnenSmoothWrapperPotential(
                         amp=1., pot=bar, tform=0.1, tsteady=500.)])
    def OmegaP(self):
        # Pattern speed of the wrapped bar
        return self._potlist[1]._pot.OmegaP()
class mockSlowFlatDecayingDehnenSmoothBarPotential(testMWPotential):
    """Flat rotation curve plus a slowly decaying Dehnen bar
    (tform=-250, tsteady=500, decay=True)."""
    def __init__(self):
        bar = DehnenBarPotential(omegab=1.9, rb=0.4,
                                 barphi=25.*numpy.pi/180., beta=0.,
                                 alpha=0.01, Af=0.04,
                                 tform=-99., tsteady=1.)
        super(mockSlowFlatDecayingDehnenSmoothBarPotential, self).__init__(
            potlist=[potential.LogarithmicHaloPotential(normalize=1.),
                     DehnenSmoothWrapperPotential(
                         amp=1., pot=bar, tform=-250., tsteady=500.,
                         decay=True)])
    def OmegaP(self):
        # Pattern speed of the wrapped bar
        return self._potlist[1]._pot.OmegaP()
# A DehnenSmoothWrapperPotential-wrapped version of LogarithmicHaloPotential for simple actionAngle tests
class mockSmoothedLogarithmicHaloPotential(DehnenSmoothWrapperPotential):
    """LogarithmicHaloPotential wrapped in a DehnenSmooth growth
    (tform=-1, tsteady=0.5)."""
    def __new__(cls,*args,**kwargs):
        # _init is set when the wrapper machinery re-instantiates the class;
        # in that case defer to the parent wrapper's __new__
        if kwargs.get('_init',False):
            return parentWrapperPotential.__new__(cls,*args,**kwargs)
        return DehnenSmoothWrapperPotential.__new__(cls,amp=1.,
                        pot=potential.LogarithmicHaloPotential(normalize=1.),
                        tform=-1.,tsteady=0.5)
#SolidBodyWrapperPotential
class SolidBodyRotationSpiralArmsPotential(SolidBodyRotationWrapperPotential):
    """Planar spiral arms wrapped to rotate as a solid body (omega=1.1, pa=0.4)."""
    def __new__(cls,*args,**kwargs):
        # _init is set when the wrapper machinery re-instantiates the class;
        # in that case defer to the parent wrapper's __new__
        if kwargs.get('_init',False):
            return parentWrapperPotential.__new__(cls,*args,**kwargs)
        spn= potential.SpiralArmsPotential(omega=0.,phi_ref=0.)
        return SolidBodyRotationWrapperPotential.__new__(cls,amp=1.,
                                                         pot=spn.toPlanar(),
                                                         omega=1.1,pa=0.4)
class mockFlatSolidBodyRotationSpiralArmsPotential(testMWPotential):
    """Flat rotation curve plus spiral arms wrapped to rotate as a solid
    body (omega=1.3)."""
    def __init__(self):
        super(mockFlatSolidBodyRotationSpiralArmsPotential, self).__init__(
            potlist=[potential.LogarithmicHaloPotential(normalize=1.),
                     SolidBodyRotationWrapperPotential(
                         amp=1., pot=potential.SpiralArmsPotential(),
                         omega=1.3)])
    def OmegaP(self):
        # Pattern speed of the wrapped spiral
        return self._potlist[1].OmegaP()
# Special case to test handling of pure planarWrapper, not necessary for new wrappers
class mockFlatSolidBodyRotationPlanarSpiralArmsPotential(testplanarMWPotential):
    """Pure-planar version of the solid-body-rotating spiral arms setup, to
    test handling of wrapped planarPotentials."""
    def __init__(self):
        super(mockFlatSolidBodyRotationPlanarSpiralArmsPotential,
              self).__init__(
            potlist=[potential.LogarithmicHaloPotential(normalize=1.).toPlanar(),
                     SolidBodyRotationWrapperPotential(
                         amp=1.,
                         pot=potential.SpiralArmsPotential().toPlanar(),
                         omega=1.3)])
    def OmegaP(self):
        # Pattern speed of the wrapped spiral
        return self._potlist[1].OmegaP()
class testorbitHenonHeilesPotential(testplanarMWPotential):
    """Wrap HenonHeilesPotential in testplanarMWPotential so that it gains a
    .normalize method; the orbit tests skip potentials without one, and
    HenonHeiles as a non-axi planarPotential instance does not have it."""
    def __init__(self):
        super(testorbitHenonHeilesPotential, self).__init__(
            potlist=[HenonHeilesPotential(amp=1.)])
    def OmegaP(self):
        # Non-axi, so need to set this to zero for Jacobi
        return 0.
#CorotatingWrapperPotential
class CorotatingRotationSpiralArmsPotential(CorotatingRotationWrapperPotential):
    """Planar spiral arms wrapped to corotate with vpo=1.1 and beta=-0.2."""
    def __new__(cls,*args,**kwargs):
        # _init is set when the wrapper machinery re-instantiates the class;
        # in that case defer to the parent wrapper's __new__
        if kwargs.get('_init',False):
            return parentWrapperPotential.__new__(cls,*args,**kwargs)
        spn= potential.SpiralArmsPotential(omega=0.,phi_ref=0.)
        return CorotatingRotationWrapperPotential.__new__(cls,amp=1.,
                                                          pot=spn.toPlanar(),
                                                          vpo=1.1,beta=-0.2,
                                                          pa=0.4,to=3.)
class mockFlatCorotatingRotationSpiralArmsPotential(testMWPotential):
    """Flat rotation curve plus corotating spiral arms with beta=1, which
    gives a fixed pattern speed, so the Jacobi integral is conserved."""
    def __init__(self):
        super(mockFlatCorotatingRotationSpiralArmsPotential, self).__init__(
            potlist=[potential.LogarithmicHaloPotential(normalize=1.),
                     CorotatingRotationWrapperPotential(
                         amp=1., pot=potential.SpiralArmsPotential(),
                         vpo=1.3, beta=1., pa=0.3, to=3.)])
    def OmegaP(self):
        # Fixed pattern speed vpo for beta=1
        return 1.3
# beta =/= 1 --> Liouville should still hold!
class mockFlatTrulyCorotatingRotationSpiralArmsPotential(testMWPotential):
    """Flat rotation curve plus corotating spiral arms with beta != 1, for
    which Liouville's theorem should still hold."""
    def __init__(self):
        super(mockFlatTrulyCorotatingRotationSpiralArmsPotential,
              self).__init__(
            potlist=[potential.LogarithmicHaloPotential(normalize=1.),
                     CorotatingRotationWrapperPotential(
                         amp=1., pot=potential.SpiralArmsPotential(),
                         vpo=1.3, beta=0.1, pa=-0.3, to=-3.)])
    def OmegaP(self):
        return 1.3
#GaussianAmplitudeWrapperPotential
class GaussianAmplitudeDehnenBarPotential(GaussianAmplitudeWrapperPotential):
    # Need to use __new__ because new Wrappers are created using __new__
    def __new__(cls,*args,**kwargs):
        # Internal construction path used by the wrapper machinery
        if kwargs.get('_init',False):
            return parentWrapperPotential.__new__(cls,*args,**kwargs)
        # Bar formed at tform=-100 with tsteady=1 --> fully on after t=-99
        bar= DehnenBarPotential(tform=-100.,tsteady=1.)
        return GaussianAmplitudeWrapperPotential.__new__(
            cls,amp=1.,pot=bar,to=0.,sigma=1.)
# Basically constant
class mockFlatGaussianAmplitudeBarPotential(testMWPotential):
    def __init__(self):
        bar= DehnenBarPotential(omegab=1.9,rb=0.4,
                               barphi=25.*numpy.pi/180.,beta=0.,
                               alpha=0.01,Af=0.04,
                               tform=-99.,tsteady=1.)
        # Very large sigma --> the Gaussian amplitude is essentially constant
        modulated= GaussianAmplitudeWrapperPotential(amp=1.,pot=bar,
                                                     to=10,sigma=1000000.)
        testMWPotential.__init__(
            self,
            potlist=[potential.LogarithmicHaloPotential(normalize=1.),
                     modulated])
    def OmegaP(self):
        # Delegate to the wrapped bar's pattern speed
        return self._potlist[1]._pot.OmegaP()
#For Liouville
class mockFlatTrulyGaussianAmplitudeBarPotential(testMWPotential):
    def __init__(self):
        bar= DehnenBarPotential(omegab=1.9,rb=0.4,
                               barphi=25.*numpy.pi/180.,beta=0.,
                               alpha=0.01,Af=0.04,
                               tform=-99.,tsteady=1.)
        # sigma=1 --> genuinely time-varying amplitude (unlike the
        # 'basically constant' variant above)
        modulated= GaussianAmplitudeWrapperPotential(amp=1.,pot=bar,
                                                     to=10,sigma=1.)
        testMWPotential.__init__(
            self,
            potlist=[potential.LogarithmicHaloPotential(normalize=1.),
                     modulated])
    def OmegaP(self):
        # Delegate to the wrapped bar's pattern speed
        return self._potlist[1]._pot.OmegaP()
# A GaussianAmplitudeWrappered version of LogarithmicHaloPotential for simple aAtest
class mockGaussianAmplitudeSmoothedLogarithmicHaloPotential(GaussianAmplitudeWrapperPotential):
    def __new__(cls,*args,**kwargs):
        # Internal construction path used by the wrapper machinery
        if kwargs.get('_init',False):
            return parentWrapperPotential.__new__(cls,*args,**kwargs)
        halo= potential.LogarithmicHaloPotential(normalize=1.)
        # Extremely wide Gaussian --> effectively no amplitude modulation
        return GaussianAmplitudeWrapperPotential.__new__(
            cls,amp=1.,pot=halo,to=0.,sigma=100000000000000.)
class nestedListPotential(testMWPotential):
    # potlist mixes potential.MWPotential2014 (presumably itself a list of
    # potentials -- verify against galpy) with a single SpiralArmsPotential
    def __init__(self):
        pots= [potential.MWPotential2014,potential.SpiralArmsPotential()]
        testMWPotential.__init__(self,potlist=pots)
    def OmegaP(self):
        # Pattern speed of the spiral arms (second entry in potlist)
        return self._potlist[1].OmegaP()
| [
"galpy.util.conversion.dens_in_meanmatterdens",
"numpy.ones",
"numpy.argmin",
"galpy.potential.TriaxialGaussianPotential",
"galpy.potential.TransientLogSpiralPotential.__init__",
"galpy.util.conversion.force_in_2piGmsolpc2",
"numpy.exp",
"numpy.diag",
"galpy.potential.epifreq",
"galpy.potential.pl... | [((395, 414), 'os.getenv', 'os.getenv', (['"""TRAVIS"""'], {}), "('TRAVIS')\n", (404, 414), False, 'import os\n'), ((3137, 3185), 'galpy.potential.SphericalShellPotential', 'potential.SphericalShellPotential', ([], {'normalize': '(1.0)'}), '(normalize=1.0)\n', (3170, 3185), False, 'from galpy import potential\n'), ((3397, 3435), 'galpy.potential.RingPotential', 'potential.RingPotential', ([], {'normalize': '(0.5)'}), '(normalize=0.5)\n', (3420, 3435), False, 'from galpy import potential\n'), ((8215, 8243), 'numpy.array', 'numpy.array', (['[0.5, 1.0, 2.0]'], {}), '([0.5, 1.0, 2.0])\n', (8226, 8243), False, 'import numpy\n'), ((8248, 8294), 'numpy.array', 'numpy.array', (['[0.0, 0.125, -0.125, 0.25, -0.25]'], {}), '([0.0, 0.125, -0.125, 0.25, -0.25])\n', (8259, 8294), False, 'import numpy\n'), ((8298, 8384), 'numpy.array', 'numpy.array', (['[0.0, 0.5, -0.5, 1.0, -1.0, numpy.pi, 0.5 + numpy.pi, 1.0 + numpy.pi]'], {}), '([0.0, 0.5, -0.5, 1.0, -1.0, numpy.pi, 0.5 + numpy.pi, 1.0 +\n numpy.pi])\n', (8309, 8384), False, 'import numpy\n'), ((18065, 18093), 'numpy.array', 'numpy.array', (['[0.5, 1.0, 2.0]'], {}), '([0.5, 1.0, 2.0])\n', (18076, 18093), False, 'import numpy\n'), ((18098, 18144), 'numpy.array', 'numpy.array', (['[0.0, 0.125, -0.125, 0.25, -0.25]'], {}), '([0.0, 0.125, -0.125, 0.25, -0.25])\n', (18109, 18144), False, 'import numpy\n'), ((18148, 18234), 'numpy.array', 'numpy.array', (['[0.0, 0.5, -0.5, 1.0, -1.0, numpy.pi, 0.5 + numpy.pi, 1.0 + numpy.pi]'], {}), '([0.0, 0.5, -0.5, 1.0, -1.0, numpy.pi, 0.5 + numpy.pi, 1.0 +\n numpy.pi])\n', (18159, 18234), False, 'import numpy\n'), ((34017, 34045), 'numpy.array', 'numpy.array', (['[0.5, 1.0, 2.0]'], {}), '([0.5, 1.0, 2.0])\n', (34028, 34045), False, 'import numpy\n'), ((34050, 34096), 'numpy.array', 'numpy.array', (['[0.0, 0.125, -0.125, 0.25, -0.25]'], {}), '([0.0, 0.125, -0.125, 0.25, -0.25])\n', (34061, 34096), False, 'import numpy\n'), ((34100, 34186), 'numpy.array', 'numpy.array', 
(['[0.0, 0.5, -0.5, 1.0, -1.0, numpy.pi, 0.5 + numpy.pi, 1.0 + numpy.pi]'], {}), '([0.0, 0.5, -0.5, 1.0, -1.0, numpy.pi, 0.5 + numpy.pi, 1.0 +\n numpy.pi])\n', (34111, 34186), False, 'import numpy\n'), ((39767, 39795), 'numpy.array', 'numpy.array', (['[0.5, 1.0, 2.0]'], {}), '([0.5, 1.0, 2.0])\n', (39778, 39795), False, 'import numpy\n'), ((39800, 39837), 'numpy.array', 'numpy.array', (['[0.125, 0.25, 1.0, 10.0]'], {}), '([0.125, 0.25, 1.0, 10.0])\n', (39811, 39837), False, 'import numpy\n'), ((39842, 39928), 'numpy.array', 'numpy.array', (['[0.0, 0.5, -0.5, 1.0, -1.0, numpy.pi, 0.5 + numpy.pi, 1.0 + numpy.pi]'], {}), '([0.0, 0.5, -0.5, 1.0, -1.0, numpy.pi, 0.5 + numpy.pi, 1.0 +\n numpy.pi])\n', (39853, 39928), False, 'import numpy\n'), ((60745, 60773), 'numpy.linspace', 'numpy.linspace', (['(0.1)', '(2.0)', '(11)'], {}), '(0.1, 2.0, 11)\n', (60759, 60773), False, 'import numpy\n'), ((60779, 60808), 'numpy.linspace', 'numpy.linspace', (['(-2.0)', '(2.0)', '(11)'], {}), '(-2.0, 2.0, 11)\n', (60793, 60808), False, 'import numpy\n'), ((60815, 60848), 'numpy.linspace', 'numpy.linspace', (['(0.0)', 'numpy.pi', '(11)'], {}), '(0.0, numpy.pi, 11)\n', (60829, 60848), False, 'import numpy\n'), ((60854, 60883), 'numpy.linspace', 'numpy.linspace', (['(0.0)', '(10.0)', '(11)'], {}), '(0.0, 10.0, 11)\n', (60868, 60883), False, 'import numpy\n'), ((66174, 66203), 'numpy.linspace', 'numpy.linspace', (['(-2.0)', '(2.0)', '(11)'], {}), '(-2.0, 2.0, 11)\n', (66188, 66203), False, 'import numpy\n'), ((66208, 66237), 'numpy.linspace', 'numpy.linspace', (['(0.0)', '(10.0)', '(11)'], {}), '(0.0, 10.0, 11)\n', (66222, 66237), False, 'import numpy\n'), ((67265, 67326), 'galpy.potential.toVerticalPotential', 'potential.toVerticalPotential', (['potential.MWPotential2014', '(1.0)'], {}), '(potential.MWPotential2014, 1.0)\n', (67294, 67326), False, 'from galpy import potential\n'), ((78879, 78914), 'galpy.potential.KeplerPotential', 'potential.KeplerPotential', ([], {'amp': 'mass'}), 
'(amp=mass)\n', (78904, 78914), False, 'from galpy import potential\n'), ((79162, 79207), 'galpy.potential.IsochronePotential', 'potential.IsochronePotential', ([], {'amp': 'mass', 'b': '(0.4)'}), '(amp=mass, b=0.4)\n', (79190, 79207), False, 'from galpy import potential\n'), ((79450, 79493), 'galpy.potential.PlummerPotential', 'potential.PlummerPotential', ([], {'amp': 'mass', 'b': '(0.4)'}), '(amp=mass, b=0.4)\n', (79476, 79493), False, 'from galpy import potential\n'), ((79721, 79771), 'galpy.potential.SphericalShellPotential', 'potential.SphericalShellPotential', ([], {'amp': 'mass', 'a': '(0.4)'}), '(amp=mass, a=0.4)\n', (79754, 79771), False, 'from galpy import potential\n'), ((79996, 80036), 'galpy.potential.RingPotential', 'potential.RingPotential', ([], {'amp': 'mass', 'a': '(0.4)'}), '(amp=mass, a=0.4)\n', (80019, 80036), False, 'from galpy import potential\n'), ((80282, 80328), 'galpy.potential.KuzminDiskPotential', 'potential.KuzminDiskPotential', ([], {'amp': 'mass', 'a': '(0.4)'}), '(amp=mass, a=0.4)\n', (80311, 80328), False, 'from galpy import potential\n'), ((80583, 80632), 'galpy.potential.MiyamotoNagaiPotential', 'potential.MiyamotoNagaiPotential', ([], {'amp': 'mass', 'a': '(0.4)'}), '(amp=mass, a=0.4)\n', (80615, 80632), False, 'from galpy import potential\n'), ((80987, 81029), 'galpy.potential.PlummerPotential', 'potential.PlummerPotential', ([], {'amp': '(2.0)', 'b': '(2.0)'}), '(amp=2.0, b=2.0)\n', (81013, 81029), False, 'from galpy import potential\n'), ((81052, 81077), 'numpy.sqrt', 'numpy.sqrt', (['(R * R + z * z)'], {}), '(R * R + z * z)\n', (81062, 81077), False, 'import numpy\n'), ((81514, 81557), 'galpy.potential.PlummerPotential', 'potential.PlummerPotential', ([], {'amp': '(1.12)', 'b': '(2.0)'}), '(amp=1.12, b=2.0)\n', (81540, 81557), False, 'from galpy import potential\n'), ((81754, 81779), 'numpy.sqrt', 'numpy.sqrt', (['(R * R + z * z)'], {}), '(R * R + z * z)\n', (81764, 81779), False, 'import numpy\n'), ((82781, 82823), 
'galpy.potential.PlummerPotential', 'potential.PlummerPotential', ([], {'amp': '(2.0)', 'b': '(2.0)'}), '(amp=2.0, b=2.0)\n', (82807, 82823), False, 'from galpy import potential\n'), ((82846, 82871), 'numpy.sqrt', 'numpy.sqrt', (['(R * R + z * z)'], {}), '(R * R + z * z)\n', (82856, 82871), False, 'import numpy\n'), ((84242, 84297), 'galpy.potential.PowerSphericalPotential', 'potential.PowerSphericalPotential', ([], {'amp': '(2.0)', 'alpha': '(2.999)'}), '(amp=2.0, alpha=2.999)\n', (84275, 84297), False, 'from galpy import potential\n'), ((84304, 84338), 'galpy.potential.KeplerPotential', 'potential.KeplerPotential', ([], {'amp': '(2.0)'}), '(amp=2.0)\n', (84329, 84338), False, 'from galpy import potential\n'), ((84548, 84590), 'galpy.potential.PowerSphericalPotential', 'potential.PowerSphericalPotential', ([], {'amp': '(2.0)'}), '(amp=2.0)\n', (84581, 84590), False, 'from galpy import potential\n'), ((85218, 85267), 'galpy.potential.PowerSphericalPotentialwCutoff', 'potential.PowerSphericalPotentialwCutoff', ([], {'amp': '(2.0)'}), '(amp=2.0)\n', (85258, 85267), False, 'from galpy import potential\n'), ((85995, 86035), 'galpy.potential.JaffePotential', 'potential.JaffePotential', ([], {'amp': '(2.0)', 'a': '(0.1)'}), '(amp=2.0, a=0.1)\n', (86019, 86035), False, 'from galpy import potential\n'), ((86042, 86086), 'galpy.potential.HernquistPotential', 'potential.HernquistPotential', ([], {'amp': '(2.0)', 'a': '(0.1)'}), '(amp=2.0, a=0.1)\n', (86070, 86086), False, 'from galpy import potential\n'), ((86093, 86131), 'galpy.potential.NFWPotential', 'potential.NFWPotential', ([], {'amp': '(2.0)', 'a': '(0.1)'}), '(amp=2.0, a=0.1)\n', (86115, 86131), False, 'from galpy import potential\n'), ((87328, 87370), 'galpy.potential.BurkertPotential', 'potential.BurkertPotential', ([], {'amp': '(2.0)', 'a': '(3.0)'}), '(amp=2.0, a=3.0)\n', (87354, 87370), False, 'from galpy import potential\n'), ((87719, 87766), 'galpy.potential.DehnenCoreSphericalPotential', 
'potential.DehnenCoreSphericalPotential', ([], {'amp': '(2.0)'}), '(amp=2.0)\n', (87757, 87766), False, 'from galpy import potential\n'), ((87774, 87807), 'galpy.potential.JaffePotential', 'potential.JaffePotential', ([], {'amp': '(2.0)'}), '(amp=2.0)\n', (87798, 87807), False, 'from galpy import potential\n'), ((87815, 87852), 'galpy.potential.HernquistPotential', 'potential.HernquistPotential', ([], {'amp': '(2.0)'}), '(amp=2.0)\n', (87843, 87852), False, 'from galpy import potential\n'), ((87860, 87891), 'galpy.potential.NFWPotential', 'potential.NFWPotential', ([], {'amp': '(2.0)'}), '(amp=2.0)\n', (87882, 87891), False, 'from galpy import potential\n'), ((87899, 87944), 'galpy.potential.TwoPowerSphericalPotential', 'potential.TwoPowerSphericalPotential', ([], {'amp': '(2.0)'}), '(amp=2.0)\n', (87935, 87944), False, 'from galpy import potential\n'), ((87952, 87995), 'galpy.potential.DehnenSphericalPotential', 'potential.DehnenSphericalPotential', ([], {'amp': '(2.0)'}), '(amp=2.0)\n', (87986, 87995), False, 'from galpy import potential\n'), ((88003, 88045), 'galpy.potential.PlummerPotential', 'potential.PlummerPotential', ([], {'amp': '(2.0)', 'b': '(1.3)'}), '(amp=2.0, b=1.3)\n', (88029, 88045), False, 'from galpy import potential\n'), ((89415, 89456), 'galpy.potential.MiyamotoNagaiPotential', 'potential.MiyamotoNagaiPotential', ([], {'amp': '(1.0)'}), '(amp=1.0)\n', (89447, 89456), False, 'from galpy import potential\n'), ((89846, 89895), 'galpy.potential.DoubleExponentialDiskPotential', 'potential.DoubleExponentialDiskPotential', ([], {'amp': '(2.0)'}), '(amp=2.0)\n', (89886, 89895), False, 'from galpy import potential\n'), ((90891, 90943), 'galpy.potential.RazorThinExponentialDiskPotential', 'potential.RazorThinExponentialDiskPotential', ([], {'amp': '(2.0)'}), '(amp=2.0)\n', (90934, 90943), False, 'from galpy import potential\n'), ((91939, 91984), 'galpy.potential.KuzminDiskPotential', 'potential.KuzminDiskPotential', ([], {'amp': '(2.0)', 'a': '(3.0)'}), 
'(amp=2.0, a=3.0)\n', (91968, 91984), False, 'from galpy import potential\n'), ((92801, 92866), 'galpy.potential.PerfectEllipsoidPotential', 'potential.PerfectEllipsoidPotential', ([], {'amp': '(2.0)', 'a': '(3.0)', 'b': '(1.3)', 'c': '(1.9)'}), '(amp=2.0, a=3.0, b=1.3, c=1.9)\n', (92836, 92866), False, 'from galpy import potential\n'), ((92977, 93042), 'galpy.potential.PerfectEllipsoidPotential', 'potential.PerfectEllipsoidPotential', ([], {'amp': '(2.0)', 'a': '(3.0)', 'b': '(1.0)', 'c': '(1.9)'}), '(amp=2.0, a=3.0, b=1.0, c=1.9)\n', (93012, 93042), False, 'from galpy import potential\n'), ((93152, 93217), 'galpy.potential.PerfectEllipsoidPotential', 'potential.PerfectEllipsoidPotential', ([], {'amp': '(2.0)', 'a': '(3.0)', 'b': '(1.0)', 'c': '(1.0)'}), '(amp=2.0, a=3.0, b=1.0, c=1.0)\n', (93187, 93217), False, 'from galpy import potential\n'), ((93326, 93391), 'galpy.potential.PerfectEllipsoidPotential', 'potential.PerfectEllipsoidPotential', ([], {'amp': '(2.0)', 'a': '(3.0)', 'b': '(0.7)', 'c': '(0.5)'}), '(amp=2.0, a=3.0, b=0.7, c=0.5)\n', (93361, 93391), False, 'from galpy import potential\n'), ((93612, 93670), 'galpy.potential.TriaxialJaffePotential', 'potential.TriaxialJaffePotential', ([], {'amp': '(2.0)', 'a': '(3.0)', 'b': 'b', 'c': 'c'}), '(amp=2.0, a=3.0, b=b, c=c)\n', (93644, 93670), False, 'from galpy import potential\n'), ((93674, 93714), 'galpy.potential.JaffePotential', 'potential.JaffePotential', ([], {'amp': '(2.0)', 'a': '(3.0)'}), '(amp=2.0, a=3.0)\n', (93698, 93714), False, 'from galpy import potential\n'), ((93826, 93888), 'galpy.potential.TriaxialHernquistPotential', 'potential.TriaxialHernquistPotential', ([], {'amp': '(2.0)', 'a': '(3.0)', 'b': 'b', 'c': 'c'}), '(amp=2.0, a=3.0, b=b, c=c)\n', (93862, 93888), False, 'from galpy import potential\n'), ((93892, 93936), 'galpy.potential.HernquistPotential', 'potential.HernquistPotential', ([], {'amp': '(2.0)', 'a': '(3.0)'}), '(amp=2.0, a=3.0)\n', (93920, 93936), False, 'from galpy import 
potential\n'), ((94048, 94104), 'galpy.potential.TriaxialNFWPotential', 'potential.TriaxialNFWPotential', ([], {'amp': '(2.0)', 'a': '(3.0)', 'b': 'b', 'c': 'c'}), '(amp=2.0, a=3.0, b=b, c=c)\n', (94078, 94104), False, 'from galpy import potential\n'), ((94108, 94146), 'galpy.potential.NFWPotential', 'potential.NFWPotential', ([], {'amp': '(2.0)', 'a': '(3.0)'}), '(amp=2.0, a=3.0)\n', (94130, 94146), False, 'from galpy import potential\n'), ((94258, 94344), 'galpy.potential.TwoPowerTriaxialPotential', 'potential.TwoPowerTriaxialPotential', ([], {'amp': '(2.0)', 'a': '(3.0)', 'b': 'b', 'c': 'c', 'alpha': '(1.1)', 'beta': '(4.1)'}), '(amp=2.0, a=3.0, b=b, c=c, alpha=1.1,\n beta=4.1)\n', (94293, 94344), False, 'from galpy import potential\n'), ((94342, 94415), 'galpy.potential.TwoPowerSphericalPotential', 'potential.TwoPowerSphericalPotential', ([], {'amp': '(2.0)', 'a': '(3.0)', 'alpha': '(1.1)', 'beta': '(4.1)'}), '(amp=2.0, a=3.0, alpha=1.1, beta=4.1)\n', (94378, 94415), False, 'from galpy import potential\n'), ((94595, 94664), 'galpy.potential.TriaxialGaussianPotential', 'potential.TriaxialGaussianPotential', ([], {'amp': '(2.0)', 'sigma': '(3.0)', 'b': '(1.3)', 'c': '(1.9)'}), '(amp=2.0, sigma=3.0, b=1.3, c=1.9)\n', (94630, 94664), False, 'from galpy import potential\n'), ((94775, 94844), 'galpy.potential.TriaxialGaussianPotential', 'potential.TriaxialGaussianPotential', ([], {'amp': '(2.0)', 'sigma': '(3.0)', 'b': '(1.0)', 'c': '(1.9)'}), '(amp=2.0, sigma=3.0, b=1.0, c=1.9)\n', (94810, 94844), False, 'from galpy import potential\n'), ((94954, 95023), 'galpy.potential.TriaxialGaussianPotential', 'potential.TriaxialGaussianPotential', ([], {'amp': '(2.0)', 'sigma': '(3.0)', 'b': '(1.0)', 'c': '(1.0)'}), '(amp=2.0, sigma=3.0, b=1.0, c=1.0)\n', (94989, 95023), False, 'from galpy import potential\n'), ((95132, 95201), 'galpy.potential.TriaxialGaussianPotential', 'potential.TriaxialGaussianPotential', ([], {'amp': '(2.0)', 'sigma': '(3.0)', 'b': '(0.7)', 'c': 
'(0.5)'}), '(amp=2.0, sigma=3.0, b=0.7, c=0.5)\n', (95167, 95201), False, 'from galpy import potential\n'), ((98207, 98256), 'galpy.potential.LogarithmicHaloPotential', 'potential.LogarithmicHaloPotential', ([], {'normalize': '(1.0)'}), '(normalize=1.0)\n', (98241, 98256), False, 'from galpy import potential\n'), ((98265, 98298), 'galpy.potential.RZToplanarPotential', 'potential.RZToplanarPotential', (['lp'], {}), '(lp)\n', (98294, 98298), False, 'from galpy import potential\n'), ((98529, 98563), 'galpy.potential.RZToplanarPotential', 'potential.RZToplanarPotential', (['plp'], {}), '(plp)\n', (98558, 98563), False, 'from galpy import potential\n'), ((98807, 98847), 'galpy.potential.RZToplanarPotential', 'potential.RZToplanarPotential', (['[lp, plp]'], {}), '([lp, plp])\n', (98836, 98847), False, 'from galpy import potential\n'), ((99666, 99729), 'galpy.potential.LogarithmicHaloPotential', 'potential.LogarithmicHaloPotential', ([], {'normalize': '(1.0)', 'q': '(0.9)', 'b': '(0.8)'}), '(normalize=1.0, q=0.9, b=0.8)\n', (99700, 99729), False, 'from galpy import potential\n'), ((100048, 100091), 'galpy.potential.PlummerPotential', 'potential.PlummerPotential', ([], {'amp': '(1.12)', 'b': '(2.0)'}), '(amp=1.12, b=2.0)\n', (100074, 100091), False, 'from galpy import potential\n'), ((100507, 100559), 'galpy.potential.TriaxialNFWPotential', 'potential.TriaxialNFWPotential', ([], {'normalize': '(1.0)', 'b': '(0.5)'}), '(normalize=1.0, b=0.5)\n', (100537, 100559), False, 'from galpy import potential\n'), ((100568, 100600), 'galpy.potential.toPlanarPotential', 'potential.toPlanarPotential', (['tnp'], {}), '(tnp)\n', (100595, 100600), False, 'from galpy import potential\n'), ((100786, 100820), 'galpy.potential.toPlanarPotential', 'potential.toPlanarPotential', (['[tnp]'], {}), '([tnp])\n', (100813, 100820), False, 'from galpy import potential\n'), ((101066, 101098), 'galpy.potential.toPlanarPotential', 'potential.toPlanarPotential', (['tnp'], {}), '(tnp)\n', (101093, 101098), 
False, 'from galpy import potential\n'), ((101798, 101841), 'galpy.potential.PlummerPotential', 'potential.PlummerPotential', ([], {'amp': '(1.12)', 'b': '(2.0)'}), '(amp=1.12, b=2.0)\n', (101824, 101841), False, 'from galpy import potential\n'), ((102153, 102202), 'galpy.potential.LogarithmicHaloPotential', 'potential.LogarithmicHaloPotential', ([], {'normalize': '(1.0)'}), '(normalize=1.0)\n', (102187, 102202), False, 'from galpy import potential\n'), ((102211, 102251), 'galpy.potential.RZToverticalPotential', 'potential.RZToverticalPotential', (['lp', '(1.2)'], {}), '(lp, 1.2)\n', (102242, 102251), False, 'from galpy import potential\n'), ((102489, 102530), 'galpy.potential.RZToverticalPotential', 'potential.RZToverticalPotential', (['plp', '(1.2)'], {}), '(plp, 1.2)\n', (102520, 102530), False, 'from galpy import potential\n'), ((102708, 102751), 'galpy.potential.RZToverticalPotential', 'potential.RZToverticalPotential', (['[plp]', '(1.2)'], {}), '([plp], 1.2)\n', (102739, 102751), False, 'from galpy import potential\n'), ((103922, 103985), 'galpy.potential.LogarithmicHaloPotential', 'potential.LogarithmicHaloPotential', ([], {'normalize': '(1.0)', 'q': '(0.9)', 'b': '(0.8)'}), '(normalize=1.0, q=0.9, b=0.8)\n', (103956, 103985), False, 'from galpy import potential\n'), ((104316, 104359), 'galpy.potential.PlummerPotential', 'potential.PlummerPotential', ([], {'amp': '(1.12)', 'b': '(2.0)'}), '(amp=1.12, b=2.0)\n', (104342, 104359), False, 'from galpy import potential\n'), ((104789, 104841), 'galpy.potential.TriaxialNFWPotential', 'potential.TriaxialNFWPotential', ([], {'normalize': '(1.0)', 'b': '(0.5)'}), '(normalize=1.0, b=0.5)\n', (104819, 104841), False, 'from galpy import potential\n'), ((104850, 104898), 'galpy.potential.toVerticalPotential', 'potential.toVerticalPotential', (['tnp', '(1.2)'], {'phi': '(0.8)'}), '(tnp, 1.2, phi=0.8)\n', (104879, 104898), False, 'from galpy import potential\n'), ((105084, 105134), 'galpy.potential.toVerticalPotential', 
'potential.toVerticalPotential', (['[tnp]', '(1.2)'], {'phi': '(0.8)'}), '([tnp], 1.2, phi=0.8)\n', (105113, 105134), False, 'from galpy import potential\n'), ((105383, 105431), 'galpy.potential.toVerticalPotential', 'potential.toVerticalPotential', (['tnp', '(1.2)'], {'phi': '(0.8)'}), '(tnp, 1.2, phi=0.8)\n', (105412, 105431), False, 'from galpy import potential\n'), ((105441, 105490), 'galpy.potential.toVerticalPotential', 'potential.toVerticalPotential', (['ptnp', '(1.2)'], {'phi': '(0.8)'}), '(ptnp, 1.2, phi=0.8)\n', (105470, 105490), False, 'from galpy import potential\n'), ((105667, 105718), 'galpy.potential.toVerticalPotential', 'potential.toVerticalPotential', (['[ptnp]', '(1.2)'], {'phi': '(0.8)'}), '([ptnp], 1.2, phi=0.8)\n', (105696, 105718), False, 'from galpy import potential\n'), ((106837, 106880), 'galpy.potential.PlummerPotential', 'potential.PlummerPotential', ([], {'amp': '(1.12)', 'b': '(2.0)'}), '(amp=1.12, b=2.0)\n', (106863, 106880), False, 'from galpy import potential\n'), ((107724, 107773), 'galpy.potential.LogarithmicHaloPotential', 'potential.LogarithmicHaloPotential', ([], {'normalize': '(1.0)'}), '(normalize=1.0)\n', (107758, 107773), False, 'from galpy import potential\n'), ((108212, 108252), 'galpy.potential.KeplerPotential', 'potential.KeplerPotential', ([], {'normalize': '(1.0)'}), '(normalize=1.0)\n', (108237, 108252), False, 'from galpy import potential\n'), ((111037, 111096), 'galpy.potential.PowerSphericalPotential', 'potential.PowerSphericalPotential', ([], {'alpha': '(1.8)', 'normalize': '(1.0)'}), '(alpha=1.8, normalize=1.0)\n', (111070, 111096), False, 'from galpy import potential\n'), ((111595, 111654), 'galpy.potential.PowerSphericalPotential', 'potential.PowerSphericalPotential', ([], {'alpha': '(0.5)', 'normalize': '(1.0)'}), '(alpha=0.5, normalize=1.0)\n', (111628, 111654), False, 'from galpy import potential\n'), ((113823, 113863), 'galpy.potential.KeplerPotential', 'potential.KeplerPotential', ([], {'normalize': 
'(1.0)'}), '(normalize=1.0)\n', (113848, 113863), False, 'from galpy import potential\n'), ((113873, 113907), 'numpy.linspace', 'numpy.linspace', (['(0.0)', 'numpy.pi', '(101)'], {}), '(0.0, numpy.pi, 101)\n', (113887, 113907), False, 'import numpy\n'), ((114515, 114567), 'galpy.potential.TriaxialNFWPotential', 'potential.TriaxialNFWPotential', ([], {'b': '(0.4)', 'normalize': '(1.0)'}), '(b=0.4, normalize=1.0)\n', (114545, 114567), False, 'from galpy import potential\n'), ((114600, 114649), 'numpy.linspace', 'numpy.linspace', (['(numpy.pi / 5.0)', '(numpy.pi / 2.0)', '(5)'], {}), '(numpy.pi / 5.0, numpy.pi / 2.0, 5)\n', (114614, 114649), False, 'import numpy\n'), ((115220, 115255), 'galpy.potential.EllipticalDiskPotential', 'potential.EllipticalDiskPotential', ([], {}), '()\n', (115253, 115255), False, 'from galpy import potential\n'), ((115769, 115818), 'galpy.potential.LogarithmicHaloPotential', 'potential.LogarithmicHaloPotential', ([], {'normalize': '(1.0)'}), '(normalize=1.0)\n', (115803, 115818), False, 'from galpy import potential\n'), ((116222, 116271), 'galpy.potential.LogarithmicHaloPotential', 'potential.LogarithmicHaloPotential', ([], {'normalize': '(1.0)'}), '(normalize=1.0)\n', (116256, 116271), False, 'from galpy import potential\n'), ((117178, 117232), 'galpy.potential.MiyamotoNagaiPotential', 'potential.MiyamotoNagaiPotential', ([], {'normalize': '(1.0)', 'a': '(0.3)'}), '(normalize=1.0, a=0.3)\n', (117210, 117232), False, 'from galpy import potential\n'), ((117705, 117754), 'galpy.potential.LogarithmicHaloPotential', 'potential.LogarithmicHaloPotential', ([], {'normalize': '(1.0)'}), '(normalize=1.0)\n', (117739, 117754), False, 'from galpy import potential\n'), ((119609, 119649), 'galpy.potential.KeplerPotential', 'potential.KeplerPotential', ([], {'normalize': '(1.0)'}), '(normalize=1.0)\n', (119634, 119649), False, 'from galpy import potential\n'), ((119794, 119838), 'galpy.potential.NFWPotential', 'potential.NFWPotential', ([], {'normalize': 
'(1.0)', 'a': '(5.0)'}), '(normalize=1.0, a=5.0)\n', (119816, 119838), False, 'from galpy import potential\n'), ((119978, 120028), 'galpy.potential.HernquistPotential', 'potential.HernquistPotential', ([], {'normalize': '(1.0)', 'a': '(5.0)'}), '(normalize=1.0, a=5.0)\n', (120006, 120028), False, 'from galpy import potential\n'), ((120223, 120285), 'galpy.potential.MiyamotoNagaiPotential', 'potential.MiyamotoNagaiPotential', ([], {'normalize': '(1.0)', 'a': '(0.5)', 'b': '(0.05)'}), '(normalize=1.0, a=0.5, b=0.05)\n', (120255, 120285), False, 'from galpy import potential\n'), ((120808, 120864), 'galpy.potential.LogarithmicHaloPotential', 'potential.LogarithmicHaloPotential', ([], {'normalize': '(1.0)', 'q': '(1.0)'}), '(normalize=1.0, q=1.0)\n', (120842, 120864), False, 'from galpy import potential\n'), ((120870, 120910), 'galpy.potential.KeplerPotential', 'potential.KeplerPotential', ([], {'normalize': '(1.0)'}), '(normalize=1.0)\n', (120895, 120910), False, 'from galpy import potential\n'), ((120918, 120955), 'galpy.potential.NFWPotential', 'potential.NFWPotential', ([], {'normalize': '(1.0)'}), '(normalize=1.0)\n', (120940, 120955), False, 'from galpy import potential\n'), ((120963, 121004), 'galpy.potential.BurkertPotential', 'potential.BurkertPotential', ([], {'normalize': '(1.0)'}), '(normalize=1.0)\n', (120989, 121004), False, 'from galpy import potential\n'), ((121012, 121040), 'numpy.linspace', 'numpy.linspace', (['(0.2)', '(2.0)', '(21)'], {}), '(0.2, 2.0, 21)\n', (121026, 121040), False, 'import numpy\n'), ((122523, 122558), 'galpy.potential.EllipticalDiskPotential', 'potential.EllipticalDiskPotential', ([], {}), '()\n', (122556, 122558), False, 'from galpy import potential\n'), ((123746, 123801), 'galpy.potential.DoubleExponentialDiskPotential', 'potential.DoubleExponentialDiskPotential', ([], {'normalize': '(1.0)'}), '(normalize=1.0)\n', (123786, 123801), False, 'from galpy import potential\n'), ((123809, 123834), 'numpy.linspace', 'numpy.linspace', 
(['(0.1)', '(2.11)'], {}), '(0.1, 2.11)\n', (123823, 123834), False, 'import numpy\n'), ((125508, 125566), 'galpy.potential.RazorThinExponentialDiskPotential', 'potential.RazorThinExponentialDiskPotential', ([], {'normalize': '(1.0)'}), '(normalize=1.0)\n', (125551, 125566), False, 'from galpy import potential\n'), ((125935, 125965), 'galpy.potential.DehnenBarPotential', 'potential.DehnenBarPotential', ([], {}), '()\n', (125963, 125965), False, 'from galpy import potential\n'), ((126016, 126059), 'numpy.linspace', 'numpy.linspace', (['(0.1 * dp._rb)', '(2.11 * dp._rb)'], {}), '(0.1 * dp._rb, 2.11 * dp._rb)\n', (126030, 126059), False, 'import numpy\n'), ((133577, 133608), 'galpy.potential.SpiralArmsPotential', 'potential.SpiralArmsPotential', ([], {}), '()\n', (133606, 133608), False, 'from galpy import potential\n'), ((133617, 133645), 'numpy.linspace', 'numpy.linspace', (['(0.1)', '(2.0)', '(11)'], {}), '(0.1, 2.0, 11)\n', (133631, 133645), False, 'import numpy\n'), ((136926, 136954), 'numpy.array', 'numpy.array', (['[0.5, 1.0, 2.0]'], {}), '([0.5, 1.0, 2.0])\n', (136937, 136954), False, 'import numpy\n'), ((136959, 136992), 'numpy.array', 'numpy.array', (['[0.0, 0.125, -0.125]'], {}), '([0.0, 0.125, -0.125])\n', (136970, 136992), False, 'import numpy\n'), ((136999, 137058), 'galpy.potential.TwoPowerSphericalPotential', 'potential.TwoPowerSphericalPotential', ([], {'alpha': '(0)', 'beta': '(4)'}), '(alpha=0, beta=4, **kw)\n', (137035, 137058), False, 'from galpy import potential\n'), ((137069, 137113), 'galpy.potential.DehnenCoreSphericalPotential', 'potential.DehnenCoreSphericalPotential', ([], {}), '(**kw)\n', (137107, 137113), False, 'from galpy import potential\n'), ((137309, 137368), 'galpy.potential.TwoPowerSphericalPotential', 'potential.TwoPowerSphericalPotential', ([], {'alpha': '(1)', 'beta': '(4)'}), '(alpha=1, beta=4, **kw)\n', (137345, 137368), False, 'from galpy import potential\n'), ((137379, 137413), 'galpy.potential.HernquistPotential', 
'potential.HernquistPotential', ([], {}), '(**kw)\n', (137407, 137413), False, 'from galpy import potential\n'), ((137609, 137668), 'galpy.potential.TwoPowerSphericalPotential', 'potential.TwoPowerSphericalPotential', ([], {'alpha': '(2)', 'beta': '(4)'}), '(alpha=2, beta=4, **kw)\n', (137645, 137668), False, 'from galpy import potential\n'), ((137679, 137709), 'galpy.potential.JaffePotential', 'potential.JaffePotential', ([], {}), '(**kw)\n', (137703, 137709), False, 'from galpy import potential\n'), ((137905, 137964), 'galpy.potential.TwoPowerSphericalPotential', 'potential.TwoPowerSphericalPotential', ([], {'alpha': '(1)', 'beta': '(3)'}), '(alpha=1, beta=3, **kw)\n', (137941, 137964), False, 'from galpy import potential\n'), ((137975, 138003), 'galpy.potential.NFWPotential', 'potential.NFWPotential', ([], {}), '(**kw)\n', (137997, 138003), False, 'from galpy import potential\n'), ((138445, 138473), 'numpy.array', 'numpy.array', (['[0.5, 1.0, 2.0]'], {}), '([0.5, 1.0, 2.0])\n', (138456, 138473), False, 'import numpy\n'), ((138478, 138511), 'numpy.array', 'numpy.array', (['[0.0, 0.125, -0.125]'], {}), '([0.0, 0.125, -0.125])\n', (138489, 138511), False, 'import numpy\n'), ((138518, 138567), 'galpy.potential.DehnenSphericalPotential', 'potential.DehnenSphericalPotential', ([], {'alpha': '(0)'}), '(alpha=0, **kw)\n', (138552, 138567), False, 'from galpy import potential\n'), ((138578, 138622), 'galpy.potential.DehnenCoreSphericalPotential', 'potential.DehnenCoreSphericalPotential', ([], {}), '(**kw)\n', (138616, 138622), False, 'from galpy import potential\n'), ((138942, 138991), 'galpy.potential.DehnenSphericalPotential', 'potential.DehnenSphericalPotential', ([], {'alpha': '(1)'}), '(alpha=1, **kw)\n', (138976, 138991), False, 'from galpy import potential\n'), ((139002, 139036), 'galpy.potential.HernquistPotential', 'potential.HernquistPotential', ([], {}), '(**kw)\n', (139030, 139036), False, 'from galpy import potential\n'), ((139232, 139281), 
'galpy.potential.DehnenSphericalPotential', 'potential.DehnenSphericalPotential', ([], {'alpha': '(2)'}), '(alpha=2, **kw)\n', (139266, 139281), False, 'from galpy import potential\n'), ((139292, 139322), 'galpy.potential.JaffePotential', 'potential.JaffePotential', ([], {}), '(**kw)\n', (139316, 139322), False, 'from galpy import potential\n'), ((148498, 148526), 'galpy.util.conversion.get_physical', 'conversion.get_physical', (['pot'], {}), '(pot)\n', (148521, 148526), False, 'from galpy.util import conversion\n'), ((149871, 149981), 'galpy.potential.NFWPotential', 'potential.NFWPotential', ([], {'conc': 'conc', 'mvir': 'mvir', 'vo': 'vo', 'ro': 'ro', 'H': 'H', 'Om': 'Om', 'overdens': 'overdens', 'wrtcrit': 'wrtcrit'}), '(conc=conc, mvir=mvir, vo=vo, ro=ro, H=H, Om=Om,\n overdens=overdens, wrtcrit=wrtcrit)\n', (149893, 149981), False, 'from galpy import potential\n'), ((150575, 150685), 'galpy.potential.NFWPotential', 'potential.NFWPotential', ([], {'conc': 'conc', 'mvir': 'mvir', 'vo': 'vo', 'ro': 'ro', 'H': 'H', 'Om': 'Om', 'overdens': 'overdens', 'wrtcrit': 'wrtcrit'}), '(conc=conc, mvir=mvir, vo=vo, ro=ro, H=H, Om=Om,\n overdens=overdens, wrtcrit=wrtcrit)\n', (150597, 150685), False, 'from galpy import potential\n'), ((151294, 151404), 'galpy.potential.NFWPotential', 'potential.NFWPotential', ([], {'conc': 'conc', 'mvir': 'mvir', 'vo': 'vo', 'ro': 'ro', 'H': 'H', 'Om': 'Om', 'overdens': 'overdens', 'wrtcrit': 'wrtcrit'}), '(conc=conc, mvir=mvir, vo=vo, ro=ro, H=H, Om=Om,\n overdens=overdens, wrtcrit=wrtcrit)\n', (151316, 151404), False, 'from galpy import potential\n'), ((151467, 151599), 'galpy.potential.TriaxialNFWPotential', 'potential.TriaxialNFWPotential', ([], {'b': '(0.3)', 'c': '(0.7)', 'conc': 'conc', 'mvir': 'mvir', 'vo': 'vo', 'ro': 'ro', 'H': 'H', 'Om': 'Om', 'overdens': 'overdens', 'wrtcrit': 'wrtcrit'}), '(b=0.3, c=0.7, conc=conc, mvir=mvir, vo=vo,\n ro=ro, H=H, Om=Om, overdens=overdens, wrtcrit=wrtcrit)\n', (151497, 151599), False, 'from galpy 
import potential\n'), ((152127, 152237), 'galpy.potential.NFWPotential', 'potential.NFWPotential', ([], {'conc': 'conc', 'mvir': 'mvir', 'vo': 'vo', 'ro': 'ro', 'H': 'H', 'Om': 'Om', 'overdens': 'overdens', 'wrtcrit': 'wrtcrit'}), '(conc=conc, mvir=mvir, vo=vo, ro=ro, H=H, Om=Om,\n overdens=overdens, wrtcrit=wrtcrit)\n', (152149, 152237), False, 'from galpy import potential\n'), ((152300, 152432), 'galpy.potential.TriaxialNFWPotential', 'potential.TriaxialNFWPotential', ([], {'b': '(0.3)', 'c': '(0.7)', 'conc': 'conc', 'mvir': 'mvir', 'vo': 'vo', 'ro': 'ro', 'H': 'H', 'Om': 'Om', 'overdens': 'overdens', 'wrtcrit': 'wrtcrit'}), '(b=0.3, c=0.7, conc=conc, mvir=mvir, vo=vo,\n ro=ro, H=H, Om=Om, overdens=overdens, wrtcrit=wrtcrit)\n', (152330, 152432), False, 'from galpy import potential\n'), ((152947, 152991), 'galpy.potential.NFWPotential', 'potential.NFWPotential', ([], {'rmax': 'rmax', 'vmax': 'vmax'}), '(rmax=rmax, vmax=vmax)\n', (152969, 152991), False, 'from galpy import potential\n'), ((153274, 153322), 'galpy.potential.PowerSphericalPotential', 'potential.PowerSphericalPotential', ([], {'normalize': '(1.0)'}), '(normalize=1.0)\n', (153307, 153322), False, 'from galpy import potential\n'), ((153621, 153668), 'galpy.potential.MiyamotoNagaiPotential', 'potential.MiyamotoNagaiPotential', ([], {'normalize': '(1.0)'}), '(normalize=1.0)\n', (153653, 153668), False, 'from galpy import potential\n'), ((155834, 155878), 'galpy.potential.NFWPotential', 'potential.NFWPotential', ([], {'rmax': 'rmax', 'vmax': 'vmax'}), '(rmax=rmax, vmax=vmax)\n', (155856, 155878), False, 'from galpy import potential\n'), ((156682, 156721), 'galpy.potential.LogarithmicHaloPotential', 'LogarithmicHaloPotential', ([], {'normalize': '(1.0)'}), '(normalize=1.0)\n', (156706, 156721), False, 'from galpy.potential import LinShuReductionFactor, LogarithmicHaloPotential, omegac, epifreq\n'), ((157574, 157633), 'galpy.potential.SteadyLogSpiralPotential', 'SteadyLogSpiralPotential', ([], {'m': 
'(2.0)', 'omegas': 'OmegaP', 'alpha': '(k * R)'}), '(m=2.0, omegas=OmegaP, alpha=k * R)\n', (157598, 157633), False, 'from galpy.potential import SteadyLogSpiralPotential\n'), ((158227, 158276), 'galpy.potential.LogarithmicHaloPotential', 'potential.LogarithmicHaloPotential', ([], {'normalize': '(1.0)'}), '(normalize=1.0)\n', (158261, 158276), False, 'from galpy import potential\n'), ((158382, 158419), 'galpy.potential.NFWPotential', 'potential.NFWPotential', ([], {'normalize': '(1.0)'}), '(normalize=1.0)\n', (158404, 158419), False, 'from galpy import potential\n'), ((158515, 158562), 'galpy.potential.MiyamotoNagaiPotential', 'potential.MiyamotoNagaiPotential', ([], {'normalize': '(1.0)'}), '(normalize=1.0)\n', (158547, 158562), False, 'from galpy import potential\n'), ((158690, 158745), 'galpy.potential.PowerSphericalPotentialwCutoff', 'potential.PowerSphericalPotentialwCutoff', ([], {'normalize': '(1.0)'}), '(normalize=1.0)\n', (158730, 158745), False, 'from galpy import potential\n'), ((158894, 158946), 'galpy.potential.MN3ExponentialDiskPotential', 'potential.MN3ExponentialDiskPotential', ([], {'normalize': '(1.0)'}), '(normalize=1.0)\n', (158931, 158946), False, 'from galpy import potential\n'), ((159097, 159138), 'galpy.potential.PlummerPotential', 'potential.PlummerPotential', ([], {'normalize': '(1.0)'}), '(normalize=1.0)\n', (159123, 159138), False, 'from galpy import potential\n'), ((159246, 159289), 'galpy.potential.HernquistPotential', 'potential.HernquistPotential', ([], {'normalize': '(1.0)'}), '(normalize=1.0)\n', (159274, 159289), False, 'from galpy import potential\n'), ((159525, 159566), 'galpy.potential.BurkertPotential', 'potential.BurkertPotential', ([], {'normalize': '(1.0)'}), '(normalize=1.0)\n', (159551, 159566), False, 'from galpy import potential\n'), ((160212, 160267), 'galpy.potential.PowerSphericalPotentialwCutoff', 'potential.PowerSphericalPotentialwCutoff', ([], {'normalize': '(1.0)'}), '(normalize=1.0)\n', (160252, 160267), False, 
'from galpy import potential\n'), ((160442, 160503), 'galpy.potential.LogarithmicHaloPotential', 'potential.LogarithmicHaloPotential', ([], {'amp': '(2.0)', 'core': '(3.0)', 'q': '(27.0)'}), '(amp=2.0, core=3.0, q=27.0)\n', (160476, 160503), False, 'from galpy import potential\n'), ((161166, 161221), 'galpy.potential.MiyamotoNagaiPotential', 'potential.MiyamotoNagaiPotential', ([], {'amp': '(3.0)', 'a': '(2.0)', 'b': '(5.0)'}), '(amp=3.0, a=2.0, b=5.0)\n', (161198, 161221), False, 'from galpy import potential\n'), ((161722, 161790), 'galpy.potential.PowerSphericalPotentialwCutoff', 'potential.PowerSphericalPotentialwCutoff', ([], {'amp': '(3.0)', 'alpha': '(4.0)', 'rc': '(5.0)'}), '(amp=3.0, alpha=4.0, rc=5.0)\n', (161762, 161790), False, 'from galpy import potential\n'), ((162360, 162418), 'galpy.potential.NFWPotential', 'potential.NFWPotential', ([], {'amp': '(1.0 / 0.2162165954)', 'a': '(1.0 / 16)'}), '(amp=1.0 / 0.2162165954, a=1.0 / 16)\n', (162382, 162418), False, 'from galpy import potential\n'), ((162792, 162860), 'galpy.potential.MN3ExponentialDiskPotential', 'potential.MN3ExponentialDiskPotential', ([], {'normalize': '(1.0)', 'hr': '(2.0)', 'hz': '(0.5)'}), '(normalize=1.0, hr=2.0, hz=0.5)\n', (162829, 162860), False, 'from galpy import potential\n'), ((164123, 164165), 'galpy.potential.PlummerPotential', 'potential.PlummerPotential', ([], {'amp': '(3.0)', 'b': '(5.0)'}), '(amp=3.0, b=5.0)\n', (164149, 164165), False, 'from galpy import potential\n'), ((164539, 164589), 'galpy.potential.HernquistPotential', 'potential.HernquistPotential', ([], {'amp': '(2.0)', 'a': '(1.0 / 4.0)'}), '(amp=2.0, a=1.0 / 4.0)\n', (164567, 164589), False, 'from galpy import potential\n'), ((165152, 165207), 'galpy.potential.MiyamotoNagaiPotential', 'potential.MiyamotoNagaiPotential', ([], {'amp': '(3.0)', 'a': '(2.0)', 'b': '(5.0)'}), '(amp=3.0, a=2.0, b=5.0)\n', (165184, 165207), False, 'from galpy import potential\n'), ((165211, 165279), 
'galpy.potential.PowerSphericalPotentialwCutoff', 'potential.PowerSphericalPotentialwCutoff', ([], {'amp': '(3.0)', 'alpha': '(4.0)', 'rc': '(5.0)'}), '(amp=3.0, alpha=4.0, rc=5.0)\n', (165251, 165279), False, 'from galpy import potential\n'), ((169207, 169281), 'galpy.potential.MN3ExponentialDiskPotential', 'potential.MN3ExponentialDiskPotential', ([], {'amp': '(1.0)', 'hr': '(1.0)', 'hz': '(1.0)', 'sech': '(False)'}), '(amp=1.0, hr=1.0, hz=1.0, sech=False)\n', (169244, 169281), False, 'from galpy import potential\n'), ((169383, 169457), 'galpy.potential.MN3ExponentialDiskPotential', 'potential.MN3ExponentialDiskPotential', ([], {'amp': '(1.0)', 'hr': '(2.0)', 'hz': '(1.0)', 'sech': '(False)'}), '(amp=1.0, hr=2.0, hz=1.0, sech=False)\n', (169420, 169457), False, 'from galpy import potential\n'), ((169569, 169642), 'galpy.potential.MN3ExponentialDiskPotential', 'potential.MN3ExponentialDiskPotential', ([], {'amp': '(1.0)', 'hr': '(1.0)', 'hz': '(2.0)', 'sech': '(True)'}), '(amp=1.0, hr=1.0, hz=2.0, sech=True)\n', (169606, 169642), False, 'from galpy import potential\n'), ((169737, 169810), 'galpy.potential.MN3ExponentialDiskPotential', 'potential.MN3ExponentialDiskPotential', ([], {'amp': '(1.0)', 'hr': '(2.0)', 'hz': '(2.0)', 'sech': '(True)'}), '(amp=1.0, hr=2.0, hz=2.0, sech=True)\n', (169774, 169810), False, 'from galpy import potential\n'), ((170058, 170134), 'galpy.potential.MN3ExponentialDiskPotential', 'potential.MN3ExponentialDiskPotential', ([], {'amp': '(1.0)', 'hr': '(1.0)', 'hz': '(0.001)', 'sech': '(False)'}), '(amp=1.0, hr=1.0, hz=0.001, sech=False)\n', (170095, 170134), False, 'from galpy import potential\n'), ((170138, 170205), 'galpy.potential.DoubleExponentialDiskPotential', 'potential.DoubleExponentialDiskPotential', ([], {'amp': '(1.0)', 'hr': '(1.0)', 'hz': '(0.001)'}), '(amp=1.0, hr=1.0, hz=0.001)\n', (170178, 170205), False, 'from galpy import potential\n'), ((170414, 170489), 'galpy.potential.MN3ExponentialDiskPotential', 
'potential.MN3ExponentialDiskPotential', ([], {'amp': '(1.0)', 'hr': '(1.0)', 'hz': '(0.62)', 'sech': '(False)'}), '(amp=1.0, hr=1.0, hz=0.62, sech=False)\n', (170451, 170489), False, 'from galpy import potential\n'), ((170493, 170559), 'galpy.potential.DoubleExponentialDiskPotential', 'potential.DoubleExponentialDiskPotential', ([], {'amp': '(1.0)', 'hr': '(1.0)', 'hz': '(0.62)'}), '(amp=1.0, hr=1.0, hz=0.62)\n', (170533, 170559), False, 'from galpy import potential\n'), ((170774, 170848), 'galpy.potential.MN3ExponentialDiskPotential', 'potential.MN3ExponentialDiskPotential', ([], {'amp': '(0.5)', 'hr': '(1.0)', 'hz': '(1.24)', 'sech': '(True)'}), '(amp=0.5, hr=1.0, hz=1.24, sech=True)\n', (170811, 170848), False, 'from galpy import potential\n'), ((170852, 170918), 'galpy.potential.DoubleExponentialDiskPotential', 'potential.DoubleExponentialDiskPotential', ([], {'amp': '(1.0)', 'hr': '(1.0)', 'hz': '(0.62)'}), '(amp=1.0, hr=1.0, hz=0.62)\n', (170892, 170918), False, 'from galpy import potential\n'), ((171138, 171214), 'galpy.potential.MN3ExponentialDiskPotential', 'potential.MN3ExponentialDiskPotential', ([], {'amp': '(1.0)', 'hr': '(1.0)', 'hz': '(0.001)', 'sech': '(False)'}), '(amp=1.0, hr=1.0, hz=0.001, sech=False)\n', (171175, 171214), False, 'from galpy import potential\n'), ((171218, 171285), 'galpy.potential.DoubleExponentialDiskPotential', 'potential.DoubleExponentialDiskPotential', ([], {'amp': '(1.0)', 'hr': '(1.0)', 'hz': '(0.001)'}), '(amp=1.0, hr=1.0, hz=0.001)\n', (171258, 171285), False, 'from galpy import potential\n'), ((171495, 171570), 'galpy.potential.MN3ExponentialDiskPotential', 'potential.MN3ExponentialDiskPotential', ([], {'amp': '(1.0)', 'hr': '(1.0)', 'hz': '(0.62)', 'sech': '(False)'}), '(amp=1.0, hr=1.0, hz=0.62, sech=False)\n', (171532, 171570), False, 'from galpy import potential\n'), ((171574, 171640), 'galpy.potential.DoubleExponentialDiskPotential', 'potential.DoubleExponentialDiskPotential', ([], {'amp': '(1.0)', 'hr': '(1.0)', 
'hz': '(0.62)'}), '(amp=1.0, hr=1.0, hz=0.62)\n', (171614, 171640), False, 'from galpy import potential\n'), ((171857, 171931), 'galpy.potential.MN3ExponentialDiskPotential', 'potential.MN3ExponentialDiskPotential', ([], {'amp': '(0.5)', 'hr': '(1.0)', 'hz': '(1.24)', 'sech': '(True)'}), '(amp=0.5, hr=1.0, hz=1.24, sech=True)\n', (171894, 171931), False, 'from galpy import potential\n'), ((171936, 172002), 'galpy.potential.DoubleExponentialDiskPotential', 'potential.DoubleExponentialDiskPotential', ([], {'amp': '(1.0)', 'hr': '(1.0)', 'hz': '(0.62)'}), '(amp=1.0, hr=1.0, hz=0.62)\n', (171976, 172002), False, 'from galpy import potential\n'), ((172253, 172347), 'galpy.potential.MN3ExponentialDiskPotential', 'potential.MN3ExponentialDiskPotential', ([], {'amp': '(1.0)', 'hr': '(1.0)', 'hz': '(0.001)', 'sech': '(False)', 'posdens': '(True)'}), '(amp=1.0, hr=1.0, hz=0.001, sech=False,\n posdens=True)\n', (172290, 172347), False, 'from galpy import potential\n'), ((172393, 172460), 'galpy.potential.DoubleExponentialDiskPotential', 'potential.DoubleExponentialDiskPotential', ([], {'amp': '(1.0)', 'hr': '(1.0)', 'hz': '(0.001)'}), '(amp=1.0, hr=1.0, hz=0.001)\n', (172433, 172460), False, 'from galpy import potential\n'), ((172669, 172762), 'galpy.potential.MN3ExponentialDiskPotential', 'potential.MN3ExponentialDiskPotential', ([], {'amp': '(1.0)', 'hr': '(1.0)', 'hz': '(0.62)', 'sech': '(False)', 'posdens': '(True)'}), '(amp=1.0, hr=1.0, hz=0.62, sech=False,\n posdens=True)\n', (172706, 172762), False, 'from galpy import potential\n'), ((172808, 172874), 'galpy.potential.DoubleExponentialDiskPotential', 'potential.DoubleExponentialDiskPotential', ([], {'amp': '(1.0)', 'hr': '(1.0)', 'hz': '(0.62)'}), '(amp=1.0, hr=1.0, hz=0.62)\n', (172848, 172874), False, 'from galpy import potential\n'), ((173095, 173189), 'galpy.potential.MN3ExponentialDiskPotential', 'potential.MN3ExponentialDiskPotential', ([], {'amp': '(1.0)', 'hr': '(1.0)', 'hz': '(0.001)', 'sech': '(False)', 
'posdens': '(True)'}), '(amp=1.0, hr=1.0, hz=0.001, sech=False,\n posdens=True)\n', (173132, 173189), False, 'from galpy import potential\n'), ((173235, 173302), 'galpy.potential.DoubleExponentialDiskPotential', 'potential.DoubleExponentialDiskPotential', ([], {'amp': '(1.0)', 'hr': '(1.0)', 'hz': '(0.001)'}), '(amp=1.0, hr=1.0, hz=0.001)\n', (173275, 173302), False, 'from galpy import potential\n'), ((173661, 173754), 'galpy.potential.MN3ExponentialDiskPotential', 'potential.MN3ExponentialDiskPotential', ([], {'amp': '(1.0)', 'hr': '(1.0)', 'hz': '(0.62)', 'sech': '(False)', 'posdens': '(True)'}), '(amp=1.0, hr=1.0, hz=0.62, sech=False,\n posdens=True)\n', (173698, 173754), False, 'from galpy import potential\n'), ((173800, 173866), 'galpy.potential.DoubleExponentialDiskPotential', 'potential.DoubleExponentialDiskPotential', ([], {'amp': '(1.0)', 'hr': '(1.0)', 'hz': '(0.62)'}), '(amp=1.0, hr=1.0, hz=0.62)\n', (173840, 173866), False, 'from galpy import potential\n'), ((174436, 174469), 'numpy.linspace', 'numpy.linspace', (['(0.001)', '(25.0)', '(1001)'], {}), '(0.001, 25.0, 1001)\n', (174450, 174469), False, 'import numpy\n'), ((174476, 174572), 'galpy.potential.TwoPowerTriaxialPotential', 'potential.TwoPowerTriaxialPotential', ([], {'normalize': '(1.0)', 'b': '(1.0)', 'c': '(1.0)', 'a': '(1.5)', 'alpha': '(1.5)', 'beta': '(3.5)'}), '(normalize=1.0, b=1.0, c=1.0, a=1.5,\n alpha=1.5, beta=3.5)\n', (174511, 174572), False, 'from galpy import potential\n'), ((174615, 174694), 'galpy.potential.TwoPowerSphericalPotential', 'potential.TwoPowerSphericalPotential', ([], {'normalize': '(1.0)', 'a': '(1.5)', 'alpha': '(1.5)', 'beta': '(3.5)'}), '(normalize=1.0, a=1.5, alpha=1.5, beta=3.5)\n', (174651, 174694), False, 'from galpy import potential\n'), ((175060, 175132), 'galpy.potential.TriaxialHernquistPotential', 'potential.TriaxialHernquistPotential', ([], {'normalize': '(1.0)', 'b': '(1.0)', 'c': '(1.0)', 'a': '(1.5)'}), '(normalize=1.0, b=1.0, c=1.0, a=1.5)\n', 
(175096, 175132), False, 'from galpy import potential\n'), ((175135, 175185), 'galpy.potential.HernquistPotential', 'potential.HernquistPotential', ([], {'normalize': '(1.0)', 'a': '(1.5)'}), '(normalize=1.0, a=1.5)\n', (175163, 175185), False, 'from galpy import potential\n'), ((175420, 175486), 'galpy.potential.TriaxialNFWPotential', 'potential.TriaxialNFWPotential', ([], {'normalize': '(1.0)', 'b': '(1.0)', 'c': '(1.0)', 'a': '(1.5)'}), '(normalize=1.0, b=1.0, c=1.0, a=1.5)\n', (175450, 175486), False, 'from galpy import potential\n'), ((175489, 175533), 'galpy.potential.NFWPotential', 'potential.NFWPotential', ([], {'normalize': '(1.0)', 'a': '(1.5)'}), '(normalize=1.0, a=1.5)\n', (175511, 175533), False, 'from galpy import potential\n'), ((175758, 175826), 'galpy.potential.TriaxialJaffePotential', 'potential.TriaxialJaffePotential', ([], {'normalize': '(1.0)', 'b': '(1.0)', 'c': '(1.0)', 'a': '(1.5)'}), '(normalize=1.0, b=1.0, c=1.0, a=1.5)\n', (175790, 175826), False, 'from galpy import potential\n'), ((175829, 175875), 'galpy.potential.JaffePotential', 'potential.JaffePotential', ([], {'normalize': '(1.0)', 'a': '(1.5)'}), '(normalize=1.0, a=1.5)\n', (175853, 175875), False, 'from galpy import potential\n'), ((177721, 177812), 'galpy.potential.TriaxialNFWPotential', 'potential.TriaxialNFWPotential', ([], {'normalize': '(1.0)', 'a': '(1.5)', 'b': '(0.5)', 'pa': '(30.0 / 180.0 * numpy.pi)'}), '(normalize=1.0, a=1.5, b=0.5, pa=30.0 / 180.0 *\n numpy.pi)\n', (177751, 177812), False, 'from galpy import potential\n'), ((177934, 177969), 'numpy.linspace', 'numpy.linspace', (['(0.0)', 'numpy.pi', '(1001)'], {}), '(0.0, numpy.pi, 1001)\n', (177948, 177969), False, 'import numpy\n'), ((178038, 178055), 'numpy.argmin', 'numpy.argmin', (['pot'], {}), '(pot)\n', (178050, 178055), False, 'import numpy\n'), ((178353, 178371), 'numpy.argmax', 'numpy.argmax', (['dens'], {}), '(dens)\n', (178365, 178371), False, 'import numpy\n'), ((178592, 178685), 
'galpy.potential.TriaxialNFWPotential', 'potential.TriaxialNFWPotential', ([], {'normalize': '(1.0)', 'a': '(1.5)', 'b': '(0.5)', 'pa': '(-60.0 / 180.0 * numpy.pi)'}), '(normalize=1.0, a=1.5, b=0.5, pa=-60.0 / \n 180.0 * numpy.pi)\n', (178622, 178685), False, 'from galpy import potential\n'), ((178806, 178841), 'numpy.linspace', 'numpy.linspace', (['(0.0)', 'numpy.pi', '(1001)'], {}), '(0.0, numpy.pi, 1001)\n', (178820, 178841), False, 'import numpy\n'), ((178910, 178927), 'numpy.argmin', 'numpy.argmin', (['pot'], {}), '(pot)\n', (178922, 178927), False, 'import numpy\n'), ((179226, 179244), 'numpy.argmax', 'numpy.argmax', (['dens'], {}), '(dens)\n', (179238, 179244), False, 'import numpy\n'), ((179868, 179903), 'numpy.linspace', 'numpy.linspace', (['(0.0)', 'numpy.pi', '(1001)'], {}), '(0.0, numpy.pi, 1001)\n', (179882, 179903), False, 'import numpy\n'), ((179909, 179931), 'numpy.zeros_like', 'numpy.zeros_like', (['phis'], {}), '(phis)\n', (179925, 179931), False, 'import numpy\n'), ((180002, 180032), 'galpy.util.coords.rect_to_cyl', 'coords.rect_to_cyl', (['xs', 'ys', 'zs'], {}), '(xs, ys, zs)\n', (180020, 180032), False, 'from galpy.util import coords\n'), ((180115, 180132), 'numpy.argmin', 'numpy.argmin', (['pot'], {}), '(pot)\n', (180127, 180132), False, 'import numpy\n'), ((180437, 180455), 'numpy.argmax', 'numpy.argmax', (['dens'], {}), '(dens)\n', (180449, 180455), False, 'import numpy\n'), ((180938, 180973), 'numpy.linspace', 'numpy.linspace', (['(0.0)', 'numpy.pi', '(1001)'], {}), '(0.0, numpy.pi, 1001)\n', (180952, 180973), False, 'import numpy\n'), ((181006, 181028), 'numpy.zeros_like', 'numpy.zeros_like', (['phis'], {}), '(phis)\n', (181022, 181028), False, 'import numpy\n'), ((181072, 181102), 'galpy.util.coords.rect_to_cyl', 'coords.rect_to_cyl', (['xs', 'ys', 'zs'], {}), '(xs, ys, zs)\n', (181090, 181102), False, 'from galpy.util import coords\n'), ((181185, 181202), 'numpy.argmin', 'numpy.argmin', (['pot'], {}), '(pot)\n', (181197, 181202), False, 
'import numpy\n'), ((181508, 181526), 'numpy.argmax', 'numpy.argmax', (['dens'], {}), '(dens)\n', (181520, 181526), False, 'import numpy\n'), ((181867, 181920), 'galpy.potential.TriaxialNFWPotential', 'potential.TriaxialNFWPotential', ([], {'amp': '(1.0)', 'b': '(0.7)', 'c': '(0.9)'}), '(amp=1.0, b=0.7, c=0.9)\n', (181897, 181920), False, 'from galpy import potential\n'), ((183544, 183629), 'galpy.potential.SoftenedNeedleBarPotential', 'potential.SoftenedNeedleBarPotential', ([], {'normalize': '(1.0)', 'a': '(1.0)', 'c': '(0.1)', 'b': '(0.0)', 'pa': '(0.0)'}), '(normalize=1.0, a=1.0, c=0.1, b=0.0, pa=0.0\n )\n', (183580, 183629), False, 'from galpy import potential\n'), ((184252, 184337), 'galpy.potential.SoftenedNeedleBarPotential', 'potential.SoftenedNeedleBarPotential', ([], {'normalize': '(1.0)', 'a': '(1.0)', 'c': '(0.1)', 'b': '(0.3)', 'pa': '(0.0)'}), '(normalize=1.0, a=1.0, c=0.1, b=0.3, pa=0.0\n )\n', (184288, 184337), False, 'from galpy import potential\n'), ((184971, 185241), 'galpy.potential.DiskSCFPotential', 'potential.DiskSCFPotential', ([], {'dens': '(lambda R, z: 1.0)', 'Sigma': "[{'type': 'exp', 'h': 1.0 / 3.0, 'amp': 1.0}, {'type': 'expwhole', 'h': 1.0 /\n 3.0, 'amp': 1.0, 'Rhole': 0.5}]", 'hz': "[{'type': 'exp', 'h': 1.0 / 27.0}, {'type': 'sech2', 'h': 1.0 / 27.0}]", 'a': '(1.0)', 'N': '(2)', 'L': '(2)'}), "(dens=lambda R, z: 1.0, Sigma=[{'type': 'exp',\n 'h': 1.0 / 3.0, 'amp': 1.0}, {'type': 'expwhole', 'h': 1.0 / 3.0, 'amp':\n 1.0, 'Rhole': 0.5}], hz=[{'type': 'exp', 'h': 1.0 / 27.0}, {'type':\n 'sech2', 'h': 1.0 / 27.0}], a=1.0, N=2, L=2)\n", (184997, 185241), False, 'from galpy import potential\n'), ((185481, 185510), 'numpy.linspace', 'numpy.linspace', (['(0.3)', '(1.5)', '(101)'], {}), '(0.3, 1.5, 101)\n', (185495, 185510), False, 'import numpy\n'), ((186934, 187204), 'galpy.potential.DiskSCFPotential', 'potential.DiskSCFPotential', ([], {'dens': '(lambda R, z: 1.0)', 'Sigma': "[{'type': 'exp', 'h': 1.0 / 3.0, 'amp': 1.0}, {'type': 
'expwhole', 'h': 1.0 /\n 3.0, 'amp': 1.0, 'Rhole': 0.5}]", 'hz': "[{'type': 'exp', 'h': 1.0 / 27.0}, {'type': 'sech2', 'h': 1.0 / 27.0}]", 'a': '(1.0)', 'N': '(2)', 'L': '(2)'}), "(dens=lambda R, z: 1.0, Sigma=[{'type': 'exp',\n 'h': 1.0 / 3.0, 'amp': 1.0}, {'type': 'expwhole', 'h': 1.0 / 3.0, 'amp':\n 1.0, 'Rhole': 0.5}], hz=[{'type': 'exp', 'h': 1.0 / 27.0}, {'type':\n 'sech2', 'h': 1.0 / 27.0}], a=1.0, N=2, L=2)\n", (186960, 187204), False, 'from galpy import potential\n'), ((187447, 187488), 'numpy.linspace', 'numpy.linspace', (['(0.1 / 27.0)', '(3.0 / 27)', '(101)'], {}), '(0.1 / 27.0, 3.0 / 27, 101)\n', (187461, 187488), False, 'import numpy\n'), ((189205, 189284), 'galpy.potential.DoubleExponentialDiskPotential', 'potential.DoubleExponentialDiskPotential', ([], {'amp': '(13.5)', 'hr': '(1.0 / 3.0)', 'hz': '(1.0 / 27.0)'}), '(amp=13.5, hr=1.0 / 3.0, hz=1.0 / 27.0)\n', (189245, 189284), False, 'from galpy import potential\n'), ((189761, 189790), 'numpy.linspace', 'numpy.linspace', (['(0.3)', '(1.5)', '(101)'], {}), '(0.3, 1.5, 101)\n', (189775, 189790), False, 'import numpy\n'), ((189801, 189842), 'numpy.linspace', 'numpy.linspace', (['(0.1 / 27.0)', '(3.0 / 27)', '(101)'], {}), '(0.1 / 27.0, 3.0 / 27, 101)\n', (189815, 189842), False, 'import numpy\n'), ((191657, 191736), 'galpy.potential.DoubleExponentialDiskPotential', 'potential.DoubleExponentialDiskPotential', ([], {'amp': '(13.5)', 'hr': '(1.0 / 3.0)', 'hz': '(1.0 / 27.0)'}), '(amp=13.5, hr=1.0 / 3.0, hz=1.0 / 27.0)\n', (191697, 191736), False, 'from galpy import potential\n'), ((192009, 192038), 'numpy.linspace', 'numpy.linspace', (['(0.3)', '(1.5)', '(101)'], {}), '(0.3, 1.5, 101)\n', (192023, 192038), False, 'import numpy\n'), ((192049, 192090), 'numpy.linspace', 'numpy.linspace', (['(0.1 / 27.0)', '(3.0 / 27)', '(101)'], {}), '(0.1 / 27.0, 3.0 / 27, 101)\n', (192063, 192090), False, 'import numpy\n'), ((192945, 192975), 'galpy.potential.DehnenBarPotential', 'potential.DehnenBarPotential', ([], {}), 
'()\n', (192973, 192975), False, 'from galpy import potential\n'), ((193055, 193101), 'galpy.potential.DehnenSmoothWrapperPotential', 'potential.DehnenSmoothWrapperPotential', ([], {'pot': 'dp'}), '(pot=dp)\n', (193093, 193101), False, 'from galpy import potential\n'), ((194954, 195013), 'galpy.potential.HernquistPotential', 'potential.HernquistPotential', ([], {'amp': '(0.55)', 'a': '(1.3)', 'ro': 'ro', 'vo': 'vo'}), '(amp=0.55, a=1.3, ro=ro, vo=vo)\n', (194982, 195013), False, 'from galpy import potential\n'), ((196053, 196112), 'galpy.potential.HernquistPotential', 'potential.HernquistPotential', ([], {'amp': '(0.55)', 'a': '(1.3)', 'ro': 'ro', 'vo': 'vo'}), '(amp=0.55, a=1.3, ro=ro, vo=vo)\n', (196081, 196112), False, 'from galpy import potential\n'), ((196119, 196165), 'galpy.potential.DehnenSmoothWrapperPotential', 'potential.DehnenSmoothWrapperPotential', ([], {'pot': 'hp'}), '(pot=hp)\n', (196157, 196165), False, 'from galpy import potential\n'), ((196180, 196226), 'galpy.util.conversion.get_physical', 'conversion.get_physical', (['hpw'], {'include_set': '(True)'}), '(hpw, include_set=True)\n', (196203, 196226), False, 'from galpy.util import conversion\n'), ((196670, 196722), 'galpy.potential.HernquistPotential', 'potential.HernquistPotential', ([], {'amp': '(0.55)', 'a': '(1.3)', 'ro': 'ro'}), '(amp=0.55, a=1.3, ro=ro)\n', (196698, 196722), False, 'from galpy import potential\n'), ((196730, 196776), 'galpy.potential.DehnenSmoothWrapperPotential', 'potential.DehnenSmoothWrapperPotential', ([], {'pot': 'hp'}), '(pot=hp)\n', (196768, 196776), False, 'from galpy import potential\n'), ((196791, 196837), 'galpy.util.conversion.get_physical', 'conversion.get_physical', (['hpw'], {'include_set': '(True)'}), '(hpw, include_set=True)\n', (196814, 196837), False, 'from galpy.util import conversion\n'), ((197157, 197209), 'galpy.potential.HernquistPotential', 'potential.HernquistPotential', ([], {'amp': '(0.55)', 'a': '(1.3)', 'vo': 'vo'}), '(amp=0.55, a=1.3, 
vo=vo)\n', (197185, 197209), False, 'from galpy import potential\n'), ((197217, 197263), 'galpy.potential.DehnenSmoothWrapperPotential', 'potential.DehnenSmoothWrapperPotential', ([], {'pot': 'hp'}), '(pot=hp)\n', (197255, 197263), False, 'from galpy import potential\n'), ((197278, 197324), 'galpy.util.conversion.get_physical', 'conversion.get_physical', (['hpw'], {'include_set': '(True)'}), '(hpw, include_set=True)\n', (197301, 197324), False, 'from galpy.util import conversion\n'), ((197912, 197958), 'galpy.potential.DehnenSmoothWrapperPotential', 'potential.DehnenSmoothWrapperPotential', ([], {'pot': 'hp'}), '(pot=hp)\n', (197950, 197958), False, 'from galpy import potential\n'), ((197973, 198019), 'galpy.util.conversion.get_physical', 'conversion.get_physical', (['hpw'], {'include_set': '(True)'}), '(hpw, include_set=True)\n', (197996, 198019), False, 'from galpy.util import conversion\n'), ((198534, 198580), 'galpy.potential.DehnenSmoothWrapperPotential', 'potential.DehnenSmoothWrapperPotential', ([], {'pot': 'hp'}), '(pot=hp)\n', (198572, 198580), False, 'from galpy import potential\n'), ((198595, 198641), 'galpy.util.conversion.get_physical', 'conversion.get_physical', (['hpw'], {'include_set': '(True)'}), '(hpw, include_set=True)\n', (198618, 198641), False, 'from galpy.util import conversion\n'), ((199032, 199078), 'galpy.potential.DehnenSmoothWrapperPotential', 'potential.DehnenSmoothWrapperPotential', ([], {'pot': 'hp'}), '(pot=hp)\n', (199070, 199078), False, 'from galpy import potential\n'), ((199093, 199139), 'galpy.util.conversion.get_physical', 'conversion.get_physical', (['hpw'], {'include_set': '(True)'}), '(hpw, include_set=True)\n', (199116, 199139), False, 'from galpy.util import conversion\n'), ((199585, 199615), 'galpy.potential.DehnenBarPotential', 'potential.DehnenBarPotential', ([], {}), '()\n', (199613, 199615), False, 'from galpy import potential\n'), ((199625, 199671), 'galpy.potential.DehnenSmoothWrapperPotential', 
'potential.DehnenSmoothWrapperPotential', ([], {'pot': 'dp'}), '(pot=dp)\n', (199663, 199671), False, 'from galpy import potential\n'), ((199689, 199706), 'pickle.dumps', 'pickle.dumps', (['dwp'], {}), '(dwp)\n', (199701, 199706), False, 'import pickle\n'), ((199726, 199751), 'pickle.loads', 'pickle.loads', (['pickled_dwp'], {}), '(pickled_dwp)\n', (199738, 199751), False, 'import pickle\n'), ((199890, 199917), 'numpy.linspace', 'numpy.linspace', (['(0.1)', '(1)', '(100)'], {}), '(0.1, 1, 100)\n', (199904, 199917), False, 'import numpy\n'), ((199928, 199954), 'numpy.linspace', 'numpy.linspace', (['(-1)', '(1)', '(100)'], {}), '(-1, 1, 100)\n', (199942, 199954), False, 'import numpy\n'), ((199967, 200003), 'numpy.linspace', 'numpy.linspace', (['(0)', '(2 * numpy.pi)', '(100)'], {}), '(0, 2 * numpy.pi, 100)\n', (199981, 200003), False, 'import numpy\n'), ((200012, 200037), 'numpy.linspace', 'numpy.linspace', (['(0)', '(1)', '(100)'], {}), '(0, 1, 100)\n', (200026, 200037), False, 'import numpy\n'), ((200274, 200304), 'galpy.potential.DehnenBarPotential', 'potential.DehnenBarPotential', ([], {}), '()\n', (200302, 200304), False, 'from galpy import potential\n'), ((200314, 200360), 'galpy.potential.DehnenSmoothWrapperPotential', 'potential.DehnenSmoothWrapperPotential', ([], {'pot': 'dp'}), '(pot=dp)\n', (200352, 200360), False, 'from galpy import potential\n'), ((200491, 200537), 'galpy.potential.DehnenSmoothWrapperPotential', 'potential.DehnenSmoothWrapperPotential', ([], {'pot': 'dp'}), '(pot=dp)\n', (200529, 200537), False, 'from galpy import potential\n'), ((200847, 200910), 'galpy.potential.LogarithmicHaloPotential', 'potential.LogarithmicHaloPotential', ([], {'normalize': '(1.0)', 'q': '(0.9)', 'b': '(0.8)'}), '(normalize=1.0, q=0.9, b=0.8)\n', (200881, 200910), False, 'from galpy import potential\n'), ((202739, 202802), 'galpy.potential.LogarithmicHaloPotential', 'potential.LogarithmicHaloPotential', ([], {'normalize': '(1.0)', 'q': '(0.9)', 'b': '(0.8)'}), 
'(normalize=1.0, q=0.9, b=0.8)\n', (202773, 202802), False, 'from galpy import potential\n'), ((203920, 203960), 'galpy.potential.RingPotential', 'potential.RingPotential', ([], {'amp': '(3.0)', 'a': '(0.75)'}), '(amp=3.0, a=0.75)\n', (203943, 203960), False, 'from galpy import potential\n'), ((204469, 204518), 'galpy.potential.LogarithmicHaloPotential', 'potential.LogarithmicHaloPotential', ([], {'normalize': '(1.0)'}), '(normalize=1.0)\n', (204503, 204518), False, 'from galpy import potential\n'), ((204532, 204602), 'galpy.potential.DehnenSmoothWrapperPotential', 'potential.DehnenSmoothWrapperPotential', ([], {'pot': 'lp', 'tform': '(4.0)', 'tsteady': '(3.0)'}), '(pot=lp, tform=4.0, tsteady=3.0)\n', (204570, 204602), False, 'from galpy import potential\n'), ((204668, 204754), 'galpy.potential.DehnenSmoothWrapperPotential', 'potential.DehnenSmoothWrapperPotential', ([], {'pot': 'lp', 'tform': '(4.0)', 'tsteady': '(3.0)', 'decay': '(True)'}), '(pot=lp, tform=4.0, tsteady=3.0,\n decay=True)\n', (204706, 204754), False, 'from galpy import potential\n'), ((204809, 204840), 'numpy.linspace', 'numpy.linspace', (['(0.0)', '(10.0)', '(1001)'], {}), '(0.0, 10.0, 1001)\n', (204823, 204840), False, 'import numpy\n'), ((205366, 205438), 'galpy.potential.RazorThinExponentialDiskPotential', 'potential.RazorThinExponentialDiskPotential', ([], {'normalize': '(1.0)', 'hr': '(3.0 / 8.0)'}), '(normalize=1.0, hr=3.0 / 8.0)\n', (205409, 205438), False, 'from galpy import potential\n'), ((205688, 205729), 'galpy.potential.FerrersPotential', 'potential.FerrersPotential', ([], {'normalize': '(1.0)'}), '(normalize=1.0)\n', (205714, 205729), False, 'from galpy import potential\n'), ((206192, 206228), 'galpy.potential.LogarithmicHaloPotential', 'potential.LogarithmicHaloPotential', ([], {}), '()\n', (206226, 206228), False, 'from galpy import potential\n'), ((206361, 206394), 'galpy.potential.PlummerPotential', 'potential.PlummerPotential', ([], {'b': '(0.0)'}), '(b=0.0)\n', (206387, 
206394), False, 'from galpy import potential\n'), ((206700, 206733), 'galpy.potential.PlummerPotential', 'potential.PlummerPotential', ([], {'b': '(0.0)'}), '(b=0.0)\n', (206726, 206733), False, 'from galpy import potential\n'), ((206982, 207018), 'galpy.potential.LogarithmicHaloPotential', 'potential.LogarithmicHaloPotential', ([], {}), '()\n', (207016, 207018), False, 'from galpy import potential\n'), ((207263, 207303), 'galpy.potential.KeplerPotential', 'potential.KeplerPotential', ([], {'normalize': '(1.0)'}), '(normalize=1.0)\n', (207288, 207303), False, 'from galpy import potential\n'), ((207771, 207812), 'galpy.potential.ttensor', 'potential.ttensor', (['[pmass]', '(1.0)', '(0.0)', '(0.0)'], {}), '([pmass], 1.0, 0.0, 0.0)\n', (207788, 207812), False, 'from galpy import potential\n'), ((208029, 208085), 'galpy.potential.ttensor', 'potential.ttensor', (['[pmass]', '(1.0)', '(0.0)', '(0.0)'], {'eigenval': '(True)'}), '([pmass], 1.0, 0.0, 0.0, eigenval=True)\n', (208046, 208085), False, 'from galpy import potential\n'), ((208265, 208306), 'galpy.potential.ttensor', 'potential.ttensor', (['[pmass]', '(1.0)', '(1.0)', '(1.0)'], {}), '([pmass], 1.0, 1.0, 1.0)\n', (208282, 208306), False, 'from galpy import potential\n'), ((209580, 209643), 'galpy.potential.LogarithmicHaloPotential', 'potential.LogarithmicHaloPotential', ([], {'normalize': '(1.0)', 'b': '(0.8)', 'q': '(0.7)'}), '(normalize=1.0, b=0.8, q=0.7)\n', (209614, 209643), False, 'from galpy import potential\n'), ((210011, 210064), 'galpy.potential.zvc_range', 'potential.zvc_range', (['potential.MWPotential2014', 'E', 'Lz'], {}), '(potential.MWPotential2014, E, Lz)\n', (210030, 210064), False, 'from galpy import potential\n'), ((210928, 210981), 'galpy.potential.zvc_range', 'potential.zvc_range', (['potential.MWPotential2014', 'E', 'Lz'], {}), '(potential.MWPotential2014, E, Lz)\n', (210947, 210981), False, 'from galpy import potential\n'), ((211819, 211861), 'galpy.potential.PlummerPotential', 
'potential.PlummerPotential', ([], {'normalize': '(True)'}), '(normalize=True)\n', (211845, 211861), False, 'from galpy import potential\n'), ((213298, 213351), 'galpy.potential.zvc_range', 'potential.zvc_range', (['potential.MWPotential2014', 'E', 'Lz'], {}), '(potential.MWPotential2014, E, Lz)\n', (213317, 213351), False, 'from galpy import potential\n'), ((213695, 213748), 'galpy.potential.zvc_range', 'potential.zvc_range', (['potential.MWPotential2014', 'E', 'Lz'], {}), '(potential.MWPotential2014, E, Lz)\n', (213714, 213748), False, 'from galpy import potential\n'), ((214062, 214104), 'galpy.potential.PlummerPotential', 'potential.PlummerPotential', ([], {'normalize': '(True)'}), '(normalize=True)\n', (214088, 214104), False, 'from galpy import potential\n'), ((214441, 214494), 'galpy.potential.zvc_range', 'potential.zvc_range', (['potential.MWPotential2014', 'E', 'Lz'], {}), '(potential.MWPotential2014, E, Lz)\n', (214460, 214494), False, 'from galpy import potential\n'), ((214533, 214588), 'galpy.potential.zvc', 'potential.zvc', (['potential.MWPotential2014', 'Rtrial', 'E', 'Lz'], {}), '(potential.MWPotential2014, Rtrial, E, Lz)\n', (214546, 214588), False, 'from galpy import potential\n'), ((214815, 214870), 'galpy.potential.zvc', 'potential.zvc', (['potential.MWPotential2014', 'Rtrial', 'E', 'Lz'], {}), '(potential.MWPotential2014, Rtrial, E, Lz)\n', (214828, 214870), False, 'from galpy import potential\n'), ((215097, 215152), 'galpy.potential.zvc', 'potential.zvc', (['potential.MWPotential2014', 'Rtrial', 'E', 'Lz'], {}), '(potential.MWPotential2014, Rtrial, E, Lz)\n', (215110, 215152), False, 'from galpy import potential\n'), ((215406, 215459), 'galpy.potential.zvc_range', 'potential.zvc_range', (['potential.MWPotential2014', 'E', 'Lz'], {}), '(potential.MWPotential2014, E, Lz)\n', (215425, 215459), False, 'from galpy import potential\n'), ((215498, 215553), 'galpy.potential.zvc', 'potential.zvc', (['potential.MWPotential2014', 'Rtrial', 'E', 'Lz'], {}), 
'(potential.MWPotential2014, Rtrial, E, Lz)\n', (215511, 215553), False, 'from galpy import potential\n'), ((215780, 215835), 'galpy.potential.zvc', 'potential.zvc', (['potential.MWPotential2014', 'Rtrial', 'E', 'Lz'], {}), '(potential.MWPotential2014, Rtrial, E, Lz)\n', (215793, 215835), False, 'from galpy import potential\n'), ((216062, 216117), 'galpy.potential.zvc', 'potential.zvc', (['potential.MWPotential2014', 'Rtrial', 'E', 'Lz'], {}), '(potential.MWPotential2014, Rtrial, E, Lz)\n', (216075, 216117), False, 'from galpy import potential\n'), ((216341, 216383), 'galpy.potential.PlummerPotential', 'potential.PlummerPotential', ([], {'normalize': '(True)'}), '(normalize=True)\n', (216367, 216383), False, 'from galpy import potential\n'), ((217244, 217297), 'galpy.potential.zvc_range', 'potential.zvc_range', (['potential.MWPotential2014', 'E', 'Lz'], {}), '(potential.MWPotential2014, E, Lz)\n', (217263, 217297), False, 'from galpy import potential\n'), ((217589, 217642), 'galpy.potential.zvc_range', 'potential.zvc_range', (['potential.MWPotential2014', 'E', 'Lz'], {}), '(potential.MWPotential2014, E, Lz)\n', (217608, 217642), False, 'from galpy import potential\n'), ((218241, 218283), 'galpy.potential.HernquistPotential', 'potential.HernquistPotential', ([], {'amp': '(1.0)', 'a': 'a'}), '(amp=1.0, a=a)\n', (218269, 218283), False, 'from galpy import potential\n'), ((218476, 218537), 'galpy.potential.DehnenSphericalPotential', 'potential.DehnenSphericalPotential', ([], {'amp': '(1.0)', 'a': 'a', 'alpha': 'alpha'}), '(amp=1.0, a=a, alpha=alpha)\n', (218510, 218537), False, 'from galpy import potential\n'), ((218716, 218756), 'galpy.potential.PlummerPotential', 'potential.PlummerPotential', ([], {'amp': '(1.0)', 'b': 'a'}), '(amp=1.0, b=a)\n', (218742, 218756), False, 'from galpy import potential\n'), ((218998, 219040), 'galpy.potential.HernquistPotential', 'potential.HernquistPotential', ([], {'amp': '(1.0)', 'a': 'a'}), '(amp=1.0, a=a)\n', (219026, 219040), 
False, 'from galpy import potential\n'), ((219219, 219280), 'galpy.potential.DehnenSphericalPotential', 'potential.DehnenSphericalPotential', ([], {'amp': '(1.0)', 'a': 'a', 'alpha': 'alpha'}), '(amp=1.0, a=a, alpha=alpha)\n', (219253, 219280), False, 'from galpy import potential\n'), ((219455, 219512), 'galpy.potential.MiyamotoNagaiPotential', 'potential.MiyamotoNagaiPotential', ([], {'amp': '(1.0)', 'a': 'a', 'b': '(a / 5.0)'}), '(amp=1.0, a=a, b=a / 5.0)\n', (219487, 219512), False, 'from galpy import potential\n'), ((223878, 223934), 'galpy.potential.MiyamotoNagaiPotential', 'potential.MiyamotoNagaiPotential', ([], {'amp': '(1.0)', 'a': '(0.5)', 'b': '(0.05)'}), '(amp=1.0, a=0.5, b=0.05)\n', (223910, 223934), False, 'from galpy import potential\n'), ((224142, 224172), 'galpy.potential.DehnenBarPotential', 'potential.DehnenBarPotential', ([], {}), '()\n', (224170, 224172), False, 'from galpy import potential\n'), ((225109, 225165), 'galpy.potential.LogarithmicHaloPotential', 'potential.LogarithmicHaloPotential', ([], {'normalize': '(1.0)', 'q': '(0.9)'}), '(normalize=1.0, q=0.9)\n', (225143, 225165), False, 'from galpy import potential\n'), ((226279, 226335), 'galpy.potential.LogarithmicHaloPotential', 'potential.LogarithmicHaloPotential', ([], {'normalize': '(1.0)', 'q': '(0.9)'}), '(normalize=1.0, q=0.9)\n', (226313, 226335), False, 'from galpy import potential\n'), ((226344, 226405), 'galpy.potential.MiyamotoNagaiPotential', 'potential.MiyamotoNagaiPotential', ([], {'normalize': '(0.2)', 'a': '(0.4)', 'b': '(0.1)'}), '(normalize=0.2, a=0.4, b=0.1)\n', (226376, 226405), False, 'from galpy import potential\n'), ((226414, 226464), 'galpy.potential.HernquistPotential', 'potential.HernquistPotential', ([], {'normalize': '(0.4)', 'a': '(0.1)'}), '(normalize=0.4, a=0.1)\n', (226442, 226464), False, 'from galpy import potential\n'), ((227694, 227750), 'galpy.potential.LogarithmicHaloPotential', 'potential.LogarithmicHaloPotential', ([], {'normalize': '(1.0)', 'q': 
'(0.9)'}), '(normalize=1.0, q=0.9)\n', (227728, 227750), False, 'from galpy import potential\n'), ((228453, 228523), 'galpy.potential.LogarithmicHaloPotential', 'potential.LogarithmicHaloPotential', ([], {'normalize': '(1.0)', 'q': '(0.9)', 'ro': 'ro', 'vo': 'vo'}), '(normalize=1.0, q=0.9, ro=ro, vo=vo)\n', (228487, 228523), False, 'from galpy import potential\n'), ((228576, 228652), 'galpy.potential.LogarithmicHaloPotential', 'potential.LogarithmicHaloPotential', ([], {'normalize': '(1.0)', 'q': '(0.9)', 'ro': '(ro * 1.1)', 'vo': 'vo'}), '(normalize=1.0, q=0.9, ro=ro * 1.1, vo=vo)\n', (228610, 228652), False, 'from galpy import potential\n'), ((228705, 228781), 'galpy.potential.LogarithmicHaloPotential', 'potential.LogarithmicHaloPotential', ([], {'normalize': '(1.0)', 'q': '(0.9)', 'ro': 'ro', 'vo': '(vo * 1.1)'}), '(normalize=1.0, q=0.9, ro=ro, vo=vo * 1.1)\n', (228739, 228781), False, 'from galpy import potential\n'), ((228836, 228922), 'galpy.potential.LogarithmicHaloPotential', 'potential.LogarithmicHaloPotential', ([], {'normalize': '(1.0)', 'q': '(0.9)', 'ro': '(ro * 1.1)', 'vo': '(vo * 1.1)'}), '(normalize=1.0, q=0.9, ro=ro * 1.1, vo=vo *\n 1.1)\n', (228870, 228922), False, 'from galpy import potential\n'), ((231355, 231417), 'galpy.potential.HernquistPotential', 'potential.HernquistPotential', ([], {'amp': '(1.0)', 'a': '(2.0)', 'ro': '(8.3)', 'vo': '(230.0)'}), '(amp=1.0, a=2.0, ro=8.3, vo=230.0)\n', (231383, 231417), False, 'from galpy import potential\n'), ((232546, 232578), 'numpy.linspace', 'numpy.linspace', (['(-0.1)', '(0.1)', '(10001)'], {}), '(-0.1, 0.1, 10001)\n', (232560, 232578), False, 'import numpy\n'), ((232586, 232648), 'galpy.potential.IsothermalDiskPotential', 'potential.IsothermalDiskPotential', ([], {'amp': '(0.1)', 'sigma': '(20.5 / 220.0)'}), '(amp=0.1, sigma=20.5 / 220.0)\n', (232619, 232648), False, 'from galpy import potential\n'), ((232760, 232794), 'numpy.linspace', 'numpy.linspace', (['(-0.0002)', '(0.0002)', '(5)'], {}), 
'(-0.0002, 0.0002, 5)\n', (232774, 232794), False, 'import numpy\n'), ((233108, 233148), 'galpy.potential.KeplerPotential', 'potential.KeplerPotential', ([], {'normalize': '(1.0)'}), '(normalize=1.0)\n', (233133, 233148), False, 'from galpy import potential\n'), ((233370, 233398), 'galpy.potential.plotRotcurve', 'potential.plotRotcurve', (['[kp]'], {}), '([kp])\n', (233392, 233398), False, 'from galpy import potential\n'), ((233403, 233481), 'galpy.potential.plotRotcurve', 'potential.plotRotcurve', (['[kp]'], {'Rrange': '[0.01, 10.0]', 'grid': '(101)', 'savefilename': 'None'}), '([kp], Rrange=[0.01, 10.0], grid=101, savefilename=None)\n', (233425, 233481), False, 'from galpy import potential\n'), ((233599, 233617), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {}), '()\n', (233615, 233617), False, 'import tempfile\n'), ((234325, 234356), 'galpy.potential.plotEscapecurve', 'potential.plotEscapecurve', (['[kp]'], {}), '([kp])\n', (234350, 234356), False, 'from galpy import potential\n'), ((234361, 234447), 'galpy.potential.plotEscapecurve', 'potential.plotEscapecurve', (['[kp]'], {'Rrange': '[0.01, 10.0]', 'grid': '(101)', 'savefilename': 'None'}), '([kp], Rrange=[0.01, 10.0], grid=101, savefilename\n =None)\n', (234386, 234447), False, 'from galpy import potential\n'), ((234566, 234584), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {}), '()\n', (234582, 234584), False, 'import tempfile\n'), ((235408, 235426), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {}), '()\n', (235424, 235426), False, 'import tempfile\n'), ((236100, 236130), 'galpy.potential.plotPotentials', 'potential.plotPotentials', (['[kp]'], {}), '([kp])\n', (236124, 236130), False, 'from galpy import potential\n'), ((236197, 236215), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {}), '()\n', (236213, 236215), False, 'import tempfile\n'), ((237273, 237327), 'galpy.potential.plotPotentials', 'potential.plotPotentials', (['[kp]'], {'effective': '(True)', 'Lz': '(1.0)'}), '([kp], effective=True, Lz=1.0)\n', 
(237297, 237327), False, 'from galpy import potential\n'), ((237614, 237663), 'galpy.potential.LogarithmicHaloPotential', 'potential.LogarithmicHaloPotential', ([], {'normalize': '(1.0)'}), '(normalize=1.0)\n', (237648, 237663), False, 'from galpy import potential\n'), ((237946, 237964), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {}), '()\n', (237962, 237964), False, 'import tempfile\n'), ((238276, 238305), 'galpy.potential.plotDensities', 'potential.plotDensities', (['[lp]'], {}), '([lp])\n', (238299, 238305), False, 'from galpy import potential\n'), ((238310, 238499), 'galpy.potential.plotDensities', 'potential.plotDensities', (['[lp]'], {'t': '(1.0)', 'rmin': '(0.05)', 'rmax': '(1.8)', 'nrs': '(11)', 'zmin': '(-0.55)', 'zmax': '(0.55)', 'nzs': '(11)', 'aspect': '(1.0)', 'log': '(True)', 'xy': '(True)', 'justcontours': '(True)', 'ncontours': '(11)', 'savefilename': 'None'}), '([lp], t=1.0, rmin=0.05, rmax=1.8, nrs=11, zmin=-\n 0.55, zmax=0.55, nzs=11, aspect=1.0, log=True, xy=True, justcontours=\n True, ncontours=11, savefilename=None)\n', (238333, 238499), False, 'from galpy import potential\n'), ((238689, 238738), 'galpy.potential.LogarithmicHaloPotential', 'potential.LogarithmicHaloPotential', ([], {'normalize': '(1.0)'}), '(normalize=1.0)\n', (238723, 238738), False, 'from galpy import potential\n'), ((239073, 239091), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {}), '()\n', (239089, 239091), False, 'import tempfile\n'), ((239417, 239453), 'galpy.potential.plotSurfaceDensities', 'potential.plotSurfaceDensities', (['[lp]'], {}), '([lp])\n', (239447, 239453), False, 'from galpy import potential\n'), ((239458, 239652), 'galpy.potential.plotSurfaceDensities', 'potential.plotSurfaceDensities', (['[lp]'], {'t': '(1.0)', 'z': '(2.0)', 'xmin': '(0.05)', 'xmax': '(1.8)', 'nxs': '(11)', 'ymin': '(-0.55)', 'ymax': '(0.55)', 'nys': '(11)', 'aspect': '(1.0)', 'log': '(True)', 'justcontours': '(True)', 'ncontours': '(11)', 'savefilename': 'None'}), '([lp], t=1.0, z=2.0, 
xmin=0.05, xmax=1.8, nxs\n =11, ymin=-0.55, ymax=0.55, nys=11, aspect=1.0, log=True, justcontours=\n True, ncontours=11, savefilename=None)\n', (239488, 239652), False, 'from galpy import potential\n'), ((239915, 239933), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {}), '()\n', (239931, 239933), False, 'import tempfile\n'), ((240365, 240400), 'galpy.potential.EllipticalDiskPotential', 'potential.EllipticalDiskPotential', ([], {}), '()\n', (240398, 240400), False, 'from galpy import potential\n'), ((240433, 240451), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {}), '()\n', (240449, 240451), False, 'import tempfile\n'), ((240917, 240973), 'galpy.potential.plotplanarPotentials', 'potential.plotplanarPotentials', (['[dp]'], {'gridx': '(11)', 'gridy': '(11)'}), '([dp], gridx=11, gridy=11)\n', (240947, 240973), False, 'from galpy import potential\n'), ((241150, 241168), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {}), '()\n', (241166, 241168), False, 'import tempfile\n'), ((241550, 241568), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {}), '()\n', (241566, 241568), False, 'import tempfile\n'), ((259227, 259257), 'galpy.util.coords.cyl_to_spher', 'coords.cyl_to_spher', (['R', 'z', 'phi'], {}), '(R, z, phi)\n', (259246, 259257), False, 'from galpy.util import coords\n'), ((259378, 259408), 'galpy.util.coords.cyl_to_spher', 'coords.cyl_to_spher', (['R', 'z', 'phi'], {}), '(R, z, phi)\n', (259397, 259408), False, 'from galpy.util import coords\n'), ((259416, 259446), 'galpy.potential.HernquistPotential', 'potential.HernquistPotential', ([], {}), '()\n', (259444, 259446), False, 'from galpy import potential\n'), ((259581, 259611), 'galpy.util.coords.cyl_to_spher', 'coords.cyl_to_spher', (['R', 'z', 'phi'], {}), '(R, z, phi)\n', (259600, 259611), False, 'from galpy.util import coords\n'), ((66619, 66666), 'galpy.potential.toVerticalPotential', 'potential.toVerticalPotential', (['tp', '(0.8)'], {'phi': '(0.2)'}), '(tp, 0.8, phi=0.2)\n', (66648, 66666), False, 'from galpy 
import potential\n'), ((85353, 85388), 'scipy.special.gamma', 'special.gamma', (['(1.5 - pp.alpha / 2.0)'], {}), '(1.5 - pp.alpha / 2.0)\n', (85366, 85388), False, 'from scipy import special, integrate\n'), ((92276, 92313), 'galpy.orbit.Orbit', 'Orbit', (['[1.0, 0.1, 1.1, 0.1, 0.0, 0.0]'], {}), '([1.0, 0.1, 1.1, 0.1, 0.0, 0.0])\n', (92281, 92313), False, 'from galpy.orbit import Orbit\n'), ((92316, 92350), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (92329, 92350), False, 'import pytest\n'), ((92417, 92451), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (92430, 92451), False, 'import pytest\n'), ((92472, 92501), 'galpy.potential.mass', 'potential.mass', (['mop', '(1.0)', '(0.0)'], {}), '(mop, 1.0, 0.0)\n', (92486, 92501), False, 'from galpy import potential\n'), ((92507, 92541), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (92520, 92541), False, 'import pytest\n'), ((92562, 92593), 'galpy.potential.mass', 'potential.mass', (['[mop]', '(1.0)', '(0.0)'], {}), '([mop], 1.0, 0.0)\n', (92576, 92593), False, 'from galpy import potential\n'), ((99154, 99193), 'pytest.raises', 'pytest.raises', (['potential.PotentialError'], {}), '(potential.PotentialError)\n', (99167, 99193), False, 'import pytest\n'), ((99219, 99266), 'galpy.potential.RZToplanarPotential', 'potential.RZToplanarPotential', (['"""something else"""'], {}), "('something else')\n", (99248, 99266), False, 'from galpy import potential\n'), ((99367, 99406), 'pytest.raises', 'pytest.raises', (['potential.PotentialError'], {}), '(potential.PotentialError)\n', (99380, 99406), False, 'import pytest\n'), ((99432, 99473), 'galpy.potential.RZToplanarPotential', 'potential.RZToplanarPotential', (['[3, 4, 45]'], {}), '([3, 4, 45])\n', (99461, 99473), False, 'from galpy import potential\n'), ((99481, 99520), 'pytest.raises', 'pytest.raises', (['potential.PotentialError'], {}), 
'(potential.PotentialError)\n', (99494, 99520), False, 'import pytest\n'), ((99546, 99591), 'galpy.potential.RZToplanarPotential', 'potential.RZToplanarPotential', (['[lp, 3, 4, 45]'], {}), '([lp, 3, 4, 45])\n', (99575, 99591), False, 'from galpy import potential\n'), ((99736, 99775), 'pytest.raises', 'pytest.raises', (['potential.PotentialError'], {}), '(potential.PotentialError)\n', (99749, 99775), False, 'import pytest\n'), ((99801, 99836), 'galpy.potential.RZToplanarPotential', 'potential.RZToplanarPotential', (['lpna'], {}), '(lpna)\n', (99830, 99836), False, 'from galpy import potential\n'), ((99846, 99885), 'pytest.raises', 'pytest.raises', (['potential.PotentialError'], {}), '(potential.PotentialError)\n', (99859, 99885), False, 'import pytest\n'), ((99911, 99948), 'galpy.potential.RZToplanarPotential', 'potential.RZToplanarPotential', (['[lpna]'], {}), '([lpna])\n', (99940, 99948), False, 'from galpy import potential\n'), ((100245, 100279), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (100258, 100279), False, 'import pytest\n'), ((100305, 100346), 'galpy.potential.RZToplanarPotential', 'potential.RZToplanarPotential', (['[pp, cdfc]'], {}), '([pp, cdfc])\n', (100334, 100346), False, 'from galpy import potential\n'), ((100355, 100389), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (100368, 100389), False, 'import pytest\n'), ((100415, 100450), 'galpy.potential.RZToplanarPotential', 'potential.RZToplanarPotential', (['cdfc'], {}), '(cdfc)\n', (100444, 100450), False, 'from galpy import potential\n'), ((101267, 101312), 'galpy.potential.toPlanarPotential', 'potential.toPlanarPotential', (['"""something else"""'], {}), "('something else')\n", (101294, 101312), False, 'from galpy import potential\n'), ((101596, 101635), 'pytest.raises', 'pytest.raises', (['potential.PotentialError'], {}), '(potential.PotentialError)\n', (101609, 101635), False, 'import pytest\n'), 
((101661, 101700), 'galpy.potential.toPlanarPotential', 'potential.toPlanarPotential', (['[3, 4, 45]'], {}), '([3, 4, 45])\n', (101688, 101700), False, 'from galpy import potential\n'), ((101995, 102029), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (102008, 102029), False, 'import pytest\n'), ((102055, 102094), 'galpy.potential.toPlanarPotential', 'potential.toPlanarPotential', (['[pp, cdfc]'], {}), '([pp, cdfc])\n', (102082, 102094), False, 'from galpy import potential\n'), ((103004, 103043), 'pytest.raises', 'pytest.raises', (['potential.PotentialError'], {}), '(potential.PotentialError)\n', (103017, 103043), False, 'import pytest\n'), ((103069, 103123), 'galpy.potential.RZToverticalPotential', 'potential.RZToverticalPotential', (['"""something else"""', '(1.2)'], {}), "('something else', 1.2)\n", (103100, 103123), False, 'from galpy import potential\n'), ((103223, 103262), 'pytest.raises', 'pytest.raises', (['potential.PotentialError'], {}), '(potential.PotentialError)\n', (103236, 103262), False, 'import pytest\n'), ((103288, 103336), 'galpy.potential.RZToverticalPotential', 'potential.RZToverticalPotential', (['[3, 4, 45]', '(1.2)'], {}), '([3, 4, 45], 1.2)\n', (103319, 103336), False, 'from galpy import potential\n'), ((103343, 103382), 'pytest.raises', 'pytest.raises', (['potential.PotentialError'], {}), '(potential.PotentialError)\n', (103356, 103382), False, 'import pytest\n'), ((103408, 103460), 'galpy.potential.RZToverticalPotential', 'potential.RZToverticalPotential', (['[lp, 3, 4, 45]', '(1.2)'], {}), '([lp, 3, 4, 45], 1.2)\n', (103439, 103460), False, 'from galpy import potential\n'), ((103523, 103562), 'pytest.raises', 'pytest.raises', (['potential.PotentialError'], {}), '(potential.PotentialError)\n', (103536, 103562), False, 'import pytest\n'), ((103720, 103759), 'pytest.raises', 'pytest.raises', (['potential.PotentialError'], {}), '(potential.PotentialError)\n', (103733, 103759), False, 'import 
pytest\n'), ((103992, 104031), 'pytest.raises', 'pytest.raises', (['potential.PotentialError'], {}), '(potential.PotentialError)\n', (104005, 104031), False, 'import pytest\n'), ((104057, 104099), 'galpy.potential.RZToverticalPotential', 'potential.RZToverticalPotential', (['lpna', '(1.2)'], {}), '(lpna, 1.2)\n', (104088, 104099), False, 'from galpy import potential\n'), ((104108, 104147), 'pytest.raises', 'pytest.raises', (['potential.PotentialError'], {}), '(potential.PotentialError)\n', (104121, 104147), False, 'import pytest\n'), ((104173, 104217), 'galpy.potential.RZToverticalPotential', 'potential.RZToverticalPotential', (['[lpna]', '(1.2)'], {}), '([lpna], 1.2)\n', (104204, 104217), False, 'from galpy import potential\n'), ((104513, 104547), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (104526, 104547), False, 'import pytest\n'), ((104573, 104621), 'galpy.potential.RZToverticalPotential', 'potential.RZToverticalPotential', (['[pp, cdfc]', '(1.2)'], {}), '([pp, cdfc], 1.2)\n', (104604, 104621), False, 'from galpy import potential\n'), ((104629, 104663), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (104642, 104663), False, 'import pytest\n'), ((104689, 104731), 'galpy.potential.RZToverticalPotential', 'potential.RZToverticalPotential', (['cdfc', '(1.2)'], {}), '(cdfc, 1.2)\n', (104720, 104731), False, 'from galpy import potential\n'), ((105890, 105951), 'galpy.potential.toVerticalPotential', 'potential.toVerticalPotential', (['"""something else"""', '(1.2)'], {'phi': '(0.8)'}), "('something else', 1.2, phi=0.8)\n", (105919, 105951), False, 'from galpy import potential\n'), ((106223, 106262), 'pytest.raises', 'pytest.raises', (['potential.PotentialError'], {}), '(potential.PotentialError)\n', (106236, 106262), False, 'import pytest\n'), ((106427, 106466), 'pytest.raises', 'pytest.raises', (['potential.PotentialError'], {}), '(potential.PotentialError)\n', (106440, 
106466), False, 'import pytest\n'), ((106629, 106668), 'pytest.raises', 'pytest.raises', (['potential.PotentialError'], {}), '(potential.PotentialError)\n', (106642, 106668), False, 'import pytest\n'), ((106694, 106740), 'galpy.potential.toVerticalPotential', 'potential.toVerticalPotential', (['[3, 4, 45]', '(1.2)'], {}), '([3, 4, 45], 1.2)\n', (106723, 106740), False, 'from galpy import potential\n'), ((107034, 107068), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (107047, 107068), False, 'import pytest\n'), ((107094, 107149), 'galpy.potential.toVerticalPotential', 'potential.toVerticalPotential', (['[pp, cdfc]', '(1.2)'], {'phi': '(0.8)'}), '([pp, cdfc], 1.2, phi=0.8)\n', (107123, 107149), False, 'from galpy import potential\n'), ((107156, 107190), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (107169, 107190), False, 'import pytest\n'), ((107216, 107265), 'galpy.potential.toVerticalPotential', 'potential.toVerticalPotential', (['cdfc', '(1.2)'], {'phi': '(0.8)'}), '(cdfc, 1.2, phi=0.8)\n', (107245, 107265), False, 'from galpy import potential\n'), ((107375, 107414), 'pytest.raises', 'pytest.raises', (['potential.PotentialError'], {}), '(potential.PotentialError)\n', (107388, 107414), False, 'import pytest\n'), ((107441, 107480), 'galpy.potential.toVerticalPotential', 'potential.toVerticalPotential', (['tnp', '(1.2)'], {}), '(tnp, 1.2)\n', (107470, 107480), False, 'from galpy import potential\n'), ((115273, 115301), 'galpy.potential.plotRotcurve', 'potential.plotRotcurve', (['[dp]'], {}), '([dp])\n', (115295, 115301), False, 'from galpy import potential\n'), ((115536, 115567), 'galpy.potential.plotEscapecurve', 'potential.plotEscapecurve', (['[dp]'], {}), '([dp])\n', (115561, 115567), False, 'from galpy import potential\n'), ((118587, 118641), 'galpy.potential.LogarithmicHaloPotential', 'potential.LogarithmicHaloPotential', ([], {'normalize': '(1.0)', 'q': 'q'}), 
'(normalize=1.0, q=q)\n', (118621, 118641), False, 'from galpy import potential\n'), ((121943, 122016), 'galpy.potential.DoubleExponentialDiskPotential', 'potential.DoubleExponentialDiskPotential', ([], {'normalize': '(1.0)', 'hr': '(0.05)', 'hz': '(0.01)'}), '(normalize=1.0, hr=0.05, hz=0.01)\n', (121983, 122016), False, 'from galpy import potential\n'), ((122576, 122619), 'galpy.potential.evaluateplanarPotentials', 'potential.evaluateplanarPotentials', (['dp', '(1.0)'], {}), '(dp, 1.0)\n', (122610, 122619), False, 'from galpy import potential\n'), ((122831, 122871), 'galpy.potential.evaluateplanarRforces', 'potential.evaluateplanarRforces', (['dp', '(1.0)'], {}), '(dp, 1.0)\n', (122862, 122871), False, 'from galpy import potential\n'), ((123080, 123122), 'galpy.potential.evaluateplanarphiforces', 'potential.evaluateplanarphiforces', (['dp', '(1.0)'], {}), '(dp, 1.0)\n', (123113, 123122), False, 'from galpy import potential\n'), ((123333, 123374), 'galpy.potential.evaluateplanarR2derivs', 'potential.evaluateplanarR2derivs', (['dp', '(1.0)'], {}), '(dp, 1.0)\n', (123365, 123374), False, 'from galpy import potential\n'), ((123842, 123861), 'numpy.ones_like', 'numpy.ones_like', (['rs'], {}), '(rs)\n', (123857, 123861), False, 'import numpy\n'), ((126063, 126082), 'numpy.ones_like', 'numpy.ones_like', (['rs'], {}), '(rs)\n', (126078, 126082), False, 'import numpy\n'), ((126096, 126115), 'numpy.ones_like', 'numpy.ones_like', (['rs'], {}), '(rs)\n', (126111, 126115), False, 'import numpy\n'), ((133651, 133670), 'numpy.ones_like', 'numpy.ones_like', (['rs'], {}), '(rs)\n', (133666, 133670), False, 'import numpy\n'), ((133684, 133703), 'numpy.ones_like', 'numpy.ones_like', (['rs'], {}), '(rs)\n', (133699, 133703), False, 'import numpy\n'), ((140036, 140068), 'numpy.fabs', 'numpy.fabs', (['(pot[1]._a - 3.0 / R0)'], {}), '(pot[1]._a - 3.0 / R0)\n', (140046, 140068), False, 'import numpy\n'), ((140139, 140172), 'numpy.fabs', 'numpy.fabs', (['(pot[1]._b - 0.28 / R0)'], {}), 
'(pot[1]._b - 0.28 / R0)\n', (140149, 140172), False, 'import numpy\n'), ((140392, 140424), 'numpy.fabs', 'numpy.fabs', (['(pot[2].a - 16.0 / R0)'], {}), '(pot[2].a - 16.0 / R0)\n', (140402, 140424), False, 'import numpy\n'), ((151721, 151745), 'numpy.fabs', 'numpy.fabs', (['(np.a - tnp.a)'], {}), '(np.a - tnp.a)\n', (151731, 151745), False, 'import numpy\n'), ((151834, 151894), 'numpy.fabs', 'numpy.fabs', (['(np._amp - tnp._amp * 4.0 * numpy.pi * tnp.a ** 3)'], {}), '(np._amp - tnp._amp * 4.0 * numpy.pi * tnp.a ** 3)\n', (151844, 151894), False, 'import numpy\n'), ((152554, 152578), 'numpy.fabs', 'numpy.fabs', (['(np.a - tnp.a)'], {}), '(np.a - tnp.a)\n', (152564, 152578), False, 'import numpy\n'), ((152667, 152727), 'numpy.fabs', 'numpy.fabs', (['(np._amp - tnp._amp * 4.0 * numpy.pi * tnp.a ** 3)'], {}), '(np._amp - tnp._amp * 4.0 * numpy.pi * tnp.a ** 3)\n', (152677, 152727), False, 'import numpy\n'), ((154296, 154360), 'galpy.potential.NFWPotential', 'potential.NFWPotential', ([], {'amp': '(2.0)', 'a': '(3.0)', 'ro': 'ro_setup', 'vo': 'vo_setup'}), '(amp=2.0, a=3.0, ro=ro_setup, vo=vo_setup)\n', (154318, 154360), False, 'from galpy import potential\n'), ((156063, 156090), 'numpy.fabs', 'numpy.fabs', (['(rmax_opt - rmax)'], {}), '(rmax_opt - rmax)\n', (156073, 156090), False, 'import numpy\n'), ((156895, 156909), 'galpy.potential.epifreq', 'epifreq', (['lp', 'R'], {}), '(lp, R)\n', (156902, 156909), False, 'from galpy.potential import LinShuReductionFactor, LogarithmicHaloPotential, omegac, epifreq\n'), ((156910, 156923), 'galpy.potential.omegac', 'omegac', (['lp', 'R'], {}), '(lp, R)\n', (156916, 156923), False, 'from galpy.potential import LinShuReductionFactor, LogarithmicHaloPotential, omegac, epifreq\n'), ((157822, 157854), 'galpy.potential.LinShuReductionFactor', 'LinShuReductionFactor', (['lp', 'R', 'sr'], {}), '(lp, R, sr)\n', (157843, 157854), False, 'from galpy.potential import LinShuReductionFactor, LogarithmicHaloPotential, omegac, epifreq\n'), 
((160043, 160092), 'galpy.potential.nemo_accname', 'potential.nemo_accname', (['potential.MWPotential2014'], {}), '(potential.MWPotential2014)\n', (160065, 160092), False, 'from galpy import potential\n'), ((160278, 160304), 'galpy.potential.nemo_accname', 'potential.nemo_accname', (['pp'], {}), '(pp)\n', (160300, 160304), False, 'from galpy import potential\n'), ((167638, 167693), 'galpy.potential.MN3ExponentialDiskPotential', 'potential.MN3ExponentialDiskPotential', ([], {'amp': '(1.0)', 'hz': '(50.0)'}), '(amp=1.0, hz=50.0)\n', (167675, 167693), False, 'from galpy import potential\n'), ((168017, 168053), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (168040, 168053), False, 'import warnings\n'), ((168068, 168113), 'warnings.simplefilter', 'warnings.simplefilter', (['"""always"""', 'galpyWarning'], {}), "('always', galpyWarning)\n", (168089, 168113), False, 'import warnings\n'), ((168125, 168185), 'galpy.potential.MN3ExponentialDiskPotential', 'MN3ExponentialDiskPotential', ([], {'normalize': '(1.0)', 'hz': '(1.438)', 'hr': '(1.0)'}), '(normalize=1.0, hz=1.438, hr=1.0)\n', (168152, 168185), False, 'from galpy.potential import TwoPowerSphericalPotential, MiyamotoNagaiPotential, PowerSphericalPotential, interpRZPotential, MWPotential, FlattenedPowerPotential, MN3ExponentialDiskPotential, TriaxialHernquistPotential, TriaxialNFWPotential, TriaxialJaffePotential, TwoPowerTriaxialPotential, BurkertPotential, SoftenedNeedleBarPotential, FerrersPotential, DiskSCFPotential, SpiralArmsPotential, LogarithmicHaloPotential\n'), ((168569, 168605), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (168592, 168605), False, 'import warnings\n'), ((168620, 168665), 'warnings.simplefilter', 'warnings.simplefilter', (['"""always"""', 'galpyWarning'], {}), "('always', galpyWarning)\n", (168641, 168665), False, 'import warnings\n'), ((168677, 168752), 
'galpy.potential.MN3ExponentialDiskPotential', 'MN3ExponentialDiskPotential', ([], {'normalize': '(1.0)', 'hr': '(1.0)', 'hz': '(0.7727)', 'posdens': '(True)'}), '(normalize=1.0, hr=1.0, hz=0.7727, posdens=True)\n', (168704, 168752), False, 'from galpy.potential import TwoPowerSphericalPotential, MiyamotoNagaiPotential, PowerSphericalPotential, interpRZPotential, MWPotential, FlattenedPowerPotential, MN3ExponentialDiskPotential, TriaxialHernquistPotential, TriaxialNFWPotential, TriaxialJaffePotential, TwoPowerTriaxialPotential, BurkertPotential, SoftenedNeedleBarPotential, FerrersPotential, DiskSCFPotential, SpiralArmsPotential, LogarithmicHaloPotential\n'), ((169287, 169314), 'numpy.fabs', 'numpy.fabs', (['(mn._brd - 1.875)'], {}), '(mn._brd - 1.875)\n', (169297, 169314), False, 'import numpy\n'), ((169463, 169489), 'numpy.fabs', 'numpy.fabs', (['(mn._brd - 0.75)'], {}), '(mn._brd - 0.75)\n', (169473, 169489), False, 'import numpy\n'), ((169648, 169673), 'numpy.fabs', 'numpy.fabs', (['(mn._brd - 2.1)'], {}), '(mn._brd - 2.1)\n', (169658, 169673), False, 'import numpy\n'), ((169816, 169841), 'numpy.fabs', 'numpy.fabs', (['(mn._brd - 0.9)'], {}), '(mn._brd - 0.9)\n', (169826, 169841), False, 'import numpy\n'), ((176248, 176270), 'pytest.raises', 'pytest.raises', (['IOError'], {}), '(IOError)\n', (176261, 176270), False, 'import pytest\n'), ((176298, 176344), 'galpy.potential.TwoPowerTriaxialPotential', 'potential.TwoPowerTriaxialPotential', ([], {'alpha': '(3.5)'}), '(alpha=3.5)\n', (176333, 176344), False, 'from galpy import potential\n'), ((176421, 176443), 'pytest.raises', 'pytest.raises', (['IOError'], {}), '(IOError)\n', (176434, 176443), False, 'import pytest\n'), ((176471, 176516), 'galpy.potential.TwoPowerTriaxialPotential', 'potential.TwoPowerTriaxialPotential', ([], {'beta': '(1.0)'}), '(beta=1.0)\n', (176506, 176516), False, 'from galpy import potential\n'), ((176680, 176702), 'pytest.raises', 'pytest.raises', (['IOError'], {}), '(IOError)\n', (176693, 
176702), False, 'import pytest\n'), ((176730, 176776), 'galpy.potential.DehnenSphericalPotential', 'potential.DehnenSphericalPotential', ([], {'alpha': '(-0.5)'}), '(alpha=-0.5)\n', (176764, 176776), False, 'from galpy import potential\n'), ((176785, 176807), 'pytest.raises', 'pytest.raises', (['IOError'], {}), '(IOError)\n', (176798, 176807), False, 'import pytest\n'), ((176835, 176880), 'galpy.potential.DehnenSphericalPotential', 'potential.DehnenSphericalPotential', ([], {'alpha': '(3.5)'}), '(alpha=3.5)\n', (176869, 176880), False, 'from galpy import potential\n'), ((177006, 177031), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (177019, 177031), False, 'import pytest\n'), ((177059, 177093), 'galpy.potential.FerrersPotential', 'potential.FerrersPotential', ([], {'n': '(-1.0)'}), '(n=-1.0)\n', (177085, 177093), False, 'from galpy import potential\n'), ((177253, 177278), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (177266, 177278), False, 'import pytest\n'), ((177306, 177361), 'galpy.potential.SphericalShellPotential', 'potential.SphericalShellPotential', ([], {'normalize': '(1.0)', 'a': '(2.0)'}), '(normalize=1.0, a=2.0)\n', (177339, 177361), False, 'from galpy import potential\n'), ((177499, 177524), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (177512, 177524), False, 'import pytest\n'), ((177552, 177597), 'galpy.potential.RingPotential', 'potential.RingPotential', ([], {'normalize': '(1.0)', 'a': '(2.0)'}), '(normalize=1.0, a=2.0)\n', (177575, 177597), False, 'from galpy import potential\n'), ((178086, 178128), 'numpy.fabs', 'numpy.fabs', (['(phis - 30.0 / 180.0 * numpy.pi)'], {}), '(phis - 30.0 / 180.0 * numpy.pi)\n', (178096, 178128), False, 'import numpy\n'), ((178402, 178444), 'numpy.fabs', 'numpy.fabs', (['(phis - 30.0 / 180.0 * numpy.pi)'], {}), '(phis - 30.0 / 180.0 * numpy.pi)\n', (178412, 178444), False, 'import numpy\n'), ((178958, 179001), 'numpy.fabs', 
'numpy.fabs', (['(phis - 120.0 / 180.0 * numpy.pi)'], {}), '(phis - 120.0 / 180.0 * numpy.pi)\n', (178968, 179001), False, 'import numpy\n'), ((179275, 179318), 'numpy.fabs', 'numpy.fabs', (['(phis - 120.0 / 180.0 * numpy.pi)'], {}), '(phis - 120.0 / 180.0 * numpy.pi)\n', (179285, 179318), False, 'import numpy\n'), ((179943, 179958), 'numpy.cos', 'numpy.cos', (['phis'], {}), '(phis)\n', (179952, 179958), False, 'import numpy\n'), ((179970, 179985), 'numpy.sin', 'numpy.sin', (['phis'], {}), '(phis)\n', (179979, 179985), False, 'import numpy\n'), ((180163, 180205), 'numpy.fabs', 'numpy.fabs', (['(phis - 30.0 / 180.0 * numpy.pi)'], {}), '(phis - 30.0 / 180.0 * numpy.pi)\n', (180173, 180205), False, 'import numpy\n'), ((180486, 180528), 'numpy.fabs', 'numpy.fabs', (['(phis - 30.0 / 180.0 * numpy.pi)'], {}), '(phis - 30.0 / 180.0 * numpy.pi)\n', (180496, 180528), False, 'import numpy\n'), ((180982, 180997), 'numpy.cos', 'numpy.cos', (['phis'], {}), '(phis)\n', (180991, 180997), False, 'import numpy\n'), ((181040, 181055), 'numpy.sin', 'numpy.sin', (['phis'], {}), '(phis)\n', (181049, 181055), False, 'import numpy\n'), ((181233, 181276), 'numpy.fabs', 'numpy.fabs', (['(phis - 120.0 / 180.0 * numpy.pi)'], {}), '(phis - 120.0 / 180.0 * numpy.pi)\n', (181243, 181276), False, 'import numpy\n'), ((181557, 181600), 'numpy.fabs', 'numpy.fabs', (['(phis - 120.0 / 180.0 * numpy.pi)'], {}), '(phis - 120.0 / 180.0 * numpy.pi)\n', (181567, 181600), False, 'import numpy\n'), ((181927, 181966), 'pytest.raises', 'pytest.raises', (['potential.PotentialError'], {}), '(potential.PotentialError)\n', (181940, 181966), False, 'import pytest\n'), ((181987, 182030), 'galpy.potential.evaluatePotentials', 'potential.evaluatePotentials', (['tnp', '(1.0)', '(0.0)'], {}), '(tnp, 1.0, 0.0)\n', (182015, 182030), False, 'from galpy import potential\n'), ((182036, 182075), 'pytest.raises', 'pytest.raises', (['potential.PotentialError'], {}), '(potential.PotentialError)\n', (182049, 182075), False, 
'import pytest\n'), ((182096, 182138), 'galpy.potential.evaluateDensities', 'potential.evaluateDensities', (['tnp', '(1.0)', '(0.0)'], {}), '(tnp, 1.0, 0.0)\n', (182123, 182138), False, 'from galpy import potential\n'), ((182144, 182183), 'pytest.raises', 'pytest.raises', (['potential.PotentialError'], {}), '(potential.PotentialError)\n', (182157, 182183), False, 'import pytest\n'), ((182204, 182244), 'galpy.potential.evaluateRforces', 'potential.evaluateRforces', (['tnp', '(1.0)', '(0.0)'], {}), '(tnp, 1.0, 0.0)\n', (182229, 182244), False, 'from galpy import potential\n'), ((182250, 182289), 'pytest.raises', 'pytest.raises', (['potential.PotentialError'], {}), '(potential.PotentialError)\n', (182263, 182289), False, 'import pytest\n'), ((182310, 182350), 'galpy.potential.evaluatezforces', 'potential.evaluatezforces', (['tnp', '(1.0)', '(0.0)'], {}), '(tnp, 1.0, 0.0)\n', (182335, 182350), False, 'from galpy import potential\n'), ((182356, 182395), 'pytest.raises', 'pytest.raises', (['potential.PotentialError'], {}), '(potential.PotentialError)\n', (182369, 182395), False, 'import pytest\n'), ((182416, 182458), 'galpy.potential.evaluatephiforces', 'potential.evaluatephiforces', (['tnp', '(1.0)', '(0.0)'], {}), '(tnp, 1.0, 0.0)\n', (182443, 182458), False, 'from galpy import potential\n'), ((182464, 182503), 'pytest.raises', 'pytest.raises', (['potential.PotentialError'], {}), '(potential.PotentialError)\n', (182477, 182503), False, 'import pytest\n'), ((182524, 182565), 'galpy.potential.evaluateR2derivs', 'potential.evaluateR2derivs', (['tnp', '(1.0)', '(0.0)'], {}), '(tnp, 1.0, 0.0)\n', (182550, 182565), False, 'from galpy import potential\n'), ((182571, 182610), 'pytest.raises', 'pytest.raises', (['potential.PotentialError'], {}), '(potential.PotentialError)\n', (182584, 182610), False, 'import pytest\n'), ((182631, 182672), 'galpy.potential.evaluatez2derivs', 'potential.evaluatez2derivs', (['tnp', '(1.0)', '(0.0)'], {}), '(tnp, 1.0, 0.0)\n', (182657, 182672), 
False, 'from galpy import potential\n'), ((182678, 182717), 'pytest.raises', 'pytest.raises', (['potential.PotentialError'], {}), '(potential.PotentialError)\n', (182691, 182717), False, 'import pytest\n'), ((182738, 182779), 'galpy.potential.evaluateRzderivs', 'potential.evaluateRzderivs', (['tnp', '(1.0)', '(0.0)'], {}), '(tnp, 1.0, 0.0)\n', (182764, 182779), False, 'from galpy import potential\n'), ((182785, 182824), 'pytest.raises', 'pytest.raises', (['potential.PotentialError'], {}), '(potential.PotentialError)\n', (182798, 182824), False, 'import pytest\n'), ((182845, 182888), 'galpy.potential.evaluatephi2derivs', 'potential.evaluatephi2derivs', (['tnp', '(1.0)', '(0.0)'], {}), '(tnp, 1.0, 0.0)\n', (182873, 182888), False, 'from galpy import potential\n'), ((182894, 182933), 'pytest.raises', 'pytest.raises', (['potential.PotentialError'], {}), '(potential.PotentialError)\n', (182907, 182933), False, 'import pytest\n'), ((182954, 182997), 'galpy.potential.evaluateRphiderivs', 'potential.evaluateRphiderivs', (['tnp', '(1.0)', '(0.0)'], {}), '(tnp, 1.0, 0.0)\n', (182982, 182997), False, 'from galpy import potential\n'), ((183003, 183042), 'pytest.raises', 'pytest.raises', (['potential.PotentialError'], {}), '(potential.PotentialError)\n', (183016, 183042), False, 'import pytest\n'), ((183063, 183103), 'galpy.potential.evaluaterforces', 'potential.evaluaterforces', (['tnp', '(1.0)', '(0.0)'], {}), '(tnp, 1.0, 0.0)\n', (183088, 183103), False, 'from galpy import potential\n'), ((183109, 183148), 'pytest.raises', 'pytest.raises', (['potential.PotentialError'], {}), '(potential.PotentialError)\n', (183122, 183148), False, 'import pytest\n'), ((183169, 183210), 'galpy.potential.evaluater2derivs', 'potential.evaluater2derivs', (['tnp', '(1.0)', '(0.0)'], {}), '(tnp, 1.0, 0.0)\n', (183195, 183210), False, 'from galpy import potential\n'), ((183216, 183255), 'pytest.raises', 'pytest.raises', (['potential.PotentialError'], {}), '(potential.PotentialError)\n', (183229, 
183255), False, 'import pytest\n'), ((183276, 183325), 'galpy.potential.evaluateSurfaceDensities', 'potential.evaluateSurfaceDensities', (['tnp', '(1.0)', '(0.1)'], {}), '(tnp, 1.0, 0.1)\n', (183310, 183325), False, 'from galpy import potential\n'), ((188662, 188687), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (188675, 188687), False, 'import pytest\n'), ((189850, 189873), 'numpy.ones_like', 'numpy.ones_like', (['testzs'], {}), '(testzs)\n', (189865, 189873), False, 'import numpy\n'), ((189893, 189916), 'numpy.ones_like', 'numpy.ones_like', (['testRs'], {}), '(testRs)\n', (189908, 189916), False, 'import numpy\n'), ((192098, 192121), 'numpy.ones_like', 'numpy.ones_like', (['testzs'], {}), '(testzs)\n', (192113, 192121), False, 'import numpy\n'), ((192141, 192164), 'numpy.ones_like', 'numpy.ones_like', (['testRs'], {}), '(testRs)\n', (192156, 192164), False, 'import numpy\n'), ((194633, 194658), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (194646, 194658), False, 'import pytest\n'), ((194679, 194724), 'galpy.potential.DehnenSmoothWrapperPotential', 'potential.DehnenSmoothWrapperPotential', ([], {'pot': '(1)'}), '(pot=1)\n', (194717, 194724), False, 'from galpy import potential\n'), ((195020, 195049), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (195033, 195049), False, 'import pytest\n'), ((195070, 195136), 'galpy.potential.DehnenSmoothWrapperPotential', 'potential.DehnenSmoothWrapperPotential', ([], {'pot': 'hp', 'ro': '(1.1 * ro)', 'vo': 'vo'}), '(pot=hp, ro=1.1 * ro, vo=vo)\n', (195108, 195136), False, 'from galpy import potential\n'), ((195142, 195171), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (195155, 195171), False, 'import pytest\n'), ((195192, 195258), 'galpy.potential.DehnenSmoothWrapperPotential', 'potential.DehnenSmoothWrapperPotential', ([], {'pot': 'hp', 'ro': 'ro', 'vo': '(vo * 1.1)'}), '(pot=hp, ro=ro, vo=vo * 
1.1)\n', (195230, 195258), False, 'from galpy import potential\n'), ((195264, 195293), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (195277, 195293), False, 'import pytest\n'), ((195314, 195386), 'galpy.potential.DehnenSmoothWrapperPotential', 'potential.DehnenSmoothWrapperPotential', ([], {'pot': 'hp', 'ro': '(1.1 * ro)', 'vo': '(vo * 1.1)'}), '(pot=hp, ro=1.1 * ro, vo=vo * 1.1)\n', (195352, 195386), False, 'from galpy import potential\n'), ((195475, 195504), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (195488, 195504), False, 'import pytest\n'), ((195525, 195591), 'galpy.potential.DehnenSmoothWrapperPotential', 'potential.DehnenSmoothWrapperPotential', ([], {'pot': 'hp', 'ro': '(1.1 * ro)', 'vo': 'vo'}), '(pot=hp, ro=1.1 * ro, vo=vo)\n', (195563, 195591), False, 'from galpy import potential\n'), ((195597, 195626), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (195610, 195626), False, 'import pytest\n'), ((195647, 195713), 'galpy.potential.DehnenSmoothWrapperPotential', 'potential.DehnenSmoothWrapperPotential', ([], {'pot': 'hp', 'ro': 'ro', 'vo': '(vo * 1.1)'}), '(pot=hp, ro=ro, vo=vo * 1.1)\n', (195685, 195713), False, 'from galpy import potential\n'), ((195719, 195748), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (195732, 195748), False, 'import pytest\n'), ((195769, 195841), 'galpy.potential.DehnenSmoothWrapperPotential', 'potential.DehnenSmoothWrapperPotential', ([], {'pot': 'hp', 'ro': '(1.1 * ro)', 'vo': '(vo * 1.1)'}), '(pot=hp, ro=1.1 * ro, vo=vo * 1.1)\n', (195807, 195841), False, 'from galpy import potential\n'), ((196399, 196430), 'numpy.fabs', 'numpy.fabs', (["(hpw_phys['ro'] - ro)"], {}), "(hpw_phys['ro'] - ro)\n", (196409, 196430), False, 'import numpy\n'), ((196527, 196558), 'numpy.fabs', 'numpy.fabs', (["(hpw_phys['vo'] - vo)"], {}), "(hpw_phys['vo'] - vo)\n", (196537, 196558), False, 'import 
numpy\n'), ((197014, 197045), 'numpy.fabs', 'numpy.fabs', (["(hpw_phys['ro'] - ro)"], {}), "(hpw_phys['ro'] - ro)\n", (197024, 197045), False, 'import numpy\n'), ((197501, 197532), 'numpy.fabs', 'numpy.fabs', (["(hpw_phys['vo'] - vo)"], {}), "(hpw_phys['vo'] - vo)\n", (197511, 197532), False, 'import numpy\n'), ((198192, 198223), 'numpy.fabs', 'numpy.fabs', (["(hpw_phys['ro'] - ro)"], {}), "(hpw_phys['ro'] - ro)\n", (198202, 198223), False, 'import numpy\n'), ((198320, 198351), 'numpy.fabs', 'numpy.fabs', (["(hpw_phys['vo'] - vo)"], {}), "(hpw_phys['vo'] - vo)\n", (198330, 198351), False, 'import numpy\n'), ((198818, 198849), 'numpy.fabs', 'numpy.fabs', (["(hpw_phys['ro'] - ro)"], {}), "(hpw_phys['ro'] - ro)\n", (198828, 198849), False, 'import numpy\n'), ((199316, 199347), 'numpy.fabs', 'numpy.fabs', (["(hpw_phys['vo'] - vo)"], {}), "(hpw_phys['vo'] - vo)\n", (199326, 199347), False, 'import numpy\n'), ((202979, 203018), 'pytest.raises', 'pytest.raises', (['potential.PotentialError'], {}), '(potential.PotentialError)\n', (202992, 203018), False, 'import pytest\n'), ((203046, 203098), 'galpy.potential.evaluateRforces', 'potential.evaluateRforces', (['[lp, cdfc]', 'R', 'z'], {'phi': 'phi'}), '([lp, cdfc], R, z, phi=phi)\n', (203071, 203098), False, 'from galpy import potential\n'), ((203104, 203143), 'pytest.raises', 'pytest.raises', (['potential.PotentialError'], {}), '(potential.PotentialError)\n', (203117, 203143), False, 'import pytest\n'), ((203171, 203225), 'galpy.potential.evaluatephiforces', 'potential.evaluatephiforces', (['[lp, cdfc]', 'R', 'z'], {'phi': 'phi'}), '([lp, cdfc], R, z, phi=phi)\n', (203198, 203225), False, 'from galpy import potential\n'), ((203231, 203270), 'pytest.raises', 'pytest.raises', (['potential.PotentialError'], {}), '(potential.PotentialError)\n', (203244, 203270), False, 'import pytest\n'), ((203298, 203350), 'galpy.potential.evaluatezforces', 'potential.evaluatezforces', (['[lp, cdfc]', 'R', 'z'], {'phi': 'phi'}), '([lp, cdfc], 
R, z, phi=phi)\n', (203323, 203350), False, 'from galpy import potential\n'), ((203356, 203395), 'pytest.raises', 'pytest.raises', (['potential.PotentialError'], {}), '(potential.PotentialError)\n', (203369, 203395), False, 'import pytest\n'), ((203423, 203475), 'galpy.potential.evaluaterforces', 'potential.evaluaterforces', (['[lp, cdfc]', 'R', 'z'], {'phi': 'phi'}), '([lp, cdfc], R, z, phi=phi)\n', (203448, 203475), False, 'from galpy import potential\n'), ((205946, 205986), 'numpy.fabs', 'numpy.fabs', (['(rzderiv - rzderiv_finitediff)'], {}), '(rzderiv - rzderiv_finitediff)\n', (205956, 205986), False, 'import numpy\n'), ((207028, 207067), 'pytest.raises', 'pytest.raises', (['potential.PotentialError'], {}), '(potential.PotentialError)\n', (207041, 207067), False, 'import pytest\n'), ((207120, 207159), 'pytest.raises', 'pytest.raises', (['potential.PotentialError'], {}), '(potential.PotentialError)\n', (207133, 207159), False, 'import pytest\n'), ((207187, 207218), 'galpy.potential.rtide', 'potential.rtide', (['[lp]', '(1.0)', '(0.0)'], {}), '([lp], 1.0, 0.0)\n', (207202, 207218), False, 'from galpy import potential\n'), ((208825, 208865), 'galpy.potential.KeplerPotential', 'potential.KeplerPotential', ([], {'normalize': '(1.0)'}), '(normalize=1.0)\n', (208850, 208865), False, 'from galpy import potential\n'), ((208877, 208933), 'galpy.potential.LogarithmicHaloPotential', 'potential.LogarithmicHaloPotential', ([], {'normalize': '(3.0)', 'q': '(0.8)'}), '(normalize=3.0, q=0.8)\n', (208911, 208933), False, 'from galpy import potential\n'), ((208944, 209005), 'galpy.potential.MiyamotoNagaiPotential', 'potential.MiyamotoNagaiPotential', ([], {'normalize': '(0.5)', 'a': '(3.0)', 'b': '(0.5)'}), '(normalize=0.5, a=3.0, b=0.5)\n', (208976, 209005), False, 'from galpy import potential\n'), ((209650, 209689), 'pytest.raises', 'pytest.raises', (['potential.PotentialError'], {}), '(potential.PotentialError)\n', (209663, 209689), False, 'import pytest\n'), ((209747, 
209786), 'pytest.raises', 'pytest.raises', (['potential.PotentialError'], {}), '(potential.PotentialError)\n', (209760, 209786), False, 'import pytest\n'), ((209814, 209850), 'galpy.potential.ttensor', 'potential.ttensor', (['lp', '(1.0)', '(0.0)', '(0.0)'], {}), '(lp, 1.0, 0.0, 0.0)\n', (209831, 209850), False, 'from galpy import potential\n'), ((212846, 212910), 'galpy.potential.evaluatePotentials', 'potential.evaluatePotentials', (['potential.MWPotential2014', 'Rc', '(0.0)'], {}), '(potential.MWPotential2014, Rc, 0.0)\n', (212874, 212910), False, 'from galpy import potential\n'), ((212985, 213031), 'galpy.potential.vcirc', 'potential.vcirc', (['potential.MWPotential2014', 'Rc'], {}), '(potential.MWPotential2014, Rc)\n', (213000, 213031), False, 'from galpy import potential\n'), ((217319, 217381), 'galpy.potential.zvc', 'potential.zvc', (['potential.MWPotential2014', '(Rmin - 0.0001)', 'E', 'Lz'], {}), '(potential.MWPotential2014, Rmin - 0.0001, E, Lz)\n', (217332, 217381), False, 'from galpy import potential\n'), ((217429, 217491), 'galpy.potential.zvc', 'potential.zvc', (['potential.MWPotential2014', '(Rmax + 0.0001)', 'E', 'Lz'], {}), '(potential.MWPotential2014, Rmax + 0.0001, E, Lz)\n', (217442, 217491), False, 'from galpy import potential\n'), ((217664, 217726), 'galpy.potential.zvc', 'potential.zvc', (['potential.MWPotential2014', '(Rmin - 0.0001)', 'E', 'Lz'], {}), '(potential.MWPotential2014, Rmin - 0.0001, E, Lz)\n', (217677, 217726), False, 'from galpy import potential\n'), ((217774, 217836), 'galpy.potential.zvc', 'potential.zvc', (['potential.MWPotential2014', '(Rmax + 0.0001)', 'E', 'Lz'], {}), '(potential.MWPotential2014, Rmax + 0.0001, E, Lz)\n', (217787, 217836), False, 'from galpy import potential\n'), ((218011, 218036), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (218024, 218036), False, 'import pytest\n'), ((218057, 218115), 'galpy.potential.zvc', 'potential.zvc', (['potential.MWPotential2014', '(0.7)', '(E + 
100)', 'Lz'], {}), '(potential.MWPotential2014, 0.7, E + 100, Lz)\n', (218070, 218115), False, 'from galpy import potential\n'), ((220665, 220693), 'numpy.array', 'numpy.array', (['[0.5, 1.0, 2.0]'], {}), '([0.5, 1.0, 2.0])\n', (220676, 220693), False, 'import numpy\n'), ((220702, 220748), 'numpy.array', 'numpy.array', (['[0.0, 0.125, -0.125, 0.25, -0.25]'], {}), '([0.0, 0.125, -0.125, 0.25, -0.25])\n', (220713, 220748), False, 'import numpy\n'), ((220756, 220842), 'numpy.array', 'numpy.array', (['[0.0, 0.5, -0.5, 1.0, -1.0, numpy.pi, 0.5 + numpy.pi, 1.0 + numpy.pi]'], {}), '([0.0, 0.5, -0.5, 1.0, -1.0, numpy.pi, 0.5 + numpy.pi, 1.0 +\n numpy.pi])\n', (220767, 220842), False, 'import numpy\n'), ((224523, 224559), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (224546, 224559), False, 'import warnings\n'), ((224569, 224614), 'warnings.simplefilter', 'warnings.simplefilter', (['"""error"""', 'FutureWarning'], {}), "('error', FutureWarning)\n", (224590, 224614), False, 'import warnings\n'), ((224749, 224785), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (224772, 224785), False, 'import warnings\n'), ((224795, 224840), 'warnings.simplefilter', 'warnings.simplefilter', (['"""error"""', 'FutureWarning'], {}), "('error', FutureWarning)\n", (224816, 224840), False, 'import warnings\n'), ((225173, 225197), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (225186, 225197), False, 'import pytest\n'), ((225239, 225263), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (225252, 225263), False, 'import pytest\n'), ((225305, 225329), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (225318, 225329), False, 'import pytest\n'), ((225455, 225479), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (225468, 225479), False, 'import pytest\n'), ((225521, 225545), 'pytest.raises', 
'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (225534, 225545), False, 'import pytest\n'), ((225587, 225611), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (225600, 225611), False, 'import pytest\n'), ((225742, 225766), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (225755, 225766), False, 'import pytest\n'), ((225808, 225832), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (225821, 225832), False, 'import pytest\n'), ((225874, 225898), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (225887, 225898), False, 'import pytest\n'), ((227758, 227782), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (227771, 227782), False, 'import pytest\n'), ((227818, 227842), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (227831, 227842), False, 'import pytest\n'), ((227962, 227986), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (227975, 227986), False, 'import pytest\n'), ((228022, 228046), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (228035, 228046), False, 'import pytest\n'), ((228171, 228195), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (228184, 228195), False, 'import pytest\n'), ((228231, 228255), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (228244, 228255), False, 'import pytest\n'), ((228967, 228996), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (228980, 228996), False, 'import pytest\n'), ((229028, 229057), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (229041, 229057), False, 'import pytest\n'), ((229089, 229118), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (229102, 229118), False, 'import pytest\n'), ((229152, 229181), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', 
(229165, 229181), False, 'import pytest\n'), ((229213, 229242), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (229226, 229242), False, 'import pytest\n'), ((229274, 229303), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (229287, 229303), False, 'import pytest\n'), ((229904, 229933), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (229917, 229933), False, 'import pytest\n'), ((229965, 229994), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (229978, 229994), False, 'import pytest\n'), ((230026, 230055), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (230039, 230055), False, 'import pytest\n'), ((230089, 230118), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (230102, 230118), False, 'import pytest\n'), ((230150, 230179), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (230163, 230179), False, 'import pytest\n'), ((230211, 230240), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (230224, 230240), False, 'import pytest\n'), ((230861, 230890), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (230874, 230890), False, 'import pytest\n'), ((230922, 230951), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (230935, 230951), False, 'import pytest\n'), ((230983, 231012), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (230996, 231012), False, 'import pytest\n'), ((231046, 231075), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (231059, 231075), False, 'import pytest\n'), ((231107, 231136), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (231120, 231136), False, 'import pytest\n'), ((231168, 231197), 'pytest.raises', 'pytest.raises', 
(['AssertionError'], {}), '(AssertionError)\n', (231181, 231197), False, 'import pytest\n'), ((231527, 231554), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (231540, 231554), False, 'import pytest\n'), ((231668, 231695), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (231681, 231695), False, 'import pytest\n'), ((232877, 232904), 'numpy.fabs', 'numpy.fabs', (['(dens_at_0 - 0.1)'], {}), '(dens_at_0 - 0.1)\n', (232887, 232904), False, 'import numpy\n'), ((233635, 233653), 'os.close', 'os.close', (['savefile'], {}), '(savefile)\n', (233643, 233653), False, 'import os\n'), ((233680, 233707), 'os.remove', 'os.remove', (['tmp_savefilename'], {}), '(tmp_savefilename)\n', (233689, 233707), False, 'import os\n'), ((234053, 234080), 'os.remove', 'os.remove', (['tmp_savefilename'], {}), '(tmp_savefilename)\n', (234062, 234080), False, 'import os\n'), ((234602, 234620), 'os.close', 'os.close', (['savefile'], {}), '(savefile)\n', (234610, 234620), False, 'import os\n'), ((234647, 234674), 'os.remove', 'os.remove', (['tmp_savefilename'], {}), '(tmp_savefilename)\n', (234656, 234674), False, 'import os\n'), ((235038, 235065), 'os.remove', 'os.remove', (['tmp_savefilename'], {}), '(tmp_savefilename)\n', (235047, 235065), False, 'import os\n'), ((235444, 235462), 'os.close', 'os.close', (['savefile'], {}), '(savefile)\n', (235452, 235462), False, 'import os\n'), ((235489, 235516), 'os.remove', 'os.remove', (['tmp_savefilename'], {}), '(tmp_savefilename)\n', (235498, 235516), False, 'import os\n'), ((236068, 236095), 'os.remove', 'os.remove', (['tmp_savefilename'], {}), '(tmp_savefilename)\n', (236077, 236095), False, 'import os\n'), ((236233, 236251), 'os.close', 'os.close', (['savefile'], {}), '(savefile)\n', (236241, 236251), False, 'import os\n'), ((236278, 236305), 'os.remove', 'os.remove', (['tmp_savefilename'], {}), '(tmp_savefilename)\n', (236287, 236305), False, 'import os\n'), ((236334, 236505), 
'galpy.potential.plotPotentials', 'potential.plotPotentials', (['[kp]'], {'rmin': '(0.01)', 'rmax': '(1.8)', 'nrs': '(11)', 'zmin': '(-0.55)', 'zmax': '(0.55)', 'nzs': '(11)', 'justcontours': '(True)', 'xy': '(True)', 'ncontours': '(11)', 'savefilename': 'tmp_savefilename'}), '([kp], rmin=0.01, rmax=1.8, nrs=11, zmin=-0.55,\n zmax=0.55, nzs=11, justcontours=True, xy=True, ncontours=11,\n savefilename=tmp_savefilename)\n', (236358, 236505), False, 'from galpy import potential\n'), ((236673, 236820), 'galpy.potential.plotPotentials', 'potential.plotPotentials', (['[kp]'], {'t': '(1.0)', 'rmin': '(0.01)', 'rmax': '(1.8)', 'nrs': '(11)', 'zmin': '(-0.55)', 'zmax': '(0.55)', 'nzs': '(11)', 'ncontours': '(11)', 'savefilename': 'tmp_savefilename'}), '([kp], t=1.0, rmin=0.01, rmax=1.8, nrs=11, zmin=-\n 0.55, zmax=0.55, nzs=11, ncontours=11, savefilename=tmp_savefilename)\n', (236697, 236820), False, 'from galpy import potential\n'), ((236930, 236957), 'os.remove', 'os.remove', (['tmp_savefilename'], {}), '(tmp_savefilename)\n', (236939, 236957), False, 'import os\n'), ((237342, 237397), 'galpy.potential.plotPotentials', 'potential.plotPotentials', (['[kp]'], {'effective': '(True)', 'Lz': 'None'}), '([kp], effective=True, Lz=None)\n', (237366, 237397), False, 'from galpy import potential\n'), ((237982, 238000), 'os.close', 'os.close', (['savefile'], {}), '(savefile)\n', (237990, 238000), False, 'import os\n'), ((238027, 238054), 'os.remove', 'os.remove', (['tmp_savefilename'], {}), '(tmp_savefilename)\n', (238036, 238054), False, 'import os\n'), ((238244, 238271), 'os.remove', 'os.remove', (['tmp_savefilename'], {}), '(tmp_savefilename)\n', (238253, 238271), False, 'import os\n'), ((239109, 239127), 'os.close', 'os.close', (['savefile'], {}), '(savefile)\n', (239117, 239127), False, 'import os\n'), ((239154, 239181), 'os.remove', 'os.remove', (['tmp_savefilename'], {}), '(tmp_savefilename)\n', (239163, 239181), False, 'import os\n'), ((239385, 239412), 'os.remove', 
'os.remove', (['tmp_savefilename'], {}), '(tmp_savefilename)\n', (239394, 239412), False, 'import os\n'), ((239951, 239969), 'os.close', 'os.close', (['savefile'], {}), '(savefile)\n', (239959, 239969), False, 'import os\n'), ((239996, 240023), 'os.remove', 'os.remove', (['tmp_savefilename'], {}), '(tmp_savefilename)\n', (240005, 240023), False, 'import os\n'), ((240329, 240356), 'os.remove', 'os.remove', (['tmp_savefilename'], {}), '(tmp_savefilename)\n', (240338, 240356), False, 'import os\n'), ((240469, 240487), 'os.close', 'os.close', (['savefile'], {}), '(savefile)\n', (240477, 240487), False, 'import os\n'), ((240514, 240541), 'os.remove', 'os.remove', (['tmp_savefilename'], {}), '(tmp_savefilename)\n', (240523, 240541), False, 'import os\n'), ((240885, 240912), 'os.remove', 'os.remove', (['tmp_savefilename'], {}), '(tmp_savefilename)\n', (240894, 240912), False, 'import os\n'), ((241052, 241099), 'galpy.potential.MiyamotoNagaiPotential', 'potential.MiyamotoNagaiPotential', ([], {'normalize': '(1.0)'}), '(normalize=1.0)\n', (241084, 241099), False, 'from galpy import potential\n'), ((241186, 241204), 'os.close', 'os.close', (['savefile'], {}), '(savefile)\n', (241194, 241204), False, 'import os\n'), ((241231, 241258), 'os.remove', 'os.remove', (['tmp_savefilename'], {}), '(tmp_savefilename)\n', (241240, 241258), False, 'import os\n'), ((241490, 241517), 'os.remove', 'os.remove', (['tmp_savefilename'], {}), '(tmp_savefilename)\n', (241499, 241517), False, 'import os\n'), ((241586, 241604), 'os.close', 'os.close', (['savefile'], {}), '(savefile)\n', (241594, 241604), False, 'import os\n'), ((241631, 241658), 'os.remove', 'os.remove', (['tmp_savefilename'], {}), '(tmp_savefilename)\n', (241640, 241658), False, 'import os\n'), ((241687, 241790), 'galpy.potential.plotlinearPotentials', 'potential.plotlinearPotentials', (['lip'], {'t': '(0.0)', 'min': '(-15.0)', 'max': '(15)', 'ns': '(21)', 'savefilename': 'tmp_savefilename'}), '(lip, t=0.0, min=-15.0, max=15, 
ns=21,\n savefilename=tmp_savefilename)\n', (241717, 241790), False, 'from galpy import potential\n'), ((241868, 241971), 'galpy.potential.plotlinearPotentials', 'potential.plotlinearPotentials', (['lip'], {'t': '(0.0)', 'min': '(-15.0)', 'max': '(15)', 'ns': '(21)', 'savefilename': 'tmp_savefilename'}), '(lip, t=0.0, min=-15.0, max=15, ns=21,\n savefilename=tmp_savefilename)\n', (241898, 241971), False, 'from galpy import potential\n'), ((242022, 242049), 'os.remove', 'os.remove', (['tmp_savefilename'], {}), '(tmp_savefilename)\n', (242031, 242049), False, 'import os\n'), ((242737, 242835), 'galpy.potential.SoftenedNeedleBarPotential.__init__', 'SoftenedNeedleBarPotential.__init__', (['self'], {'amp': '(1.0)', 'a': '(1e-06)', 'b': '(0.0)', 'c': '(10.0)', 'omegab': '(0.0)', 'pa': '(0.0)'}), '(self, amp=1.0, a=1e-06, b=0.0, c=10.0,\n omegab=0.0, pa=0.0)\n', (242772, 242835), False, 'from galpy.potential import TwoPowerSphericalPotential, MiyamotoNagaiPotential, PowerSphericalPotential, interpRZPotential, MWPotential, FlattenedPowerPotential, MN3ExponentialDiskPotential, TriaxialHernquistPotential, TriaxialNFWPotential, TriaxialJaffePotential, TwoPowerTriaxialPotential, BurkertPotential, SoftenedNeedleBarPotential, FerrersPotential, DiskSCFPotential, SpiralArmsPotential, LogarithmicHaloPotential\n'), ((243272, 243350), 'galpy.potential.TwoPowerSphericalPotential.__init__', 'TwoPowerSphericalPotential.__init__', (['self'], {'amp': '(1.0)', 'a': '(5.0)', 'alpha': '(1.5)', 'beta': '(3.0)'}), '(self, amp=1.0, a=5.0, alpha=1.5, beta=3.0)\n', (243307, 243350), False, 'from galpy.potential import TwoPowerSphericalPotential, MiyamotoNagaiPotential, PowerSphericalPotential, interpRZPotential, MWPotential, FlattenedPowerPotential, MN3ExponentialDiskPotential, TriaxialHernquistPotential, TriaxialNFWPotential, TriaxialJaffePotential, TwoPowerTriaxialPotential, BurkertPotential, SoftenedNeedleBarPotential, FerrersPotential, DiskSCFPotential, SpiralArmsPotential, 
LogarithmicHaloPotential\n'), ((243464, 243542), 'galpy.potential.TwoPowerSphericalPotential.__init__', 'TwoPowerSphericalPotential.__init__', (['self'], {'amp': '(1.0)', 'a': '(5.0)', 'alpha': '(1.5)', 'beta': '(4.0)'}), '(self, amp=1.0, a=5.0, alpha=1.5, beta=4.0)\n', (243499, 243542), False, 'from galpy.potential import TwoPowerSphericalPotential, MiyamotoNagaiPotential, PowerSphericalPotential, interpRZPotential, MWPotential, FlattenedPowerPotential, MN3ExponentialDiskPotential, TriaxialHernquistPotential, TriaxialNFWPotential, TriaxialJaffePotential, TwoPowerTriaxialPotential, BurkertPotential, SoftenedNeedleBarPotential, FerrersPotential, DiskSCFPotential, SpiralArmsPotential, LogarithmicHaloPotential\n'), ((243660, 243736), 'galpy.potential.TwoPowerSphericalPotential.__init__', 'TwoPowerSphericalPotential.__init__', (['self'], {'amp': '(1.0)', 'a': '(5.0)', 'alpha': '(0)', 'beta': '(4.0)'}), '(self, amp=1.0, a=5.0, alpha=0, beta=4.0)\n', (243695, 243736), False, 'from galpy.potential import TwoPowerSphericalPotential, MiyamotoNagaiPotential, PowerSphericalPotential, interpRZPotential, MWPotential, FlattenedPowerPotential, MN3ExponentialDiskPotential, TriaxialHernquistPotential, TriaxialNFWPotential, TriaxialJaffePotential, TwoPowerTriaxialPotential, BurkertPotential, SoftenedNeedleBarPotential, FerrersPotential, DiskSCFPotential, SpiralArmsPotential, LogarithmicHaloPotential\n'), ((243853, 243931), 'galpy.potential.TwoPowerSphericalPotential.__init__', 'TwoPowerSphericalPotential.__init__', (['self'], {'amp': '(1.0)', 'a': '(5.0)', 'alpha': '(1.0)', 'beta': '(4.0)'}), '(self, amp=1.0, a=5.0, alpha=1.0, beta=4.0)\n', (243888, 243931), False, 'from galpy.potential import TwoPowerSphericalPotential, MiyamotoNagaiPotential, PowerSphericalPotential, interpRZPotential, MWPotential, FlattenedPowerPotential, MN3ExponentialDiskPotential, TriaxialHernquistPotential, TriaxialNFWPotential, TriaxialJaffePotential, TwoPowerTriaxialPotential, BurkertPotential, 
SoftenedNeedleBarPotential, FerrersPotential, DiskSCFPotential, SpiralArmsPotential, LogarithmicHaloPotential\n'), ((244043, 244121), 'galpy.potential.TwoPowerSphericalPotential.__init__', 'TwoPowerSphericalPotential.__init__', (['self'], {'amp': '(1.0)', 'a': '(5.0)', 'alpha': '(2.0)', 'beta': '(4.0)'}), '(self, amp=1.0, a=5.0, alpha=2.0, beta=4.0)\n', (244078, 244121), False, 'from galpy.potential import TwoPowerSphericalPotential, MiyamotoNagaiPotential, PowerSphericalPotential, interpRZPotential, MWPotential, FlattenedPowerPotential, MN3ExponentialDiskPotential, TriaxialHernquistPotential, TriaxialNFWPotential, TriaxialJaffePotential, TwoPowerTriaxialPotential, BurkertPotential, SoftenedNeedleBarPotential, FerrersPotential, DiskSCFPotential, SpiralArmsPotential, LogarithmicHaloPotential\n'), ((244231, 244309), 'galpy.potential.TwoPowerSphericalPotential.__init__', 'TwoPowerSphericalPotential.__init__', (['self'], {'amp': '(1.0)', 'a': '(5.0)', 'alpha': '(1.0)', 'beta': '(3.0)'}), '(self, amp=1.0, a=5.0, alpha=1.0, beta=3.0)\n', (244266, 244309), False, 'from galpy.potential import TwoPowerSphericalPotential, MiyamotoNagaiPotential, PowerSphericalPotential, interpRZPotential, MWPotential, FlattenedPowerPotential, MN3ExponentialDiskPotential, TriaxialHernquistPotential, TriaxialNFWPotential, TriaxialJaffePotential, TwoPowerTriaxialPotential, BurkertPotential, SoftenedNeedleBarPotential, FerrersPotential, DiskSCFPotential, SpiralArmsPotential, LogarithmicHaloPotential\n'), ((244417, 244475), 'galpy.potential.PowerSphericalPotential.__init__', 'PowerSphericalPotential.__init__', (['self'], {'amp': '(1.0)', 'alpha': '(2.0)'}), '(self, amp=1.0, alpha=2.0)\n', (244449, 244475), False, 'from galpy.potential import TwoPowerSphericalPotential, MiyamotoNagaiPotential, PowerSphericalPotential, interpRZPotential, MWPotential, FlattenedPowerPotential, MN3ExponentialDiskPotential, TriaxialHernquistPotential, TriaxialNFWPotential, TriaxialJaffePotential, 
TwoPowerTriaxialPotential, BurkertPotential, SoftenedNeedleBarPotential, FerrersPotential, DiskSCFPotential, SpiralArmsPotential, LogarithmicHaloPotential\n'), ((244585, 244645), 'galpy.potential.MiyamotoNagaiPotential.__init__', 'MiyamotoNagaiPotential.__init__', (['self'], {'amp': '(1.0)', 'a': '(0.0)', 'b': '(0.1)'}), '(self, amp=1.0, a=0.0, b=0.1)\n', (244616, 244645), False, 'from galpy.potential import TwoPowerSphericalPotential, MiyamotoNagaiPotential, PowerSphericalPotential, interpRZPotential, MWPotential, FlattenedPowerPotential, MN3ExponentialDiskPotential, TriaxialHernquistPotential, TriaxialNFWPotential, TriaxialJaffePotential, TwoPowerTriaxialPotential, BurkertPotential, SoftenedNeedleBarPotential, FerrersPotential, DiskSCFPotential, SpiralArmsPotential, LogarithmicHaloPotential\n'), ((244756, 244805), 'galpy.potential.FlattenedPowerPotential.__init__', 'FlattenedPowerPotential.__init__', (['self'], {'alpha': '(0.0)'}), '(self, alpha=0.0)\n', (244788, 244805), False, 'from galpy.potential import TwoPowerSphericalPotential, MiyamotoNagaiPotential, PowerSphericalPotential, interpRZPotential, MWPotential, FlattenedPowerPotential, MN3ExponentialDiskPotential, TriaxialHernquistPotential, TriaxialNFWPotential, TriaxialJaffePotential, TwoPowerTriaxialPotential, BurkertPotential, SoftenedNeedleBarPotential, FerrersPotential, DiskSCFPotential, SpiralArmsPotential, LogarithmicHaloPotential\n'), ((244929, 245000), 'galpy.potential.MN3ExponentialDiskPotential.__init__', 'MN3ExponentialDiskPotential.__init__', (['self'], {'normalize': '(1.0)', 'posdens': '(True)'}), '(self, normalize=1.0, posdens=True)\n', (244965, 245000), False, 'from galpy.potential import TwoPowerSphericalPotential, MiyamotoNagaiPotential, PowerSphericalPotential, interpRZPotential, MWPotential, FlattenedPowerPotential, MN3ExponentialDiskPotential, TriaxialHernquistPotential, TriaxialNFWPotential, TriaxialJaffePotential, TwoPowerTriaxialPotential, BurkertPotential, SoftenedNeedleBarPotential, 
FerrersPotential, DiskSCFPotential, SpiralArmsPotential, LogarithmicHaloPotential\n'), ((245125, 245193), 'galpy.potential.MN3ExponentialDiskPotential.__init__', 'MN3ExponentialDiskPotential.__init__', (['self'], {'normalize': '(1.0)', 'sech': '(True)'}), '(self, normalize=1.0, sech=True)\n', (245161, 245193), False, 'from galpy.potential import TwoPowerSphericalPotential, MiyamotoNagaiPotential, PowerSphericalPotential, interpRZPotential, MWPotential, FlattenedPowerPotential, MN3ExponentialDiskPotential, TriaxialHernquistPotential, TriaxialNFWPotential, TriaxialJaffePotential, TwoPowerTriaxialPotential, BurkertPotential, SoftenedNeedleBarPotential, FerrersPotential, DiskSCFPotential, SpiralArmsPotential, LogarithmicHaloPotential\n'), ((245324, 245355), 'galpy.potential.BurkertPotential.__init__', 'BurkertPotential.__init__', (['self'], {}), '(self)\n', (245349, 245355), False, 'from galpy.potential import TwoPowerSphericalPotential, MiyamotoNagaiPotential, PowerSphericalPotential, interpRZPotential, MWPotential, FlattenedPowerPotential, MN3ExponentialDiskPotential, TriaxialHernquistPotential, TriaxialNFWPotential, TriaxialJaffePotential, TwoPowerTriaxialPotential, BurkertPotential, SoftenedNeedleBarPotential, FerrersPotential, DiskSCFPotential, SpiralArmsPotential, LogarithmicHaloPotential\n'), ((245523, 245593), 'galpy.potential.TriaxialHernquistPotential.__init__', 'TriaxialHernquistPotential.__init__', (['self'], {'normalize': '(1.0)', 'b': '(1.0)', 'c': '(0.2)'}), '(self, normalize=1.0, b=1.0, c=0.2)\n', (245558, 245593), False, 'from galpy.potential import TwoPowerSphericalPotential, MiyamotoNagaiPotential, PowerSphericalPotential, interpRZPotential, MWPotential, FlattenedPowerPotential, MN3ExponentialDiskPotential, TriaxialHernquistPotential, TriaxialNFWPotential, TriaxialJaffePotential, TwoPowerTriaxialPotential, BurkertPotential, SoftenedNeedleBarPotential, FerrersPotential, DiskSCFPotential, SpiralArmsPotential, LogarithmicHaloPotential\n'), ((245688, 
245752), 'galpy.potential.TriaxialNFWPotential.__init__', 'TriaxialNFWPotential.__init__', (['self'], {'normalize': '(1.0)', 'b': '(1.0)', 'c': '(0.2)'}), '(self, normalize=1.0, b=1.0, c=0.2)\n', (245717, 245752), False, 'from galpy.potential import TwoPowerSphericalPotential, MiyamotoNagaiPotential, PowerSphericalPotential, interpRZPotential, MWPotential, FlattenedPowerPotential, MN3ExponentialDiskPotential, TriaxialHernquistPotential, TriaxialNFWPotential, TriaxialJaffePotential, TwoPowerTriaxialPotential, BurkertPotential, SoftenedNeedleBarPotential, FerrersPotential, DiskSCFPotential, SpiralArmsPotential, LogarithmicHaloPotential\n'), ((245851, 245929), 'galpy.potential.TriaxialNFWPotential.__init__', 'TriaxialNFWPotential.__init__', (['self'], {'normalize': '(1.0)', 'b': '(1.0)', 'c': '(0.2)', 'glorder': 'None'}), '(self, normalize=1.0, b=1.0, c=0.2, glorder=None)\n', (245880, 245929), False, 'from galpy.potential import TwoPowerSphericalPotential, MiyamotoNagaiPotential, PowerSphericalPotential, interpRZPotential, MWPotential, FlattenedPowerPotential, MN3ExponentialDiskPotential, TriaxialHernquistPotential, TriaxialNFWPotential, TriaxialJaffePotential, TwoPowerTriaxialPotential, BurkertPotential, SoftenedNeedleBarPotential, FerrersPotential, DiskSCFPotential, SpiralArmsPotential, LogarithmicHaloPotential\n'), ((246027, 246093), 'galpy.potential.TriaxialJaffePotential.__init__', 'TriaxialJaffePotential.__init__', (['self'], {'normalize': '(1.0)', 'b': '(1.0)', 'c': '(0.2)'}), '(self, normalize=1.0, b=1.0, c=0.2)\n', (246058, 246093), False, 'from galpy.potential import TwoPowerSphericalPotential, MiyamotoNagaiPotential, PowerSphericalPotential, interpRZPotential, MWPotential, FlattenedPowerPotential, MN3ExponentialDiskPotential, TriaxialHernquistPotential, TriaxialNFWPotential, TriaxialJaffePotential, TwoPowerTriaxialPotential, BurkertPotential, SoftenedNeedleBarPotential, FerrersPotential, DiskSCFPotential, SpiralArmsPotential, LogarithmicHaloPotential\n'), 
((246201, 246271), 'galpy.potential.TriaxialHernquistPotential.__init__', 'TriaxialHernquistPotential.__init__', (['self'], {'normalize': '(1.0)', 'b': '(1.0)', 'c': '(1.8)'}), '(self, normalize=1.0, b=1.0, c=1.8)\n', (246236, 246271), False, 'from galpy.potential import TwoPowerSphericalPotential, MiyamotoNagaiPotential, PowerSphericalPotential, interpRZPotential, MWPotential, FlattenedPowerPotential, MN3ExponentialDiskPotential, TriaxialHernquistPotential, TriaxialNFWPotential, TriaxialJaffePotential, TwoPowerTriaxialPotential, BurkertPotential, SoftenedNeedleBarPotential, FerrersPotential, DiskSCFPotential, SpiralArmsPotential, LogarithmicHaloPotential\n'), ((246368, 246432), 'galpy.potential.TriaxialNFWPotential.__init__', 'TriaxialNFWPotential.__init__', (['self'], {'normalize': '(1.0)', 'b': '(1.0)', 'c': '(1.8)'}), '(self, normalize=1.0, b=1.0, c=1.8)\n', (246397, 246432), False, 'from galpy.potential import TwoPowerSphericalPotential, MiyamotoNagaiPotential, PowerSphericalPotential, interpRZPotential, MWPotential, FlattenedPowerPotential, MN3ExponentialDiskPotential, TriaxialHernquistPotential, TriaxialNFWPotential, TriaxialJaffePotential, TwoPowerTriaxialPotential, BurkertPotential, SoftenedNeedleBarPotential, FerrersPotential, DiskSCFPotential, SpiralArmsPotential, LogarithmicHaloPotential\n'), ((246533, 246599), 'galpy.potential.TriaxialJaffePotential.__init__', 'TriaxialJaffePotential.__init__', (['self'], {'normalize': '(1.0)', 'b': '(1.0)', 'c': '(1.8)'}), '(self, normalize=1.0, b=1.0, c=1.8)\n', (246564, 246599), False, 'from galpy.potential import TwoPowerSphericalPotential, MiyamotoNagaiPotential, PowerSphericalPotential, interpRZPotential, MWPotential, FlattenedPowerPotential, MN3ExponentialDiskPotential, TriaxialHernquistPotential, TriaxialNFWPotential, TriaxialJaffePotential, TwoPowerTriaxialPotential, BurkertPotential, SoftenedNeedleBarPotential, FerrersPotential, DiskSCFPotential, SpiralArmsPotential, LogarithmicHaloPotential\n'), ((246703, 
246748), 'galpy.potential.SpiralArmsPotential.__init__', 'SpiralArmsPotential.__init__', (['self'], {'omega': '(1.1)'}), '(self, omega=1.1)\n', (246731, 246748), False, 'from galpy.potential import TwoPowerSphericalPotential, MiyamotoNagaiPotential, PowerSphericalPotential, interpRZPotential, MWPotential, FlattenedPowerPotential, MN3ExponentialDiskPotential, TriaxialHernquistPotential, TriaxialNFWPotential, TriaxialJaffePotential, TwoPowerTriaxialPotential, BurkertPotential, SoftenedNeedleBarPotential, FerrersPotential, DiskSCFPotential, SpiralArmsPotential, LogarithmicHaloPotential\n'), ((246836, 246954), 'galpy.potential.SpiralArmsPotential.__init__', 'SpiralArmsPotential.__init__', (['self'], {'omega': '(1.3)', 'N': '(4.0)', 'Cs': '[8.0 / 3.0 / numpy.pi, 1.0 / 2.0, 8.0 / 15.0 / numpy.pi]'}), '(self, omega=1.3, N=4.0, Cs=[8.0 / 3.0 / numpy.\n pi, 1.0 / 2.0, 8.0 / 15.0 / numpy.pi])\n', (246864, 246954), False, 'from galpy.potential import TwoPowerSphericalPotential, MiyamotoNagaiPotential, PowerSphericalPotential, interpRZPotential, MWPotential, FlattenedPowerPotential, MN3ExponentialDiskPotential, TriaxialHernquistPotential, TriaxialNFWPotential, TriaxialJaffePotential, TwoPowerTriaxialPotential, BurkertPotential, SoftenedNeedleBarPotential, FerrersPotential, DiskSCFPotential, SpiralArmsPotential, LogarithmicHaloPotential\n'), ((247027, 247097), 'galpy.potential.TriaxialHernquistPotential.__init__', 'TriaxialHernquistPotential.__init__', (['self'], {'normalize': '(1.0)', 'b': '(1.4)', 'c': '(0.6)'}), '(self, normalize=1.0, b=1.4, c=0.6)\n', (247062, 247097), False, 'from galpy.potential import TwoPowerSphericalPotential, MiyamotoNagaiPotential, PowerSphericalPotential, interpRZPotential, MWPotential, FlattenedPowerPotential, MN3ExponentialDiskPotential, TriaxialHernquistPotential, TriaxialNFWPotential, TriaxialJaffePotential, TwoPowerTriaxialPotential, BurkertPotential, SoftenedNeedleBarPotential, FerrersPotential, DiskSCFPotential, SpiralArmsPotential, 
LogarithmicHaloPotential\n'), ((247196, 247260), 'galpy.potential.TriaxialNFWPotential.__init__', 'TriaxialNFWPotential.__init__', (['self'], {'normalize': '(1.0)', 'b': '(0.2)', 'c': '(1.8)'}), '(self, normalize=1.0, b=0.2, c=1.8)\n', (247225, 247260), False, 'from galpy.potential import TwoPowerSphericalPotential, MiyamotoNagaiPotential, PowerSphericalPotential, interpRZPotential, MWPotential, FlattenedPowerPotential, MN3ExponentialDiskPotential, TriaxialHernquistPotential, TriaxialNFWPotential, TriaxialJaffePotential, TwoPowerTriaxialPotential, BurkertPotential, SoftenedNeedleBarPotential, FerrersPotential, DiskSCFPotential, SpiralArmsPotential, LogarithmicHaloPotential\n'), ((247362, 247428), 'galpy.potential.TriaxialJaffePotential.__init__', 'TriaxialJaffePotential.__init__', (['self'], {'normalize': '(1.0)', 'b': '(0.4)', 'c': '(0.7)'}), '(self, normalize=1.0, b=0.4, c=0.7)\n', (247393, 247428), False, 'from galpy.potential import TwoPowerSphericalPotential, MiyamotoNagaiPotential, PowerSphericalPotential, interpRZPotential, MWPotential, FlattenedPowerPotential, MN3ExponentialDiskPotential, TriaxialHernquistPotential, TriaxialNFWPotential, TriaxialJaffePotential, TwoPowerTriaxialPotential, BurkertPotential, SoftenedNeedleBarPotential, FerrersPotential, DiskSCFPotential, SpiralArmsPotential, LogarithmicHaloPotential\n'), ((247784, 247856), 'galpy.potential.TriaxialNFWPotential.__init__', 'TriaxialNFWPotential.__init__', (['self'], {'normalize': '(1.0)', 'b': '(1.5)', 'c': '(0.2)', 'pa': '(0.2)'}), '(self, normalize=1.0, b=1.5, c=0.2, pa=0.2)\n', (247813, 247856), False, 'from galpy.potential import TwoPowerSphericalPotential, MiyamotoNagaiPotential, PowerSphericalPotential, interpRZPotential, MWPotential, FlattenedPowerPotential, MN3ExponentialDiskPotential, TriaxialHernquistPotential, TriaxialNFWPotential, TriaxialJaffePotential, TwoPowerTriaxialPotential, BurkertPotential, SoftenedNeedleBarPotential, FerrersPotential, DiskSCFPotential, SpiralArmsPotential, 
LogarithmicHaloPotential\n'), ((248623, 248701), 'galpy.potential.LogarithmicHaloPotential.__init__', 'LogarithmicHaloPotential.__init__', (['self'], {'normalize': '(1.0)', 'b': '(0.7)', 'q': '(0.9)', 'core': '(0.5)'}), '(self, normalize=1.0, b=0.7, q=0.9, core=0.5)\n', (248656, 248701), False, 'from galpy.potential import LinShuReductionFactor, LogarithmicHaloPotential, omegac, epifreq\n'), ((248953, 249049), 'galpy.potential.TwoPowerTriaxialPotential.__init__', 'TwoPowerTriaxialPotential.__init__', (['self'], {'amp': '(1.0)', 'a': '(5.0)', 'alpha': '(1.0)', 'beta': '(4.0)', 'b': '(0.3)', 'c': '(1.8)'}), '(self, amp=1.0, a=5.0, alpha=1.0, beta=\n 4.0, b=0.3, c=1.8)\n', (248987, 249049), False, 'from galpy.potential import TwoPowerSphericalPotential, MiyamotoNagaiPotential, PowerSphericalPotential, interpRZPotential, MWPotential, FlattenedPowerPotential, MN3ExponentialDiskPotential, TriaxialHernquistPotential, TriaxialNFWPotential, TriaxialJaffePotential, TwoPowerTriaxialPotential, BurkertPotential, SoftenedNeedleBarPotential, FerrersPotential, DiskSCFPotential, SpiralArmsPotential, LogarithmicHaloPotential\n'), ((249194, 249290), 'galpy.potential.TwoPowerTriaxialPotential.__init__', 'TwoPowerTriaxialPotential.__init__', (['self'], {'amp': '(1.0)', 'a': '(2.0)', 'alpha': '(1.0)', 'beta': '(3.0)', 'b': '(1.3)', 'c': '(0.8)'}), '(self, amp=1.0, a=2.0, alpha=1.0, beta=\n 3.0, b=1.3, c=0.8)\n', (249228, 249290), False, 'from galpy.potential import TwoPowerSphericalPotential, MiyamotoNagaiPotential, PowerSphericalPotential, interpRZPotential, MWPotential, FlattenedPowerPotential, MN3ExponentialDiskPotential, TriaxialHernquistPotential, TriaxialNFWPotential, TriaxialJaffePotential, TwoPowerTriaxialPotential, BurkertPotential, SoftenedNeedleBarPotential, FerrersPotential, DiskSCFPotential, SpiralArmsPotential, LogarithmicHaloPotential\n'), ((249492, 249588), 'galpy.potential.TwoPowerTriaxialPotential.__init__', 'TwoPowerTriaxialPotential.__init__', (['self'], {'amp': 
'(1.0)', 'a': '(5.0)', 'alpha': '(2.0)', 'beta': '(4.0)', 'b': '(1.3)', 'c': '(1.8)'}), '(self, amp=1.0, a=5.0, alpha=2.0, beta=\n 4.0, b=1.3, c=1.8)\n', (249526, 249588), False, 'from galpy.potential import TwoPowerSphericalPotential, MiyamotoNagaiPotential, PowerSphericalPotential, interpRZPotential, MWPotential, FlattenedPowerPotential, MN3ExponentialDiskPotential, TriaxialHernquistPotential, TriaxialNFWPotential, TriaxialJaffePotential, TwoPowerTriaxialPotential, BurkertPotential, SoftenedNeedleBarPotential, FerrersPotential, DiskSCFPotential, SpiralArmsPotential, LogarithmicHaloPotential\n'), ((250417, 250450), 'galpy.potential.HernquistPotential', 'HernquistPotential', ([], {'normalize': '(0.5)'}), '(normalize=0.5)\n', (250435, 250450), False, 'from galpy.potential import HernquistPotential\n'), ((251336, 251369), 'galpy.potential.HernquistPotential', 'HernquistPotential', ([], {'normalize': '(0.5)'}), '(normalize=0.5)\n', (251354, 251369), False, 'from galpy.potential import HernquistPotential\n'), ((253374, 253434), 'galpy.potential.FerrersPotential.__init__', 'FerrersPotential.__init__', (['self'], {'normalize': '(1.0)', 'b': '(1.0)', 'c': '(0.2)'}), '(self, normalize=1.0, b=1.0, c=0.2)\n', (253399, 253434), False, 'from galpy.potential import TwoPowerSphericalPotential, MiyamotoNagaiPotential, PowerSphericalPotential, interpRZPotential, MWPotential, FlattenedPowerPotential, MN3ExponentialDiskPotential, TriaxialHernquistPotential, TriaxialNFWPotential, TriaxialJaffePotential, TwoPowerTriaxialPotential, BurkertPotential, SoftenedNeedleBarPotential, FerrersPotential, DiskSCFPotential, SpiralArmsPotential, LogarithmicHaloPotential\n'), ((253529, 253718), 'galpy.potential.interpRZPotential.__init__', 'interpRZPotential.__init__', (['self'], {'RZPot': 'MWPotential', 'rgrid': '(0.01, 2.1, 101)', 'zgrid': '(0.0, 0.26, 101)', 'logR': '(True)', 'interpPot': '(True)', 'interpRforce': '(True)', 'interpzforce': '(True)', 'interpDens': '(True)'}), '(self, 
RZPot=MWPotential, rgrid=(0.01, 2.1, 101),\n zgrid=(0.0, 0.26, 101), logR=True, interpPot=True, interpRforce=True,\n interpzforce=True, interpDens=True)\n', (253555, 253718), False, 'from galpy.potential import TwoPowerSphericalPotential, MiyamotoNagaiPotential, PowerSphericalPotential, interpRZPotential, MWPotential, FlattenedPowerPotential, MN3ExponentialDiskPotential, TriaxialHernquistPotential, TriaxialNFWPotential, TriaxialJaffePotential, TwoPowerTriaxialPotential, BurkertPotential, SoftenedNeedleBarPotential, FerrersPotential, DiskSCFPotential, SpiralArmsPotential, LogarithmicHaloPotential\n'), ((253998, 254032), 'galpy.potential.KeplerPotential', 'potential.KeplerPotential', ([], {'amp': '(1.0)'}), '(amp=1.0)\n', (254023, 254032), False, 'from galpy import potential\n'), ((254043, 254062), 'pynbody.new', 'pynbody.new', ([], {'star': '(1)'}), '(star=1)\n', (254054, 254062), False, 'import pynbody\n'), ((254161, 254208), 'galpy.potential.SnapshotRZPotential.__init__', 'potential.SnapshotRZPotential.__init__', (['self', 's'], {}), '(self, s)\n', (254199, 254208), False, 'from galpy import potential\n'), ((254376, 254410), 'galpy.potential.KeplerPotential', 'potential.KeplerPotential', ([], {'amp': '(1.0)'}), '(amp=1.0)\n', (254401, 254410), False, 'from galpy import potential\n'), ((254421, 254440), 'pynbody.new', 'pynbody.new', ([], {'star': '(1)'}), '(star=1)\n', (254432, 254440), False, 'import pynbody\n'), ((254539, 254683), 'galpy.potential.InterpSnapshotRZPotential.__init__', 'potential.InterpSnapshotRZPotential.__init__', (['self', 's'], {'rgrid': '(0.01, 2.0, 101)', 'zgrid': '(0.0, 0.3, 101)', 'logR': '(False)', 'interpPot': '(True)', 'zsym': '(True)'}), '(self, s, rgrid=(0.01, 2.0, 101\n ), zgrid=(0.0, 0.3, 101), logR=False, interpPot=True, zsym=True)\n', (254583, 254683), False, 'from galpy import potential\n'), ((255332, 255477), 'galpy.potential.DehnenBarPotential.__init__', 'DehnenBarPotential.__init__', (['self'], {'omegab': '(1.9)', 'rb': 
'(0.4)', 'barphi': '(25.0 * numpy.pi / 180.0)', 'beta': '(0.0)', 'tform': '(0.5)', 'tsteady': '(0.5)', 'alpha': '(0.01)', 'Af': '(0.04)'}), '(self, omegab=1.9, rb=0.4, barphi=25.0 * numpy.\n pi / 180.0, beta=0.0, tform=0.5, tsteady=0.5, alpha=0.01, Af=0.04)\n', (255359, 255477), False, 'from galpy.potential import DehnenBarPotential, CosmphiDiskPotential, EllipticalDiskPotential, SteadyLogSpiralPotential, TransientLogSpiralPotential, HenonHeilesPotential\n'), ((255654, 255801), 'galpy.potential.DehnenBarPotential.__init__', 'DehnenBarPotential.__init__', (['self'], {'omegab': '(1.9)', 'rb': '(0.6)', 'barphi': '(25.0 * numpy.pi / 180.0)', 'beta': '(0.0)', 'tform': '(-1.0)', 'tsteady': '(1.01)', 'alpha': '(0.01)', 'Af': '(0.04)'}), '(self, omegab=1.9, rb=0.6, barphi=25.0 * numpy.\n pi / 180.0, beta=0.0, tform=-1.0, tsteady=1.01, alpha=0.01, Af=0.04)\n', (255681, 255801), False, 'from galpy.potential import DehnenBarPotential, CosmphiDiskPotential, EllipticalDiskPotential, SteadyLogSpiralPotential, TransientLogSpiralPotential, HenonHeilesPotential\n'), ((255977, 256123), 'galpy.potential.DehnenBarPotential.__init__', 'DehnenBarPotential.__init__', (['self'], {'omegab': '(1.9)', 'rb': '(0.4)', 'barphi': '(25.0 * numpy.pi / 180.0)', 'beta': '(0.0)', 'tform': '(-5.0)', 'tsteady': '(2.0)', 'alpha': '(0.01)', 'Af': '(0.04)'}), '(self, omegab=1.9, rb=0.4, barphi=25.0 * numpy.\n pi / 180.0, beta=0.0, tform=-5.0, tsteady=2.0, alpha=0.01, Af=0.04)\n', (256004, 256123), False, 'from galpy.potential import DehnenBarPotential, CosmphiDiskPotential, EllipticalDiskPotential, SteadyLogSpiralPotential, TransientLogSpiralPotential, HenonHeilesPotential\n'), ((256304, 256434), 'galpy.potential.CosmphiDiskPotential.__init__', 'CosmphiDiskPotential.__init__', (['self'], {'amp': '(1.0)', 'phib': '(25.0 * numpy.pi / 180.0)', 'p': '(1.0)', 'phio': '(0.01)', 'm': '(1.0)', 'rb': '(0.9)', 'cp': '(-0.05)', 'sp': '(0.05)'}), '(self, amp=1.0, phib=25.0 * numpy.pi / 180.0,\n p=1.0, phio=0.01, 
m=1.0, rb=0.9, cp=-0.05, sp=0.05)\n', (256333, 256434), False, 'from galpy.potential import DehnenBarPotential, CosmphiDiskPotential, EllipticalDiskPotential, SteadyLogSpiralPotential, TransientLogSpiralPotential, HenonHeilesPotential\n'), ((256582, 256705), 'galpy.potential.CosmphiDiskPotential.__init__', 'CosmphiDiskPotential.__init__', (['self'], {'amp': '(1.0)', 'phib': '(25.0 * numpy.pi / 180.0)', 'p': '(-1.0)', 'phio': '(0.01)', 'm': '(1.0)', 'cp': '(-0.05)', 'sp': '(0.05)'}), '(self, amp=1.0, phib=25.0 * numpy.pi / 180.0,\n p=-1.0, phio=0.01, m=1.0, cp=-0.05, sp=0.05)\n', (256611, 256705), False, 'from galpy.potential import DehnenBarPotential, CosmphiDiskPotential, EllipticalDiskPotential, SteadyLogSpiralPotential, TransientLogSpiralPotential, HenonHeilesPotential\n'), ((256858, 257003), 'galpy.potential.EllipticalDiskPotential.__init__', 'EllipticalDiskPotential.__init__', (['self'], {'amp': '(1.0)', 'phib': '(25.0 * numpy.pi / 180.0)', 'p': '(1.0)', 'twophio': '(0.02)', 'tform': '(0.5)', 'tsteady': '(1.0)', 'cp': '(0.05)', 'sp': '(0.05)'}), '(self, amp=1.0, phib=25.0 * numpy.pi / \n 180.0, p=1.0, twophio=0.02, tform=0.5, tsteady=1.0, cp=0.05, sp=0.05)\n', (256890, 257003), False, 'from galpy.potential import DehnenBarPotential, CosmphiDiskPotential, EllipticalDiskPotential, SteadyLogSpiralPotential, TransientLogSpiralPotential, HenonHeilesPotential\n'), ((257204, 257352), 'galpy.potential.EllipticalDiskPotential.__init__', 'EllipticalDiskPotential.__init__', (['self'], {'amp': '(1.0)', 'phib': '(25.0 * numpy.pi / 180.0)', 'p': '(1.0)', 'twophio': '(0.02)', 'tform': '(-1.0)', 'tsteady': 'None', 'cp': '(-0.05)', 'sp': '(0.05)'}), '(self, amp=1.0, phib=25.0 * numpy.pi / \n 180.0, p=1.0, twophio=0.02, tform=-1.0, tsteady=None, cp=-0.05, sp=0.05)\n', (257236, 257352), False, 'from galpy.potential import DehnenBarPotential, CosmphiDiskPotential, EllipticalDiskPotential, SteadyLogSpiralPotential, TransientLogSpiralPotential, HenonHeilesPotential\n'), ((257552, 
257700), 'galpy.potential.EllipticalDiskPotential.__init__', 'EllipticalDiskPotential.__init__', (['self'], {'amp': '(1.0)', 'phib': '(25.0 * numpy.pi / 180.0)', 'p': '(1.0)', 'twophio': '(0.02)', 'tform': '(-5.0)', 'tsteady': '(-1.0)', 'cp': '(-0.05)', 'sp': '(0.05)'}), '(self, amp=1.0, phib=25.0 * numpy.pi / \n 180.0, p=1.0, twophio=0.02, tform=-5.0, tsteady=-1.0, cp=-0.05, sp=0.05)\n', (257584, 257700), False, 'from galpy.potential import DehnenBarPotential, CosmphiDiskPotential, EllipticalDiskPotential, SteadyLogSpiralPotential, TransientLogSpiralPotential, HenonHeilesPotential\n'), ((257900, 258034), 'galpy.potential.SteadyLogSpiralPotential.__init__', 'SteadyLogSpiralPotential.__init__', (['self'], {'amp': '(1.0)', 'omegas': '(0.65)', 'A': '(-0.035)', 'm': '(2)', 'gamma': '(numpy.pi / 4.0)', 'p': '(-0.3)', 'tform': '(0.5)', 'tsteady': '(1.0)'}), '(self, amp=1.0, omegas=0.65, A=-0.035, m=2,\n gamma=numpy.pi / 4.0, p=-0.3, tform=0.5, tsteady=1.0)\n', (257933, 258034), False, 'from galpy.potential import SteadyLogSpiralPotential\n'), ((258246, 258382), 'galpy.potential.SteadyLogSpiralPotential.__init__', 'SteadyLogSpiralPotential.__init__', (['self'], {'amp': '(1.0)', 'omegas': '(0.65)', 'A': '(-0.035)', 'm': '(2)', 'gamma': '(numpy.pi / 4.0)', 'p': '(-0.3)', 'tform': '(-1.0)', 'tsteady': 'None'}), '(self, amp=1.0, omegas=0.65, A=-0.035, m=2,\n gamma=numpy.pi / 4.0, p=-0.3, tform=-1.0, tsteady=None)\n', (258279, 258382), False, 'from galpy.potential import SteadyLogSpiralPotential\n'), ((258594, 258730), 'galpy.potential.SteadyLogSpiralPotential.__init__', 'SteadyLogSpiralPotential.__init__', (['self'], {'amp': '(1.0)', 'omegas': '(0.65)', 'A': '(-0.035)', 'm': '(2)', 'gamma': '(numpy.pi / 4.0)', 'p': '(-0.3)', 'tform': '(-1.0)', 'tsteady': '(-5.0)'}), '(self, amp=1.0, omegas=0.65, A=-0.035, m=2,\n gamma=numpy.pi / 4.0, p=-0.3, tform=-1.0, tsteady=-5.0)\n', (258627, 258730), False, 'from galpy.potential import SteadyLogSpiralPotential\n'), ((258944, 259057), 
'galpy.potential.TransientLogSpiralPotential.__init__', 'TransientLogSpiralPotential.__init__', (['self'], {'amp': '(1.0)', 'omegas': '(0.65)', 'A': '(-0.035)', 'm': '(2)', 'gamma': '(numpy.pi / 4.0)', 'p': '(-0.3)'}), '(self, amp=1.0, omegas=0.65, A=-0.035,\n m=2, gamma=numpy.pi / 4.0, p=-0.3)\n', (258980, 259057), False, 'from galpy.potential import DehnenBarPotential, CosmphiDiskPotential, EllipticalDiskPotential, SteadyLogSpiralPotential, TransientLogSpiralPotential, HenonHeilesPotential\n'), ((259986, 260038), 'galpy.potential.scf_compute_coeffs_spherical', 'potential.scf_compute_coeffs_spherical', (['rho_Zeeuw', '(2)'], {}), '(rho_Zeeuw, 2)\n', (260024, 260038), False, 'from galpy import potential\n'), ((260046, 260114), 'galpy.potential.SCFPotential.__init__', 'potential.SCFPotential.__init__', (['self'], {'amp': '(1.0)', 'Acos': 'Acos', 'Asin': 'Asin'}), '(self, amp=1.0, Acos=Acos, Asin=Asin)\n', (260077, 260114), False, 'from galpy import potential\n'), ((260211, 260235), 'galpy.potential.NFWPotential', 'potential.NFWPotential', ([], {}), '()\n', (260233, 260235), False, 'from galpy import potential\n'), ((260257, 260309), 'galpy.potential.scf_compute_coeffs_spherical', 'potential.scf_compute_coeffs_spherical', (['nfw.dens', '(10)'], {}), '(nfw.dens, 10)\n', (260295, 260309), False, 'from galpy import potential\n'), ((260317, 260385), 'galpy.potential.SCFPotential.__init__', 'potential.SCFPotential.__init__', (['self'], {'amp': '(1.0)', 'Acos': 'Acos', 'Asin': 'Asin'}), '(self, amp=1.0, Acos=Acos, Asin=Asin)\n', (260348, 260385), False, 'from galpy import potential\n'), ((260496, 260549), 'galpy.potential.scf_compute_coeffs_axi', 'potential.scf_compute_coeffs_axi', (['axi_density1', '(10)', '(2)'], {}), '(axi_density1, 10, 2)\n', (260528, 260549), False, 'from galpy import potential\n'), ((260556, 260624), 'galpy.potential.SCFPotential.__init__', 'potential.SCFPotential.__init__', (['self'], {'amp': '(1.0)', 'Acos': 'Acos', 'Asin': 'Asin'}), '(self, 
amp=1.0, Acos=Acos, Asin=Asin)\n', (260587, 260624), False, 'from galpy import potential\n'), ((260756, 260809), 'galpy.potential.scf_compute_coeffs_axi', 'potential.scf_compute_coeffs_axi', (['axi_density2', '(10)', '(2)'], {}), '(axi_density2, 10, 2)\n', (260788, 260809), False, 'from galpy import potential\n'), ((260816, 260884), 'galpy.potential.SCFPotential.__init__', 'potential.SCFPotential.__init__', (['self'], {'amp': '(1.0)', 'Acos': 'Acos', 'Asin': 'Asin'}), '(self, amp=1.0, Acos=Acos, Asin=Asin)\n', (260847, 260884), False, 'from galpy import potential\n'), ((260991, 261054), 'galpy.potential.scf_compute_coeffs', 'potential.scf_compute_coeffs', (['scf_density', '(10)', '(10)'], {'phi_order': '(30)'}), '(scf_density, 10, 10, phi_order=30)\n', (261019, 261054), False, 'from galpy import potential\n'), ((261060, 261128), 'galpy.potential.SCFPotential.__init__', 'potential.SCFPotential.__init__', (['self'], {'amp': '(1.0)', 'Acos': 'Acos', 'Asin': 'Asin'}), '(self, amp=1.0, Acos=Acos, Asin=Asin)\n', (261091, 261128), False, 'from galpy import potential\n'), ((261275, 261333), 'galpy.potential.HomogeneousSpherePotential', 'potential.HomogeneousSpherePotential', ([], {'normalize': '(1.0)', 'R': '(1.1)'}), '(normalize=1.0, R=1.1)\n', (261311, 261333), False, 'from galpy import potential\n'), ((261600, 261658), 'galpy.potential.HomogeneousSpherePotential', 'potential.HomogeneousSpherePotential', ([], {'normalize': '(1.0)', 'R': '(1.1)'}), '(normalize=1.0, R=1.1)\n', (261636, 261658), False, 'from galpy import potential\n'), ((262598, 262631), 'galpy.potential.Potential.__init__', 'Potential.__init__', (['self'], {'amp': '(1.0)'}), '(self, amp=1.0)\n', (262616, 262631), False, 'from galpy.potential import Potential, evaluatePotentials, evaluateRforces, evaluatezforces, evaluatephiforces, evaluateR2derivs, evaluatez2derivs, evaluateRzderivs, evaluateDensities, _isNonAxi, evaluateSurfaceDensities\n'), ((262653, 262677), 'galpy.potential._isNonAxi', '_isNonAxi', 
(['self._potlist'], {}), '(self._potlist)\n', (262662, 262677), False, 'from galpy.potential import Potential, evaluatePotentials, evaluateRforces, evaluatezforces, evaluatephiforces, evaluateR2derivs, evaluatez2derivs, evaluateRzderivs, evaluateDensities, _isNonAxi, evaluateSurfaceDensities\n'), ((262764, 262835), 'galpy.potential.evaluatePotentials', 'evaluatePotentials', (['self._potlist', 'R', 'z'], {'phi': 'phi', 't': 't', 'dR': 'dR', 'dphi': 'dphi'}), '(self._potlist, R, z, phi=phi, t=t, dR=dR, dphi=dphi)\n', (262782, 262835), False, 'from galpy.potential import Potential, evaluatePotentials, evaluateRforces, evaluatezforces, evaluatephiforces, evaluateR2derivs, evaluatez2derivs, evaluateRzderivs, evaluateDensities, _isNonAxi, evaluateSurfaceDensities\n'), ((262919, 262969), 'galpy.potential.evaluateRforces', 'evaluateRforces', (['self._potlist', 'R', 'z'], {'phi': 'phi', 't': 't'}), '(self._potlist, R, z, phi=phi, t=t)\n', (262934, 262969), False, 'from galpy.potential import Potential, evaluatePotentials, evaluateRforces, evaluatezforces, evaluatephiforces, evaluateR2derivs, evaluatez2derivs, evaluateRzderivs, evaluateDensities, _isNonAxi, evaluateSurfaceDensities\n'), ((263022, 263074), 'galpy.potential.evaluatephiforces', 'evaluatephiforces', (['self._potlist', 'R', 'z'], {'phi': 'phi', 't': 't'}), '(self._potlist, R, z, phi=phi, t=t)\n', (263039, 263074), False, 'from galpy.potential import Potential, evaluatePotentials, evaluateRforces, evaluatezforces, evaluatephiforces, evaluateR2derivs, evaluatez2derivs, evaluateRzderivs, evaluateDensities, _isNonAxi, evaluateSurfaceDensities\n'), ((263125, 263175), 'galpy.potential.evaluatezforces', 'evaluatezforces', (['self._potlist', 'R', 'z'], {'phi': 'phi', 't': 't'}), '(self._potlist, R, z, phi=phi, t=t)\n', (263140, 263175), False, 'from galpy.potential import Potential, evaluatePotentials, evaluateRforces, evaluatezforces, evaluatephiforces, evaluateR2derivs, evaluatez2derivs, evaluateRzderivs, 
evaluateDensities, _isNonAxi, evaluateSurfaceDensities\n'), ((263227, 263278), 'galpy.potential.evaluateR2derivs', 'evaluateR2derivs', (['self._potlist', 'R', 'z'], {'phi': 'phi', 't': 't'}), '(self._potlist, R, z, phi=phi, t=t)\n', (263243, 263278), False, 'from galpy.potential import Potential, evaluatePotentials, evaluateRforces, evaluatezforces, evaluatephiforces, evaluateR2derivs, evaluatez2derivs, evaluateRzderivs, evaluateDensities, _isNonAxi, evaluateSurfaceDensities\n'), ((263330, 263381), 'galpy.potential.evaluatez2derivs', 'evaluatez2derivs', (['self._potlist', 'R', 'z'], {'phi': 'phi', 't': 't'}), '(self._potlist, R, z, phi=phi, t=t)\n', (263346, 263381), False, 'from galpy.potential import Potential, evaluatePotentials, evaluateRforces, evaluatezforces, evaluatephiforces, evaluateR2derivs, evaluatez2derivs, evaluateRzderivs, evaluateDensities, _isNonAxi, evaluateSurfaceDensities\n'), ((263433, 263484), 'galpy.potential.evaluateRzderivs', 'evaluateRzderivs', (['self._potlist', 'R', 'z'], {'phi': 'phi', 't': 't'}), '(self._potlist, R, z, phi=phi, t=t)\n', (263449, 263484), False, 'from galpy.potential import Potential, evaluatePotentials, evaluateRforces, evaluatezforces, evaluatephiforces, evaluateR2derivs, evaluatez2derivs, evaluateRzderivs, evaluateDensities, _isNonAxi, evaluateSurfaceDensities\n'), ((263538, 263599), 'galpy.potential.evaluatePotentials', 'evaluatePotentials', (['self._potlist', 'R', 'z'], {'phi': 'phi', 't': 't', 'dphi': '(2)'}), '(self._potlist, R, z, phi=phi, t=t, dphi=2)\n', (263556, 263599), False, 'from galpy.potential import Potential, evaluatePotentials, evaluateRforces, evaluatezforces, evaluatephiforces, evaluateR2derivs, evaluatez2derivs, evaluateRzderivs, evaluateDensities, _isNonAxi, evaluateSurfaceDensities\n'), ((263652, 263719), 'galpy.potential.evaluatePotentials', 'evaluatePotentials', (['self._potlist', 'R', 'z'], {'phi': 'phi', 't': 't', 'dR': '(1)', 'dphi': '(1)'}), '(self._potlist, R, z, phi=phi, t=t, dR=1, 
dphi=1)\n', (263670, 263719), False, 'from galpy.potential import Potential, evaluatePotentials, evaluateRforces, evaluatezforces, evaluatephiforces, evaluateR2derivs, evaluatez2derivs, evaluateRzderivs, evaluateDensities, _isNonAxi, evaluateSurfaceDensities\n'), ((263820, 263899), 'galpy.potential.evaluateDensities', 'evaluateDensities', (['self._potlist', 'R', 'z'], {'phi': 'phi', 't': 't', 'forcepoisson': 'forcepoisson'}), '(self._potlist, R, z, phi=phi, t=t, forcepoisson=forcepoisson)\n', (263837, 263899), False, 'from galpy.potential import Potential, evaluatePotentials, evaluateRforces, evaluatezforces, evaluatephiforces, evaluateR2derivs, evaluatez2derivs, evaluateRzderivs, evaluateDensities, _isNonAxi, evaluateSurfaceDensities\n'), ((264004, 264095), 'galpy.potential.evaluateSurfaceDensities', 'evaluateSurfaceDensities', (['self._potlist', 'R', 'z'], {'phi': 'phi', 't': 't', 'forcepoisson': 'forcepoisson'}), '(self._potlist, R, z, phi=phi, t=t, forcepoisson=\n forcepoisson)\n', (264028, 264095), False, 'from galpy.potential import Potential, evaluatePotentials, evaluateRforces, evaluatezforces, evaluatephiforces, evaluateR2derivs, evaluatez2derivs, evaluateRzderivs, evaluateDensities, _isNonAxi, evaluateSurfaceDensities\n'), ((264165, 264198), 'galpy.potential.vcirc', 'potential.vcirc', (['self._potlist', 'R'], {}), '(self._potlist, R)\n', (264180, 264198), False, 'from galpy import potential\n'), ((264655, 264694), 'galpy.potential.planarPotential.__init__', 'planarPotential.__init__', (['self'], {'amp': '(1.0)'}), '(self, amp=1.0)\n', (264679, 264694), False, 'from galpy.potential import planarPotential, evaluateplanarPotentials, evaluateplanarRforces, evaluateplanarphiforces, evaluateplanarR2derivs\n'), ((264716, 264740), 'galpy.potential._isNonAxi', '_isNonAxi', (['self._potlist'], {}), '(self._potlist)\n', (264725, 264740), False, 'from galpy.potential import Potential, evaluatePotentials, evaluateRforces, evaluatezforces, evaluatephiforces, 
evaluateR2derivs, evaluatez2derivs, evaluateRzderivs, evaluateDensities, _isNonAxi, evaluateSurfaceDensities\n'), ((264825, 264881), 'galpy.potential.evaluateplanarPotentials', 'evaluateplanarPotentials', (['self._potlist', 'R'], {'phi': 'phi', 't': 't'}), '(self._potlist, R, phi=phi, t=t)\n', (264849, 264881), False, 'from galpy.potential import planarPotential, evaluateplanarPotentials, evaluateplanarRforces, evaluateplanarphiforces, evaluateplanarR2derivs\n'), ((264931, 264984), 'galpy.potential.evaluateplanarRforces', 'evaluateplanarRforces', (['self._potlist', 'R'], {'phi': 'phi', 't': 't'}), '(self._potlist, R, phi=phi, t=t)\n', (264952, 264984), False, 'from galpy.potential import planarPotential, evaluateplanarPotentials, evaluateplanarRforces, evaluateplanarphiforces, evaluateplanarR2derivs\n'), ((265036, 265091), 'galpy.potential.evaluateplanarphiforces', 'evaluateplanarphiforces', (['self._potlist', 'R'], {'phi': 'phi', 't': 't'}), '(self._potlist, R, phi=phi, t=t)\n', (265059, 265091), False, 'from galpy.potential import planarPotential, evaluateplanarPotentials, evaluateplanarRforces, evaluateplanarphiforces, evaluateplanarR2derivs\n'), ((265142, 265196), 'galpy.potential.evaluateplanarR2derivs', 'evaluateplanarR2derivs', (['self._potlist', 'R'], {'phi': 'phi', 't': 't'}), '(self._potlist, R, phi=phi, t=t)\n', (265164, 265196), False, 'from galpy.potential import planarPotential, evaluateplanarPotentials, evaluateplanarRforces, evaluateplanarphiforces, evaluateplanarR2derivs\n'), ((265249, 265313), 'galpy.potential.evaluateplanarPotentials', 'evaluateplanarPotentials', (['self._potlist', 'R'], {'phi': 'phi', 't': 't', 'dphi': '(2)'}), '(self._potlist, R, phi=phi, t=t, dphi=2)\n', (265273, 265313), False, 'from galpy.potential import planarPotential, evaluateplanarPotentials, evaluateplanarRforces, evaluateplanarphiforces, evaluateplanarR2derivs\n'), ((265365, 265435), 'galpy.potential.evaluateplanarPotentials', 'evaluateplanarPotentials', 
(['self._potlist', 'R'], {'phi': 'phi', 't': 't', 'dR': '(1)', 'dphi': '(1)'}), '(self._potlist, R, phi=phi, t=t, dR=1, dphi=1)\n', (265389, 265435), False, 'from galpy.potential import planarPotential, evaluateplanarPotentials, evaluateplanarRforces, evaluateplanarphiforces, evaluateplanarR2derivs\n'), ((265510, 265543), 'galpy.potential.vcirc', 'potential.vcirc', (['self._potlist', 'R'], {}), '(self._potlist, R)\n', (265525, 265543), False, 'from galpy import potential\n'), ((271049, 271084), 'galpy.potential.RZToverticalPotential', 'RZToverticalPotential', (['potlist', '(1.0)'], {}), '(potlist, 1.0)\n', (271070, 271084), False, 'from galpy.potential import linearPotential, evaluatelinearPotentials, evaluatelinearForces, RZToverticalPotential\n'), ((271091, 271130), 'galpy.potential.linearPotential.__init__', 'linearPotential.__init__', (['self'], {'amp': '(1.0)'}), '(self, amp=1.0)\n', (271115, 271130), False, 'from galpy.potential import linearPotential, evaluatelinearPotentials, evaluatelinearForces, RZToverticalPotential\n'), ((271213, 271260), 'galpy.potential.evaluatelinearPotentials', 'evaluatelinearPotentials', (['self._potlist', 'R'], {'t': 't'}), '(self._potlist, R, t=t)\n', (271237, 271260), False, 'from galpy.potential import linearPotential, evaluatelinearPotentials, evaluatelinearForces, RZToverticalPotential\n'), ((271303, 271346), 'galpy.potential.evaluatelinearForces', 'evaluatelinearForces', (['self._potlist', 'R'], {'t': 't'}), '(self._potlist, R, t=t)\n', (271323, 271346), False, 'from galpy.potential import linearPotential, evaluatelinearPotentials, evaluatelinearForces, RZToverticalPotential\n'), ((272230, 272272), 'galpy.orbit.Orbit', 'Orbit', (['[self._rc, 0.0, 1.0, 0.0, 0.0, 0.0]'], {}), '([self._rc, 0.0, 1.0, 0.0, 0.0, 0.0])\n', (272235, 272272), False, 'from galpy.orbit import Orbit\n'), ((272275, 272322), 'galpy.orbit.Orbit', 'Orbit', (['[self._rc, 0.0, 1.0, 0.0, 0.0, numpy.pi]'], {}), '([self._rc, 0.0, 1.0, 0.0, 0.0, numpy.pi])\n', 
(272280, 272322), False, 'from galpy.orbit import Orbit\n'), ((272326, 272375), 'galpy.potential.LogarithmicHaloPotential', 'potential.LogarithmicHaloPotential', ([], {'normalize': '(1.0)'}), '(normalize=1.0)\n', (272360, 272375), False, 'from galpy import potential\n'), ((272390, 272419), 'numpy.linspace', 'numpy.linspace', (['(0.0)', 'maxt', 'nt'], {}), '(0.0, maxt, nt)\n', (272404, 272419), False, 'import numpy\n'), ((272534, 272569), 'galpy.potential.MovingObjectPotential', 'potential.MovingObjectPotential', (['o1'], {}), '(o1)\n', (272565, 272569), False, 'from galpy import potential\n'), ((272589, 272624), 'galpy.potential.MovingObjectPotential', 'potential.MovingObjectPotential', (['o2'], {}), '(o2)\n', (272620, 272624), False, 'from galpy import potential\n'), ((273031, 273073), 'galpy.orbit.Orbit', 'Orbit', (['[self._rc, 0.0, 1.0, 0.0, 0.0, 0.0]'], {}), '([self._rc, 0.0, 1.0, 0.0, 0.0, 0.0])\n', (273036, 273073), False, 'from galpy.orbit import Orbit\n'), ((273076, 273123), 'galpy.orbit.Orbit', 'Orbit', (['[self._rc, 0.0, 1.0, 0.0, 0.0, numpy.pi]'], {}), '([self._rc, 0.0, 1.0, 0.0, 0.0, numpy.pi])\n', (273081, 273123), False, 'from galpy.orbit import Orbit\n'), ((273127, 273176), 'galpy.potential.LogarithmicHaloPotential', 'potential.LogarithmicHaloPotential', ([], {'normalize': '(1.0)'}), '(normalize=1.0)\n', (273161, 273176), False, 'from galpy import potential\n'), ((273191, 273220), 'numpy.linspace', 'numpy.linspace', (['(0.0)', 'maxt', 'nt'], {}), '(0.0, maxt, nt)\n', (273205, 273220), False, 'import numpy\n'), ((273332, 273376), 'galpy.potential.PlummerPotential', 'potential.PlummerPotential', ([], {'amp': '(0.06)', 'b': '(0.01)'}), '(amp=0.06, b=0.01)\n', (273358, 273376), False, 'from galpy import potential\n'), ((273396, 273442), 'galpy.potential.MovingObjectPotential', 'potential.MovingObjectPotential', (['o1'], {'pot': 'oplum'}), '(o1, pot=oplum)\n', (273427, 273442), False, 'from galpy import potential\n'), ((273462, 273508), 
'galpy.potential.MovingObjectPotential', 'potential.MovingObjectPotential', (['o2'], {'pot': 'oplum'}), '(o2, pot=oplum)\n', (273493, 273508), False, 'from galpy import potential\n'), ((274594, 274639), 'galpy.potential.DehnenBarPotential', 'DehnenBarPotential', ([], {'tform': '(-100.0)', 'tsteady': '(1.0)'}), '(tform=-100.0, tsteady=1.0)\n', (274612, 274639), False, 'from galpy.potential import DehnenBarPotential, CosmphiDiskPotential, EllipticalDiskPotential, SteadyLogSpiralPotential, TransientLogSpiralPotential, HenonHeilesPotential\n'), ((275084, 275216), 'galpy.potential.DehnenBarPotential', 'DehnenBarPotential', ([], {'omegab': '(1.9)', 'rb': '(0.4)', 'barphi': '(25.0 * numpy.pi / 180.0)', 'beta': '(0.0)', 'alpha': '(0.01)', 'Af': '(0.04)', 'tform': '(-99.0)', 'tsteady': '(1.0)'}), '(omegab=1.9, rb=0.4, barphi=25.0 * numpy.pi / 180.0, beta\n =0.0, alpha=0.01, Af=0.04, tform=-99.0, tsteady=1.0)\n', (275102, 275216), False, 'from galpy.potential import DehnenBarPotential, CosmphiDiskPotential, EllipticalDiskPotential, SteadyLogSpiralPotential, TransientLogSpiralPotential, HenonHeilesPotential\n'), ((275310, 275397), 'galpy.potential.DehnenSmoothWrapperPotential.__new__', 'DehnenSmoothWrapperPotential.__new__', (['cls'], {'amp': '(1.0)', 'pot': 'dpn', 'tform': '(0.5)', 'tsteady': '(0.5)'}), '(cls, amp=1.0, pot=dpn, tform=0.5,\n tsteady=0.5)\n', (275346, 275397), False, 'from galpy.potential import DehnenSmoothWrapperPotential, SolidBodyRotationWrapperPotential, CorotatingRotationWrapperPotential, GaussianAmplitudeWrapperPotential\n'), ((275698, 275830), 'galpy.potential.DehnenBarPotential', 'DehnenBarPotential', ([], {'omegab': '(1.9)', 'rb': '(0.4)', 'barphi': '(25.0 * numpy.pi / 180.0)', 'beta': '(0.0)', 'alpha': '(0.01)', 'Af': '(0.04)', 'tform': '(-99.0)', 'tsteady': '(1.0)'}), '(omegab=1.9, rb=0.4, barphi=25.0 * numpy.pi / 180.0, beta\n =0.0, alpha=0.01, Af=0.04, tform=-99.0, tsteady=1.0)\n', (275716, 275830), False, 'from galpy.potential import 
DehnenBarPotential, CosmphiDiskPotential, EllipticalDiskPotential, SteadyLogSpiralPotential, TransientLogSpiralPotential, HenonHeilesPotential\n'), ((275924, 276013), 'galpy.potential.DehnenSmoothWrapperPotential.__new__', 'DehnenSmoothWrapperPotential.__new__', (['cls'], {'amp': '(1.0)', 'pot': 'dpn', 'tform': '(-1.0)', 'tsteady': '(1.01)'}), '(cls, amp=1.0, pot=dpn, tform=-1.0,\n tsteady=1.01)\n', (275960, 276013), False, 'from galpy.potential import DehnenSmoothWrapperPotential, SolidBodyRotationWrapperPotential, CorotatingRotationWrapperPotential, GaussianAmplitudeWrapperPotential\n'), ((276245, 276377), 'galpy.potential.DehnenBarPotential', 'DehnenBarPotential', ([], {'omegab': '(1.9)', 'rb': '(0.4)', 'barphi': '(25.0 * numpy.pi / 180.0)', 'beta': '(0.0)', 'alpha': '(0.01)', 'Af': '(0.04)', 'tform': '(-99.0)', 'tsteady': '(1.0)'}), '(omegab=1.9, rb=0.4, barphi=25.0 * numpy.pi / 180.0, beta\n =0.0, alpha=0.01, Af=0.04, tform=-99.0, tsteady=1.0)\n', (276263, 276377), False, 'from galpy.potential import DehnenBarPotential, CosmphiDiskPotential, EllipticalDiskPotential, SteadyLogSpiralPotential, TransientLogSpiralPotential, HenonHeilesPotential\n'), ((276471, 276559), 'galpy.potential.DehnenSmoothWrapperPotential.__new__', 'DehnenSmoothWrapperPotential.__new__', (['cls'], {'amp': '(1.0)', 'pot': 'dpn', 'tform': '(-5.0)', 'tsteady': '(2.0)'}), '(cls, amp=1.0, pot=dpn, tform=-5.0,\n tsteady=2.0)\n', (276507, 276559), False, 'from galpy.potential import DehnenSmoothWrapperPotential, SolidBodyRotationWrapperPotential, CorotatingRotationWrapperPotential, GaussianAmplitudeWrapperPotential\n'), ((276792, 276924), 'galpy.potential.DehnenBarPotential', 'DehnenBarPotential', ([], {'omegab': '(1.9)', 'rb': '(0.4)', 'barphi': '(25.0 * numpy.pi / 180.0)', 'beta': '(0.0)', 'alpha': '(0.01)', 'Af': '(0.04)', 'tform': '(-99.0)', 'tsteady': '(1.0)'}), '(omegab=1.9, rb=0.4, barphi=25.0 * numpy.pi / 180.0, beta\n =0.0, alpha=0.01, Af=0.04, tform=-99.0, tsteady=1.0)\n', (276810, 
276924), False, 'from galpy.potential import DehnenBarPotential, CosmphiDiskPotential, EllipticalDiskPotential, SteadyLogSpiralPotential, TransientLogSpiralPotential, HenonHeilesPotential\n'), ((277018, 277118), 'galpy.potential.DehnenSmoothWrapperPotential.__new__', 'DehnenSmoothWrapperPotential.__new__', (['cls'], {'amp': '(1.0)', 'pot': 'dpn', 'tform': '(-0.5)', 'tsteady': '(1.0)', 'decay': '(True)'}), '(cls, amp=1.0, pot=dpn, tform=-0.5,\n tsteady=1.0, decay=True)\n', (277054, 277118), False, 'from galpy.potential import DehnenSmoothWrapperPotential, SolidBodyRotationWrapperPotential, CorotatingRotationWrapperPotential, GaussianAmplitudeWrapperPotential\n'), ((277284, 277416), 'galpy.potential.DehnenBarPotential', 'DehnenBarPotential', ([], {'omegab': '(1.9)', 'rb': '(0.4)', 'barphi': '(25.0 * numpy.pi / 180.0)', 'beta': '(0.0)', 'alpha': '(0.01)', 'Af': '(0.04)', 'tform': '(-99.0)', 'tsteady': '(1.0)'}), '(omegab=1.9, rb=0.4, barphi=25.0 * numpy.pi / 180.0, beta\n =0.0, alpha=0.01, Af=0.04, tform=-99.0, tsteady=1.0)\n', (277302, 277416), False, 'from galpy.potential import DehnenBarPotential, CosmphiDiskPotential, EllipticalDiskPotential, SteadyLogSpiralPotential, TransientLogSpiralPotential, HenonHeilesPotential\n'), ((277953, 278085), 'galpy.potential.DehnenBarPotential', 'DehnenBarPotential', ([], {'omegab': '(1.9)', 'rb': '(0.4)', 'barphi': '(25.0 * numpy.pi / 180.0)', 'beta': '(0.0)', 'alpha': '(0.01)', 'Af': '(0.04)', 'tform': '(-99.0)', 'tsteady': '(1.0)'}), '(omegab=1.9, rb=0.4, barphi=25.0 * numpy.pi / 180.0, beta\n =0.0, alpha=0.01, Af=0.04, tform=-99.0, tsteady=1.0)\n', (277971, 278085), False, 'from galpy.potential import DehnenBarPotential, CosmphiDiskPotential, EllipticalDiskPotential, SteadyLogSpiralPotential, TransientLogSpiralPotential, HenonHeilesPotential\n'), ((278566, 278698), 'galpy.potential.DehnenBarPotential', 'DehnenBarPotential', ([], {'omegab': '(1.9)', 'rb': '(0.4)', 'barphi': '(25.0 * numpy.pi / 180.0)', 'beta': '(0.0)', 'alpha': 
'(0.01)', 'Af': '(0.04)', 'tform': '(-99.0)', 'tsteady': '(1.0)'}), '(omegab=1.9, rb=0.4, barphi=25.0 * numpy.pi / 180.0, beta\n =0.0, alpha=0.01, Af=0.04, tform=-99.0, tsteady=1.0)\n', (278584, 278698), False, 'from galpy.potential import DehnenBarPotential, CosmphiDiskPotential, EllipticalDiskPotential, SteadyLogSpiralPotential, TransientLogSpiralPotential, HenonHeilesPotential\n'), ((279814, 279867), 'galpy.potential.SpiralArmsPotential', 'potential.SpiralArmsPotential', ([], {'omega': '(0.0)', 'phi_ref': '(0.0)'}), '(omega=0.0, phi_ref=0.0)\n', (279843, 279867), False, 'from galpy import potential\n'), ((281776, 281829), 'galpy.potential.SpiralArmsPotential', 'potential.SpiralArmsPotential', ([], {'omega': '(0.0)', 'phi_ref': '(0.0)'}), '(omega=0.0, phi_ref=0.0)\n', (281805, 281829), False, 'from galpy import potential\n'), ((283498, 283543), 'galpy.potential.DehnenBarPotential', 'DehnenBarPotential', ([], {'tform': '(-100.0)', 'tsteady': '(1.0)'}), '(tform=-100.0, tsteady=1.0)\n', (283516, 283543), False, 'from galpy.potential import DehnenBarPotential, CosmphiDiskPotential, EllipticalDiskPotential, SteadyLogSpiralPotential, TransientLogSpiralPotential, HenonHeilesPotential\n'), ((283572, 283659), 'galpy.potential.GaussianAmplitudeWrapperPotential.__new__', 'GaussianAmplitudeWrapperPotential.__new__', (['cls'], {'amp': '(1.0)', 'pot': 'dpn', 'to': '(0.0)', 'sigma': '(1.0)'}), '(cls, amp=1.0, pot=dpn, to=0.0,\n sigma=1.0)\n', (283613, 283659), False, 'from galpy.potential import DehnenSmoothWrapperPotential, SolidBodyRotationWrapperPotential, CorotatingRotationWrapperPotential, GaussianAmplitudeWrapperPotential\n'), ((283802, 283934), 'galpy.potential.DehnenBarPotential', 'DehnenBarPotential', ([], {'omegab': '(1.9)', 'rb': '(0.4)', 'barphi': '(25.0 * numpy.pi / 180.0)', 'beta': '(0.0)', 'alpha': '(0.01)', 'Af': '(0.04)', 'tform': '(-99.0)', 'tsteady': '(1.0)'}), '(omegab=1.9, rb=0.4, barphi=25.0 * numpy.pi / 180.0, beta\n =0.0, alpha=0.01, Af=0.04, 
tform=-99.0, tsteady=1.0)\n', (283820, 283934), False, 'from galpy.potential import DehnenBarPotential, CosmphiDiskPotential, EllipticalDiskPotential, SteadyLogSpiralPotential, TransientLogSpiralPotential, HenonHeilesPotential\n'), ((284427, 284559), 'galpy.potential.DehnenBarPotential', 'DehnenBarPotential', ([], {'omegab': '(1.9)', 'rb': '(0.4)', 'barphi': '(25.0 * numpy.pi / 180.0)', 'beta': '(0.0)', 'alpha': '(0.01)', 'Af': '(0.04)', 'tform': '(-99.0)', 'tsteady': '(1.0)'}), '(omegab=1.9, rb=0.4, barphi=25.0 * numpy.pi / 180.0, beta\n =0.0, alpha=0.01, Af=0.04, tform=-99.0, tsteady=1.0)\n', (284445, 284559), False, 'from galpy.potential import DehnenBarPotential, CosmphiDiskPotential, EllipticalDiskPotential, SteadyLogSpiralPotential, TransientLogSpiralPotential, HenonHeilesPotential\n'), ((67373, 67420), 'galpy.potential.evaluatelinearPotentials', 'potential.evaluatelinearPotentials', (['pot', 'x'], {'t': 't'}), '(pot, x, t=t)\n', (67407, 67420), False, 'from galpy import potential\n'), ((67700, 67743), 'galpy.potential.evaluatelinearForces', 'potential.evaluatelinearForces', (['pot', 'x'], {'t': 't'}), '(pot, x, t=t)\n', (67730, 67743), False, 'from galpy import potential\n'), ((95642, 95749), 'galpy.potential.EllipsoidalPotential.EllipsoidalPotential.__init__', 'EllipsoidalPotential.__init__', (['self'], {'amp': 'amp', 'b': 'b', 'c': 'c', 'zvec': 'zvec', 'pa': 'pa', 'glorder': 'glorder', 'ro': 'ro', 'vo': 'vo'}), '(self, amp=amp, b=b, c=c, zvec=zvec, pa=pa,\n glorder=glorder, ro=ro, vo=vo)\n', (95671, 95749), False, 'from galpy.potential.EllipsoidalPotential import EllipsoidalPotential\n'), ((113983, 114004), 'numpy.fabs', 'numpy.fabs', (['(vcs - 1.0)'], {}), '(vcs - 1.0)\n', (113993, 114004), False, 'import numpy\n'), ((114721, 114742), 'numpy.fabs', 'numpy.fabs', (['(vcs - 1.0)'], {}), '(vcs - 1.0)\n', (114731, 114742), False, 'import numpy\n'), ((154727, 154752), 'numpy.fabs', 'numpy.fabs', (['(od - overdens)'], {}), '(od - overdens)\n', (154737, 154752), 
False, 'import numpy\n'), ((155121, 155174), 'galpy.util.conversion.dens_in_meanmatterdens', 'conversion.dens_in_meanmatterdens', (['vo', 'ro'], {'H': 'H', 'Om': 'Om'}), '(vo, ro, H=H, Om=Om)\n', (155154, 155174), False, 'from galpy.util import conversion\n'), ((155187, 155212), 'numpy.fabs', 'numpy.fabs', (['(od - overdens)'], {}), '(od - overdens)\n', (155197, 155212), False, 'import numpy\n'), ((156994, 157009), 'numpy.sqrt', 'numpy.sqrt', (['(4.0)'], {}), '(4.0)\n', (157004, 157009), False, 'import numpy\n'), ((157272, 157288), 'numpy.sqrt', 'numpy.sqrt', (['(10.0)'], {}), '(10.0)\n', (157282, 157288), False, 'import numpy\n'), ((165330, 165364), 'galpy.potential.nemo_accpars', 'potential.nemo_accpars', (['mp', 'vo', 'ro'], {}), '(mp, vo, ro)\n', (165352, 165364), False, 'from galpy import potential\n'), ((165776, 165810), 'galpy.potential.nemo_accpars', 'potential.nemo_accpars', (['pp', 'vo', 'ro'], {}), '(pp, vo, ro)\n', (165798, 165810), False, 'from galpy import potential\n'), ((166311, 166351), 'galpy.potential.nemo_accpars', 'potential.nemo_accpars', (['[mp, pp]', 'vo', 'ro'], {}), '([mp, pp], vo, ro)\n', (166333, 166351), False, 'from galpy import potential\n'), ((184426, 184441), 'numpy.sqrt', 'numpy.sqrt', (['(2.0)'], {}), '(2.0)\n', (184436, 184441), False, 'import numpy\n'), ((184441, 184456), 'numpy.sqrt', 'numpy.sqrt', (['(2.0)'], {}), '(2.0)\n', (184451, 184456), False, 'import numpy\n'), ((195398, 195457), 'galpy.potential.HernquistPotential', 'potential.HernquistPotential', ([], {'amp': '(0.55)', 'a': '(1.3)', 'ro': 'ro', 'vo': 'vo'}), '(amp=0.55, a=1.3, ro=ro, vo=vo)\n', (195426, 195457), False, 'from galpy import potential\n'), ((197835, 197894), 'galpy.potential.HernquistPotential', 'potential.HernquistPotential', ([], {'amp': '(0.55)', 'a': '(1.3)', 'ro': 'ro', 'vo': 'vo'}), '(amp=0.55, a=1.3, ro=ro, vo=vo)\n', (197863, 197894), False, 'from galpy import potential\n'), ((198463, 198515), 'galpy.potential.HernquistPotential', 
'potential.HernquistPotential', ([], {'amp': '(0.55)', 'a': '(1.3)', 'ro': 'ro'}), '(amp=0.55, a=1.3, ro=ro)\n', (198491, 198515), False, 'from galpy import potential\n'), ((198961, 199013), 'galpy.potential.HernquistPotential', 'potential.HernquistPotential', ([], {'amp': '(0.55)', 'a': '(1.3)', 'vo': 'vo'}), '(amp=0.55, a=1.3, vo=vo)\n', (198989, 199013), False, 'from galpy import potential\n'), ((200440, 200470), 'galpy.potential.DehnenBarPotential', 'potential.DehnenBarPotential', ([], {}), '()\n', (200468, 200470), False, 'from galpy import potential\n'), ((208325, 208358), 'numpy.fabs', 'numpy.fabs', (['(tij[0][1] - tij[1][0])'], {}), '(tij[0][1] - tij[1][0])\n', (208335, 208358), False, 'import numpy\n'), ((208448, 208481), 'numpy.fabs', 'numpy.fabs', (['(tij[0][2] - tij[2][0])'], {}), '(tij[0][2] - tij[2][0])\n', (208458, 208481), False, 'import numpy\n'), ((208571, 208604), 'numpy.fabs', 'numpy.fabs', (['(tij[1][2] - tij[2][1])'], {}), '(tij[1][2] - tij[2][1])\n', (208581, 208604), False, 'import numpy\n'), ((210459, 210536), 'galpy.potential.evaluatePotentials', 'potential.evaluatePotentials', (['potential.MWPotential2014', 'R_a_little_less', '(0.0)'], {}), '(potential.MWPotential2014, R_a_little_less, 0.0)\n', (210487, 210536), False, 'from galpy import potential\n'), ((210678, 210755), 'galpy.potential.evaluatePotentials', 'potential.evaluatePotentials', (['potential.MWPotential2014', 'R_a_little_more', '(0.0)'], {}), '(potential.MWPotential2014, R_a_little_more, 0.0)\n', (210706, 210755), False, 'from galpy import potential\n'), ((211376, 211453), 'galpy.potential.evaluatePotentials', 'potential.evaluatePotentials', (['potential.MWPotential2014', 'R_a_little_less', '(0.0)'], {}), '(potential.MWPotential2014, R_a_little_less, 0.0)\n', (211404, 211453), False, 'from galpy import potential\n'), ((211595, 211672), 'galpy.potential.evaluatePotentials', 'potential.evaluatePotentials', (['potential.MWPotential2014', 'R_a_little_more', '(0.0)'], {}), 
'(potential.MWPotential2014, R_a_little_more, 0.0)\n', (211623, 211672), False, 'from galpy import potential\n'), ((212271, 212326), 'galpy.potential.evaluatePotentials', 'potential.evaluatePotentials', (['pot', 'R_a_little_less', '(0.0)'], {}), '(pot, R_a_little_less, 0.0)\n', (212299, 212326), False, 'from galpy import potential\n'), ((212468, 212523), 'galpy.potential.evaluatePotentials', 'potential.evaluatePotentials', (['pot', 'R_a_little_more', '(0.0)'], {}), '(pot, R_a_little_more, 0.0)\n', (212496, 212523), False, 'from galpy import potential\n'), ((213064, 213129), 'galpy.potential.zvc_range', 'potential.zvc_range', (['potential.MWPotential2014', 'E', '(Lzmax + 0.0001)'], {}), '(potential.MWPotential2014, E, Lzmax + 0.0001)\n', (213083, 213129), False, 'from galpy import potential\n'), ((213372, 213425), 'galpy.potential.zvc', 'potential.zvc', (['potential.MWPotential2014', 'Rmin', 'E', 'Lz'], {}), '(potential.MWPotential2014, Rmin, E, Lz)\n', (213385, 213425), False, 'from galpy import potential\n'), ((213508, 213561), 'galpy.potential.zvc', 'potential.zvc', (['potential.MWPotential2014', 'Rmax', 'E', 'Lz'], {}), '(potential.MWPotential2014, Rmax, E, Lz)\n', (213521, 213561), False, 'from galpy import potential\n'), ((213769, 213822), 'galpy.potential.zvc', 'potential.zvc', (['potential.MWPotential2014', 'Rmin', 'E', 'Lz'], {}), '(potential.MWPotential2014, Rmin, E, Lz)\n', (213782, 213822), False, 'from galpy import potential\n'), ((213905, 213958), 'galpy.potential.zvc', 'potential.zvc', (['potential.MWPotential2014', 'Rmax', 'E', 'Lz'], {}), '(potential.MWPotential2014, Rmax, E, Lz)\n', (213918, 213958), False, 'from galpy import potential\n'), ((224892, 224913), 'numpy.atleast_1d', 'numpy.atleast_1d', (['(1.0)'], {}), '(1.0)\n', (224908, 224913), False, 'import numpy\n'), ((224913, 224934), 'numpy.atleast_1d', 'numpy.atleast_1d', (['(0.0)'], {}), '(0.0)\n', (224929, 224934), False, 'import numpy\n'), ((225380, 225436), 
'galpy.potential.LogarithmicHaloPotential', 'potential.LogarithmicHaloPotential', ([], {'normalize': '(1.0)', 'q': '(0.9)'}), '(normalize=1.0, q=0.9)\n', (225414, 225436), False, 'from galpy import potential\n'), ((225662, 225718), 'galpy.potential.LogarithmicHaloPotential', 'potential.LogarithmicHaloPotential', ([], {'normalize': '(1.0)', 'q': '(0.9)'}), '(normalize=1.0, q=0.9)\n', (225696, 225718), False, 'from galpy import potential\n'), ((226661, 226717), 'galpy.potential.LogarithmicHaloPotential', 'potential.LogarithmicHaloPotential', ([], {'normalize': '(1.0)', 'q': '(0.9)'}), '(normalize=1.0, q=0.9)\n', (226695, 226717), False, 'from galpy import potential\n'), ((226737, 226798), 'galpy.potential.MiyamotoNagaiPotential', 'potential.MiyamotoNagaiPotential', ([], {'normalize': '(0.2)', 'a': '(0.4)', 'b': '(0.1)'}), '(normalize=0.2, a=0.4, b=0.1)\n', (226769, 226798), False, 'from galpy import potential\n'), ((226818, 226868), 'galpy.potential.HernquistPotential', 'potential.HernquistPotential', ([], {'normalize': '(0.4)', 'a': '(0.1)'}), '(normalize=0.4, a=0.1)\n', (226846, 226868), False, 'from galpy import potential\n'), ((227076, 227132), 'galpy.potential.LogarithmicHaloPotential', 'potential.LogarithmicHaloPotential', ([], {'normalize': '(1.0)', 'q': '(0.9)'}), '(normalize=1.0, q=0.9)\n', (227110, 227132), False, 'from galpy import potential\n'), ((227157, 227218), 'galpy.potential.MiyamotoNagaiPotential', 'potential.MiyamotoNagaiPotential', ([], {'normalize': '(0.2)', 'a': '(0.4)', 'b': '(0.1)'}), '(normalize=0.2, a=0.4, b=0.1)\n', (227189, 227218), False, 'from galpy import potential\n'), ((227243, 227293), 'galpy.potential.HernquistPotential', 'potential.HernquistPotential', ([], {'normalize': '(0.4)', 'a': '(0.1)'}), '(normalize=0.4, a=0.1)\n', (227271, 227293), False, 'from galpy import potential\n'), ((227887, 227943), 'galpy.potential.LogarithmicHaloPotential', 'potential.LogarithmicHaloPotential', ([], {'normalize': '(1.0)', 'q': '(0.9)'}), 
'(normalize=1.0, q=0.9)\n', (227921, 227943), False, 'from galpy import potential\n'), ((228091, 228147), 'galpy.potential.LogarithmicHaloPotential', 'potential.LogarithmicHaloPotential', ([], {'normalize': '(1.0)', 'q': '(0.9)'}), '(normalize=1.0, q=0.9)\n', (228125, 228147), False, 'from galpy import potential\n'), ((229346, 229416), 'galpy.potential.LogarithmicHaloPotential', 'potential.LogarithmicHaloPotential', ([], {'normalize': '(1.0)', 'q': '(0.9)', 'ro': 'ro', 'vo': 'vo'}), '(normalize=1.0, q=0.9, ro=ro, vo=vo)\n', (229380, 229416), False, 'from galpy import potential\n'), ((229480, 229556), 'galpy.potential.LogarithmicHaloPotential', 'potential.LogarithmicHaloPotential', ([], {'normalize': '(1.0)', 'q': '(0.9)', 'ro': '(ro * 1.1)', 'vo': 'vo'}), '(normalize=1.0, q=0.9, ro=ro * 1.1, vo=vo)\n', (229514, 229556), False, 'from galpy import potential\n'), ((229620, 229696), 'galpy.potential.LogarithmicHaloPotential', 'potential.LogarithmicHaloPotential', ([], {'normalize': '(1.0)', 'q': '(0.9)', 'ro': 'ro', 'vo': '(vo * 1.1)'}), '(normalize=1.0, q=0.9, ro=ro, vo=vo * 1.1)\n', (229654, 229696), False, 'from galpy import potential\n'), ((229762, 229848), 'galpy.potential.LogarithmicHaloPotential', 'potential.LogarithmicHaloPotential', ([], {'normalize': '(1.0)', 'q': '(0.9)', 'ro': '(ro * 1.1)', 'vo': '(vo * 1.1)'}), '(normalize=1.0, q=0.9, ro=ro * 1.1, vo=vo *\n 1.1)\n', (229796, 229848), False, 'from galpy import potential\n'), ((230283, 230353), 'galpy.potential.LogarithmicHaloPotential', 'potential.LogarithmicHaloPotential', ([], {'normalize': '(1.0)', 'q': '(0.9)', 'ro': 'ro', 'vo': 'vo'}), '(normalize=1.0, q=0.9, ro=ro, vo=vo)\n', (230317, 230353), False, 'from galpy import potential\n'), ((230422, 230498), 'galpy.potential.LogarithmicHaloPotential', 'potential.LogarithmicHaloPotential', ([], {'normalize': '(1.0)', 'q': '(0.9)', 'ro': '(ro * 1.1)', 'vo': 'vo'}), '(normalize=1.0, q=0.9, ro=ro * 1.1, vo=vo)\n', (230456, 230498), False, 'from galpy import 
potential\n'), ((230567, 230643), 'galpy.potential.LogarithmicHaloPotential', 'potential.LogarithmicHaloPotential', ([], {'normalize': '(1.0)', 'q': '(0.9)', 'ro': 'ro', 'vo': '(vo * 1.1)'}), '(normalize=1.0, q=0.9, ro=ro, vo=vo * 1.1)\n', (230601, 230643), False, 'from galpy import potential\n'), ((230714, 230800), 'galpy.potential.LogarithmicHaloPotential', 'potential.LogarithmicHaloPotential', ([], {'normalize': '(1.0)', 'q': '(0.9)', 'ro': '(ro * 1.1)', 'vo': '(vo * 1.1)'}), '(normalize=1.0, q=0.9, ro=ro * 1.1, vo=vo *\n 1.1)\n', (230748, 230800), False, 'from galpy import potential\n'), ((231911, 231942), 'numpy.geomspace', 'numpy.geomspace', (['(0.01)', '(5.0)', '(201)'], {}), '(0.01, 5.0, 201)\n', (231926, 231942), False, 'import numpy\n'), ((259286, 259310), 'numpy.power', 'numpy.power', (['(a + r)', '(-4.0)'], {}), '(a + r, -4.0)\n', (259297, 259310), False, 'import numpy\n'), ((274530, 274582), 'galpy.potential.WrapperPotential.parentWrapperPotential.__new__', 'parentWrapperPotential.__new__', (['cls', '*args'], {}), '(cls, *args, **kwargs)\n', (274560, 274582), False, 'from galpy.potential.WrapperPotential import parentWrapperPotential, WrapperPotential, planarWrapperPotential\n'), ((275020, 275072), 'galpy.potential.WrapperPotential.parentWrapperPotential.__new__', 'parentWrapperPotential.__new__', (['cls', '*args'], {}), '(cls, *args, **kwargs)\n', (275050, 275072), False, 'from galpy.potential.WrapperPotential import parentWrapperPotential, WrapperPotential, planarWrapperPotential\n'), ((275634, 275686), 'galpy.potential.WrapperPotential.parentWrapperPotential.__new__', 'parentWrapperPotential.__new__', (['cls', '*args'], {}), '(cls, *args, **kwargs)\n', (275664, 275686), False, 'from galpy.potential.WrapperPotential import parentWrapperPotential, WrapperPotential, planarWrapperPotential\n'), ((276181, 276233), 'galpy.potential.WrapperPotential.parentWrapperPotential.__new__', 'parentWrapperPotential.__new__', (['cls', '*args'], {}), '(cls, *args, 
**kwargs)\n', (276211, 276233), False, 'from galpy.potential.WrapperPotential import parentWrapperPotential, WrapperPotential, planarWrapperPotential\n'), ((276728, 276780), 'galpy.potential.WrapperPotential.parentWrapperPotential.__new__', 'parentWrapperPotential.__new__', (['cls', '*args'], {}), '(cls, *args, **kwargs)\n', (276758, 276780), False, 'from galpy.potential.WrapperPotential import parentWrapperPotential, WrapperPotential, planarWrapperPotential\n'), ((279334, 279386), 'galpy.potential.WrapperPotential.parentWrapperPotential.__new__', 'parentWrapperPotential.__new__', (['cls', '*args'], {}), '(cls, *args, **kwargs)\n', (279364, 279386), False, 'from galpy.potential.WrapperPotential import parentWrapperPotential, WrapperPotential, planarWrapperPotential\n'), ((279750, 279802), 'galpy.potential.WrapperPotential.parentWrapperPotential.__new__', 'parentWrapperPotential.__new__', (['cls', '*args'], {}), '(cls, *args, **kwargs)\n', (279780, 279802), False, 'from galpy.potential.WrapperPotential import parentWrapperPotential, WrapperPotential, planarWrapperPotential\n'), ((281712, 281764), 'galpy.potential.WrapperPotential.parentWrapperPotential.__new__', 'parentWrapperPotential.__new__', (['cls', '*args'], {}), '(cls, *args, **kwargs)\n', (281742, 281764), False, 'from galpy.potential.WrapperPotential import parentWrapperPotential, WrapperPotential, planarWrapperPotential\n'), ((283434, 283486), 'galpy.potential.WrapperPotential.parentWrapperPotential.__new__', 'parentWrapperPotential.__new__', (['cls', '*args'], {}), '(cls, *args, **kwargs)\n', (283464, 283486), False, 'from galpy.potential.WrapperPotential import parentWrapperPotential, WrapperPotential, planarWrapperPotential\n'), ((285202, 285254), 'galpy.potential.WrapperPotential.parentWrapperPotential.__new__', 'parentWrapperPotential.__new__', (['cls', '*args'], {}), '(cls, *args, **kwargs)\n', (285232, 285254), False, 'from galpy.potential.WrapperPotential import parentWrapperPotential, 
WrapperPotential, planarWrapperPotential\n'), ((13500, 13554), 'galpy.potential.evaluatezforces', 'potential.evaluatezforces', (['tp', 'Rs[ii]', 'Zs[jj]'], {'phi': '(1.0)'}), '(tp, Rs[ii], Zs[jj], phi=1.0)\n', (13525, 13554), False, 'from galpy import potential\n'), ((72420, 72474), 'galpy.potential.evaluatePotentials', 'potential.evaluatePotentials', (['tp', '(0)', '(0)'], {'phi': '(0.0)', 't': '(0.0)'}), '(tp, 0, 0, phi=0.0, t=0.0)\n', (72448, 72474), False, 'from galpy import potential\n'), ((77631, 77693), 'galpy.potential.evaluatePotentials', 'potential.evaluatePotentials', (['tp', 'numpy.inf', '(0)'], {'phi': '(0.0)', 't': '(0.0)'}), '(tp, numpy.inf, 0, phi=0.0, t=0.0)\n', (77659, 77693), False, 'from galpy import potential\n'), ((77777, 77834), 'galpy.potential.evaluatePotentials', 'potential.evaluatePotentials', (['tp', '_INF', '(0)'], {'phi': '(0.0)', 't': '(0.0)'}), '(tp, _INF, 0, phi=0.0, t=0.0)\n', (77805, 77834), False, 'from galpy import potential\n'), ((81264, 81299), 'galpy.potential.evaluaterforces', 'potential.evaluaterforces', (['pp', 'R', 'z'], {}), '(pp, R, z)\n', (81289, 81299), False, 'from galpy import potential\n'), ((82036, 82093), 'galpy.potential.evaluaterforces', 'potential.evaluaterforces', (['[pp, cdfc]', 'R', 'z'], {'phi': 'phi', 'v': 'v'}), '([pp, cdfc], R, z, phi=phi, v=v)\n', (82061, 82093), False, 'from galpy import potential\n'), ((82271, 82322), 'galpy.potential.evaluaterforces', 'potential.evaluaterforces', (['cdfc', 'R', 'z'], {'phi': 'phi', 'v': 'v'}), '(cdfc, R, z, phi=phi, v=v)\n', (82296, 82322), False, 'from galpy import potential\n'), ((84653, 84690), 'galpy.potential.mass', 'potential.mass', (['pp', 'tR'], {'forceint': '(True)'}), '(pp, tR, forceint=True)\n', (84667, 84690), False, 'from galpy import potential\n'), ((84813, 84852), 'galpy.potential.mass', 'potential.mass', (['[pp]', 'tR'], {'forceint': '(True)'}), '([pp], tR, forceint=True)\n', (84827, 84852), False, 'from galpy import potential\n'), ((86265, 86285), 
'numpy.log', 'numpy.log', (['(tR / np.a)'], {}), '(tR / np.a)\n', (86274, 86285), False, 'import numpy\n'), ((86849, 86869), 'numpy.log', 'numpy.log', (['(tR / np.a)'], {}), '(tR / np.a)\n', (86858, 86869), False, 'import numpy\n'), ((90020, 90042), 'numpy.exp', 'numpy.exp', (['(-z / dp._hz)'], {}), '(-z / dp._hz)\n', (90029, 90042), False, 'import numpy\n'), ((113031, 113055), 'galpy.potential.vcirc', 'potential.vcirc', (['kp', '(1.0)'], {}), '(kp, 1.0)\n', (113046, 113055), False, 'from galpy import potential\n'), ((113370, 113393), 'galpy.potential.vesc', 'potential.vesc', (['kp', '(1.0)'], {}), '(kp, 1.0)\n', (113384, 113393), False, 'from galpy import potential\n'), ((115840, 115871), 'galpy.potential.calcRotcurve', 'potential.calcRotcurve', (['lp', '(0.8)'], {}), '(lp, 0.8)\n', (115862, 115871), False, 'from galpy import potential\n'), ((116013, 116047), 'galpy.potential.calcEscapecurve', 'potential.calcEscapecurve', (['lp', '(0.8)'], {}), '(lp, 0.8)\n', (116038, 116047), False, 'from galpy import potential\n'), ((118136, 118171), 'galpy.potential.vterm', 'potential.vterm', (['lp', '(30.0)'], {'deg': '(True)'}), '(lp, 30.0, deg=True)\n', (118151, 118171), False, 'from galpy import potential\n'), ((118287, 118333), 'galpy.potential.vterm', 'potential.vterm', (['lp', '(numpy.pi / 3.0)'], {'deg': '(False)'}), '(lp, numpy.pi / 3.0, deg=False)\n', (118302, 118333), False, 'from galpy import potential\n'), ((142954, 143002), 'galpy.potential.vcirc', 'potential.vcirc', (['Irrgang13I', '(1.0)'], {'quantity': '(False)'}), '(Irrgang13I, 1.0, quantity=False)\n', (142969, 143002), False, 'from galpy import potential\n'), ((143709, 143756), 'galpy.potential.vesc', 'potential.vesc', (['Irrgang13I', '(1.0)'], {'quantity': '(False)'}), '(Irrgang13I, 1.0, quantity=False)\n', (143723, 143756), False, 'from galpy import potential\n'), ((144471, 144520), 'galpy.potential.vcirc', 'potential.vcirc', (['Irrgang13II', '(1.0)'], {'quantity': '(False)'}), '(Irrgang13II, 1.0, 
quantity=False)\n', (144486, 144520), False, 'from galpy import potential\n'), ((145233, 145281), 'galpy.potential.vesc', 'potential.vesc', (['Irrgang13II', '(1.0)'], {'quantity': '(False)'}), '(Irrgang13II, 1.0, quantity=False)\n', (145247, 145281), False, 'from galpy import potential\n'), ((146006, 146056), 'galpy.potential.vcirc', 'potential.vcirc', (['Irrgang13III', '(1.0)'], {'quantity': '(False)'}), '(Irrgang13III, 1.0, quantity=False)\n', (146021, 146056), False, 'from galpy import potential\n'), ((146571, 146620), 'galpy.potential.vesc', 'potential.vesc', (['Irrgang13III', '(1.0)'], {'quantity': '(False)'}), '(Irrgang13III, 1.0, quantity=False)\n', (146585, 146620), False, 'from galpy import potential\n'), ((148791, 148816), 'galpy.potential.vcirc', 'potential.vcirc', (['pot', '(1.0)'], {}), '(pot, 1.0)\n', (148806, 148816), False, 'from galpy import potential\n'), ((156940, 156955), 'numpy.sqrt', 'numpy.sqrt', (['(0.2)'], {}), '(0.2)\n', (156950, 156955), False, 'import numpy\n'), ((157039, 157096), 'galpy.potential.LinShuReductionFactor', 'LinShuReductionFactor', (['lp', 'R', 'sr'], {'m': 'm', 'k': 'k', 'OmegaP': 'OmegaP'}), '(lp, R, sr, m=m, k=k, OmegaP=OmegaP)\n', (157060, 157096), False, 'from galpy.potential import LinShuReductionFactor, LogarithmicHaloPotential, omegac, epifreq\n'), ((157218, 157233), 'numpy.sqrt', 'numpy.sqrt', (['(0.8)'], {}), '(0.8)\n', (157228, 157233), False, 'import numpy\n'), ((157318, 157375), 'galpy.potential.LinShuReductionFactor', 'LinShuReductionFactor', (['lp', 'R', 'sr'], {'m': 'm', 'k': 'k', 'OmegaP': 'OmegaP'}), '(lp, R, sr, m=m, k=k, OmegaP=OmegaP)\n', (157339, 157375), False, 'from galpy.potential import LinShuReductionFactor, LogarithmicHaloPotential, omegac, epifreq\n'), ((157651, 157697), 'galpy.potential.LinShuReductionFactor', 'LinShuReductionFactor', (['lp', 'R', 'sr'], {'nonaxiPot': 'sp'}), '(lp, R, sr, nonaxiPot=sp)\n', (157672, 157697), False, 'from galpy.potential import LinShuReductionFactor, 
LogarithmicHaloPotential, omegac, epifreq\n'), ((179735, 179748), 'numpy.cos', 'numpy.cos', (['pa'], {}), '(pa)\n', (179744, 179748), False, 'import numpy\n'), ((180805, 180818), 'numpy.cos', 'numpy.cos', (['pa'], {}), '(pa)\n', (180814, 180818), False, 'import numpy\n'), ((184638, 184653), 'numpy.sqrt', 'numpy.sqrt', (['(2.0)'], {}), '(2.0)\n', (184648, 184653), False, 'import numpy\n'), ((184656, 184671), 'numpy.sqrt', 'numpy.sqrt', (['(2.0)'], {}), '(2.0)\n', (184666, 184671), False, 'import numpy\n'), ((189450, 189469), 'numpy.exp', 'numpy.exp', (['(-3.0 * R)'], {}), '(-3.0 * R)\n', (189459, 189469), False, 'import numpy\n'), ((201092, 201147), 'galpy.potential.evaluatePotentials', 'potential.evaluatePotentials', (['[lp, cdfc]', 'R', 'z'], {'phi': '(1.0)'}), '([lp, cdfc], R, z, phi=1.0)\n', (201120, 201147), False, 'from galpy import potential\n'), ((201143, 201198), 'galpy.potential.evaluatePotentials', 'potential.evaluatePotentials', (['[lp, cdfc]', 'R', 'z'], {'phi': '(1.0)'}), '([lp, cdfc], R, z, phi=1.0)\n', (201171, 201198), False, 'from galpy import potential\n'), ((201281, 201335), 'galpy.potential.evaluateDensities', 'potential.evaluateDensities', (['[lp, cdfc]', 'R', 'z'], {'phi': '(1.0)'}), '([lp, cdfc], R, z, phi=1.0)\n', (201308, 201335), False, 'from galpy import potential\n'), ((201331, 201385), 'galpy.potential.evaluateDensities', 'potential.evaluateDensities', (['[lp, cdfc]', 'R', 'z'], {'phi': '(1.0)'}), '([lp, cdfc], R, z, phi=1.0)\n', (201358, 201385), False, 'from galpy import potential\n'), ((201467, 201520), 'galpy.potential.evaluateR2derivs', 'potential.evaluateR2derivs', (['[lp, cdfc]', 'R', 'z'], {'phi': '(1.0)'}), '([lp, cdfc], R, z, phi=1.0)\n', (201493, 201520), False, 'from galpy import potential\n'), ((201516, 201569), 'galpy.potential.evaluateR2derivs', 'potential.evaluateR2derivs', (['[lp, cdfc]', 'R', 'z'], {'phi': '(1.0)'}), '([lp, cdfc], R, z, phi=1.0)\n', (201542, 201569), False, 'from galpy import potential\n'), ((201650, 
201703), 'galpy.potential.evaluatez2derivs', 'potential.evaluatez2derivs', (['[lp, cdfc]', 'R', 'z'], {'phi': '(1.0)'}), '([lp, cdfc], R, z, phi=1.0)\n', (201676, 201703), False, 'from galpy import potential\n'), ((201699, 201752), 'galpy.potential.evaluatez2derivs', 'potential.evaluatez2derivs', (['[lp, cdfc]', 'R', 'z'], {'phi': '(1.0)'}), '([lp, cdfc], R, z, phi=1.0)\n', (201725, 201752), False, 'from galpy import potential\n'), ((201833, 201886), 'galpy.potential.evaluateRzderivs', 'potential.evaluateRzderivs', (['[lp, cdfc]', 'R', 'z'], {'phi': '(1.0)'}), '([lp, cdfc], R, z, phi=1.0)\n', (201859, 201886), False, 'from galpy import potential\n'), ((201882, 201935), 'galpy.potential.evaluateRzderivs', 'potential.evaluateRzderivs', (['[lp, cdfc]', 'R', 'z'], {'phi': '(1.0)'}), '([lp, cdfc], R, z, phi=1.0)\n', (201908, 201935), False, 'from galpy import potential\n'), ((202016, 202071), 'galpy.potential.evaluatephi2derivs', 'potential.evaluatephi2derivs', (['[lp, cdfc]', 'R', 'z'], {'phi': '(1.0)'}), '([lp, cdfc], R, z, phi=1.0)\n', (202044, 202071), False, 'from galpy import potential\n'), ((202067, 202122), 'galpy.potential.evaluatephi2derivs', 'potential.evaluatephi2derivs', (['[lp, cdfc]', 'R', 'z'], {'phi': '(1.0)'}), '([lp, cdfc], R, z, phi=1.0)\n', (202095, 202122), False, 'from galpy import potential\n'), ((202205, 202260), 'galpy.potential.evaluateRphiderivs', 'potential.evaluateRphiderivs', (['[lp, cdfc]', 'R', 'z'], {'phi': '(1.0)'}), '([lp, cdfc], R, z, phi=1.0)\n', (202233, 202260), False, 'from galpy import potential\n'), ((202256, 202311), 'galpy.potential.evaluateRphiderivs', 'potential.evaluateRphiderivs', (['[lp, cdfc]', 'R', 'z'], {'phi': '(1.0)'}), '([lp, cdfc], R, z, phi=1.0)\n', (202284, 202311), False, 'from galpy import potential\n'), ((202394, 202447), 'galpy.potential.evaluater2derivs', 'potential.evaluater2derivs', (['[lp, cdfc]', 'R', 'z'], {'phi': '(1.0)'}), '([lp, cdfc], R, z, phi=1.0)\n', (202420, 202447), False, 'from galpy import 
potential\n'), ((202443, 202496), 'galpy.potential.evaluater2derivs', 'potential.evaluater2derivs', (['[lp, cdfc]', 'R', 'z'], {'phi': '(1.0)'}), '([lp, cdfc], R, z, phi=1.0)\n', (202469, 202496), False, 'from galpy import potential\n'), ((209258, 209317), 'galpy.potential.ttensor', 'potential.ttensor', (['potential.MWPotential2014', 'R', 'z'], {'phi': 'phi'}), '(potential.MWPotential2014, R, z, phi=phi)\n', (209275, 209317), False, 'from galpy import potential\n'), ((209328, 209397), 'galpy.potential.evaluateDensities', 'potential.evaluateDensities', (['potential.MWPotential2014', 'R', 'z'], {'phi': 'phi'}), '(potential.MWPotential2014, R, z, phi=phi)\n', (209355, 209397), False, 'from galpy import potential\n'), ((212918, 212964), 'galpy.potential.vcirc', 'potential.vcirc', (['potential.MWPotential2014', 'Rc'], {}), '(potential.MWPotential2014, Rc)\n', (212933, 212964), False, 'from galpy import potential\n'), ((218777, 218796), 'galpy.potential.rhalf', 'potential.rhalf', (['pp'], {}), '(pp)\n', (218792, 218796), False, 'from galpy import potential\n'), ((219300, 219321), 'galpy.potential.tdyn', 'potential.tdyn', (['hp', 'R'], {}), '(hp, R)\n', (219314, 219321), False, 'from galpy import potential\n'), ((220197, 220264), 'galpy.potential.NumericalPotentialDerivativesMixin.__init__', 'potential.NumericalPotentialDerivativesMixin.__init__', (['self', 'kwargs'], {}), '(self, kwargs)\n', (220250, 220264), False, 'from galpy import potential\n'), ((231622, 231653), 'numpy.geomspace', 'numpy.geomspace', (['(0.01)', '(5.0)', '(201)'], {}), '(0.01, 5.0, 201)\n', (231637, 231653), False, 'import numpy\n'), ((231763, 231794), 'numpy.geomspace', 'numpy.geomspace', (['(0.01)', '(5.0)', '(201)'], {}), '(0.01, 5.0, 201)\n', (231778, 231794), False, 'import numpy\n'), ((243119, 243169), 'numpy.log', 'numpy.log', (['((x - self._a + Tm) / (x + self._a + Tp))'], {}), '((x - self._a + Tm) / (x + self._a + Tp))\n', (243128, 243169), False, 'import numpy\n'), ((259481, 259497), 
'numpy.cos', 'numpy.cos', (['theta'], {}), '(theta)\n', (259490, 259497), False, 'import numpy\n'), ((259500, 259516), 'numpy.cos', 'numpy.cos', (['theta'], {}), '(theta)\n', (259509, 259516), False, 'import numpy\n'), ((259645, 259661), 'numpy.cos', 'numpy.cos', (['theta'], {}), '(theta)\n', (259654, 259661), False, 'import numpy\n'), ((259664, 259680), 'numpy.cos', 'numpy.cos', (['theta'], {}), '(theta)\n', (259673, 259680), False, 'import numpy\n'), ((261458, 261487), 'numpy.linspace', 'numpy.linspace', (['(0.0)', '(1.1)', '(201)'], {}), '(0.0, 1.1, 201)\n', (261472, 261487), False, 'import numpy\n'), ((261927, 261956), 'numpy.linspace', 'numpy.linspace', (['(0.0)', '(1.1)', '(201)'], {}), '(0.0, 1.1, 201)\n', (261941, 261956), False, 'import numpy\n'), ((279465, 279514), 'galpy.potential.LogarithmicHaloPotential', 'potential.LogarithmicHaloPotential', ([], {'normalize': '(1.0)'}), '(normalize=1.0)\n', (279499, 279514), False, 'from galpy import potential\n'), ((285338, 285387), 'galpy.potential.LogarithmicHaloPotential', 'potential.LogarithmicHaloPotential', ([], {'normalize': '(1.0)'}), '(normalize=1.0)\n', (285372, 285387), False, 'from galpy import potential\n'), ((9622, 9664), 'galpy.potential.evaluatelinearForces', 'potential.evaluatelinearForces', (['tp', 'Rs[ii]'], {}), '(tp, Rs[ii])\n', (9652, 9664), False, 'from galpy import potential\n'), ((11506, 11565), 'galpy.potential.evaluateplanarphiforces', 'potential.evaluateplanarphiforces', (['tp', 'Rs[ii]'], {'phi': 'phis[jj]'}), '(tp, Rs[ii], phi=phis[jj])\n', (11539, 11565), False, 'from galpy import potential\n'), ((11787, 11846), 'galpy.potential.evaluatephiforces', 'potential.evaluatephiforces', (['tp', 'Rs[ii]', '(0.05)'], {'phi': 'phis[jj]'}), '(tp, Rs[ii], 0.05, phi=phis[jj])\n', (11814, 11846), False, 'from galpy import potential\n'), ((26484, 26539), 'galpy.potential.evaluatez2derivs', 'potential.evaluatez2derivs', (['tp', 'Rs[ii]', 'Zs[jj]'], {'phi': '(1.0)'}), '(tp, Rs[ii], Zs[jj], phi=1.0)\n', 
(26510, 26539), False, 'from galpy import potential\n'), ((28173, 28228), 'galpy.potential.evaluateRzderivs', 'potential.evaluateRzderivs', (['tp', 'Rs[ii]', 'Zs[jj]'], {'phi': '(1.0)'}), '(tp, Rs[ii], Zs[jj], phi=1.0)\n', (28199, 28228), False, 'from galpy import potential\n'), ((35687, 35773), 'galpy.potential.evaluateDensities', 'potential.evaluateDensities', (['tp', 'Rs[ii]', 'Zs[jj]'], {'phi': 'phis[kk]', 'forcepoisson': '(False)'}), '(tp, Rs[ii], Zs[jj], phi=phis[kk], forcepoisson=\n False)\n', (35714, 35773), False, 'from galpy import potential\n'), ((41704, 41796), 'galpy.potential.evaluateSurfaceDensities', 'potential.evaluateSurfaceDensities', (['tp', 'Rs[ii]', 'Zs[jj]'], {'phi': 'phis[kk]', 'forcepoisson': '(False)'}), '(tp, Rs[ii], Zs[jj], phi=phis[kk],\n forcepoisson=False)\n', (41738, 41796), False, 'from galpy import potential\n'), ((67477, 67526), 'galpy.potential.evaluatelinearPotentials', 'potential.evaluatelinearPotentials', (['pot', 'xs'], {'t': 'ts'}), '(pot, xs, t=ts)\n', (67511, 67526), False, 'from galpy import potential\n'), ((67800, 67845), 'galpy.potential.evaluatelinearForces', 'potential.evaluatelinearForces', (['pot', 'xs'], {'t': 'ts'}), '(pot, xs, t=ts)\n', (67830, 67845), False, 'from galpy import potential\n'), ((81686, 81701), 'numpy.sqrt', 'numpy.sqrt', (['(2.0)'], {}), '(2.0)\n', (81696, 81701), False, 'import numpy\n'), ((83390, 83428), 'galpy.potential.evaluateR2derivs', 'potential.evaluateR2derivs', (['[pp]', 'R', 'z'], {}), '([pp], R, z)\n', (83416, 83428), False, 'from galpy import potential\n'), ((83621, 83659), 'galpy.potential.evaluatez2derivs', 'potential.evaluatez2derivs', (['[pp]', 'R', 'z'], {}), '([pp], R, z)\n', (83647, 83659), False, 'from galpy import potential\n'), ((83852, 83890), 'galpy.potential.evaluateRzderivs', 'potential.evaluateRzderivs', (['[pp]', 'R', 'z'], {}), '([pp], R, z)\n', (83878, 83890), False, 'from galpy import potential\n'), ((91035, 91057), 'numpy.exp', 'numpy.exp', (['(-r / rp._hr)'], {}), 
'(-r / rp._hr)\n', (91044, 91057), False, 'import numpy\n'), ((100220, 100235), 'numpy.sqrt', 'numpy.sqrt', (['(2.0)'], {}), '(2.0)\n', (100230, 100235), False, 'import numpy\n'), ((101970, 101985), 'numpy.sqrt', 'numpy.sqrt', (['(2.0)'], {}), '(2.0)\n', (101980, 101985), False, 'import numpy\n'), ((104488, 104503), 'numpy.sqrt', 'numpy.sqrt', (['(2.0)'], {}), '(2.0)\n', (104498, 104503), False, 'import numpy\n'), ((107009, 107024), 'numpy.sqrt', 'numpy.sqrt', (['(2.0)'], {}), '(2.0)\n', (107019, 107024), False, 'import numpy\n'), ((109233, 109248), 'numpy.sqrt', 'numpy.sqrt', (['(2.0)'], {}), '(2.0)\n', (109243, 109248), False, 'import numpy\n'), ((109423, 109438), 'numpy.sqrt', 'numpy.sqrt', (['(2.0)'], {}), '(2.0)\n', (109433, 109438), False, 'import numpy\n'), ((109616, 109631), 'numpy.sqrt', 'numpy.sqrt', (['(2.0)'], {}), '(2.0)\n', (109626, 109631), False, 'import numpy\n'), ((109818, 109833), 'numpy.sqrt', 'numpy.sqrt', (['(2.0)'], {}), '(2.0)\n', (109828, 109833), False, 'import numpy\n'), ((112270, 112313), 'galpy.potential.rl', 'potential.rl', (['potential.MWPotential2014', 'lz'], {}), '(potential.MWPotential2014, lz)\n', (112282, 112313), False, 'from galpy import potential\n'), ((116806, 116838), 'galpy.potential.lindbladR', 'potential.lindbladR', (['lp', '(0.5)', '(-2)'], {}), '(lp, 0.5, -2)\n', (116825, 116838), False, 'from galpy import potential\n'), ((119373, 119407), 'galpy.potential.flattening', 'potential.flattening', (['lp', '(0.5)', '(0.1)'], {}), '(lp, 0.5, 0.1)\n', (119393, 119407), False, 'from galpy import potential\n'), ((121428, 121457), 'galpy.potential.verticalfreq', 'potential.verticalfreq', (['np', 'r'], {}), '(np, r)\n', (121450, 121457), False, 'from galpy import potential\n'), ((121594, 121625), 'galpy.potential.verticalfreq', 'potential.verticalfreq', (['[bp]', 'r'], {}), '([bp], r)\n', (121616, 121625), False, 'from galpy import potential\n'), ((141086, 141125), 'galpy.util.conversion.force_in_2piGmsolpc2', 
'conversion.force_in_2piGmsolpc2', (['vo', 'ro'], {}), '(vo, ro)\n', (141117, 141125), False, 'from galpy.util import conversion\n'), ((141270, 141342), 'galpy.potential.evaluateDensities', 'potential.evaluateDensities', (['McMillan17[1]', '(1.0)', '(0.0)'], {'use_physical': '(False)'}), '(McMillan17[1], 1.0, 0.0, use_physical=False)\n', (141297, 141342), False, 'from galpy import potential\n'), ((141412, 141446), 'galpy.util.conversion.dens_in_msolpc3', 'conversion.dens_in_msolpc3', (['vo', 'ro'], {}), '(vo, ro)\n', (141438, 141446), False, 'from galpy.util import conversion\n'), ((149026, 149065), 'galpy.util.conversion.force_in_2piGmsolpc2', 'conversion.force_in_2piGmsolpc2', (['vo', 'ro'], {}), '(vo, ro)\n', (149057, 149065), False, 'from galpy.util import conversion\n'), ((179721, 179734), 'numpy.sin', 'numpy.sin', (['pa'], {}), '(pa)\n', (179730, 179734), False, 'import numpy\n'), ((180788, 180801), 'numpy.sin', 'numpy.sin', (['pa'], {}), '(pa)\n', (180797, 180801), False, 'import numpy\n'), ((183726, 183741), 'numpy.sqrt', 'numpy.sqrt', (['(2.0)'], {}), '(2.0)\n', (183736, 183741), False, 'import numpy\n'), ((183741, 183756), 'numpy.sqrt', 'numpy.sqrt', (['(2.0)'], {}), '(2.0)\n', (183751, 183756), False, 'import numpy\n'), ((189529, 189548), 'numpy.exp', 'numpy.exp', (['(-3.0 * R)'], {}), '(-3.0 * R)\n', (189538, 189548), False, 'import numpy\n'), ((189609, 189628), 'numpy.exp', 'numpy.exp', (['(-3.0 * R)'], {}), '(-3.0 * R)\n', (189618, 189628), False, 'import numpy\n'), ((201038, 201053), 'numpy.sqrt', 'numpy.sqrt', (['(2.0)'], {}), '(2.0)\n', (201048, 201053), False, 'import numpy\n'), ((202930, 202945), 'numpy.sqrt', 'numpy.sqrt', (['(2.0)'], {}), '(2.0)\n', (202940, 202945), False, 'import numpy\n'), ((206575, 206613), 'galpy.potential.rtide', 'potential.rtide', (['[lp]', '(1.0)', '(0.0)'], {'M': '(1.0)'}), '([lp], 1.0, 0.0, M=1.0)\n', (206590, 206613), False, 'from galpy import potential\n'), ((206753, 206794), 'galpy.potential.rtide', 
'potential.rtide', (['[pmass]', '(1.0)', '(0.0)'], {'M': '(1.0)'}), '([pmass], 1.0, 0.0, M=1.0)\n', (206768, 206794), False, 'from galpy import potential\n'), ((207427, 207450), 'numpy.diag', 'numpy.diag', (['[2, -1, -1]'], {}), '([2, -1, -1])\n', (207437, 207450), False, 'import numpy\n'), ((207633, 207657), 'numpy.array', 'numpy.array', (['[2, -1, -1]'], {}), '([2, -1, -1])\n', (207644, 207657), False, 'import numpy\n'), ((207899, 207922), 'numpy.diag', 'numpy.diag', (['[2, -1, -1]'], {}), '([2, -1, -1])\n', (207909, 207922), False, 'import numpy\n'), ((208118, 208142), 'numpy.array', 'numpy.array', (['[2, -1, -1]'], {}), '([2, -1, -1])\n', (208129, 208142), False, 'import numpy\n'), ((210085, 210151), 'galpy.potential.evaluatePotentials', 'potential.evaluatePotentials', (['potential.MWPotential2014', 'Rmin', '(0.0)'], {}), '(potential.MWPotential2014, Rmin, 0.0)\n', (210113, 210151), False, 'from galpy import potential\n'), ((210262, 210328), 'galpy.potential.evaluatePotentials', 'potential.evaluatePotentials', (['potential.MWPotential2014', 'Rmax', '(0.0)'], {}), '(potential.MWPotential2014, Rmax, 0.0)\n', (210290, 210328), False, 'from galpy import potential\n'), ((211002, 211068), 'galpy.potential.evaluatePotentials', 'potential.evaluatePotentials', (['potential.MWPotential2014', 'Rmin', '(0.0)'], {}), '(potential.MWPotential2014, Rmin, 0.0)\n', (211030, 211068), False, 'from galpy import potential\n'), ((211179, 211245), 'galpy.potential.evaluatePotentials', 'potential.evaluatePotentials', (['potential.MWPotential2014', 'Rmax', '(0.0)'], {}), '(potential.MWPotential2014, Rmax, 0.0)\n', (211207, 211245), False, 'from galpy import potential\n'), ((211941, 211985), 'galpy.potential.evaluatePotentials', 'potential.evaluatePotentials', (['pot', 'Rmin', '(0.0)'], {}), '(pot, Rmin, 0.0)\n', (211969, 211985), False, 'from galpy import potential\n'), ((212096, 212140), 'galpy.potential.evaluatePotentials', 'potential.evaluatePotentials', (['pot', 'Rmax', '(0.0)'], 
{}), '(pot, Rmax, 0.0)\n', (212124, 212140), False, 'from galpy import potential\n'), ((214608, 214679), 'galpy.potential.evaluatePotentials', 'potential.evaluatePotentials', (['potential.MWPotential2014', 'Rtrial', 'ztrial'], {}), '(potential.MWPotential2014, Rtrial, ztrial)\n', (214636, 214679), False, 'from galpy import potential\n'), ((214890, 214961), 'galpy.potential.evaluatePotentials', 'potential.evaluatePotentials', (['potential.MWPotential2014', 'Rtrial', 'ztrial'], {}), '(potential.MWPotential2014, Rtrial, ztrial)\n', (214918, 214961), False, 'from galpy import potential\n'), ((215172, 215243), 'galpy.potential.evaluatePotentials', 'potential.evaluatePotentials', (['potential.MWPotential2014', 'Rtrial', 'ztrial'], {}), '(potential.MWPotential2014, Rtrial, ztrial)\n', (215200, 215243), False, 'from galpy import potential\n'), ((215573, 215644), 'galpy.potential.evaluatePotentials', 'potential.evaluatePotentials', (['potential.MWPotential2014', 'Rtrial', 'ztrial'], {}), '(potential.MWPotential2014, Rtrial, ztrial)\n', (215601, 215644), False, 'from galpy import potential\n'), ((215855, 215926), 'galpy.potential.evaluatePotentials', 'potential.evaluatePotentials', (['potential.MWPotential2014', 'Rtrial', 'ztrial'], {}), '(potential.MWPotential2014, Rtrial, ztrial)\n', (215883, 215926), False, 'from galpy import potential\n'), ((216137, 216208), 'galpy.potential.evaluatePotentials', 'potential.evaluatePotentials', (['potential.MWPotential2014', 'Rtrial', 'ztrial'], {}), '(potential.MWPotential2014, Rtrial, ztrial)\n', (216165, 216208), False, 'from galpy import potential\n'), ((216524, 216573), 'galpy.potential.evaluatePotentials', 'potential.evaluatePotentials', (['pot', 'Rtrial', 'ztrial'], {}), '(pot, Rtrial, ztrial)\n', (216552, 216573), False, 'from galpy import potential\n'), ((216752, 216801), 'galpy.potential.evaluatePotentials', 'potential.evaluatePotentials', (['pot', 'Rtrial', 'ztrial'], {}), '(pot, Rtrial, ztrial)\n', (216780, 216801), False, 
'from galpy import potential\n'), ((216980, 217029), 'galpy.potential.evaluatePotentials', 'potential.evaluatePotentials', (['pot', 'Rtrial', 'ztrial'], {}), '(pot, Rtrial, ztrial)\n', (217008, 217029), False, 'from galpy import potential\n'), ((218799, 218836), 'numpy.sqrt', 'numpy.sqrt', (['(0.5 ** (-2.0 / 3.0) - 1.0)'], {}), '(0.5 ** (-2.0 / 3.0) - 1.0)\n', (218809, 218836), False, 'import numpy\n'), ((247639, 247653), 'numpy.sin', 'numpy.sin', (['(0.5)'], {}), '(0.5)\n', (247648, 247653), False, 'import numpy\n'), ((247657, 247671), 'numpy.cos', 'numpy.cos', (['(0.5)'], {}), '(0.5)\n', (247666, 247671), False, 'import numpy\n'), ((248108, 248122), 'numpy.sin', 'numpy.sin', (['(0.5)'], {}), '(0.5)\n', (248117, 248122), False, 'import numpy\n'), ((248126, 248140), 'numpy.cos', 'numpy.cos', (['(0.5)'], {}), '(0.5)\n', (248135, 248140), False, 'import numpy\n'), ((248411, 248425), 'numpy.sin', 'numpy.sin', (['(0.5)'], {}), '(0.5)\n', (248420, 248425), False, 'import numpy\n'), ((248429, 248443), 'numpy.cos', 'numpy.cos', (['(0.5)'], {}), '(0.5)\n', (248438, 248443), False, 'import numpy\n'), ((259779, 259793), 'numpy.cos', 'numpy.cos', (['phi'], {}), '(phi)\n', (259788, 259793), False, 'import numpy\n'), ((259796, 259810), 'numpy.sin', 'numpy.sin', (['phi'], {}), '(phi)\n', (259805, 259810), False, 'import numpy\n'), ((265822, 265871), 'galpy.potential.LogarithmicHaloPotential', 'potential.LogarithmicHaloPotential', ([], {'normalize': '(1.0)'}), '(normalize=1.0)\n', (265856, 265871), False, 'from galpy import potential\n'), ((265920, 266033), 'galpy.potential.EllipticalDiskPotential', 'potential.EllipticalDiskPotential', ([], {'phib': '(numpy.pi / 2.0)', 'p': '(0.0)', 'tform': 'None', 'tsteady': 'None', 'twophio': '(14.0 / 220.0)'}), '(phib=numpy.pi / 2.0, p=0.0, tform=None,\n tsteady=None, twophio=14.0 / 220.0)\n', (265953, 266033), False, 'from galpy import potential\n'), ((266243, 266292), 'galpy.potential.LogarithmicHaloPotential', 
'potential.LogarithmicHaloPotential', ([], {'normalize': '(1.0)'}), '(normalize=1.0)\n', (266277, 266292), False, 'from galpy import potential\n'), ((266341, 266454), 'galpy.potential.EllipticalDiskPotential', 'potential.EllipticalDiskPotential', ([], {'phib': '(numpy.pi / 2.0)', 'p': '(0.0)', 'twophio': '(14.0 / 220.0)', 'tform': '(1.0)', 'tsteady': '(250.0)'}), '(phib=numpy.pi / 2.0, p=0.0, twophio=14.0 /\n 220.0, tform=1.0, tsteady=250.0)\n', (266374, 266454), False, 'from galpy import potential\n'), ((266656, 266705), 'galpy.potential.LogarithmicHaloPotential', 'potential.LogarithmicHaloPotential', ([], {'normalize': '(1.0)'}), '(normalize=1.0)\n', (266690, 266705), False, 'from galpy import potential\n'), ((266754, 266832), 'galpy.potential.LopsidedDiskPotential', 'potential.LopsidedDiskPotential', ([], {'phib': '(numpy.pi / 2.0)', 'p': '(0.0)', 'phio': '(10.0 / 220.0)'}), '(phib=numpy.pi / 2.0, p=0.0, phio=10.0 / 220.0)\n', (266785, 266832), False, 'from galpy import potential\n'), ((267041, 267090), 'galpy.potential.LogarithmicHaloPotential', 'potential.LogarithmicHaloPotential', ([], {'normalize': '(1.0)'}), '(normalize=1.0)\n', (267075, 267090), False, 'from galpy import potential\n'), ((267139, 267216), 'galpy.potential.CosmphiDiskPotential', 'potential.CosmphiDiskPotential', ([], {'phib': '(numpy.pi / 2.0)', 'p': '(0.0)', 'phio': '(10.0 / 220.0)'}), '(phib=numpy.pi / 2.0, p=0.0, phio=10.0 / 220.0)\n', (267169, 267216), False, 'from galpy import potential\n'), ((267431, 267480), 'galpy.potential.LogarithmicHaloPotential', 'potential.LogarithmicHaloPotential', ([], {'normalize': '(1.0)'}), '(normalize=1.0)\n', (267465, 267480), False, 'from galpy import potential\n'), ((267529, 267625), 'galpy.potential.CosmphiDiskPotential', 'potential.CosmphiDiskPotential', ([], {'phib': '(numpy.pi / 2.0)', 'p': '(0.0)', 'phio': '(10.0 / 220.0)', 'rb': '(0.99)', 'm': '(6)'}), '(phib=numpy.pi / 2.0, p=0.0, phio=10.0 / \n 220.0, rb=0.99, m=6)\n', (267559, 267625), False, 
'from galpy import potential\n'), ((267807, 267856), 'galpy.potential.LogarithmicHaloPotential', 'potential.LogarithmicHaloPotential', ([], {'normalize': '(1.0)'}), '(normalize=1.0)\n', (267841, 267856), False, 'from galpy import potential\n'), ((267899, 267929), 'galpy.potential.DehnenBarPotential', 'potential.DehnenBarPotential', ([], {}), '()\n', (267927, 267929), False, 'from galpy import potential\n'), ((268155, 268204), 'galpy.potential.LogarithmicHaloPotential', 'potential.LogarithmicHaloPotential', ([], {'normalize': '(1.0)'}), '(normalize=1.0)\n', (268189, 268204), False, 'from galpy import potential\n'), ((268247, 268311), 'galpy.potential.DehnenBarPotential', 'potential.DehnenBarPotential', ([], {'tform': '(1.0)', 'tsteady': '(250.0)', 'rolr': '(2.5)'}), '(tform=1.0, tsteady=250.0, rolr=2.5)\n', (268275, 268311), False, 'from galpy import potential\n'), ((268553, 268602), 'galpy.potential.LogarithmicHaloPotential', 'potential.LogarithmicHaloPotential', ([], {'normalize': '(1.0)'}), '(normalize=1.0)\n', (268587, 268602), False, 'from galpy import potential\n'), ((268651, 268687), 'galpy.potential.SteadyLogSpiralPotential', 'potential.SteadyLogSpiralPotential', ([], {}), '()\n', (268685, 268687), False, 'from galpy import potential\n'), ((268937, 268986), 'galpy.potential.LogarithmicHaloPotential', 'potential.LogarithmicHaloPotential', ([], {'normalize': '(1.0)'}), '(normalize=1.0)\n', (268971, 268986), False, 'from galpy import potential\n'), ((269035, 269094), 'galpy.potential.SteadyLogSpiralPotential', 'potential.SteadyLogSpiralPotential', ([], {'tform': '(0.1)', 'tsteady': '(25.0)'}), '(tform=0.1, tsteady=25.0)\n', (269069, 269094), False, 'from galpy import potential\n'), ((269340, 269389), 'galpy.potential.LogarithmicHaloPotential', 'potential.LogarithmicHaloPotential', ([], {'normalize': '(1.0)'}), '(normalize=1.0)\n', (269374, 269389), False, 'from galpy import potential\n'), ((269438, 269485), 'galpy.potential.TransientLogSpiralPotential', 
'potential.TransientLogSpiralPotential', ([], {'to': '(-10.0)'}), '(to=-10.0)\n', (269475, 269485), False, 'from galpy import potential\n'), ((269749, 269798), 'galpy.potential.LogarithmicHaloPotential', 'potential.LogarithmicHaloPotential', ([], {'normalize': '(1.0)'}), '(normalize=1.0)\n', (269783, 269798), False, 'from galpy import potential\n'), ((269841, 269872), 'galpy.potential.SpiralArmsPotential', 'potential.SpiralArmsPotential', ([], {}), '()\n', (269870, 269872), False, 'from galpy import potential\n'), ((270103, 270152), 'galpy.potential.LogarithmicHaloPotential', 'potential.LogarithmicHaloPotential', ([], {'normalize': '(1.0)'}), '(normalize=1.0)\n', (270137, 270152), False, 'from galpy import potential\n'), ((270195, 270235), 'galpy.potential.SpiralArmsPotential', 'potential.SpiralArmsPotential', ([], {'omega': '(1.3)'}), '(omega=1.3)\n', (270224, 270235), False, 'from galpy import potential\n'), ((270473, 270522), 'galpy.potential.LogarithmicHaloPotential', 'potential.LogarithmicHaloPotential', ([], {'normalize': '(1.0)'}), '(normalize=1.0)\n', (270507, 270522), False, 'from galpy import potential\n'), ((270565, 270664), 'galpy.potential.SpiralArmsPotential', 'potential.SpiralArmsPotential', ([], {'omega': '(1.3)', 'N': '(4)', 'Cs': '[8 / 3 / numpy.pi, 1 / 2, 8 / 15 / numpy.pi]'}), '(omega=1.3, N=4, Cs=[8 / 3 / numpy.pi, 1 / 2, \n 8 / 15 / numpy.pi])\n', (270594, 270664), False, 'from galpy import potential\n'), ((277556, 277605), 'galpy.potential.LogarithmicHaloPotential', 'potential.LogarithmicHaloPotential', ([], {'normalize': '(1.0)'}), '(normalize=1.0)\n', (277590, 277605), False, 'from galpy import potential\n'), ((278225, 278274), 'galpy.potential.LogarithmicHaloPotential', 'potential.LogarithmicHaloPotential', ([], {'normalize': '(1.0)'}), '(normalize=1.0)\n', (278259, 278274), False, 'from galpy import potential\n'), ((278296, 278368), 'galpy.potential.DehnenSmoothWrapperPotential', 'DehnenSmoothWrapperPotential', ([], {'amp': '(1.0)', 
'pot': 'dpn', 'tform': '(0.1)', 'tsteady': '(500.0)'}), '(amp=1.0, pot=dpn, tform=0.1, tsteady=500.0)\n', (278324, 278368), False, 'from galpy.potential import DehnenSmoothWrapperPotential, SolidBodyRotationWrapperPotential, CorotatingRotationWrapperPotential, GaussianAmplitudeWrapperPotential\n'), ((278838, 278887), 'galpy.potential.LogarithmicHaloPotential', 'potential.LogarithmicHaloPotential', ([], {'normalize': '(1.0)'}), '(normalize=1.0)\n', (278872, 278887), False, 'from galpy import potential\n'), ((278909, 279000), 'galpy.potential.DehnenSmoothWrapperPotential', 'DehnenSmoothWrapperPotential', ([], {'amp': '(1.0)', 'pot': 'dpn', 'tform': '(-250.0)', 'tsteady': '(500.0)', 'decay': '(True)'}), '(amp=1.0, pot=dpn, tform=-250.0, tsteady=500.0,\n decay=True)\n', (278937, 279000), False, 'from galpy.potential import DehnenSmoothWrapperPotential, SolidBodyRotationWrapperPotential, CorotatingRotationWrapperPotential, GaussianAmplitudeWrapperPotential\n'), ((280260, 280309), 'galpy.potential.LogarithmicHaloPotential', 'potential.LogarithmicHaloPotential', ([], {'normalize': '(1.0)'}), '(normalize=1.0)\n', (280294, 280309), False, 'from galpy import potential\n'), ((281380, 281409), 'galpy.potential.HenonHeilesPotential', 'HenonHeilesPotential', ([], {'amp': '(1.0)'}), '(amp=1.0)\n', (281400, 281409), False, 'from galpy.potential import DehnenBarPotential, CosmphiDiskPotential, EllipticalDiskPotential, SteadyLogSpiralPotential, TransientLogSpiralPotential, HenonHeilesPotential\n'), ((282369, 282418), 'galpy.potential.LogarithmicHaloPotential', 'potential.LogarithmicHaloPotential', ([], {'normalize': '(1.0)'}), '(normalize=1.0)\n', (282403, 282418), False, 'from galpy import potential\n'), ((282908, 282957), 'galpy.potential.LogarithmicHaloPotential', 'potential.LogarithmicHaloPotential', ([], {'normalize': '(1.0)'}), '(normalize=1.0)\n', (282942, 282957), False, 'from galpy import potential\n'), ((284074, 284123), 'galpy.potential.LogarithmicHaloPotential', 
'potential.LogarithmicHaloPotential', ([], {'normalize': '(1.0)'}), '(normalize=1.0)\n', (284108, 284123), False, 'from galpy import potential\n'), ((284145, 284220), 'galpy.potential.GaussianAmplitudeWrapperPotential', 'GaussianAmplitudeWrapperPotential', ([], {'amp': '(1.0)', 'pot': 'dpn', 'to': '(10)', 'sigma': '(1000000.0)'}), '(amp=1.0, pot=dpn, to=10, sigma=1000000.0)\n', (284178, 284220), False, 'from galpy.potential import DehnenSmoothWrapperPotential, SolidBodyRotationWrapperPotential, CorotatingRotationWrapperPotential, GaussianAmplitudeWrapperPotential\n'), ((284699, 284748), 'galpy.potential.LogarithmicHaloPotential', 'potential.LogarithmicHaloPotential', ([], {'normalize': '(1.0)'}), '(normalize=1.0)\n', (284733, 284748), False, 'from galpy import potential\n'), ((284770, 284839), 'galpy.potential.GaussianAmplitudeWrapperPotential', 'GaussianAmplitudeWrapperPotential', ([], {'amp': '(1.0)', 'pot': 'dpn', 'to': '(10)', 'sigma': '(1.0)'}), '(amp=1.0, pot=dpn, to=10, sigma=1.0)\n', (284803, 284839), False, 'from galpy.potential import DehnenSmoothWrapperPotential, SolidBodyRotationWrapperPotential, CorotatingRotationWrapperPotential, GaussianAmplitudeWrapperPotential\n'), ((285605, 285636), 'galpy.potential.SpiralArmsPotential', 'potential.SpiralArmsPotential', ([], {}), '()\n', (285634, 285636), False, 'from galpy import potential\n'), ((9910, 9965), 'galpy.potential.evaluateplanarRforces', 'potential.evaluateplanarRforces', (['tp', 'Rs[ii]'], {'phi': 'Zs[jj]'}), '(tp, Rs[ii], phi=Zs[jj])\n', (9941, 9965), False, 'from galpy import potential\n'), ((10259, 10313), 'galpy.potential.evaluateRforces', 'potential.evaluateRforces', (['tp', 'Rs[ii]', 'Zs[jj]'], {'phi': '(1.0)'}), '(tp, Rs[ii], Zs[jj], phi=1.0)\n', (10284, 10313), False, 'from galpy import potential\n'), ((21756, 21816), 'galpy.potential.evaluatephi2derivs', 'potential.evaluatephi2derivs', (['tp', 'Rs[ii]', '(0.05)'], {'phi': 'phis[jj]'}), '(tp, Rs[ii], 0.05, phi=phis[jj])\n', (21784, 21816), 
False, 'from galpy import potential\n'), ((23768, 23828), 'galpy.potential.evaluateRphiderivs', 'potential.evaluateRphiderivs', (['tp', 'Rs[ii]', '(0.05)'], {'phi': 'phis[jj]'}), '(tp, Rs[ii], 0.05, phi=phis[jj])\n', (23796, 23828), False, 'from galpy import potential\n'), ((29796, 29870), 'galpy.potential.evaluateplanarPotentials', 'potential.evaluateplanarPotentials', (['tp', 'Rs[ii]'], {'phi': 'phis[jj]', 'dR': '(1)', 'dphi': '(1)'}), '(tp, Rs[ii], phi=phis[jj], dR=1, dphi=1)\n', (29830, 29870), False, 'from galpy import potential\n'), ((30172, 30245), 'galpy.potential.evaluatePotentials', 'potential.evaluatePotentials', (['tp', 'Rs[ii]', '(0.1)'], {'phi': 'phis[jj]', 'dR': '(1)', 'dphi': '(1)'}), '(tp, Rs[ii], 0.1, phi=phis[jj], dR=1, dphi=1)\n', (30200, 30245), False, 'from galpy import potential\n'), ((73023, 73037), 'numpy.zeros', 'numpy.zeros', (['(4)'], {}), '(4)\n', (73034, 73037), False, 'import numpy\n'), ((73038, 73052), 'numpy.zeros', 'numpy.zeros', (['(4)'], {}), '(4)\n', (73049, 73052), False, 'import numpy\n'), ((78371, 78385), 'numpy.zeros', 'numpy.zeros', (['(4)'], {}), '(4)\n', (78382, 78385), False, 'import numpy\n'), ((78550, 78564), 'numpy.zeros', 'numpy.zeros', (['(4)'], {}), '(4)\n', (78561, 78564), False, 'import numpy\n'), ((81226, 81261), 'galpy.potential.evaluateRforces', 'potential.evaluateRforces', (['pp', 'R', 'z'], {}), '(pp, R, z)\n', (81251, 81261), False, 'from galpy import potential\n'), ((81979, 82036), 'galpy.potential.evaluateRforces', 'potential.evaluateRforces', (['[pp, cdfc]', 'R', 'z'], {'phi': 'phi', 'v': 'v'}), '([pp, cdfc], R, z, phi=phi, v=v)\n', (82004, 82036), False, 'from galpy import potential\n'), ((82219, 82270), 'galpy.potential.evaluateRforces', 'potential.evaluateRforces', (['cdfc', 'R', 'z'], {'phi': 'phi', 'v': 'v'}), '(cdfc, R, z, phi=phi, v=v)\n', (82244, 82270), False, 'from galpy import potential\n'), ((83427, 83465), 'galpy.potential.evaluater2derivs', 'potential.evaluater2derivs', (['[pp]', 'R', 'z'], 
{}), '([pp], R, z)\n', (83453, 83465), False, 'from galpy import potential\n'), ((83474, 83511), 'galpy.potential.evaluaterforces', 'potential.evaluaterforces', (['[pp]', 'R', 'z'], {}), '([pp], R, z)\n', (83499, 83511), False, 'from galpy import potential\n'), ((83658, 83696), 'galpy.potential.evaluater2derivs', 'potential.evaluater2derivs', (['[pp]', 'R', 'z'], {}), '([pp], R, z)\n', (83684, 83696), False, 'from galpy import potential\n'), ((83705, 83742), 'galpy.potential.evaluaterforces', 'potential.evaluaterforces', (['[pp]', 'R', 'z'], {}), '([pp], R, z)\n', (83730, 83742), False, 'from galpy import potential\n'), ((89994, 90016), 'numpy.exp', 'numpy.exp', (['(-r / dp._hr)'], {}), '(-r / dp._hr)\n', (90003, 90016), False, 'import numpy\n'), ((112226, 112269), 'galpy.potential.rl', 'potential.rl', (['potential.MWPotential2014', 'lz'], {}), '(potential.MWPotential2014, lz)\n', (112238, 112269), False, 'from galpy import potential\n'), ((117949, 117964), 'numpy.sqrt', 'numpy.sqrt', (['(3.0)'], {}), '(3.0)\n', (117959, 117964), False, 'import numpy\n'), ((118329, 118344), 'numpy.sqrt', 'numpy.sqrt', (['(3.0)'], {}), '(3.0)\n', (118339, 118344), False, 'import numpy\n'), ((140943, 141017), 'galpy.potential.evaluatezforces', 'potential.evaluatezforces', (['McMillan17', '(1.0)', '(1.1 / 8.21)'], {'use_physical': '(False)'}), '(McMillan17, 1.0, 1.1 / 8.21, use_physical=False)\n', (140968, 141017), False, 'from galpy import potential\n'), ((148966, 149031), 'galpy.potential.evaluatezforces', 'potential.evaluatezforces', (['pot', '(1.0)', '(1.1 / ro)'], {'use_physical': '(False)'}), '(pot, 1.0, 1.1 / ro, use_physical=False)\n', (148991, 149031), False, 'from galpy import potential\n'), ((183979, 183994), 'numpy.sqrt', 'numpy.sqrt', (['(2.0)'], {}), '(2.0)\n', (183989, 183994), False, 'import numpy\n'), ((183997, 184012), 'numpy.sqrt', 'numpy.sqrt', (['(2.0)'], {}), '(2.0)\n', (184007, 184012), False, 'import numpy\n'), ((218319, 218334), 'numpy.sqrt', 'numpy.sqrt', 
(['(2.0)'], {}), '(2.0)\n', (218329, 218334), False, 'import numpy\n'), ((252345, 252364), 'numpy.exp', 'numpy.exp', (['(-3.0 * R)'], {}), '(-3.0 * R)\n', (252354, 252364), False, 'import numpy\n'), ((252414, 252433), 'numpy.exp', 'numpy.exp', (['(-3.0 * R)'], {}), '(-3.0 * R)\n', (252423, 252433), False, 'import numpy\n'), ((271954, 272001), 'galpy.potential.MiyamotoNagaiPotential', 'potential.MiyamotoNagaiPotential', ([], {'normalize': '(1.0)'}), '(normalize=1.0)\n', (271986, 272001), False, 'from galpy import potential\n'), ((9460, 9506), 'galpy.potential.evaluatelinearPotentials', 'potential.evaluatelinearPotentials', (['tp', 'Rs[ii]'], {}), '(tp, Rs[ii])\n', (9494, 9506), False, 'from galpy import potential\n'), ((9540, 9591), 'galpy.potential.evaluatelinearPotentials', 'potential.evaluatelinearPotentials', (['tp', '(Rs[ii] + dr)'], {}), '(tp, Rs[ii] + dr)\n', (9574, 9591), False, 'from galpy import potential\n'), ((10589, 10621), 'numpy.fabs', 'numpy.fabs', (['(tRforce - mpotderivR)'], {}), '(tRforce - mpotderivR)\n', (10599, 10621), False, 'import numpy\n'), ((10621, 10665), 'numpy.fabs', 'numpy.fabs', (['((tRforce - mpotderivR) / tRforce)'], {}), '((tRforce - mpotderivR) / tRforce)\n', (10631, 10665), False, 'import numpy\n'), ((10942, 10974), 'numpy.fabs', 'numpy.fabs', (['(tRforce - mpotderivR)'], {}), '(tRforce - mpotderivR)\n', (10952, 10974), False, 'import numpy\n'), ((10974, 11018), 'numpy.fabs', 'numpy.fabs', (['((tRforce - mpotderivR) / tRforce)'], {}), '((tRforce - mpotderivR) / tRforce)\n', (10984, 11018), False, 'import numpy\n'), ((13834, 13856), 'numpy.fabs', 'numpy.fabs', (['mpotderivz'], {}), '(mpotderivz)\n', (13844, 13856), False, 'import numpy\n'), ((13857, 13901), 'numpy.fabs', 'numpy.fabs', (['((tzforce - mpotderivz) / tzforce)'], {}), '((tzforce - mpotderivz) / tzforce)\n', (13867, 13901), False, 'import numpy\n'), ((14158, 14180), 'numpy.fabs', 'numpy.fabs', (['mpotderivz'], {}), '(mpotderivz)\n', (14168, 14180), False, 'import 
numpy\n'), ((14181, 14225), 'numpy.fabs', 'numpy.fabs', (['((tzforce - mpotderivz) / tzforce)'], {}), '((tzforce - mpotderivz) / tzforce)\n', (14191, 14225), False, 'import numpy\n'), ((19211, 19229), 'numpy.fabs', 'numpy.fabs', (['Zs[jj]'], {}), '(Zs[jj])\n', (19221, 19229), False, 'import numpy\n'), ((19790, 19846), 'galpy.potential.evaluateplanarR2derivs', 'potential.evaluateplanarR2derivs', (['tp', 'Rs[ii]'], {'phi': 'Zs[jj]'}), '(tp, Rs[ii], phi=Zs[jj])\n', (19822, 19846), False, 'from galpy import potential\n'), ((20084, 20139), 'galpy.potential.evaluateR2derivs', 'potential.evaluateR2derivs', (['tp', 'Rs[ii]', 'Zs[jj]'], {'phi': '(1.0)'}), '(tp, Rs[ii], Zs[jj], phi=1.0)\n', (20110, 20139), False, 'from galpy import potential\n'), ((78357, 78370), 'numpy.ones', 'numpy.ones', (['(4)'], {}), '(4)\n', (78367, 78370), False, 'import numpy\n'), ((78536, 78549), 'numpy.ones', 'numpy.ones', (['(4)'], {}), '(4)\n', (78546, 78549), False, 'import numpy\n'), ((78972, 78987), 'numpy.sqrt', 'numpy.sqrt', (['(2.0)'], {}), '(2.0)\n', (78982, 78987), False, 'import numpy\n'), ((78995, 79010), 'numpy.sqrt', 'numpy.sqrt', (['(2.0)'], {}), '(2.0)\n', (79005, 79010), False, 'import numpy\n'), ((79264, 79279), 'numpy.sqrt', 'numpy.sqrt', (['(2.0)'], {}), '(2.0)\n', (79274, 79279), False, 'import numpy\n'), ((79287, 79302), 'numpy.sqrt', 'numpy.sqrt', (['(2.0)'], {}), '(2.0)\n', (79297, 79302), False, 'import numpy\n'), ((79550, 79565), 'numpy.sqrt', 'numpy.sqrt', (['(2.0)'], {}), '(2.0)\n', (79560, 79565), False, 'import numpy\n'), ((79573, 79588), 'numpy.sqrt', 'numpy.sqrt', (['(2.0)'], {}), '(2.0)\n', (79583, 79588), False, 'import numpy\n'), ((79828, 79843), 'numpy.sqrt', 'numpy.sqrt', (['(2.0)'], {}), '(2.0)\n', (79838, 79843), False, 'import numpy\n'), ((79851, 79866), 'numpy.sqrt', 'numpy.sqrt', (['(2.0)'], {}), '(2.0)\n', (79861, 79866), False, 'import numpy\n'), ((80093, 80108), 'numpy.sqrt', 'numpy.sqrt', (['(2.0)'], {}), '(2.0)\n', (80103, 80108), False, 'import 
numpy\n'), ((80116, 80131), 'numpy.sqrt', 'numpy.sqrt', (['(2.0)'], {}), '(2.0)\n', (80126, 80131), False, 'import numpy\n'), ((80385, 80400), 'numpy.sqrt', 'numpy.sqrt', (['(2.0)'], {}), '(2.0)\n', (80395, 80400), False, 'import numpy\n'), ((80408, 80423), 'numpy.sqrt', 'numpy.sqrt', (['(2.0)'], {}), '(2.0)\n', (80418, 80423), False, 'import numpy\n'), ((80689, 80704), 'numpy.sqrt', 'numpy.sqrt', (['(2.0)'], {}), '(2.0)\n', (80699, 80704), False, 'import numpy\n'), ((80712, 80727), 'numpy.sqrt', 'numpy.sqrt', (['(2.0)'], {}), '(2.0)\n', (80722, 80727), False, 'import numpy\n'), ((83936, 83973), 'galpy.potential.evaluaterforces', 'potential.evaluaterforces', (['[pp]', 'R', 'z'], {}), '([pp], R, z)\n', (83961, 83973), False, 'from galpy import potential\n'), ((116475, 116490), 'numpy.sqrt', 'numpy.sqrt', (['(2.0)'], {}), '(2.0)\n', (116485, 116490), False, 'import numpy\n'), ((116637, 116652), 'numpy.sqrt', 'numpy.sqrt', (['(2.0)'], {}), '(2.0)\n', (116647, 116652), False, 'import numpy\n'), ((116846, 116861), 'numpy.sqrt', 'numpy.sqrt', (['(2.0)'], {}), '(2.0)\n', (116856, 116861), False, 'import numpy\n'), ((117041, 117056), 'numpy.sqrt', 'numpy.sqrt', (['(2.0)'], {}), '(2.0)\n', (117051, 117056), False, 'import numpy\n'), ((252491, 252510), 'numpy.exp', 'numpy.exp', (['(-3.0 * R)'], {}), '(-3.0 * R)\n', (252500, 252510), False, 'import numpy\n'), ((252567, 252586), 'numpy.exp', 'numpy.exp', (['(-3.0 * R)'], {}), '(-3.0 * R)\n', (252576, 252586), False, 'import numpy\n'), ((252645, 252664), 'numpy.exp', 'numpy.exp', (['(-3.0 * R)'], {}), '(-3.0 * R)\n', (252654, 252664), False, 'import numpy\n'), ((252722, 252741), 'numpy.exp', 'numpy.exp', (['(-3.0 * R)'], {}), '(-3.0 * R)\n', (252731, 252741), False, 'import numpy\n'), ((253096, 253109), 'numpy.sign', 'numpy.sign', (['z'], {}), '(z)\n', (253106, 253109), False, 'import numpy\n'), ((280397, 280428), 'galpy.potential.SpiralArmsPotential', 'potential.SpiralArmsPotential', ([], {}), '()\n', (280426, 280428), False, 
'from galpy import potential\n'), ((280783, 280832), 'galpy.potential.LogarithmicHaloPotential', 'potential.LogarithmicHaloPotential', ([], {'normalize': '(1.0)'}), '(normalize=1.0)\n', (280817, 280832), False, 'from galpy import potential\n'), ((282507, 282538), 'galpy.potential.SpiralArmsPotential', 'potential.SpiralArmsPotential', ([], {}), '()\n', (282536, 282538), False, 'from galpy import potential\n'), ((283046, 283077), 'galpy.potential.SpiralArmsPotential', 'potential.SpiralArmsPotential', ([], {}), '()\n', (283075, 283077), False, 'from galpy import potential\n'), ((9760, 9818), 'galpy.potential.evaluateplanarPotentials', 'potential.evaluateplanarPotentials', (['tp', 'Rs[ii]'], {'phi': 'Zs[jj]'}), '(tp, Rs[ii], phi=Zs[jj])\n', (9794, 9818), False, 'from galpy import potential\n'), ((9817, 9880), 'galpy.potential.evaluateplanarPotentials', 'potential.evaluateplanarPotentials', (['tp', '(Rs[ii] + dr)'], {'phi': 'Zs[jj]'}), '(tp, Rs[ii] + dr, phi=Zs[jj])\n', (9851, 9880), False, 'from galpy import potential\n'), ((10081, 10138), 'galpy.potential.evaluatePotentials', 'potential.evaluatePotentials', (['tp', 'Rs[ii]', 'Zs[jj]'], {'phi': '(1.0)'}), '(tp, Rs[ii], Zs[jj], phi=1.0)\n', (10109, 10138), False, 'from galpy import potential\n'), ((10169, 10231), 'galpy.potential.evaluatePotentials', 'potential.evaluatePotentials', (['tp', '(Rs[ii] + dr)', 'Zs[jj]'], {'phi': '(1.0)'}), '(tp, Rs[ii] + dr, Zs[jj], phi=1.0)\n', (10197, 10231), False, 'from galpy import potential\n'), ((20463, 20499), 'numpy.fabs', 'numpy.fabs', (['(tR2deriv - mRforcederivR)'], {}), '(tR2deriv - mRforcederivR)\n', (20473, 20499), False, 'import numpy\n'), ((20499, 20548), 'numpy.fabs', 'numpy.fabs', (['((tR2deriv - mRforcederivR) / tR2deriv)'], {}), '((tR2deriv - mRforcederivR) / tR2deriv)\n', (20509, 20548), False, 'import numpy\n'), ((20874, 20910), 'numpy.fabs', 'numpy.fabs', (['(tR2deriv - mRforcederivR)'], {}), '(tR2deriv - mRforcederivR)\n', (20884, 20910), False, 'import numpy\n'), 
((20910, 20959), 'numpy.fabs', 'numpy.fabs', (['((tR2deriv - mRforcederivR) / tR2deriv)'], {}), '((tR2deriv - mRforcederivR) / tR2deriv)\n', (20920, 20959), False, 'import numpy\n'), ((26869, 26905), 'numpy.fabs', 'numpy.fabs', (['(tz2deriv - mzforcederivz)'], {}), '(tz2deriv - mzforcederivz)\n', (26879, 26905), False, 'import numpy\n'), ((26905, 26954), 'numpy.fabs', 'numpy.fabs', (['((tz2deriv - mzforcederivz) / tz2deriv)'], {}), '((tz2deriv - mzforcederivz) / tz2deriv)\n', (26915, 26954), False, 'import numpy\n'), ((27286, 27322), 'numpy.fabs', 'numpy.fabs', (['(tz2deriv - mzforcederivz)'], {}), '(tz2deriv - mzforcederivz)\n', (27296, 27322), False, 'import numpy\n'), ((27322, 27371), 'numpy.fabs', 'numpy.fabs', (['((tz2deriv - mzforcederivz) / tz2deriv)'], {}), '((tz2deriv - mzforcederivz) / tz2deriv)\n', (27332, 27371), False, 'import numpy\n'), ((28562, 28598), 'numpy.fabs', 'numpy.fabs', (['(tRzderiv - mRforcederivz)'], {}), '(tRzderiv - mRforcederivz)\n', (28572, 28598), False, 'import numpy\n'), ((28598, 28647), 'numpy.fabs', 'numpy.fabs', (['((tRzderiv - mRforcederivz) / tRzderiv)'], {}), '((tRzderiv - mRforcederivz) / tRzderiv)\n', (28608, 28647), False, 'import numpy\n'), ((28955, 28991), 'numpy.fabs', 'numpy.fabs', (['(tRzderiv - mRforcederivz)'], {}), '(tRzderiv - mRforcederivz)\n', (28965, 28991), False, 'import numpy\n'), ((28991, 29040), 'numpy.fabs', 'numpy.fabs', (['((tRzderiv - mRforcederivz) / tRzderiv)'], {}), '((tRzderiv - mRforcederivz) / tRzderiv)\n', (29001, 29040), False, 'import numpy\n'), ((30654, 30694), 'numpy.fabs', 'numpy.fabs', (['(tRphideriv - mRforcederivphi)'], {}), '(tRphideriv - mRforcederivphi)\n', (30664, 30694), False, 'import numpy\n'), ((30694, 30749), 'numpy.fabs', 'numpy.fabs', (['((tRphideriv - mRforcederivphi) / tRphideriv)'], {}), '((tRphideriv - mRforcederivphi) / tRphideriv)\n', (30704, 30749), False, 'import numpy\n'), ((31069, 31109), 'numpy.fabs', 'numpy.fabs', (['(tRphideriv - mRforcederivphi)'], {}), 
'(tRphideriv - mRforcederivphi)\n', (31079, 31109), False, 'import numpy\n'), ((31109, 31164), 'numpy.fabs', 'numpy.fabs', (['((tRphideriv - mRforcederivphi) / tRphideriv)'], {}), '((tRphideriv - mRforcederivphi) / tRphideriv)\n', (31119, 31164), False, 'import numpy\n'), ((36236, 36268), 'numpy.fabs', 'numpy.fabs', (['(tdens - tpoissondens)'], {}), '(tdens - tpoissondens)\n', (36246, 36268), False, 'import numpy\n'), ((36268, 36310), 'numpy.fabs', 'numpy.fabs', (['((tdens - tpoissondens) / tdens)'], {}), '((tdens - tpoissondens) / tdens)\n', (36278, 36310), False, 'import numpy\n'), ((36665, 36697), 'numpy.fabs', 'numpy.fabs', (['(tdens - tpoissondens)'], {}), '(tdens - tpoissondens)\n', (36675, 36697), False, 'import numpy\n'), ((36697, 36739), 'numpy.fabs', 'numpy.fabs', (['((tdens - tpoissondens) / tdens)'], {}), '((tdens - tpoissondens) / tdens)\n', (36707, 36739), False, 'import numpy\n'), ((42282, 42314), 'numpy.fabs', 'numpy.fabs', (['(tdens - tpoissondens)'], {}), '(tdens - tpoissondens)\n', (42292, 42314), False, 'import numpy\n'), ((42314, 42356), 'numpy.fabs', 'numpy.fabs', (['((tdens - tpoissondens) / tdens)'], {}), '((tdens - tpoissondens) / tdens)\n', (42324, 42356), False, 'import numpy\n'), ((42719, 42751), 'numpy.fabs', 'numpy.fabs', (['(tdens - tpoissondens)'], {}), '(tdens - tpoissondens)\n', (42729, 42751), False, 'import numpy\n'), ((42751, 42793), 'numpy.fabs', 'numpy.fabs', (['((tdens - tpoissondens) / tdens)'], {}), '((tdens - tpoissondens) / tdens)\n', (42761, 42793), False, 'import numpy\n'), ((83889, 83927), 'galpy.potential.evaluater2derivs', 'potential.evaluater2derivs', (['[pp]', 'R', 'z'], {}), '([pp], R, z)\n', (83915, 83927), False, 'from galpy import potential\n'), ((117978, 117993), 'numpy.sqrt', 'numpy.sqrt', (['(3.0)'], {}), '(3.0)\n', (117988, 117993), False, 'import numpy\n'), ((118358, 118373), 'numpy.sqrt', 'numpy.sqrt', (['(3.0)'], {}), '(3.0)\n', (118368, 118373), False, 'import numpy\n'), ((143889, 143941), 
'galpy.potential.vcirc', 'potential.vcirc', (['Irrgang13I', '(1.0)'], {'use_physical': '(False)'}), '(Irrgang13I, 1.0, use_physical=False)\n', (143904, 143941), False, 'from galpy import potential\n'), ((143939, 143994), 'galpy.potential.dvcircdR', 'potential.dvcircdR', (['Irrgang13I', '(1.0)'], {'use_physical': '(False)'}), '(Irrgang13I, 1.0, use_physical=False)\n', (143957, 143994), False, 'from galpy import potential\n'), ((144116, 144168), 'galpy.potential.vcirc', 'potential.vcirc', (['Irrgang13I', '(1.0)'], {'use_physical': '(False)'}), '(Irrgang13I, 1.0, use_physical=False)\n', (144131, 144168), False, 'from galpy import potential\n'), ((144166, 144221), 'galpy.potential.dvcircdR', 'potential.dvcircdR', (['Irrgang13I', '(1.0)'], {'use_physical': '(False)'}), '(Irrgang13I, 1.0, use_physical=False)\n', (144184, 144221), False, 'from galpy import potential\n'), ((145415, 145468), 'galpy.potential.vcirc', 'potential.vcirc', (['Irrgang13II', '(1.0)'], {'use_physical': '(False)'}), '(Irrgang13II, 1.0, use_physical=False)\n', (145430, 145468), False, 'from galpy import potential\n'), ((145466, 145522), 'galpy.potential.dvcircdR', 'potential.dvcircdR', (['Irrgang13II', '(1.0)'], {'use_physical': '(False)'}), '(Irrgang13II, 1.0, use_physical=False)\n', (145484, 145522), False, 'from galpy import potential\n'), ((145645, 145698), 'galpy.potential.vcirc', 'potential.vcirc', (['Irrgang13II', '(1.0)'], {'use_physical': '(False)'}), '(Irrgang13II, 1.0, use_physical=False)\n', (145660, 145698), False, 'from galpy import potential\n'), ((145696, 145752), 'galpy.potential.dvcircdR', 'potential.dvcircdR', (['Irrgang13II', '(1.0)'], {'use_physical': '(False)'}), '(Irrgang13II, 1.0, use_physical=False)\n', (145714, 145752), False, 'from galpy import potential\n'), ((146755, 146809), 'galpy.potential.vcirc', 'potential.vcirc', (['Irrgang13III', '(1.0)'], {'use_physical': '(False)'}), '(Irrgang13III, 1.0, use_physical=False)\n', (146770, 146809), False, 'from galpy import 
potential\n'), ((146807, 146864), 'galpy.potential.dvcircdR', 'potential.dvcircdR', (['Irrgang13III', '(1.0)'], {'use_physical': '(False)'}), '(Irrgang13III, 1.0, use_physical=False)\n', (146825, 146864), False, 'from galpy import potential\n'), ((146988, 147042), 'galpy.potential.vcirc', 'potential.vcirc', (['Irrgang13III', '(1.0)'], {'use_physical': '(False)'}), '(Irrgang13III, 1.0, use_physical=False)\n', (147003, 147042), False, 'from galpy import potential\n'), ((147040, 147097), 'galpy.potential.dvcircdR', 'potential.dvcircdR', (['Irrgang13III', '(1.0)'], {'use_physical': '(False)'}), '(Irrgang13III, 1.0, use_physical=False)\n', (147058, 147097), False, 'from galpy import potential\n'), ((149217, 149262), 'galpy.potential.vcirc', 'potential.vcirc', (['pot', '(1.0)'], {'use_physical': '(False)'}), '(pot, 1.0, use_physical=False)\n', (149232, 149262), False, 'from galpy import potential\n'), ((149260, 149308), 'galpy.potential.dvcircdR', 'potential.dvcircdR', (['pot', '(1.0)'], {'use_physical': '(False)'}), '(pot, 1.0, use_physical=False)\n', (149278, 149308), False, 'from galpy import potential\n'), ((149452, 149497), 'galpy.potential.vcirc', 'potential.vcirc', (['pot', '(1.0)'], {'use_physical': '(False)'}), '(pot, 1.0, use_physical=False)\n', (149467, 149497), False, 'from galpy import potential\n'), ((149495, 149543), 'galpy.potential.dvcircdR', 'potential.dvcircdR', (['pot', '(1.0)'], {'use_physical': '(False)'}), '(pot, 1.0, use_physical=False)\n', (149513, 149543), False, 'from galpy import potential\n'), ((203829, 203849), 'scipy.special.jv', 'special.jv', (['(0)', '(k * R)'], {}), '(0, k * R)\n', (203839, 203849), False, 'from scipy import special, integrate\n'), ((203847, 203867), 'scipy.special.jv', 'special.jv', (['(0)', '(k * a)'], {}), '(0, k * a)\n', (203857, 203867), False, 'from scipy import special, integrate\n'), ((252094, 252113), 'numpy.exp', 'numpy.exp', (['(-3.0 * R)'], {}), '(-3.0 * R)\n', (252103, 252113), False, 'import numpy\n'), 
((252872, 252885), 'numpy.fabs', 'numpy.fabs', (['z'], {}), '(z)\n', (252882, 252885), False, 'import numpy\n'), ((253022, 253035), 'numpy.fabs', 'numpy.fabs', (['z'], {}), '(z)\n', (253032, 253035), False, 'import numpy\n'), ((188773, 188792), 'numpy.exp', 'numpy.exp', (['(-3.0 * R)'], {}), '(-3.0 * R)\n', (188782, 188792), False, 'import numpy\n'), ((188811, 188837), 'numpy.cosh', 'numpy.cosh', (['(z / 2.0 * 27.0)'], {}), '(z / 2.0 * 27.0)\n', (188821, 188837), False, 'import numpy\n'), ((203878, 203891), 'numpy.fabs', 'numpy.fabs', (['z'], {}), '(z)\n', (203888, 203891), False, 'import numpy\n'), ((249826, 249845), 'numpy.exp', 'numpy.exp', (['(-3.0 * R)'], {}), '(-3.0 * R)\n', (249835, 249845), False, 'import numpy\n'), ((249886, 249912), 'numpy.cosh', 'numpy.cosh', (['(z / 2.0 * 27.0)'], {}), '(z / 2.0 * 27.0)\n', (249896, 249912), False, 'import numpy\n'), ((252166, 252179), 'numpy.fabs', 'numpy.fabs', (['z'], {}), '(z)\n', (252176, 252179), False, 'import numpy\n'), ((253169, 253182), 'numpy.fabs', 'numpy.fabs', (['z'], {}), '(z)\n', (253179, 253182), False, 'import numpy\n'), ((280931, 280962), 'galpy.potential.SpiralArmsPotential', 'potential.SpiralArmsPotential', ([], {}), '()\n', (280960, 280962), False, 'from galpy import potential\n'), ((12462, 12498), 'numpy.fabs', 'numpy.fabs', (['(tphiforce - mpotderivphi)'], {}), '(tphiforce - mpotderivphi)\n', (12472, 12498), False, 'import numpy\n'), ((12497, 12547), 'numpy.fabs', 'numpy.fabs', (['((tphiforce - mpotderivphi) / tphiforce)'], {}), '((tphiforce - mpotderivphi) / tphiforce)\n', (12507, 12547), False, 'import numpy\n'), ((12788, 12824), 'numpy.fabs', 'numpy.fabs', (['(tphiforce - mpotderivphi)'], {}), '(tphiforce - mpotderivphi)\n', (12798, 12824), False, 'import numpy\n'), ((12823, 12873), 'numpy.fabs', 'numpy.fabs', (['((tphiforce - mpotderivphi) / tphiforce)'], {}), '((tphiforce - mpotderivphi) / tphiforce)\n', (12833, 12873), False, 'import numpy\n'), ((250606, 250619), 'numpy.fabs', 'numpy.fabs', 
(['z'], {}), '(z)\n', (250616, 250619), False, 'import numpy\n'), ((251525, 251538), 'numpy.fabs', 'numpy.fabs', (['z'], {}), '(z)\n', (251535, 251538), False, 'import numpy\n'), ((252951, 252964), 'numpy.fabs', 'numpy.fabs', (['z'], {}), '(z)\n', (252961, 252964), False, 'import numpy\n'), ((22452, 22494), 'numpy.fabs', 'numpy.fabs', (['(tphi2deriv - mphiforcederivphi)'], {}), '(tphi2deriv - mphiforcederivphi)\n', (22462, 22494), False, 'import numpy\n'), ((22494, 22551), 'numpy.fabs', 'numpy.fabs', (['((tphi2deriv - mphiforcederivphi) / tphi2deriv)'], {}), '((tphi2deriv - mphiforcederivphi) / tphi2deriv)\n', (22504, 22551), False, 'import numpy\n'), ((22835, 22877), 'numpy.fabs', 'numpy.fabs', (['(tphi2deriv - mphiforcederivphi)'], {}), '(tphi2deriv - mphiforcederivphi)\n', (22845, 22877), False, 'import numpy\n'), ((22877, 22934), 'numpy.fabs', 'numpy.fabs', (['((tphi2deriv - mphiforcederivphi) / tphi2deriv)'], {}), '((tphi2deriv - mphiforcederivphi) / tphi2deriv)\n', (22887, 22934), False, 'import numpy\n'), ((24464, 24504), 'numpy.fabs', 'numpy.fabs', (['(tRphideriv - mRforcederivphi)'], {}), '(tRphideriv - mRforcederivphi)\n', (24474, 24504), False, 'import numpy\n'), ((24504, 24559), 'numpy.fabs', 'numpy.fabs', (['((tRphideriv - mRforcederivphi) / tRphideriv)'], {}), '((tRphideriv - mRforcederivphi) / tRphideriv)\n', (24514, 24559), False, 'import numpy\n'), ((24850, 24890), 'numpy.fabs', 'numpy.fabs', (['(tRphideriv - mRforcederivphi)'], {}), '(tRphideriv - mRforcederivphi)\n', (24860, 24890), False, 'import numpy\n'), ((24890, 24945), 'numpy.fabs', 'numpy.fabs', (['((tRphideriv - mRforcederivphi) / tRphideriv)'], {}), '((tRphideriv - mRforcederivphi) / tRphideriv)\n', (24900, 24945), False, 'import numpy\n')] |
"""
Orbit discipline for CADRE
"""
from math import sqrt
from six.moves import range
import numpy as np
from openmdao.api import ExplicitComponent
from CADRE import rk4
# Constants
mu = 398600.44
Re = 6378.137
J2 = 1.08264e-3
J3 = -2.51e-6
J4 = -1.60e-6
C1 = -mu
C2 = -1.5*mu*J2*Re**2
C3 = -2.5*mu*J3*Re**3
C4 = 1.875*mu*J4*Re**4
class Orbit_Dynamics(rk4.RK4):
    """
    Computes the Earth to body position vector in Earth-centered intertial frame.

    Integrates the perturbed two-body equations of motion using the RK4 base
    class. The perturbation model includes the J2, J3 and J4 zonal harmonics
    via the module-level constants C1-C4.
    """
    def __init__(self, n_times, h):
        # n_times: number of time points in the trajectory; h: RK4 step size.
        super(Orbit_Dynamics, self).__init__(n_times, h)
        self.n_times = n_times
        # Tell the RK4 base class which I/O variables carry the integrated
        # state and its initial condition.
        self.options['state_var'] = 'r_e2b_I'
        self.options['init_state_var'] = 'r_e2b_I0'
    def setup(self):
        # Declare component I/O. The 6-vector state is [x, y, z, vx, vy, vz].
        self.add_input('r_e2b_I0', np.zeros((6, )), units=None,  # fd_step=1e-2,
                       desc='Initial position and velocity vectors from earth to '
                            'satellite in Earth-centered inertial frame')
        self.add_output('r_e2b_I', 1000.0*np.ones((6, self.n_times)), units=None,
                        desc='Position and velocity vectors from earth to satellite '
                             'in Earth-centered inertial frame over time')
        # This component has no external (time-varying) inputs, so the
        # derivative of f w.r.t. externals is a constant zero (see df_dx).
        self.dfdx = np.zeros((6, 1))
    def f_dot(self, external, state):
        """Time derivative of the state for the J2-J4 perturbed two-body problem."""
        x = state[0]
        y = state[1]
        # Guard against division by zero: T3z below divides by z.
        z = state[2] if abs(state[2]) > 1e-15 else 1e-5
        z2 = z*z
        z3 = z2*z
        z4 = z3*z
        r = sqrt(x*x + y*y + z2)
        r2 = r*r
        r3 = r2*r
        r4 = r3*r
        r5 = r4*r
        r7 = r5*r*r
        # Zonal-harmonic shape factors for the J2/J3/J4 terms.
        T2 = 1 - 5*z2/r2
        T3 = 3*z - 7*z3/r2
        T4 = 1 - 14*z2/r2 + 21*z4/r4
        T3z = 3*z - 0.6*r2/z
        T4z = 4 - 28.0/3.0*z2/r2
        f_dot = np.zeros((6,))
        # d(position)/dt = velocity.
        f_dot[0:3] = state[3:]
        # Acceleration: central gravity (C1) plus zonal perturbations (C2-C4).
        f_dot[3:] = state[0:3]*(C1/r3 + C2/r5*T2 + C3/r7*T3 + C4/r7*T4)
        # Additional z-only contribution of the zonal harmonics.
        f_dot[5] += z*(2.0*C2/r5 + C3/r7*T3z + C4/r7*T4z)
        return f_dot
    def df_dy(self, external, state):
        """Analytic 6x6 Jacobian of f_dot with respect to the state."""
        x = state[0]
        y = state[1]
        # Same z guard as in f_dot so the Jacobian is consistent with it.
        z = state[2] if abs(state[2]) > 1e-15 else 1e-5
        z2 = z*z
        z3 = z2*z
        z4 = z3*z
        r = sqrt(x*x + y*y + z2)
        r2 = r*r
        r3 = r2*r
        r4 = r3*r
        r5 = r4*r
        r6 = r5*r
        r7 = r6*r
        r8 = r7*r
        # dr/d[x, y, z] — the unit radial direction.
        dr = np.array([x, y, z])/r
        T2 = 1 - 5*z2/r2
        T3 = 3*z - 7*z3/r2
        T4 = 1 - 14*z2/r2 + 21*z4/r4
        T3z = 3*z - 0.6*r2/z
        T4z = 4 - 28.0/3.0*z2/r2
        # Partials of the shape factors w.r.t. [x, y, z]; the [2] entries add
        # the explicit dependence on z on top of the r chain-rule term.
        dT2 = (10*z2)/(r3)*dr
        dT2[2] -= 10.*z/r2
        dT3 = 14*z3/r3*dr
        dT3[2] -= 21.*z2/r2 - 3
        dT4 = (28*z2/r3 - 84.*z4/r5)*dr
        dT4[2] -= 28*z/r2 - 84*z3/r4
        dT3z = -1.2*r/z*dr
        dT3z[2] += 0.6*r2/z2 + 3
        dT4z = 56.0/3.0*z2/r3*dr
        dT4z[2] -= 56.0/3.0*z/r2
        eye = np.identity(3)
        dfdy = np.zeros((6, 6))
        # d(position-rate)/d(velocity) block.
        dfdy[0:3, 3:] += eye
        # Diagonal part of the acceleration Jacobian.
        dfdy[3:, :3] += eye*(C1/r3 + C2/r5*T2 + C3/r7*T3 + C4/r7*T4)
        # Chain-rule contribution through r.
        fact = (-3*C1/r4 - 5*C2/r6*T2 - 7*C3/r8*T3 - 7*C4/r8*T4)
        dfdy[3:, 0] += dr[0]*state[:3]*fact
        dfdy[3:, 1] += dr[1]*state[:3]*fact
        dfdy[3:, 2] += dr[2]*state[:3]*fact
        # Chain-rule contribution through the shape factors.
        dfdy[3:, 0] += state[:3]*(C2/r5*dT2[0] + C3/r7*dT3[0] + C4/r7*dT4[0])
        dfdy[3:, 1] += state[:3]*(C2/r5*dT2[1] + C3/r7*dT3[1] + C4/r7*dT4[1])
        dfdy[3:, 2] += state[:3]*(C2/r5*dT2[2] + C3/r7*dT3[2] + C4/r7*dT4[2])
        # Partials of the extra z-acceleration term (f_dot[5]).
        dfdy[5, :3] += dr*z*(-5*C2/r6*2 - 7*C3/r8*T3z - 7*C4/r8*T4z)
        dfdy[5, :3] += z*(C3/r7*dT3z + C4/r7*dT4z)
        dfdy[5, 2] += (C2/r5*2 + C3/r7*T3z + C4/r7*T4z)
        return dfdy
    def df_dx(self, external, state):
        """Jacobian of f_dot w.r.t. external inputs: none here, so all zeros."""
        return self.dfdx
class Orbit_Initial(ExplicitComponent):
    """
    Computes initial position and velocity vectors of Earth to body position

    Converts six classical orbital elements into a Cartesian
    position/velocity state in the Earth-centered inertial (ECI) frame.
    Partial derivatives are obtained with the complex-step method, which is
    why the intermediate math is carried out in complex arithmetic.
    """
    def setup(self):
        # Inputs
        self.add_input('altPerigee', 500.)
        self.add_input('altApogee', 500.)
        self.add_input('RAAN', 66.279)
        self.add_input('Inc', 82.072)
        self.add_input('argPerigee', 0.0)
        self.add_input('trueAnomaly', 337.987)
        # Outputs
        self.add_output('r_e2b_I0', np.ones((6,)), units=None,
                        desc='Initial position and velocity vectors from Earth '
                             'to satellite in Earth-centered inertial frame')
        self.declare_partials('*', '*')
    def compute_rv(self, altPerigee, altApogee, RAAN, Inc, argPerigee, trueAnomaly):
        """
        Compute position and velocity from orbital elements

        Angles are in degrees. Returns complex arrays so the caller can use
        complex-step differentiation; `compute` takes the real parts.
        """
        Re = 6378.137
        mu = 398600.44
        def S(v):
            # Skew-symmetric (cross-product) matrix of v.
            S = np.zeros((3, 3), complex)
            S[0, :] = [0, -v[2], v[1]]
            S[1, :] = [v[2], 0, -v[0]]
            S[2, :] = [-v[1], v[0], 0]
            return S
        def getRotation(axis, angle):
            # Rodrigues' rotation formula, kept complex for the complex step.
            R = np.eye(3, dtype=complex) + S(axis)*np.sin(angle) + \
                (1 - np.cos(angle)) * (np.outer(axis, axis) - np.eye(3, dtype=complex))
            return R
        d2r = np.pi/180.0
        r_perigee = Re + altPerigee
        r_apogee = Re + altApogee
        e = (r_apogee-r_perigee)/(r_apogee+r_perigee)
        a = (r_perigee+r_apogee)/2
        p = a*(1-e**2)
        # h = np.sqrt(p*mu)
        # Position and velocity expressed in the perifocal frame.
        rmag0 = p/(1+e*np.cos(d2r*trueAnomaly))
        r0_P = np.array([rmag0*np.cos(d2r*trueAnomaly),
                         rmag0*np.sin(d2r*trueAnomaly),
                         0], complex)
        v0_P = np.array([-np.sqrt(mu/p)*np.sin(d2r*trueAnomaly),
                         np.sqrt(mu/p)*(e+np.cos(d2r*trueAnomaly)),
                         0], complex)
        # Rotate perifocal -> ECI with the 3-1-3 sequence: RAAN about z,
        # inclination about x, argument of perigee about z.
        O_IP = np.eye(3, dtype=complex)
        O_IP = np.dot(O_IP, getRotation(np.array([0, 0, 1]), RAAN*d2r))
        O_IP = np.dot(O_IP, getRotation(np.array([1, 0, 0]), Inc*d2r))
        O_IP = np.dot(O_IP, getRotation(np.array([0, 0, 1]), argPerigee*d2r))
        r0_ECI = np.dot(O_IP, r0_P)
        v0_ECI = np.dot(O_IP, v0_P)
        return r0_ECI, v0_ECI
    def compute(self, inputs, outputs):
        """
        Calculate outputs.
        """
        r0_ECI, v0_ECI = self.compute_rv(inputs['altPerigee'], inputs['altApogee'],
                                         inputs['RAAN'], inputs['Inc'], inputs['argPerigee'],
                                         inputs['trueAnomaly'])
        outputs['r_e2b_I0'][:3] = r0_ECI.real
        outputs['r_e2b_I0'][3:] = v0_ECI.real
    def compute_partials(self, inputs, J):
        """
        Calculate and save derivatives. (i.e., Jacobian).
        """
        # Complex-step size: imaginary perturbation gives derivatives with no
        # subtractive cancellation, so h can be tiny.
        h = 1e-16
        ih = complex(0, h)
        v = np.zeros(6, complex)
        v[:] = [inputs['altPerigee'], inputs['altApogee'], inputs['RAAN'],
                inputs['Inc'], inputs['argPerigee'], inputs['trueAnomaly']]
        jacs = np.zeros((6, 6))
        # Find derivatives by complex step.
        for i in range(6):
            v[i] += ih
            r0_ECI, v0_ECI = self.compute_rv(v[0], v[1], v[2], v[3], v[4], v[5])
            v[i] -= ih
            jacs[:3, i] = r0_ECI.imag/h
            jacs[3:, i] = v0_ECI.imag/h
        J['r_e2b_I0', 'altPerigee'] = jacs[:, 0]
        J['r_e2b_I0', 'altApogee'] = jacs[:, 1]
        J['r_e2b_I0', 'RAAN'] = jacs[:, 2]
        J['r_e2b_I0', 'Inc'] = jacs[:, 3]
        J['r_e2b_I0', 'argPerigee'] = jacs[:, 4]
        J['r_e2b_I0', 'trueAnomaly'] = jacs[:, 5]
| [
"numpy.outer",
"numpy.eye",
"math.sqrt",
"six.moves.range",
"numpy.zeros",
"numpy.identity",
"numpy.ones",
"numpy.sin",
"numpy.array",
"numpy.cos",
"numpy.dot",
"numpy.sqrt"
] | [((1217, 1233), 'numpy.zeros', 'np.zeros', (['(6, 1)'], {}), '((6, 1))\n', (1225, 1233), True, 'import numpy as np\n'), ((1439, 1463), 'math.sqrt', 'sqrt', (['(x * x + y * y + z2)'], {}), '(x * x + y * y + z2)\n', (1443, 1463), False, 'from math import sqrt\n'), ((1721, 1735), 'numpy.zeros', 'np.zeros', (['(6,)'], {}), '((6,))\n', (1729, 1735), True, 'import numpy as np\n'), ((2124, 2148), 'math.sqrt', 'sqrt', (['(x * x + y * y + z2)'], {}), '(x * x + y * y + z2)\n', (2128, 2148), False, 'from math import sqrt\n'), ((2797, 2811), 'numpy.identity', 'np.identity', (['(3)'], {}), '(3)\n', (2808, 2811), True, 'import numpy as np\n'), ((2828, 2844), 'numpy.zeros', 'np.zeros', (['(6, 6)'], {}), '((6, 6))\n', (2836, 2844), True, 'import numpy as np\n'), ((5598, 5622), 'numpy.eye', 'np.eye', (['(3)'], {'dtype': 'complex'}), '(3, dtype=complex)\n', (5604, 5622), True, 'import numpy as np\n'), ((5862, 5880), 'numpy.dot', 'np.dot', (['O_IP', 'r0_P'], {}), '(O_IP, r0_P)\n', (5868, 5880), True, 'import numpy as np\n'), ((5898, 5916), 'numpy.dot', 'np.dot', (['O_IP', 'v0_P'], {}), '(O_IP, v0_P)\n', (5904, 5916), True, 'import numpy as np\n'), ((6557, 6577), 'numpy.zeros', 'np.zeros', (['(6)', 'complex'], {}), '(6, complex)\n', (6565, 6577), True, 'import numpy as np\n'), ((6744, 6760), 'numpy.zeros', 'np.zeros', (['(6, 6)'], {}), '((6, 6))\n', (6752, 6760), True, 'import numpy as np\n'), ((6823, 6831), 'six.moves.range', 'range', (['(6)'], {}), '(6)\n', (6828, 6831), False, 'from six.moves import range\n'), ((749, 763), 'numpy.zeros', 'np.zeros', (['(6,)'], {}), '((6,))\n', (757, 763), True, 'import numpy as np\n'), ((2285, 2304), 'numpy.array', 'np.array', (['[x, y, z]'], {}), '([x, y, z])\n', (2293, 2304), True, 'import numpy as np\n'), ((4117, 4130), 'numpy.ones', 'np.ones', (['(6,)'], {}), '((6,))\n', (4124, 4130), True, 'import numpy as np\n'), ((4594, 4619), 'numpy.zeros', 'np.zeros', (['(3, 3)', 'complex'], {}), '((3, 3), complex)\n', (4602, 4619), True, 'import numpy 
as np\n'), ((995, 1021), 'numpy.ones', 'np.ones', (['(6, self.n_times)'], {}), '((6, self.n_times))\n', (1002, 1021), True, 'import numpy as np\n'), ((5663, 5682), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (5671, 5682), True, 'import numpy as np\n'), ((5735, 5754), 'numpy.array', 'np.array', (['[1, 0, 0]'], {}), '([1, 0, 0])\n', (5743, 5754), True, 'import numpy as np\n'), ((5806, 5825), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (5814, 5825), True, 'import numpy as np\n'), ((4813, 4837), 'numpy.eye', 'np.eye', (['(3)'], {'dtype': 'complex'}), '(3, dtype=complex)\n', (4819, 4837), True, 'import numpy as np\n'), ((5236, 5261), 'numpy.cos', 'np.cos', (['(d2r * trueAnomaly)'], {}), '(d2r * trueAnomaly)\n', (5242, 5261), True, 'import numpy as np\n'), ((5292, 5317), 'numpy.cos', 'np.cos', (['(d2r * trueAnomaly)'], {}), '(d2r * trueAnomaly)\n', (5298, 5317), True, 'import numpy as np\n'), ((5348, 5373), 'numpy.sin', 'np.sin', (['(d2r * trueAnomaly)'], {}), '(d2r * trueAnomaly)\n', (5354, 5373), True, 'import numpy as np\n'), ((5451, 5476), 'numpy.sin', 'np.sin', (['(d2r * trueAnomaly)'], {}), '(d2r * trueAnomaly)\n', (5457, 5476), True, 'import numpy as np\n'), ((5501, 5516), 'numpy.sqrt', 'np.sqrt', (['(mu / p)'], {}), '(mu / p)\n', (5508, 5516), True, 'import numpy as np\n'), ((4848, 4861), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (4854, 4861), True, 'import numpy as np\n'), ((4887, 4900), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (4893, 4900), True, 'import numpy as np\n'), ((4905, 4925), 'numpy.outer', 'np.outer', (['axis', 'axis'], {}), '(axis, axis)\n', (4913, 4925), True, 'import numpy as np\n'), ((4928, 4952), 'numpy.eye', 'np.eye', (['(3)'], {'dtype': 'complex'}), '(3, dtype=complex)\n', (4934, 4952), True, 'import numpy as np\n'), ((5437, 5452), 'numpy.sqrt', 'np.sqrt', (['(mu / p)'], {}), '(mu / p)\n', (5444, 5452), True, 'import numpy as np\n'), ((5518, 5543), 'numpy.cos', 'np.cos', (['(d2r 
* trueAnomaly)'], {}), '(d2r * trueAnomaly)\n', (5524, 5543), True, 'import numpy as np\n')] |
import logging
import numpy as np
import tensorflow as tf
from debias.datasets.training_data_loader import PREMISE_KEY
from debias.utils import py_utils, configured, ops
def build_epoch_fn(lst, sample=None, shuffle=False):
  """Build a function to return `lst` after sampling/shuffling"""
  if sample:
    # Draw `sample` elements without replacement on every call; keep the
    # original ordering unless shuffling was requested.
    def get():
      chosen = np.random.choice(len(lst), sample, replace=False)
      if not shuffle:
        chosen.sort()
      return [lst[i] for i in chosen]
    return get
  if shuffle:
    # Full epoch: return a freshly shuffled copy each call.
    def get():
      shuffled = list(lst)
      np.random.shuffle(shuffled)
      return shuffled
    return get
  # No sampling or shuffling requested: hand back the list itself.
  def get():
    return lst
  return get
def build_stratified_epoch_fn(lst, n_groups):
  """Build a function to return `lst` after doing a stratified shuffle

  Assuming the data is sorted by a per-example score, the data will yield examples
  with scores that are deliberately spread out

  We used this for some of the QA dataset so its preserved here for exactness,
  although I *think* it doesn't really make a difference
  """
  # Split lst into group, assuming lst is sorted by the score we are stratifying on,
  # each group will contain examples with a similar score
  groups = py_utils.split(lst, n_groups)
  def build():
    # Copy each group so repeated calls do not mutate the captured `groups`.
    local_groups = [list(x) for x in groups]
    for group in local_groups:
      # Shuffle the individual groups
      np.random.shuffle(group)
    # Merge the groups
    # Round-robin: take one element from each group per pass so adjacent
    # output items come from different score strata.
    out = []
    while local_groups:
      for group in local_groups:
        out.append(group.pop())
      # Drop exhausted groups so the while-loop terminates.
      local_groups = [x for x in local_groups if len(x) > 0]
    return out
  return build
class QuantileBatcher(configured.Configured):
  """Batch a dataset by keeping a histogram of example lengths, and
  batching together examples whose lengths are in the same quantile"""
  def __init__(self, batch_size, hist_min, hist_max, hist_step, n_buckets):
    # batch_size: number of examples per batch.
    # hist_min / hist_max / hist_step: range and bin width of the running
    #   length histogram used to estimate quantiles.
    # n_buckets: number of quantile buckets examples are grouped into.
    self.batch_size = batch_size
    self.hist_min = hist_min
    self.hist_max = hist_max
    self.hist_step = hist_step
    self.n_buckets = n_buckets
  def batch(self, dataset: tf.data.Dataset) -> tf.data.Dataset:
    # Histogram bin boundaries for example lengths.
    bounds = list(range(self.hist_min, self.hist_max, self.hist_step))
    logging.info(
      "Quantile bucketing from %d-%d with %d buckets" %
      (bounds[0], bounds[-1], len(bounds)))
    # An example's length is the first dimension of its premise tensor.
    return dataset.apply(ops.bucket_by_quantiles(
      len_fn=lambda x: tf.shape(x[PREMISE_KEY])[0],
      batch_size=self.batch_size,
      n_buckets=self.n_buckets,
      hist_bounds=bounds
    ))
| [
"debias.utils.py_utils.split",
"tensorflow.shape",
"numpy.random.shuffle"
] | [((1156, 1185), 'debias.utils.py_utils.split', 'py_utils.split', (['lst', 'n_groups'], {}), '(lst, n_groups)\n', (1170, 1185), False, 'from debias.utils import py_utils, configured, ops\n'), ((1322, 1346), 'numpy.random.shuffle', 'np.random.shuffle', (['group'], {}), '(group)\n', (1339, 1346), True, 'import numpy as np\n'), ((522, 544), 'numpy.random.shuffle', 'np.random.shuffle', (['cpy'], {}), '(cpy)\n', (539, 544), True, 'import numpy as np\n'), ((2318, 2342), 'tensorflow.shape', 'tf.shape', (['x[PREMISE_KEY]'], {}), '(x[PREMISE_KEY])\n', (2326, 2342), True, 'import tensorflow as tf\n')] |
import os
import sys
import argparse
import numpy as np
import matplotlib.pyplot as plt
from pathlib import Path
import json
from itertools import cycle
import seaborn as sns
import matplotlib.gridspec as gridspec
import copy
#USAGE: python .\DL_Layer_Analysis\plot_DL_json_results.py --main_dir RFWFC\results\mnist
def get_args():
	"""Parse the command-line arguments for the plotting script."""
	parser = argparse.ArgumentParser(description='Network Smoothness Script')
	parser.add_argument('--main_dir', type=str, help='Results folder', default=None)
	parser.add_argument('--checkpoints', '--list', nargs='+', default=None)
	return parser.parse_args()
def plot_layers(main_dir, checkpoints=None, plot_test=True, add_fill=False, remove_layers=0, \
	use_clustering=False, remove_begin=0):
	"""Plot per-layer alpha-scores (and optionally test accuracy / clustering
	metrics) from result JSON files.

	Args:
		main_dir: folder searched recursively for '*.json' result files
			(ignored when `checkpoints` is given).
		checkpoints: explicit list of result-file paths to plot instead.
		plot_test: also draw the test-accuracy bar chart (and clustering axes).
		add_fill: shade the min-max band of each layer's alpha values.
		remove_layers: drop this many layers from the end of each curve.
		remove_begin: drop this many layers from the start of each curve.
		use_clustering: add a third axis with per-layer clustering statistics.
	"""
	if plot_test:
		# Lay out 2 axes (alpha curves + accuracy bars), or 3 with clustering.
		if not use_clustering:
			fig, axes = plt.subplots(1, 2)
			gs = gridspec.GridSpec(1, 2, width_ratios=[10, 1])
			axes = [None, None]
		else:
			fig, axes = plt.subplots(1, 3, figsize=(12, 10))
			gs = gridspec.GridSpec(1, 3, width_ratios=[10, 1, 10])
			axes = [None, None, None]
			axes[2] = plt.subplot(gs[2])
		axes[0] = plt.subplot(gs[0])
		axes[1] = plt.subplot(gs[1])
		axes[0].set_ylabel(r'$\alpha$'+'-score')
		axes[1].set_xticks([])
		axes[0].set_xticks([-1, 0, 1, 2, 3, 4, 5, 6] )
		axes[1].set_ylabel('Test Accuracy')
		axes[0].set_title(f"Comparing " + r'$\alpha$' + "-scores")
		axes[0].set_xlabel("Layers")
		axes[1].set_title("Test Accuracy Scores")
		if use_clustering:
			axes[2].set_title("Clustering Statistics")
			axes[2].set_xlabel("Layers")
			axes[2].set_title("Clustering Metrics")
	else:
		fig, axes = plt.subplots(1, 1)
		axes = [axes]
	if checkpoints is not None:
		file_paths = checkpoints
	else:
		file_paths = list(Path(main_dir).glob('**/*.json'))
	file_paths = [str(k) for k in file_paths]
	try:
		# Sort by a path component (assumed run/epoch name); best effort only.
		file_paths.sort(key=lambda x: str(x).split('/')[-5])
	except Exception as e:
		print(f"error:{e}")
	clustering_stats = None
	colors = sns.color_palette("pastel", 20)
	test_results = []
	handles = []
	width = 0.25
	for idx, file_path in enumerate(file_paths):
		file_path = str(file_path)
		# The label is taken from the parent directory name (e.g. the epoch).
		epoch = str(file_path).split('/')[-2]
		with open(file_path, "r+") as f:
			result = json.load(f)
		sizes = result["sizes"]
		alphas = result["alphas"]
		if remove_layers > 0:
			sizes, alphas = sizes[:-remove_layers], alphas[:-remove_layers]
		if remove_begin > 0:
			sizes, alphas = sizes[remove_begin:], alphas[remove_begin:]
		test_stats = None
		if 'test_stats' in result:
			test_stats = result['test_stats']
		if 'clustering_stats' in result:
			clustering_stats = result['clustering_stats']
		if add_fill:
			# Shade between the min and max alpha value of each layer.
			axes[0].fill_between(sizes, [k[0] for k in alphas], [k[-1] for k in alphas], \
				alpha=0.2, linewidth=4)
		# Plot the mean alpha per layer.
		values = [np.array(k).mean() for k in alphas]
		print(values)
		axes[0].plot(sizes, values, label=epoch, color=colors[idx%len(colors)])
		if test_stats is not None and plot_test:
			test_results.append([test_stats['top_1_accuracy']])
			axes[1].bar(idx*width, [test_stats['top_1_accuracy']], width, \
				label=str(epoch), color=colors[idx%len(colors)])
		# Distinct line styles for the different clustering metrics.
		lines = ["-","--","-.",":", "-*", "-+"]
		linecycler = cycle(lines)
		if use_clustering:
			if clustering_stats is not None and plot_test:
				keys = sorted(list(clustering_stats.keys()))
				if len(keys) == 0:
					continue
				stat_names = clustering_stats[list(keys)[0]].keys()
				for chosen_stat in stat_names:
					values = [clustering_stats[k][chosen_stat] for k in keys]
					if idx == 0:
						# Keep a copy of the first file's handles for the legend.
						h, = axes[2].plot(keys, values, next(linecycler), color=colors[idx], label=f"{chosen_stat}")
						handles.append(copy.copy(h))
					else:
						axes[2].plot(keys, values, next(linecycler), color=colors[idx])
	axes[0].legend()
	# Legend handles are recolored black so they describe line style only.
	for h in handles:
		h.set_color("black")
	if use_clustering:
		axes[2].legend(handles=handles)
	plt.show()
if __name__ == '__main__':
args = get_args()
plot_epochs(args.main_dir, args.checkpoints, plot_test=True, add_fill=True) | [
"matplotlib.pyplot.subplot",
"json.load",
"matplotlib.pyplot.show",
"argparse.ArgumentParser",
"copy.copy",
"pathlib.Path",
"numpy.array",
"seaborn.color_palette",
"itertools.cycle",
"matplotlib.gridspec.GridSpec",
"matplotlib.pyplot.subplots"
] | [((343, 407), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Network Smoothness Script"""'}), "(description='Network Smoothness Script')\n", (366, 407), False, 'import argparse\n'), ((1953, 1984), 'seaborn.color_palette', 'sns.color_palette', (['"""pastel"""', '(20)'], {}), "('pastel', 20)\n", (1970, 1984), True, 'import seaborn as sns\n'), ((3866, 3876), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3874, 3876), True, 'import matplotlib.pyplot as plt\n'), ((1087, 1105), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[0]'], {}), '(gs[0])\n', (1098, 1105), True, 'import matplotlib.pyplot as plt\n'), ((1120, 1138), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[1]'], {}), '(gs[1])\n', (1131, 1138), True, 'import matplotlib.pyplot as plt\n'), ((1605, 1623), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (1617, 1623), True, 'import matplotlib.pyplot as plt\n'), ((3175, 3187), 'itertools.cycle', 'cycle', (['lines'], {}), '(lines)\n', (3180, 3187), False, 'from itertools import cycle\n'), ((796, 814), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {}), '(1, 2)\n', (808, 814), True, 'import matplotlib.pyplot as plt\n'), ((823, 868), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(1)', '(2)'], {'width_ratios': '[10, 1]'}), '(1, 2, width_ratios=[10, 1])\n', (840, 868), True, 'import matplotlib.gridspec as gridspec\n'), ((915, 951), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(3)'], {'figsize': '(12, 10)'}), '(1, 3, figsize=(12, 10))\n', (927, 951), True, 'import matplotlib.pyplot as plt\n'), ((960, 1009), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(1)', '(3)'], {'width_ratios': '[10, 1, 10]'}), '(1, 3, width_ratios=[10, 1, 10])\n', (977, 1009), True, 'import matplotlib.gridspec as gridspec\n'), ((1053, 1071), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[2]'], {}), '(gs[2])\n', (1064, 1071), True, 'import matplotlib.pyplot as plt\n'), 
((2207, 2219), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2216, 2219), False, 'import json\n'), ((1724, 1738), 'pathlib.Path', 'Path', (['main_dir'], {}), '(main_dir)\n', (1728, 1738), False, 'from pathlib import Path\n'), ((2766, 2777), 'numpy.array', 'np.array', (['k'], {}), '(k)\n', (2774, 2777), True, 'import numpy as np\n'), ((3653, 3665), 'copy.copy', 'copy.copy', (['h'], {}), '(h)\n', (3662, 3665), False, 'import copy\n')] |
import os
import re
import numpy as np
from keras import Input, Model
from keras.layers import Embedding
from keras_preprocessing.sequence import pad_sequences
from keras_preprocessing.text import Tokenizer
import tensorflow as tf
from autokeras.constant import Constant
from autokeras.utils import download_file_with_extract
import GPUtil
from keras import backend as K
def download_pre_train(file_path, extract_path):
    """Download and extract the pre-trained embedding weights.

    Fetches the archive at ``Constant.PRE_TRAIN_FILE_LINK`` into
    ``file_path`` and extracts it under ``extract_path``.
    """
    file_link = Constant.PRE_TRAIN_FILE_LINK
    print("try downloading pre train weights from link %s" % file_link)
    download_file_with_extract(file_link, file_path=file_path, extract_path=extract_path)
def clean_str(string):
    """
    Tokenization/string cleaning for all string.
    Original taken from https://github.com/yoonkim/CNN_sentence/blob/master/process_data.py

    Separates punctuation and common English contractions with spaces,
    collapses runs of whitespace, and lower-cases the result.
    """
    # Drop every character outside the allowed alphabet.
    string = re.sub(r"[^A-Za-z0-9(),!?\'\`]", " ", string)
    # Split off contractions as their own tokens.
    string = re.sub(r"\'s", " \'s", string)
    string = re.sub(r"\'ve", " \'ve", string)
    string = re.sub(r"n\'t", " n\'t", string)
    string = re.sub(r"\'re", " \'re", string)
    string = re.sub(r"\'d", " \'d", string)
    string = re.sub(r"\'ll", " \'ll", string)
    # Surround punctuation with spaces so each mark becomes a token.
    string = re.sub(r",", " , ", string)
    string = re.sub(r"!", " ! ", string)
    # Bug fix: the replacements used to be " \( ", " \) " and " \? ".
    # re.sub leaves such unknown escapes in the replacement verbatim, so the
    # cleaned text contained literal backslashes ("\(", "\)", "\?").
    # Only the *pattern* needs escaping, not the replacement.
    string = re.sub(r"\(", " ( ", string)
    string = re.sub(r"\)", " ) ", string)
    string = re.sub(r"\?", " ? ", string)
    string = re.sub(r"\s{2,}", " ", string)
    return string.strip().lower()
def tokenlize_text(max_num_words, max_seq_length, x_train):
    """Tokenlize text class.
    Vectorize a text corpus by transform each text in texts to a sequence of integers.
    Attributes:
        max_num_words: int, max number of words in the dictionary
        max_seq_length: int, the length of each text sequence, padding if shorter, trim is longer
        x_train: list contains text data
    Returns:
        Tuple of (padded integer sequences, word -> index mapping).
    """
    print("tokenlizing texts...")
    tokenizer = Tokenizer(num_words=max_num_words)
    tokenizer.fit_on_texts(x_train)
    sequences = tokenizer.texts_to_sequences(x_train)
    word_index = tokenizer.word_index
    # Pad/trim every sequence to a fixed length so they can be batched.
    x_train = pad_sequences(sequences, maxlen=max_seq_length)
    print("data readed and convert to %d length sequences" % max_seq_length)
    return x_train, word_index
def read_embedding_index(extract_path):
    """Load pre-trained word vectors from the extracted embedding file.

    Args:
        extract_path: directory containing ``Constant.PRE_TRAIN_FILE_NAME``,
            a whitespace-separated file with one word followed by its vector
            components per line.

    Returns:
        dict mapping each word (str) to its float32 numpy vector.
    """
    embedding_index = {}
    # Resource fix: the original opened and closed the file manually, leaking
    # the handle if parsing raised. A context manager guarantees closure.
    with open(os.path.join(extract_path, Constant.PRE_TRAIN_FILE_NAME)) as f:
        for line in f:
            values = line.split()
            word = values[0]
            # Remaining tokens on the line are the embedding components.
            coefs = np.asarray(values[1:], dtype='float32')
            embedding_index[word] = coefs
    return embedding_index
def load_pretrain(path, word_index):
    """Build an embedding weight matrix from the pre-trained vectors.

    Downloads/extracts the pre-trained embeddings under ``path``, then builds
    a ``(len(word_index) + 1, Constant.EMBEDDING_DIM)`` matrix where row ``i``
    holds the pre-trained vector of the word with index ``i``. Words without
    a pre-trained vector keep their random initialization.
    """
    print("loading pretrain weights...")
    file_path = os.path.join(path, Constant.FILE_PATH)
    extract_path = os.path.join(path, Constant.EXTRACT_PATH)
    download_pre_train(file_path=file_path, extract_path=extract_path)
    embedding_index = read_embedding_index(extract_path)
    print('Total %s word vectors embedded.' % len(embedding_index))
    # convert the pretrained embedding index to weights
    embedding_matrix = np.random.random((len(word_index) + 1, Constant.EMBEDDING_DIM))
    for word, i in word_index.items():
        embedding_vector = embedding_index.get(word)
        if embedding_vector is not None:
            embedding_matrix[i] = embedding_vector
    return embedding_matrix
def processing(path, word_index, input_length, x_train):
    """Embed tokenized sequences with pre-trained vectors on the first free GPU.

    Builds a one-layer Keras model (a frozen Embedding initialized from the
    pre-trained weights) and runs ``x_train`` through it. Returns the embedded
    sequences with a trailing singleton channel axis appended.
    """
    embedding_matrix = load_pretrain(path=path, word_index=word_index)
    # Get the first available GPU
    DEVICE_ID_LIST = GPUtil.getFirstAvailable()
    DEVICE_ID = DEVICE_ID_LIST[0]  # grab first element from list
    # Set CUDA_VISIBLE_DEVICES to mask out all other GPUs than the first available device id
    os.environ["CUDA_VISIBLE_DEVICES"] = str(DEVICE_ID)
    device = '/gpu:0'
    with tf.device(device):
        # allow_soft_placement lets TF fall back to CPU for unsupported ops;
        # allow_growth avoids grabbing all GPU memory up front.
        config = tf.ConfigProto(log_device_placement=True, allow_soft_placement=True)
        config.gpu_options.allow_growth = True
        sess = tf.Session(config=config)
        K.set_session(sess)
        print("generating preprocessing model...")
        # trainable=False: the embedding is a fixed lookup, not learned here.
        embedding_layer = Embedding(len(word_index) + 1,
                                    Constant.EMBEDDING_DIM,
                                    weights=[embedding_matrix],
                                    input_length=input_length,
                                    trainable=False)
        sequence_input = Input(shape=(input_length,), dtype='int32')
        embedded_sequences = embedding_layer(sequence_input)
        model = Model(sequence_input, embedded_sequences)
        print("converting text to vector...")
        x_train = model.predict(x_train)
        del model
    x_train = np.expand_dims(x_train, -1)
    return x_train
def text_preprocess(x_train, path):
    """Full text pipeline: clean raw strings, tokenize them, then embed with
    pre-trained word vectors. Returns the embedded training data."""
    cleaned = [clean_str(text) for text in x_train]
    sequences, word_index = tokenlize_text(max_seq_length=Constant.MAX_SEQUENCE_LENGTH,
                                           max_num_words=Constant.MAX_NB_WORDS,
                                           x_train=cleaned)
    print("generating preprocessing model...")
    return processing(path=path, word_index=word_index,
                      input_length=Constant.MAX_SEQUENCE_LENGTH,
                      x_train=sequences)
| [
"GPUtil.getFirstAvailable",
"keras.Input",
"keras_preprocessing.sequence.pad_sequences",
"autokeras.utils.download_file_with_extract",
"keras.Model",
"numpy.asarray",
"tensorflow.device",
"tensorflow.Session",
"numpy.expand_dims",
"keras.backend.set_session",
"tensorflow.ConfigProto",
"os.path... | [((545, 635), 'autokeras.utils.download_file_with_extract', 'download_file_with_extract', (['file_link'], {'file_path': 'file_path', 'extract_path': 'extract_path'}), '(file_link, file_path=file_path, extract_path=\n extract_path)\n', (571, 635), False, 'from autokeras.utils import download_file_with_extract\n'), ((826, 872), 're.sub', 're.sub', (['"""[^A-Za-z0-9(),!?\\\\\'\\\\`]"""', '""" """', 'string'], {}), '("[^A-Za-z0-9(),!?\\\\\'\\\\`]", \' \', string)\n', (832, 872), False, 'import re\n'), ((885, 914), 're.sub', 're.sub', (['"""\\\\\'s"""', '""" \'s"""', 'string'], {}), '("\\\\\'s", " \'s", string)\n', (891, 914), False, 'import re\n'), ((929, 960), 're.sub', 're.sub', (['"""\\\\\'ve"""', '""" \'ve"""', 'string'], {}), '("\\\\\'ve", " \'ve", string)\n', (935, 960), False, 'import re\n'), ((975, 1006), 're.sub', 're.sub', (['"""n\\\\\'t"""', '""" n\'t"""', 'string'], {}), '("n\\\\\'t", " n\'t", string)\n', (981, 1006), False, 'import re\n'), ((1021, 1052), 're.sub', 're.sub', (['"""\\\\\'re"""', '""" \'re"""', 'string'], {}), '("\\\\\'re", " \'re", string)\n', (1027, 1052), False, 'import re\n'), ((1067, 1096), 're.sub', 're.sub', (['"""\\\\\'d"""', '""" \'d"""', 'string'], {}), '("\\\\\'d", " \'d", string)\n', (1073, 1096), False, 'import re\n'), ((1111, 1142), 're.sub', 're.sub', (['"""\\\\\'ll"""', '""" \'ll"""', 'string'], {}), '("\\\\\'ll", " \'ll", string)\n', (1117, 1142), False, 'import re\n'), ((1157, 1183), 're.sub', 're.sub', (['""","""', '""" , """', 'string'], {}), "(',', ' , ', string)\n", (1163, 1183), False, 'import re\n'), ((1198, 1224), 're.sub', 're.sub', (['"""!"""', '""" ! """', 'string'], {}), "('!', ' ! 
', string)\n", (1204, 1224), False, 'import re\n'), ((1239, 1269), 're.sub', 're.sub', (['"""\\\\("""', '""" \\\\( """', 'string'], {}), "('\\\\(', ' \\\\( ', string)\n", (1245, 1269), False, 'import re\n'), ((1282, 1312), 're.sub', 're.sub', (['"""\\\\)"""', '""" \\\\) """', 'string'], {}), "('\\\\)', ' \\\\) ', string)\n", (1288, 1312), False, 'import re\n'), ((1325, 1355), 're.sub', 're.sub', (['"""\\\\?"""', '""" \\\\? """', 'string'], {}), "('\\\\?', ' \\\\? ', string)\n", (1331, 1355), False, 'import re\n'), ((1368, 1398), 're.sub', 're.sub', (['"""\\\\s{2,}"""', '""" """', 'string'], {}), "('\\\\s{2,}', ' ', string)\n", (1374, 1398), False, 'import re\n'), ((1934, 1968), 'keras_preprocessing.text.Tokenizer', 'Tokenizer', ([], {'num_words': 'max_num_words'}), '(num_words=max_num_words)\n', (1943, 1968), False, 'from keras_preprocessing.text import Tokenizer\n'), ((2111, 2158), 'keras_preprocessing.sequence.pad_sequences', 'pad_sequences', (['sequences'], {'maxlen': 'max_seq_length'}), '(sequences, maxlen=max_seq_length)\n', (2124, 2158), False, 'from keras_preprocessing.sequence import pad_sequences\n'), ((2710, 2748), 'os.path.join', 'os.path.join', (['path', 'Constant.FILE_PATH'], {}), '(path, Constant.FILE_PATH)\n', (2722, 2748), False, 'import os\n'), ((2768, 2809), 'os.path.join', 'os.path.join', (['path', 'Constant.EXTRACT_PATH'], {}), '(path, Constant.EXTRACT_PATH)\n', (2780, 2809), False, 'import os\n'), ((3548, 3574), 'GPUtil.getFirstAvailable', 'GPUtil.getFirstAvailable', ([], {}), '()\n', (3572, 3574), False, 'import GPUtil\n'), ((4699, 4726), 'numpy.expand_dims', 'np.expand_dims', (['x_train', '(-1)'], {}), '(x_train, -1)\n', (4713, 4726), True, 'import numpy as np\n'), ((2347, 2403), 'os.path.join', 'os.path.join', (['extract_path', 'Constant.PRE_TRAIN_FILE_NAME'], {}), '(extract_path, Constant.PRE_TRAIN_FILE_NAME)\n', (2359, 2403), False, 'import os\n'), ((2495, 2534), 'numpy.asarray', 'np.asarray', (['values[1:]'], {'dtype': '"""float32"""'}), 
"(values[1:], dtype='float32')\n", (2505, 2534), True, 'import numpy as np\n'), ((3822, 3839), 'tensorflow.device', 'tf.device', (['device'], {}), '(device)\n', (3831, 3839), True, 'import tensorflow as tf\n'), ((3858, 3926), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'log_device_placement': '(True)', 'allow_soft_placement': '(True)'}), '(log_device_placement=True, allow_soft_placement=True)\n', (3872, 3926), True, 'import tensorflow as tf\n'), ((3989, 4014), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (3999, 4014), True, 'import tensorflow as tf\n'), ((4023, 4042), 'keras.backend.set_session', 'K.set_session', (['sess'], {}), '(sess)\n', (4036, 4042), True, 'from keras import backend as K\n'), ((4417, 4460), 'keras.Input', 'Input', ([], {'shape': '(input_length,)', 'dtype': '"""int32"""'}), "(shape=(input_length,), dtype='int32')\n", (4422, 4460), False, 'from keras import Input, Model\n'), ((4538, 4579), 'keras.Model', 'Model', (['sequence_input', 'embedded_sequences'], {}), '(sequence_input, embedded_sequences)\n', (4543, 4579), False, 'from keras import Input, Model\n')] |
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 10 13:02:00 2019
@author: jaehooncha
@email: <EMAIL>
"""
import tensorflow as tf
import numpy as np
def cycle_fn(iteration, base_lr, max_lr, stepsize):
    """Triangular cyclical learning rate for one training iteration.

    The rate sweeps linearly from base_lr up to max_lr and back; a full
    cycle lasts 2 * stepsize iterations (Smith, "Cyclical Learning
    Rates for Training Neural Networks").

    :param iteration: global iteration counter (0-based).
    :param base_lr: lower bound of the learning rate.
    :param max_lr: upper bound of the learning rate.
    :param stepsize: half of the cycle length, in iterations.
    :return: learning rate as a numpy float32 scalar.
    """
    cycle = np.floor(1 + iteration / (2 * stepsize))
    x = np.abs(iteration / stepsize - 2 * cycle + 1)
    lr = base_lr + (max_lr - base_lr) * np.maximum(0, 1 - x)
    return np.float32(lr)

def cycle_lr(base_lr, max_lr, iter_in_batch, epoch_for_cycle, ratio, total_epochs):
    """Build the full per-iteration cyclical learning-rate schedule.

    :param base_lr: lower learning-rate bound.
    :param max_lr: upper learning-rate bound.
    :param iter_in_batch: iterations per epoch.
    :param epoch_for_cycle: epochs spanned by one full cycle.
    :param ratio: fraction of the cycling phase after which the rate is
        pinned to 1% of base_lr.
    :param total_epochs: number of epochs to generate.
    :return: numpy array of length total_epochs * iter_in_batch.
    """
    iteration = 0
    Lr = []
    stepsize = (iter_in_batch * epoch_for_cycle) / 2.
    for i in range(total_epochs):
        for j in range(iter_in_batch):
            Lr.append(cycle_fn(iteration, base_lr=base_lr,
                               max_lr=max_lr, stepsize=stepsize))
            iteration += 1
    # BUG FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin int() truncates toward zero exactly the same way.
    final_iter = int((total_epochs / epoch_for_cycle) * stepsize * 2 * ratio)
    Lr = np.array(Lr)
    # Annealing tail: after the cut-off iteration, hold 1% of base_lr.
    Lr[final_iter:] = base_lr * 0.01
    return Lr
def step_lr(lr_set, iter_in_epoch, total_epochs):
    """Piecewise-constant learning-rate schedule.

    Every rate in lr_set is held for total_epochs / len(lr_set) epochs,
    except the last one, which is held for twice that many epochs.

    :param lr_set: sequence of learning rates, applied in order.
    :param iter_in_epoch: iterations per epoch.
    :param total_epochs: nominal number of training epochs.
    :return: numpy array with one learning rate per iteration.
    """
    steps = len(lr_set)
    block_len = int(total_epochs / steps) * iter_in_epoch
    schedule = []
    for rate in lr_set[:-1]:
        schedule.extend([rate] * block_len)
    # The final rate gets a double-length block.
    schedule.extend([lr_set[-1]] * (block_len * 2))
    return np.array(schedule)
def Batch_norm(x, training, name = None, reuse = None):
    """Batch normalization via the legacy tf.contrib layer, with fixed
    decay/epsilon hyper-parameters.

    NOTE(review): tf.contrib was removed in TensorFlow 2.x; this helper
    only works on TF 1.x graphs.

    :param x: input tensor to normalize.
    :param training: bool (or bool tensor) selecting batch statistics
        (training) vs. moving averages (inference).
    :param name: variable scope for the layer's variables.
    :param reuse: whether to reuse variables in that scope.
    """
    return tf.contrib.layers.batch_norm(inputs=x,
                                       decay=0.997, epsilon=1e-5,
                                       center=True, scale=True, updates_collections=None,
                                       is_training=training, fused=True, reuse = reuse,
                                       scope = name)
| [
"numpy.abs",
"numpy.maximum",
"numpy.floor",
"numpy.float32",
"tensorflow.contrib.layers.batch_norm",
"numpy.array",
"numpy.int"
] | [((214, 254), 'numpy.floor', 'np.floor', (['(1 + iteration / (2 * stepsize))'], {}), '(1 + iteration / (2 * stepsize))\n', (222, 254), True, 'import numpy as np\n'), ((257, 301), 'numpy.abs', 'np.abs', (['(iteration / stepsize - 2 * cycle + 1)'], {}), '(iteration / stepsize - 2 * cycle + 1)\n', (263, 301), True, 'import numpy as np\n'), ((367, 381), 'numpy.float32', 'np.float32', (['lr'], {}), '(lr)\n', (377, 381), True, 'import numpy as np\n'), ((794, 855), 'numpy.int', 'np.int', (['(total_epochs / epoch_for_cycle * stepsize * 2 * ratio)'], {}), '(total_epochs / epoch_for_cycle * stepsize * 2 * ratio)\n', (800, 855), True, 'import numpy as np\n'), ((859, 871), 'numpy.array', 'np.array', (['Lr'], {}), '(Lr)\n', (867, 871), True, 'import numpy as np\n'), ((1218, 1230), 'numpy.array', 'np.array', (['Lr'], {}), '(Lr)\n', (1226, 1230), True, 'import numpy as np\n'), ((1313, 1499), 'tensorflow.contrib.layers.batch_norm', 'tf.contrib.layers.batch_norm', ([], {'inputs': 'x', 'decay': '(0.997)', 'epsilon': '(1e-05)', 'center': '(True)', 'scale': '(True)', 'updates_collections': 'None', 'is_training': 'training', 'fused': '(True)', 'reuse': 'reuse', 'scope': 'name'}), '(inputs=x, decay=0.997, epsilon=1e-05, center=\n True, scale=True, updates_collections=None, is_training=training, fused\n =True, reuse=reuse, scope=name)\n', (1341, 1499), True, 'import tensorflow as tf\n'), ((335, 355), 'numpy.maximum', 'np.maximum', (['(0)', '(1 - x)'], {}), '(0, 1 - x)\n', (345, 355), True, 'import numpy as np\n')] |
import argparse
import logging
import sys
import time
import math
from torchvision import datasets, transforms
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data as data
from torch.utils.data import Dataset
from torch.autograd import Variable
from sklearn.metrics import roc_auc_score, f1_score, roc_curve
from sklearn.manifold import TSNE
from sklearn.decomposition import PCA
import os
from models import *
from utils import *
from PIL import Image
def get_args():
    """Build and parse the command-line arguments for evaluation.

    Returns the parsed ``argparse.Namespace``; defaults mirror the
    CIFAR-10 setup used by the companion training script.
    """
    parser = argparse.ArgumentParser()
    # --- model / data ---
    parser.add_argument('--model_name', type=str, default='PreActResNet18')
    parser.add_argument('--batch-size', default=128, type=int)
    parser.add_argument('--dataset', default='CIFAR-10', type=str)
    parser.add_argument('--data-dir', default='../cifar-data', type=str)
    # --- optimisation / attack hyper-parameters ---
    parser.add_argument('--epochs', default=200, type=int)
    parser.add_argument('--lr-max', default=0.1, type=float)
    parser.add_argument('--epsilon', default=8, type=int)
    parser.add_argument('--attack-iters', default=10, type=int)
    parser.add_argument('--restarts', default=1, type=int)
    parser.add_argument('--pgd-alpha', default=2, type=float)
    parser.add_argument('--fgsm-alpha', default=1.25, type=float)
    parser.add_argument('--norm', default='l_inf', type=str, choices=['l_inf', 'l_2'])
    parser.add_argument('--fgsm-init', default='random', choices=['zero', 'random', 'previous'])
    # --- bookkeeping / checkpointing ---
    parser.add_argument('--fname', default='cifar_model', type=str)
    parser.add_argument('--seed', default=0, type=int)
    parser.add_argument('--resume', default=0, type=int)
    parser.add_argument('--load_epoch', default=101, type=int)
    parser.add_argument('--evalset', default='test', choices=['test'])
    # --- boolean switches (all default to False) ---
    for flag in ('--target',                    # use target-mode attack
                 '--ConfidenceOnly',
                 '--AuxiliaryOnly',
                 '--twobranch',                 # two-branch architecture
                 '--useBN',
                 '--along',
                 '--selfreweightCalibrate',     # Calibrate
                 '--selfreweightSelectiveNet',
                 '--selfreweightATRO',
                 '--selfreweightCARL',
                 '--evalonAA',                  # evaluate on AutoAttack
                 '--evalonCWloss',              # evaluate on PGD with CW loss
                 '--evalonGAMA_FW',
                 '--evalonGAMA_PGD',
                 '--evalonMultitarget'):
        parser.add_argument(flag, action='store_true')
    # --- two-branch head / loss options ---
    parser.add_argument('--out_dim', default=10, type=int)
    parser.add_argument('--lossversion', default='onehot', choices=['onehot', 'category'])
    parser.add_argument('--tempC', default=1., type=float)
    return parser.parse_args()
# corruptes = ['brightness', 'elastic_transform', 'gaussian_blur', 'impulse_noise',
# 'motion_blur', 'shot_noise', 'speckle_noise', 'contrast', 'fog', 'gaussian_noise',
# 'jpeg_compression', 'pixelate', 'snow', 'zoom_blur', 'defocus_blur', 'frost', 'glass_blur',
# 'saturate', 'spatter']
# Subset of CIFAR-10-C corruption types actually evaluated (full list kept
# above for reference); each name maps to '<name>.npy' in the CIFAR10-C
# data directory.
corruptes = ['glass_blur', 'motion_blur', 'zoom_blur',
            'snow', 'frost', 'fog',
            'brightness', 'contrast', 'elastic_transform', 'jpeg_compression']
# Shared DataLoader options (worker count / pinned memory) used below.
kwargs = {'num_workers': 4, 'pin_memory': True}
class CIFAR10_C(Dataset):
    """CIFAR-10-C: the CIFAR-10 test images under one common corruption.

    Loads ``<name>.npy`` (images) and ``labels.npy`` from the
    ``CIFAR10-C`` directory below *root*; *name* must be one of the
    corruption types listed in ``corruptes``.
    """
    def __init__(self, root, name, transform=None, target_transform=None):
        self.data = []
        self.targets = []
        self.transform = transform
        self.target_transform = target_transform
        assert name in corruptes
        corruption_dir = os.path.join(root, 'CIFAR10-C')
        self.data = np.load(os.path.join(corruption_dir, name + '.npy'))
        self.targets = np.load(os.path.join(corruption_dir, 'labels.npy'))

    def __getitem__(self, index):
        """Return the ``(image, target)`` pair at *index*.

        The raw array is converted to a PIL image first so the same
        torchvision transforms used for the clean test set apply here.
        """
        img = Image.fromarray(self.data[index])
        target = self.targets[index]
        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return img, target

    def __len__(self):
        return len(self.data)
def eval_adv_test_general(args, model, name, logger):
    """Evaluate *model* on the CIFAR-10-C corruption split *name*.

    Logs clean accuracy, the mean/std of the softmax confidence and of an
    "evidence" score for correctly vs. wrongly classified samples, then
    ROC-AUC and TPR-95/99 selective-prediction statistics for both scores
    (the AUC/TPR helpers come from ``utils`` via the star import).

    NOTE(review): the original docstring claimed "evaluate model by
    white-box attack", but no attack is performed here — this measures
    common-corruption robustness only.
    """
    # set up data loader (plain ToTensor; normalize() is applied to the
    # batch before the forward pass instead)
    transform_test = transforms.Compose([transforms.ToTensor(),])
    testset = CIFAR10_C(root='../cifar-data', name = name, transform=transform_test)
    test_loader = torch.utils.data.DataLoader(testset, batch_size=128, shuffle=False, **kwargs)
    model.eval()
    test_acc, test_n = 0, 0
    test_classes_correct, test_classes_wrong = [], []
    # record confidence of correct / wrong predictions
    test_con_correct = []
    test_con_wrong = []
    # record evidence of correct / wrong predictions
    test_evi_correct = []
    test_evi_wrong = []
    for idx, (data, target) in enumerate(test_loader):
        X, y = data.cuda(), target.long().cuda()
        if args.twobranch:
            # Two heads: class logits plus an auxiliary "evidence" output.
            output, output_aux = model(normalize(X))[0:2]
            con_pre, _ = torch.softmax(output * args.tempC, dim=1).max(1) # predicted label and confidence
            # NOTE(review): if --twobranch is set without any of the
            # selfreweight* flags, test_evi_all stays undefined and the
            # code below raises NameError — confirm valid flag combos.
            if args.selfreweightCalibrate:
                output_aux = output_aux.sigmoid().squeeze()
                test_evi_all = con_pre * output_aux
                if args.ConfidenceOnly:
                    test_evi_all = con_pre
                if args.AuxiliaryOnly:
                    test_evi_all = output_aux
            elif args.selfreweightSelectiveNet:
                test_evi_all = output_aux.sigmoid().squeeze()
            elif args.selfreweightATRO:
                test_evi_all = output_aux.tanh().squeeze()
            elif args.selfreweightCARL:
                output_all = torch.cat((output, output_aux), dim=1) # bs x 11 or bs x 101
                softmax_output = F.softmax(output_all, dim=1)
                test_evi_all = softmax_output[torch.tensor(range(X.size(0))), -1]
        else:
            # Single-branch model: logsumexp of the logits as evidence.
            output = model(normalize(X))
            test_evi_all = output.logsumexp(dim=1)
        output_s = F.softmax(output, dim=1)
        out_con, out_pre = output_s.max(1)
        # indices of correctly / wrongly predicted samples in this batch
        labels = torch.where(out_pre == y)[0]
        labels_n = torch.where(out_pre != y)[0]
        # ground-truth classes, split by correctness
        test_classes_correct += y[labels].tolist()
        test_classes_wrong += y[labels_n].tolist()
        # accuracy
        test_acc += labels.size(0)
        # confidence
        test_con_correct += out_con[labels].tolist()
        test_con_wrong += out_con[labels_n].tolist()
        # evidence
        test_evi_correct += test_evi_all[labels].tolist()
        test_evi_wrong += test_evi_all[labels_n].tolist()
        test_n += y.size(0)
    # confidence
    test_con_correct = torch.tensor(test_con_correct)
    test_con_wrong = torch.tensor(test_con_wrong)
    # evidence
    test_evi_correct = torch.tensor(test_evi_correct)
    test_evi_wrong = torch.tensor(test_evi_wrong)
    test_acc = test_acc/test_n
    print('### Basic statistics ###')
    logger.info('Clean | acc: %.4f | con cor: %.3f (%.3f) | con wro: %.3f (%.3f) | evi cor: %.3f (%.3f) | evi wro: %.3f (%.3f)',
            test_acc,
            test_con_correct.mean().item(), test_con_correct.std().item(),
            test_con_wrong.mean().item(), test_con_wrong.std().item(),
            test_evi_correct.mean().item(), test_evi_correct.std().item(),
            test_evi_wrong.mean().item(), test_evi_wrong.std().item())
    print('')
    print('### ROC-AUC scores (confidence) ###')
    # calculate_auc_scores / calculate_FPR_TPR come from utils (star import)
    clean_clean = calculate_auc_scores(test_con_correct, test_con_wrong)
    _, acc95 = calculate_FPR_TPR(test_con_correct, test_con_wrong, tpr_ref=0.95)
    _, acc99 = calculate_FPR_TPR(test_con_correct, test_con_wrong, tpr_ref=0.99)
    logger.info('clean_clean: %.3f',
            clean_clean)
    logger.info('TPR 95 clean acc: %.4f; 99 clean acc: %.4f',
            acc95, acc99)
    print('')
    print('### ROC-AUC scores (evidence) ###')
    clean_clean = calculate_auc_scores(test_evi_correct, test_evi_wrong)
    _, acc95 = calculate_FPR_TPR(test_evi_correct, test_evi_wrong, tpr_ref=0.95)
    _, acc99 = calculate_FPR_TPR(test_evi_correct, test_evi_wrong, tpr_ref=0.99)
    logger.info('clean_clean: %.3f',
            clean_clean)
    logger.info('TPR 95 clean acc: %.4f; 99 clean acc: %.4f',
            acc95, acc99)
def main():
    """Entry point: build the model from CLI args, load a checkpoint and
    evaluate it on every corruption type in ``corruptes``.
    """
    args = get_args()
    # define a logger writing both to <fname>/eval.log and to stderr
    logger = logging.getLogger(__name__)
    logging.basicConfig(
        format='[%(asctime)s] - %(message)s',
        datefmt='%Y/%m/%d %H:%M:%S',
        level=logging.DEBUG,
        handlers=[
            logging.FileHandler(os.path.join(args.fname, 'eval.log')),
            logging.StreamHandler()
        ])
    logger.info(args)
    # set random seed
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    num_cla = 10
    # BUG FIX: `along` used to be assigned only inside the branch below,
    # so building any two-/three-branch model without a selfreweight*
    # flag raised NameError. Default it from the CLI flag instead
    # (False unless --along is given).
    along = args.along
    if args.selfreweightCalibrate or args.selfreweightSelectiveNet or args.selfreweightCARL or args.selfreweightATRO:
        along = True
        args.out_dim = 1
    # load pretrained model
    if args.model_name == 'PreActResNet18':
        model = PreActResNet18(num_classes=num_cla)
    elif args.model_name == 'PreActResNet18_twobranch_DenseV1':
        model = PreActResNet18_twobranch_DenseV1(num_classes=num_cla, out_dim=args.out_dim, use_BN=args.useBN, along=along)
    elif args.model_name == 'WideResNet':
        model = WideResNet(34, num_cla, widen_factor=10, dropRate=0.0)
    elif args.model_name == 'WideResNet_twobranch_DenseV1':
        model = WideResNet_twobranch_DenseV1(34, num_cla, widen_factor=10, dropRate=0.0, along=along, use_BN=args.useBN, out_dim=args.out_dim)
    elif args.model_name == 'PreActResNet18_threebranch_DenseV1':
        model = PreActResNet18_threebranch_DenseV1(num_classes=num_cla, out_dim=args.out_dim, use_BN=args.useBN, along=along)
    elif args.model_name == 'WideResNet_threebranch_DenseV1':
        model = WideResNet_threebranch_DenseV1(34, num_cla, widen_factor=10, dropRate=0.0, use_BN=args.useBN, along=along, out_dim=args.out_dim)
    else:
        raise ValueError("Unknown model")
    model = nn.DataParallel(model).cuda()
    # Load either a specific epoch checkpoint or the best checkpoint.
    if args.load_epoch > 0:
        model_dict = torch.load(os.path.join(args.fname, f'model_{args.load_epoch}.pth'))
        logger.info(f'Resuming at epoch {args.load_epoch}')
    else:
        model_dict = torch.load(os.path.join(args.fname, 'model_best.pth'))
        logger.info('Resuming at best epoch')
    # Checkpoints may store either a raw state dict or a wrapper dict.
    if 'state_dict' in model_dict.keys():
        model.load_state_dict(model_dict['state_dict'])
    else:
        model.load_state_dict(model_dict)
    for name in corruptes:
        print('')
        print('')
        print('====== test ' + name + ' =====')
        eval_adv_test_general(args, model, name, logger)

if __name__ == "__main__":
    main()
| [
"numpy.load",
"numpy.random.seed",
"argparse.ArgumentParser",
"torch.utils.data.DataLoader",
"torch.where",
"torch.utils.data.cuda",
"torch.manual_seed",
"logging.StreamHandler",
"torch.cuda.manual_seed",
"torch.cat",
"logging.getLogger",
"torch.nn.functional.softmax",
"torch.softmax",
"PI... | [((542, 567), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (565, 567), False, 'import argparse\n'), ((5173, 5250), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['testset'], {'batch_size': '(128)', 'shuffle': '(False)'}), '(testset, batch_size=128, shuffle=False, **kwargs)\n', (5200, 5250), False, 'import torch\n'), ((7470, 7500), 'torch.tensor', 'torch.tensor', (['test_con_correct'], {}), '(test_con_correct)\n', (7482, 7500), False, 'import torch\n'), ((7522, 7550), 'torch.tensor', 'torch.tensor', (['test_con_wrong'], {}), '(test_con_wrong)\n', (7534, 7550), False, 'import torch\n'), ((7590, 7620), 'torch.tensor', 'torch.tensor', (['test_evi_correct'], {}), '(test_evi_correct)\n', (7602, 7620), False, 'import torch\n'), ((7642, 7670), 'torch.tensor', 'torch.tensor', (['test_evi_wrong'], {}), '(test_evi_wrong)\n', (7654, 7670), False, 'import torch\n'), ((9187, 9214), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (9204, 9214), False, 'import logging\n'), ((9538, 9563), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (9552, 9563), True, 'import numpy as np\n'), ((9568, 9596), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (9585, 9596), False, 'import torch\n'), ((9601, 9634), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['args.seed'], {}), '(args.seed)\n', (9623, 9634), False, 'import torch\n'), ((4014, 4060), 'os.path.join', 'os.path.join', (['root', '"""CIFAR10-C"""', "(name + '.npy')"], {}), "(root, 'CIFAR10-C', name + '.npy')\n", (4026, 4060), False, 'import os\n'), ((4080, 4125), 'os.path.join', 'os.path.join', (['root', '"""CIFAR10-C"""', '"""labels.npy"""'], {}), "(root, 'CIFAR10-C', 'labels.npy')\n", (4092, 4125), False, 'import os\n'), ((4146, 4164), 'numpy.load', 'np.load', (['file_path'], {}), '(file_path)\n', (4153, 4164), True, 'import numpy as np\n'), ((4188, 4207), 'numpy.load', 'np.load', 
(['lable_path'], {}), '(lable_path)\n', (4195, 4207), True, 'import numpy as np\n'), ((4591, 4611), 'PIL.Image.fromarray', 'Image.fromarray', (['img'], {}), '(img)\n', (4606, 4611), False, 'from PIL import Image\n'), ((6734, 6758), 'torch.nn.functional.softmax', 'F.softmax', (['output'], {'dim': '(1)'}), '(output, dim=1)\n', (6743, 6758), True, 'import torch.nn.functional as F\n'), ((5045, 5066), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (5064, 5066), False, 'from torchvision import datasets, transforms\n'), ((5558, 5569), 'torch.utils.data.cuda', 'data.cuda', ([], {}), '()\n', (5567, 5569), True, 'import torch.utils.data as data\n'), ((6844, 6869), 'torch.where', 'torch.where', (['(out_pre == y)'], {}), '(out_pre == y)\n', (6855, 6869), False, 'import torch\n'), ((6892, 6917), 'torch.where', 'torch.where', (['(out_pre != y)'], {}), '(out_pre != y)\n', (6903, 6917), False, 'import torch\n'), ((10911, 10933), 'torch.nn.DataParallel', 'nn.DataParallel', (['model'], {}), '(model)\n', (10926, 10933), True, 'import torch.nn as nn\n'), ((11008, 11064), 'os.path.join', 'os.path.join', (['args.fname', 'f"""model_{args.load_epoch}.pth"""'], {}), "(args.fname, f'model_{args.load_epoch}.pth')\n", (11020, 11064), False, 'import os\n'), ((11168, 11211), 'os.path.join', 'os.path.join', (['args.fname', 'f"""model_best.pth"""'], {}), "(args.fname, f'model_best.pth')\n", (11180, 11211), False, 'import os\n'), ((9454, 9477), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (9475, 9477), False, 'import logging\n'), ((5703, 5744), 'torch.softmax', 'torch.softmax', (['(output * args.tempC)'], {'dim': '(1)'}), '(output * args.tempC, dim=1)\n', (5716, 5744), False, 'import torch\n'), ((9403, 9439), 'os.path.join', 'os.path.join', (['args.fname', '"""eval.log"""'], {}), "(args.fname, 'eval.log')\n", (9415, 9439), False, 'import os\n'), ((6390, 6428), 'torch.cat', 'torch.cat', (['(output, output_aux)'], {'dim': '(1)'}), '((output, 
output_aux), dim=1)\n', (6399, 6428), False, 'import torch\n'), ((6484, 6512), 'torch.nn.functional.softmax', 'F.softmax', (['output_all'], {'dim': '(1)'}), '(output_all, dim=1)\n', (6493, 6512), True, 'import torch.nn.functional as F\n')] |
import os
import numpy as np
from tf_lite.tf_lite import TFLiteModel
from tf_lite.ring_buffer import RingBuffer
class Filter:
    """Streaming audio front end: windowed STFT magnitude frames fed
    through a TFLite "filter" model.

    Raw samples are buffered in a ring buffer, pre-emphasised, windowed
    with a Hann window, transformed with a real FFT, and each magnitude
    frame is passed through the TFLite model (presumably a mel-filterbank
    projection — confirm against the model file).
    """

    def __init__(
        self,
        pre_emphasis: float = 0.0,
        sample_rate: int = 16000,
        fft_window_type: str = "hann",
        fft_hop_length: int = 10,
        model_dir: str = "",
    ) -> None:
        """Load the TFLite filter model and size the analysis window from
        its input shape.

        :param pre_emphasis: pre-emphasis coefficient (0.0 disables it).
        :param sample_rate: input sample rate in Hz.
        :param fft_window_type: only "hann" is supported.
        :param fft_hop_length: hop length in milliseconds.
        :param model_dir: directory containing ``filter.tflite``.
        :raises ValueError: if *fft_window_type* is not "hann".
        """
        self.pre_emphasis: float = pre_emphasis
        # hop length arrives in milliseconds; convert to samples
        self.hop_length: int = int(fft_hop_length * sample_rate / 1000)
        if fft_window_type != "hann":
            raise ValueError("Invalid fft_window_type")
        self.filter_model: TFLiteModel = TFLiteModel(
            model_path=os.path.join(model_dir, "filter.tflite")
        )
        # window size calculated based on fft:
        # the filter inputs are (fft_size - 1) / 2,
        # which makes the window size (post_fft_size - 1) * 2
        self._window_size = (self.filter_model.input_details[0]["shape"][-1] - 1) * 2
        self._fft_window = np.hanning(self._window_size)
        # initialize sample buffer and the last raw sample of the
        # previous frame (needed by the pre-emphasis filter)
        self.sample_window: RingBuffer = RingBuffer(shape=[self._window_size])
        self._prev_sample: float = 0.0

    def filter_frame(self, frame) -> list:
        """Stream one frame of raw samples in; return the feature frames
        completed while consuming it (possibly an empty list).

        NOTE(review): *frame* is mutated in place by the pre-emphasis
        step — callers must not expect raw samples afterwards.
        """
        # pull out a single value from the frame and apply pre-emphasis
        # with the previous sample, then cache the previous sample
        # to be used in the next iteration
        prev_sample = frame[-1]
        frame -= self.pre_emphasis * np.append(self._prev_sample, frame[:-1])
        self._prev_sample = prev_sample

        # fill the sample window to analyze speech containing samples;
        # after each window fill the buffer advances by the hop length
        # to produce an overlapping window
        frame_features = []
        for sample in frame:
            self.sample_window.write(sample)
            if self.sample_window.is_full:
                features = self._analyze()
                frame_features.append(features.squeeze())
                self.sample_window.rewind().seek(self.hop_length)

        return frame_features

    def _analyze(self) -> np.ndarray:
        """Compute one magnitude-spectrum frame from the full sample
        window and run it through the filter model.
        """
        # read the full contents of the sample window to calculate a single frame
        # of the STFT by applying the DFT to a real-valued input and
        # taking the magnitude of the complex DFT
        frame = self.sample_window.read_all()
        frame = np.fft.rfft(frame * self._fft_window, n=self._window_size)
        frame = np.abs(frame).astype(np.float32)

        # compute mel spectrogram
        return self._filter(frame)

    def _filter(self, frame) -> np.ndarray:
        """Run one spectrum frame through the TFLite filter model."""
        # add the batch dimension and compute the mel spectrogram with filter model
        frame = np.expand_dims(frame, 0)
        frame = self.filter_model(frame)[0]
        return frame

    def num_outputs(self) -> int:
        """Number of feature values the filter model emits per frame."""
        # return number of output features from model
        return self.filter_model.output_details[0]["shape"][-1]
| [
"numpy.fft.rfft",
"numpy.abs",
"os.path.join",
"numpy.expand_dims",
"numpy.append",
"numpy.hanning",
"tf_lite.ring_buffer.RingBuffer"
] | [((965, 994), 'numpy.hanning', 'np.hanning', (['self._window_size'], {}), '(self._window_size)\n', (975, 994), True, 'import numpy as np\n'), ((1072, 1109), 'tf_lite.ring_buffer.RingBuffer', 'RingBuffer', ([], {'shape': '[self._window_size]'}), '(shape=[self._window_size])\n', (1082, 1109), False, 'from tf_lite.ring_buffer import RingBuffer\n'), ((2348, 2406), 'numpy.fft.rfft', 'np.fft.rfft', (['(frame * self._fft_window)'], {'n': 'self._window_size'}), '(frame * self._fft_window, n=self._window_size)\n', (2359, 2406), True, 'import numpy as np\n'), ((2665, 2689), 'numpy.expand_dims', 'np.expand_dims', (['frame', '(0)'], {}), '(frame, 0)\n', (2679, 2689), True, 'import numpy as np\n'), ((1442, 1482), 'numpy.append', 'np.append', (['self._prev_sample', 'frame[:-1]'], {}), '(self._prev_sample, frame[:-1])\n', (1451, 1482), True, 'import numpy as np\n'), ((641, 681), 'os.path.join', 'os.path.join', (['model_dir', '"""filter.tflite"""'], {}), "(model_dir, 'filter.tflite')\n", (653, 681), False, 'import os\n'), ((2423, 2436), 'numpy.abs', 'np.abs', (['frame'], {}), '(frame)\n', (2429, 2436), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
'''
Created on 27 jun. 2020
@author: reinaqu_2
'''
from matplotlib import pyplot as plt
import geopandas as gpd
import dataframes
import numpy as np
import pandas as pd
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
import venn
from typing import List
import math
from plotnine import *
from plotnine.scales.scale_xy import scale_x_discrete
from cycler import cycler
MARKER_SQUARE='s'
MARKER_CIRCLE='o'
COUNTRY_MAP = 'ADMIN' # for countries.geojson
#COUNTRY_MAP = 'name' ==> for world_countries.json
def create_piechart(dataframe, y_name,legend=False, y_axis_label=True, font_size=9, label_distance=1.1, pct_distance=0.8,radius=1):
    '''Draw a pie chart of column *y_name* of *dataframe*.

    INPUT:
       - dataframe: pandas dataframe with the data to be plotted.
       - y_name: name of the column whose values define the wedges.
       - legend: whether to draw a legend.
       - y_axis_label: when False, the y-axis label is suppressed.
       - font_size, label_distance, pct_distance: text placement/styling.
       - radius: kept for backward compatibility (currently unused).
    '''
    # equal aspect ratio so the pie is a circle
    plt.axis('equal')
    palette = plt.cm.magma(np.linspace(0.4, 0.95, 10))
    axes = plt.gca()
    dataframe.plot.pie(
        y=y_name,
        figsize=(5, 5),
        ax=axes,
        wedgeprops={"edgecolor": "k", 'linewidth': 1,
                    'linestyle': 'solid', 'antialiased': True},
        pctdistance=pct_distance,
        colors=palette,
        labeldistance=label_distance,
        autopct='%1.1f%%',
        legend=legend,
        textprops={'fontsize': font_size},
    )
    if y_axis_label == False:
        axes.set_ylabel('')
    plt.show()
def create_piechart_subplots(dataframe,legend=False):
    '''Draw one pie chart per column of *dataframe*, as subplots.'''
    # equal aspect ratio so every pie is a circle
    plt.axis('equal')
    dataframe.plot.pie(subplots=True, ax=plt.gca(),
                       autopct='%1.1f%%', legend=legend)
    plt.show()
def create_lineplot_from_dataframe(dataframe, x_label, y_label ):
    '''Plot column *y_label* against *x_label* as a line chart with each
    point annotated with its value.

    INPUT:
       - dataframe: pandas dataframe with the data to be plotted.
       - x_label: name of the column for the x axis.
       - y_label: name of the column for the y axis.
    '''
    ax = plt.gca()
    ax.locator_params(integer=True)
    dataframe.plot(x=x_label, y=y_label, kind='line', color='orange', ax=ax, marker=MARKER_SQUARE,grid=True)
    plt.xticks(dataframe.index.values)
    # BUG FIX: labelling the points used DataFrame.iteritems() (removed
    # in pandas 2.0), which iterates *columns* and passed a whole Series
    # to plt.text. Iterate the plotted y column instead.
    for index, value in dataframe[y_label].items():
        plt.text(index, value, str(value))
    plt.show()
def create_line_plot_multiple_colums(dataframe, x_name, lines, colours,markers:list):
    '''Plot several columns of *dataframe* as lines on a shared axis.

    INPUT:
       - dataframe: pandas dataframe with the data to be plotted.
       - x_name: name of the column represented on the x axis.
       - lines: sequence of column names plotted on the y axis.
       - colours: sequence of colours, one per line.
       - markers: sequence of markers, one per line.
    '''
    # gca stands for 'get current axis'
    axes = plt.gca()
    for i, column in enumerate(lines):
        dataframe.plot(kind='line', x=x_name, y=column,
                       color=colours[i], marker=markers[i], ax=axes)
    plt.grid()
    plt.show()
def create_bar(dataframe, x_labels_rotation=90, legend=True):
    '''Draw a grouped bar chart of every column in *dataframe*.

    INPUT:
       - dataframe: pandas dataframe with the data to be plotted.
       - x_labels_rotation: rotation (degrees) of the x tick labels.
       - legend: whether the legend is shown.
    '''
    dataframe.plot(kind='bar')
    plt.xticks(rotation=x_labels_rotation)
    axes = plt.gca()
    # hide the x-axis title; keep only the tick labels
    axes.xaxis.label.set_visible(False)
    axes.legend().set_visible(legend)
    plt.show()
def create_stacked_bar(dataframe,column_name, values_name,x_labels_rotation=0):
    '''Draw a stacked bar chart of *values_name* pivoted by *column_name*.

    INPUT:
       - dataframe: pandas dataframe with the data to be plotted.
       - column_name: column whose distinct values become the stacks.
       - values_name: column holding the values to stack.
       - x_labels_rotation: rotation (degrees) of the x tick labels.
    '''
    pivoted = dataframe.pivot(columns=column_name, values=values_name)
    axes = pivoted.plot.bar(stacked=True)
    # annotate every bar segment with its (integer) height, centred on it
    for rect in axes.patches:
        centre_x = rect.get_x() + rect.get_width() / 2
        centre_y = rect.get_y() + rect.get_height() / 2
        axes.text(centre_x, centre_y, str(int(rect.get_height())),
                  ha='center', va='center')
    plt.xticks(rotation=x_labels_rotation)
    plt.show()
def dataframe_search_country(dataframe,country):
    '''Look up the 'number of studies' for *country*.

    :param dataframe: dataframe with the columns 'countries' and
        'number of studies'.
    :param country: country name to search for.
    :return: 0 when the country is absent; otherwise a pandas Series
        (re-indexed from 0) with the matching counts.
    '''
    matches = dataframe.loc[dataframe['countries'] == country]
    if matches.empty:
        return 0
    # BUG FIX: the original assigned ``res.index=[range(0,len(res))]``,
    # a one-element index that raises a length mismatch whenever more
    # than one row matches; reset_index renumbers from 0 correctly.
    return matches['number of studies'].reset_index(drop=True)
def create_choropleth_map (dataframe, column_name, geojson_mapfile):
    '''Draw a world choropleth map coloured by *column_name*.

    INPUT:
       - dataframe: dataframe with a 'countries' column plus the value
         column *column_name* used for the colouring.
       - column_name: name of the numeric column to colour countries by.
       - geojson_mapfile: path of the GeoJSON file with country borders;
         its country-name property must match COUNTRY_MAP.
    '''
    #read the map as a geodataframe
    world_df = gpd.read_file(geojson_mapfile)
    #To draw all the countries, a left-join is needed.
    #Note that when the country does not exist in dataframe, the column 'number of studies' has a NaN value
    merged = world_df.merge(dataframe, how='left',left_on = COUNTRY_MAP, right_on = 'countries')
    #The NaN values are replaced by 0
    merged = merged.replace(np.nan, 0)
    # set the range for the choropleth (min and max values)
    vmin, vmax=0, max(dataframe[column_name])
    # create figure and axes for Matplotlib
    fig, ax = plt.subplots(1, figsize=(20, 50))
    #remove axes
    ax.set_axis_off()
    # Create colorbar as a legend
    sm = plt.cm.ScalarMappable(cmap='Blues',
                               norm=plt.Normalize(vmin=vmin, vmax=vmax))
    # empty array for the data range (the mappable needs one before drawing)
    sm._A = []
    # small inset axes in the lower-right corner to host the colorbar
    axins1 = inset_axes(ax,
                        width="2%",  # width = 2% of parent_bbox width
                        height="50%",  # height : 50% of parent_bbox height
                        loc='lower right'
                       )
    # add the colorbar to the figure
    cbar = fig.colorbar(sm, cax=axins1)
    # create map
    ax = merged.plot(column=column_name,
                     cmap ='Blues',
                     linewidth=0.8,
                     ax=ax,
                     edgecolor='0.8')
    # Add labels (kept disabled: would annotate each country with its count)
    #merged['coords'] = merged['geometry'].apply(lambda x: x.representative_point().coords[:])
    #merged['coords'] = [coords[0] for coords in merged['coords']]
    #for idx, row in merged.iterrows():
    #    if (row[column_name]>0):
    #        plt.annotate(s=str(int(row[column_name])), xy=row['coords'],horizontalalignment='center')
    plt.show()
def create_bubble(dataframe:pd.DataFrame, count_name:str, x_name:str, y_name:str, \
                  rows:List[str]=None, columns:List[str] = None):
    '''
    It creates a bubble plot with data from dataframe. The dataframe should
    contain at least three columns: one with the count, one with the X-axis
    values, and one with the Y-axis values.
    @param dataframe: Data frame with the data count. One example of this kind
    of dataframe is the following one
              x_name    y_name    count_name
        0       HIGH    HIGH    34
        1       HIGH  MEDIUM     5
        2        LOW    HIGH    51
        3        LOW  MEDIUM    13
        4     MEDIUM    HIGH    38
        5     MEDIUM  MEDIUM     4
    @param count_name: Name of the column dataframe that holds the count.
    @param x_name: Name of the column that holds the labels that will be depicted in X-axis
    @param y_name: Name of the column that holds the labels that will be depicted in Y-axis
    @param rows: Optional explicit ordering of the X-axis categories; if
        None, the values of column x_name are used in dataframe order.
    @param columns: Same as rows, for the Y-axis categories.
    '''
    #create an extra padding column from values for circles that are neither too small nor too large
    # NOTE(review): this mutates the caller's dataframe (adds "padd").
    df_aux= dataframe[count_name]
    dataframe["padd"] = 2.5 * (df_aux - df_aux.min()) / (df_aux.max() - df_aux.min()) + 0.5
    fig = plt.figure()
    #prepare the axes for the plot - you can also order your categories at this step
    if rows == None:
        rows = [str(row) for row in dataframe[x_name].to_list()]
    if columns==None:
        columns = [str(col) for col in dataframe[y_name].to_list()]
    # invisible scatter (size 0) just to register the categorical axes
    s = plt.scatter(rows, columns, s = 0)
    #s.remove
    ax = plt.gca()
    ax.set_xlabel(x_name)
    ax.set_ylabel(y_name)
    ax.set_xmargin(0.5)
    ax.set_ymargin(0.5)
    #plot data row-wise as text with circle radius according to Count
    # NOTE(review): row[1]/row[2]/row[3] assume the column order is
    # (x_name, y_name, count_name) — confirm for other dataframes.
    for row in dataframe.itertuples():
        bbox_props = dict(boxstyle = f"circle, pad = {row.padd}", fc = "w", ec = "r", lw = 2)
        plt.annotate(str(row[3]), xy = (row[1], row[2]), bbox = bbox_props, ha="center", va="center", zorder = 2, clip_on = False)
    #plot grid behind markers
    plt.grid(ls = "--", zorder = 1)
    #take care of long labels
    fig.autofmt_xdate()
    plt.tight_layout()
    plt.show()
def create_bubble2(dataframe:pd.DataFrame, count_name:str, x_name:str, y_name:str):
    '''
    Draw a bubble plot of `dataframe` using ggplot/plotnine. The dataframe
    must contain at least three columns: the count, the X-axis values and
    the Y-axis values.
    @param dataframe: Data frame with the data count, e.g.
                 x_name    y_name    count_name
            0    HIGH      HIGH      34
            1    HIGH      MEDIUM    5
            2    LOW       HIGH      51
    @param count_name: Name of the dataframe column that holds the count.
    @param x_name: Name of the column whose labels are depicted on the X-axis.
    @param y_name: Name of the column whose labels are depicted on the Y-axis.
    '''
    # Dot size proportional to sqrt(count / pi), i.e. area ~ count.
    def _dot_size(row):
        return math.sqrt(float(row[count_name]) / math.pi) * 7.5

    dataframe['dotsize'] = dataframe.apply(_dot_size, axis=1)
    plot = ggplot(dataframe, aes(x=x_name, y=y_name))
    plot = plot + geom_point(aes(size='dotsize'), fill='white')
    plot = plot + geom_text(aes(label=count_name), size=8)
    plot = plot + scale_size_identity()
    plot = plot + theme(
        panel_grid_major=element_line(linetype='dashed', color='black'),
        axis_text_x=element_text(angle=90, hjust=1, vjust=0))
    print(plot)
def create_venn4(labels, names):
    """Display a four-set Venn diagram for the given labels and set names."""
    venn.venn4(labels, names=names)
    plt.show()
def create_venn3(labels, names):
    """Display a three-set Venn diagram for the given labels and set names."""
    venn.venn3(labels, names=names)
    plt.show()
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.show",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.subplots",
"venn.venn3",
"matplotlib.pyplot.figure",
"mpl_toolkits.axes_grid1.inset_locator.inset_axes",
"venn.venn4",
"numpy.linspace",
"matplotlib.pyplot.gca",
... | [((1016, 1033), 'matplotlib.pyplot.axis', 'plt.axis', (['"""equal"""'], {}), "('equal')\n", (1024, 1033), True, 'from matplotlib import pyplot as plt\n'), ((1205, 1214), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1212, 1214), True, 'from matplotlib import pyplot as plt\n'), ((1751, 1761), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1759, 1761), True, 'from matplotlib import pyplot as plt\n'), ((1821, 1838), 'matplotlib.pyplot.axis', 'plt.axis', (['"""equal"""'], {}), "('equal')\n", (1829, 1838), True, 'from matplotlib import pyplot as plt\n'), ((1848, 1857), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1855, 1857), True, 'from matplotlib import pyplot as plt\n'), ((1948, 1958), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1956, 1958), True, 'from matplotlib import pyplot as plt\n'), ((2040, 2049), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2047, 2049), True, 'from matplotlib import pyplot as plt\n'), ((2199, 2233), 'matplotlib.pyplot.xticks', 'plt.xticks', (['dataframe.index.values'], {}), '(dataframe.index.values)\n', (2209, 2233), True, 'from matplotlib import pyplot as plt\n'), ((2353, 2363), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2361, 2363), True, 'from matplotlib import pyplot as plt\n'), ((2841, 2850), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2848, 2850), True, 'from matplotlib import pyplot as plt\n'), ((3009, 3019), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (3017, 3019), True, 'from matplotlib import pyplot as plt\n'), ((3028, 3038), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3036, 3038), True, 'from matplotlib import pyplot as plt\n'), ((3570, 3608), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': 'x_labels_rotation'}), '(rotation=x_labels_rotation)\n', (3580, 3608), True, 'from matplotlib import pyplot as plt\n'), ((3619, 3628), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3626, 3628), True, 'from matplotlib 
import pyplot as plt\n'), ((3709, 3719), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3717, 3719), True, 'from matplotlib import pyplot as plt\n'), ((4546, 4584), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': 'x_labels_rotation'}), '(rotation=x_labels_rotation)\n', (4556, 4584), True, 'from matplotlib import pyplot as plt\n'), ((4595, 4605), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4603, 4605), True, 'from matplotlib import pyplot as plt\n'), ((5065, 5095), 'geopandas.read_file', 'gpd.read_file', (['geojson_mapfile'], {}), '(geojson_mapfile)\n', (5078, 5095), True, 'import geopandas as gpd\n'), ((5617, 5650), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)'], {'figsize': '(20, 50)'}), '(1, figsize=(20, 50))\n', (5629, 5650), True, 'from matplotlib import pyplot as plt\n'), ((5923, 5982), 'mpl_toolkits.axes_grid1.inset_locator.inset_axes', 'inset_axes', (['ax'], {'width': '"""2%"""', 'height': '"""50%"""', 'loc': '"""lower right"""'}), "(ax, width='2%', height='50%', loc='lower right')\n", (5933, 5982), False, 'from mpl_toolkits.axes_grid1.inset_locator import inset_axes\n'), ((6788, 6798), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6796, 6798), True, 'from matplotlib import pyplot as plt\n'), ((8282, 8294), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (8292, 8294), True, 'from matplotlib import pyplot as plt\n'), ((8582, 8613), 'matplotlib.pyplot.scatter', 'plt.scatter', (['rows', 'columns'], {'s': '(0)'}), '(rows, columns, s=0)\n', (8593, 8613), True, 'from matplotlib import pyplot as plt\n'), ((8639, 8648), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (8646, 8648), True, 'from matplotlib import pyplot as plt\n'), ((9126, 9153), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'ls': '"""--"""', 'zorder': '(1)'}), "(ls='--', zorder=1)\n", (9134, 9153), True, 'from matplotlib import pyplot as plt\n'), ((9216, 9234), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', 
(9232, 9234), True, 'from matplotlib import pyplot as plt\n'), ((9239, 9249), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9247, 9249), True, 'from matplotlib import pyplot as plt\n'), ((11300, 11331), 'venn.venn4', 'venn.venn4', (['labels'], {'names': 'names'}), '(labels, names=names)\n', (11310, 11331), False, 'import venn\n'), ((11388, 11398), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11396, 11398), True, 'from matplotlib import pyplot as plt\n'), ((11451, 11482), 'venn.venn3', 'venn.venn3', (['labels'], {'names': 'names'}), '(labels, names=names)\n', (11461, 11482), False, 'import venn\n'), ((11539, 11549), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11547, 11549), True, 'from matplotlib import pyplot as plt\n'), ((1165, 1191), 'numpy.linspace', 'np.linspace', (['(0.4)', '(0.95)', '(10)'], {}), '(0.4, 0.95, 10)\n', (1176, 1191), True, 'import numpy as np\n'), ((5811, 5846), 'matplotlib.pyplot.Normalize', 'plt.Normalize', ([], {'vmin': 'vmin', 'vmax': 'vmax'}), '(vmin=vmin, vmax=vmax)\n', (5824, 5846), True, 'from matplotlib import pyplot as plt\n')] |
import numpy as np
def write_keypoints(path, keypoints):
    """Save an N x 4 keypoint matrix to *path* in float32 binary format.

    path: path to save
    keypoints: single-precision real matrix, N*4; columns are x, y, scale, orientation
    """
    # Keypoints must carry exactly the four columns listed above.
    assert keypoints.shape[1]==4
    write_matrix(path, keypoints, np.float32)
def write_descriptors(path, descriptors):
    """Save a descriptor matrix to *path* in float32 binary format."""
    write_matrix(path, descriptors, np.float32)
def write_matches(path, matches):
    """Save a match-index matrix to *path* in uint32 binary format.

    When there are no matches, no file is written at all.
    """
    if len(matches) > 0:
        write_matrix(path, matches, np.uint32)
def read_keypoints(path):
    """Load a keypoint matrix (float32) previously saved with write_keypoints."""
    return read_matrix(path, np.float32)
def read_descriptors(path):
    """Load a descriptor matrix (float32) previously saved with write_descriptors."""
    return read_matrix(path, np.float32)
def read_matches(path):
    """Load a match-index matrix (uint32) previously saved with write_matches."""
    return read_matrix(path, np.uint32)
def read_matrix(path, dtype):
    """Read a 2-D matrix stored as an int32 [rows, cols] header followed by
    the raw values of the given *dtype*."""
    with open(path, "rb") as stream:
        rows, cols = np.fromfile(stream, count=2, dtype=np.int32)
        flat = np.fromfile(stream, count=rows * cols, dtype=dtype)
    return flat.reshape(rows, cols)
def write_matrix(path, data, dtype):
    """Serialize a 2-D array to *path*: an int32 shape header followed by the
    values cast to *dtype*, all raw binary."""
    with open(path, 'wb') as stream:
        header = np.asarray(data.shape, dtype='int32')
        header.tofile(stream)
        payload = data.astype(dtype)
        payload.tofile(stream)
| [
"numpy.fromfile",
"numpy.asarray"
] | [((692, 733), 'numpy.fromfile', 'np.fromfile', (['fid'], {'count': '(2)', 'dtype': 'np.int32'}), '(fid, count=2, dtype=np.int32)\n', (703, 733), True, 'import numpy as np\n'), ((751, 807), 'numpy.fromfile', 'np.fromfile', (['fid'], {'count': '(shape[0] * shape[1])', 'dtype': 'dtype'}), '(fid, count=shape[0] * shape[1], dtype=dtype)\n', (762, 807), True, 'import numpy as np\n'), ((910, 947), 'numpy.asarray', 'np.asarray', (['data.shape'], {'dtype': '"""int32"""'}), "(data.shape, dtype='int32')\n", (920, 947), True, 'import numpy as np\n')] |
import argparse
import io
import os
import h5py
import networkx as nx
import numpy as np
import torch
from networkx.algorithms.dag import dag_longest_path
from scipy.spatial.distance import cdist
from torch.utils.data import DataLoader
from tqdm import tqdm
from data import FeatureDataset
from model import (GRUModule, LSTMModule, NetVLAD, NeXtVLAD, TCA, VideoComparator)
from utils import resize_axis
def calculate_similarities(query_features, target_feature, metric='euclidean', comparator=None):
    """
    Compute the similarity of every query video against one target video.
    Args:
        query_features: global features of the query videos
        target_feature: global features of the target video
        metric: 'euclidean', 'cosine', 'chamfer', or anything else for the
                symmetric chamfer similarity
        comparator: optional comparator module used by the chamfer metrics
    Returns:
        similarities: list with one similarity score per query
    """
    similarities = []
    if metric == 'euclidean':
        distances = np.nan_to_num(
            cdist(query_features, target_feature, metric='euclidean'))
        largest = distances.max()
        for row in range(len(query_features)):
            score = np.round(1 - distances[row] / largest, decimals=6)
            similarities.append(score.item())
    elif metric == 'cosine':
        distances = np.nan_to_num(
            cdist(query_features, target_feature, metric='cosine'))
        for row in range(len(query_features)):
            similarities.append((1 - distances[row]).item())
    elif metric == 'chamfer':
        similarities = [chamfer(query, target_feature, comparator)
                        for query in query_features]
    else:
        # Symmetric chamfer: average of both directions.
        for query in query_features:
            forward = chamfer(query, target_feature, comparator)
            backward = chamfer(target_feature, query, comparator)
            similarities.append((forward + backward) / 2.0)
    return similarities
def chamfer(query, target_feature, comparator=False):
    """One-directional chamfer similarity: for each query frame take its best
    match in the target, optionally refined by a comparator network, and
    average over the query frames. Requires a CUDA device."""
    query_t = torch.Tensor(query).cuda()
    target_t = torch.Tensor(target_feature).cuda()
    sim_matrix = torch.einsum('ik,jk->ij', [query_t, target_t])
    if comparator:
        sim_matrix = comparator(sim_matrix).detach()
    best_per_row = sim_matrix.max(dim=1)[0]
    return best_per_row.sum().cpu().item() / sim_matrix.shape[0]
def dp(query, target_feature, phi=10):
    """Dynamic-programming alignment score between two frame-feature sequences.

    query: array of shape [n, d] (frame features)
    target_feature: array of shape [m, d]
    phi: mismatch budget; after `phi` consecutive non-diagonal moves the
         accumulated path is restarted at the current cell.
    Returns the mean of the per-row maxima of the accumulated score matrix.
    """
    n, m = query.shape[0], target_feature.shape[0]
    mismatch = 0
    sims = np.zeros((n, m))
    # First column / first row: plain frame similarities (path seeds).
    for i in range(0, n):
        sims[i, 0] = np.dot(query[i], target_feature[0])
    for j in range(0, m):
        sims[0, j] = np.dot(query[0], target_feature[j])
    for i in range(1, n):
        for j in range(1, m):
            sim = np.dot(query[i], target_feature[j])
            if mismatch >= phi:
                # Too many mismatches: restart the path at this cell.
                sims[i, j] = sim
                mismatch = 0
            else:
                top_left = sims[i - 1, j - 1] + sim
                top = sims[i - 1, j] + sim / 2.0
                left = sims[i, j - 1] + sim / 2.0
                # BUG FIX: a stray `continue` made the code below unreachable,
                # leaving sims[i, j] at 0 and never incrementing `mismatch`.
                if top_left >= max(top, left):
                    sims[i, j] = top_left
                else:
                    sims[i, j] = max(top, left)
                    mismatch += 1
    score = np.sum(np.max(sims, axis=1)) / n
    return score
def compute_dists(query, target_feature):
    """Pairwise frame distances between two feature sets (requires CUDA).

    Returns (idxs, unsorted_dists, sorted_dists) where idxs[i] orders the
    target frames by increasing distance to query frame i.
    """
    query_t = torch.Tensor(query).cuda()
    target_t = torch.Tensor(target_feature).cuda()
    sims = torch.einsum('ik,jk->ij', [query_t, target_t]).cpu().numpy()
    unsorted_dists = 1 - sims
    idxs = np.argsort(unsorted_dists)
    num_rows, num_cols = idxs.shape
    # Row-index matrix matching idxs, used for fancy indexing below.
    rows = np.dot(np.arange(num_rows).reshape(
        (num_rows, 1)), np.ones((1, num_cols))).astype(int)
    sorted_dists = unsorted_dists[rows, idxs]
    return idxs, unsorted_dists, sorted_dists
def tn(query_features, refer_features, top_K=5, min_sim=0.80, max_step=10):
    """
    Compute a frame-alignment score between two sets of (L2-normalized)
    features by finding the longest path in a DAG of candidate frame matches.
    Args:
        query_features: shape: [N, D]
        refer_features: shape: [M, D]
        top_K: number of top-ranked refer frames considered per query frame
        min_sim: minimum similarity required between a query and refer frame
        max_step: maximum step (in frames) between nodes joined by an edge
    Returns:
        score: sum of similarities along the best alignment path.
        NOTE(review): the original docstring described path_query/path_refer
        as the return values, but the function returns the accumulated score.
    """
    node_pair2id = {}
    node_id2pair = {}
    node_id2pair[0] = (-1, -1)  # source
    node_pair2id[(-1, -1)] = 0
    node_num = 1
    DG = nx.DiGraph()
    DG.add_node(0)
    idxs, unsorted_dists, sorted_dists = compute_dists(query_features, refer_features)
    # add nodes: one node per (query frame, refer frame) pair that is similar
    # enough; candidates per query frame stop at the first below min_sim
    # because sorted_dists is ordered by increasing distance
    for qf_idx in range(query_features.shape[0]):
        for k in range(top_K):
            rf_idx = idxs[qf_idx][k]
            sim = 1 - sorted_dists[qf_idx][k]
            if sim < min_sim:
                break
            node_id2pair[node_num] = (qf_idx, rf_idx)
            node_pair2id[(qf_idx, rf_idx)] = node_num
            DG.add_node(node_num)
            node_num += 1
    node_id2pair[node_num] = (query_features.shape[0],
                              refer_features.shape[0])  # sink
    node_pair2id[(query_features.shape[0], refer_features.shape[0])] = node_num
    DG.add_node(node_num)
    node_num += 1
    # link nodes: edge i -> j when j is strictly later in both sequences and
    # within max_step frames; weight is the pair similarity of j
    for i in range(0, node_num - 1):
        for j in range(i + 1, node_num - 1):
            pair_i = node_id2pair[i]
            pair_j = node_id2pair[j]
            if(pair_j[0] > pair_i[0] and pair_j[1] > pair_i[1] and
               pair_j[0] - pair_i[0] <= max_step and pair_j[1] - pair_i[1] <= max_step):
                qf_idx = pair_j[0]
                rf_idx = pair_j[1]
                DG.add_edge(i, j, weight=1 - unsorted_dists[qf_idx][rf_idx])
    # connect every eligible node to the sink with zero weight
    for i in range(0, node_num - 1):
        j = node_num - 1
        pair_i = node_id2pair[i]
        pair_j = node_id2pair[j]
        if(pair_j[0] > pair_i[0] and pair_j[1] > pair_i[1] and
           pair_j[0] - pair_i[0] <= max_step and pair_j[1] - pair_i[1] <= max_step):
            qf_idx = pair_j[0]
            rf_idx = pair_j[1]
            DG.add_edge(i, j, weight=0)
    longest_path = dag_longest_path(DG)
    if 0 in longest_path:
        longest_path.remove(0)  # remove source node
    if node_num - 1 in longest_path:
        longest_path.remove(node_num - 1)  # remove sink node
    path_query = [node_id2pair[node_id][0] for node_id in longest_path]
    path_refer = [node_id2pair[node_id][1] for node_id in longest_path]
    # score: total similarity accumulated along the chosen path
    score = 0.0
    for (qf_idx, rf_idx) in zip(path_query, path_refer):
        score += 1 - unsorted_dists[qf_idx][rf_idx]
    return score
def query_vs_database(model, dataset, args):
    """Embed all query and database videos with `model`, score every
    query/database pair with `calculate_similarities`, and hand the full
    similarity table to the dataset's own evaluation routine.

    model: encoding model exposing .encode(feature, feature_len).
    dataset: benchmark object providing get_queries()/get_database()/evaluate().
    args: parsed CLI namespace (feature_path, cuda, metric, use_comparator, ...).
    """
    model = model.eval()
    comparator = None
    if args.use_comparator:
        # NOTE(review): comparator weights path is hard-coded relative to CWD.
        comparator = VideoComparator()
        comparator.load_state_dict(torch.load('models/video_comparator.pth'))
        comparator = comparator.eval()
    if args.cuda:
        model = model.cuda()
        if args.use_comparator:
            comparator = comparator.cuda()
    print('loading features...')
    vid2features = h5py.File(args.feature_path, 'r')
    print('...features loaded')
    test_loader = DataLoader(
        FeatureDataset(vid2features, dataset.get_queries(),
                       padding_size=args.padding_size, random_sampling=args.random_sampling),
        batch_size=1, shuffle=False)
    # Extract features of the queries
    all_db, queries, queries_ids = set(), [], []
    for feature, feature_len, query_id in tqdm(test_loader):
        query_id = query_id[0]
        # Skip videos whose feature sequence is empty.
        if feature.shape[1] > 0:
            if args.cuda:
                feature = feature.cuda()
                feature_len = feature_len.cuda()
            # queries.append(model(feature, feature_len).detach().cpu().numpy()[0])
            queries.append(model.encode(
                feature, feature_len).detach().cpu().numpy()[0])
            queries_ids.append(query_id)
            all_db.add(query_id)
    queries = np.array(queries)
    test_loader = DataLoader(
        FeatureDataset(vid2features, dataset.get_database(),
                       padding_size=args.padding_size, random_sampling=args.random_sampling),
        batch_size=1, shuffle=False)
    # Calculate similarities between the queries and the database videos
    similarities = dict({query: dict() for query in queries_ids})
    for feature, feature_len, video_id in tqdm(test_loader):
        video_id = video_id[0]
        # print('current video : {} {}'.format(video_id, feature.shape))
        if feature.shape[1] > 0:
            if args.cuda:
                feature = feature.cuda()
                feature_len = feature_len.cuda()
            # embedding = model(feature, feature_len).detach().cpu().numpy()
            embedding = model.encode(
                feature, feature_len).detach().cpu().numpy()[0]
            all_db.add(video_id)
            sims = calculate_similarities(
                queries, embedding, args.metric, comparator)
            for i, s in enumerate(sims):
                similarities[queries_ids[i]][video_id] = float(s)
    # Delegate metric computation (e.g. mAP) to the dataset object.
    dataset.evaluate(similarities, all_db)
def main():
    """Parse CLI arguments, build the selected benchmark dataset and the TCA
    model, and run query-vs-database evaluation."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-d', '--dataset', type=str, required=True,
                        help='Name of evaluation dataset. Options: CC_WEB_VIDEO, VCDB, '
                             '\"FIVR-200K\", \"FIVR-5K\", \"EVVE\"')
    parser.add_argument('-pc', '--pca_components', type=int, default=1024,
                        help='Number of components of the PCA module.')
    # NOTE(review): num_clusters and output_dim are parsed but not used below.
    parser.add_argument('-nc', '--num_clusters', type=int, default=256,
                        help='Number of clusters of the NetVLAD model')
    parser.add_argument('-od', '--output_dim', type=int, default=1024,
                        help='Dimention of the output embedding of the NetVLAD model')
    parser.add_argument('-nl', '--num_layers', type=int, default=1,
                        help='Number of layers')
    parser.add_argument('-mp', '--model_path', type=str, required=True,
                        help='Directory of the .pth file containing model state dicts')
    parser.add_argument('-fp', '--feature_path', type=str, required=True,
                        help='Path to the .hdf5 file that contains the features of the dataset')
    parser.add_argument('-ps', '--padding_size', type=int, default=100,
                        help='Padding size of the input data at temporal axis')
    parser.add_argument('-rs', '--random_sampling', action='store_true',
                        help='Flag that indicates that the frames in a video are random sampled if max frame limit is exceeded')
    parser.add_argument('-m', '--metric', type=str, default='euclidean',
                        help='Metric that will be used for similarity calculation')
    parser.add_argument('-uc', '--use_comparator', action='store_true',
                        help='Flag that indicates that the video comparator is used')
    args = parser.parse_args()
    args.cuda = torch.cuda.is_available()
    # Dataset selection by substring match on the --dataset argument.
    if 'CC_WEB' in args.dataset:
        from data import CC_WEB_VIDEO
        dataset = CC_WEB_VIDEO()
        eval_function = query_vs_database
    elif 'VCDB' in args.dataset:
        from data import VCDB
        dataset = VCDB()
        eval_function = query_vs_database
    elif 'FIVR' in args.dataset:
        from data import FIVR
        # e.g. "FIVR-5K" -> version "5k"
        dataset = FIVR(version=args.dataset.split('-')[1].lower())
        eval_function = query_vs_database
    elif 'EVVE' in args.dataset:
        from data import EVVE
        dataset = EVVE()
        eval_function = query_vs_database
    else:
        raise Exception('[ERROR] Not supported evaluation dataset. '
                        'Supported options: \"CC_WEB_VIDEO\", \"VCDB\", \"FIVR-200K\", \"FIVR-5K\", \"EVVE\"')
    model = TCA(feature_size=args.pca_components, nlayers=args.num_layers)
    model.load_state_dict(torch.load(args.model_path))
    eval_function(model, dataset, args)
# Script entry point: run the evaluation only when executed directly.
if __name__ == '__main__':
    main()
| [
"argparse.ArgumentParser",
"numpy.ones",
"numpy.argsort",
"data.CC_WEB_VIDEO",
"numpy.arange",
"data.VCDB",
"model.VideoComparator",
"torch.load",
"data.EVVE",
"numpy.max",
"torch.Tensor",
"scipy.spatial.distance.cdist",
"h5py.File",
"tqdm.tqdm",
"torch.einsum",
"torch.cuda.is_availabl... | [((1933, 1983), 'torch.einsum', 'torch.einsum', (['"""ik,jk->ij"""', '[query, target_feature]'], {}), "('ik,jk->ij', [query, target_feature])\n", (1945, 1983), False, 'import torch\n'), ((2263, 2279), 'numpy.zeros', 'np.zeros', (['(n, m)'], {}), '((n, m))\n', (2271, 2279), True, 'import numpy as np\n'), ((3353, 3379), 'numpy.argsort', 'np.argsort', (['unsorted_dists'], {}), '(unsorted_dists)\n', (3363, 3379), True, 'import numpy as np\n'), ((4150, 4162), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (4160, 4162), True, 'import networkx as nx\n'), ((5800, 5820), 'networkx.algorithms.dag.dag_longest_path', 'dag_longest_path', (['DG'], {}), '(DG)\n', (5816, 5820), False, 'from networkx.algorithms.dag import dag_longest_path\n'), ((6739, 6772), 'h5py.File', 'h5py.File', (['args.feature_path', '"""r"""'], {}), "(args.feature_path, 'r')\n", (6748, 6772), False, 'import h5py\n'), ((7155, 7172), 'tqdm.tqdm', 'tqdm', (['test_loader'], {}), '(test_loader)\n', (7159, 7172), False, 'from tqdm import tqdm\n'), ((7632, 7649), 'numpy.array', 'np.array', (['queries'], {}), '(queries)\n', (7640, 7649), True, 'import numpy as np\n'), ((8054, 8071), 'tqdm.tqdm', 'tqdm', (['test_loader'], {}), '(test_loader)\n', (8058, 8071), False, 'from tqdm import tqdm\n'), ((8820, 8845), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (8843, 8845), False, 'import argparse\n'), ((10687, 10712), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (10710, 10712), False, 'import torch\n'), ((11495, 11557), 'model.TCA', 'TCA', ([], {'feature_size': 'args.pca_components', 'nlayers': 'args.num_layers'}), '(feature_size=args.pca_components, nlayers=args.num_layers)\n', (11498, 11557), False, 'from model import GRUModule, LSTMModule, NetVLAD, NeXtVLAD, TCA, VideoComparator\n'), ((2327, 2362), 'numpy.dot', 'np.dot', (['query[i]', 'target_feature[0]'], {}), '(query[i], target_feature[0])\n', (2333, 2362), True, 'import numpy as 
np\n'), ((2410, 2445), 'numpy.dot', 'np.dot', (['query[0]', 'target_feature[j]'], {}), '(query[0], target_feature[j])\n', (2416, 2445), True, 'import numpy as np\n'), ((6430, 6447), 'model.VideoComparator', 'VideoComparator', ([], {}), '()\n', (6445, 6447), False, 'from model import GRUModule, LSTMModule, NetVLAD, NeXtVLAD, TCA, VideoComparator\n'), ((10803, 10817), 'data.CC_WEB_VIDEO', 'CC_WEB_VIDEO', ([], {}), '()\n', (10815, 10817), False, 'from data import CC_WEB_VIDEO\n'), ((11584, 11611), 'torch.load', 'torch.load', (['args.model_path'], {}), '(args.model_path)\n', (11594, 11611), False, 'import torch\n'), ((889, 946), 'scipy.spatial.distance.cdist', 'cdist', (['query_features', 'target_feature'], {'metric': '"""euclidean"""'}), "(query_features, target_feature, metric='euclidean')\n", (894, 946), False, 'from scipy.spatial.distance import cdist\n'), ((1833, 1852), 'torch.Tensor', 'torch.Tensor', (['query'], {}), '(query)\n', (1845, 1852), False, 'import torch\n'), ((1881, 1909), 'torch.Tensor', 'torch.Tensor', (['target_feature'], {}), '(target_feature)\n', (1893, 1909), False, 'import torch\n'), ((2520, 2555), 'numpy.dot', 'np.dot', (['query[i]', 'target_feature[j]'], {}), '(query[i], target_feature[j])\n', (2526, 2555), True, 'import numpy as np\n'), ((3055, 3075), 'numpy.max', 'np.max', (['sims'], {'axis': '(1)'}), '(sims, axis=1)\n', (3061, 3075), True, 'import numpy as np\n'), ((3152, 3171), 'torch.Tensor', 'torch.Tensor', (['query'], {}), '(query)\n', (3164, 3171), False, 'import torch\n'), ((3200, 3228), 'torch.Tensor', 'torch.Tensor', (['target_feature'], {}), '(target_feature)\n', (3212, 3228), False, 'import torch\n'), ((6483, 6524), 'torch.load', 'torch.load', (['"""models/video_comparator.pth"""'], {}), "('models/video_comparator.pth')\n", (6493, 6524), False, 'import torch\n'), ((10941, 10947), 'data.VCDB', 'VCDB', ([], {}), '()\n', (10945, 10947), False, 'from data import VCDB\n'), ((1175, 1229), 'scipy.spatial.distance.cdist', 'cdist', 
(['query_features', 'target_feature'], {'metric': '"""cosine"""'}), "(query_features, target_feature, metric='cosine')\n", (1180, 1229), False, 'from scipy.spatial.distance import cdist\n'), ((3461, 3488), 'numpy.ones', 'np.ones', (['(1, idxs.shape[1])'], {}), '((1, idxs.shape[1]))\n', (3468, 3488), True, 'import numpy as np\n'), ((3247, 3297), 'torch.einsum', 'torch.einsum', (['"""ik,jk->ij"""', '[query, target_feature]'], {}), "('ik,jk->ij', [query, target_feature])\n", (3259, 3297), False, 'import torch\n'), ((11243, 11249), 'data.EVVE', 'EVVE', ([], {}), '()\n', (11247, 11249), False, 'from data import EVVE\n'), ((3398, 3422), 'numpy.arange', 'np.arange', (['idxs.shape[0]'], {}), '(idxs.shape[0])\n', (3407, 3422), True, 'import numpy as np\n')] |
from math import *
import os
import numpy as np
def point_grapher(
    x_vals:list, y_vals:list,
    x_range:tuple=None,
    y_range:tuple=None,):
    """Render an ASCII scatter plot of (x_vals, y_vals) in the terminal.

    x_vals, y_vals: parallel sequences of point coordinates.
    x_range, y_range: (lo, hi) plot bounds; default to the data's min/max.
    NOTE(review): a degenerate range (lo == hi) still divides by zero, as in
    the original implementation.
    """
    import shutil
    if x_range is None:
        x_range = (min(x_vals),
                   max(x_vals))
    if y_range is None:
        y_range = (min(y_vals),
                   max(y_vals))
    # BUG FIX: os.get_terminal_size() raises OSError when stdout is not a
    # real terminal (pipes, redirection); shutil.get_terminal_size() honours
    # $COLUMNS/$LINES and falls back to (80, 24) instead of crashing.
    cols, rows = shutil.get_terminal_size()
    cols -= 2
    rows -= 2
    print(f" {x_range[0]:<10.0f}"
          f"{x_range[1]:{cols-9}.0f}")
    print(" +"+"-"*(cols-1)+">")
    print(f" ^{y_range[1]:.0f}")
    # Character buffer for the plot area, initialised with spaces.
    screen = np.int_(
        [[ord(' ')]*cols]*rows)
    # Sorting by x is not necessary for correctness but helps with debugging.
    points = sorted(zip(x_vals, y_vals), key=lambda p: p[0])

    def normalize(left, right, val):
        # Map val from [left, right] onto [0, 1].
        return (val-left)/ \
               (right-left)

    for px, py in points:
        x = normalize(*x_range, px)*cols
        y = normalize(*y_range, py)*rows
        # Clamp into the drawable area (column/row 1 and up).
        x = x if x > 1 else 1
        y = y if y > 1 else 1
        screen[int(y+.5)-1][int(x+.5)-1] = ord('*')
    # Draw top-to-bottom: the last screen row holds the largest y values.
    for line in screen[::-1]:
        print(" |", end="")
        for c in line:
            print(chr(c), end='')
        print()
    print(" +"+"-"*(cols-1)+">")
    print(f"{y_range[0]:.0f}")
    print(f" {x_range[0]:<10.0f}"
          f"{x_range[1]:{cols-9}.0f}")
def demo():
    """Plot y = x**2 for x in [0, 100) as a quick smoke test."""
    xs = list(range(100))
    ys = [value ** 2 for value in xs]
    point_grapher(xs, ys)
def draw_circle():
    """Plot 1000 points on the unit circle (integer angles in radians)."""
    angles = np.arange(1000)
    point_grapher(np.sin(angles), np.cos(angles))
# Script entry point: show the parabola demo, then the circle plot.
if __name__ == "__main__":
    demo()
    draw_circle()
| [
"os.get_terminal_size",
"numpy.sin",
"numpy.cos"
] | [((343, 365), 'os.get_terminal_size', 'os.get_terminal_size', ([], {}), '()\n', (363, 365), False, 'import os\n'), ((1570, 1579), 'numpy.sin', 'np.sin', (['t'], {}), '(t)\n', (1576, 1579), True, 'import numpy as np\n'), ((1588, 1597), 'numpy.cos', 'np.cos', (['t'], {}), '(t)\n', (1594, 1597), True, 'import numpy as np\n')] |
# Copyright 2019 <NAME>, <NAME> and <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Sequential Monte-Carlo Approximate Bayesian Computation (SMC-ABC)
"""
# Imports
from sciope.inference.abc_inference import ABC
from sciope.inference.inference_base import InferenceBase
from sciope.inference import abc_inference
from sciope.core import core
from sciope.utilities.distancefunctions import euclidean as euc
from sciope.utilities.summarystats import burstiness as bs
from sciope.utilities.housekeeping import sciope_logger as ml
from sciope.utilities.priors.prior_base import PriorBase
from sciope.utilities.epsilonselectors import RelativeEpsilonSelector
from sciope.utilities.perturbationkernels.multivariate_normal import MultivariateNormalKernel
import numpy as np
import dask
from dask.distributed import futures_of, as_completed, wait
from dask import delayed
class PerturbationPrior(PriorBase):
    """Proposal prior for SMC-ABC: draws from a weighted previous population
    and perturbs each draw with a kernel, rejecting perturbed samples that
    fall outside the support of the reference prior."""

    def __init__(self, ref_prior, samples, normalized_weights, perturbation_kernel,
                 use_logger=False):
        """
        ref_prior: original prior; used to check the support of perturbed samples.
        samples: accepted parameter samples from the previous SMC generation.
        normalized_weights: importance weights of `samples` (assumed to sum to 1).
        perturbation_kernel: kernel exposing rvs() to jitter drawn samples.
        use_logger: whether logging is enabled in the base class.
        """
        self.name = 'Perturbation Prior'
        self.ref_prior = ref_prior
        self.samples = samples
        self.normalized_weights = normalized_weights
        self.perturbation_kernel = perturbation_kernel
        super(PerturbationPrior, self).__init__(self.name, use_logger)

    def draw(self, n=1, chunk_size=1):
        """Return a list of delayed tasks that together yield n samples,
        split into chunks of at most chunk_size draws each."""
        assert n >= chunk_size
        generated_samples = []
        # Emit the remainder chunk first so the loop below covers the rest
        # in full chunk_size batches.
        m = n % chunk_size
        if m > 0:
            generated_samples.append(self._weighted_draw_perturb(m))
        for i in range(0, n - m, chunk_size):
            generated_samples.append(self._weighted_draw_perturb(chunk_size))
        return generated_samples

    @delayed
    def _weighted_draw_perturb(self, m):
        """Dask-delayed task: draw m samples (with replacement, weighted),
        perturb each, and re-draw the perturbation until it lands inside the
        reference prior's support."""
        idxs = np.random.choice(self.samples.shape[0], m,
                                p=self.normalized_weights)
        s0 = [self.samples[idx] for idx in idxs]
        s = []
        for z in s0:
            accepted = False
            while not accepted:
                sz = self.perturbation_kernel.rvs(z)
                # Keep only perturbations with non-zero prior density.
                if self.ref_prior.pdf(sz) > 0:
                    accepted = True
                    s.append(sz)
        return np.asarray(s)
class SMCABC(InferenceBase):
    """
    SMC - Approximate Bayesian Computation
    Properties/variables:
    * data (observed / fixed data)
    * sim (simulator function handle)
    * prior_function (prior over the simulator parameters)
    * perturbation_kernel (kernel for perturbing parameters samples)
    * summaries_function (summary statistics calculation function)
    * distance_function (function calculating deviation between simulated statistics and observed statistics)
    * summaries_divisor (numpy array of maxima - used for normalizing summary statistic values)
    * use_logger (whether logging is enabled or disabled)
    Methods:
    * infer (perform parameter inference)
    """

    # NOTE(review): the default summaries_function and distance_function are
    # constructed once at class-definition time and shared by all instances
    # that rely on the defaults (standard Python default-argument semantics).
    def __init__(self, data, sim, prior_function,
                 perturbation_kernel=None,
                 summaries_function=bs.Burstiness().compute,
                 distance_function=euc.EuclideanDistance(),
                 summaries_divisor=None, use_logger=False):
        self.name = 'SMC-ABC'
        super(SMCABC, self).__init__(self.name, data, sim, use_logger)
        self.prior_function = prior_function
        self.summaries_function = summaries_function
        self.distance_function = distance_function
        self.summaries_divisor = summaries_divisor
        if perturbation_kernel is not None:
            self.perturbation_kernel = perturbation_kernel
        else:
            # Default: adaptive multivariate normal kernel sized to the
            # prior's dimensionality.
            self.perturbation_kernel = MultivariateNormalKernel(
                d=self.prior_function.get_dimension(),
                adapt=True)
        if self.use_logger:
            self.logger = ml.SciopeLogger().get_logger()
            self.logger.info("Sequential Monte-Carlo Approximate Bayesian Computation initialized")

    def infer(self, num_samples, batch_size,
              eps_selector=RelativeEpsilonSelector(20), chunk_size=10,
              ensemble_size=1):
        """Performs SMC-ABC.
        Parameters
        ----------
        num_samples : int
            The number of required accepted samples
        batch_size : int
            The batch size of samples for performing rejection sampling
        eps_selector : EpsilonSelector
            The epsilon selector to determine the sequence of epsilons
        chunk_size : int
            The partition size when splitting the fixed data. For avoiding many individual tasks
            in dask if the data is large. Default 10.
        ensemble_size : int
            In case we have an ensemble of responses
        normalize : bool
            Whether summary statistics should be normalized and epsilon be interpreted as a percentage
        Returns
        -------
        dict
            Keys
            'accepted_samples: The accepted parameter values',
            'distances: Accepted distance values',
            'accepted_count: Number of accepted samples',
            'trial_count: The number of total trials performed in order to converge',
            'inferred_parameters': The mean of accepted parameter samples
        """
        # NOTE(review): ensemble_size is accepted but not used in this method.
        # NOTE(review): the default eps_selector instance is created once at
        # class-definition time and shared across calls.
        abc_history = []
        t = num_samples
        prior_function = self.prior_function
        tol, relative, terminate = eps_selector.get_initial_epsilon()
        print("Determining initial population using {}".format(tol))
        # Initial generation: plain rejection ABC from the reference prior.
        abc_instance = abc_inference.ABC(self.data, self.sim, prior_function,
                                          epsilon=tol,
                                          summaries_function=self.summaries_function,
                                          distance_function=self.distance_function,
                                          summaries_divisor=self.summaries_divisor,
                                          use_logger=self.use_logger)
        abc_instance.compute_fixed_mean(chunk_size=chunk_size)
        abc_results = abc_instance.infer(num_samples=t,
                                         batch_size=batch_size,
                                         chunk_size=chunk_size,
                                         normalize=relative)
        population = np.vstack(abc_results['accepted_samples'])[:t]
        # First-generation weights are uniform.
        normalized_weights = np.ones(t) / t
        abc_history.append(abc_results)
        # SMC iterations
        round = 1
        while not terminate:
            tol, relative, terminate = eps_selector.get_epsilon(round, abc_history)
            print("Starting epsilon = {}".format(tol))
            if self.use_logger:
                self.logger.info("Starting epsilon = {}".format(tol))
            # Adapt the kernel based on the current population
            self.perturbation_kernel.adapt(population)
            # Generate a proposal prior based on the population
            new_prior = PerturbationPrior(self.prior_function,
                                           population,
                                           normalized_weights,
                                           self.perturbation_kernel)
            try:
                # Run ABC on the next epsilon using the proposal prior
                abc_instance = abc_inference.ABC(self.data, self.sim, new_prior,
                                                 epsilon=tol,
                                                 summaries_function=self.summaries_function,
                                                 distance_function=self.distance_function,
                                                 summaries_divisor=self.summaries_divisor,
                                                 use_logger=self.use_logger)
                abc_instance.compute_fixed_mean(chunk_size=chunk_size)
                abc_results = abc_instance.infer(num_samples=t,
                                                 batch_size=batch_size,
                                                 chunk_size=chunk_size,
                                                 normalize=relative)
                # Compute importance weights for the new samples
                new_samples = np.vstack(abc_results['accepted_samples'])[:t]
                prior_weights = self.prior_function.pdf(new_samples)
                kweights = self.perturbation_kernel.pdf(population, new_samples)
                new_weights = prior_weights / np.sum(kweights * normalized_weights[:, np.newaxis], axis=0)
                new_weights = new_weights / sum(new_weights)
                population = new_samples
                normalized_weights = new_weights
                abc_history.append(abc_results)
                round += 1
            except KeyboardInterrupt:
                # Allow the user to stop early and keep what we have so far.
                return abc_history
            except:
                # Anything else is unexpected: re-raise unchanged.
                raise
        return abc_history
| [
"sciope.utilities.epsilonselectors.RelativeEpsilonSelector",
"sciope.utilities.distancefunctions.euclidean.EuclideanDistance",
"sciope.utilities.housekeeping.sciope_logger.SciopeLogger",
"numpy.sum",
"numpy.asarray",
"numpy.ones",
"sciope.inference.abc_inference.ABC",
"numpy.random.choice",
"sciope.... | [((2263, 2332), 'numpy.random.choice', 'np.random.choice', (['self.samples.shape[0]', 'm'], {'p': 'self.normalized_weights'}), '(self.samples.shape[0], m, p=self.normalized_weights)\n', (2279, 2332), True, 'import numpy as np\n'), ((2696, 2709), 'numpy.asarray', 'np.asarray', (['s'], {}), '(s)\n', (2706, 2709), True, 'import numpy as np\n'), ((3672, 3695), 'sciope.utilities.distancefunctions.euclidean.EuclideanDistance', 'euc.EuclideanDistance', ([], {}), '()\n', (3693, 3695), True, 'from sciope.utilities.distancefunctions import euclidean as euc\n'), ((4584, 4611), 'sciope.utilities.epsilonselectors.RelativeEpsilonSelector', 'RelativeEpsilonSelector', (['(20)'], {}), '(20)\n', (4607, 4611), False, 'from sciope.utilities.epsilonselectors import RelativeEpsilonSelector\n'), ((6064, 6301), 'sciope.inference.abc_inference.ABC', 'abc_inference.ABC', (['self.data', 'self.sim', 'prior_function'], {'epsilon': 'tol', 'summaries_function': 'self.summaries_function', 'distance_function': 'self.distance_function', 'summaries_divisor': 'self.summaries_divisor', 'use_logger': 'self.use_logger'}), '(self.data, self.sim, prior_function, epsilon=tol,\n summaries_function=self.summaries_function, distance_function=self.\n distance_function, summaries_divisor=self.summaries_divisor, use_logger\n =self.use_logger)\n', (6081, 6301), False, 'from sciope.inference import abc_inference\n'), ((3612, 3627), 'sciope.utilities.summarystats.burstiness.Burstiness', 'bs.Burstiness', ([], {}), '()\n', (3625, 3627), True, 'from sciope.utilities.summarystats import burstiness as bs\n'), ((6824, 6866), 'numpy.vstack', 'np.vstack', (["abc_results['accepted_samples']"], {}), "(abc_results['accepted_samples'])\n", (6833, 6866), True, 'import numpy as np\n'), ((6900, 6910), 'numpy.ones', 'np.ones', (['t'], {}), '(t)\n', (6907, 6910), True, 'import numpy as np\n'), ((7823, 8055), 'sciope.inference.abc_inference.ABC', 'abc_inference.ABC', (['self.data', 'self.sim', 'new_prior'], {'epsilon': 
'tol', 'summaries_function': 'self.summaries_function', 'distance_function': 'self.distance_function', 'summaries_divisor': 'self.summaries_divisor', 'use_logger': 'self.use_logger'}), '(self.data, self.sim, new_prior, epsilon=tol,\n summaries_function=self.summaries_function, distance_function=self.\n distance_function, summaries_divisor=self.summaries_divisor, use_logger\n =self.use_logger)\n', (7840, 8055), False, 'from sciope.inference import abc_inference\n'), ((4380, 4397), 'sciope.utilities.housekeeping.sciope_logger.SciopeLogger', 'ml.SciopeLogger', ([], {}), '()\n', (4395, 4397), True, 'from sciope.utilities.housekeeping import sciope_logger as ml\n'), ((8731, 8773), 'numpy.vstack', 'np.vstack', (["abc_results['accepted_samples']"], {}), "(abc_results['accepted_samples'])\n", (8740, 8773), True, 'import numpy as np\n'), ((8976, 9036), 'numpy.sum', 'np.sum', (['(kweights * normalized_weights[:, np.newaxis])'], {'axis': '(0)'}), '(kweights * normalized_weights[:, np.newaxis], axis=0)\n', (8982, 9036), True, 'import numpy as np\n')] |
import pandas as pd
import gensim
from gensim.utils import simple_preprocess
from gensim.parsing.preprocessing import STOPWORDS
from nltk.stem import WordNetLemmatizer, SnowballStemmer
from nltk.stem.porter import *
import numpy as np
import nltk
def lemmatize_stemming(text):
    """Lemmatize *text* as a verb, then reduce the lemma to its stem."""
    lemma = WordNetLemmatizer().lemmatize(text, pos='v')
    return stemmer.stem(lemma)
def preprocess(text):
    """Tokenise *text* and return stemmed lemmas of every token that is
    longer than three characters and not a gensim stopword."""
    stopwords = gensim.parsing.preprocessing.STOPWORDS
    return [
        lemmatize_stemming(token)
        for token in gensim.utils.simple_preprocess(text)
        if token not in stopwords and len(token) > 3
    ]
if __name__ == "__main__":
    # Load the pre-segmented text; one row per segment.
    # NOTE(review): error_bad_lines is deprecated in recent pandas
    # (replaced by on_bad_lines) — confirm the pandas version in use.
    data = pd.read_csv('segmentaion_result.csv', error_bad_lines=False, encoding="utf8")
    data_text = data[['text']]
    data_text['index'] = data_text.index
    documents = data_text
    print(len(documents), "Documents found")
    # Fix the seed so LDA initialisation is reproducible.
    np.random.seed(2018)
    nltk.download('wordnet')
    # Sanity check that the WordNet lemmatizer is working ('went' -> 'go').
    print(WordNetLemmatizer().lemmatize('went', pos='v'))
    stemmer = SnowballStemmer('english')
    # Tokenise/lemmatise/stem every segment (see preprocess above).
    processed_docs = documents['text'].map(preprocess)
    dictionary = gensim.corpora.Dictionary(processed_docs)
    bow_corpus = 0
    if(len(dictionary) != 0):
        # Bag-of-words representation of every segment.
        bow_corpus = [dictionary.doc2bow(doc) for doc in processed_docs]
    else:
        print("Change filter values to run!")
        exit()
    # One LDA topic per document segment.
    lda_model = gensim.models.LdaMulticore(bow_corpus, num_topics=len(documents), id2word=dictionary, passes=2, workers=4)
    for idx, topic in lda_model.print_topics(-1):
        print('Topic: {} \n======\nWords: {}'.format(idx, topic))
    # M[i][j] = score of topic j for segment i (0 when the topic is absent).
    M = []
    count = 0
    for bow_doc in bow_corpus:
        M_r = [0]*len(documents)
        for index, score in sorted(lda_model[bow_doc], key=lambda tup: -1*tup[1]):
            M_r[index] = score
        M.append(M_r)
        count += 1
    print("External Knowledge Gaps:-")
    f = open("external_gaps.txt", "w")
    f.write(str(pd.DataFrame(M)))
    no_gaps = 0
    threshold = 1.75
    # A "gap" is an L1 distance between consecutive segments' topic
    # vectors that exceeds the threshold.
    for i in range(len(M)-1):
        tmp_sum = 0
        for j in range(len(M[0])):
            tmp_sum += abs(M[i+1][j]-M[i][j])
        if tmp_sum > threshold:
            no_gaps += 1
            print("Gap found between segments", i+1, "and", i+2)
            f.write("Gap found between segments "+str(i+1)+" and "+str(i+2)+"\n")
    print("Total", no_gaps, "Knowledge-gaps found!")
    f.write("Total "+str(no_gaps)+" Knowledge-gaps found!\n")
"pandas.DataFrame",
"numpy.random.seed",
"nltk.stem.WordNetLemmatizer",
"pandas.read_csv",
"nltk.stem.SnowballStemmer",
"gensim.corpora.Dictionary",
"gensim.utils.simple_preprocess",
"nltk.download"
] | [((404, 440), 'gensim.utils.simple_preprocess', 'gensim.utils.simple_preprocess', (['text'], {}), '(text)\n', (434, 440), False, 'import gensim\n'), ((634, 711), 'pandas.read_csv', 'pd.read_csv', (['"""segmentaion_result.csv"""'], {'error_bad_lines': '(False)', 'encoding': '"""utf8"""'}), "('segmentaion_result.csv', error_bad_lines=False, encoding='utf8')\n", (645, 711), True, 'import pandas as pd\n'), ((846, 866), 'numpy.random.seed', 'np.random.seed', (['(2018)'], {}), '(2018)\n', (860, 866), True, 'import numpy as np\n'), ((869, 893), 'nltk.download', 'nltk.download', (['"""wordnet"""'], {}), "('wordnet')\n", (882, 893), False, 'import nltk\n'), ((962, 988), 'nltk.stem.SnowballStemmer', 'SnowballStemmer', (['"""english"""'], {}), "('english')\n", (977, 988), False, 'from nltk.stem import WordNetLemmatizer, SnowballStemmer\n'), ((1057, 1098), 'gensim.corpora.Dictionary', 'gensim.corpora.Dictionary', (['processed_docs'], {}), '(processed_docs)\n', (1082, 1098), False, 'import gensim\n'), ((1789, 1804), 'pandas.DataFrame', 'pd.DataFrame', (['M'], {}), '(M)\n', (1801, 1804), True, 'import pandas as pd\n'), ((302, 321), 'nltk.stem.WordNetLemmatizer', 'WordNetLemmatizer', ([], {}), '()\n', (319, 321), False, 'from nltk.stem import WordNetLemmatizer, SnowballStemmer\n'), ((902, 921), 'nltk.stem.WordNetLemmatizer', 'WordNetLemmatizer', ([], {}), '()\n', (919, 921), False, 'from nltk.stem import WordNetLemmatizer, SnowballStemmer\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 11 22:13:10 2018
@author: tatvam
"""
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
# import the dataset
# Multiple linear regression on the 50-startups dataset, followed by
# manual backward elimination of statistically insignificant features.
dataset = pd.read_csv("50_Startups.csv")
# Features: all columns but the last; target: column 4 (Profit).
X = dataset.iloc[:, :-1].values
Y = dataset.iloc[:, 4].values
# Encoding Catagorical data
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
labelencoder_X = LabelEncoder()
X[:, 3] = labelencoder_X.fit_transform(X[:, 3])
# NOTE(review): categorical_features was removed from OneHotEncoder in
# newer scikit-learn; this requires an old sklearn version — verify.
onehotencoder = OneHotEncoder(categorical_features = [3])
X = onehotencoder.fit_transform(X).toarray()
# Avoiding dummy variable trap
X=X[:, 1:]
# Splitting the data into training set and test set
from sklearn.model_selection import train_test_split
X_train,X_test,Y_train,Y_test = train_test_split(X,Y, test_size = 0.2, random_state = 0)
# Fitting multi linear regression in training set
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
regressor.fit(X_train, Y_train)
# Predicting the Test set results
y_pred = regressor.predict(X_test)
# Optimal model using Backward Elimination
import statsmodels.formula.api as sm
# Prepend an explicit intercept column of ones (statsmodels OLS does not
# add one automatically).
X = np.append(arr = np.ones((50,1)).astype(float), values = X,axis=1)
X_opt = X[:, [0, 1, 2, 3, 4, 5]]
regressor_OLS = sm.OLS(endog = Y,exog = X_opt).fit()
regressor_OLS.summary()
mx = 0
# Repeatedly drop the feature with the largest p-value until all
# remaining p-values are <= 0.05.
while 1:
    regressor_OLS = sm.OLS(Y,X_opt).fit()
    print(regressor_OLS.summary())
    flag = 0
    mx = 0
    # Column 4 of the summary table holds each coefficient's p-value.
    for i in range(1,int(X_opt.size/50)):
        mx = max(mx,float(regressor_OLS.summary().tables[1][i][4].data))
    print(mx)
    for i in range(1,int(X_opt.size/50)):
        if float(mx) > 0.05 and mx == float(regressor_OLS.summary().tables[1][i][4].data):
            # NOTE(review): summary-table row i is mapped to column i-1;
            # confirm this offset against the table layout (header rows).
            X_opt = np.delete(X_opt,i-1,axis = 1)
            flag = 1
            break
    if flag == 0:
        break
# Drop the leading intercept column before refitting with sklearn.
X_opt = X_opt[:, 1 : ]
from sklearn.model_selection import train_test_split
X_train_B,X_test_B,Y_train,Y_test = train_test_split(X_opt,Y, test_size = 0.2, random_state = 0)
from sklearn.linear_model import LinearRegression
regressor_BE = LinearRegression()
regressor_BE.fit(X_train_B, Y_train)
y_pred_BE = regressor_BE.predict(X_test_B)
"""import statsmodels.formula.api as sm
def backwardElimination(x, sl):
    numVars = len(x[0])
    for i in range(0, numVars):
        regressor_OLS = sm.OLS(y, x).fit()
        maxVar = max(regressor_OLS.pvalues).astype(float)
        if maxVar > sl:
            for j in range(0, numVars - i):
                if (regressor_OLS.pvalues[j].astype(float) == maxVar):
                    x = np.delete(x, j, 1)
    regressor_OLS.summary()
    return x
SL = 0.05
X_opt = X[:, [0, 1, 2, 3, 4, 5]]
X_Modeled = backwardElimination(X_opt, SL)
"""
"""
xelimination = X[:,[0,1,2,3,4,5]]
regressorOLS = sm.OLS(Y, xelimination).fit()
print(regressorOLS.summary())
xelimination = X[:,[0,1,3,4,5]]
regressorOLS = sm.OLS(Y, xelimination).fit()
print(regressorOLS.summary())
xelimination = X[:,[0,3,4,5]]
regressorOLS = sm.OLS(Y, xelimination).fit()
print(regressorOLS.summary())
xelimination = X[:,[0,3,5]]
regressorOLS = sm.OLS(Y, xelimination).fit()
print(regressorOLS.summary())
xelimination = X[:,[0,3]]
regressorOLS = sm.OLS(Y, xelimination).fit()
print(regressorOLS.summary())
"""
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"sklearn.preprocessing.OneHotEncoder",
"numpy.ones",
"sklearn.preprocessing.LabelEncoder",
"sklearn.linear_model.LinearRegression",
"statsmodels.formula.api.OLS",
"numpy.delete"
] | [((217, 247), 'pandas.read_csv', 'pd.read_csv', (['"""50_Startups.csv"""'], {}), "('50_Startups.csv')\n", (228, 247), True, 'import pandas as pd\n'), ((419, 433), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (431, 433), False, 'from sklearn.preprocessing import LabelEncoder, OneHotEncoder\n'), ((498, 537), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {'categorical_features': '[3]'}), '(categorical_features=[3])\n', (511, 537), False, 'from sklearn.preprocessing import LabelEncoder, OneHotEncoder\n'), ((767, 820), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'Y'], {'test_size': '(0.2)', 'random_state': '(0)'}), '(X, Y, test_size=0.2, random_state=0)\n', (783, 820), False, 'from sklearn.model_selection import train_test_split\n'), ((938, 956), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (954, 956), False, 'from sklearn.linear_model import LinearRegression\n'), ((1935, 1992), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X_opt', 'Y'], {'test_size': '(0.2)', 'random_state': '(0)'}), '(X_opt, Y, test_size=0.2, random_state=0)\n', (1951, 1992), False, 'from sklearn.model_selection import train_test_split\n'), ((2062, 2080), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (2078, 2080), False, 'from sklearn.linear_model import LinearRegression\n'), ((1260, 1287), 'statsmodels.formula.api.OLS', 'sm.OLS', ([], {'endog': 'Y', 'exog': 'X_opt'}), '(endog=Y, exog=X_opt)\n', (1266, 1287), True, 'import statsmodels.formula.api as sm\n'), ((1358, 1374), 'statsmodels.formula.api.OLS', 'sm.OLS', (['Y', 'X_opt'], {}), '(Y, X_opt)\n', (1364, 1374), True, 'import statsmodels.formula.api as sm\n'), ((1721, 1752), 'numpy.delete', 'np.delete', (['X_opt', '(i - 1)'], {'axis': '(1)'}), '(X_opt, i - 1, axis=1)\n', (1730, 1752), True, 'import numpy as np\n'), ((1161, 1177), 'numpy.ones', 'np.ones', (['(50, 1)'], {}), '((50, 1))\n', 
(1168, 1177), True, 'import numpy as np\n')] |
'''
Author: <NAME>
Youtube Channel: https://www.youtube.com/c/aiphile
'''
import cv2 as cv
import numpy as np
# colors
# values =(blue, green, red) opencv accepts BGR values not RGB
BLACK = (0,0,0)
WHITE = (255,255,255)
BLUE = (255,0,0)
RED = (0,0,255)
CYAN = (255,255,0)
YELLOW =(0,255,255)
MAGENTA = (255,0,255)
GRAY = (128,128,128)
GREEN = (0,255,0)
PURPLE = (128,0,128)
ORANGE = (0,165,255)
PINK = (147,20,255)
# Triangle vertices used by main() when demonstrating fillPolyTrans.
points_list =[(200, 300), (150, 150), (400, 200)]
def drawColor(img, colors):
    """Draw a horizontal strip of filled BGR swatches on *img*, each on a
    slightly larger dark-green backing rectangle."""
    swatch_w, swatch_h = 20, 30
    top = 10
    left = 0
    for color in colors:
        # advance one swatch width plus a 5px gap before drawing
        left += swatch_w + 5
        # backing rectangle, a few pixels larger than the swatch
        cv.rectangle(img, (left - 6, top - 5), (left + swatch_w + 5, top + swatch_h + 5), (10, 50, 10), -1)
        # the colour swatch itself
        cv.rectangle(img, (left, top), (left + swatch_w, top + swatch_h), color, -1)
def textWithBackground(img, text, font, fontScale, textPos, textThickness=1, textColor=(0,255,0), bgColor=(0,0,0), pad_x=3, pad_y=3, bgOpacity=0.5):
    """
    Draws text with background, with control transparency
    @param img:(mat) which you want to draw text
    @param text: (string) text you want draw
    @param font: fonts face, like FONT_HERSHEY_COMPLEX, FONT_HERSHEY_PLAIN etc.
    @param fontScale: (double) the size of text, how big it should be.
    @param textPos: tuple(x,y) position where you want to draw text
    @param textThickness:(int) fonts weight, how bold it should be.
    @param textColor: tuple(BGR), values -->0 to 255 each
    @param bgColor: tuple(BGR), values -->0 to 255 each
    @param pad_x: int(pixels) padding in x direction
    @param pad_y: int(pixels) padding in y direction
    @param bgOpacity: float 0 to 1.0, controls transparency of the text background
    @return: img(mat) with text drawn over a semi-transparent background
    """
    (text_w, text_h), _ = cv.getTextSize(text, font, fontScale, textThickness)
    x, y = textPos
    # paint the opaque background on a copy, then alpha-blend it back
    canvas = img.copy()
    cv.rectangle(canvas, (x - pad_x, y + pad_y), (x + text_w + pad_x, y - text_h - pad_y), bgColor, -1)
    blended = cv.addWeighted(canvas, bgOpacity, img, 1 - bgOpacity, 0)
    # the text itself is drawn fully opaque on the blended result
    cv.putText(blended, text, textPos, font, fontScale, textColor, textThickness)
    return blended
def textBlurBackground(img, text, font, fontScale, textPos, textThickness=1, textColor=(0,255,0), kneral=(33,33), pad_x=3, pad_y=3):
    """
    Draw text with its background region blurred in place.
    @param img:(mat) which you want to draw text
    @param text: (string) text you want draw
    @param font: fonts face, like FONT_HERSHEY_COMPLEX, FONT_HERSHEY_PLAIN etc.
    @param fontScale: (double) the size of text, how big it should be.
    @param textPos: tuple(x,y) position where you want to draw text
    @param textThickness:(int) fonts weight, how bold it should be.
    @param textColor: tuple(BGR), values -->0 to 255 each
    @param kneral: tuple(odd, odd) blur kernel; higher values blur more
    @param pad_x: int(pixels) padding in x direction
    @param pad_y: int(pixels) padding in y direction
    @return: img(mat) mutated in place, with the text region blurred and text drawn

    call the function:
    img = textBlurBackground(img, 'Blured Background Text', cv2.FONT_HERSHEY_COMPLEX, 0.9, (20, 60), 2, (0,255, 0), (49,49), 13, 13)
    """
    (text_w, text_h), _ = cv.getTextSize(text, font, fontScale, textThickness)
    x, y = textPos
    top, bottom = y - pad_y - text_h, y + pad_y
    left, right = x - pad_x, x + text_w + pad_x
    # blur only the rectangle the text will occupy, writing it back in place
    img[top:bottom, left:right] = cv.blur(img[top:bottom, left:right], kneral)
    cv.putText(img, text, textPos, font, fontScale, textColor, textThickness)
    return img
def fillPolyTrans(img, points, color, opacity):
    """
    Fill a custom polygon with a semi-transparent colour.
    @param img: (mat) input image, where the shape is drawn.
    @param points: list[tuple(int, int)] vertices of the polygon
    @param color: tuple(int, int, int) BGR fill colour
    @param opacity: float, transparency of the overlay (1.0 = opaque)
    @return: img(mat) with the blended polygon drawn
    """
    polygon = np.array(points, dtype=np.int32)
    # fill on a copy, then alpha-blend the copy over the original
    layer = img.copy()
    cv.fillPoly(layer, [polygon], color)
    return cv.addWeighted(layer, opacity, img, 1 - opacity, 0)
def rectTrans(img, pt1, pt2, color, thickness, opacity):
    """
    Draw a semi-transparent rectangle.
    @param img: (mat) input image, where the shape is drawn.
    @param pt1: tuple(int,int) top-left corner (x, y)
    @param pt2: tuple(int,int) opposite corner (x, y)
    @param color: tuple(int, int, int) BGR colour
    @param thickness: border thickness; -1 fills the rectangle
    @param opacity: float, transparency of the overlay (1.0 = opaque)
    @return: img(mat) with the blended rectangle drawn
    """
    # draw on a copy, then alpha-blend the copy over the original
    layer = img.copy()
    cv.rectangle(layer, pt1, pt2, color, thickness)
    return cv.addWeighted(layer, opacity, img, 1 - opacity, 0)
def main():
    """Demo loop: read frames from Girl.mp4, draw every helper's output
    on each frame, display it and save it to image/image_<n>.png until
    'q' is pressed."""
    cap = cv.VideoCapture('Girl.mp4')
    counter =0
    while True:
        # NOTE(review): success is never checked; cap.read() returns
        # (False, None) at end of stream and the draw calls would fail.
        success, img = cap.read()
        # img = np.zeros((1000,1000, 3), dtype=np.uint8)
        img=rectTrans(img, pt1=(30, 320), pt2=(160, 260), color=(0,255,255),thickness=-1, opacity=0.6)
        img =fillPolyTrans(img=img, points=points_list, color=(0,255,0), opacity=.5)
        drawColor(img, [BLACK,WHITE ,BLUE,RED,CYAN,YELLOW,MAGENTA,GRAY ,GREEN,PURPLE,ORANGE,PINK])
        textBlurBackground(img, 'Blured Background Text', cv.FONT_HERSHEY_COMPLEX, 0.8, (60, 140),2, YELLOW, (71,71), 13, 13)
        img=textWithBackground(img, 'Colored Background Texts', cv.FONT_HERSHEY_SIMPLEX, 0.8, (60,80), textThickness=2, bgColor=GREEN, textColor=BLACK, bgOpacity=0.7, pad_x=6, pad_y=6)
        # NOTE(review): imgGray is computed but never used.
        imgGray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
        # cv.imwrite('color_image.png', img)
        counter +=1
        cv.imshow('img', img)
        cv.imwrite(f'image/image_{counter}.png', img)
        if cv.waitKey(1) ==ord('q'):
            break
if __name__ == "__main__":
    main()
| [
"cv2.putText",
"cv2.cvtColor",
"cv2.imwrite",
"cv2.waitKey",
"cv2.getTextSize",
"cv2.blur",
"cv2.addWeighted",
"cv2.fillPoly",
"cv2.VideoCapture",
"numpy.array",
"cv2.rectangle",
"cv2.imshow"
] | [((1801, 1853), 'cv2.getTextSize', 'cv.getTextSize', (['text', 'font', 'fontScale', 'textThickness'], {}), '(text, font, fontScale, textThickness)\n', (1815, 1853), True, 'import cv2 as cv\n'), ((1945, 2043), 'cv2.rectangle', 'cv.rectangle', (['overlay', '(x - pad_x, y + pad_y)', '(x + t_w + pad_x, y - t_h - pad_y)', 'bgColor', '(-1)'], {}), '(overlay, (x - pad_x, y + pad_y), (x + t_w + pad_x, y - t_h -\n pad_y), bgColor, -1)\n', (1957, 2043), True, 'import cv2 as cv\n'), ((2060, 2117), 'cv2.addWeighted', 'cv.addWeighted', (['overlay', 'bgOpacity', 'img', '(1 - bgOpacity)', '(0)'], {}), '(overlay, bgOpacity, img, 1 - bgOpacity, 0)\n', (2074, 2117), True, 'import cv2 as cv\n'), ((2163, 2240), 'cv2.putText', 'cv.putText', (['new_img', 'text', 'textPos', 'font', 'fontScale', 'textColor', 'textThickness'], {}), '(new_img, text, textPos, font, fontScale, textColor, textThickness)\n', (2173, 2240), True, 'import cv2 as cv\n'), ((3428, 3480), 'cv2.getTextSize', 'cv.getTextSize', (['text', 'font', 'fontScale', 'textThickness'], {}), '(text, font, fontScale, textThickness)\n', (3442, 3480), True, 'import cv2 as cv\n'), ((3663, 3688), 'cv2.blur', 'cv.blur', (['blur_roi', 'kneral'], {}), '(blur_roi, kneral)\n', (3670, 3688), True, 'import cv2 as cv\n'), ((3733, 3806), 'cv2.putText', 'cv.putText', (['img', 'text', 'textPos', 'font', 'fontScale', 'textColor', 'textThickness'], {}), '(img, text, textPos, font, fontScale, textColor, textThickness)\n', (3743, 3806), True, 'import cv2 as cv\n'), ((4272, 4304), 'numpy.array', 'np.array', (['points'], {'dtype': 'np.int32'}), '(points, dtype=np.int32)\n', (4280, 4304), True, 'import numpy as np\n'), ((4354, 4401), 'cv2.fillPoly', 'cv.fillPoly', (['overlay', '[list_to_np_array]', 'color'], {}), '(overlay, [list_to_np_array], color)\n', (4365, 4401), True, 'import cv2 as cv\n'), ((4416, 4469), 'cv2.addWeighted', 'cv.addWeighted', (['overlay', 'opacity', 'img', '(1 - opacity)', '(0)'], {}), '(overlay, opacity, img, 1 - opacity, 
0)\n', (4430, 4469), True, 'import cv2 as cv\n'), ((5095, 5144), 'cv2.rectangle', 'cv.rectangle', (['overlay', 'pt1', 'pt2', 'color', 'thickness'], {}), '(overlay, pt1, pt2, color, thickness)\n', (5107, 5144), True, 'import cv2 as cv\n'), ((5159, 5212), 'cv2.addWeighted', 'cv.addWeighted', (['overlay', 'opacity', 'img', '(1 - opacity)', '(0)'], {}), '(overlay, opacity, img, 1 - opacity, 0)\n', (5173, 5212), True, 'import cv2 as cv\n'), ((5311, 5338), 'cv2.VideoCapture', 'cv.VideoCapture', (['"""Girl.mp4"""'], {}), "('Girl.mp4')\n", (5326, 5338), True, 'import cv2 as cv\n'), ((609, 684), 'cv2.rectangle', 'cv.rectangle', (['img', '(x - 6, y - 5)', '(x + w + 5, y + h + 5)', '(10, 50, 10)', '(-1)'], {}), '(img, (x - 6, y - 5), (x + w + 5, y + h + 5), (10, 50, 10), -1)\n', (621, 684), True, 'import cv2 as cv\n'), ((682, 734), 'cv2.rectangle', 'cv.rectangle', (['img', '(x, y)', '(x + w, y + h)', 'color', '(-1)'], {}), '(img, (x, y), (x + w, y + h), color, -1)\n', (694, 734), True, 'import cv2 as cv\n'), ((6077, 6112), 'cv2.cvtColor', 'cv.cvtColor', (['img', 'cv.COLOR_BGR2GRAY'], {}), '(img, cv.COLOR_BGR2GRAY)\n', (6088, 6112), True, 'import cv2 as cv\n'), ((6186, 6207), 'cv2.imshow', 'cv.imshow', (['"""img"""', 'img'], {}), "('img', img)\n", (6195, 6207), True, 'import cv2 as cv\n'), ((6216, 6261), 'cv2.imwrite', 'cv.imwrite', (['f"""image/image_{counter}.png"""', 'img'], {}), "(f'image/image_{counter}.png', img)\n", (6226, 6261), True, 'import cv2 as cv\n'), ((6273, 6286), 'cv2.waitKey', 'cv.waitKey', (['(1)'], {}), '(1)\n', (6283, 6286), True, 'import cv2 as cv\n')] |
from heritageconnector.disambiguation import retrieve, pipelines
from heritageconnector.namespace import OWL, SKOS
import numpy as np
from rdflib import URIRef, Literal
import pytest
def test_disambiguator_process_wbgetentities_results():
    """Processed Wikidata date fields (P569 birth / P570 death) should be
    whole years, i.e. convertible to int without loss."""
    entity_ids = ["Q2897681", "Q75931117", "Q6198902", "Q3805088"]
    property_ids = ["P735", "P734", "P21", "P569", "P570", "P106", "P31"]
    disambiguator = pipelines.Disambiguator(table_name="PERSON")
    raw_results = retrieve.get_wikidata_fields(property_ids, entity_ids)
    processed = disambiguator._process_wikidata_results(raw_results)
    dates = processed["P569"].tolist() + processed["P570"].tolist()
    # every non-empty date value must survive a round trip through int()
    for value in dates:
        if value != "":
            assert str(int(value)) == str(value)
# @pytest.mark.skip(reason="relies on local fuseki instance running")
def test_disambiguator_get_unique_predicates():
    """_get_predicates returns a non-empty, duplicate-free list that
    excludes every predicate passed in predicates_ignore."""
    ignored = [OWL.sameAs, SKOS.hasTopConcept]
    disambiguator = pipelines.Disambiguator(table_name="PERSON")
    predicates = disambiguator._get_predicates(predicates_ignore=ignored)
    assert len(predicates) > 0
    # no duplicate predicates
    assert len(predicates) == len(set(predicates))
    # none of the ignored predicates appear in the result
    assert not set(predicates) & set(ignored)
# @pytest.mark.skip(reason="relies on local fuseki instance running")
def test_disambiguator_get_triples():
    """The triple store should contain at least one (?, hasTopConcept,
    "PERSON") triple."""
    disambiguator = pipelines.Disambiguator(table_name="PERSON")
    pattern = (
        None,
        URIRef("http://www.w3.org/2004/02/skos/core#hasTopConcept"),
        Literal("PERSON"),
    )
    triples = list(disambiguator._get_triples_from_store(pattern))
    assert len(triples) > 0
def test_disambiguator_make_training_data():
    """build_training_data should return a feature matrix X (values in
    [0, 1]), labels y, column names and id pairs of consistent sizes,
    with continuous similarity for text/date columns and binary
    similarity for categorical columns."""
    d = pipelines.Disambiguator(table_name="PERSON")
    X, y, X_columns, id_pair_list = d.build_training_data(
        True, page_size=100, limit=200, search_limit=10
    )
    # array sizes
    assert X.shape[0] == y.shape[0] == len(id_pair_list)
    assert X.shape[1] == len(X_columns)
    # values
    assert (X >= 0).all()
    assert (X <= 1).all()
    # each column has a sum greater than zero (i.e. no column is empty)
    assert all([i > 0 for i in X.sum(axis=0)])
    # for different types
    for idx, col in enumerate(X_columns):
        print(col)
        if col in ["label", "P735", "P734", "P31", "P569", "P570"]:
            # text and numerical similarity are continuous, so some values won't
            # exactly round to 2 decimal places
            # TODO: occupations should be in the categorical list below, once they've been resolved to Wikidata entities
            assert (X[:, idx].round(2) != X[:, idx]).any()
        elif col in ["P21", "P106"]:
            # categorical similarity is in [0,1]
            assert (np.isin(X[:, idx], [0, 1])).all()
| [
"numpy.isin",
"rdflib.Literal",
"heritageconnector.disambiguation.pipelines.Disambiguator",
"rdflib.URIRef",
"heritageconnector.disambiguation.retrieve.get_wikidata_fields"
] | [((377, 421), 'heritageconnector.disambiguation.pipelines.Disambiguator', 'pipelines.Disambiguator', ([], {'table_name': '"""PERSON"""'}), "(table_name='PERSON')\n", (400, 421), False, 'from heritageconnector.disambiguation import retrieve, pipelines\n'), ((445, 485), 'heritageconnector.disambiguation.retrieve.get_wikidata_fields', 'retrieve.get_wikidata_fields', (['pids', 'qids'], {}), '(pids, qids)\n', (473, 485), False, 'from heritageconnector.disambiguation import retrieve, pipelines\n'), ((976, 1020), 'heritageconnector.disambiguation.pipelines.Disambiguator', 'pipelines.Disambiguator', ([], {'table_name': '"""PERSON"""'}), "(table_name='PERSON')\n", (999, 1020), False, 'from heritageconnector.disambiguation import retrieve, pipelines\n'), ((1414, 1458), 'heritageconnector.disambiguation.pipelines.Disambiguator', 'pipelines.Disambiguator', ([], {'table_name': '"""PERSON"""'}), "(table_name='PERSON')\n", (1437, 1458), False, 'from heritageconnector.disambiguation import retrieve, pipelines\n'), ((1761, 1805), 'heritageconnector.disambiguation.pipelines.Disambiguator', 'pipelines.Disambiguator', ([], {'table_name': '"""PERSON"""'}), "(table_name='PERSON')\n", (1784, 1805), False, 'from heritageconnector.disambiguation import retrieve, pipelines\n'), ((1536, 1595), 'rdflib.URIRef', 'URIRef', (['"""http://www.w3.org/2004/02/skos/core#hasTopConcept"""'], {}), "('http://www.w3.org/2004/02/skos/core#hasTopConcept')\n", (1542, 1595), False, 'from rdflib import URIRef, Literal\n'), ((1609, 1626), 'rdflib.Literal', 'Literal', (['"""PERSON"""'], {}), "('PERSON')\n", (1616, 1626), False, 'from rdflib import URIRef, Literal\n'), ((2801, 2827), 'numpy.isin', 'np.isin', (['X[:, idx]', '[0, 1]'], {}), '(X[:, idx], [0, 1])\n', (2808, 2827), True, 'import numpy as np\n')] |
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import shutil
from unittest import TestCase
import numpy as np
import pytest
import tensorflow as tf
from zoo import init_nncontext
from zoo.orca.data import XShards
import zoo.orca.data.pandas
from zoo.orca.learn.tf2 import Estimator
from zoo.ray import RayContext
import ray
# Sizes of the synthetic train/test sets used by the dataset creators below.
NUM_TRAIN_SAMPLES = 1000
NUM_TEST_SAMPLES = 400
import os
# Absolute path to the shared test resources directory.
resource_path = os.path.join(
    os.path.realpath(os.path.dirname(__file__)), "../../../../resources")
def linear_dataset(a=2, size=1000):
    """Generate a toy regression set: x uniform in [0, 1) and y = x / 2.

    Returns (x, y) as float column vectors of shape (size, 1).
    NOTE: *a* is accepted for signature compatibility but unused here.
    """
    features = np.random.rand(size)
    targets = features / 2
    return features.reshape((-1, 1)), targets.reshape((-1, 1))
def create_train_datasets(config, batch_size):
    """Build a shuffled, batched tf.data pipeline over the toy linear
    dataset with NUM_TRAIN_SAMPLES examples."""
    import tensorflow as tf
    features, labels = linear_dataset(size=NUM_TRAIN_SAMPLES)
    dataset = tf.data.Dataset.from_tensor_slices((features, labels))
    # shuffle across the whole set, then batch
    return dataset.shuffle(NUM_TRAIN_SAMPLES).batch(batch_size)
def create_test_dataset(config, batch_size):
    """Build a batched (unshuffled) tf.data pipeline over the toy linear
    dataset with NUM_TEST_SAMPLES examples."""
    import tensorflow as tf
    features, labels = linear_dataset(size=NUM_TEST_SAMPLES)
    return tf.data.Dataset.from_tensor_slices((features, labels)).batch(batch_size)
def simple_model(config):
    """Two-layer dense regression network for a single scalar input."""
    import tensorflow as tf
    net = tf.keras.models.Sequential()
    net.add(tf.keras.layers.Dense(10, input_shape=(1,)))
    net.add(tf.keras.layers.Dense(1))
    return net
def compile_args(config):
    """Build the kwargs for keras ``Model.compile`` used by the toy model.

    Args:
        config: optional dict; an ``"lr"`` entry overrides the default SGD
            learning rate of 1e-3. ``None`` is now treated like an empty
            config — the original ``"lr" in config`` raised ``TypeError``
            for ``None``, yet ``config=None`` is passed to
            ``Estimator.from_keras`` elsewhere in this file.

    Returns:
        dict with "optimizer", "loss" and "metrics" entries.
    """
    import tensorflow as tf
    lr = (config or {}).get("lr", 1e-3)
    args = {
        "optimizer": tf.keras.optimizers.SGD(lr),
        "loss": "mean_squared_error",
        "metrics": ["mean_squared_error"]
    }
    return args
def model_creator(config):
    """Create and compile the simple regression model for the Estimator."""
    net = simple_model(config)
    net.compile(**compile_args(config))
    return net
def identity_model_creator(config):
    """Build a compiled model that echoes its scalar input unchanged
    (used by test_dataframe_predict to check feature round-tripping)."""
    model = tf.keras.models.Sequential([
        # NOTE(review): input_shape=(1) is the int 1, not the tuple (1,);
        # presumably accepted by TensorShape — confirm against the TF version.
        tf.keras.layers.InputLayer(input_shape=(1)),
        tf.keras.layers.Lambda(lambda x: tf.identity(x))
    ])
    model.compile()
    return model
def create_auto_shard_datasets(config, batch_size):
    """Build a batched (value, value) dataset from the auto-shard CSV
    fixtures, used to probe whether data is sharded by file or record."""
    import tensorflow as tf
    data_path = os.path.join(resource_path, "orca/learn/test_auto_shard/*.csv")
    dataset = tf.data.Dataset.list_files(data_path)
    # read each matched file line by line, interleaved across files
    dataset = dataset.interleave(lambda x: tf.data.TextLineDataset(x))
    dataset = dataset.map(lambda x: tf.strings.to_number(x))
    # duplicate the value so it serves as both feature and label
    dataset = dataset.map(lambda x: (x, x))
    dataset = dataset.batch(batch_size)
    return dataset
def create_auto_shard_model(config):
    """Identity model for the auto-shard tests: passes input through
    unchanged."""
    import tensorflow as tf
    passthrough = tf.keras.layers.Lambda(lambda x: tf.identity(x))
    return tf.keras.models.Sequential([passthrough])
def create_auto_shard_compile_args(config):
    """Compile args whose loss measures the difference between the two
    records of each batch — zero iff both records are identical (see the
    auto-shard test comments), with a zero learning rate so training
    never changes the weights."""
    import tensorflow as tf
    def loss_func(y1, y2):
        # |record0 - record1| for both the feature and label tensors
        return tf.abs(y1[0] - y1[1]) + tf.abs(y2[0] - y2[1])
    args = {
        "optimizer": tf.keras.optimizers.SGD(lr=0.0),
        "loss": loss_func,
    }
    return args
def auto_shard_model_creator(config):
    """Create and compile the identity auto-shard probe model."""
    probe = create_auto_shard_model(config)
    probe.compile(**create_auto_shard_compile_args(config))
    return probe
class LRChecker(tf.keras.callbacks.Callback):
    """Callback asserting the learning rate follows the expected schedule:
    a 5-epoch warmup ramp to 0.4, then 0.4 held for 5 epochs, then a 10x
    decay every 5 epochs."""
    def __init__(self, *args):
        super(LRChecker, self).__init__(*args)
        # expected per-epoch learning rates during warmup (epochs 0-4)
        self.warmup_lr = [0.16, 0.22, 0.28, 0.34, 0.4]
    def on_epoch_end(self, epoch, logs=None):
        """Read the optimizer's current LR and assert it matches the
        schedule for this epoch (tolerance 1e-5)."""
        current_lr = tf.keras.backend.get_value(self.model.optimizer.lr)
        print("epoch {} current lr is {}".format(epoch, current_lr))
        if epoch < 5:
            assert abs(current_lr - self.warmup_lr[epoch]) < 1e-5
        elif 5 <= epoch < 10:
            assert abs(current_lr - 0.4) < 1e-5
        elif 10 <= epoch < 15:
            assert abs(current_lr - 0.04) < 1e-5
        elif 15 <= epoch < 20:
            assert abs(current_lr - 0.004) < 1e-5
        else:
            assert abs(current_lr - 0.0004) < 1e-5
class TestTFRayEstimator(TestCase):
    def impl_test_fit_and_evaluate(self, backend):
        """Train the toy regression model for 4 epochs on the given
        backend ("tf2" or "horovod") and assert that validation loss and
        MSE both decrease versus the untrained baseline."""
        import tensorflow as tf
        ray_ctx = RayContext.get()
        batch_size = 32
        # scale the batch size by the number of Ray nodes
        global_batch_size = batch_size * ray_ctx.num_ray_nodes
        if backend == "horovod":
            trainer = Estimator.from_keras(
                model_creator=simple_model,
                compile_args_creator=compile_args,
                verbose=True,
                config=None,
                backend=backend)
        else:
            trainer = Estimator.from_keras(model_creator=model_creator,
                                 verbose=True,
                                 config=None,
                                 backend=backend,
                                 workers_per_node=2)
        # model baseline performance
        start_stats = trainer.evaluate(create_test_dataset, batch_size=global_batch_size,
                                       num_steps=NUM_TEST_SAMPLES // global_batch_size)
        print(start_stats)
        def scheduler(epoch):
            # constant LR for 2 epochs, then exponential decay
            if epoch < 2:
                return 0.001
            else:
                return 0.001 * tf.math.exp(0.1 * (2 - epoch))
        # rebind the name: the callback wraps the scheduling function above
        scheduler = tf.keras.callbacks.LearningRateScheduler(scheduler, verbose=1)
        # train for 2 epochs
        trainer.fit(create_train_datasets, epochs=2, batch_size=global_batch_size,
                    steps_per_epoch=10, callbacks=[scheduler])
        trainer.fit(create_train_datasets, epochs=2, batch_size=global_batch_size,
                    steps_per_epoch=10, callbacks=[scheduler])
        # model performance after training (should improve)
        end_stats = trainer.evaluate(create_test_dataset, batch_size=global_batch_size,
                                     num_steps=NUM_TEST_SAMPLES // global_batch_size)
        print(end_stats)
        # sanity check that training worked
        dloss = end_stats["validation_loss"] - start_stats["validation_loss"]
        dmse = (end_stats["validation_mean_squared_error"] -
                start_stats["validation_mean_squared_error"])
        print(f"dLoss: {dloss}, dMSE: {dmse}")
        assert dloss < 0 and dmse < 0, "training sanity check failed. loss increased!"
    def test_fit_and_evaluate_tf(self):
        """Run the fit/evaluate sanity check on the tf2 backend."""
        self.impl_test_fit_and_evaluate(backend="tf2")
    def test_fit_and_evaluate_horovod(self):
        """Run the fit/evaluate sanity check on the horovod backend."""
        self.impl_test_fit_and_evaluate(backend="horovod")
    def test_auto_shard_tf(self):
        """Verify the tf2 backend shards the dataset by file: a zero loss
        means every worker's batch held identical records."""
        # file 1 contains all 0s, file 2 contains all 1s
        # If shard by files, then each model will
        # see the same records in the same batch.
        # If shard by records, then each batch
        # will have different records.
        # The loss func is constructed such that
        # the former case will return 0, and the latter
        # case will return non-zero.
        ray_ctx = RayContext.get()
        trainer = Estimator.from_keras(
            model_creator=auto_shard_model_creator,
            verbose=True,
            backend="tf2", workers_per_node=2)
        stats = trainer.fit(create_auto_shard_datasets, epochs=1, batch_size=4, steps_per_epoch=2)
        assert stats["train_loss"] == 0.0
    def test_auto_shard_horovod(self):
        """Verify the horovod backend shards the dataset by file: a zero
        loss means every worker's batch held identical records."""
        # file 1 contains all 0s, file 2 contains all 1s
        # If shard by files, then each model will
        # see the same records in the same batch.
        # If shard by records, then each batch
        # will have different records.
        # The loss func is constructed such that
        # the former case will return 0, and the latter
        # case will return non-zero.
        ray_ctx = RayContext.get()
        trainer = Estimator.from_keras(
            model_creator=create_auto_shard_model,
            compile_args_creator=create_auto_shard_compile_args,
            verbose=True,
            backend="horovod", workers_per_node=2)
        stats = trainer.fit(create_auto_shard_datasets, epochs=1, batch_size=4, steps_per_epoch=2)
        assert stats["train_loss"] == 0.0
    # this needs horovod >= 0.19.2
    def test_horovod_learning_rate_schedule(self):
        """Train for 30 single-epoch rounds with Horovod warmup/decay LR
        callbacks; LRChecker asserts the schedule each epoch. Skipped
        silently on horovod < 0.19.2."""
        import horovod
        major, minor, patch = horovod.__version__.split(".")
        larger_major = int(major) > 0
        larger_minor = int(major) == 0 and int(minor) > 19
        larger_patch = int(major) == 0 and int(minor) == 19 and int(patch) >= 2
        if larger_major or larger_minor or larger_patch:
            ray_ctx = RayContext.get()
            batch_size = 32
            workers_per_node = 4
            global_batch_size = batch_size * workers_per_node
            config = {
                "lr": 0.8
            }
            trainer = Estimator.from_keras(
                model_creator=simple_model,
                compile_args_creator=compile_args,
                verbose=True,
                config=config,
                backend="horovod", workers_per_node=workers_per_node)
            import horovod.tensorflow.keras as hvd
            # warmup to 0.4 over 5 epochs, then 10x decay every 5 epochs;
            # LRChecker (see above) asserts the observed LR each epoch
            callbacks = [
                hvd.callbacks.LearningRateWarmupCallback(warmup_epochs=5, initial_lr=0.4,
                                                         verbose=True),
                hvd.callbacks.LearningRateScheduleCallback(start_epoch=5, end_epoch=10,
                                                           multiplier=1., initial_lr=0.4),
                hvd.callbacks.LearningRateScheduleCallback(start_epoch=10, end_epoch=15,
                                                           multiplier=1e-1, initial_lr=0.4),
                hvd.callbacks.LearningRateScheduleCallback(start_epoch=15, end_epoch=20,
                                                           multiplier=1e-2, initial_lr=0.4),
                hvd.callbacks.LearningRateScheduleCallback(start_epoch=20, multiplier=1e-3,
                                                           initial_lr=0.4),
                LRChecker()
            ]
            for i in range(30):
                trainer.fit(create_train_datasets, epochs=1, batch_size=global_batch_size,
                            callbacks=callbacks)
        else:
            # skip tests in horovod lower version
            pass
    def test_sparkxshards(self):
        """Fit and evaluate the toy model on an XShards of numpy arrays."""
        # NOTE(review): np.random.randint(0, 1, ...) always yields 0, so
        # every label is 0 — presumably intentional for a smoke test.
        train_data_shard = XShards.partition({"x": np.random.randn(100, 1),
                                            "y": np.random.randint(0, 1, size=(100))})
        config = {
            "lr": 0.8
        }
        trainer = Estimator.from_keras(
            model_creator=model_creator,
            verbose=True,
            config=config,
            workers_per_node=2)
        trainer.fit(train_data_shard, epochs=1, batch_size=4, steps_per_epoch=25)
        trainer.evaluate(train_data_shard, batch_size=4, num_steps=25)
def test_dataframe(self):
sc = init_nncontext()
rdd = sc.range(0, 10)
from pyspark.sql import SparkSession
spark = SparkSession(sc)
from pyspark.ml.linalg import DenseVector
df = rdd.map(lambda x: (DenseVector(np.random.randn(1,).astype(np.float)),
int(np.random.randint(0, 1, size=())))).toDF(["feature", "label"])
config = {
"lr": 0.8
}
trainer = Estimator.from_keras(
model_creator=model_creator,
verbose=True,
config=config,
workers_per_node=2)
trainer.fit(df, epochs=1, batch_size=4, steps_per_epoch=25,
feature_cols=["feature"],
label_cols=["label"])
trainer.evaluate(df, batch_size=4, num_steps=25, feature_cols=["feature"],
label_cols=["label"])
trainer.predict(df, feature_cols=["feature"]).collect()
def test_dataframe_shard_size(self):
from zoo.orca import OrcaContext
OrcaContext._shard_size = 3
sc = init_nncontext()
rdd = sc.range(0, 10)
from pyspark.sql import SparkSession
spark = SparkSession(sc)
from pyspark.ml.linalg import DenseVector
df = rdd.map(lambda x: (DenseVector(np.random.randn(1,).astype(np.float)),
int(np.random.randint(0, 1, size=())))).toDF(["feature", "label"])
config = {
"lr": 0.8
}
trainer = Estimator.from_keras(
model_creator=model_creator,
verbose=True,
config=config,
workers_per_node=2)
trainer.fit(df, epochs=1, batch_size=4, steps_per_epoch=25,
feature_cols=["feature"],
label_cols=["label"])
trainer.evaluate(df, batch_size=4, num_steps=25, feature_cols=["feature"],
label_cols=["label"])
trainer.predict(df, feature_cols=["feature"]).collect()
def test_dataframe_predict(self):
sc = init_nncontext()
rdd = sc.parallelize(range(20))
df = rdd.map(lambda x: ([float(x)] * 5,
[int(np.random.randint(0, 2, size=()))])
).toDF(["feature", "label"])
estimator = Estimator.from_keras(
model_creator=identity_model_creator,
verbose=True,
config={},
workers_per_node=2)
result = estimator.predict(df, batch_size=4,
feature_cols=["feature"])
expr = "sum(cast(feature <> to_array(prediction) as int)) as error"
assert result.selectExpr(expr).first()["error"] == 0
def test_sparkxshards_with_inbalanced_data(self):
train_data_shard = XShards.partition({"x": np.random.randn(100, 1),
"y": np.random.randint(0, 1, size=(100))})
def random_pad(data):
import numpy as np
import random
times = random.randint(1, 10)
data["x"] = np.concatenate([data["x"]] * times)
data["y"] = np.concatenate([data["y"]] * times)
return data
train_data_shard = train_data_shard.transform_shard(random_pad)
config = {
"lr": 0.8
}
trainer = Estimator.from_keras(
model_creator=model_creator,
verbose=True,
config=config,
workers_per_node=2)
trainer.fit(train_data_shard, epochs=1, batch_size=4, steps_per_epoch=25)
trainer.evaluate(train_data_shard, batch_size=4, num_steps=25)
def test_predict_xshards(self):
train_data_shard = XShards.partition({"x": np.random.randn(100, 1),
"y": np.random.randint(0, 1, size=(100,))})
expected = train_data_shard.collect()
expected = [shard["x"] for shard in expected]
for x in expected:
print(x.shape)
expected = np.concatenate(expected)
config = {
}
trainer = Estimator.from_keras(
model_creator=identity_model_creator,
verbose=True,
config=config,
workers_per_node=2)
result_shards = trainer.predict(train_data_shard, batch_size=10).collect()
result = [shard["prediction"] for shard in result_shards]
expected_result = [shard["x"] for shard in result_shards]
result = np.concatenate(result)
assert np.allclose(expected, result)
def test_save_and_load(self):
def model_creator(config):
import tensorflow as tf
model = tf.keras.Sequential([
tf.keras.layers.Conv2D(64, kernel_size=(3, 3), strides=(1, 1), activation='relu',
padding='valid'),
tf.keras.layers.BatchNormalization(),
tf.keras.layers.MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid'),
tf.keras.layers.Conv2D(64, kernel_size=(3, 3), strides=(1, 1), activation='relu',
padding='valid'),
tf.keras.layers.MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid'),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(10, activation='softmax')]
)
model.compile(optimizer=tf.keras.optimizers.RMSprop(),
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
return model
def train_data_creator(config, batch_size):
dataset = tf.data.Dataset.from_tensor_slices((np.random.randn(100, 28, 28, 3),
np.random.randint(0, 10, (100, 1))))
dataset = dataset.repeat()
dataset = dataset.shuffle(1000)
dataset = dataset.batch(batch_size)
return dataset
batch_size = 320
try:
est = Estimator.from_keras(model_creator=model_creator, workers_per_node=2)
history = est.fit(train_data_creator,
epochs=1,
batch_size=batch_size,
steps_per_epoch=5)
print("start saving")
est.save("/tmp/cifar10_keras.ckpt")
est.load("/tmp/cifar10_keras.ckpt")
print("save success")
finally:
os.remove("/tmp/cifar10_keras.ckpt")
| [
"os.remove",
"tensorflow.strings.to_number",
"tensorflow.keras.layers.MaxPooling2D",
"tensorflow.keras.layers.Dense",
"tensorflow.identity",
"zoo.init_nncontext",
"numpy.allclose",
"tensorflow.keras.optimizers.SGD",
"numpy.random.randint",
"horovod.tensorflow.keras.callbacks.LearningRateWarmupCall... | [((1081, 1101), 'numpy.random.rand', 'np.random.rand', (['size'], {}), '(size)\n', (1095, 1101), True, 'import numpy as np\n'), ((1348, 1402), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(x_train, y_train)'], {}), '((x_train, y_train))\n', (1382, 1402), True, 'import tensorflow as tf\n'), ((1671, 1723), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(x_test, y_test)'], {}), '((x_test, y_test))\n', (1705, 1723), True, 'import tensorflow as tf\n'), ((2774, 2837), 'os.path.join', 'os.path.join', (['resource_path', '"""orca/learn/test_auto_shard/*.csv"""'], {}), "(resource_path, 'orca/learn/test_auto_shard/*.csv')\n", (2786, 2837), False, 'import os\n'), ((2852, 2889), 'tensorflow.data.Dataset.list_files', 'tf.data.Dataset.list_files', (['data_path'], {}), '(data_path)\n', (2878, 2889), True, 'import tensorflow as tf\n'), ((982, 1007), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (997, 1007), False, 'import os\n'), ((2191, 2218), 'tensorflow.keras.optimizers.SGD', 'tf.keras.optimizers.SGD', (['lr'], {}), '(lr)\n', (2214, 2218), True, 'import tensorflow as tf\n'), ((3513, 3544), 'tensorflow.keras.optimizers.SGD', 'tf.keras.optimizers.SGD', ([], {'lr': '(0.0)'}), '(lr=0.0)\n', (3536, 3544), True, 'import tensorflow as tf\n'), ((4006, 4057), 'tensorflow.keras.backend.get_value', 'tf.keras.backend.get_value', (['self.model.optimizer.lr'], {}), '(self.model.optimizer.lr)\n', (4032, 4057), True, 'import tensorflow as tf\n'), ((4658, 4674), 'zoo.ray.RayContext.get', 'RayContext.get', ([], {}), '()\n', (4672, 4674), False, 'from zoo.ray import RayContext\n'), ((5780, 5842), 'tensorflow.keras.callbacks.LearningRateScheduler', 'tf.keras.callbacks.LearningRateScheduler', (['scheduler'], {'verbose': '(1)'}), '(scheduler, verbose=1)\n', (5820, 5842), True, 'import tensorflow as tf\n'), ((7445, 7461), 
'zoo.ray.RayContext.get', 'RayContext.get', ([], {}), '()\n', (7459, 7461), False, 'from zoo.ray import RayContext\n'), ((7480, 7593), 'zoo.orca.learn.tf2.Estimator.from_keras', 'Estimator.from_keras', ([], {'model_creator': 'auto_shard_model_creator', 'verbose': '(True)', 'backend': '"""tf2"""', 'workers_per_node': '(2)'}), "(model_creator=auto_shard_model_creator, verbose=True,\n backend='tf2', workers_per_node=2)\n", (7500, 7593), False, 'from zoo.orca.learn.tf2 import Estimator\n'), ((8212, 8228), 'zoo.ray.RayContext.get', 'RayContext.get', ([], {}), '()\n', (8226, 8228), False, 'from zoo.ray import RayContext\n'), ((8247, 8420), 'zoo.orca.learn.tf2.Estimator.from_keras', 'Estimator.from_keras', ([], {'model_creator': 'create_auto_shard_model', 'compile_args_creator': 'create_auto_shard_compile_args', 'verbose': '(True)', 'backend': '"""horovod"""', 'workers_per_node': '(2)'}), "(model_creator=create_auto_shard_model,\n compile_args_creator=create_auto_shard_compile_args, verbose=True,\n backend='horovod', workers_per_node=2)\n", (8267, 8420), False, 'from zoo.orca.learn.tf2 import Estimator\n'), ((8743, 8773), 'horovod.__version__.split', 'horovod.__version__.split', (['"""."""'], {}), "('.')\n", (8768, 8773), False, 'import horovod\n'), ((11020, 11123), 'zoo.orca.learn.tf2.Estimator.from_keras', 'Estimator.from_keras', ([], {'model_creator': 'model_creator', 'verbose': '(True)', 'config': 'config', 'workers_per_node': '(2)'}), '(model_creator=model_creator, verbose=True, config=\n config, workers_per_node=2)\n', (11040, 11123), False, 'from zoo.orca.learn.tf2 import Estimator\n'), ((11367, 11383), 'zoo.init_nncontext', 'init_nncontext', ([], {}), '()\n', (11381, 11383), False, 'from zoo import init_nncontext\n'), ((11475, 11491), 'pyspark.sql.SparkSession', 'SparkSession', (['sc'], {}), '(sc)\n', (11487, 11491), False, 'from pyspark.sql import SparkSession\n'), ((11794, 11897), 'zoo.orca.learn.tf2.Estimator.from_keras', 'Estimator.from_keras', ([], 
{'model_creator': 'model_creator', 'verbose': '(True)', 'config': 'config', 'workers_per_node': '(2)'}), '(model_creator=model_creator, verbose=True, config=\n config, workers_per_node=2)\n', (11814, 11897), False, 'from zoo.orca.learn.tf2 import Estimator\n'), ((12425, 12441), 'zoo.init_nncontext', 'init_nncontext', ([], {}), '()\n', (12439, 12441), False, 'from zoo import init_nncontext\n'), ((12533, 12549), 'pyspark.sql.SparkSession', 'SparkSession', (['sc'], {}), '(sc)\n', (12545, 12549), False, 'from pyspark.sql import SparkSession\n'), ((12852, 12955), 'zoo.orca.learn.tf2.Estimator.from_keras', 'Estimator.from_keras', ([], {'model_creator': 'model_creator', 'verbose': '(True)', 'config': 'config', 'workers_per_node': '(2)'}), '(model_creator=model_creator, verbose=True, config=\n config, workers_per_node=2)\n', (12872, 12955), False, 'from zoo.orca.learn.tf2 import Estimator\n'), ((13403, 13419), 'zoo.init_nncontext', 'init_nncontext', ([], {}), '()\n', (13417, 13419), False, 'from zoo import init_nncontext\n'), ((13652, 13759), 'zoo.orca.learn.tf2.Estimator.from_keras', 'Estimator.from_keras', ([], {'model_creator': 'identity_model_creator', 'verbose': '(True)', 'config': '{}', 'workers_per_node': '(2)'}), '(model_creator=identity_model_creator, verbose=True,\n config={}, workers_per_node=2)\n', (13672, 13759), False, 'from zoo.orca.learn.tf2 import Estimator\n'), ((14694, 14797), 'zoo.orca.learn.tf2.Estimator.from_keras', 'Estimator.from_keras', ([], {'model_creator': 'model_creator', 'verbose': '(True)', 'config': 'config', 'workers_per_node': '(2)'}), '(model_creator=model_creator, verbose=True, config=\n config, workers_per_node=2)\n', (14714, 14797), False, 'from zoo.orca.learn.tf2 import Estimator\n'), ((15375, 15399), 'numpy.concatenate', 'np.concatenate', (['expected'], {}), '(expected)\n', (15389, 15399), True, 'import numpy as np\n'), ((15448, 15559), 'zoo.orca.learn.tf2.Estimator.from_keras', 'Estimator.from_keras', ([], {'model_creator': 
'identity_model_creator', 'verbose': '(True)', 'config': 'config', 'workers_per_node': '(2)'}), '(model_creator=identity_model_creator, verbose=True,\n config=config, workers_per_node=2)\n', (15468, 15559), False, 'from zoo.orca.learn.tf2 import Estimator\n'), ((15840, 15862), 'numpy.concatenate', 'np.concatenate', (['result'], {}), '(result)\n', (15854, 15862), True, 'import numpy as np\n'), ((15879, 15908), 'numpy.allclose', 'np.allclose', (['expected', 'result'], {}), '(expected, result)\n', (15890, 15908), True, 'import numpy as np\n'), ((1895, 1938), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(10)'], {'input_shape': '(1,)'}), '(10, input_shape=(1,))\n', (1916, 1938), True, 'import tensorflow as tf\n'), ((1980, 2004), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(1)'], {}), '(1)\n', (2001, 2004), True, 'import tensorflow as tf\n'), ((2530, 2571), 'tensorflow.keras.layers.InputLayer', 'tf.keras.layers.InputLayer', ([], {'input_shape': '(1)'}), '(input_shape=1)\n', (2556, 2571), True, 'import tensorflow as tf\n'), ((2933, 2959), 'tensorflow.data.TextLineDataset', 'tf.data.TextLineDataset', (['x'], {}), '(x)\n', (2956, 2959), True, 'import tensorflow as tf\n'), ((2997, 3020), 'tensorflow.strings.to_number', 'tf.strings.to_number', (['x'], {}), '(x)\n', (3017, 3020), True, 'import tensorflow as tf\n'), ((3432, 3453), 'tensorflow.abs', 'tf.abs', (['(y1[0] - y1[1])'], {}), '(y1[0] - y1[1])\n', (3438, 3453), True, 'import tensorflow as tf\n'), ((3456, 3477), 'tensorflow.abs', 'tf.abs', (['(y2[0] - y2[1])'], {}), '(y2[0] - y2[1])\n', (3462, 3477), True, 'import tensorflow as tf\n'), ((4818, 4950), 'zoo.orca.learn.tf2.Estimator.from_keras', 'Estimator.from_keras', ([], {'model_creator': 'simple_model', 'compile_args_creator': 'compile_args', 'verbose': '(True)', 'config': 'None', 'backend': 'backend'}), '(model_creator=simple_model, compile_args_creator=\n compile_args, verbose=True, config=None, backend=backend)\n', (4838, 4950), 
False, 'from zoo.orca.learn.tf2 import Estimator\n'), ((5064, 5181), 'zoo.orca.learn.tf2.Estimator.from_keras', 'Estimator.from_keras', ([], {'model_creator': 'model_creator', 'verbose': '(True)', 'config': 'None', 'backend': 'backend', 'workers_per_node': '(2)'}), '(model_creator=model_creator, verbose=True, config=None,\n backend=backend, workers_per_node=2)\n', (5084, 5181), False, 'from zoo.orca.learn.tf2 import Estimator\n'), ((9032, 9048), 'zoo.ray.RayContext.get', 'RayContext.get', ([], {}), '()\n', (9046, 9048), False, 'from zoo.ray import RayContext\n'), ((9257, 9432), 'zoo.orca.learn.tf2.Estimator.from_keras', 'Estimator.from_keras', ([], {'model_creator': 'simple_model', 'compile_args_creator': 'compile_args', 'verbose': '(True)', 'config': 'config', 'backend': '"""horovod"""', 'workers_per_node': 'workers_per_node'}), "(model_creator=simple_model, compile_args_creator=\n compile_args, verbose=True, config=config, backend='horovod',\n workers_per_node=workers_per_node)\n", (9277, 9432), False, 'from zoo.orca.learn.tf2 import Estimator\n'), ((14385, 14406), 'random.randint', 'random.randint', (['(1)', '(10)'], {}), '(1, 10)\n', (14399, 14406), False, 'import random\n'), ((14431, 14466), 'numpy.concatenate', 'np.concatenate', (["([data['x']] * times)"], {}), "([data['x']] * times)\n", (14445, 14466), True, 'import numpy as np\n'), ((14491, 14526), 'numpy.concatenate', 'np.concatenate', (["([data['y']] * times)"], {}), "([data['y']] * times)\n", (14505, 14526), True, 'import numpy as np\n'), ((17397, 17466), 'zoo.orca.learn.tf2.Estimator.from_keras', 'Estimator.from_keras', ([], {'model_creator': 'model_creator', 'workers_per_node': '(2)'}), '(model_creator=model_creator, workers_per_node=2)\n', (17417, 17466), False, 'from zoo.orca.learn.tf2 import Estimator\n'), ((17853, 17889), 'os.remove', 'os.remove', (['"""/tmp/cifar10_keras.ckpt"""'], {}), "('/tmp/cifar10_keras.ckpt')\n", (17862, 17889), False, 'import os\n'), ((9598, 9689), 
'horovod.tensorflow.keras.callbacks.LearningRateWarmupCallback', 'hvd.callbacks.LearningRateWarmupCallback', ([], {'warmup_epochs': '(5)', 'initial_lr': '(0.4)', 'verbose': '(True)'}), '(warmup_epochs=5, initial_lr=0.4,\n verbose=True)\n', (9638, 9689), True, 'import horovod.tensorflow.keras as hvd\n'), ((9760, 9867), 'horovod.tensorflow.keras.callbacks.LearningRateScheduleCallback', 'hvd.callbacks.LearningRateScheduleCallback', ([], {'start_epoch': '(5)', 'end_epoch': '(10)', 'multiplier': '(1.0)', 'initial_lr': '(0.4)'}), '(start_epoch=5, end_epoch=10,\n multiplier=1.0, initial_lr=0.4)\n', (9802, 9867), True, 'import horovod.tensorflow.keras as hvd\n'), ((9939, 10047), 'horovod.tensorflow.keras.callbacks.LearningRateScheduleCallback', 'hvd.callbacks.LearningRateScheduleCallback', ([], {'start_epoch': '(10)', 'end_epoch': '(15)', 'multiplier': '(0.1)', 'initial_lr': '(0.4)'}), '(start_epoch=10, end_epoch=15,\n multiplier=0.1, initial_lr=0.4)\n', (9981, 10047), True, 'import horovod.tensorflow.keras as hvd\n'), ((10121, 10230), 'horovod.tensorflow.keras.callbacks.LearningRateScheduleCallback', 'hvd.callbacks.LearningRateScheduleCallback', ([], {'start_epoch': '(15)', 'end_epoch': '(20)', 'multiplier': '(0.01)', 'initial_lr': '(0.4)'}), '(start_epoch=15, end_epoch=20,\n multiplier=0.01, initial_lr=0.4)\n', (10163, 10230), True, 'import horovod.tensorflow.keras as hvd\n'), ((10303, 10399), 'horovod.tensorflow.keras.callbacks.LearningRateScheduleCallback', 'hvd.callbacks.LearningRateScheduleCallback', ([], {'start_epoch': '(20)', 'multiplier': '(0.001)', 'initial_lr': '(0.4)'}), '(start_epoch=20, multiplier=0.001,\n initial_lr=0.4)\n', (10345, 10399), True, 'import horovod.tensorflow.keras as hvd\n'), ((10836, 10859), 'numpy.random.randn', 'np.random.randn', (['(100)', '(1)'], {}), '(100, 1)\n', (10851, 10859), True, 'import numpy as np\n'), ((10912, 10945), 'numpy.random.randint', 'np.random.randint', (['(0)', '(1)'], {'size': '(100)'}), '(0, 1, size=100)\n', (10929, 
10945), True, 'import numpy as np\n'), ((14163, 14186), 'numpy.random.randn', 'np.random.randn', (['(100)', '(1)'], {}), '(100, 1)\n', (14178, 14186), True, 'import numpy as np\n'), ((14239, 14272), 'numpy.random.randint', 'np.random.randint', (['(0)', '(1)'], {'size': '(100)'}), '(0, 1, size=100)\n', (14256, 14272), True, 'import numpy as np\n'), ((15084, 15107), 'numpy.random.randn', 'np.random.randn', (['(100)', '(1)'], {}), '(100, 1)\n', (15099, 15107), True, 'import numpy as np\n'), ((15160, 15196), 'numpy.random.randint', 'np.random.randint', (['(0)', '(1)'], {'size': '(100,)'}), '(0, 1, size=(100,))\n', (15177, 15196), True, 'import numpy as np\n'), ((2616, 2630), 'tensorflow.identity', 'tf.identity', (['x'], {}), '(x)\n', (2627, 2630), True, 'import tensorflow as tf\n'), ((3274, 3288), 'tensorflow.identity', 'tf.identity', (['x'], {}), '(x)\n', (3285, 3288), True, 'import tensorflow as tf\n'), ((5728, 5758), 'tensorflow.math.exp', 'tf.math.exp', (['(0.1 * (2 - epoch))'], {}), '(0.1 * (2 - epoch))\n', (5739, 5758), True, 'import tensorflow as tf\n'), ((16073, 16176), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(64)'], {'kernel_size': '(3, 3)', 'strides': '(1, 1)', 'activation': '"""relu"""', 'padding': '"""valid"""'}), "(64, kernel_size=(3, 3), strides=(1, 1), activation=\n 'relu', padding='valid')\n", (16095, 16176), True, 'import tensorflow as tf\n'), ((16228, 16264), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (16262, 16264), True, 'import tensorflow as tf\n'), ((16282, 16361), 'tensorflow.keras.layers.MaxPooling2D', 'tf.keras.layers.MaxPooling2D', ([], {'pool_size': '(2, 2)', 'strides': '(2, 2)', 'padding': '"""valid"""'}), "(pool_size=(2, 2), strides=(2, 2), padding='valid')\n", (16310, 16361), True, 'import tensorflow as tf\n'), ((16379, 16482), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(64)'], {'kernel_size': '(3, 3)', 'strides': '(1, 1)', 'activation': 
'"""relu"""', 'padding': '"""valid"""'}), "(64, kernel_size=(3, 3), strides=(1, 1), activation=\n 'relu', padding='valid')\n", (16401, 16482), True, 'import tensorflow as tf\n'), ((16534, 16613), 'tensorflow.keras.layers.MaxPooling2D', 'tf.keras.layers.MaxPooling2D', ([], {'pool_size': '(2, 2)', 'strides': '(2, 2)', 'padding': '"""valid"""'}), "(pool_size=(2, 2), strides=(2, 2), padding='valid')\n", (16562, 16613), True, 'import tensorflow as tf\n'), ((16631, 16656), 'tensorflow.keras.layers.Flatten', 'tf.keras.layers.Flatten', ([], {}), '()\n', (16654, 16656), True, 'import tensorflow as tf\n'), ((16674, 16721), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(10)'], {'activation': '"""softmax"""'}), "(10, activation='softmax')\n", (16695, 16721), True, 'import tensorflow as tf\n'), ((16773, 16802), 'tensorflow.keras.optimizers.RMSprop', 'tf.keras.optimizers.RMSprop', ([], {}), '()\n', (16800, 16802), True, 'import tensorflow as tf\n'), ((17054, 17085), 'numpy.random.randn', 'np.random.randn', (['(100)', '(28)', '(28)', '(3)'], {}), '(100, 28, 28, 3)\n', (17069, 17085), True, 'import numpy as np\n'), ((17145, 17179), 'numpy.random.randint', 'np.random.randint', (['(0)', '(10)', '(100, 1)'], {}), '(0, 10, (100, 1))\n', (17162, 17179), True, 'import numpy as np\n'), ((11661, 11693), 'numpy.random.randint', 'np.random.randint', (['(0)', '(1)'], {'size': '()'}), '(0, 1, size=())\n', (11678, 11693), True, 'import numpy as np\n'), ((12719, 12751), 'numpy.random.randint', 'np.random.randint', (['(0)', '(1)'], {'size': '()'}), '(0, 1, size=())\n', (12736, 12751), True, 'import numpy as np\n'), ((13545, 13577), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2)'], {'size': '()'}), '(0, 2, size=())\n', (13562, 13577), True, 'import numpy as np\n'), ((11586, 11604), 'numpy.random.randn', 'np.random.randn', (['(1)'], {}), '(1)\n', (11601, 11604), True, 'import numpy as np\n'), ((12644, 12662), 'numpy.random.randn', 'np.random.randn', (['(1)'], {}), 
'(1)\n', (12659, 12662), True, 'import numpy as np\n')] |
# Author: <NAME>
# Vision for Robotics Group, Automation and Control Institute (ACIN)
# TU Wien, Vienna
import numpy as np
import os
basepath = os.path.dirname(os.path.abspath(__file__))
import sys
sys.path.append(os.path.join(basepath, "cpp", "build"))
import icp
def refine(pose, cloud_observed, cloud_model, p_distance=0.1, iterations=1):
obj_T = pose.copy()
cloud_estimated = np.dot(cloud_model, obj_T[:3, :3].T) + obj_T[:3, 3].T
if cloud_estimated.shape[0] == 0 or cloud_observed.shape[0] == 0:
return obj_T
T = icp.icp(cloud_observed, cloud_estimated, iterations, p_distance)
obj_T = T @ obj_T
return obj_T
| [
"icp.icp",
"numpy.dot",
"os.path.abspath",
"os.path.join"
] | [((161, 186), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (176, 186), False, 'import os\n'), ((215, 253), 'os.path.join', 'os.path.join', (['basepath', '"""cpp"""', '"""build"""'], {}), "(basepath, 'cpp', 'build')\n", (227, 253), False, 'import os\n'), ((547, 611), 'icp.icp', 'icp.icp', (['cloud_observed', 'cloud_estimated', 'iterations', 'p_distance'], {}), '(cloud_observed, cloud_estimated, iterations, p_distance)\n', (554, 611), False, 'import icp\n'), ((392, 428), 'numpy.dot', 'np.dot', (['cloud_model', 'obj_T[:3, :3].T'], {}), '(cloud_model, obj_T[:3, :3].T)\n', (398, 428), True, 'import numpy as np\n')] |
import numpy as np
from PIL import Image as im
print()
Mmax = -5000
Mmin = 0
F = open("KAU_Height.txt", 'r')
F = F.readlines()
xlen = int(F[-1].split(",")[0]) + 1
ylen = int(F[-1].split(",")[1].split(":")[0]) + 1
arr = np.zeros((ylen, xlen))
# print(arr.shape)
for line in range(len(F)):
s = F[line].split(",")
x = int(s[0])
c = s[1].split(":")
y = int(c[0])
v = float(c[1])
# print(x,y)
arr[y][x] = v
if v > Mmax: Mmax = v
if v > -5000 and v < Mmin: Mmin = v
L_range = Mmax - Mmin
print("MAX = ", Mmax, "MIN = ", Mmin, "Range : ", L_range)
# norm = np.linalg.norm(arr)
# n_arr = arr / norm * 255 + 100
m_arr = (arr-Mmin)/L_range*255
# print(n_arr)
data = im.fromarray(m_arr)
data = data.convert("L")
data.save('KAU_Hmap.png')
# for x in range(xlen):
# for y in range(ylen):
# arr[x][y] = F[][]
# arr = np.zeros((ylen, xlen))
# arr = np.zeros((6, 4))
# arr[1][1:3] = 5
# print(arr) | [
"PIL.Image.fromarray",
"numpy.zeros"
] | [((226, 248), 'numpy.zeros', 'np.zeros', (['(ylen, xlen)'], {}), '((ylen, xlen))\n', (234, 248), True, 'import numpy as np\n'), ((717, 736), 'PIL.Image.fromarray', 'im.fromarray', (['m_arr'], {}), '(m_arr)\n', (729, 736), True, 'from PIL import Image as im\n')] |
"""
Tets the :mod:`fatf.utils.data.occlusion` module.
"""
# Author: <NAME> <<EMAIL>>
# License: new BSD
import pytest
import numpy as np
from fatf.exceptions import IncorrectShapeError
import fatf
try:
import fatf.utils.data.segmentation
except ImportError: # pragma: no cover
pytest.skip(
'Skipping tests of image occlusion -- scikit-image or Pillow is '
'not installed (and is required for the segmentation dependency).',
allow_module_level=True)
import fatf.utils.data.occlusion as fudo
ONES = np.ones(shape=(2, 2), dtype=int)
SEGMENTS = np.ones(shape=(2, 2), dtype=int)
SEGMENTS[1, 1] = 2
ARRAY_IMAGE_BNW1 = np.array([[0, 1], [1, 0]])
ARRAY_IMAGE_2D = np.array([[0, 255], [1, 2]])
ARRAY_IMAGE_3D = np.ones(shape=(2, 2, 3), dtype=np.uint8)
ARRAY_STRUCT = np.array([(-.1, 1)], dtype=[('a', np.float64), ('b', np.int8)])
class TestOcclusion(object):
"""Tests the :class:`fatf.utils.data.occlusion.Occlusion` class."""
def test_occlusion_class_init(self, caplog):
"""
Tests :class:`fatf.utils.data.occlusion.Occlusion` class init.
"""
log_1 = 'Assuming a black-and-white image.'
log_2 = 'Rescale 0/1 black-and-white image to 0/255.'
assert len(caplog.records) == 0
err = ('Black-and-white images must use 0 as '
'black and 1 or 255 as white.')
with pytest.raises(RuntimeError) as exin:
fudo.Occlusion(
np.array([[2, 255], [255, 2]], dtype=int),
np.ones(shape=(2, 2), dtype=int))
assert str(exin.value) == err
assert len(caplog.records) == 1
assert caplog.records[0].levelname == 'INFO'
assert caplog.records[0].getMessage() == log_1
assert len(caplog.records) == 1
with pytest.raises(RuntimeError) as exin:
fudo.Occlusion(
np.array([[2, 1], [1, 2]], dtype=int),
np.ones(shape=(2, 2), dtype=int))
assert str(exin.value) == err
assert len(caplog.records) == 3
assert caplog.records[1].levelname == 'INFO'
assert caplog.records[1].getMessage() == log_1
assert caplog.records[2].levelname == 'INFO'
assert caplog.records[2].getMessage() == log_2
# Colour image
wrn_msg = 'The segmentation has only **one** segment.'
with pytest.warns(UserWarning) as warning:
occlusion = fudo.Occlusion(ARRAY_IMAGE_3D, ONES)
assert len(warning) == 1
assert str(warning[0].message) == wrn_msg
#
assert np.array_equal(occlusion.image, ARRAY_IMAGE_3D)
assert np.array_equal(occlusion.segments, ONES)
assert occlusion.is_rgb
assert not occlusion.is_bnw
assert np.array_equal(occlusion.unique_segments, [1])
assert occlusion.segments_number == 1
assert np.array_equal(
occlusion._colouring_strategy(ONES),
occlusion._generate_colouring_strategy('mean')(ONES))
assert np.array_equal(
occlusion.colouring_strategy(ONES),
occlusion._generate_colouring_strategy('mean')(ONES))
# Grayscale image
occlusion = fudo.Occlusion(ARRAY_IMAGE_2D, SEGMENTS, 'white')
assert np.array_equal(occlusion.image, ARRAY_IMAGE_2D)
assert np.array_equal(occlusion.segments, SEGMENTS)
assert not occlusion.is_rgb
assert not occlusion.is_bnw
assert np.array_equal(occlusion.unique_segments, [1, 2])
assert occlusion.segments_number == 2
assert np.array_equal(
occlusion._colouring_strategy(ONES),
occlusion._generate_colouring_strategy('white')(ONES))
assert np.array_equal(
occlusion.colouring_strategy(ONES),
occlusion._generate_colouring_strategy('white')(ONES))
# Black-and-white image
assert len(caplog.records) == 3
occlusion = fudo.Occlusion(ARRAY_IMAGE_BNW1, SEGMENTS)
assert len(caplog.records) == 5
assert caplog.records[3].levelname == 'INFO'
assert caplog.records[3].getMessage() == log_1
assert caplog.records[4].levelname == 'INFO'
assert caplog.records[4].getMessage() == log_2
#
assert np.array_equal(occlusion.image,
np.array([[0, 255], [255, 0]], dtype=np.uint8))
assert np.array_equal(occlusion.segments, SEGMENTS)
assert not occlusion.is_rgb
assert occlusion.is_bnw
assert np.array_equal(occlusion.unique_segments, [1, 2])
assert occlusion.segments_number == 2
assert np.array_equal(
occlusion._colouring_strategy(ONES),
occlusion._generate_colouring_strategy('black')(ONES))
assert np.array_equal(
occlusion.colouring_strategy(ONES),
occlusion._generate_colouring_strategy('black')(ONES))
def test_colouring_strategy(self):
"""
Tests ``colouring_strategy`` getters and setter for the
:class:`fatf.utils.data.occlusion.Occlusion` class.
"""
occlusion = fudo.Occlusion(ARRAY_IMAGE_3D, SEGMENTS)
assert occlusion._colouring_strategy == occlusion.colouring_strategy
assert np.array_equal(
occlusion.colouring_strategy(ONES),
occlusion._generate_colouring_strategy('mean')(ONES))
occlusion.colouring_strategy = 'black'
assert occlusion._colouring_strategy == occlusion.colouring_strategy
assert np.array_equal(
occlusion.colouring_strategy(ONES),
occlusion._generate_colouring_strategy('black')(ONES))
occlusion.set_colouring_strategy('white')
assert occlusion._colouring_strategy == occlusion.colouring_strategy
assert np.array_equal(
occlusion.colouring_strategy(ONES),
occlusion._generate_colouring_strategy('white')(ONES))
def test_randomise_patch(self):
"""
Tests :func:`fatf.utils.data.occlusion.Occlusion._randomise_patch`.
"""
fatf.setup_random_seed()
mask_ = np.array([[1, 0], [0, 1]], dtype=bool)
# Colour
occlusion = fudo.Occlusion(ARRAY_IMAGE_3D, SEGMENTS)
assert np.array_equal(
occlusion._randomise_patch(mask_),
np.array([[125, 114, 71], [52, 44, 216]], dtype=np.uint8))
# ..check the default
assert np.array_equal(
occlusion._colouring_strategy(ONES),
occlusion._generate_colouring_strategy('mean')(ONES))
# Grayscale
occlusion = fudo.Occlusion(ARRAY_IMAGE_2D, SEGMENTS)
assert np.array_equal(
occlusion._randomise_patch(mask_),
np.array([119, 13], dtype=np.uint8))
# ..check the default
assert np.array_equal(
occlusion._colouring_strategy(ONES),
occlusion._generate_colouring_strategy('mean')(ONES))
# Black-and-white
occlusion = fudo.Occlusion(
np.array([[0, 255], [255, 0]], dtype=np.uint8), SEGMENTS)
assert np.array_equal(
occlusion._randomise_patch(mask_),
np.array([0, 255], dtype=np.uint8))
# ..check the default
assert np.array_equal(
occlusion._colouring_strategy(ONES),
occlusion._generate_colouring_strategy('black')(ONES))
    def test_generate_colouring_strategy(self):
        """
        Tests :func:`fatf.utils.data.occlusion.Occlusion.\
_generate_colouring_strategy`.
        """
        occlusion = fudo.Occlusion(ARRAY_IMAGE_3D, SEGMENTS)
        # Errors
        # Invalid colour specifier types share one error message.
        msg = ('The colour can either be a string specifier; or '
               'an RGB thriplet for RGB images and an integer '
               'for or grayscale and black-and-white images.')
        with pytest.raises(TypeError) as exin:
            occlusion._generate_colouring_strategy(['list'])
        assert str(exin.value) == msg
        # int for colour
        with pytest.raises(TypeError) as exin:
            occlusion._generate_colouring_strategy(33)
        assert str(exin.value) == msg
        # tuple for grayscale/black-and-white
        occlusion = fudo.Occlusion(ARRAY_IMAGE_2D, SEGMENTS)
        with pytest.raises(TypeError) as exin:
            occlusion._generate_colouring_strategy((4, 2, 0))
        assert str(exin.value) == msg
        with pytest.raises(TypeError) as exin:
            occlusion._generate_colouring_strategy(2.0)
        assert str(exin.value) == msg
        # Colour
        occlusion = fudo.Occlusion(ARRAY_IMAGE_3D, SEGMENTS)
        # string
        msg = ('Unknown colouring strategy name: colour.\n'
               "Choose one of the following: ['black', 'blue', 'green', "
               "'mean', 'pink', 'random', 'random-patch', 'randomise', "
               "'randomise-patch', 'red', 'white'].")
        with pytest.raises(ValueError) as exin:
            occlusion._generate_colouring_strategy('colour')
        assert str(exin.value) == msg
        # functional -- mean
        clr = occlusion._generate_colouring_strategy(None)(ONES)
        assert np.array_equal(clr, np.ones(shape=(2, 2, 2, 3), dtype=np.uint8))
        clr = occlusion._generate_colouring_strategy('mean')(ONES)
        assert np.array_equal(clr, np.ones(shape=(2, 2, 2, 3), dtype=np.uint8))
        one_ = np.zeros(shape=(2, 2), dtype=bool)
        one_[1, 1] = True
        # The literal values asserted below are fixed by the random seed.
        fatf.setup_random_seed()
        # functional -- random
        clr = occlusion._generate_colouring_strategy('random')(ONES)
        assert np.array_equal(clr, (57, 12, 140))
        # functional -- random-patch
        clr = occlusion._generate_colouring_strategy('random-patch')(one_)
        assert np.array_equal(clr, np.array([[16, 15, 47]], dtype=np.uint8))
        # functional -- randomise
        clr = occlusion._generate_colouring_strategy('randomise')(one_)
        assert np.array_equal(clr, (101, 214, 112))
        # functional -- randomise-patch
        clr = occlusion._generate_colouring_strategy('randomise-patch')(one_)
        assert np.array_equal(clr, np.array([[81, 216, 174]], dtype=np.uint8))
        # functional -- black
        clr = occlusion._generate_colouring_strategy('black')(one_)
        assert np.array_equal(clr, (0, 0, 0))
        # functional -- white
        clr = occlusion._generate_colouring_strategy('white')(one_)
        assert np.array_equal(clr, (255, 255, 255))
        # functional -- red
        clr = occlusion._generate_colouring_strategy('red')(one_)
        assert np.array_equal(clr, (255, 0, 0))
        # functional -- green
        clr = occlusion._generate_colouring_strategy('green')(one_)
        assert np.array_equal(clr, (0, 255, 0))
        # functional -- blue
        clr = occlusion._generate_colouring_strategy('blue')(one_)
        assert np.array_equal(clr, (0, 0, 255))
        # functional -- pink
        clr = occlusion._generate_colouring_strategy('pink')(one_)
        assert np.array_equal(clr, (255, 192, 203))
        # tuple
        clr = occlusion._generate_colouring_strategy((42, 24, 242))(one_)
        assert np.array_equal(clr, (42, 24, 242))
        # Grayscale
        occlusion = fudo.Occlusion(ARRAY_IMAGE_2D, SEGMENTS)
        # int
        msg = ('Unknown colouring strategy name: colour.\n'
               "Choose one of the following: ['black', 'mean', 'random', "
               "'random-patch', 'randomise', 'randomise-patch', 'white'].")
        with pytest.raises(ValueError) as exin:
            occlusion._generate_colouring_strategy('colour')
        assert str(exin.value) == msg
        msg = ('The colour should be an integer between '
               '0 and 255 for grayscale images.')
        with pytest.raises(ValueError) as exin:
            occlusion._generate_colouring_strategy(-1)
        assert str(exin.value) == msg
        with pytest.raises(ValueError) as exin:
            occlusion._generate_colouring_strategy(256)
        assert str(exin.value) == msg
        clr = occlusion._generate_colouring_strategy(42)(one_)
        assert clr == 42
        # string
        clr = occlusion._generate_colouring_strategy(None)(ONES)
        assert np.array_equal(
            clr,
            np.array([[[85, 2], [85, 2]], [[85, 2], [85, 2]]], dtype=np.uint8))
        clr = occlusion._generate_colouring_strategy('mean')(ONES)
        assert np.array_equal(
            clr,
            np.array([[[85, 2], [85, 2]], [[85, 2], [85, 2]]], dtype=np.uint8))
        # Re-seed so the expected random draws below are reproducible.
        fatf.setup_random_seed()
        # functional -- random
        clr = occlusion._generate_colouring_strategy('random')(ONES)
        assert clr == 57
        # functional -- random-patch
        clr = occlusion._generate_colouring_strategy('random-patch')(one_)
        assert np.array_equal(clr, np.array([125], dtype=np.uint8))
        # functional -- randomise
        clr = occlusion._generate_colouring_strategy('randomise')(one_)
        assert clr == 71
        # functional -- randomise-patch
        clr = occlusion._generate_colouring_strategy('randomise-patch')(one_)
        assert np.array_equal(clr, np.array([44], dtype=np.uint8))
        # functional -- black
        clr = occlusion._generate_colouring_strategy('black')(one_)
        assert clr == 0
        # functional -- white
        clr = occlusion._generate_colouring_strategy('white')(one_)
        assert clr == 255
        # Black-and-white
        occlusion = fudo.Occlusion(
            np.array([[0, 255], [0, 255]], dtype=np.uint8), SEGMENTS)
        # int
        msg = ('The colour should be 0 for black, or 1 or 255 for '
               'white for black-and-white images.')
        with pytest.raises(ValueError) as exin:
            occlusion._generate_colouring_strategy(42)
        assert str(exin.value) == msg
        clr = occlusion._generate_colouring_strategy(0)(one_)
        assert clr == 0
        clr = occlusion._generate_colouring_strategy(1)(one_)
        assert clr == 255
        clr = occlusion._generate_colouring_strategy(255)(one_)
        assert clr == 255
        # string
        msg = 'Mean occlusion is not supported for black-and-white images.'
        with pytest.raises(RuntimeError) as exin:
            occlusion._generate_colouring_strategy(None)
        assert str(exin.value) == msg
        with pytest.raises(RuntimeError) as exin:
            occlusion._generate_colouring_strategy('mean')
        assert str(exin.value) == msg
        # Re-seed so the expected random draws below are reproducible.
        fatf.setup_random_seed()
        # functional -- random
        clr = occlusion._generate_colouring_strategy('random')(ONES)
        assert clr == 0
        # functional -- random-patch
        clr = occlusion._generate_colouring_strategy('random-patch')(one_)
        assert np.array_equal(clr, np.array([0], dtype=np.uint8))
        # functional -- randomise
        clr = occlusion._generate_colouring_strategy('randomise')(one_)
        assert clr == 0
        # functional -- randomise-patch
        clr = occlusion._generate_colouring_strategy('randomise-patch')(one_)
        assert np.array_equal(clr, np.array([0], dtype=np.uint8))
        # functional -- black
        clr = occlusion._generate_colouring_strategy('black')(one_)
        assert clr == 0
        # functional -- white
        clr = occlusion._generate_colouring_strategy('white')(one_)
        assert clr == 255
def test_occlude_segments(self):
"""
Tests :func:`fatf.utils.data.occlusion.Occlusion.occlude_segments`.
"""
occlusion = fudo.Occlusion(ARRAY_IMAGE_3D, SEGMENTS)
msg = ('Segments subset must be either '
'an integer or a list of integers.')
with pytest.raises(TypeError) as exin:
occlusion.occlude_segments('list')
assert str(exin.value) == msg
msg = ('The segment id 0 does not correspond to any of '
'the known segments ([1, 2]).')
with pytest.raises(ValueError) as exin:
occlusion.occlude_segments(segments_subset=0)
assert str(exin.value) == msg
msg = 'The list of segments has duplicates.'
with pytest.raises(ValueError) as exin:
occlusion.occlude_segments(segments_subset=[1, 2, 1])
assert str(exin.value) == msg
msg = 'The segment id 1 is not an integer.'
with pytest.raises(TypeError) as exin:
occlusion.occlude_segments(segments_subset=[1, 2, '1'])
assert str(exin.value) == msg
msg = ('The segment id 4 does not correspond to any of '
'the known segments ([1, 2]).')
with pytest.raises(ValueError) as exin:
occlusion.occlude_segments(segments_subset=[2, 4, 1])
assert str(exin.value) == msg
msg = ('The width, height or number of channels of the input '
'image does not agree with the same parameters of the '
'original image.')
with pytest.raises(IncorrectShapeError) as exin:
occlusion.occlude_segments([],
image=np.ones(shape=(4, 4), dtype=int))
assert str(exin.value) == msg
# No image
ocl = occlusion.occlude_segments(segments_subset=[])
assert np.array_equal(ocl, ARRAY_IMAGE_3D)
# External image with external colour
img_ = np.ones(shape=(2, 2, 3), dtype=np.uint8)
img_[0, 0, 0] = 42
img_[1, 1, 1] = 42
img_[0, 1, 2] = 42
ocl_ = np.zeros(shape=(2, 2, 3), dtype=np.uint8)
ocl_[1, 1] = (1, 42, 1)
ocl = occlusion.occlude_segments([1], image=img_, colour='black')
assert np.array_equal(ocl, ocl_)
ocl = occlusion.occlude_segments(1, image=img_, colour='black')
assert np.array_equal(ocl, ocl_)
def test_occlude_segments_vectorised(self):
"""
Tests :func:`fatf.utils.data.occlusion.Occlusion.\
occlude_segments_vectorised`.
"""
occlusion = fudo.Occlusion(ARRAY_IMAGE_3D, SEGMENTS)
msg = ('The width, height or number of channels of the input '
'image does not agree with the same parameters of the '
'original image.')
with pytest.raises(IncorrectShapeError) as exin:
occlusion.occlude_segments_vectorised(
None, image=np.ones(shape=(4, 4), dtype=int))
assert str(exin.value) == msg
err = ('The vector representation of segments should be a 1- or '
'2-dimensional numpy array.')
with pytest.raises(IncorrectShapeError) as exin:
occlusion.occlude_segments_vectorised(np.array([[[1, 2, 3]]]))
assert str(exin.value) == err
err = ('The vector representation of segments cannot be '
'a structured numpy array.')
with pytest.raises(TypeError) as exin:
occlusion.occlude_segments_vectorised(ARRAY_STRUCT)
assert str(exin.value) == err
err = ('The vector representation of segments should be '
'a numerical numpy array.')
with pytest.raises(TypeError) as exin:
occlusion.occlude_segments_vectorised(np.array(['1', '2']))
assert str(exin.value) == err
err = ('The number of elements (3) in the vector representation of '
'segments should correspond to the unique number of segments '
'(2).')
with pytest.raises(IncorrectShapeError) as exin:
occlusion.occlude_segments_vectorised(np.array([1, 2, 3]))
assert str(exin.value) == err
err = ('The number of columns (3) in the vector representation '
'of segments should correspond to the unique number of '
'segments (2).')
with pytest.raises(IncorrectShapeError) as exin:
occlusion.occlude_segments_vectorised(np.array([[1, 2, 3]]))
assert str(exin.value) == err
err = ('The vector representation of segments should be binary '
'numpy array.')
with pytest.raises(TypeError) as exin:
occlusion.occlude_segments_vectorised(np.array([[1, 2]]))
assert str(exin.value) == err
# 1-D mask
ocl = occlusion.occlude_segments_vectorised(np.array([1, 1]))
assert np.array_equal(ocl, ARRAY_IMAGE_3D)
ocl = occlusion.occlude_segments_vectorised(
np.array([1, 0]), colour='black')
ocl_ = np.ones(shape=(2, 2, 3), dtype=np.uint8)
ocl_[1, 1] = (0, 0, 0)
assert np.array_equal(ocl, ocl_)
# 1-D mask -- colour
ocl = occlusion.occlude_segments_vectorised(
np.array([1.0, 0.0]), colour='black')
assert np.array_equal(ocl, ocl_)
# 2-D mask -- colour
ocl = occlusion.occlude_segments_vectorised(
np.array([[1.0, 0.0], [1, 0]]), colour='black')
assert np.array_equal(ocl, np.array([ocl_, ocl_]))
# 2-D mask -- external image
ocl = occlusion.occlude_segments_vectorised(
np.array([[1.0, 0.0], [1, 0]]), image=ocl_, colour='black')
assert np.array_equal(ocl, np.array([ocl_, ocl_]))
| [
"pytest.warns",
"fatf.setup_random_seed",
"pytest.skip",
"numpy.ones",
"numpy.zeros",
"pytest.raises",
"numpy.array",
"numpy.array_equal",
"fatf.utils.data.occlusion.Occlusion"
] | [((537, 569), 'numpy.ones', 'np.ones', ([], {'shape': '(2, 2)', 'dtype': 'int'}), '(shape=(2, 2), dtype=int)\n', (544, 569), True, 'import numpy as np\n'), ((581, 613), 'numpy.ones', 'np.ones', ([], {'shape': '(2, 2)', 'dtype': 'int'}), '(shape=(2, 2), dtype=int)\n', (588, 613), True, 'import numpy as np\n'), ((653, 679), 'numpy.array', 'np.array', (['[[0, 1], [1, 0]]'], {}), '([[0, 1], [1, 0]])\n', (661, 679), True, 'import numpy as np\n'), ((697, 725), 'numpy.array', 'np.array', (['[[0, 255], [1, 2]]'], {}), '([[0, 255], [1, 2]])\n', (705, 725), True, 'import numpy as np\n'), ((743, 783), 'numpy.ones', 'np.ones', ([], {'shape': '(2, 2, 3)', 'dtype': 'np.uint8'}), '(shape=(2, 2, 3), dtype=np.uint8)\n', (750, 783), True, 'import numpy as np\n'), ((799, 863), 'numpy.array', 'np.array', (['[(-0.1, 1)]'], {'dtype': "[('a', np.float64), ('b', np.int8)]"}), "([(-0.1, 1)], dtype=[('a', np.float64), ('b', np.int8)])\n", (807, 863), True, 'import numpy as np\n'), ((291, 468), 'pytest.skip', 'pytest.skip', (['"""Skipping tests of image occlusion -- scikit-image or Pillow is not installed (and is required for the segmentation dependency)."""'], {'allow_module_level': '(True)'}), "(\n 'Skipping tests of image occlusion -- scikit-image or Pillow is not installed (and is required for the segmentation dependency).'\n , allow_module_level=True)\n", (302, 468), False, 'import pytest\n'), ((2566, 2613), 'numpy.array_equal', 'np.array_equal', (['occlusion.image', 'ARRAY_IMAGE_3D'], {}), '(occlusion.image, ARRAY_IMAGE_3D)\n', (2580, 2613), True, 'import numpy as np\n'), ((2629, 2669), 'numpy.array_equal', 'np.array_equal', (['occlusion.segments', 'ONES'], {}), '(occlusion.segments, ONES)\n', (2643, 2669), True, 'import numpy as np\n'), ((2753, 2799), 'numpy.array_equal', 'np.array_equal', (['occlusion.unique_segments', '[1]'], {}), '(occlusion.unique_segments, [1])\n', (2767, 2799), True, 'import numpy as np\n'), ((3184, 3233), 'fatf.utils.data.occlusion.Occlusion', 
'fudo.Occlusion', (['ARRAY_IMAGE_2D', 'SEGMENTS', '"""white"""'], {}), "(ARRAY_IMAGE_2D, SEGMENTS, 'white')\n", (3198, 3233), True, 'import fatf.utils.data.occlusion as fudo\n'), ((3249, 3296), 'numpy.array_equal', 'np.array_equal', (['occlusion.image', 'ARRAY_IMAGE_2D'], {}), '(occlusion.image, ARRAY_IMAGE_2D)\n', (3263, 3296), True, 'import numpy as np\n'), ((3312, 3356), 'numpy.array_equal', 'np.array_equal', (['occlusion.segments', 'SEGMENTS'], {}), '(occlusion.segments, SEGMENTS)\n', (3326, 3356), True, 'import numpy as np\n'), ((3444, 3493), 'numpy.array_equal', 'np.array_equal', (['occlusion.unique_segments', '[1, 2]'], {}), '(occlusion.unique_segments, [1, 2])\n', (3458, 3493), True, 'import numpy as np\n'), ((3926, 3968), 'fatf.utils.data.occlusion.Occlusion', 'fudo.Occlusion', (['ARRAY_IMAGE_BNW1', 'SEGMENTS'], {}), '(ARRAY_IMAGE_BNW1, SEGMENTS)\n', (3940, 3968), True, 'import fatf.utils.data.occlusion as fudo\n'), ((4375, 4419), 'numpy.array_equal', 'np.array_equal', (['occlusion.segments', 'SEGMENTS'], {}), '(occlusion.segments, SEGMENTS)\n', (4389, 4419), True, 'import numpy as np\n'), ((4503, 4552), 'numpy.array_equal', 'np.array_equal', (['occlusion.unique_segments', '[1, 2]'], {}), '(occlusion.unique_segments, [1, 2])\n', (4517, 4552), True, 'import numpy as np\n'), ((5100, 5140), 'fatf.utils.data.occlusion.Occlusion', 'fudo.Occlusion', (['ARRAY_IMAGE_3D', 'SEGMENTS'], {}), '(ARRAY_IMAGE_3D, SEGMENTS)\n', (5114, 5140), True, 'import fatf.utils.data.occlusion as fudo\n'), ((6054, 6078), 'fatf.setup_random_seed', 'fatf.setup_random_seed', ([], {}), '()\n', (6076, 6078), False, 'import fatf\n'), ((6095, 6133), 'numpy.array', 'np.array', (['[[1, 0], [0, 1]]'], {'dtype': 'bool'}), '([[1, 0], [0, 1]], dtype=bool)\n', (6103, 6133), True, 'import numpy as np\n'), ((6172, 6212), 'fatf.utils.data.occlusion.Occlusion', 'fudo.Occlusion', (['ARRAY_IMAGE_3D', 'SEGMENTS'], {}), '(ARRAY_IMAGE_3D, SEGMENTS)\n', (6186, 6212), True, 'import fatf.utils.data.occlusion 
as fudo\n'), ((6579, 6619), 'fatf.utils.data.occlusion.Occlusion', 'fudo.Occlusion', (['ARRAY_IMAGE_2D', 'SEGMENTS'], {}), '(ARRAY_IMAGE_2D, SEGMENTS)\n', (6593, 6619), True, 'import fatf.utils.data.occlusion as fudo\n'), ((7542, 7582), 'fatf.utils.data.occlusion.Occlusion', 'fudo.Occlusion', (['ARRAY_IMAGE_3D', 'SEGMENTS'], {}), '(ARRAY_IMAGE_3D, SEGMENTS)\n', (7556, 7582), True, 'import fatf.utils.data.occlusion as fudo\n'), ((8173, 8213), 'fatf.utils.data.occlusion.Occlusion', 'fudo.Occlusion', (['ARRAY_IMAGE_2D', 'SEGMENTS'], {}), '(ARRAY_IMAGE_2D, SEGMENTS)\n', (8187, 8213), True, 'import fatf.utils.data.occlusion as fudo\n'), ((8540, 8580), 'fatf.utils.data.occlusion.Occlusion', 'fudo.Occlusion', (['ARRAY_IMAGE_3D', 'SEGMENTS'], {}), '(ARRAY_IMAGE_3D, SEGMENTS)\n', (8554, 8580), True, 'import fatf.utils.data.occlusion as fudo\n'), ((9344, 9378), 'numpy.zeros', 'np.zeros', ([], {'shape': '(2, 2)', 'dtype': 'bool'}), '(shape=(2, 2), dtype=bool)\n', (9352, 9378), True, 'import numpy as np\n'), ((9413, 9437), 'fatf.setup_random_seed', 'fatf.setup_random_seed', ([], {}), '()\n', (9435, 9437), False, 'import fatf\n'), ((9553, 9587), 'numpy.array_equal', 'np.array_equal', (['clr', '(57, 12, 140)'], {}), '(clr, (57, 12, 140))\n', (9567, 9587), True, 'import numpy as np\n'), ((9898, 9934), 'numpy.array_equal', 'np.array_equal', (['clr', '(101, 214, 112)'], {}), '(clr, (101, 214, 112))\n', (9912, 9934), True, 'import numpy as np\n'), ((10245, 10275), 'numpy.array_equal', 'np.array_equal', (['clr', '(0, 0, 0)'], {}), '(clr, (0, 0, 0))\n', (10259, 10275), True, 'import numpy as np\n'), ((10389, 10425), 'numpy.array_equal', 'np.array_equal', (['clr', '(255, 255, 255)'], {}), '(clr, (255, 255, 255))\n', (10403, 10425), True, 'import numpy as np\n'), ((10535, 10567), 'numpy.array_equal', 'np.array_equal', (['clr', '(255, 0, 0)'], {}), '(clr, (255, 0, 0))\n', (10549, 10567), True, 'import numpy as np\n'), ((10681, 10713), 'numpy.array_equal', 'np.array_equal', (['clr', '(0, 
255, 0)'], {}), '(clr, (0, 255, 0))\n', (10695, 10713), True, 'import numpy as np\n'), ((10825, 10857), 'numpy.array_equal', 'np.array_equal', (['clr', '(0, 0, 255)'], {}), '(clr, (0, 0, 255))\n', (10839, 10857), True, 'import numpy as np\n'), ((10969, 11005), 'numpy.array_equal', 'np.array_equal', (['clr', '(255, 192, 203)'], {}), '(clr, (255, 192, 203))\n', (10983, 11005), True, 'import numpy as np\n'), ((11112, 11146), 'numpy.array_equal', 'np.array_equal', (['clr', '(42, 24, 242)'], {}), '(clr, (42, 24, 242))\n', (11126, 11146), True, 'import numpy as np\n'), ((11188, 11228), 'fatf.utils.data.occlusion.Occlusion', 'fudo.Occlusion', (['ARRAY_IMAGE_2D', 'SEGMENTS'], {}), '(ARRAY_IMAGE_2D, SEGMENTS)\n', (11202, 11228), True, 'import fatf.utils.data.occlusion as fudo\n'), ((12497, 12521), 'fatf.setup_random_seed', 'fatf.setup_random_seed', ([], {}), '()\n', (12519, 12521), False, 'import fatf\n'), ((14458, 14482), 'fatf.setup_random_seed', 'fatf.setup_random_seed', ([], {}), '()\n', (14480, 14482), False, 'import fatf\n'), ((15503, 15543), 'fatf.utils.data.occlusion.Occlusion', 'fudo.Occlusion', (['ARRAY_IMAGE_3D', 'SEGMENTS'], {}), '(ARRAY_IMAGE_3D, SEGMENTS)\n', (15517, 15543), True, 'import fatf.utils.data.occlusion as fudo\n'), ((17202, 17237), 'numpy.array_equal', 'np.array_equal', (['ocl', 'ARRAY_IMAGE_3D'], {}), '(ocl, ARRAY_IMAGE_3D)\n', (17216, 17237), True, 'import numpy as np\n'), ((17300, 17340), 'numpy.ones', 'np.ones', ([], {'shape': '(2, 2, 3)', 'dtype': 'np.uint8'}), '(shape=(2, 2, 3), dtype=np.uint8)\n', (17307, 17340), True, 'import numpy as np\n'), ((17437, 17478), 'numpy.zeros', 'np.zeros', ([], {'shape': '(2, 2, 3)', 'dtype': 'np.uint8'}), '(shape=(2, 2, 3), dtype=np.uint8)\n', (17445, 17478), True, 'import numpy as np\n'), ((17601, 17626), 'numpy.array_equal', 'np.array_equal', (['ocl', 'ocl_'], {}), '(ocl, ocl_)\n', (17615, 17626), True, 'import numpy as np\n'), ((17714, 17739), 'numpy.array_equal', 'np.array_equal', (['ocl', 'ocl_'], {}), 
'(ocl, ocl_)\n', (17728, 17739), True, 'import numpy as np\n'), ((17922, 17962), 'fatf.utils.data.occlusion.Occlusion', 'fudo.Occlusion', (['ARRAY_IMAGE_3D', 'SEGMENTS'], {}), '(ARRAY_IMAGE_3D, SEGMENTS)\n', (17936, 17962), True, 'import fatf.utils.data.occlusion as fudo\n'), ((20221, 20256), 'numpy.array_equal', 'np.array_equal', (['ocl', 'ARRAY_IMAGE_3D'], {}), '(ocl, ARRAY_IMAGE_3D)\n', (20235, 20256), True, 'import numpy as np\n'), ((20371, 20411), 'numpy.ones', 'np.ones', ([], {'shape': '(2, 2, 3)', 'dtype': 'np.uint8'}), '(shape=(2, 2, 3), dtype=np.uint8)\n', (20378, 20411), True, 'import numpy as np\n'), ((20458, 20483), 'numpy.array_equal', 'np.array_equal', (['ocl', 'ocl_'], {}), '(ocl, ocl_)\n', (20472, 20483), True, 'import numpy as np\n'), ((20632, 20657), 'numpy.array_equal', 'np.array_equal', (['ocl', 'ocl_'], {}), '(ocl, ocl_)\n', (20646, 20657), True, 'import numpy as np\n'), ((1381, 1408), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (1394, 1408), False, 'import pytest\n'), ((1795, 1822), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (1808, 1822), False, 'import pytest\n'), ((2359, 2384), 'pytest.warns', 'pytest.warns', (['UserWarning'], {}), '(UserWarning)\n', (2371, 2384), False, 'import pytest\n'), ((2421, 2457), 'fatf.utils.data.occlusion.Occlusion', 'fudo.Occlusion', (['ARRAY_IMAGE_3D', 'ONES'], {}), '(ARRAY_IMAGE_3D, ONES)\n', (2435, 2457), True, 'import fatf.utils.data.occlusion as fudo\n'), ((4312, 4358), 'numpy.array', 'np.array', (['[[0, 255], [255, 0]]'], {'dtype': 'np.uint8'}), '([[0, 255], [255, 0]], dtype=np.uint8)\n', (4320, 4358), True, 'import numpy as np\n'), ((6303, 6360), 'numpy.array', 'np.array', (['[[125, 114, 71], [52, 44, 216]]'], {'dtype': 'np.uint8'}), '([[125, 114, 71], [52, 44, 216]], dtype=np.uint8)\n', (6311, 6360), True, 'import numpy as np\n'), ((6710, 6745), 'numpy.array', 'np.array', (['[119, 13]'], {'dtype': 'np.uint8'}), '([119, 13], 
dtype=np.uint8)\n', (6718, 6745), True, 'import numpy as np\n'), ((6998, 7044), 'numpy.array', 'np.array', (['[[0, 255], [255, 0]]'], {'dtype': 'np.uint8'}), '([[0, 255], [255, 0]], dtype=np.uint8)\n', (7006, 7044), True, 'import numpy as np\n'), ((7146, 7180), 'numpy.array', 'np.array', (['[0, 255]'], {'dtype': 'np.uint8'}), '([0, 255], dtype=np.uint8)\n', (7154, 7180), True, 'import numpy as np\n'), ((7807, 7831), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (7820, 7831), False, 'import pytest\n'), ((7979, 8003), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (7992, 8003), False, 'import pytest\n'), ((8227, 8251), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (8240, 8251), False, 'import pytest\n'), ((8374, 8398), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (8387, 8398), False, 'import pytest\n'), ((8873, 8898), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (8886, 8898), False, 'import pytest\n'), ((9136, 9179), 'numpy.ones', 'np.ones', ([], {'shape': '(2, 2, 2, 3)', 'dtype': 'np.uint8'}), '(shape=(2, 2, 2, 3), dtype=np.uint8)\n', (9143, 9179), True, 'import numpy as np\n'), ((9283, 9326), 'numpy.ones', 'np.ones', ([], {'shape': '(2, 2, 2, 3)', 'dtype': 'np.uint8'}), '(shape=(2, 2, 2, 3), dtype=np.uint8)\n', (9290, 9326), True, 'import numpy as np\n'), ((9735, 9775), 'numpy.array', 'np.array', (['[[16, 15, 47]]'], {'dtype': 'np.uint8'}), '([[16, 15, 47]], dtype=np.uint8)\n', (9743, 9775), True, 'import numpy as np\n'), ((10088, 10130), 'numpy.array', 'np.array', (['[[81, 216, 174]]'], {'dtype': 'np.uint8'}), '([[81, 216, 174]], dtype=np.uint8)\n', (10096, 10130), True, 'import numpy as np\n'), ((11467, 11492), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (11480, 11492), False, 'import pytest\n'), ((11723, 11748), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (11736, 11748), 
False, 'import pytest\n'), ((11864, 11889), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (11877, 11889), False, 'import pytest\n'), ((12225, 12291), 'numpy.array', 'np.array', (['[[[85, 2], [85, 2]], [[85, 2], [85, 2]]]'], {'dtype': 'np.uint8'}), '([[[85, 2], [85, 2]], [[85, 2], [85, 2]]], dtype=np.uint8)\n', (12233, 12291), True, 'import numpy as np\n'), ((12420, 12486), 'numpy.array', 'np.array', (['[[[85, 2], [85, 2]], [[85, 2], [85, 2]]]'], {'dtype': 'np.uint8'}), '([[[85, 2], [85, 2]], [[85, 2], [85, 2]]], dtype=np.uint8)\n', (12428, 12486), True, 'import numpy as np\n'), ((12794, 12825), 'numpy.array', 'np.array', (['[125]'], {'dtype': 'np.uint8'}), '([125], dtype=np.uint8)\n', (12802, 12825), True, 'import numpy as np\n'), ((13111, 13141), 'numpy.array', 'np.array', (['[44]'], {'dtype': 'np.uint8'}), '([44], dtype=np.uint8)\n', (13119, 13141), True, 'import numpy as np\n'), ((13464, 13510), 'numpy.array', 'np.array', (['[[0, 255], [0, 255]]'], {'dtype': 'np.uint8'}), '([[0, 255], [0, 255]], dtype=np.uint8)\n', (13472, 13510), True, 'import numpy as np\n'), ((13670, 13695), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (13683, 13695), False, 'import pytest\n'), ((14170, 14197), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (14183, 14197), False, 'import pytest\n'), ((14315, 14342), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (14328, 14342), False, 'import pytest\n'), ((14754, 14783), 'numpy.array', 'np.array', (['[0]'], {'dtype': 'np.uint8'}), '([0], dtype=np.uint8)\n', (14762, 14783), True, 'import numpy as np\n'), ((15068, 15097), 'numpy.array', 'np.array', (['[0]'], {'dtype': 'np.uint8'}), '([0], dtype=np.uint8)\n', (15076, 15097), True, 'import numpy as np\n'), ((15659, 15683), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (15672, 15683), False, 'import pytest\n'), ((15904, 15929), 'pytest.raises', 
'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (15917, 15929), False, 'import pytest\n'), ((16102, 16127), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (16115, 16127), False, 'import pytest\n'), ((16307, 16331), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (16320, 16331), False, 'import pytest\n'), ((16573, 16598), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (16586, 16598), False, 'import pytest\n'), ((16902, 16936), 'pytest.raises', 'pytest.raises', (['IncorrectShapeError'], {}), '(IncorrectShapeError)\n', (16915, 16936), False, 'import pytest\n'), ((18153, 18187), 'pytest.raises', 'pytest.raises', (['IncorrectShapeError'], {}), '(IncorrectShapeError)\n', (18166, 18187), False, 'import pytest\n'), ((18481, 18515), 'pytest.raises', 'pytest.raises', (['IncorrectShapeError'], {}), '(IncorrectShapeError)\n', (18494, 18515), False, 'import pytest\n'), ((18762, 18786), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (18775, 18786), False, 'import pytest\n'), ((19021, 19045), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (19034, 19045), False, 'import pytest\n'), ((19357, 19391), 'pytest.raises', 'pytest.raises', (['IncorrectShapeError'], {}), '(IncorrectShapeError)\n', (19370, 19391), False, 'import pytest\n'), ((19701, 19735), 'pytest.raises', 'pytest.raises', (['IncorrectShapeError'], {}), '(IncorrectShapeError)\n', (19714, 19735), False, 'import pytest\n'), ((19974, 19998), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (19987, 19998), False, 'import pytest\n'), ((20188, 20204), 'numpy.array', 'np.array', (['[1, 1]'], {}), '([1, 1])\n', (20196, 20204), True, 'import numpy as np\n'), ((20322, 20338), 'numpy.array', 'np.array', (['[1, 0]'], {}), '([1, 0])\n', (20330, 20338), True, 'import numpy as np\n'), ((20579, 20599), 'numpy.array', 'np.array', (['[1.0, 0.0]'], {}), '([1.0, 0.0])\n', (20587, 
20599), True, 'import numpy as np\n'), ((20753, 20783), 'numpy.array', 'np.array', (['[[1.0, 0.0], [1, 0]]'], {}), '([[1.0, 0.0], [1, 0]])\n', (20761, 20783), True, 'import numpy as np\n'), ((20836, 20858), 'numpy.array', 'np.array', (['[ocl_, ocl_]'], {}), '([ocl_, ocl_])\n', (20844, 20858), True, 'import numpy as np\n'), ((20963, 20993), 'numpy.array', 'np.array', (['[[1.0, 0.0], [1, 0]]'], {}), '([[1.0, 0.0], [1, 0]])\n', (20971, 20993), True, 'import numpy as np\n'), ((21058, 21080), 'numpy.array', 'np.array', (['[ocl_, ocl_]'], {}), '([ocl_, ocl_])\n', (21066, 21080), True, 'import numpy as np\n'), ((1462, 1503), 'numpy.array', 'np.array', (['[[2, 255], [255, 2]]'], {'dtype': 'int'}), '([[2, 255], [255, 2]], dtype=int)\n', (1470, 1503), True, 'import numpy as np\n'), ((1521, 1553), 'numpy.ones', 'np.ones', ([], {'shape': '(2, 2)', 'dtype': 'int'}), '(shape=(2, 2), dtype=int)\n', (1528, 1553), True, 'import numpy as np\n'), ((1876, 1913), 'numpy.array', 'np.array', (['[[2, 1], [1, 2]]'], {'dtype': 'int'}), '([[2, 1], [1, 2]], dtype=int)\n', (1884, 1913), True, 'import numpy as np\n'), ((1931, 1963), 'numpy.ones', 'np.ones', ([], {'shape': '(2, 2)', 'dtype': 'int'}), '(shape=(2, 2), dtype=int)\n', (1938, 1963), True, 'import numpy as np\n'), ((18575, 18598), 'numpy.array', 'np.array', (['[[[1, 2, 3]]]'], {}), '([[[1, 2, 3]]])\n', (18583, 18598), True, 'import numpy as np\n'), ((19105, 19125), 'numpy.array', 'np.array', (["['1', '2']"], {}), "(['1', '2'])\n", (19113, 19125), True, 'import numpy as np\n'), ((19451, 19470), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (19459, 19470), True, 'import numpy as np\n'), ((19795, 19816), 'numpy.array', 'np.array', (['[[1, 2, 3]]'], {}), '([[1, 2, 3]])\n', (19803, 19816), True, 'import numpy as np\n'), ((20058, 20076), 'numpy.array', 'np.array', (['[[1, 2]]'], {}), '([[1, 2]])\n', (20066, 20076), True, 'import numpy as np\n'), ((17034, 17066), 'numpy.ones', 'np.ones', ([], {'shape': '(4, 4)', 'dtype': 
'int'}), '(shape=(4, 4), dtype=int)\n', (17041, 17066), True, 'import numpy as np\n'), ((18276, 18308), 'numpy.ones', 'np.ones', ([], {'shape': '(4, 4)', 'dtype': 'int'}), '(shape=(4, 4), dtype=int)\n', (18283, 18308), True, 'import numpy as np\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import _init_paths
import os
import cv2
import time
import xlwt
import numpy as np
import math
import sys
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from cv2 import *
from opts import opts
from detectors.detector_factory import detector_factory
# Module-level Excel workbook that accumulates one row of key-point data
# per processed frame (written by duquwenjian).
workbook = xlwt.Workbook(encoding = "utf-8")
booksheet = workbook.add_sheet('Sheet1',cell_overwrite_ok=True)
# Recognised input file extensions for still images and for videos.
image_ext = ['jpg', 'jpeg', 'png', 'webp']
video_ext = ['mp4', 'mov', 'avi', 'mkv']
# Labels of detector timing phases -- presumably the keys of the timing
# dict produced by the detector; confirm against detector_factory output.
time_stats = ['tot', 'load', 'pre', 'net', 'dec', 'post', 'merge']
def get_angle1(x1, y1, x2, y2, x3, y3):
    """Return the angle, in degrees, at vertex ``(x1, y1)`` of the
    triangle spanned by ``(x2, y2)`` and ``(x3, y3)``.

    Uses the law of cosines. A degenerate triangle (either side adjacent
    to the vertex has zero length) yields 0 degrees, and the cosine is
    clamped to [-1, 1] to guard against floating-point drift.
    """
    opposite = math.hypot(x2 - x3, y2 - y3)  # side facing the vertex
    side_b = math.hypot(x1 - x3, y1 - y3)    # vertex -> third point
    side_c = math.hypot(x2 - x1, y2 - y1)    # vertex -> second point
    if side_c * side_b == 0:
        cos_angle = 1.0  # degenerate triangle: report a zero angle
    else:
        cos_angle = (opposite ** 2 - side_c ** 2 - side_b ** 2) / (
            -2 * side_c * side_b)
    cos_angle = max(-1.0, min(1.0, cos_angle))
    return math.degrees(math.acos(cos_angle))
def duquwenjian(glo, points, img_id='default'):
    """Write the x/y coordinates of key-points 5-16 of one pose to row
    ``glo`` of the module-level Excel sheet ``booksheet``.

    Parameters
    ----------
    glo : int
        Row index in ``booksheet`` to write to.
    points : array-like
        Sequence reshapeable to 17 (x, y) key-point coordinates
        (presumably COCO joint ordering -- confirm against the detector).
    img_id : str, optional
        Unused; kept for backward compatibility with existing callers.
    """
    points = np.array(points, dtype=np.int32).reshape(17, 2)

    # Spread of the pose: summed distance of every key-point from the
    # hip centre (midpoint of joints 11 and 12). Printed for inspection.
    xcenter = (points[11, 0] + points[12, 0]) / 2
    ycenter = (points[11, 1] + points[12, 1]) / 2
    field = sum(
        math.hypot(x - xcenter, y - ycenter) for x, y in points)
    print('field', field)

    # Columns 1-24: x/y pairs of joints 5-16 (upper body and legs).
    for offset, joint in enumerate(range(5, 17)):
        booksheet.write(glo, 2 * offset + 1, float(points[joint, 0]))
        booksheet.write(glo, 2 * offset + 2, float(points[joint, 1]))
class VideoBox(QWidget):
    """Minimal video player widget: displays frames from an OpenCV capture
    on a QLabel, driven by a VideoTimer tick, with one play/pause button."""

    # Playback source kinds.
    VIDEO_TYPE_OFFLINE = 0
    VIDEO_TYPE_REAL_TIME = 1
    # Player state-machine values (see switch_video / self.status).
    STATUS_INIT = 0
    STATUS_PLAYING = 1
    STATUS_PAUSE = 2

    video_url = ""

    def __init__(self, video_url="", video_type=VIDEO_TYPE_OFFLINE, auto_play=False):
        QWidget.__init__(self)
        self.video_url = video_url
        self.video_type = video_type  # 0: offline 1: realTime
        self.auto_play = auto_play
        self.status = self.STATUS_INIT  # 0: init 1:playing 2: pause
        # Widget layout: picture area above a single play button.
        self.pictureLabel = QLabel()
        init_image = QPixmap("11.jpeg").scaled(self.width(), self.height())
        self.pictureLabel.setPixmap(init_image)
        self.playButton = QPushButton()
        self.playButton.setEnabled(True)
        self.playButton.setIcon(self.style().standardIcon(QStyle.SP_MediaPlay))
        self.playButton.clicked.connect(self.switch_video)
        control_box = QHBoxLayout()
        control_box.setContentsMargins(0, 0, 0, 0)
        control_box.addWidget(self.playButton)
        layout = QVBoxLayout()
        layout.addWidget(self.pictureLabel)
        layout.addLayout(control_box)
        self.setLayout(layout)
        # Timer setup: each tick calls show_video_images on the GUI thread.
        self.timer = VideoTimer()
        self.timer.timeSignal.signal[str].connect(self.show_video_images)
        # Initial video setup: probe the source once to read its FPS.
        self.playCapture = VideoCapture()
        if self.video_url != "":
            self.playCapture.open(self.video_url)
            fps = self.playCapture.get(CAP_PROP_FPS)
            self.timer.set_fps(fps)
            self.playCapture.release()
            if self.auto_play:
                self.switch_video()
        # self.videoWriter = VideoWriter('*.mp4', VideoWriter_fourcc('M', 'J', 'P', 'G'), self.fps, size)

    def reset(self):
        """Stop the timer, release the capture and return to the INIT state."""
        self.timer.stop()
        self.playCapture.release()
        self.status = VideoBox.STATUS_INIT
        self.playButton.setIcon(self.style().standardIcon(QStyle.SP_MediaPlay))

    def show_video_images(self):
        """Timer callback: read one frame, convert it to RGB and display it."""
        if self.playCapture.isOpened():
            success, frame = self.playCapture.read()
            if success:
                height, width = frame.shape[:2]
                if frame.ndim == 3:
                    rgb = cvtColor(frame, COLOR_BGR2RGB)
                elif frame.ndim == 2:
                    rgb = cvtColor(frame, COLOR_GRAY2BGR)
                temp_image = QImage(rgb.flatten(), width, height, QImage.Format_RGB888)
                # print(type(rgb))
                temp_pixmap = QPixmap.fromImage(temp_image)
                print(type(temp_pixmap))
                self.pictureLabel.setPixmap(temp_pixmap)
            else:
                print("read failed, no frame data")
                # Retry once; a second failure on an offline file means EOF.
                success, frame = self.playCapture.read()
                if not success and self.video_type is VideoBox.VIDEO_TYPE_OFFLINE:
                    print("play finished")  # local file finished playing
                    self.reset()
                    self.playButton.setIcon(self.style().standardIcon(QStyle.SP_MediaStop))
                    return
        else:
            print("open file or capturing device error, init again")
            self.reset()

    def switch_video(self):
        """Play/pause toggle driving the INIT -> PLAYING <-> PAUSE machine."""
        if self.video_url == "" or self.video_url is None:
            return
        if self.status is VideoBox.STATUS_INIT:
            self.playCapture.open(self.video_url)
            self.timer.start()
            self.playButton.setIcon(self.style().standardIcon(QStyle.SP_MediaPause))
        elif self.status is VideoBox.STATUS_PLAYING:
            self.timer.stop()
            if self.video_type is VideoBox.VIDEO_TYPE_REAL_TIME:
                self.playCapture.release()
            self.playButton.setIcon(self.style().standardIcon(QStyle.SP_MediaPlay))
        elif self.status is VideoBox.STATUS_PAUSE:
            if self.video_type is VideoBox.VIDEO_TYPE_REAL_TIME:
                self.playCapture.open(self.video_url)
            self.timer.start()
            self.playButton.setIcon(self.style().standardIcon(QStyle.SP_MediaPause))
        # Advance the state: INIT->PLAYING, PLAYING->PAUSE, PAUSE->PLAYING.
        self.status = (VideoBox.STATUS_PLAYING,
                       VideoBox.STATUS_PAUSE,
                       VideoBox.STATUS_PLAYING)[self.status]
class Communicate(QObject):
    """Signal holder used by VideoTimer to notify the UI thread; the payload
    is an arbitrary str tick marker (VideoTimer emits "1")."""
    signal = pyqtSignal(str)
class VideoTimer(QThread):
    """Worker thread that emits a tick signal `frequent` times per second.

    The tick is delivered through `self.timeSignal.signal` (a str signal);
    `self.stopped` is guarded by a QMutex since it is written from the GUI
    thread (stop) and read from the worker thread (run).
    """

    def __init__(self, frequent=20):
        QThread.__init__(self)
        self.stopped = False
        self.frequent = frequent  # ticks per second
        self.timeSignal = Communicate()
        self.mutex = QMutex()

    def run(self):
        with QMutexLocker(self.mutex):
            self.stopped = False
        while True:
            if self.stopped:
                return
            self.timeSignal.signal.emit("1")
            time.sleep(1 / self.frequent)

    def stop(self):
        with QMutexLocker(self.mutex):
            self.stopped = True

    def is_stopped(self):
        with QMutexLocker(self.mutex):
            # BUG FIX: previously returned `self.stoppedQWidget`, a
            # non-existent attribute that raised AttributeError on every call.
            return self.stopped

    def set_fps(self, fps):
        """Set the tick rate (frames per second) used by run()."""
        self.frequent = fps
def demo(opt):
    """Launch the Qt video demo for the given parsed CenterNet options."""
    # Pin the visible GPUs before the detector is constructed.
    os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str
    opt.debug = max(opt.debug, 1)
    Detector = detector_factory[opt.task]
    # NOTE(review): the detector is constructed but unused while the frame
    # loop below remains commented out -- presumably kept for model warm-up.
    detector = Detector(opt)
    app = QApplication(sys.argv)
    box = VideoBox("haojiang.mp4")
    box.show()
    sys.exit(app.exec_())
'''
if opt.demo == 'webcam' or \
opt.demo[opt.demo.rfind('.') + 1:].lower() in video_ext:
cam = cv2.VideoCapture(0 if opt.demo == 'webcam' else opt.demo)
#cam = cv2.VideoCapture("../images/webwxgetvideo")
detector.pause = False
t=0
size = (int(cam.get(cv2.CAP_PROP_FRAME_WIDTH)),int(cam.get(cv2.CAP_PROP_FRAME_HEIGHT)))
codec = cv2.VideoWriter_fourcc(*'DIVX')
output = cv2.VideoWriter('differ.avi',codec,25.0,size)
while cam.isOpened():
ret, img = cam.read()
cv2.imshow('input', img)
s=time.time()
ret = detector.run(img)
results = ret['results']
for bbox in results[1]:
if bbox[4] > opt.vis_thresh:
duquwenjian(t,bbox[5:39],img_id='multi_pose')
e=time.time()
fps=1/(e-s)
print("FPS",fps)
#print("FPS",1/ret['tot'])
time_str = ''
for stat in time_stats:
time_str = time_str + '{} {:.3f}s |'.format(stat, ret[stat])
print(time_str)
if t in [0,1,39,87,88,177,178,180,181,182,184,195,196,200,201,202,203,204,205,206,207,208,212,217,222,223,229,230,239,240,241,257,258,259,285,286,343,346,347,348,349,350,351,352,353,358,359,360,366,371,379,386,387,388]:
output.write(img)
t=t+1
#print(t)
if cv2.waitKey(10) & 0XFF == ord('q'):
print("ed")
workbook.save("dance22.xls")
break # esc to quit
cam.release()
cv2.destroyAllWindows()
workbook.save("test.xls")
#stream.release()
else:
if os.path.isdir(opt.demo):
image_names = []
ls = os.listdir(opt.demo)
for file_name in sorted(ls):
ext = file_name[file_name.rfind('.') + 1:].lower()
if ext in image_ext:
image_names.append(os.path.join(opt.demo, file_name))
else:
image_names = [opt.demo]
for (image_name) in image_names:
ret = detector.run(image_name)
print(ret)
time_str = ''
for stat in time_stats:
time_str = time_str + '{} {:.3f}s |'.format(stat, ret[stat])
print(time_str)
'''
if __name__ == '__main__':
    # Parse CLI options and launch the Qt demo.
    opt = opts().init()
    demo(opt)
| [
"opts.opts",
"xlwt.Workbook",
"math.sqrt",
"time.sleep",
"math.acos",
"numpy.array",
"math.degrees"
] | [((410, 441), 'xlwt.Workbook', 'xlwt.Workbook', ([], {'encoding': '"""utf-8"""'}), "(encoding='utf-8')\n", (423, 441), False, 'import xlwt\n'), ((702, 744), 'math.sqrt', 'math.sqrt', (['((x2 - x3) ** 2 + (y2 - y3) ** 2)'], {}), '((x2 - x3) ** 2 + (y2 - y3) ** 2)\n', (711, 744), False, 'import math\n'), ((741, 783), 'math.sqrt', 'math.sqrt', (['((x1 - x3) ** 2 + (y1 - y3) ** 2)'], {}), '((x1 - x3) ** 2 + (y1 - y3) ** 2)\n', (750, 783), False, 'import math\n'), ((780, 822), 'math.sqrt', 'math.sqrt', (['((x2 - x1) ** 2 + (y2 - y1) ** 2)'], {}), '((x2 - x1) ** 2 + (y2 - y1) ** 2)\n', (789, 822), False, 'import math\n'), ((964, 979), 'math.acos', 'math.acos', (['cosA'], {}), '(cosA)\n', (973, 979), False, 'import math\n'), ((988, 1003), 'math.degrees', 'math.degrees', (['A'], {}), '(A)\n', (1000, 1003), False, 'import math\n'), ((2045, 2117), 'math.sqrt', 'math.sqrt', (['((points[j, 0] - xcenter) ** 2 + (points[j, 1] - ycenter) ** 2)'], {}), '((points[j, 0] - xcenter) ** 2 + (points[j, 1] - ycenter) ** 2)\n', (2054, 2117), False, 'import math\n'), ((1081, 1113), 'numpy.array', 'np.array', (['points'], {'dtype': 'np.int32'}), '(points, dtype=np.int32)\n', (1089, 1113), True, 'import numpy as np\n'), ((8363, 8392), 'time.sleep', 'time.sleep', (['(1 / self.frequent)'], {}), '(1 / self.frequent)\n', (8373, 8392), False, 'import time\n'), ((11078, 11084), 'opts.opts', 'opts', ([], {}), '()\n', (11082, 11084), False, 'from opts import opts\n')] |
import numpy as np
from nanograd.nn.conv_ops import get_conv1d_output_size, get_conv2d_output_size
from nanograd.autograd import Function
# *************************************
# ************** Helpers **************
# *************************************
def sigmoid(x: np.ndarray) -> np.ndarray:
    """Elementwise logistic sigmoid: 1 / (1 + e^-x)."""
    return 1 / (np.exp(-x) + 1)
def unbroadcast(grad: np.ndarray, shape: tuple, to_keep: int = 0) -> np.ndarray:
    """Reverse numpy broadcasting: reduce `grad` down to `shape`.

    Leading axes added by broadcasting are summed away; axes that were
    broadcast from size 1 are summed with keepdims. The last `to_keep`
    axes are left untouched.
    """
    # Drop extra leading dimensions first.
    while grad.ndim != len(shape):
        grad = grad.sum(axis=0)
    # Then collapse any axis that was stretched from size 1.
    for axis in range(len(shape) - to_keep):
        if grad.shape[axis] != shape[axis]:
            grad = grad.sum(axis=axis, keepdims=True)
    return grad
def inner_slice(a, indices):
    """Slice `a` along every axis, zero-padding where indices run outside.

    Args:
        a (np.ndarray): array to slice
        indices (list): one (start, stop) pair per dimension of `a`;
            out-of-range positions are filled with zeros.
    """
    # Pad just enough on each side so every requested index exists.
    pad_widths = [(max(0, -lo), max(0, hi - a.shape[dim]))
                  for dim, (lo, hi) in enumerate(indices)]
    padded = np.pad(a, pad_widths, mode="constant")
    # Shift the requested window into the padded array's coordinates.
    shifted = [(lo + pad_widths[dim][0], hi + pad_widths[dim][0])
               for dim, (lo, hi) in enumerate(indices)]
    return padded[tuple(slice(lo, hi) for lo, hi in shifted)]
# *************************************
# *********** Forward passes **********
# *************************************
class OneHot(Function):
    @staticmethod
    def forward(ctx, input, num_classes):
        """One-hot encode a 1-D array of integer class labels."""
        labels = input.astype(int)
        encoded = np.zeros((labels.shape[0], num_classes))
        encoded[np.arange(labels.shape[0]), labels] = 1
        return encoded

    @staticmethod
    def backward(ctx, grad_output):
        # One-hot encoding is not differentiable.
        raise NotImplementedError
class Unsqueeze(Function):
    @staticmethod
    def forward(ctx, input, axis):
        """Insert a size-1 dimension at `axis`."""
        ctx.save_for_backward(axis)
        return np.expand_dims(input, axis)

    @staticmethod
    def backward(ctx, grad_output):
        """Drop the dimension that forward inserted."""
        axis = ctx.saved_tensors[0]
        return grad_output.squeeze(axis)
class Squeeze(Function):
    @staticmethod
    def forward(ctx, input, axis):
        """Remove the size-1 dimension at `axis`."""
        ctx.save_for_backward(axis)
        return np.squeeze(input, axis)

    @staticmethod
    def backward(ctx, grad_output):
        """Re-insert the dimension that forward removed."""
        # BUG FIX: unpack the saved axis. Previously the whole saved_tensors
        # container was passed to np.expand_dims (inconsistent with
        # Unsqueeze.backward, which indexes element 0).
        axis = ctx.saved_tensors[0]
        return np.expand_dims(grad_output, axis)
class Slice(Function):
    @staticmethod
    def forward(ctx, input, indices):
        """Extract a (possibly zero-padded) sub-array; see inner_slice."""
        ctx.save_for_backward(input.shape, indices)
        return inner_slice(input, indices)

    @staticmethod
    def backward(ctx, grad_output):
        """Scatter the gradient back into the original array's extent."""
        shape, indices = ctx.saved_tensors
        # Invert each (start, stop) pair relative to the original shape.
        flipped = [(-start, grad_output.shape[dim] + (shape[dim] - stop))
                   for dim, (start, stop) in enumerate(indices)]
        return inner_slice(grad_output, flipped)
class Transpose(Function):
    @staticmethod
    def forward(ctx, input):
        """Transpose (reverse all axes, equivalent to ndarray.T)."""
        return np.transpose(input)

    @staticmethod
    def backward(ctx, grad_output):
        """Transpose is its own inverse, so transpose the gradient back."""
        return np.transpose(grad_output)
class Reshape(Function):
    @staticmethod
    def forward(ctx, input, shape):
        """Reshape to `shape`, remembering the original shape for backward."""
        ctx.save_for_backward(input.shape)
        return np.reshape(input, shape)

    @staticmethod
    def backward(ctx, grad_output):
        """Reshape the gradient back to the input's original shape."""
        original_shape = ctx.saved_tensors[0]
        return np.reshape(grad_output, original_shape)
class Max(Function):
    @staticmethod
    def forward(ctx, input, axis):
        """Max-reduce over `axis` (all elements when axis is None)."""
        axis = [axis] if isinstance(axis, int) else axis
        out = np.amax(input, axis=None if axis is None else tuple(axis), keepdims=True)
        ctx.save_for_backward(input, axis, out)
        if axis is not None:
            # Drop the reduced axes from the returned shape.
            out = out.reshape([input.shape[i] for i in range(len(input.shape)) if i not in axis])
        return out

    @staticmethod
    def backward(ctx, grad_output):
        """Route the gradient to the argmax positions (split among ties)."""
        input, axis, out = ctx.saved_tensors
        shape = [1 if axis is None or i in axis else input.shape[i] for i in range(len(input.shape))]
        # Mask of positions that attained the maximum.
        ret2 = (input == out.reshape(shape))
        # Number of tied maxima per reduction group, so ties share the gradient.
        div = ret2.sum(axis=None if axis is None else tuple(axis), keepdims=True)
        # BUG FIX: removed a stray `.data` on the reshaped gradient (it produced
        # a memoryview of the ndarray, only working by accident via the buffer
        # protocol).
        return ret2 * grad_output.reshape(shape) / div
class Min(Function):
    @staticmethod
    def forward(ctx, input, axis):
        """Min-reduce over `axis` (all elements when axis is None)."""
        axis = [axis] if isinstance(axis, int) else axis
        out = np.amin(input, axis=None if axis is None else tuple(axis), keepdims=True)
        ctx.save_for_backward(input, axis, out)
        if axis is not None:
            # Drop the reduced axes from the returned shape.
            out = out.reshape([input.shape[i] for i in range(len(input.shape)) if i not in axis])
        return out

    @staticmethod
    def backward(ctx, grad_output):
        """Route the gradient to the argmin positions (split among ties)."""
        input, axis, out = ctx.saved_tensors
        shape = [1 if axis is None or i in axis else input.shape[i] for i in range(len(input.shape))]
        # Mask of positions that attained the minimum.
        ret2 = (input == out.reshape(shape))
        # Number of tied minima per reduction group, so ties share the gradient.
        div = ret2.sum(axis=None if axis is None else tuple(axis), keepdims=True)
        # BUG FIX: removed a stray `.data` on the reshaped gradient (it produced
        # a memoryview of the ndarray, only working by accident via the buffer
        # protocol).
        return ret2 * grad_output.reshape(shape) / div
class Sum(Function):
    @staticmethod
    def forward(ctx, input, axis=None):
        """Sum-reduce over `axis` (all elements when axis is None)."""
        ctx.save_for_backward(input, axis)
        if axis is None:
            # Wrap the scalar so downstream code always sees an ndarray.
            return np.array([input.sum()])
        return input.sum(axis=axis)

    @staticmethod
    def backward(ctx, grad_output):
        """Broadcast the incoming gradient back over the reduced axes."""
        input, axis = ctx.saved_tensors
        # Use isinstance (consistent with Max/Min) instead of `type(axis) == int`.
        axis = [axis] if isinstance(axis, int) else axis
        shape = [1 if axis is None or i in axis else input.shape[i] for i in range(len(input.shape))]
        return grad_output.reshape(shape) + np.zeros_like(input)  # Useful for broadcasting
class Add(Function):
    @staticmethod
    def forward(ctx, a, b):
        """Elementwise (broadcasting) addition."""
        ctx.save_for_backward(a.shape, b.shape)
        return a + b

    @staticmethod
    def backward(ctx, grad_output):
        """d(a+b)/da = d(a+b)/db = 1; undo any broadcasting on each side."""
        shape_a, shape_b = ctx.saved_tensors
        return (unbroadcast(grad_output * np.ones(shape_a), shape_a),
                unbroadcast(grad_output * np.ones(shape_b), shape_b))
class Mul(Function):
    @staticmethod
    def forward(ctx, a, b):
        """Elementwise (broadcasting) multiplication."""
        ctx.save_for_backward(a, b)
        return a * b

    @staticmethod
    def backward(ctx, grad_output):
        """d(ab)/da = b and d(ab)/db = a; undo any broadcasting on each side."""
        a, b = ctx.saved_tensors
        return (unbroadcast(b * grad_output, a.shape),
                unbroadcast(a * grad_output, b.shape))
class MatMul(Function):
    @staticmethod
    def forward(ctx, a, b):
        """Matrix product a @ b."""
        ctx.save_for_backward(a, b)
        return np.matmul(a, b)

    @staticmethod
    def backward(ctx, grad_output):
        """dL/da = dL/dy @ b^T and dL/db = a^T @ dL/dy."""
        a, b = ctx.saved_tensors
        return np.matmul(grad_output, b.T), np.matmul(a.T, grad_output)
class Log(Function):
    @staticmethod
    def forward(ctx, input):
        """Natural logarithm, elementwise."""
        ctx.save_for_backward(input)
        return np.log(input)

    @staticmethod
    def backward(ctx, grad_output):
        """d(log x)/dx = 1/x."""
        x = ctx.saved_tensors[0]
        return grad_output / x
class Exp(Function):
    @staticmethod
    def forward(ctx, input):
        """Exponential, elementwise."""
        ctx.save_for_backward(input)
        return np.exp(input)

    @staticmethod
    def backward(ctx, grad_output):
        """d(e^x)/dx = e^x."""
        x = ctx.saved_tensors[0]
        return np.exp(x) * grad_output
class Neg(Function):
    @staticmethod
    def forward(ctx, input):
        """Elementwise negation."""
        return np.negative(input)

    @staticmethod
    def backward(ctx, grad_output):
        """d(-x)/dx = -1, so the gradient is simply negated."""
        return np.negative(grad_output)
class Pow(Function):
    @staticmethod
    def forward(ctx, input, power):
        """Elementwise power `input ** power` (operands may broadcast)."""
        ctx.save_for_backward(input, power)
        return input ** power
    @staticmethod
    def backward(ctx, grad_output):
        """d(x^p)/dx = p * x^(p-1);  d(x^p)/dp = x^p * ln(x).

        Each gradient is unbroadcast back to its operand's original shape.
        NOTE(review): the ln(x) term is undefined/NaN for x <= 0 -- presumably
        only positive bases are differentiated w.r.t. the exponent; confirm.
        """
        input, power = ctx.saved_tensors
        return unbroadcast(power * (input ** (power-1.0)) * grad_output, input.shape), \
               unbroadcast((input ** power) * np.log(input) * grad_output, power.shape)
class ReLU(Function):
    @staticmethod
    def forward(ctx, input):
        """Rectified linear unit: max(x, 0) elementwise."""
        ctx.save_for_backward(input)
        return np.maximum(input, 0)

    @staticmethod
    def backward(ctx, grad_output):
        """Pass the gradient through wherever the input was non-negative."""
        x = ctx.saved_tensors[0]
        mask = x >= 0
        return mask * grad_output
class Sigmoid(Function):
    @staticmethod
    def forward(ctx, input):
        """Logistic sigmoid, elementwise (see module-level sigmoid())."""
        ctx.save_for_backward(input)
        return sigmoid(input)

    @staticmethod
    def backward(ctx, grad_output):
        """d(sigma(x))/dx = sigma(x) * (1 - sigma(x))."""
        input = ctx.saved_tensors[0]
        # Hoisted: sigmoid(input) was evaluated twice per backward call
        # (two extra np.exp passes); the result is identical.
        s = sigmoid(input)
        return grad_output * s * (1 - s)
class Tanh(Function):
    @staticmethod
    def forward(ctx, input):
        """Hyperbolic tangent, elementwise."""
        ctx.save_for_backward(input)
        return np.tanh(input)

    @staticmethod
    def backward(ctx, grad_output):
        """d(tanh x)/dx = 1 - tanh(x)^2."""
        x = ctx.saved_tensors[0]
        return grad_output * (1 - np.tanh(x) ** 2)
class Conv1d(Function):
    @staticmethod
    def forward(ctx, input, weight, stride):
        """1D cross-correlation with no padding.

        input:  (batch, in_channel, signal_length)
        weight: (num_filters, in_channel, kernel_length)
        stride: int (a 1-element sequence is unwrapped below)
        Returns (batch, num_filters, output_length).
        """
        if not isinstance(stride, int):
            stride = int(stride[0])
        batch_size, in_channel, signal_length = input.shape
        num_filters, _, kernel_length = weight.shape
        output_length = get_conv1d_output_size(signal_length, kernel_length, stride, 0)
        ctx.save_for_backward(input, weight, stride)
        # Zero-copy sliding-window view: per-axis element strides for the
        # (in_channel, kernel_pos, batch, output_pos) layout, scaled to bytes.
        stride_shape = (signal_length, 1, in_channel * signal_length, stride)
        strides = input.data.itemsize * np.array(stride_shape)
        cols = np.lib.stride_tricks.as_strided(
            x=input,
            strides=strides,
            shape=(in_channel, kernel_length, batch_size, output_length),
            writeable=False
        )
        cols = cols.transpose(2, 0, 1, 3)
        weight = weight.transpose(1, 2, 0)
        # Contract over (in_channel, kernel_pos): jiyX,iyk -> jXk.
        ret = np.tensordot(cols, weight, axes=[(1, 2), (0, 1)])
        ret = ret.transpose(0, 2, 1)
        # Second save appends the window view for backward (backward unpacks
        # four saved values: input, weight, stride, input_reshaped).
        ctx.save_for_backward(cols.transpose(0, 1, 3, 2))
        return ret
    @staticmethod
    def backward(ctx, grad_output):
        """Gradients w.r.t. input and weight.

        grad_output: (batch, num_filters, output_length)
        """
        input, weight, stride, input_reshaped = ctx.saved_tensors
        batch_size, in_channel, signal_length = input.shape
        num_filters, _, kernel_length = weight.shape
        _, _, output_length = grad_output.shape
        #grad_weight = np.einsum('ikX, ijXx -> kjx', grad_output, x_reshaped) SLOWER than using tensordot
        grad_weight = np.tensordot(grad_output, input_reshaped, axes=[(0, 2), (0, 2)])
        grad_x = np.zeros((batch_size, in_channel, signal_length), dtype=grad_output.dtype)
        # Scatter-add each output position's gradient over its receptive field.
        for k in range(output_length):
            X = k % output_length
            iX = X * stride
            #grad_x[:, :, iX:iX+kernel_length] += np.einsum('ik, kjy->ijy', grad_output[:, :, X], weight) #SLOWER than using tensordot
            grad_x[:, :, iX:iX+kernel_length] += np.tensordot(grad_output[:, :, X], weight, axes=[(1), (0)])
        grad_x = grad_x.reshape((batch_size, in_channel, signal_length))
        return grad_x, grad_weight
class Conv2d(Function):
    @staticmethod
    def forward(ctx, input, weight, stride):
        """2D cross-correlation with no padding.

        input:  (batch, in_channel, im_height, im_width)
        weight: (num_filters, in_channel, kernel_height, kernel_width)
        stride: int (a 1-element sequence is unwrapped below)
        Returns (batch, num_filters, output_height, output_width).
        """
        if not isinstance(stride, int):
            stride = int(stride[0])
        batch_size, in_channel, im_height, im_width = input.shape
        num_filters, _, kernel_height, kernel_width = weight.shape
        output_height, output_width = get_conv2d_output_size(im_height, im_width, (kernel_height, kernel_width), stride, 0)
        ctx.save_for_backward(weight, stride)
        # Element strides for the (in_channel, ky, kx, batch, out_y, out_x) view.
        # BUG FIX: the batch stride was `in_channel * im_height * im_height`,
        # which is only correct for square images; one sample spans
        # in_channel * im_height * im_width elements.
        strides = (im_height * im_width, im_width, 1, in_channel * im_height * im_width, stride * im_width, stride)
        strides = input.itemsize * np.array(strides)
        cols = np.lib.stride_tricks.as_strided(
            x=input,
            shape=(in_channel, kernel_height, kernel_width, batch_size, output_height, output_width),
            strides=strides,
            writeable=False
        )
        cols = cols.transpose(3, 0, 1, 2, 4, 5)
        weight = weight.transpose(1, 2, 3, 0)
        #jiyxYX,iyxk -> jYXk -> jkYX
        ret = np.tensordot(cols, weight, axes=[(1, 2, 3), (0, 1, 2)])
        ret = ret.transpose(0, 3, 1, 2)
        # Second save appends the window view for backward (backward unpacks
        # four saved values: weight, stride, input, input_reshaped).
        ctx.save_for_backward(input, cols.transpose(0, 1, 4, 5, 2, 3))
        return ret

    @staticmethod
    def backward(ctx, grad_output):
        """Gradients w.r.t. input and weight.

        grad_output: (batch, num_filters, output_height, output_width)
        """
        weight, stride, input, input_reshaped = ctx.saved_tensors
        batch_size, in_channel, im_height, im_width = input.shape
        num_filters, _, kernel_height, kernel_width = weight.shape
        _, _, output_height, output_width = grad_output.shape
        #grad_weight = np.einsum('ikYX, ijYXyx -> kjyx', grad_output, x_reshaped) SLOWER than using tensordot
        grad_weight = np.tensordot(grad_output, input_reshaped, axes=[(0,2,3),(0,2,3)])
        grad_x = np.zeros((batch_size, in_channel, im_height, im_width), dtype=grad_output.dtype)
        # Scatter-add each output position's gradient over its receptive field.
        for k in range(output_height * output_width):
            X, Y = k % output_width, k // output_width
            iX, iY = X * stride, Y * stride
            # grad_x[:,:, iY:iY+kernel_height, iX:iX+kernel_width] += np.einsum('ik,kjyx->ijyx', grad_output[:,:,Y,X], weight)
            # SLOWER than using tensordot
            grad_x[:,:, iY:iY+kernel_height, iX:iX+kernel_width] += np.tensordot(grad_output[:,:,Y,X], weight, axes=[(1), (0)])
        grad_x = grad_x.reshape((batch_size, in_channel, im_height, im_width))
        return grad_x, grad_weight
"numpy.pad",
"nanograd.nn.conv_ops.get_conv1d_output_size",
"numpy.zeros_like",
"numpy.maximum",
"numpy.log",
"numpy.tanh",
"numpy.tensordot",
"numpy.zeros",
"numpy.expand_dims",
"numpy.ones",
"numpy.lib.stride_tricks.as_strided",
"nanograd.nn.conv_ops.get_conv2d_output_size",
"numpy.exp",
... | [((990, 1025), 'numpy.pad', 'np.pad', (['a', 'padding'], {'mode': '"""constant"""'}), "(a, padding, mode='constant')\n", (996, 1025), True, 'import numpy as np\n'), ((1428, 1465), 'numpy.zeros', 'np.zeros', (['(idx.shape[0], num_classes)'], {}), '((idx.shape[0], num_classes))\n', (1436, 1465), True, 'import numpy as np\n'), ((1753, 1780), 'numpy.expand_dims', 'np.expand_dims', (['input', 'axis'], {}), '(input, axis)\n', (1767, 1780), True, 'import numpy as np\n'), ((2048, 2071), 'numpy.squeeze', 'np.squeeze', (['input', 'axis'], {}), '(input, axis)\n', (2058, 2071), True, 'import numpy as np\n'), ((2179, 2212), 'numpy.expand_dims', 'np.expand_dims', (['grad_output', 'axis'], {}), '(grad_output, axis)\n', (2193, 2212), True, 'import numpy as np\n'), ((6533, 6546), 'numpy.log', 'np.log', (['input'], {}), '(input)\n', (6539, 6546), True, 'import numpy as np\n'), ((6800, 6813), 'numpy.exp', 'np.exp', (['input'], {}), '(input)\n', (6806, 6813), True, 'import numpy as np\n'), ((7675, 7695), 'numpy.maximum', 'np.maximum', (['input', '(0)'], {}), '(input, 0)\n', (7685, 7695), True, 'import numpy as np\n'), ((8261, 8275), 'numpy.tanh', 'np.tanh', (['input'], {}), '(input)\n', (8268, 8275), True, 'import numpy as np\n'), ((8737, 8800), 'nanograd.nn.conv_ops.get_conv1d_output_size', 'get_conv1d_output_size', (['signal_length', 'kernel_length', 'stride', '(0)'], {}), '(signal_length, kernel_length, stride, 0)\n', (8759, 8800), False, 'from nanograd.nn.conv_ops import get_conv1d_output_size, get_conv2d_output_size\n'), ((9013, 9153), 'numpy.lib.stride_tricks.as_strided', 'np.lib.stride_tricks.as_strided', ([], {'x': 'input', 'strides': 'strides', 'shape': '(in_channel, kernel_length, batch_size, output_length)', 'writeable': '(False)'}), '(x=input, strides=strides, shape=(in_channel,\n kernel_length, batch_size, output_length), writeable=False)\n', (9044, 9153), True, 'import numpy as np\n'), ((9310, 9359), 'numpy.tensordot', 'np.tensordot', (['cols', 'weight'], {'axes': 
'[(1, 2), (0, 1)]'}), '(cols, weight, axes=[(1, 2), (0, 1)])\n', (9322, 9359), True, 'import numpy as np\n'), ((9887, 9951), 'numpy.tensordot', 'np.tensordot', (['grad_output', 'input_reshaped'], {'axes': '[(0, 2), (0, 2)]'}), '(grad_output, input_reshaped, axes=[(0, 2), (0, 2)])\n', (9899, 9951), True, 'import numpy as np\n'), ((9970, 10044), 'numpy.zeros', 'np.zeros', (['(batch_size, in_channel, signal_length)'], {'dtype': 'grad_output.dtype'}), '((batch_size, in_channel, signal_length), dtype=grad_output.dtype)\n', (9978, 10044), True, 'import numpy as np\n'), ((10846, 10935), 'nanograd.nn.conv_ops.get_conv2d_output_size', 'get_conv2d_output_size', (['im_height', 'im_width', '(kernel_height, kernel_width)', 'stride', '(0)'], {}), '(im_height, im_width, (kernel_height, kernel_width),\n stride, 0)\n', (10868, 10935), False, 'from nanograd.nn.conv_ops import get_conv1d_output_size, get_conv2d_output_size\n'), ((11166, 11338), 'numpy.lib.stride_tricks.as_strided', 'np.lib.stride_tricks.as_strided', ([], {'x': 'input', 'shape': '(in_channel, kernel_height, kernel_width, batch_size, output_height,\n output_width)', 'strides': 'strides', 'writeable': '(False)'}), '(x=input, shape=(in_channel, kernel_height,\n kernel_width, batch_size, output_height, output_width), strides=strides,\n writeable=False)\n', (11197, 11338), True, 'import numpy as np\n'), ((11536, 11591), 'numpy.tensordot', 'np.tensordot', (['cols', 'weight'], {'axes': '[(1, 2, 3), (0, 1, 2)]'}), '(cols, weight, axes=[(1, 2, 3), (0, 1, 2)])\n', (11548, 11591), True, 'import numpy as np\n'), ((12187, 12257), 'numpy.tensordot', 'np.tensordot', (['grad_output', 'input_reshaped'], {'axes': '[(0, 2, 3), (0, 2, 3)]'}), '(grad_output, input_reshaped, axes=[(0, 2, 3), (0, 2, 3)])\n', (12199, 12257), True, 'import numpy as np\n'), ((12271, 12356), 'numpy.zeros', 'np.zeros', (['(batch_size, in_channel, im_height, im_width)'], {'dtype': 'grad_output.dtype'}), '((batch_size, in_channel, im_height, im_width), 
dtype=grad_output.dtype\n )\n', (12279, 12356), True, 'import numpy as np\n'), ((321, 331), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (327, 331), True, 'import numpy as np\n'), ((5277, 5297), 'numpy.zeros_like', 'np.zeros_like', (['input'], {}), '(input)\n', (5290, 5297), True, 'import numpy as np\n'), ((5597, 5613), 'numpy.ones', 'np.ones', (['a_shape'], {}), '(a_shape)\n', (5604, 5613), True, 'import numpy as np\n'), ((5645, 5661), 'numpy.ones', 'np.ones', (['b_shape'], {}), '(b_shape)\n', (5652, 5661), True, 'import numpy as np\n'), ((6935, 6948), 'numpy.exp', 'np.exp', (['input'], {}), '(input)\n', (6941, 6948), True, 'import numpy as np\n'), ((8974, 8996), 'numpy.array', 'np.array', (['stride_shape'], {}), '(stride_shape)\n', (8982, 8996), True, 'import numpy as np\n'), ((10332, 10387), 'numpy.tensordot', 'np.tensordot', (['grad_output[:, :, X]', 'weight'], {'axes': '[1, 0]'}), '(grad_output[:, :, X], weight, axes=[1, 0])\n', (10344, 10387), True, 'import numpy as np\n'), ((11132, 11149), 'numpy.array', 'np.array', (['strides'], {}), '(strides)\n', (11140, 11149), True, 'import numpy as np\n'), ((12745, 12803), 'numpy.tensordot', 'np.tensordot', (['grad_output[:, :, Y, X]', 'weight'], {'axes': '[1, 0]'}), '(grad_output[:, :, Y, X], weight, axes=[1, 0])\n', (12757, 12803), True, 'import numpy as np\n'), ((8415, 8429), 'numpy.tanh', 'np.tanh', (['input'], {}), '(input)\n', (8422, 8429), True, 'import numpy as np\n'), ((7510, 7523), 'numpy.log', 'np.log', (['input'], {}), '(input)\n', (7516, 7523), True, 'import numpy as np\n')] |
import enum
import streamlit as st
from gensim.models import KeyedVectors
from sklearn.decomposition import PCA
import pandas as pd
from matplotlib import pyplot as plt
import seaborn as sns
import numpy as np
from sklearn.manifold import TSNE
from random import sample
from operator import add, sub
# Pre-trained word2vec vectors (55k-term CORD-19/COVID vocabulary), loaded
# once at import time so every Streamlit rerun reuses them.
wv = KeyedVectors.load("skip_model_cord19_covid_55k.kv")
def sampled_words(sample_size=10):
    """Return `sample_size` words drawn without replacement from the vocabulary."""
    vocabulary = list(wv.index_to_key)
    return sample(vocabulary, sample_size)
def word_operation(words_to_workon, op):
    """Apply `op` (operator.add or operator.sub) to the first two words'
    normalized vectors and render the nearest-neighbour words in Streamlit."""
    op_str = " + " if op is add else " - "
    vectors = [wv.get_vector(word, norm=True) for word in words_to_workon]
    combined = op(vectors[0], vectors[1])
    neighbours = wv.similar_by_vector(combined)
    # Skip the two closest hits (typically the operand words themselves).
    pretty = [hit[0].replace("_", " ") for hit in neighbours[2:14]]
    st.subheader("{} {} {} = ".format(words_to_workon[0],
                                      op_str,
                                      words_to_workon[1]))
    st.write("** " + ", ".join(pretty) + " **")
def run():
    """Build the whole Streamlit page: corpus stats, word-vector arithmetic
    demos, and a t-SNE / PCA neighbourhood visualisation for two input words."""
    size_of_vocab = len(list(wv.index_to_key))
    size_of_dim = wv.vector_size
    st.title("Visualize Word2Vec")
    st.header("Word Vector Embedding Statics")
    # NOTE(review): "Numover" in the displayed text looks like a typo for
    # "Number" -- left as-is since it is user-facing output.
    st.write("Numover of Vocab: {:,}".format(size_of_vocab))
    st.write("Dimensions: {}".format(size_of_dim))
    st.write("Size of Corpus: ~550K")
    # --- Story 1: fixed examples of vector addition/subtraction -------------
    st.header("Story 1: Try Some Word Operations")
    words_to_workon = ['COVID', 'virus']
    word_operation(words_to_workon, op=add)
    more_words1 = ['indoor', 'PPE']
    word_operation(more_words1, op=add)
    more_words5 = ['Indian', 'vaccine']
    word_operation(more_words5, op=add)
    more_words2 = ['work', 'disease']
    word_operation(more_words2, op=sub)
    more_words3 = ['doctor', 'surgeon']
    word_operation(more_words3, op=sub)
    # Classic analogy queries via gensim's most_similar.
    result1 = wv.most_similar(positive=['man', 'nurse'],
                              negative=['woman'], topn=1)[0][0].replace("_", " ")
    result2 = wv.most_similar(positive=['woman', 'doctor'],
                              negative=['man'], topn=1)[0][0]
    result3 = wv.most_similar(positive=['Moderna', "Pfizer"],
                              negative=['vaccine'], topn=1)[0][0]
    result4 = wv.most_similar(positive=["Alpha", "USA"],
                              negative='England', topn=4)
    result4 = ", ".join([w[0] for w in result4])
    #result4 = [", ".join(w for w in result4)]
    st.subheader("Nurse - Woman + Man = {}".format(result1.capitalize()))
    st.subheader("Doctor - Man + Woman = {}".format(result2.capitalize()))
    st.subheader("Moderna - vaccine + Pfizer = {}".format(result3.capitalize()))
    st.subheader("Alpha - England + USA = {}".format(result4))
    # --- Story 2: interactive neighbourhood plots ---------------------------
    st.header("Story 2: Opposite Meaning do not equal to further distance:")
    word1 = st.text_input(label="Test Word 1 ", value="death")
    word2 = st.text_input(label="Test Word 2", value="alive")
    topn = st.slider("Top N", 5, 25)
    if not word1 or word1 == "":
        st.write("No word received")
        return
    try:
        results1 = wv.most_similar(word1, topn=topn)
    except KeyError:
        st.write("Word not in trained vocab")
        return
    # Vectors for word1 and its top-N neighbours (word1 appended last).
    sim_words_1 = [w for w, _ in results1]
    sim_words_1.append(word1)
    sim_vecs_1 = wv[sim_words_1]
    pretty = [" ".join(w.split("_")) for w in sim_words_1]
    pca2 = PCA(n_components=2)
    pca30 = PCA(n_components=30)
    projected = pca2.fit_transform(sim_vecs_1)
    # get topn word grouping for word1 and word2
    tsne2 = TSNE(n_components=2, perplexity=45, init="pca",
                 n_iter=500, early_exaggeration=16, learning_rate='auto')
    tsne_word_list = []
    num_of_topn = 200
    try:
        for w in [word1, word2]:
            sim_words = wv.most_similar(w, topn=200)
            sim_words = [each for each, _ in sim_words]
            tsne_word_list.extend(sim_words)
        tsne_word_list.extend([word1, word2])
        tsne_wvs = wv[tsne_word_list]
    except KeyError:
        st.write("Word not in trained vocab!")
        return
    # PCA down to 30 dims first, then t-SNE to 2D (standard speed-up).
    tsne2_projected = pca30.fit_transform(tsne_wvs)
    tsne2_projected = tsne2.fit_transform(tsne2_projected)
    projected_colors = np.zeros((projected.shape[0]))
    projected_colors[-1] = 1.
    colors_tsne2 = np.zeros((tsne2_projected.shape[0]))
    # NOTE(review): with 2*num_of_topn+2 points, the idx=2 slice
    # [400:600] runs past the array end and is then overwritten by the
    # two sentinel assignments below -- the colouring of the second
    # group looks unintentional; confirm the intended slices.
    for idx, _ in enumerate([word1, word2]):
        idx += 1
        color = idx*int(num_of_topn/4)
        colors_tsne2[idx*num_of_topn:(idx+1)*num_of_topn] = color
    colors_tsne2[-1] = 100.
    colors_tsne2[-2] = 200.
    #tsne_all_vocab = np.load("tnse_transform.npy")
    fig, ax = plt.subplots(1,2, figsize=(5, 3))
    # Left: t-SNE of both words' neighbourhoods; right: PCA of word1's.
    ax[0].scatter(x=tsne2_projected[:, 0], y=tsne2_projected[:, 1], s=1, c=colors_tsne2, cmap='tab10')
    ax[1].scatter(x=projected[:, 0], y=projected[:, 1], s=3, c=projected_colors)
    ax[0].tick_params(axis='x', labelsize=5)
    ax[0].tick_params(axis='y', labelsize=5)
    ax[1].tick_params(axis='x', labelsize=5)
    ax[1].tick_params(axis='y', labelsize=5)
    for i, w in enumerate(pretty):
        ax[1].annotate(w, xy=(projected[i, 0], projected[i, 1]), fontsize=5)
    # The query words themselves are the last two entries of tsne_word_list.
    for i, w in enumerate([word1, word2]):
        len_words = len(tsne_word_list)
        index = len_words - 2 + i
        coord = (tsne2_projected[index, 0], tsne2_projected[index, 1])
        ax[0].annotate(w, xy=coord, fontsize=5)
    st.pyplot(fig)
if __name__ == "__main__":
run() | [
"streamlit.text_input",
"sklearn.manifold.TSNE",
"streamlit.slider",
"streamlit.header",
"gensim.models.KeyedVectors.load",
"numpy.zeros",
"streamlit.title",
"streamlit.write",
"sklearn.decomposition.PCA",
"streamlit.pyplot",
"matplotlib.pyplot.subplots"
] | [((306, 357), 'gensim.models.KeyedVectors.load', 'KeyedVectors.load', (['"""skip_model_cord19_covid_55k.kv"""'], {}), "('skip_model_cord19_covid_55k.kv')\n", (323, 357), False, 'from gensim.models import KeyedVectors\n'), ((1067, 1084), 'streamlit.write', 'st.write', (['results'], {}), '(results)\n', (1075, 1084), True, 'import streamlit as st\n'), ((1184, 1214), 'streamlit.title', 'st.title', (['"""Visualize Word2Vec"""'], {}), "('Visualize Word2Vec')\n", (1192, 1214), True, 'import streamlit as st\n'), ((1220, 1262), 'streamlit.header', 'st.header', (['"""Word Vector Embedding Statics"""'], {}), "('Word Vector Embedding Statics')\n", (1229, 1262), True, 'import streamlit as st\n'), ((1379, 1412), 'streamlit.write', 'st.write', (['"""Size of Corpus: ~550K"""'], {}), "('Size of Corpus: ~550K')\n", (1387, 1412), True, 'import streamlit as st\n'), ((1418, 1464), 'streamlit.header', 'st.header', (['"""Story 1: Try Some Word Operations"""'], {}), "('Story 1: Try Some Word Operations')\n", (1427, 1464), True, 'import streamlit as st\n'), ((2783, 2855), 'streamlit.header', 'st.header', (['"""Story 2: Opposite Meaning do not equal to further distance:"""'], {}), "('Story 2: Opposite Meaning do not equal to further distance:')\n", (2792, 2855), True, 'import streamlit as st\n'), ((2873, 2923), 'streamlit.text_input', 'st.text_input', ([], {'label': '"""Test Word 1 """', 'value': '"""death"""'}), "(label='Test Word 1 ', value='death')\n", (2886, 2923), True, 'import streamlit as st\n'), ((2936, 2985), 'streamlit.text_input', 'st.text_input', ([], {'label': '"""Test Word 2"""', 'value': '"""alive"""'}), "(label='Test Word 2', value='alive')\n", (2949, 2985), True, 'import streamlit as st\n'), ((2997, 3022), 'streamlit.slider', 'st.slider', (['"""Top N"""', '(5)', '(25)'], {}), "('Top N', 5, 25)\n", (3006, 3022), True, 'import streamlit as st\n'), ((3437, 3456), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': '(2)'}), '(n_components=2)\n', (3440, 3456), False, 
'from sklearn.decomposition import PCA\n'), ((3469, 3489), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': '(30)'}), '(n_components=30)\n', (3472, 3489), False, 'from sklearn.decomposition import PCA\n'), ((3608, 3716), 'sklearn.manifold.TSNE', 'TSNE', ([], {'n_components': '(2)', 'perplexity': '(45)', 'init': '"""pca"""', 'n_iter': '(500)', 'early_exaggeration': '(16)', 'learning_rate': '"""auto"""'}), "(n_components=2, perplexity=45, init='pca', n_iter=500,\n early_exaggeration=16, learning_rate='auto')\n", (3612, 3716), False, 'from sklearn.manifold import TSNE\n'), ((4297, 4325), 'numpy.zeros', 'np.zeros', (['projected.shape[0]'], {}), '(projected.shape[0])\n', (4305, 4325), True, 'import numpy as np\n'), ((4382, 4416), 'numpy.zeros', 'np.zeros', (['tsne2_projected.shape[0]'], {}), '(tsne2_projected.shape[0])\n', (4390, 4416), True, 'import numpy as np\n'), ((4728, 4762), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(5, 3)'}), '(1, 2, figsize=(5, 3))\n', (4740, 4762), True, 'from matplotlib import pyplot as plt\n'), ((5504, 5518), 'streamlit.pyplot', 'st.pyplot', (['fig'], {}), '(fig)\n', (5513, 5518), True, 'import streamlit as st\n'), ((3065, 3093), 'streamlit.write', 'st.write', (['"""No word received"""'], {}), "('No word received')\n", (3073, 3093), True, 'import streamlit as st\n'), ((3205, 3242), 'streamlit.write', 'st.write', (['"""Word not in trained vocab"""'], {}), "('Word not in trained vocab')\n", (3213, 3242), True, 'import streamlit as st\n'), ((4098, 4136), 'streamlit.write', 'st.write', (['"""Word not in trained vocab!"""'], {}), "('Word not in trained vocab!')\n", (4106, 4136), True, 'import streamlit as st\n')] |
import os
from typing import Optional, Sequence, Union
import numpy as np
from tensorflow.keras.utils import to_categorical
import yaml
from transformers.tokenization_distilbert import DistilBertTokenizer
def load_training_conf(conf_path: Optional[str] = None) -> dict:
    """Read a YAML training configuration file and return it as a dict.

    Args:
        conf_path: path to the YAML file; when empty or None, falls back
            to ``src/train_conf.yml``.

    Returns:
        The parsed configuration mapping.
    """
    if not conf_path:
        conf_path = os.path.join("src", "train_conf.yml")
    with open(conf_path, "r") as conf_file:
        return yaml.full_load(conf_file)
def encode_texts(tokenizer: DistilBertTokenizer, texts: Sequence[str]) -> np.ndarray:
    """Tokenize every text with the given tokenizer and stack the id lists.

    Each text is encoded with the tokenizer's own ``max_length`` and
    ``pad_to_max_length`` settings, so all rows have equal length.

    Args:
        tokenizer: tokenizer exposing ``encode``, ``max_length`` and
            ``pad_to_max_length``.
        texts: sequence of raw input strings.

    Returns:
        2D array of token ids, one row per input text.
    """
    encoded = []
    for text in texts:
        token_ids = tokenizer.encode(
            text,
            max_length=tokenizer.max_length,
            pad_to_max_length=tokenizer.pad_to_max_length,
        )
        encoded.append(token_ids)
    return np.array(encoded)
def encode_labels(
    texts_labels: Sequence[str], unique_labels: Sequence[Union[str, int]]
) -> np.ndarray:
    """One-hot encode a sequence of labels.

    String labels are first mapped to their rank in the sorted label set;
    integer labels are used as class indices directly.

    Args:
        texts_labels: label of each sample (all strings or all ints).
        unique_labels: the full set of possible labels.

    Returns:
        2D one-hot array of shape ``(len(texts_labels), num_classes)``.
    """
    unique_labels = sorted(unique_labels)
    # if labels are strings convert to ints before one-hot encoding
    if isinstance(unique_labels[0], str):
        label_int = {label: i for i, label in enumerate(unique_labels)}
        texts_labels_encoded = np.array([label_int[label] for label in texts_labels])
        num_classes = len(unique_labels)
    else:
        # BUGFIX: the original read max(label_int.values()) here, but
        # label_int is only defined in the string branch -> NameError.
        texts_labels_encoded = np.array(texts_labels)
        num_classes = max(unique_labels) + 1
    return to_categorical(texts_labels_encoded, num_classes=num_classes)
def pip_packages(requirements_path: str = "requirements.txt") -> list:
    """Return the non-empty requirement lines from a pip requirements file.

    Args:
        requirements_path: path to the requirements file
            (defaults to ``requirements.txt`` for backward compatibility).

    Returns:
        List of requirement specifier strings with blank lines removed.
        (The original annotation claimed ``None`` but a list was returned.)
    """
    with open(requirements_path) as f:
        # splitlines() handles both \n and \r\n uniformly; the original split
        # on os.linesep, which fails on Windows where text mode yields '\n'
        lines = f.read().splitlines()
    # remove blank lines in requirements.txt
    return [line for line in lines if line != ""]
| [
"numpy.array",
"yaml.full_load",
"os.path.join"
] | [((302, 339), 'os.path.join', 'os.path.join', (['"""src"""', '"""train_conf.yml"""'], {}), "('src', 'train_conf.yml')\n", (314, 339), False, 'import os\n'), ((394, 414), 'yaml.full_load', 'yaml.full_load', (['file'], {}), '(file)\n', (408, 414), False, 'import yaml\n'), ((1149, 1203), 'numpy.array', 'np.array', (['[label_int[label] for label in texts_labels]'], {}), '([label_int[label] for label in texts_labels])\n', (1157, 1203), True, 'import numpy as np\n'), ((1245, 1267), 'numpy.array', 'np.array', (['texts_labels'], {}), '(texts_labels)\n', (1253, 1267), True, 'import numpy as np\n')] |
# encoding: utf-8
"""
tune.py
~~~~~~~
Functionality for tuning models
"""
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__created__ = "2018-05-08"
__copyright__ = "Copyright 2018 <NAME>"
__license__ = "MIT https://opensource.org/licenses/MIT"
# standard imports
# third party imports
import numpy as np
from matplotlib import pyplot as plt
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import accuracy_score, roc_auc_score
# local imports
from atnlp.core.logger import log, title_break
# globals
def grid_search(X_train, y_train, model, pname, pvals, scoring=None):
    """Perform 1D model hyper-parameter scan using 5-fold cross-validation

    Fits a ``GridSearchCV`` over a single hyper-parameter and plots the
    mean train/test scores (with std error bars) against the scanned values.

    :param X_train: training data
    :param y_train: ground truth labels
    :param model: model
    :param pname: model hyperparameter name
    :param pvals: model hyperparameter values
    :param scoring: sklearn performance metric (optional)
    :return: sklearn GridSearchCV
    """
    # configure and run the 1D grid scan
    param_grid = {pname: pvals}
    grid = GridSearchCV(model, cv=5, param_grid=param_grid,
                        return_train_score=True,
                        scoring=scoring)
    grid.fit(X_train, y_train)

    # plot test and train curves with their cross-validation spread
    scan_values = param_grid[pname]
    for split in ("test", "train"):
        plt.errorbar(scan_values,
                     grid.cv_results_['mean_%s_score' % split],
                     yerr=grid.cv_results_['std_%s_score' % split],
                     label=split)
    plt.legend()
    axes = plt.gca()
    axes.grid(True)
    for gridline in axes.get_xgridlines() + axes.get_ygridlines():
        gridline.set_linestyle('-.')
    return grid
def find_threshold(y_true, y_score, target_contamination=0.01, show=True):
    """Return binary classification probability threshold that yields contamination closest to target

    Contamination here means the fraction of selected samples
    (``y_score > threshold``) whose true label is False, i.e. 1 - precision.

    :param y_true: ground truth labels
    :param y_score: predicted class scores
    :param target_contamination: target level of contamination (1-precision)
    :param show: make plots
    :return: optimal threshold
    """
    # candidate thresholds scanned on a fixed 0.005 grid; upper bound is
    # slightly above 1 so a threshold that rejects everything is representable
    thres_min = 0.0
    thres_max = 1.02
    thres = np.arange(thres_min, thres_max, 0.005)
    # first calculate denominator (total entries passing threshold)
    tots = np.array([np.sum(y_score > v) for v in thres])
    # filter entries where nothing passes
    thres = thres[tots > 0]
    tots = tots[tots > 0]
    # if no points return a valid number, return threshold
    if len(tots) == 0:
        return thres_min
    # calculate contamination
    conts = np.array([np.sum(y_score[y_true == False] > v) for v in thres]) / tots
    # get point closest to target contamination
    idx = (np.abs(conts - target_contamination)).argmin()
    # plot
    if show:
        # recall is only needed for plotting, so it is computed lazily here
        recall = np.array([np.sum(y_score[y_true == True] > v) for v in thres]) / np.sum(y_true)
        # NOTE(review): ymin/ymax are computed but only consumed by the
        # commented-out plt.ylim call below -- confirm whether still wanted
        ymin = max(min([target_contamination - 0.1, conts[idx]]), 0.0)
        ymax = min(max([target_contamination + 0.1, conts[idx]]), 1.0)
        plt.figure(figsize=(20, 5))
        # Plot 1: probability distributions
        plt.subplot(1, 3, 1)
        bins = np.arange(0., 1.001, 0.02)
        plt.hist(y_score[y_true == True], bins=bins, label='Signal', alpha=0.5)
        plt.hist(y_score[y_true == False], bins=bins, label='Background', alpha=0.5)
        ax = plt.gca()
        ax.set_yscale('log')
        # mark the chosen threshold on the score distribution
        plt.vlines(x=thres[idx], ymin=ax.get_ylim()[0], ymax=ax.get_ylim()[1], linestyles='--')
        plt.annotate('chosen threshold', xy=(thres[idx], ax.get_ylim()[1] / 10.0),
                     verticalalignment='top', horizontalalignment='right', rotation=90)
        plt.xlabel('threshold')
        plt.legend()
        # Plot 2: threshold tuning
        plt.subplot(1, 3, 2)
        plt.hlines(y=target_contamination, xmin=thres_min, xmax=thres_max, linestyles='--')
        plt.annotate('target', xy=(thres_min, target_contamination),
                     verticalalignment='bottom')
        plt.plot(thres, conts, label='contamination', color='b')
        plt.plot(thres, recall, label='recall', color='r')
        plt.plot(thres[idx], conts[idx], 'bo')
        plt.plot(thres[idx], recall[idx], 'ro')
        # plt.ylim(ymin,ymax)
        plt.legend()
        ax = plt.gca()
        ax.grid(True)
        gridlines = ax.get_xgridlines() + ax.get_ygridlines()
        for line in gridlines:
            line.set_linestyle('-.')
        # ax.set_yscale('log')
        plt.xlabel('threshold')
        plt.ylabel('cont. / recall')
        # Plot 3: ROC
        plt.subplot(1, 3, 3)
        plt.plot(conts, recall)
        plt.xlabel('contamination')
        plt.ylabel('recall')
        plt.plot(conts[idx], recall[idx], 'bo')
        ax = plt.gca()
        ax.grid(True)
        gridlines = ax.get_xgridlines() + ax.get_ygridlines()
        for line in gridlines:
            line.set_linestyle('-.')
        plt.xlim(-0.02, 1.02)
        plt.ylim(-0.02, 1.02)
    return thres[idx]
def fit_xgb_model(alg, X, y, X_test, y_test, useTrainCV=True, cv_folds=5, early_stopping_rounds=50):
    """Fit xgboost model

    When ``useTrainCV`` is set, ``xgb.cv`` with early stopping is used to pick
    the number of boosting rounds, and ``alg`` is mutated in place
    (``n_estimators`` is overwritten) before the final fit. The training AUC
    evolution is plotted and a short accuracy/AUC report is logged.
    Returns None.

    :param alg: XGBClassifier (sklearn api class)
    :param X: training data
    :param y: training labels
    :param X_test: testing data
    :param y_test: testing labels
    :param useTrainCV: use cross validation
    :param cv_folds: number of folds for cross-validation
    :param early_stopping_rounds: minimum number of rounds before early stopping
    """
    if useTrainCV:
        import xgboost as xgb
        # tune n_estimators via cross-validated early stopping on AUC
        dtrain = xgb.DMatrix(X, label=y)
        cvresult = xgb.cv(alg.get_xgb_params(), dtrain,
                          num_boost_round=alg.get_params()['n_estimators'],
                          early_stopping_rounds=early_stopping_rounds,
                          nfold=cv_folds, metrics='auc')
        # cvresult has one row per surviving boosting round
        alg.set_params(n_estimators=cvresult.shape[0])
    # Fit the algorithm on the data
    eval_set = [(X, y), (X_test, y_test)]
    alg.fit(X, y, eval_metric='auc', eval_set=eval_set, verbose=False)
    # Predict training set:
    y_pred = alg.predict(X)
    y_prob = alg.predict_proba(X)[:, 1]
    # Print model report:
    title_break("Model Report")
    log().info("Accuracy : %.4g" % accuracy_score(y, y_pred))
    log().info("AUC Score (Train): %f" % roc_auc_score(y, y_prob))
    # per-round AUC history for both eval_set entries (0=train, 1=test)
    result = alg.evals_result()
    n = len(result['validation_0']['auc'])
    if useTrainCV:
        # overlay the cross-validation mean +/- std bands
        x = np.arange(len(cvresult))
        (ytr, eytr) = (cvresult['train-auc-mean'], cvresult['train-auc-std'])
        (yte, eyte) = (cvresult['test-auc-mean'], cvresult['test-auc-std'])
        plt.fill_between(x, ytr - eytr, ytr + eytr, facecolor='r', alpha=0.25, label='train(cv) err')
        plt.fill_between(x, yte - eyte, yte + eyte, facecolor='b', alpha=0.25, label='test(cv) err')
        plt.plot(x, ytr, color='r', linestyle='--', label='train(cv)')
        plt.plot(x, yte, color='b', linestyle='--', label='test(cv)')
    plt.plot(np.arange(n), result['validation_0']['auc'], color='r', label='train')
    plt.plot(np.arange(n), result['validation_1']['auc'], color='b', linewidth=2, label='test')
    plt.legend()
# EOF | [
"sklearn.model_selection.GridSearchCV",
"numpy.sum",
"numpy.abs",
"sklearn.metrics.accuracy_score",
"atnlp.core.logger.log",
"matplotlib.pyplot.figure",
"numpy.arange",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.fill_between",
"matplotlib.pyplot.hlines",
"matplotlib.pyplot.errorbar",
"matplot... | [((1133, 1223), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['model'], {'cv': '(5)', 'param_grid': 'params', 'return_train_score': '(True)', 'scoring': 'scoring'}), '(model, cv=5, param_grid=params, return_train_score=True,\n scoring=scoring)\n', (1145, 1223), False, 'from sklearn.model_selection import GridSearchCV\n'), ((1359, 1476), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['scan_x', "grid.cv_results_['mean_test_score']"], {'yerr': "grid.cv_results_['std_test_score']", 'label': '"""test"""'}), "(scan_x, grid.cv_results_['mean_test_score'], yerr=grid.\n cv_results_['std_test_score'], label='test')\n", (1371, 1476), True, 'from matplotlib import pyplot as plt\n'), ((1510, 1630), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['scan_x', "grid.cv_results_['mean_train_score']"], {'yerr': "grid.cv_results_['std_train_score']", 'label': '"""train"""'}), "(scan_x, grid.cv_results_['mean_train_score'], yerr=grid.\n cv_results_['std_train_score'], label='train')\n", (1522, 1630), True, 'from matplotlib import pyplot as plt\n'), ((1666, 1678), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1676, 1678), True, 'from matplotlib import pyplot as plt\n'), ((1688, 1697), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1695, 1697), True, 'from matplotlib import pyplot as plt\n'), ((2284, 2322), 'numpy.arange', 'np.arange', (['thres_min', 'thres_max', '(0.005)'], {}), '(thres_min, thres_max, 0.005)\n', (2293, 2322), True, 'import numpy as np\n'), ((6287, 6314), 'atnlp.core.logger.title_break', 'title_break', (['"""Model Report"""'], {}), "('Model Report')\n", (6298, 6314), False, 'from atnlp.core.logger import log, title_break\n'), ((7262, 7274), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (7272, 7274), True, 'from matplotlib import pyplot as plt\n'), ((3149, 3176), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 5)'}), '(figsize=(20, 5))\n', (3159, 3176), True, 'from matplotlib import pyplot as 
plt\n'), ((3230, 3250), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(1)'], {}), '(1, 3, 1)\n', (3241, 3250), True, 'from matplotlib import pyplot as plt\n'), ((3266, 3293), 'numpy.arange', 'np.arange', (['(0.0)', '(1.001)', '(0.02)'], {}), '(0.0, 1.001, 0.02)\n', (3275, 3293), True, 'import numpy as np\n'), ((3301, 3372), 'matplotlib.pyplot.hist', 'plt.hist', (['y_score[y_true == True]'], {'bins': 'bins', 'label': '"""Signal"""', 'alpha': '(0.5)'}), "(y_score[y_true == True], bins=bins, label='Signal', alpha=0.5)\n", (3309, 3372), True, 'from matplotlib import pyplot as plt\n'), ((3381, 3457), 'matplotlib.pyplot.hist', 'plt.hist', (['y_score[y_true == False]'], {'bins': 'bins', 'label': '"""Background"""', 'alpha': '(0.5)'}), "(y_score[y_true == False], bins=bins, label='Background', alpha=0.5)\n", (3389, 3457), True, 'from matplotlib import pyplot as plt\n'), ((3471, 3480), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3478, 3480), True, 'from matplotlib import pyplot as plt\n'), ((3786, 3809), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""threshold"""'], {}), "('threshold')\n", (3796, 3809), True, 'from matplotlib import pyplot as plt\n'), ((3818, 3830), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3828, 3830), True, 'from matplotlib import pyplot as plt\n'), ((3875, 3895), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(2)'], {}), '(1, 3, 2)\n', (3886, 3895), True, 'from matplotlib import pyplot as plt\n'), ((3904, 3991), 'matplotlib.pyplot.hlines', 'plt.hlines', ([], {'y': 'target_contamination', 'xmin': 'thres_min', 'xmax': 'thres_max', 'linestyles': '"""--"""'}), "(y=target_contamination, xmin=thres_min, xmax=thres_max,\n linestyles='--')\n", (3914, 3991), True, 'from matplotlib import pyplot as plt\n'), ((3996, 4088), 'matplotlib.pyplot.annotate', 'plt.annotate', (['"""target"""'], {'xy': '(thres_min, target_contamination)', 'verticalalignment': '"""bottom"""'}), "('target', xy=(thres_min, 
target_contamination),\n verticalalignment='bottom')\n", (4008, 4088), True, 'from matplotlib import pyplot as plt\n'), ((4114, 4170), 'matplotlib.pyplot.plot', 'plt.plot', (['thres', 'conts'], {'label': '"""contamination"""', 'color': '"""b"""'}), "(thres, conts, label='contamination', color='b')\n", (4122, 4170), True, 'from matplotlib import pyplot as plt\n'), ((4179, 4229), 'matplotlib.pyplot.plot', 'plt.plot', (['thres', 'recall'], {'label': '"""recall"""', 'color': '"""r"""'}), "(thres, recall, label='recall', color='r')\n", (4187, 4229), True, 'from matplotlib import pyplot as plt\n'), ((4238, 4276), 'matplotlib.pyplot.plot', 'plt.plot', (['thres[idx]', 'conts[idx]', '"""bo"""'], {}), "(thres[idx], conts[idx], 'bo')\n", (4246, 4276), True, 'from matplotlib import pyplot as plt\n'), ((4285, 4324), 'matplotlib.pyplot.plot', 'plt.plot', (['thres[idx]', 'recall[idx]', '"""ro"""'], {}), "(thres[idx], recall[idx], 'ro')\n", (4293, 4324), True, 'from matplotlib import pyplot as plt\n'), ((4363, 4375), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4373, 4375), True, 'from matplotlib import pyplot as plt\n'), ((4390, 4399), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (4397, 4399), True, 'from matplotlib import pyplot as plt\n'), ((4592, 4615), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""threshold"""'], {}), "('threshold')\n", (4602, 4615), True, 'from matplotlib import pyplot as plt\n'), ((4624, 4652), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""cont. / recall"""'], {}), "('cont. 
/ recall')\n", (4634, 4652), True, 'from matplotlib import pyplot as plt\n'), ((4684, 4704), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(3)'], {}), '(1, 3, 3)\n', (4695, 4704), True, 'from matplotlib import pyplot as plt\n'), ((4713, 4736), 'matplotlib.pyplot.plot', 'plt.plot', (['conts', 'recall'], {}), '(conts, recall)\n', (4721, 4736), True, 'from matplotlib import pyplot as plt\n'), ((4745, 4772), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""contamination"""'], {}), "('contamination')\n", (4755, 4772), True, 'from matplotlib import pyplot as plt\n'), ((4781, 4801), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""recall"""'], {}), "('recall')\n", (4791, 4801), True, 'from matplotlib import pyplot as plt\n'), ((4810, 4849), 'matplotlib.pyplot.plot', 'plt.plot', (['conts[idx]', 'recall[idx]', '"""bo"""'], {}), "(conts[idx], recall[idx], 'bo')\n", (4818, 4849), True, 'from matplotlib import pyplot as plt\n'), ((4864, 4873), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (4871, 4873), True, 'from matplotlib import pyplot as plt\n'), ((5035, 5056), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-0.02)', '(1.02)'], {}), '(-0.02, 1.02)\n', (5043, 5056), True, 'from matplotlib import pyplot as plt\n'), ((5065, 5086), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-0.02)', '(1.02)'], {}), '(-0.02, 1.02)\n', (5073, 5086), True, 'from matplotlib import pyplot as plt\n'), ((5670, 5693), 'xgboost.DMatrix', 'xgb.DMatrix', (['X'], {'label': 'y'}), '(X, label=y)\n', (5681, 5693), True, 'import xgboost as xgb\n'), ((6740, 6837), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['x', '(ytr - eytr)', '(ytr + eytr)'], {'facecolor': '"""r"""', 'alpha': '(0.25)', 'label': '"""train(cv) err"""'}), "(x, ytr - eytr, ytr + eytr, facecolor='r', alpha=0.25,\n label='train(cv) err')\n", (6756, 6837), True, 'from matplotlib import pyplot as plt\n'), ((6842, 6938), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['x', '(yte - eyte)', '(yte + eyte)'], 
{'facecolor': '"""b"""', 'alpha': '(0.25)', 'label': '"""test(cv) err"""'}), "(x, yte - eyte, yte + eyte, facecolor='b', alpha=0.25,\n label='test(cv) err')\n", (6858, 6938), True, 'from matplotlib import pyplot as plt\n'), ((6943, 7005), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'ytr'], {'color': '"""r"""', 'linestyle': '"""--"""', 'label': '"""train(cv)"""'}), "(x, ytr, color='r', linestyle='--', label='train(cv)')\n", (6951, 7005), True, 'from matplotlib import pyplot as plt\n'), ((7014, 7075), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'yte'], {'color': '"""b"""', 'linestyle': '"""--"""', 'label': '"""test(cv)"""'}), "(x, yte, color='b', linestyle='--', label='test(cv)')\n", (7022, 7075), True, 'from matplotlib import pyplot as plt\n'), ((7090, 7102), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (7099, 7102), True, 'import numpy as np\n'), ((7174, 7186), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (7183, 7186), True, 'import numpy as np\n'), ((2413, 2432), 'numpy.sum', 'np.sum', (['(y_score > v)'], {}), '(y_score > v)\n', (2419, 2432), True, 'import numpy as np\n'), ((2829, 2865), 'numpy.abs', 'np.abs', (['(conts - target_contamination)'], {}), '(conts - target_contamination)\n', (2835, 2865), True, 'import numpy as np\n'), ((2983, 2997), 'numpy.sum', 'np.sum', (['y_true'], {}), '(y_true)\n', (2989, 2997), True, 'import numpy as np\n'), ((6319, 6324), 'atnlp.core.logger.log', 'log', ([], {}), '()\n', (6322, 6324), False, 'from atnlp.core.logger import log, title_break\n'), ((6350, 6375), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y', 'y_pred'], {}), '(y, y_pred)\n', (6364, 6375), False, 'from sklearn.metrics import accuracy_score, roc_auc_score\n'), ((6381, 6386), 'atnlp.core.logger.log', 'log', ([], {}), '()\n', (6384, 6386), False, 'from atnlp.core.logger import log, title_break\n'), ((6418, 6442), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['y', 'y_prob'], {}), '(y, y_prob)\n', (6431, 6442), False, 'from sklearn.metrics 
import accuracy_score, roc_auc_score\n'), ((2708, 2744), 'numpy.sum', 'np.sum', (['(y_score[y_true == False] > v)'], {}), '(y_score[y_true == False] > v)\n', (2714, 2744), True, 'import numpy as np\n'), ((2928, 2963), 'numpy.sum', 'np.sum', (['(y_score[y_true == True] > v)'], {}), '(y_score[y_true == True] > v)\n', (2934, 2963), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: © 2021 Massachusetts Institute of Technology.
# SPDX-FileCopyrightText: © 2021 <NAME> <<EMAIL>>
# NOTICE: authors should document their contributions in concisely in NOTICE
# with details inline in source files, comments, and docstrings.
"""
"""
import sys
import numpy as np
import math
from wavestate import declarative
from numpy.polynomial.hermite import hermval
from scipy.special import eval_genlaguerre
from wavestate.utilities.np import matrix_stack, matrix_stack_id
from wavestate.utilities.mpl import (
# generate_stacked_plot_ax,
mplfigB,
asavefig,
)
# asavefig.formats.png.use = True
from scipy.special import (
eval_hermite,
factorial,
)
from numpy.polynomial.hermite import hermval
c_m_s = 299792458
def hermite_exp(n, x):
    """Evaluate the n-th Hermite function (normalized Hermite polynomial
    times the Gaussian weight) at the points ``x``.

    Uses the three-term recurrence with a running log-rescaling so that
    intermediate values stay representable for large ``n``.

    Based on:
    https://www.numbercrunch.de/blog/2014/08/calculating-the-hermite-functions/
    """
    norm = np.pi ** (-0.25)
    if n == 0:
        return np.ones_like(x) * norm * np.exp(-(x ** 2) / 2)
    if n == 1:
        return np.sqrt(2.0) * x * np.exp(-(x ** 2) / 2) * norm
    # three-term recurrence on the weightless (Gaussian-free) functions
    prev = np.ones_like(x) * norm
    curr = np.sqrt(2.0) * x * norm
    log_total = np.zeros_like(x)
    for k in range(2, n + 1):
        nxt = np.sqrt(2.0 / k) * x * curr - np.sqrt((k - 1.0) / k) * prev
        prev, curr = curr, nxt
        # fold the magnitude into an integer log so repeated rescaling keeps
        # the recurrence within a safe floating-point range
        shift = np.log(abs(nxt) + 1e-14).round()
        rescale = np.exp(-shift)
        nxt = nxt * rescale
        curr = curr * rescale
        prev = prev * rescale
        log_total += shift
    # reapply the accumulated scale together with the Gaussian weight
    return nxt * np.exp(-(x ** 2) / 2 + log_total)
def p_fock(n, p):
    """Momentum-space wavefunction of the n-th Fock state.

    Args:
        n (int): Fock index
        p (array): momentum values

    Returns:
        array: values of the n-th Fock wavefunction at ``p``
    """
    if n >= 100:
        # direct evaluation overflows for large n; use the log-rescaled recurrence
        return hermite_exp(n, p)
    return (
        (1 / np.sqrt((2 ** n) * factorial(n)))
        * (np.pi ** -0.25)
        * np.exp(-(p ** 2) / 2)
        * eval_hermite(n, p)
    )
def q_fock(n, q):
    """Returns the Fock position wavefunctions defined over q space.

    Args:
        n (int): Fock index
        q (array): position values

    Returns:
        fock (array): nth Fock state wavefunction

    code from QuTiP
    """
    if n >= 100:
        # eval_hermite/factorial overflow for large n; fall back to the
        # numerically stable log-rescaled recurrence
        return hermite_exp(n, q)
    # Employs the scipy.special eval_hermite function
    return (
        (1 / np.sqrt((2 ** n) * factorial(n)))
        * (np.pi ** -0.25)
        * np.exp(-(q ** 2) / 2)
        * eval_hermite(n, q)
    )
def basis_fock2q(n, q):
    """Change-of-basis matrix from the Fock basis to the position grid.

    Args:
        n (int): number of Fock states (columns)
        q (array): position grid (rows)

    Returns:
        array: shape ``(len(q), n)``; column k is the k-th Fock
        wavefunction sampled on ``q``.
    """
    basis = np.empty((len(q), n))
    for index in range(n):
        basis[:, index] = q_fock(n=index, q=q)
    return basis
def raise_fock(n):
    """n x n matrix of the raising (creation) operator truncated to n Fock states."""
    amplitudes = np.sqrt(np.arange(1, n))
    return np.diagflat(amplitudes, -1)
def lower_fock(n):
    """n x n matrix of the lowering (annihilation) operator truncated to n Fock states."""
    amplitudes = np.sqrt(np.arange(1, n))
    return np.diagflat(amplitudes, 1)
def linspace_clopen(half_width, N):
    """Evenly spaced grid on the clopen interval [-half_width, half_width).

    The step is half_width / (N // 2), so index N//2 lands exactly on zero.
    """
    step = half_width / (N // 2)
    return np.arange(N) * step - half_width
def angle(a, shift=None, deg=False):
    """Complex argument of ``a`` wrapped into the window [shift, shift + period).

    Args:
        a: complex value(s)
        shift: lower edge of the wrapping window; defaults to -3*pi/4
            radians (or -135 when ``deg`` is True)
        deg: work in degrees instead of radians

    Returns:
        wrapped angle(s)
    """
    period = 360 if deg else np.pi * 2
    if shift is None:
        shift = -135 if deg else -3 * np.pi / 4
    return (np.angle(a, deg=deg) - shift) % period + shift
def q2p(psi, q, extend=True):
    """
    FFT between q and p space. Assumes q is centered around 0 and evenly spaced

    With ``extend=True`` the wavefunction is zero-padded to twice its length
    before transforming, which doubles the momentum-space resolution.
    Returns the pair ``(psi_p, p)``.
    """
    n = len(psi)
    dq = q[1] - q[0]
    if not extend:
        psi_p = (
            np.fft.fftshift(np.fft.fft(np.fft.ifftshift(psi))) * dq / (np.pi * 2) ** 0.5
        )
        p = linspace_clopen(np.pi / dq, len(q))
        return psi_p, p
    # split psi around its midpoint and insert n zeros between the halves
    padded = np.concatenate([psi[n // 2 :], np.zeros(n), psi[: n // 2]])
    spectrum = np.fft.fft(padded) * dq / (np.pi * 2) ** 0.5
    psi_p = np.concatenate([spectrum[-n // 2 :], spectrum[: n // 2]])
    p = linspace_clopen(np.pi / dq / 2, len(q))
    return psi_p, p
def q2wigner_fft(psi, q):
    """
    from qutip.wigner._wigner_fft

    Compute the Wigner function of the pure state ``psi`` sampled on the
    position grid ``q`` via the FFT of a Toeplitz correlation matrix.

    :param psi: length-n wavefunction values on ``q``
    :param q: length-n uniform position grid
    :return: tuple ``(w, p)`` of the Wigner array and the momentum grid
        (note: ``p`` is returned equal to ``q``, see comment below)
    """
    import scipy.linalg as la

    n = 2 * len(psi)
    # zero-padded copies: r1 is the reversed conjugate, r2 the plain state
    r1 = np.concatenate(((psi[::-1].conj()), np.zeros(n // 2)), axis=0)
    r2 = np.concatenate((psi, np.zeros(n // 2)), axis=0)
    # Toeplitz product builds the shifted autocorrelation of psi
    w = la.toeplitz(np.zeros(n // 2), r1) * np.flipud(la.toeplitz(np.zeros(n // 2), r2))
    # rotate columns so the zero-shift term sits at the front before the FFT
    w = np.concatenate((w[:, n // 2 : n], w[:, 0 : n // 2]), axis=1)
    w = np.fft.fft(w)
    w = np.real(np.concatenate((w[:, 3 * n // 4 : n + 1], w[:, 0 : n // 4]), axis=1))
    p = np.arange(-n / 4, n / 4) * np.pi / (n * (q[1] - q[0]))
    # NOTE(review): the FFT-derived momentum grid above is immediately
    # overwritten with q -- presumably deliberate so w is sampled on the same
    # grid as the input; confirm against qutip.wigner._wigner_fft
    p = q
    w = w / (p[1] - p[0]) / n
    return w, p
def rot45(rho, method="half"):
    """Shear ("rotate by 45 degrees") a density matrix so its diagonals
    become rows of the output array.

    The k-th super/sub-diagonal of ``rho`` is written into row ``mid +/- k``
    of the result; odd diagonals are split half/half between two adjacent
    column offsets.

    :param rho: square (n, n) matrix
    :param method: 'half' (n rows, half the diagonals), 'full' (2n rows,
        all diagonals around the centre), or 'hermitian' (n rows, upper
        diagonals only, for use with a Hermitian FFT)
    :return: complex sheared array
    :raises RuntimeError: on an unrecognized ``method``
    """
    n = rho.shape[-1]

    def update(k):
        # write the -k and +k diagonals into rows mid+k and mid-k of rho2
        k2 = k // 2
        # NOTE(review): x is assigned but never used, and both halves below
        # read np.diagonal(rho, k) (never -k) -- looks suspicious, confirm
        # against the reference implementation
        x = np.diagonal(rho, -k)
        if k % 2 == 1:
            # odd offset: split the diagonal across two column positions
            rho2[mid + k, k2 : -k2 - 1] = np.diagonal(rho, k) / 2
            if k2 == 0:
                rho2[mid + k, k2 + 1 :] += np.diagonal(rho, k) / 2
            else:
                rho2[mid + k, k2 + 1 : -k2] += np.diagonal(rho, k) / 2
        else:
            rho2[mid + k, k2:-k2] = np.diagonal(rho, k)
        x = np.diagonal(rho, k)
        if k % 2 == 1:
            rho2[mid - k, k2 : -k2 - 1] = np.diagonal(rho, k) / 2
            if k2 == 0:
                rho2[mid - k, k2 + 1 :] += np.diagonal(rho, k) / 2
            else:
                rho2[mid - k, k2 + 1 : -k2] += np.diagonal(rho, k) / 2
        else:
            rho2[mid - k, k2:-k2] = np.diagonal(rho, k)

    if method == "half":
        # n output rows centred on the main diagonal; only n//2 diagonals kept
        rho2 = np.zeros((n, n), dtype=complex)
        mid = n // 2
        rho2[mid, :] = np.diagonal(rho)
        for k in range(1, n // 2):
            update(k)
    elif method == "full":
        # 2n output rows: every diagonal of rho gets its own row
        n2 = 2 * n
        rho2 = np.zeros((n2, n), dtype=complex)
        mid = n
        rho2[mid, :] = np.diagonal(rho)
        for k in range(1, n):
            update(k)
    elif method == "hermitian":
        # only the upper diagonals are stored (row index k, no mirror rows)
        n2 = 2 * n
        rho2 = np.zeros((n, n), dtype=complex)
        rho2[0, :] = np.diagonal(rho)
        for k in range(1, n):
            k2 = k // 2
            x = np.diagonal(rho, k)
            if k % 2 == 1:
                rho2[k, k2 : -k2 - 1] = np.diagonal(rho, k) / 2
                if k2 == 0:
                    rho2[k, k2 + 1 :] += np.diagonal(rho, k) / 2
                else:
                    rho2[k, k2 + 1 : -k2] += np.diagonal(rho, k) / 2
            else:
                rho2[k, k2:-k2] = np.diagonal(rho, k)
    else:
        raise RuntimeError("Unrecognized Method")
    return rho2
def psiq2wigner_fft(psi, q, method="full"):
    """Wigner function of a pure state given its position wavefunction.

    Builds the rank-one density matrix |psi><psi| and delegates to
    ``rhoq2wigner_fft``.
    """
    ket = psi.reshape(-1, 1)
    bra = psi.conjugate().reshape(1, -1)
    return rhoq2wigner_fft(ket * bra, q, method=method)
def rhoq2wigner_fft(rho, q, method="hermitian"):
    """Wigner function of a density matrix ``rho`` sampled on position grid ``q``.

    The matrix is sheared with ``rot45`` so that a Fourier transform along one
    axis yields the Wigner function.

    :param rho: square (n, n) density matrix in the position basis
    :param q: length-n uniform position grid
    :param method: 'full', 'half' or 'hermitian' shear/FFT variant
    :return: tuple ``(w, p)`` of the real Wigner array and momentum grid
    :raises RuntimeError: if ``method`` is not one of the supported variants
    """
    assert rho.shape[0] == rho.shape[1]
    n = rho.shape[0]
    n2 = 2 * n
    if method == "full":
        w = rot45(rho, method="full")
        w = np.fft.ifftshift(w, axes=0)
        w = np.fft.fft(w, axis=0)
        w = np.real(np.concatenate((w[3 * n2 // 4 :, :], w[: n2 // 4, :]), axis=0))
        p = np.arange(-n / 2, n / 2) * np.pi / (n * (q[1] - q[0]))
    elif method == "half":
        w = rot45(rho, method="half")
        w = np.fft.ifftshift(w, axes=0)
        w = np.fft.fft(w, axis=0)
        w = np.fft.fftshift(w, axes=0)
        w = np.real(w)
        # half shear keeps only every other diagonal, hence the factor 2 here
        p = np.arange(-n / 2, n / 2) * 2 * np.pi / (n * (q[1] - q[0]))
    elif method == "hermitian":
        w = rot45(rho, method="hermitian")
        # hfft exploits Hermitian symmetry so only upper diagonals are needed
        w = np.fft.hfft(w, axis=0)
        w = np.concatenate((w[3 * n2 // 4 :, :], w[: n2 // 4, :]), axis=0)
        p = np.arange(-n / 2, n / 2) * np.pi / (n * (q[1] - q[0]))
    else:
        # BUGFIX: an unknown method previously fell through to an
        # UnboundLocalError at the return; fail loudly like rot45 does
        raise RuntimeError("Unrecognized Method")
    return w, p
def gkp(q, D, mu=0, s=None):
    """Approximate GKP logical (0 or 1) wavefunction on the grid ``q``.

    The state is a finite comb of Gaussian-squeezed peaks under a Gaussian
    envelope; it is not explicitly renormalized here (the prefactors give a
    good approximation when D is small).

    Args:
        q (float array): wavefunction domain
        D (float): width of the Gaussian envelope and of the squeezed
            states in the superposition
        mu (0 or 1): logical value for the GKP state (default 0)
        s (int): maximum number of tines in the comb; by default chosen
            large enough to cover the sampled q range

    Returns:
        gkp (array): array of GKP wavefunction values.

    From eq (35) in PhysRevA.64.012310
    """
    if s is None:
        s = 2 * (int(q[-1]) + 1)
    # logical 1 needs one extra tine on the positive side
    upper = s // 2 + 1 if mu == 0 else s // 2 + 2
    peak_indices = np.arange(-s // 2, upper)
    wavefunction = np.zeros(len(q))
    # sum the envelope-weighted squeezed peaks of the comb
    for peak in peak_indices:
        wavefunction += (
            np.exp(-2 * np.pi * D ** 2 * (peak + mu / 2) ** 2)
            * np.exp(-((q - (2 * peak + mu) * np.sqrt(np.pi)) ** 2) / (D ** 2) / 2)
            * (4 / np.pi) ** 0.25
        )
    return wavefunction
| [
"numpy.fft.ifftshift",
"scipy.special.factorial",
"numpy.zeros_like",
"numpy.ones_like",
"scipy.special.eval_hermite",
"numpy.fft.fft",
"numpy.angle",
"numpy.zeros",
"numpy.fft.fftshift",
"numpy.arange",
"numpy.exp",
"numpy.real",
"numpy.fft.hfft",
"numpy.sqrt",
"numpy.concatenate",
"n... | [((1274, 1290), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (1287, 1290), True, 'import numpy as np\n'), ((4262, 4318), 'numpy.concatenate', 'np.concatenate', (['(w[:, n // 2:n], w[:, 0:n // 2])'], {'axis': '(1)'}), '((w[:, n // 2:n], w[:, 0:n // 2]), axis=1)\n', (4276, 4318), True, 'import numpy as np\n'), ((4331, 4344), 'numpy.fft.fft', 'np.fft.fft', (['w'], {}), '(w)\n', (4341, 4344), True, 'import numpy as np\n'), ((1171, 1186), 'numpy.ones_like', 'np.ones_like', (['x'], {}), '(x)\n', (1183, 1186), True, 'import numpy as np\n'), ((1500, 1518), 'numpy.exp', 'np.exp', (['(-log_scale)'], {}), '(-log_scale)\n', (1506, 1518), True, 'import numpy as np\n'), ((1657, 1692), 'numpy.exp', 'np.exp', (['(-x ** 2 / 2 + sum_log_scale)'], {}), '(-x ** 2 / 2 + sum_log_scale)\n', (1663, 1692), True, 'import numpy as np\n'), ((4361, 4425), 'numpy.concatenate', 'np.concatenate', (['(w[:, 3 * n // 4:n + 1], w[:, 0:n // 4])'], {'axis': '(1)'}), '((w[:, 3 * n // 4:n + 1], w[:, 0:n // 4]), axis=1)\n', (4375, 4425), True, 'import numpy as np\n'), ((4657, 4677), 'numpy.diagonal', 'np.diagonal', (['rho', '(-k)'], {}), '(rho, -k)\n', (4668, 4677), True, 'import numpy as np\n'), ((5029, 5048), 'numpy.diagonal', 'np.diagonal', (['rho', 'k'], {}), '(rho, k)\n', (5040, 5048), True, 'import numpy as np\n'), ((5429, 5460), 'numpy.zeros', 'np.zeros', (['(n, n)'], {'dtype': 'complex'}), '((n, n), dtype=complex)\n', (5437, 5460), True, 'import numpy as np\n'), ((5505, 5521), 'numpy.diagonal', 'np.diagonal', (['rho'], {}), '(rho)\n', (5516, 5521), True, 'import numpy as np\n'), ((6802, 6829), 'numpy.fft.ifftshift', 'np.fft.ifftshift', (['w'], {'axes': '(0)'}), '(w, axes=0)\n', (6818, 6829), True, 'import numpy as np\n'), ((6842, 6863), 'numpy.fft.fft', 'np.fft.fft', (['w'], {'axis': '(0)'}), '(w, axis=0)\n', (6852, 6863), True, 'import numpy as np\n'), ((8214, 8244), 'numpy.arange', 'np.arange', (['(-s // 2)', '(s // 2 + 1)'], {}), '(-s // 2, s // 2 + 1)\n', (8223, 8244), 
True, 'import numpy as np\n'), ((8271, 8301), 'numpy.arange', 'np.arange', (['(-s // 2)', '(s // 2 + 2)'], {}), '(-s // 2, s // 2 + 2)\n', (8280, 8301), True, 'import numpy as np\n'), ((1047, 1066), 'numpy.exp', 'np.exp', (['(-x ** 2 / 2)'], {}), '(-x ** 2 / 2)\n', (1053, 1066), True, 'import numpy as np\n'), ((1218, 1230), 'numpy.sqrt', 'np.sqrt', (['(2.0)'], {}), '(2.0)\n', (1225, 1230), True, 'import numpy as np\n'), ((1880, 1898), 'scipy.special.eval_hermite', 'eval_hermite', (['n', 'p'], {}), '(n, p)\n', (1892, 1898), False, 'from scipy.special import eval_hermite, factorial\n'), ((2447, 2465), 'scipy.special.eval_hermite', 'eval_hermite', (['n', 'q'], {}), '(n, q)\n', (2459, 2465), False, 'from scipy.special import eval_hermite, factorial\n'), ((2709, 2724), 'numpy.arange', 'np.arange', (['(1)', 'n'], {}), '(1, n)\n', (2718, 2724), True, 'import numpy as np\n'), ((2781, 2796), 'numpy.arange', 'np.arange', (['(1)', 'n'], {}), '(1, n)\n', (2790, 2796), True, 'import numpy as np\n'), ((2857, 2869), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (2866, 2869), True, 'import numpy as np\n'), ((4081, 4097), 'numpy.zeros', 'np.zeros', (['(n // 2)'], {}), '(n // 2)\n', (4089, 4097), True, 'import numpy as np\n'), ((4138, 4154), 'numpy.zeros', 'np.zeros', (['(n // 2)'], {}), '(n // 2)\n', (4146, 4154), True, 'import numpy as np\n'), ((4185, 4201), 'numpy.zeros', 'np.zeros', (['(n // 2)'], {}), '(n // 2)\n', (4193, 4201), True, 'import numpy as np\n'), ((4439, 4463), 'numpy.arange', 'np.arange', (['(-n / 4)', '(n / 4)'], {}), '(-n / 4, n / 4)\n', (4448, 4463), True, 'import numpy as np\n'), ((4997, 5016), 'numpy.diagonal', 'np.diagonal', (['rho', 'k'], {}), '(rho, k)\n', (5008, 5016), True, 'import numpy as np\n'), ((5368, 5387), 'numpy.diagonal', 'np.diagonal', (['rho', 'k'], {}), '(rho, k)\n', (5379, 5387), True, 'import numpy as np\n'), ((5640, 5672), 'numpy.zeros', 'np.zeros', (['(n2, n)'], {'dtype': 'complex'}), '((n2, n), dtype=complex)\n', (5648, 5672), True, 
'import numpy as np\n'), ((5712, 5728), 'numpy.diagonal', 'np.diagonal', (['rho'], {}), '(rho)\n', (5723, 5728), True, 'import numpy as np\n'), ((6884, 6944), 'numpy.concatenate', 'np.concatenate', (['(w[3 * n2 // 4:, :], w[:n2 // 4, :])'], {'axis': '(0)'}), '((w[3 * n2 // 4:, :], w[:n2 // 4, :]), axis=0)\n', (6898, 6944), True, 'import numpy as np\n'), ((7092, 7119), 'numpy.fft.ifftshift', 'np.fft.ifftshift', (['w'], {'axes': '(0)'}), '(w, axes=0)\n', (7108, 7119), True, 'import numpy as np\n'), ((7132, 7153), 'numpy.fft.fft', 'np.fft.fft', (['w'], {'axis': '(0)'}), '(w, axis=0)\n', (7142, 7153), True, 'import numpy as np\n'), ((7166, 7192), 'numpy.fft.fftshift', 'np.fft.fftshift', (['w'], {'axes': '(0)'}), '(w, axes=0)\n', (7181, 7192), True, 'import numpy as np\n'), ((7205, 7215), 'numpy.real', 'np.real', (['w'], {}), '(w)\n', (7212, 7215), True, 'import numpy as np\n'), ((1010, 1025), 'numpy.ones_like', 'np.ones_like', (['x'], {}), '(x)\n', (1022, 1025), True, 'import numpy as np\n'), ((1118, 1137), 'numpy.exp', 'np.exp', (['(-x ** 2 / 2)'], {}), '(-x ** 2 / 2)\n', (1124, 1137), True, 'import numpy as np\n'), ((1366, 1388), 'numpy.sqrt', 'np.sqrt', (['((i - 1.0) / i)'], {}), '((i - 1.0) / i)\n', (1373, 1388), True, 'import numpy as np\n'), ((1844, 1863), 'numpy.exp', 'np.exp', (['(-p ** 2 / 2)'], {}), '(-p ** 2 / 2)\n', (1850, 1863), True, 'import numpy as np\n'), ((2411, 2430), 'numpy.exp', 'np.exp', (['(-q ** 2 / 2)'], {}), '(-q ** 2 / 2)\n', (2417, 2430), True, 'import numpy as np\n'), ((3136, 3156), 'numpy.angle', 'np.angle', (['a'], {'deg': 'deg'}), '(a, deg=deg)\n', (3144, 3156), True, 'import numpy as np\n'), ((3478, 3494), 'numpy.fft.fft', 'np.fft.fft', (['psi2'], {}), '(psi2)\n', (3488, 3494), True, 'import numpy as np\n'), ((4231, 4247), 'numpy.zeros', 'np.zeros', (['(n // 2)'], {}), '(n // 2)\n', (4239, 4247), True, 'import numpy as np\n'), ((4743, 4762), 'numpy.diagonal', 'np.diagonal', (['rho', 'k'], {}), '(rho, k)\n', (4754, 4762), True, 'import 
numpy as np\n'), ((5114, 5133), 'numpy.diagonal', 'np.diagonal', (['rho', 'k'], {}), '(rho, k)\n', (5125, 5133), True, 'import numpy as np\n'), ((5847, 5878), 'numpy.zeros', 'np.zeros', (['(n, n)'], {'dtype': 'complex'}), '((n, n), dtype=complex)\n', (5855, 5878), True, 'import numpy as np\n'), ((5900, 5916), 'numpy.diagonal', 'np.diagonal', (['rho'], {}), '(rho)\n', (5911, 5916), True, 'import numpy as np\n'), ((6960, 6984), 'numpy.arange', 'np.arange', (['(-n / 2)', '(n / 2)'], {}), '(-n / 2, n / 2)\n', (6969, 6984), True, 'import numpy as np\n'), ((7374, 7396), 'numpy.fft.hfft', 'np.fft.hfft', (['w'], {'axis': '(0)'}), '(w, axis=0)\n', (7385, 7396), True, 'import numpy as np\n'), ((7409, 7469), 'numpy.concatenate', 'np.concatenate', (['(w[3 * n2 // 4:, :], w[:n2 // 4, :])'], {'axis': '(0)'}), '((w[3 * n2 // 4:, :], w[:n2 // 4, :]), axis=0)\n', (7423, 7469), True, 'import numpy as np\n'), ((8676, 8727), 'numpy.exp', 'np.exp', (['(-2 * np.pi * D ** 2 * (s_val + mu / 2) ** 2)'], {}), '(-2 * np.pi * D ** 2 * (s_val + mu / 2) ** 2)\n', (8682, 8727), True, 'import numpy as np\n'), ((1099, 1111), 'numpy.sqrt', 'np.sqrt', (['(2.0)'], {}), '(2.0)\n', (1106, 1111), True, 'import numpy as np\n'), ((1335, 1351), 'numpy.sqrt', 'np.sqrt', (['(2.0 / i)'], {}), '(2.0 / i)\n', (1342, 1351), True, 'import numpy as np\n'), ((4834, 4853), 'numpy.diagonal', 'np.diagonal', (['rho', 'k'], {}), '(rho, k)\n', (4845, 4853), True, 'import numpy as np\n'), ((4923, 4942), 'numpy.diagonal', 'np.diagonal', (['rho', 'k'], {}), '(rho, k)\n', (4934, 4942), True, 'import numpy as np\n'), ((5205, 5224), 'numpy.diagonal', 'np.diagonal', (['rho', 'k'], {}), '(rho, k)\n', (5216, 5224), True, 'import numpy as np\n'), ((5294, 5313), 'numpy.diagonal', 'np.diagonal', (['rho', 'k'], {}), '(rho, k)\n', (5305, 5313), True, 'import numpy as np\n'), ((5987, 6006), 'numpy.diagonal', 'np.diagonal', (['rho', 'k'], {}), '(rho, k)\n', (5998, 6006), True, 'import numpy as np\n'), ((3774, 3795), 
'numpy.fft.ifftshift', 'np.fft.ifftshift', (['psi'], {}), '(psi)\n', (3790, 3795), True, 'import numpy as np\n'), ((6334, 6353), 'numpy.diagonal', 'np.diagonal', (['rho', 'k'], {}), '(rho, k)\n', (6345, 6353), True, 'import numpy as np\n'), ((7228, 7252), 'numpy.arange', 'np.arange', (['(-n / 2)', '(n / 2)'], {}), '(-n / 2, n / 2)\n', (7237, 7252), True, 'import numpy as np\n'), ((7484, 7508), 'numpy.arange', 'np.arange', (['(-n / 2)', '(n / 2)'], {}), '(-n / 2, n / 2)\n', (7493, 7508), True, 'import numpy as np\n'), ((6074, 6093), 'numpy.diagonal', 'np.diagonal', (['rho', 'k'], {}), '(rho, k)\n', (6085, 6093), True, 'import numpy as np\n'), ((1784, 1796), 'scipy.special.factorial', 'factorial', (['n'], {}), '(n)\n', (1793, 1796), False, 'from scipy.special import eval_hermite, factorial\n'), ((2351, 2363), 'scipy.special.factorial', 'factorial', (['n'], {}), '(n)\n', (2360, 2363), False, 'from scipy.special import eval_hermite, factorial\n'), ((6167, 6186), 'numpy.diagonal', 'np.diagonal', (['rho', 'k'], {}), '(rho, k)\n', (6178, 6186), True, 'import numpy as np\n'), ((6258, 6277), 'numpy.diagonal', 'np.diagonal', (['rho', 'k'], {}), '(rho, k)\n', (6269, 6277), True, 'import numpy as np\n'), ((8775, 8789), 'numpy.sqrt', 'np.sqrt', (['np.pi'], {}), '(np.pi)\n', (8782, 8789), True, 'import numpy as np\n')] |
"""
potentials -
"""
import numpy as np
import atsim.potentials.potentialforms as pairpot
def morse(r, params):
Ed = params[0]
re = params[1]
drs = params[2]
gam = 1.0/(drs*re)
E = map(pairpot.morse(gam, re, Ed), r)
return np.array(E)
| [
"atsim.potentials.potentialforms.morse",
"numpy.array"
] | [((251, 262), 'numpy.array', 'np.array', (['E'], {}), '(E)\n', (259, 262), True, 'import numpy as np\n'), ((209, 235), 'atsim.potentials.potentialforms.morse', 'pairpot.morse', (['gam', 're', 'Ed'], {}), '(gam, re, Ed)\n', (222, 235), True, 'import atsim.potentials.potentialforms as pairpot\n')] |
import numpy as np
def document_vectorizer(corpus, model, num_features):
"""
This averages all the word embeddings in the tweet.
This function averages all the word embeddings in the tweet.
corpus: String text corpus
Model: Model to use
num_features: Int, the number of features to use
"""
vocabulary = set(model.wv.index_to_key)
def average_word_vectors(words, model, vocabulary, num_features):
feature_vector = np.zeros((num_features,), dtype="float64")
nwords = 0.
for word in words:
if word in vocabulary:
nwords += 1
feature_vector = np.add(feature_vector, model.wv[word])
if nwords:
feature_vector = np.divide(feature_vector, nwords)
return feature_vector
features = [average_word_vectors(tokenized_sentence, model, vocabulary, num_features)
for tokenized_sentence in corpus]
return np.array(features)
def document_vectorizer_glove(corpus, model, num_features):
"""
This function averages all the word embeddings based on the glove model.
corpus: String text corpus
Model: Model to use
num_features: Int, the number of features to use
returns: numpy array
"""
vocabulary = set(model.index_to_key)
def average_word_vectors(words, model, vocabulary, num_features):
feature_vector = np.zeros((num_features,), dtype="float64")
nwords = 0.
for word in words:
if word in vocabulary:
nwords += 1
feature_vector = np.add(feature_vector, model[word])
if nwords:
feature_vector = np.divide(feature_vector, nwords)
return feature_vector
features = [average_word_vectors(tokenized_sentence, model, vocabulary, num_features)
for tokenized_sentence in corpus]
return np.array(features) | [
"numpy.zeros",
"numpy.divide",
"numpy.add",
"numpy.array"
] | [((994, 1012), 'numpy.array', 'np.array', (['features'], {}), '(features)\n', (1002, 1012), True, 'import numpy as np\n'), ((1963, 1981), 'numpy.array', 'np.array', (['features'], {}), '(features)\n', (1971, 1981), True, 'import numpy as np\n'), ((474, 516), 'numpy.zeros', 'np.zeros', (['(num_features,)'], {'dtype': '"""float64"""'}), "((num_features,), dtype='float64')\n", (482, 516), True, 'import numpy as np\n'), ((1446, 1488), 'numpy.zeros', 'np.zeros', (['(num_features,)'], {'dtype': '"""float64"""'}), "((num_features,), dtype='float64')\n", (1454, 1488), True, 'import numpy as np\n'), ((756, 789), 'numpy.divide', 'np.divide', (['feature_vector', 'nwords'], {}), '(feature_vector, nwords)\n', (765, 789), True, 'import numpy as np\n'), ((1725, 1758), 'numpy.divide', 'np.divide', (['feature_vector', 'nwords'], {}), '(feature_vector, nwords)\n', (1734, 1758), True, 'import numpy as np\n'), ((669, 707), 'numpy.add', 'np.add', (['feature_vector', 'model.wv[word]'], {}), '(feature_vector, model.wv[word])\n', (675, 707), True, 'import numpy as np\n'), ((1641, 1676), 'numpy.add', 'np.add', (['feature_vector', 'model[word]'], {}), '(feature_vector, model[word])\n', (1647, 1676), True, 'import numpy as np\n')] |
import tvm
import numpy as np
import argparse
import os
import time
def run_model(model_dir, input_fp, output_fp, warmup_trials, run_trials, cuda, try_randin):
# import compiled graph
print("=> [TVM on TX2] using model files in {}".format(model_dir))
assert(os.path.isdir(model_dir))
print("=> [TVM on TX2] loading model lib and ptx")
loaded_lib = tvm.module.load(os.path.join(model_dir, "deploy_lib.o"))
if cuda:
dev_lib = tvm.module.load(os.path.join(model_dir, "deploy_cuda.ptx"))
loaded_lib.import_module(dev_lib)
print("=> [TVM on TX2] loading model graph and params")
loaded_graph = open(os.path.join(model_dir,"deploy_graph.json")).read()
loaded_params = bytearray(open(os.path.join(model_dir, "deploy_param.params"), "rb").read())
print("=> [TVM on TX2] creating TVM runtime module")
fcreate = tvm.get_global_func("tvm.graph_runtime.create")
ctx = tvm.gpu(0) if cuda else tvm.cpu(0)
gmodule = fcreate(loaded_graph, loaded_lib, ctx.device_type, ctx.device_id)
set_input, get_output, run = gmodule["set_input"], gmodule["get_output"], gmodule["run"]
print("=> [TVM on TX2] feeding inputs and params into TVM module")
rgb_np = np.load(input_fp) # HWC
x = np.zeros([1,3,224,224]) # NCHW
x[0,:,:,:] = np.transpose(rgb_np, (2,0,1))
set_input('0', tvm.nd.array(x.astype('float32')))
gmodule["load_params"](loaded_params)
print("=> [TVM on TX2] running TVM module, saving output")
run() # not gmodule.run()
out_shape = (1, 1, 224, 224)
out = tvm.nd.empty(out_shape, "float32")
get_output(0, out)
np.save(output_fp, out.asnumpy())
print("=> [TVM on TX2] benchmarking: {} warmup, {} run trials".format(warmup_trials, run_trials))
# run model several times as a warmup
for i in range(warmup_trials):
run()
ctx.sync()
# profile runtime using TVM time evaluator
ftimer = gmodule.time_evaluator("run", ctx, number=1, repeat=run_trials)
profile_result = ftimer()
profiled_runtime = profile_result[0]
print("=> [TVM on TX2] profiled runtime (in ms): {:.5f}".format(1000*profiled_runtime))
# try randomizing input
if try_randin:
randin_runtime = 0
for i in range(run_trials):
x = np.random.randn(1, 3, 224, 224)
set_input('0', tvm.nd.array(x.astype('float32')))
randin_ftimer = gmodule.time_evaluator("run", ctx, number=1, repeat=1)
randin_profile_result = randin_ftimer()
randin_runtime += randin_profile_result[0]
randomized_input_runtime = randin_runtime/run_trials
print("=> [TVM on TX2] with randomized input on every run, profiled runtime (in ms): {:.5f}".format(1000*randomized_input_runtime))
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--model-dir', type=str, required=True,
help='path to folder with TVM-compiled model files (required)')
parser.add_argument('--input-fp', type=str, default='data/rgb.npy',
help='numpy file containing input rgb data (default: data/rgb.npy')
parser.add_argument('--output-fp', type=str, default='data/pred.npy',
help='numpy file to store output prediction data (default: data/pred.npy')
parser.add_argument('--warmup', type=int, default=10,
help='number of inference warmup trials (default: 10)')
parser.add_argument('--run', type=int, default=100,
help='number of inference run trials (default: 100)')
parser.add_argument('--cuda', type=bool, default=False,
help='run with CUDA (default: False)')
parser.add_argument('--randin', type=bool, default=False,
help='profile runtime while randomizing input on every run (default: False)')
args = parser.parse_args()
run_model(args.model_dir, args.input_fp, args.output_fp, args.warmup, args.run, args.cuda, try_randin=args.randin)
if __name__ == '__main__':
main()
| [
"numpy.load",
"argparse.ArgumentParser",
"os.path.join",
"tvm.nd.empty",
"os.path.isdir",
"tvm.cpu",
"numpy.random.randn",
"numpy.transpose",
"numpy.zeros",
"tvm.gpu",
"tvm.get_global_func"
] | [((271, 295), 'os.path.isdir', 'os.path.isdir', (['model_dir'], {}), '(model_dir)\n', (284, 295), False, 'import os\n'), ((870, 917), 'tvm.get_global_func', 'tvm.get_global_func', (['"""tvm.graph_runtime.create"""'], {}), "('tvm.graph_runtime.create')\n", (889, 917), False, 'import tvm\n'), ((1221, 1238), 'numpy.load', 'np.load', (['input_fp'], {}), '(input_fp)\n', (1228, 1238), True, 'import numpy as np\n'), ((1253, 1279), 'numpy.zeros', 'np.zeros', (['[1, 3, 224, 224]'], {}), '([1, 3, 224, 224])\n', (1261, 1279), True, 'import numpy as np\n'), ((1301, 1332), 'numpy.transpose', 'np.transpose', (['rgb_np', '(2, 0, 1)'], {}), '(rgb_np, (2, 0, 1))\n', (1313, 1332), True, 'import numpy as np\n'), ((1564, 1598), 'tvm.nd.empty', 'tvm.nd.empty', (['out_shape', '"""float32"""'], {}), "(out_shape, 'float32')\n", (1576, 1598), False, 'import tvm\n'), ((2805, 2830), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2828, 2830), False, 'import argparse\n'), ((386, 425), 'os.path.join', 'os.path.join', (['model_dir', '"""deploy_lib.o"""'], {}), "(model_dir, 'deploy_lib.o')\n", (398, 425), False, 'import os\n'), ((928, 938), 'tvm.gpu', 'tvm.gpu', (['(0)'], {}), '(0)\n', (935, 938), False, 'import tvm\n'), ((952, 962), 'tvm.cpu', 'tvm.cpu', (['(0)'], {}), '(0)\n', (959, 962), False, 'import tvm\n'), ((474, 516), 'os.path.join', 'os.path.join', (['model_dir', '"""deploy_cuda.ptx"""'], {}), "(model_dir, 'deploy_cuda.ptx')\n", (486, 516), False, 'import os\n'), ((2294, 2325), 'numpy.random.randn', 'np.random.randn', (['(1)', '(3)', '(224)', '(224)'], {}), '(1, 3, 224, 224)\n', (2309, 2325), True, 'import numpy as np\n'), ((645, 689), 'os.path.join', 'os.path.join', (['model_dir', '"""deploy_graph.json"""'], {}), "(model_dir, 'deploy_graph.json')\n", (657, 689), False, 'import os\n'), ((732, 778), 'os.path.join', 'os.path.join', (['model_dir', '"""deploy_param.params"""'], {}), "(model_dir, 'deploy_param.params')\n", (744, 778), False, 'import os\n')] |
import pytest
pytest.importorskip('sklearn')
from sklearn.linear_model import SGDClassifier
import dask.array as da
import numpy as np
import dask
x = np.array([[1, 0],
[2, 0],
[3, 0],
[4, 0],
[0, 1],
[0, 2],
[3, 3],
[4, 4]])
y = np.array([1, 1, 1, 1, -1, -1, 0, 0])
z = np.array([[1, -1],
[-1, 1],
[10, -10],
[-10, 10]])
X = da.from_array(x, chunks=(3, 2))
Y = da.from_array(y, chunks=(3,))
Z = da.from_array(z, chunks=(2, 2))
def test_fit():
sgd = SGDClassifier()
sgd = da.learn.fit(sgd, X, Y, get=dask.get, classes=np.array([-1, 0, 1]))
sol = sgd.predict(z)
result = da.learn.predict(sgd, Z)
assert result.chunks == ((2, 2),)
assert result.compute(get=dask.get).tolist() == sol.tolist()
| [
"pytest.importorskip",
"sklearn.linear_model.SGDClassifier",
"numpy.array",
"dask.array.from_array",
"dask.array.learn.predict"
] | [((14, 44), 'pytest.importorskip', 'pytest.importorskip', (['"""sklearn"""'], {}), "('sklearn')\n", (33, 44), False, 'import pytest\n'), ((154, 228), 'numpy.array', 'np.array', (['[[1, 0], [2, 0], [3, 0], [4, 0], [0, 1], [0, 2], [3, 3], [4, 4]]'], {}), '([[1, 0], [2, 0], [3, 0], [4, 0], [0, 1], [0, 2], [3, 3], [4, 4]])\n', (162, 228), True, 'import numpy as np\n'), ((332, 368), 'numpy.array', 'np.array', (['[1, 1, 1, 1, -1, -1, 0, 0]'], {}), '([1, 1, 1, 1, -1, -1, 0, 0])\n', (340, 368), True, 'import numpy as np\n'), ((374, 424), 'numpy.array', 'np.array', (['[[1, -1], [-1, 1], [10, -10], [-10, 10]]'], {}), '([[1, -1], [-1, 1], [10, -10], [-10, 10]])\n', (382, 424), True, 'import numpy as np\n'), ((472, 503), 'dask.array.from_array', 'da.from_array', (['x'], {'chunks': '(3, 2)'}), '(x, chunks=(3, 2))\n', (485, 503), True, 'import dask.array as da\n'), ((508, 537), 'dask.array.from_array', 'da.from_array', (['y'], {'chunks': '(3,)'}), '(y, chunks=(3,))\n', (521, 537), True, 'import dask.array as da\n'), ((542, 573), 'dask.array.from_array', 'da.from_array', (['z'], {'chunks': '(2, 2)'}), '(z, chunks=(2, 2))\n', (555, 573), True, 'import dask.array as da\n'), ((602, 617), 'sklearn.linear_model.SGDClassifier', 'SGDClassifier', ([], {}), '()\n', (615, 617), False, 'from sklearn.linear_model import SGDClassifier\n'), ((736, 760), 'dask.array.learn.predict', 'da.learn.predict', (['sgd', 'Z'], {}), '(sgd, Z)\n', (752, 760), True, 'import dask.array as da\n'), ((675, 695), 'numpy.array', 'np.array', (['[-1, 0, 1]'], {}), '([-1, 0, 1])\n', (683, 695), True, 'import numpy as np\n')] |
#WGAN-GP
import torch.nn as nn
import numpy as np
class Discriminator(nn.Module):
"""
Implementation of WGAN_GP discrminator is learnt from https://github.com/eriklindernoren/PyTorch-GAN.git
"""
def __init__(self,conf_data):
super(Discriminator, self).__init__()
self.img_shape = (conf_data['discriminator']['channels'],conf_data['discriminator']['input_shape'],conf_data['discriminator']['input_shape'])
self.model = nn.Sequential(
nn.Linear(int(np.prod(self.img_shape)), 512),
nn.LeakyReLU(0.2, inplace=True),
nn.Linear(512, 256),
nn.LeakyReLU(0.2, inplace=True),
nn.Linear(256, 1)
)
def forward(self, img):
img_flat = img.view(img.shape[0], -1)
validity = self.model(img_flat)
return validity | [
"torch.nn.LeakyReLU",
"numpy.prod",
"torch.nn.Linear"
] | [((544, 575), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (556, 575), True, 'import torch.nn as nn\n'), ((589, 608), 'torch.nn.Linear', 'nn.Linear', (['(512)', '(256)'], {}), '(512, 256)\n', (598, 608), True, 'import torch.nn as nn\n'), ((622, 653), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (634, 653), True, 'import torch.nn as nn\n'), ((667, 684), 'torch.nn.Linear', 'nn.Linear', (['(256)', '(1)'], {}), '(256, 1)\n', (676, 684), True, 'import torch.nn as nn\n'), ((500, 523), 'numpy.prod', 'np.prod', (['self.img_shape'], {}), '(self.img_shape)\n', (507, 523), True, 'import numpy as np\n')] |
# python 2/3 compatibility
from __future__ import division, print_function
# global imports
import numpy
import json
import urllib3
import pandas
import libsbml
from scipy.sparse import lil_matrix, hstack
# package imports
from .rba.core.constraint_blocks import ConstraintBlocks
from .element_block import ElementBlock
class ReactionBlock(ElementBlock):
"""
Class holding information on the reactions in the model.
Attributes
----------
Elements : dict
Each model-enzyme is represented by a key.
The values, holding information on each enzyme, are dicts with predefined keys:
'ID' : reaction ID in model (type str)
'Compartment_Machinery' : Localisation of enzyme-subunit, catalysing this reaction. (type list)
'Name' : name according to BiGG (type str)
'OtherIDs' : Other reaction names (eg. BiGG, KEGG) (type list)
'Formula' : Reaction formula as string (type str)
'Reactants' : Which metabolites does this reaction consume of and many (type dict)
'Products' : Which metabolites does this reaction produce of and many (type dict)
'Type' : Type of reaction ('normal' or 'transport') (type str)
'Compartment_Species' : Location of the metabolites involved with this reaction (type list)
'Enzyme' : Enzyme catalysing this reaction (type str)
'Twins' : Isoreactions of this reactions (catalysed by iso-enzymes) (type list)
"""
def fromFiles(self, model, Info, ReactionAnnotations, sbml, metaboliteBlock):
"""
Derive reaction-info from RBA-model.
Input
-----
RBA-model
Dataframe, holding BiGG-reaction information.
Returns
-------
Dictionary with reaction-info.
"""
blocks = ConstraintBlocks(model)
full_S = build_S(list(blocks.metabolism.external + blocks.metabolism.internal),
model.metabolism.reactions).toarray()
self.Elements = {}
self.BiGGids = []
index = 0
http = urllib3.PoolManager()
if type(sbml) is not str:
if type(sbml.model) is libsbml.Model:
sbmlIDMap = [reaction.id for reaction in sbml.model.reactions]
reconstruction = Info.loc['Reconstruction', 'Value']
for i in range(len(blocks.metabolism.reactions)):
Reactants = associatedReactants(i, blocks, model, full_S)
Products = associatedProducts(i, blocks, model, full_S)
Reversibility = checkReversibility(i, blocks)
CompartmentInfo = findCompartment(
i, blocks, Reactants, Products, Reversibility, metaboliteBlock)
Twins = findTwinRxns(i, blocks)
protoID = deriveProto_ID(i, blocks, Twins)
IDdict = {'ProtoID': protoID}
reactionName = ' '
if type(sbml) is not str:
if type(sbml.model) is libsbml.Model:
if protoID in sbmlIDMap:
IDdict.update(getReactionAnnotationsFromSBML(
sbmlIDMap.index(protoID), sbml))
reactionName = sbml.model.reactions[sbmlIDMap.index(protoID)].name
elif 'R_'+protoID in sbmlIDMap:
IDdict.update(getReactionAnnotationsFromSBML(
sbmlIDMap.index(str('R_'+protoID)), sbml))
reactionName = sbml.model.reactions[sbmlIDMap.index(str('R_'+protoID))].name
if type(ReactionAnnotations) is pandas.core.frame.DataFrame:
IDdict.update(readReactionAnnotations(i, ReactionAnnotations, blocks))
self.BiGGids.append(protoID)
index += 1
self.Elements[blocks.metabolism.reactions[i]] = {'ID': blocks.metabolism.reactions[i],
'Compartment_Machinery': [],
'Name': reactionName,
'OtherIDs': IDdict,
'index': index,
'Formula': Reactants['rSide'] + ' <=> ' + Products['pSide'],
'Reactants': Reactants['reactants'],
'Products': Products['products'],
'Reversible': Reversibility['Reversible'],
'Type': CompartmentInfo['type'],
'Compartment_Species': CompartmentInfo['comp'],
'Enzyme': findAssociatedEnzyme(i, blocks),
'Twins': Twins}
def overview(self):
"""
Derive statistics on reactions.
Returns
-------
Dictionary with general numbers on reactions.
"""
nTot = len(list(self.Elements.keys()))
nUnique = numpy.nan
nSpont = 0
nEnzyme = 0
nInternalTransport = 0
nExchange = 0
nInternal = 0
nRev = 0
nIrrev = 0
BiGGIDs = []
for i in list(self.Elements.keys()):
if len(self.Elements[i]['Enzyme']) > 0:
nEnzyme += 1
else:
nSpont += 1
if self.Elements[i]['Type'] == 'Normal':
nInternal += 1
if self.Elements[i]['Type'] == 'Transport (-->)':
if len(self.Elements[i]['Reactants']) == 0 or len(self.Elements[i]['Products']) == 0:
nExchange += 1
else:
nInternalTransport += 1
if self.Elements[i]['Type'] == 'Transport (<==>)':
if len(self.Elements[i]['Reactants']) == 0 or len(self.Elements[i]['Products']) == 0:
nExchange += 1
else:
nInternalTransport += 1
if self.Elements[i]['Reversible']:
nRev += 1
else:
nIrrev += 1
# BiGGIDs.append(self.Elements[i]['OtherIDs']['BiGG.Reaction'])
nUnique = len(list(numpy.unique(self.BiGGids)))
out = {'ReactionsTotal': nTot,
'ReactionsUnique': nUnique,
'ReactionsSpontaneous': nSpont,
'ReactionsEnzymatic': nEnzyme,
'ReactionsInternal': nInternal,
'ReactionsExchange': nExchange,
'ReactionsCompartmentTransport': nInternalTransport,
'ReactionsReversible': nRev,
'ReactionsIrreversible': nIrrev}
return(out)
def deriveProto_ID(reaction, blocks, Twins):
"""
Derive BiGG-ID from reactionID.
Relies on the assumption that reactionID in RBA-model equals an 'R_', followed by the BiGG-ID.
When the reaction has twins (duplicated reactions due to isozymes),
the one without an appended '_2', '_3' ... needs to be found first.
Returns
-------
String with derived BiGG-ID.
"""
if len(Twins) > 0: # Check if isozymic reactions exist
# Find "original" reaction (shortest name)
b = min(Twins+[blocks.metabolism.reactions[reaction]], key=len)
out = b
# Remove 'M_'-prefix
if b.startswith('R_'):
out = b[2:]
else:
out = blocks.metabolism.reactions[reaction]
if out.startswith('R_'):
out = out[2:]
return(out)
def findReactionAnnotations(rx_name, http, reconstruction):
# if not reconstruction is 'GSMM':
# response = http.request('GET', 'http://bigg.ucsd.edu/api/v2/models/' + reconstruction +'/reactions/' + rx_name)
if not reconstruction is 'GSMM':
response = http.request('GET', 'http://bigg.ucsd.edu/api/v2/models/' +
reconstruction + '/reactions/' + rx_name)
else:
response = http.request('GET', 'http://bigg.ucsd.edu/api/v2/models/' +
'universal' + '/reactions/' + rx_name)
try:
x = json.loads(response.data.decode('utf-8'))
out = {'BiGG': 'NA', 'KEGG': 'NA', 'BioCyc': 'NA', 'Name': ''}
out['BiGG'] = rx_name
out['Name'] = x['name']
if 'KEGG Reaction' in list(x['database_links'].keys()):
out['KEGG'] = x['database_links']['KEGG Reaction'][0]['id']
out['BioCyc'] = x['database_links']['BioCyc'][0]['id']
return(out)
except:
return({'BiGG': 'NA', 'KEGG': 'NA', 'BioCyc': 'NA', 'Name': ''})
# else:
# return('NA')
def getReactionAnnotationsFromSBML(index, sbml):
out = {}
for a in sbml.model.reactions[index].getAnnotationString().split('\n'):
if 'rdf:resource="http://identifiers.org/' in a:
annotationType = a.split(
'rdf:resource="http://identifiers.org/')[1].split('"/>')[0].split('/')
out.update({annotationType[0]: annotationType[1]})
return(out)
def readReactionAnnotations(r, ReactionAnnotations, blocks):
AnnotationKeys = list(ReactionAnnotations)
AnnotationIDs = [numpy.nan]*len(AnnotationKeys)
for i in AnnotationKeys:
if blocks.metabolism.reactions[r] in list(ReactionAnnotations[i]):
reaction = list(ReactionAnnotations[i]).index(blocks.metabolism.reactions[r])
AnnotationIDs = list(ReactionAnnotations.loc[reaction])
return(dict(zip(AnnotationKeys, AnnotationIDs)))
def findRxnName(rxnid, reactionsBiGG):
"""
Retreive (descriptive) name of reaction from BiGG-file.
Returns
-------
String with name (when found), otherwise empty.
"""
if rxnid in list(reactionsBiGG.index):
return(str(reactionsBiGG.loc[rxnid]['name']))
else:
return(str(''))
def associatedReactants(i, blocks, model, Sfull):
"""
Derive information of reactant-side of reaction.
Returns
-------
'reactants': Dictionary with reactants and stoichiometric factors.
'rSide': String, representing reactant-side of reaction-formula.
"""
rxn = model.metabolism.reactions.get_by_id(blocks.metabolism.reactions[i])
if rxn is not None:
reactants = {i.species: transform_to_int(i.stoichiometry)
for i in rxn.reactants._elements if i is not None}
eq = ''
for i in reactants.keys():
if reactants[i] != 0:
eq += str(reactants[i])+' '+str(i)+' '
else:
reactants = {}
eq = ''
return({'reactants': reactants, 'rSide': eq})
def associatedProducts(i, blocks, model, Sfull):
"""
Derive information of product-side of reaction.
Returns
-------
'products': Dictionary with products and stoichiometric factors.
'pSide': String, representing product-side of reaction-formula.
"""
rxn = model.metabolism.reactions.get_by_id(blocks.metabolism.reactions[i])
if rxn is not None:
products = {i.species: transform_to_int(i.stoichiometry)
for i in rxn.products._elements if i is not None}
eq = ''
for i in products.keys():
if products[i] != 0:
eq += str(products[i])+' '+str(i)+' '
else:
products = {}
eq = ''
return({'products': products, 'pSide': eq})
def checkReversibility(rx, blocks):
"""
Information on default reaction flux-bounds and reversibility.
Returns
-------
Dictionary with numerical values on flux-bounds and boolean for reversibility.
"""
LB = blocks.metabolism._lb[rx].__dict__['value']
UB = blocks.metabolism._ub[rx].__dict__['value']
out = {'Reversible': True,
'UB': UB,
'LB': LB}
if LB == 0:
out['Reversible'] = False
return(out)
def findCompartment(rx, blocks, aR, aP, rR, metaboliteBlock):
"""
Derive information on compartment aspects of the reaction.
Returns
-------
'type': String 'Transport','Exchange' or 'Normal' (kind of reaction).
'comp': compartment of SBML-model. arrow between compartments when reaction is 'Transport'.
"""
r = list(aR['reactants'].keys())
if len(r) > 0:
rComp = list(set([metaboliteBlock.Elements[rc]['Compartment'] for rc in r]))
else:
rComp = []
p = list(aP['products'].keys())
if len(p) > 0:
pComp = list(set([metaboliteBlock.Elements[pc]['Compartment'] for pc in p]))
else:
pComp = []
if set(rComp) == set(pComp):
typ = 'Normal'
comp = rComp
elif len(list(set(list(rComp+pComp)))) == 1:
comp = list(set(rComp+pComp))
typ = 'Exchange'
else:
typ = 'Transport (-->)'
comp = list(set(rComp+pComp))
if rR['Reversible'] == 'True':
typ = 'Transport (<==>)'
out = {'type': typ,
'comp': comp}
return(out)
def findAssociatedEnzyme(rx, blocks):
"""
Return enzyme species, associated with reaction.
Returns
-------
String with enzymeID, empty string if reaction is spontaneous.
"""
RN = blocks.metabolism.reactions[rx]
if RN in blocks.enzymes.reaction_catalyzed:
return(blocks.enzymes.ids[blocks.enzymes.reaction_catalyzed.index(RN)])
else:
return(str(''))
def findTwinRxns(rx, blocks):
"""
Find Twin reactions (identical reactions, catalyzed by different (iso-)enzymes)
Returns
-------
List of iso-reactions.
"""
out = []
if 'duplicate' in blocks.metabolism.reactions[rx]:
for x in blocks.metabolism.reactions:
if 'duplicate' in x:
if blocks.metabolism.reactions[rx].rsplit('_duplicate')[0] == x.rsplit('_duplicate')[0]:
if blocks.metabolism.reactions[rx] is not x:
out.append(x)
else:
if blocks.metabolism.reactions[rx].rsplit('_duplicate')[0] == x:
if blocks.metabolism.reactions[rx] is not x:
out.append(x)
else:
for x in blocks.metabolism.reactions:
if blocks.metabolism.reactions[rx]+'_duplicate' in x:
out.append(x)
return(out)
def build_S(metabolites, reactions):
"""
Build stoichiometry matrix from metabolites and reactions.
Parameters
----------
metabolites:
Metabolite identifiers (used to define row order).
reactions: rba.xml.ListOfReactions
Reaction data.
Returns
-------
scipy.sparse.lil_matrix
Stoichiometry matrix.
"""
m_index = {m: i for i, m in enumerate(metabolites)}
S = lil_matrix((len(metabolites), len(reactions)))
for r_index, reaction in enumerate(reactions):
for reactant in reaction.reactants:
S[m_index[reactant.species], r_index] = -reactant.stoichiometry
for product in reaction.products:
S[m_index[product.species], r_index] = product.stoichiometry
return S
def transform_to_int(number):
if number % 1 == 0:
return(int(number))
else:
return(number)
| [
"urllib3.PoolManager",
"numpy.unique"
] | [((2092, 2113), 'urllib3.PoolManager', 'urllib3.PoolManager', ([], {}), '()\n', (2111, 2113), False, 'import urllib3\n'), ((6437, 6463), 'numpy.unique', 'numpy.unique', (['self.BiGGids'], {}), '(self.BiGGids)\n', (6449, 6463), False, 'import numpy\n')] |
import dataclasses as dc
import typing
from concurrent.futures import ThreadPoolExecutor
from dataclasses import dataclass
import ipywidgets.widgets as widgets
import matplotlib.pyplot as plt
import numpy as np
import statsmodels.nonparametric.smoothers_lowess as smoothers_lowess
import xarray as xr
from diffpy.pdfgetx import PDFConfig, PDFGetter, Transformation
from diffpy.pdfgetx.pdfconfig import PDFConfigError
from ipywidgets import interact
from pyFAI.azimuthalIntegrator import AzimuthalIntegrator
from pdffitx.vend import mask_img, generate_binner
class MyPDFConfig(PDFConfig):
"""PDFConfig for the lowess smoothing.
Attributes
----------
qcutoff :
The Q > qcutoff region will be LOWESS smoothed.
lowessf :
The frac parameter used in LOWESS smoothing. The larger it is, the smoother it will be.
"""
def __init__(self):
super(MyPDFConfig, self).__init__()
self.qcutoff = 24.0
self.lowessf = 0.04
self.endzero = True
self.dataformat = "QA"
self.qmin = 0.
self.qmaxinst = 24.0
self.qmax = 22.0
def smooth(xin: np.ndarray, yin: np.ndarray, xcutoff: float, lowessf: float, endzero: bool) -> typing.Tuple[
np.ndarray, np.ndarray]:
"""Smooth the input data in region x >= xcutoff using lowessf parameter. If endzero True, terminate the data to the last zero point."""
xout, yout = xin.copy(), yin.copy()
cutoff = np.searchsorted(xin, xcutoff) + 1
if cutoff < xin.shape[0]:
xout[cutoff:], yout[cutoff:] = smoothers_lowess.lowess(yin[cutoff:], xin[cutoff:], frac=lowessf).T
if endzero:
# first element with a different sign
ind = np.argmin(np.sign(yout[-1] * yout[::-1]))
xout, yout = xout[:xout.shape[0] - ind], yout[:yout.shape[0] - ind]
return xout, yout
class LowessTransform(Transformation):
"""The transformation doing the LOWESS smoothing on F(Q)."""
summary = "LOWESS smoothing"
outputtype = "lsfq"
xinlabel = ""
yinlabel = ""
xoutlabel = ""
youtlabel = ""
xin = None
yin = None
xout = None
yout = None
def __init__(self, config: MyPDFConfig):
super(LowessTransform, self).__init__(config)
def checkConfig(self):
if not isinstance(self.config, MyPDFConfig):
raise PDFConfigError("The config for LowessTransform must be LowessPDFConfig.")
def transform(self):
self.xout, self.yout = smooth(self.xin, self.yin, self.config.qcutoff, self.config.lowessf,
self.config.endzero)
class MyPDFGetter(PDFGetter):
"""The PDFGetter with LOWESS smoothing of F(Q) included."""
def __init__(self, config: MyPDFConfig):
super(MyPDFGetter, self).__init__(config)
self.transformations.insert(-1, LowessTransform(config))
@dataclass
class MaskConfig:
edge: int = 30
lower_thresh: float = 0.0
upper_thresh: float = None
alpha: float = 2.
auto_type: str = "median"
tmsk: np.ndarray = None
@dataclass
class IntegrationConfig:
npt: int = 3001
correctSolidAngle: bool = False
dummy: float = 0.
unit: str = "q_A^-1"
safe: bool = False
polarization_factor: float = 0.99
method: typing.Tuple[str, str, str] = ("bbox", "csr", "cython")
@dataclass
class BackgroundConfig:
scale: float = 1.
@dataclass
class LabelConfig:
Q: str = "Q"
F: str = "F"
I: str = "I"
r: str = "r"
G: str = "G"
A: str = "Å"
QU: str = "Å$^{-1}$"
IU: str = "A. U."
rU: str = "Å"
GU: str = "Å$^{-2}$"
FU: str = "Å$^{-1}$"
@dataclass
class ReductionConfig:
geometry: AzimuthalIntegrator = AzimuthalIntegrator()
mask: MaskConfig = MaskConfig()
integration: IntegrationConfig = IntegrationConfig()
background: BackgroundConfig = BackgroundConfig()
pdf: MyPDFConfig = MyPDFConfig()
label: LabelConfig = LabelConfig()
class ReductionCalculator:
def __init__(self, config: ReductionConfig):
self.config: ReductionConfig = config
self.dataset: xr.Dataset = xr.Dataset()
self.dark_dataset: xr.Dataset = xr.Dataset()
self.bkg_dataset: xr.Dataset = xr.Dataset()
self.executor = ThreadPoolExecutor(max_workers=24)
def set_dataset(self, dataset: xr.Dataset) -> None:
self.dataset = dataset
return
def set_dark_dataset(self, dataset: xr.Dataset) -> None:
self.dark_dataset = dataset
return
def set_bkg_dataset(self, dataset: xr.Dataset) -> None:
self.bkg_dataset = dataset
return
@staticmethod
def _average(
ds: xr.Dataset,
image_name: str,
along: typing.Sequence[str]
):
n = len(along)
averaged = xr.apply_ufunc(
np.mean,
ds[image_name],
input_core_dims=[along],
exclude_dims=set(along),
kwargs={"axis": tuple(range(-n, 0))}
)
ds = ds.assign({image_name: averaged})
return ds
def average_dark(
self,
image_name: str,
along: typing.Sequence[str] = ("time", "dim_0")
):
self.dark_dataset = self._average(
self.dark_dataset,
image_name,
along
)
return
def average_bkg(
self,
image_name: str,
along: typing.Sequence[str] = ("time", "dim_0")
):
self.bkg_dataset = self._average(
self.bkg_dataset,
image_name,
along
)
def average(
self,
image_name: str,
along: typing.Sequence[str] = ("dim_0",)
):
self.dataset = self._average(
self.dataset,
image_name,
along
)
return
def _dark_subtract(
self,
ds,
image_name: str,
image_dims: typing.Sequence[str]
):
dark_ds = self.dark_dataset
corrected = xr.apply_ufunc(
np.subtract,
ds[image_name],
dark_ds[image_name].values,
input_core_dims=[image_dims, image_dims],
output_core_dims=[image_dims]
)
ds = ds.assign({image_name: corrected})
return ds
def dark_subtract(
self,
image_name: str,
image_dims: typing.Sequence[str] = ("dim_1", "dim_2")
):
self.dataset = self._dark_subtract(
self.dataset,
image_name,
image_dims
)
return
def dark_subtract_bkg(
self,
image_name: str,
image_dims: typing.Sequence[str] = ("dim_1", "dim_2")
):
self.bkg_dataset = self._dark_subtract(
self.bkg_dataset,
image_name,
image_dims
)
def mask(
self,
image_name: str,
image_dims: typing.Sequence[str] = ("dim_1", "dim_2")
) -> None:
ds = self.dataset
ai = self.config.geometry
mc = self.config.mask
kwargs = dc.asdict(mc)
shape = [ds.dims[d] for d in image_dims]
kwargs["binner"] = generate_binner(ai, shape)
mask = xr.apply_ufunc(
mask_img,
ds[image_name],
kwargs=kwargs,
input_core_dims=[image_dims],
output_core_dims=[image_dims],
vectorize=True
)
ds = ds.assign({image_name: ds[image_name] * mask})
self.dataset = ds
return
def _integrate(
self,
ds: xr.Dataset,
image_name: str,
image_dims: typing.Tuple[str, str],
chi_name: str,
q_name: str
) -> xr.Dataset:
ai = self.config.geometry
exe = self.executor
ic = self.config.integration
images = ds[image_name]
other_dims = tuple(set(images.dims) - set(image_dims))
images.transpose(*other_dims, *image_dims)
kwargs = dc.asdict(ic)
images_data = images.data
if len(other_dims) > 0:
res = np.asarray(
list(exe.map(lambda img: ai.integrate1d(img, **kwargs), images_data))
)
q = res[0, 0, :]
i = res[:, 1, :]
else:
res = ai.integrate1d(images_data, **kwargs)
q = res[0]
i = res[1]
dims = other_dims + (q_name,)
ds = ds.assign_coords({q_name: q})
ds = ds.assign({chi_name: (dims, i)})
return ds
def integrate(
self,
image_name: str,
image_dims: typing.Tuple[str, str] = ("dim_1", "dim_2"),
chi_name: str = "I",
q_name: str = "Q"
):
self.dataset = self._integrate(
self.dataset,
image_name,
image_dims,
chi_name,
q_name
)
return
def integrate_bkg(
self,
image_name: str,
image_dims: typing.Tuple[str, str] = ("dim_1", "dim_2"),
chi_name: str = "I",
q_name: str = "Q"
):
self.bkg_dataset = self._integrate(
self.bkg_dataset,
image_name,
image_dims,
chi_name,
q_name
)
def bkg_subtract(
self,
chi_name: str = "I",
q_name: str = "Q"
):
scale = self.config.background.scale
ds = self.dataset
bkg_ds = self.bkg_dataset
subtracted = xr.apply_ufunc(
lambda x, y: np.subtract(x, scale * y),
ds[chi_name],
bkg_ds[chi_name],
input_core_dims=[[q_name], [q_name]],
output_core_dims=[[q_name]]
)
self.dataset = self.dataset.assign(
{chi_name: subtracted}
)
return
def get_G(
self,
chi_name: str = "I",
q_name: str = "Q",
g_name: str = "G",
r_name: str = "r"
):
ds = self.dataset
label = self.config.label
x = ds[q_name].data
mpg = MyPDFGetter(self.config.pdf)
g = xr.apply_ufunc(
lambda y: mpg.__call__(x, y)[1],
ds[chi_name],
input_core_dims=[[q_name]],
output_core_dims=[[r_name]],
exclude_dims={q_name},
vectorize=True
)
r = xr.DataArray(mpg.gr[0], dims=[r_name], attrs={"units": label.rU, "standard_name": label.r})
g.attrs.update({"units": label.GU, "standard_name": label.G})
ds = ds.assign_coords({r_name: r})
ds = ds.assign({g_name: g})
self.dataset = ds
return
    def interact_fq(
        self,
        index: int = 0,
        chi_name: str = "I",
        q_name: str = "Q"
    ):
        """Interactively tune the PDF-getter parameters on one I(Q) curve.

        Builds ipywidgets sliders for the qmin/qmax/qmaxinst/lowessf/qcutoff
        parameters (and an endzero checkbox) and, on every change, re-runs the
        PDF transform on curve ``index`` and plots the last two intermediate
        F(Q) stages for visual comparison.
        """
        i = self.dataset[chi_name][index]
        q = i[q_name]
        mpg = MyPDFGetter(self.config.pdf)
        config = mpg.config
        pdf_config = self.config.pdf
        label = self.config.label
        def func(
            qmin,
            qmax,
            qmaxinst,
            lowessf,
            qcutoff,
            endzero
        ):
            # push the slider values into the live getter config, then rerun
            config.qmin = qmin
            config.qmax = qmax
            config.qmaxinst = qmaxinst
            config.lowessf = lowessf
            config.qcutoff = qcutoff
            config.endzero = endzero
            mpg.__call__(q, i)
            # last two transform stages of the pipeline; plotted for comparison
            q1, f1 = mpg.t[-3].xout, mpg.t[-3].yout
            q2, f2 = mpg.t[-2].xout, mpg.t[-2].yout
            ax: plt.Axes = plt.subplots()[1]
            ax.plot(q1, f1)
            ax.plot(q2, f2)
            ax.set_xlabel("{} [{}]".format(label.Q, label.QU))
            ax.set_ylabel("{} [{}]".format(label.F, label.FU))
            plt.pause(0.1)
        # slider defaults come from the stored pdf config; ranges are fixed
        return interact(
            func,
            qmin=widgets.FloatSlider(pdf_config.qmin, min=0., max=5., step=0.05),
            qmax=widgets.FloatSlider(pdf_config.qmax, min=15., max=25.0, step=0.1),
            qmaxinst=widgets.FloatSlider(pdf_config.qmaxinst, min=25.0, max=30.0, step=0.1),
            lowessf=widgets.FloatSlider(pdf_config.lowessf, min=0.0, max=0.5, step=0.01),
            qcutoff=widgets.FloatSlider(pdf_config.qcutoff, min=0.0, max=30.0, step=0.1),
            endzero=widgets.Checkbox(pdf_config.endzero)
        )
def get_I(
self,
image_name: str,
chi_name: str = "I",
q_name: str = "Q",
avg_along: typing.Sequence[str] = ("dim_0",),
dark_avg_along: typing.Sequence[str] = ("time", "dim_0"),
bkg_avg_along: typing.Sequence[str] = ("time", "dim_0"),
image_dims: typing.Tuple[str, str] = ("dim_1", "dim_2"),
drop_image: bool = True
):
self.average(image_name, avg_along)
if image_name in self.dark_dataset:
self.average_dark(image_name, dark_avg_along)
self.dark_subtract(image_name, image_dims)
self.mask(image_name, image_dims)
self.integrate(image_name, image_dims, chi_name, q_name)
if image_name in self.bkg_dataset:
self.average_bkg(image_name, bkg_avg_along)
self.dark_subtract_bkg(image_name, image_dims)
self.integrate_bkg(image_name, image_dims, chi_name, q_name)
self.bkg_subtract(chi_name)
ds = self.dataset
if drop_image:
ds = ds.drop_vars(image_name)
label = self.config.label
ds[chi_name].attrs.update({"units": label.IU, "standard_name": label.I})
ds[q_name].attrs.update({"units": label.QU, "standard_name": label.Q})
self.dataset = ds
return
    def reset_dims(
        self,
        dim2dims: typing.Dict[str, typing.List[str]],
    ) -> None:
        """Re-index ``self.dataset``, replacing each old dim with new dim(s).

        Thin wrapper around the module-level ``reset_dims`` helper.
        """
        self.dataset = reset_dims(self.dataset, dim2dims)
        return
def reset_dims(
    ds: xr.Dataset,
    dim2dims: typing.Dict[str, typing.List[str]]
) -> xr.Dataset:
    """Replace each dimension in ``dim2dims`` by the new dimension(s).

    Each old dim is re-indexed by its mapped coordinate(s) and unstacked;
    mappings to a single name are then renamed back from the multi-index
    form, and the old index coordinates keep their original names.
    """
    old_dims = list(dim2dims.keys())
    # swap indexes: drop the old ones, install the new, unstack multi-indexes
    ds = ds.reset_index(old_dims).set_index(dim2dims).unstack()
    # a mapping to a plain string or one-element list is a rename, not an
    # unstack; collect those and rename dims/vars accordingly
    replaced_dims = {}
    for old_dim, new_dims in dim2dims.items():
        if isinstance(new_dims, str):
            replaced_dims[old_dim] = new_dims
        elif len(new_dims) == 1:
            replaced_dims[old_dim] = new_dims[0]
    if replaced_dims:
        ds = ds.rename_dims(replaced_dims).rename_vars(replaced_dims)
    # reset_index leaves the old index coords suffixed with "_" (xarray
    # behaviour — confirm against the pinned xarray version); strip the suffix
    rule = {f"{old_dim}_": old_dim for old_dim in old_dims}
    return ds.rename_vars(rule)
| [
"ipywidgets.widgets.Checkbox",
"pyFAI.azimuthalIntegrator.AzimuthalIntegrator",
"numpy.subtract",
"ipywidgets.widgets.FloatSlider",
"numpy.searchsorted",
"pdffitx.vend.generate_binner",
"xarray.Dataset",
"statsmodels.nonparametric.smoothers_lowess.lowess",
"numpy.sign",
"diffpy.pdfgetx.pdfconfig.P... | [((3692, 3713), 'pyFAI.azimuthalIntegrator.AzimuthalIntegrator', 'AzimuthalIntegrator', ([], {}), '()\n', (3711, 3713), False, 'from pyFAI.azimuthalIntegrator import AzimuthalIntegrator\n'), ((1448, 1477), 'numpy.searchsorted', 'np.searchsorted', (['xin', 'xcutoff'], {}), '(xin, xcutoff)\n', (1463, 1477), True, 'import numpy as np\n'), ((4097, 4109), 'xarray.Dataset', 'xr.Dataset', ([], {}), '()\n', (4107, 4109), True, 'import xarray as xr\n'), ((4150, 4162), 'xarray.Dataset', 'xr.Dataset', ([], {}), '()\n', (4160, 4162), True, 'import xarray as xr\n'), ((4202, 4214), 'xarray.Dataset', 'xr.Dataset', ([], {}), '()\n', (4212, 4214), True, 'import xarray as xr\n'), ((4239, 4273), 'concurrent.futures.ThreadPoolExecutor', 'ThreadPoolExecutor', ([], {'max_workers': '(24)'}), '(max_workers=24)\n', (4257, 4273), False, 'from concurrent.futures import ThreadPoolExecutor\n'), ((6037, 6185), 'xarray.apply_ufunc', 'xr.apply_ufunc', (['np.subtract', 'ds[image_name]', 'dark_ds[image_name].values'], {'input_core_dims': '[image_dims, image_dims]', 'output_core_dims': '[image_dims]'}), '(np.subtract, ds[image_name], dark_ds[image_name].values,\n input_core_dims=[image_dims, image_dims], output_core_dims=[image_dims])\n', (6051, 6185), True, 'import xarray as xr\n'), ((7137, 7150), 'dataclasses.asdict', 'dc.asdict', (['mc'], {}), '(mc)\n', (7146, 7150), True, 'import dataclasses as dc\n'), ((7227, 7253), 'pdffitx.vend.generate_binner', 'generate_binner', (['ai', 'shape'], {}), '(ai, shape)\n', (7242, 7253), False, 'from pdffitx.vend import mask_img, generate_binner\n'), ((7269, 7406), 'xarray.apply_ufunc', 'xr.apply_ufunc', (['mask_img', 'ds[image_name]'], {'kwargs': 'kwargs', 'input_core_dims': '[image_dims]', 'output_core_dims': '[image_dims]', 'vectorize': '(True)'}), '(mask_img, ds[image_name], kwargs=kwargs, input_core_dims=[\n image_dims], output_core_dims=[image_dims], vectorize=True)\n', (7283, 7406), True, 'import xarray as xr\n'), ((8063, 
8076), 'dataclasses.asdict', 'dc.asdict', (['ic'], {}), '(ic)\n', (8072, 8076), True, 'import dataclasses as dc\n'), ((10487, 10582), 'xarray.DataArray', 'xr.DataArray', (['mpg.gr[0]'], {'dims': '[r_name]', 'attrs': "{'units': label.rU, 'standard_name': label.r}"}), "(mpg.gr[0], dims=[r_name], attrs={'units': label.rU,\n 'standard_name': label.r})\n", (10499, 10582), True, 'import xarray as xr\n'), ((1551, 1616), 'statsmodels.nonparametric.smoothers_lowess.lowess', 'smoothers_lowess.lowess', (['yin[cutoff:]', 'xin[cutoff:]'], {'frac': 'lowessf'}), '(yin[cutoff:], xin[cutoff:], frac=lowessf)\n', (1574, 1616), True, 'import statsmodels.nonparametric.smoothers_lowess as smoothers_lowess\n'), ((1705, 1735), 'numpy.sign', 'np.sign', (['(yout[-1] * yout[::-1])'], {}), '(yout[-1] * yout[::-1])\n', (1712, 1735), True, 'import numpy as np\n'), ((2335, 2408), 'diffpy.pdfgetx.pdfconfig.PDFConfigError', 'PDFConfigError', (['"""The config for LowessTransform must be LowessPDFConfig."""'], {}), "('The config for LowessTransform must be LowessPDFConfig.')\n", (2349, 2408), False, 'from diffpy.pdfgetx.pdfconfig import PDFConfigError\n'), ((11873, 11887), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.1)'], {}), '(0.1)\n', (11882, 11887), True, 'import matplotlib.pyplot as plt\n'), ((9639, 9664), 'numpy.subtract', 'np.subtract', (['x', '(scale * y)'], {}), '(x, scale * y)\n', (9650, 9664), True, 'import numpy as np\n'), ((11661, 11675), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (11673, 11675), True, 'import matplotlib.pyplot as plt\n'), ((11949, 12014), 'ipywidgets.widgets.FloatSlider', 'widgets.FloatSlider', (['pdf_config.qmin'], {'min': '(0.0)', 'max': '(5.0)', 'step': '(0.05)'}), '(pdf_config.qmin, min=0.0, max=5.0, step=0.05)\n', (11968, 12014), True, 'import ipywidgets.widgets as widgets\n'), ((12031, 12097), 'ipywidgets.widgets.FloatSlider', 'widgets.FloatSlider', (['pdf_config.qmax'], {'min': '(15.0)', 'max': '(25.0)', 'step': '(0.1)'}), 
'(pdf_config.qmax, min=15.0, max=25.0, step=0.1)\n', (12050, 12097), True, 'import ipywidgets.widgets as widgets\n'), ((12119, 12189), 'ipywidgets.widgets.FloatSlider', 'widgets.FloatSlider', (['pdf_config.qmaxinst'], {'min': '(25.0)', 'max': '(30.0)', 'step': '(0.1)'}), '(pdf_config.qmaxinst, min=25.0, max=30.0, step=0.1)\n', (12138, 12189), True, 'import ipywidgets.widgets as widgets\n'), ((12211, 12279), 'ipywidgets.widgets.FloatSlider', 'widgets.FloatSlider', (['pdf_config.lowessf'], {'min': '(0.0)', 'max': '(0.5)', 'step': '(0.01)'}), '(pdf_config.lowessf, min=0.0, max=0.5, step=0.01)\n', (12230, 12279), True, 'import ipywidgets.widgets as widgets\n'), ((12301, 12369), 'ipywidgets.widgets.FloatSlider', 'widgets.FloatSlider', (['pdf_config.qcutoff'], {'min': '(0.0)', 'max': '(30.0)', 'step': '(0.1)'}), '(pdf_config.qcutoff, min=0.0, max=30.0, step=0.1)\n', (12320, 12369), True, 'import ipywidgets.widgets as widgets\n'), ((12391, 12427), 'ipywidgets.widgets.Checkbox', 'widgets.Checkbox', (['pdf_config.endzero'], {}), '(pdf_config.endzero)\n', (12407, 12427), True, 'import ipywidgets.widgets as widgets\n')] |
import json
import random
import logging
import unittest
import numpy as np
import tempfile
import os.path
from glob import glob
import tenncor as tc
from dbg.print import graph_to_str
from testutil.array_testcase import ArrTest
from testutil.generate_testcases import generate_testcases
from testutil.compare_testcase import joint_json
# JSON files holding the tensorflow-generated regression cases.
_testcases = glob('models/test/testcases/tf_cases_*.json')
# Populated in __main__ from the generated test template.
_test_data = {}
# Joins all testcase files into one case-name -> cases lookup.
_reader = joint_json(_testcases)
def _round_helper(x):
if isinstance(x, float):
return round(x)
return tc.api.round(x)
class ApiTest(ArrTest):
def _common_assign(self, case, api):
pos_cases = _reader.get_case(case + '_pos')
assert(len(pos_cases) > 0)
for inps, out in pos_cases:
(data, data2) = inps
data = np.array(data)
data2 = np.array(data2)
var = tc.variable(data, 'var_target')
src = tc.variable(data2, 'var_source')
ass1 = api(var, src)
self._array_close(np.array(out), ass1.get())
neg_cases = _reader.get_case(case + '_neg')
assert(len(neg_cases) > 0)
for inps, out in neg_cases:
(data, data2) = inps
data = np.array(data)
data2 = np.array(data2)
var2 = tc.variable(data, 'var_target2')
src2 = tc.variable(data2, 'var_source2')
ass2 = api(var2, tc.api.neg(src2))
self._array_close(np.array(out), ass2.get())
def _common_unary(self, case, api):
fwd_cases = _reader.get_case(case + '_fwd')
assert(len(fwd_cases) > 0)
for inps, out in fwd_cases:
(data) = inps
data = np.array(data)
var = tc.variable(data, 'var')
res = api(var)
self._array_close(np.array(out), res.get())
bwd_cases = _reader.get_case(case + '_bwd')
assert(len(bwd_cases) > 0)
for inps, out in bwd_cases:
(data) = inps
data = np.array(data)
shape = data.shape
var = tc.variable(data, 'var')
res = api(var)
var2 = tc.variable(data, 'var2')
ex, zero = tuple(tc.derive(res, [var, var2]))
self._array_eq(np.zeros(shape, dtype=np.float32), zero.get())
self._array_close(np.array(out), ex.get())
def _common_unary_nograd(self, case, api):
cases = _reader.get_case(case)
assert(len(cases) > 0)
for inps, out in cases:
(data) = inps
data = np.array(data)
var = tc.variable(data, 'var')
res = api(var)
self._array_close(np.array(out), res.get())
def _common_binary(self, case, api):
out_cases = _reader.get_case(case + '_out')
assert(len(out_cases) > 0)
for inps, out in out_cases:
(data, data2) = inps
data = np.array(data)
data2 = np.array(data2)
var = tc.variable(data, 'var')
var2 = tc.variable(data2, 'var2')
res = api(var, var2)
self._array_close(np.array(out), res.get())
both_cases = _reader.get_case(case + '_both')
assert(len(both_cases) > 0)
for inps, out in both_cases:
(data) = inps
data = np.array(data)
var = tc.variable(data, 'var')
res = api(var, var)
self._array_close(np.array(out), res.get())
clhs_cases = _reader.get_case(case + '_clhs')
assert(len(clhs_cases) > 0)
for inps, out in clhs_cases:
(data, cst) = inps
data = np.array(data)
var = tc.variable(data, 'var')
res = api(var, cst)
self._array_close(np.array(out), res.get())
crhs_cases = _reader.get_case(case + '_crhs')
assert(len(crhs_cases) > 0)
for inps, out in crhs_cases:
(cst, data) = inps
data = np.array(data)
var = tc.variable(data, 'var')
res = api(cst, var)
self._array_close(np.array(out), res.get())
outda_cases = _reader.get_case(case + '_outda')
assert(len(outda_cases) > 0)
for inps, out in outda_cases:
(data, data2) = inps
data = np.array(data)
data2 = np.array(data2)
var = tc.variable(data, 'var')
var2 = tc.variable(data2, 'var2')
var3 = tc.variable(data, 'var3')
data0 = np.zeros(data.shape, dtype=np.float32)
res = api(var, var2)
zero, ex = tuple(tc.derive(res, [var3, var]))
self._array_close(np.array(out), ex.get())
self._array_eq(data0, zero.get())
outdb_cases = _reader.get_case(case + '_outdb')
assert(len(outdb_cases) > 0)
for inps, out in outdb_cases:
(data, data2) = inps
data = np.array(data)
data2 = np.array(data2)
var = tc.variable(data, 'var')
var2 = tc.variable(data2, 'var2')
var3 = tc.variable(data, 'var3')
data0 = np.zeros(data.shape, dtype=np.float32)
res = api(var, var2)
zero, ex = tuple(tc.derive(res, [var3, var2]))
self._array_close(np.array(out), ex.get())
self._array_eq(data0, zero.get())
bothd_cases = _reader.get_case(case + '_bothd')
assert(len(bothd_cases) > 0)
for inps, out in bothd_cases:
(data) = inps
data = np.array(data)
var = tc.variable(data, 'var')
res = api(var, var)
ex = tc.derive(res, [var])[0]
self._array_close(np.array(out), ex.get())
clhsd_cases = _reader.get_case(case + '_clhsd')
assert(len(clhsd_cases) > 0)
for inps, out in clhsd_cases:
(data, cst) = inps
data = np.array(data)
var = tc.variable(data, 'var')
res = api(var, cst)
ex = tc.derive(res, [var])[0]
self._array_close(np.array(out), ex.get())
crhsd_cases = _reader.get_case(case + '_crhsd')
assert(len(crhsd_cases) > 0)
for inps, out in crhsd_cases:
(cst, data) = inps
data = np.array(data)
var = tc.variable(data, 'var')
res = api(cst, var)
ex = tc.derive(res, [var])[0]
self._array_close(np.array(out), ex.get())
def test_variable(self):
shapes = [[3, 4, 5]]
if 'elementary.shape' in _test_data:
shapes += _test_data['elementary.shape']
for shape in shapes:
data1 = np.ones(shape, dtype=np.float32)
data0 = np.zeros(shape, dtype=np.float32)
data = np.random.rand(*shape) * 234
var = tc.variable(data, 'var')
fout = var.get()
pad_removed = len(shape) - len(fout.shape)
padding = [1] * pad_removed
self.assertEqual(shape, padding + list(fout.shape))
self._array_close(data, fout)
var2 = tc.variable(data, 'var2')
one, zero = tuple(tc.derive(var, [var, var2]))
out1 = one.get()
out0 = zero.get()
self.assertEqual(shape, padding + list(out1.shape))
self.assertEqual(shape, padding + list(out0.shape))
self._array_eq(data1, out1)
self._array_eq(data0, out0)
def test_assign(self):
self._common_assign('assign', tc.api.assign)
def test_assign_add(self):
self._common_assign('assign_add', tc.api.assign_add)
def test_assign_sub(self):
self._common_assign('assign_sub', tc.api.assign_sub)
def test_assign_mul(self):
self._common_assign('assign_mul', tc.api.assign_mul)
def test_assign_div(self):
self._common_assign('assign_div', tc.api.assign_div)
def test_abs(self):
self._common_unary('abs', tc.api.abs)
def test_neg(self):
self._common_unary('neg', tc.api.neg)
def test_sin(self):
self._common_unary('sin', tc.api.sin)
def test_cos(self):
self._common_unary('cos', tc.api.cos)
def test_tan(self):
self._common_unary('tan', tc.api.tan)
def test_exp(self):
self._common_unary('exp', tc.api.exp)
def test_log(self):
self._common_unary('log', tc.api.log)
def test_sqrt(self):
self._common_unary('sqrt', tc.api.sqrt)
def test_round(self):
self._common_unary('round', tc.api.round)
def test_sigmoid(self):
self._common_unary('sigmoid', tc.api.sigmoid)
def test_tanh(self):
self._common_unary('tanh', tc.api.tanh)
def test_clip_by_range(self):
self._common_unary('clip_by_range',
lambda x: tc.api.clip_by_range(x, 0.3, 0.6))
def test_clip_by_l2norm(self):
self._common_unary('clip_by_l2norm',
lambda x: tc.api.clip_by_l2norm(x, 5))
def test_softmax(self):
self._common_unary('softmax0',
lambda arr: tc.api.softmax(arr, offset=0, ndims=1))
self._common_unary('softmax1',
lambda arr: tc.api.softmax(arr, offset=1, ndims=1))
def test_relu(self):
self._common_unary('relu', tc.api.relu)
def test_square(self):
self._common_unary('square', tc.api.square)
def test_cube(self):
self._common_unary('cube', tc.api.cube)
def test_pow(self):
self._common_binary('pow', tc.api.pow)
def test_add(self):
self._common_binary('add', tc.api.add)
self._common_binary('add', lambda a, b: a + b)
def test_sub(self):
self._common_binary('sub', tc.api.sub)
self._common_binary('sub', lambda a, b: a - b)
def test_mul(self):
self._common_binary('mul', tc.api.mul)
self._common_binary('mul', lambda a, b: a * b)
def test_div(self):
self._common_binary('div', tc.api.div)
self._common_binary('div', lambda a, b: a / b)
def test_min(self):
self._common_binary('min', tc.api.min)
def test_max(self):
self._common_binary('max', tc.api.max)
def test_eq(self):
self._common_binary('eq',
lambda x, y: tc.api.eq(_round_helper(x), _round_helper(y)))
self._common_binary('eq',
lambda x, y: _round_helper(x) == _round_helper(y))
def test_neq(self):
self._common_binary('neq',
lambda x, y: tc.api.neq(_round_helper(x), _round_helper(y)))
self._common_binary('neq',
lambda x, y: _round_helper(x) != _round_helper(y))
def test_lt(self):
self._common_binary('lt',
lambda x, y: tc.api.lt(_round_helper(x), _round_helper(y)))
self._common_binary('lt',
lambda x, y: _round_helper(x) < _round_helper(y))
def test_gt(self):
self._common_binary('gt',
lambda x, y: tc.api.gt(_round_helper(x), _round_helper(y)))
self._common_binary('gt',
lambda x, y: _round_helper(x) > _round_helper(y))
def test_nelems(self):
self._common_unary('nelems', tc.api.n_elems)
def test_ndims(self):
self._common_unary('ndims', lambda x: tc.api.n_dims(x, 0))
def test_extend(self):
self._common_unary('extend', lambda x: tc.api.extend(x, 1, [3]))
def test_rsum_1d(self):
self._common_unary('rsum_1d',
lambda x: tc.api.reduce_sum_1d(x, 1))
def test_rprod_1d(self):
self._common_unary('rprod_1d',
lambda x: tc.api.reduce_prod_1d(x, 1))
def test_rmin_1d(self):
self._common_unary('rmin_1d',
lambda x: tc.api.reduce_min_1d(x, 1))
def test_rmax_1d(self):
self._common_unary('rmax_1d',
lambda x: tc.api.reduce_max_1d(x, 1))
def test_rsum(self):
self._common_unary('rsum', tc.api.reduce_sum)
self._common_unary('rsum2',
lambda x: tc.api.reduce_sum(x, offset=1))
def test_rprod(self):
self._common_unary('rprod', tc.api.reduce_prod)
self._common_unary('rprod2',
lambda x: tc.api.reduce_prod(x, offset=1))
def test_rmin(self):
self._common_unary('rmin', tc.api.reduce_min)
self._common_unary('rmin2',
lambda x: tc.api.reduce_min(x, offset=1))
def test_rmax(self):
self._common_unary('rmax', tc.api.reduce_max)
self._common_unary('rmax2',
lambda x: tc.api.reduce_max(x, offset=1))
def test_argmax(self):
self._common_unary_nograd('argmax',
lambda x: tc.api.permute(tc.api.argmax(
x, return_dim=1), [0, 2, 1]))
def test_rl2norm(self):
self._common_unary('rl2norm', tc.api.reduce_l2norm)
def test_matmul(self):
fwd_cases = _reader.get_case('matmul_fwd')
assert(len(fwd_cases) > 0)
for inps, out in fwd_cases:
(data, data2) = inps
data = np.array(data)
data2 = np.array(data2)
var = tc.variable(data, 'var')
var2 = tc.variable(data2, 'var2')
res = tc.api.matmul(var, var2)
self._array_close(np.array(out), res.get())
bwda_cases = _reader.get_case('matmul_bwda')
assert(len(bwda_cases) > 0)
for inps, out in bwda_cases:
(data, data2) = inps
data = np.array(data)
data2 = np.array(data2)
var = tc.variable(data, 'var')
var2 = tc.variable(data2, 'var2')
var3 = tc.variable(data, 'var3')
data0 = np.zeros(data.shape, dtype=np.float32)
res = tc.api.matmul(var, var2)
zero, grad = tuple(tc.derive(res, [var3, var]))
self._array_close(np.array(out), grad.get())
self._array_close(data0, zero.get())
bwdb_cases = _reader.get_case('matmul_bwdb')
assert(len(bwdb_cases) > 0)
for inps, out in bwdb_cases:
(data, data2) = inps
data = np.array(data)
data2 = np.array(data2)
var = tc.variable(data, 'var')
var2 = tc.variable(data2, 'var2')
var3 = tc.variable(data, 'var3')
data0 = np.zeros(data.shape, dtype=np.float32)
res = tc.api.matmul(var, var2)
zero, grad = tuple(tc.derive(res, [var3, var2]))
self._array_close(np.array(out), grad.get())
self._array_close(data0, zero.get())
sfwd_cases = _reader.get_case('smatmul_fwd')
for inps, out in sfwd_cases:
(data) = inps
data = np.array(data)
var = tc.variable(data, 'var')
res = tc.api.matmul(var, var)
self._array_close(np.array(out), res.get())
sbwd_cases = _reader.get_case('smatmul_bwd')
for inps, out in sbwd_cases:
(data) = inps
data = np.array(data)
var = tc.variable(data, 'var')
res = tc.api.matmul(var, var)
grad = tuple(tc.derive(res, [var]))[0]
self._array_close(np.array(out), grad.get())
def test_convolution(self):
fwd_cases = _reader.get_case('convolution_fwd')
assert(len(fwd_cases) > 0)
for inps, out in fwd_cases:
(data, data2) = inps
data = np.array(data)
data2 = np.array(data2)
var = tc.variable(data, 'var')
vkernel = tc.variable(data2, 'vkernel')
res = tc.api.convolution(var, vkernel, list(range(8)))
self._array_close(np.array(out), res.get())
bwda_cases = _reader.get_case('convolution_dimage')
assert(len(bwda_cases) > 0)
for inps, out in bwda_cases:
(data, data2) = inps
data = np.array(data)
data2 = np.array(data2)
var = tc.variable(data, 'var')
vkernel = tc.variable(data2, 'vkernel')
var3 = tc.variable(data, 'var3')
data0 = np.zeros(data.shape, dtype=np.float32)
res = tc.api.convolution(var, vkernel, list(range(8)))
zero, grad = tuple(tc.derive(res, [var3, var]))
self._array_close(np.array(out), grad.get())
self._array_close(data0, zero.get())
bwdb_cases = _reader.get_case('convolution_dkernel')
assert(len(bwdb_cases) > 0)
for inps, out in bwdb_cases:
(data, data2) = inps
data = np.array(data)
data2 = np.array(data2)
var = tc.variable(data, 'var')
vkernel = tc.variable(data2, 'vkernel')
var3 = tc.variable(data, 'var3')
data0 = np.zeros(data.shape, dtype=np.float32)
res = tc.api.convolution(var, vkernel, list(range(8)))
zero, grad = tuple(tc.derive(res, [var3, vkernel]))
self._array_close(np.array(out), grad.get())
self._array_close(data0, zero.get())
def test_conv2d(self):
fwd_cases = _reader.get_case('conv2d_fwd')
assert(len(fwd_cases) > 0)
for inps, out in fwd_cases:
(data, data2) = inps
data = np.array(data)
data2 = np.array(data2)
image = tc.variable(data, 'image')
kernel = tc.variable(data2, 'kernel')
res = tc.api.nn.conv2d(image, kernel)
self._array_close(np.array(out), res.get())
bwda_cases = _reader.get_case('conv2d_dimage')
assert(len(bwda_cases) > 0)
for inps, out in bwda_cases:
(data, data2) = inps
data = np.array(data)
data2 = np.array(data2)
image = tc.variable(data, 'image')
kernel = tc.variable(data2, 'kernel')
var3 = tc.variable(data, 'var3')
data0 = np.zeros(data.shape, dtype=np.float32)
res = tc.api.nn.conv2d(image, kernel)
zero, grad = tuple(tc.derive(res, [var3, image]))
self._array_close(np.array(out), grad.get())
self._array_close(data0, zero.get())
bwdb_cases = _reader.get_case('conv2d_dkernel')
assert(len(bwdb_cases) > 0)
for inps, out in bwdb_cases:
(data, data2) = inps
data = np.array(data)
data2 = np.array(data2)
image = tc.variable(data, 'image')
kernel = tc.variable(data2, 'kernel')
var3 = tc.variable(data, 'var3')
data0 = np.zeros(data.shape, dtype=np.float32)
res = tc.api.nn.conv2d(image, kernel)
zero, grad = tuple(tc.derive(res, [var3, kernel]))
self._array_close(np.array(out), grad.get())
self._array_close(data0, zero.get())
def test_stride(self):
shape = [3, 8, 8, 2]
data = np.random.rand(*shape).astype(np.float32)
image = tc.variable(data, 'image')
strideout = tc.api.stride(image, [1, 2, 2])
self._array_eq([3, 4, 4, 2], strideout.shape())
ex = tc.derive(strideout, [image])[0]
self._array_eq(shape, ex.shape())
def test_avgpool(self):
self._common_unary('avgpool',
lambda x: tc.api.nn.mean_pool2d(x, [1, 2]))
def test_maxpool(self):
self._common_unary('maxpool',
lambda x: tc.api.nn.max_pool2d(x, [1, 2]))
def test_grader_scenario1(self): # REDUCE -> MUL
fwd_cases = _reader.get_case('scenario1_fwd')
assert(len(fwd_cases) > 0)
for inps, out in fwd_cases:
(data, data2) = inps
data = np.array(data)
data2 = np.array(data2)
var = tc.variable(data, 'var')
var2 = tc.variable(data2, 'var2')
res = tc.api.mul(tc.api.reduce_sum(var, offset=1, ndims=1), var2)
self._array_close(np.array(out), res.get())
bwd_cases = _reader.get_case('scenario1_bwd')
assert(len(bwd_cases) > 0)
for inps, out in bwd_cases:
(data, data2) = inps
data = np.array(data)
data2 = np.array(data2)
var = tc.variable(data, 'var')
var2 = tc.variable(data2, 'var2')
var3 = tc.variable(data, 'var3')
data0 = np.zeros(data.shape, dtype=np.float32)
res = tc.api.mul(tc.api.reduce_sum(var, offset=1, ndims=1), var2)
zero, ex = tuple(tc.derive(res, [var3, var]))
self._array_close(np.array(out), ex.get())
self._array_eq(data0, zero.get())
def test_grader_scenario2(self): # EXTEND -> MUL
fwd_cases = _reader.get_case('scenario2_fwd')
assert(len(fwd_cases) > 0)
for inps, out in fwd_cases:
(data, data2) = inps
data = np.array(data)
data2 = np.array(data2)
var = tc.variable(data, 'var')
var2 = tc.variable(data2, 'var2')
res = tc.api.mul(tc.api.extend(var, 1, [3]), var2)
self._array_close(np.array(out), res.get())
bwd_cases = _reader.get_case('scenario2_bwd')
assert(len(bwd_cases) > 0)
for inps, out in bwd_cases:
(data, data2) = inps
data = np.array(data)
data2 = np.array(data2)
var = tc.variable(data, 'var')
var2 = tc.variable(data2, 'var2')
var3 = tc.variable(data, 'var3')
data0 = np.zeros(data.shape, dtype=np.float32)
res = tc.api.mul(tc.api.extend(var, 1, [3]), var2)
zero, ex = tuple(tc.derive(res, [var3, var]))
self._array_close(np.array(out), ex.get())
self._array_eq(data0, zero.get())
def test_grader_scenario3(self): # PERMUTE -> MUL
fwd_cases = _reader.get_case('scenario3_fwd')
assert(len(fwd_cases) > 0)
for inps, out in fwd_cases:
(data, data2) = inps
data = np.array(data)
data2 = np.array(data2)
var = tc.variable(data, 'var')
var2 = tc.variable(data2, 'var2')
res = tc.api.mul(tc.api.permute(var, [1,0]), var2)
self._array_close(np.array(out), res.get())
bwd_cases = _reader.get_case('scenario3_bwd')
assert(len(bwd_cases) > 0)
for inps, out in bwd_cases:
(data, data2) = inps
data = np.array(data)
data2 = np.array(data2)
var = tc.variable(data, 'var')
var2 = tc.variable(data2, 'var2')
var3 = tc.variable(data, 'var3')
data0 = np.zeros(data.shape, dtype=np.float32)
res = tc.api.mul(tc.api.permute(var, [1,0]), var2)
zero, ex = tuple(tc.derive(res, [var3, var]))
self._array_close(np.array(out), ex.get())
self._array_eq(data0, zero.get())
def test_grader_scenario4(self): # MATMUL -> MUL
fwd_cases = _reader.get_case('scenario4_fwd')
assert(len(fwd_cases) > 0)
for inps, out in fwd_cases:
(data, data2, data3) = inps
data = np.array(data)
data2 = np.array(data2)
data3 = np.array(data3)
var = tc.variable(data, 'var')
var2 = tc.variable(data2, 'var2')
var3 = tc.variable(data3, 'var3')
res = tc.api.mul(tc.api.matmul(var, var2), var3)
self._array_close(np.array(out), res.get())
bwd_cases = _reader.get_case('scenario4_bwd')
assert(len(bwd_cases) > 0)
for inps, out in bwd_cases:
(data, data2, data3) = inps
data = np.array(data)
data2 = np.array(data2)
data3 = np.array(data3)
var = tc.variable(data, 'var')
var2 = tc.variable(data2, 'var2')
var3 = tc.variable(data3, 'var3')
var4 = tc.variable(data, 'var4')
data0 = np.zeros(data.shape, dtype=np.float32)
res = tc.api.mul(tc.api.matmul(var, var2), var3)
zero, ex = tuple(tc.derive(res, [var4, var]))
self._array_close(np.array(out), ex.get())
self._array_eq(data0, zero.get())
def test_grader_scenario5(self): # MATMUL -> MATMUL
fwd_cases = _reader.get_case('scenario5_fwd')
assert(len(fwd_cases) > 0)
for inps, out in fwd_cases:
(data, data2, data3) = inps
data = np.array(data)
data2 = np.array(data2)
data3 = np.array(data3)
var = tc.variable(data, 'var')
var2 = tc.variable(data2, 'var2')
var3 = tc.variable(data3, 'var3')
res = tc.api.matmul(tc.api.matmul(var, var2), var3)
self._array_close(np.array(out), res.get())
bwd_cases = _reader.get_case('scenario5_bwd')
assert(len(bwd_cases) > 0)
for inps, out in bwd_cases:
(data, data2, data3) = inps
data = np.array(data)
data2 = np.array(data2)
data3 = np.array(data3)
var = tc.variable(data, 'var')
var2 = tc.variable(data2, 'var2')
var3 = tc.variable(data3, 'var3')
var4 = tc.variable(data, 'var4')
data0 = np.zeros(data.shape, dtype=np.float32)
res = tc.api.matmul(tc.api.matmul(var, var2), var3)
zero, ex = tuple(tc.derive(res, [var4, var]))
self._array_close(np.array(out), ex.get())
self._array_eq(data0, zero.get())
def test_grader_scenario6(self): # REDUCE -> MATMUL
fwd_cases = _reader.get_case('scenario6_fwd')
assert(len(fwd_cases) > 0)
for inps, out in fwd_cases:
(data, data2) = inps
data = np.array(data)
data2 = np.array(data2)
var = tc.variable(data, 'var')
var2 = tc.variable(data2, 'var2')
res = tc.api.matmul(tc.api.reduce_sum(var, offset=2, ndims=1), var2)
self._array_close(np.array(out), res.get())
bwd_cases = _reader.get_case('scenario6_bwd')
assert(len(bwd_cases) > 0)
for inps, out in bwd_cases:
(data, data2) = inps
data = np.array(data)
data2 = np.array(data2)
var = tc.variable(data, 'var')
var2 = tc.variable(data2, 'var2')
var3 = tc.variable(data, 'var3')
data0 = np.zeros(data.shape, dtype=np.float32)
res = tc.api.matmul(tc.api.reduce_sum(var, offset=2, ndims=1), var2)
zero, ex = tuple(tc.derive(res, [var3, var]))
self._array_close(np.array(out), ex.get())
self._array_eq(data0, zero.get())
def test_grader_scenario7(self): # EXTEND -> MATMUL
fwd_cases = _reader.get_case('scenario7_fwd')
assert(len(fwd_cases) > 0)
for inps, out in fwd_cases:
(data, data2) = inps
data = np.array(data)
data2 = np.array(data2)
var = tc.variable(data, 'var')
var2 = tc.variable(data2, 'var2')
res = tc.api.matmul(tc.api.extend(var, 1, [10]), var2)
self._array_close(np.array(out), res.get())
bwd_cases = _reader.get_case('scenario7_bwd')
assert(len(bwd_cases) > 0)
for inps, out in bwd_cases:
(data, data2) = inps
data = np.array(data)
data2 = np.array(data2)
var = tc.variable(data, 'var')
var2 = tc.variable(data2, 'var2')
var3 = tc.variable(data, 'var3')
data0 = np.zeros(data.shape, dtype=np.float32)
res = tc.api.matmul(tc.api.extend(var, 1, [10]), var2)
zero, ex = tuple(tc.derive(res, [var3, var]))
self._array_close(np.array(out), ex.get())
self._array_eq(data0, zero.get())
# A<a> -> EXTEND -> B<a,b>
# A<a> -> EXTEND -> C<a,c> -> PERMUTE -> <c,a>
# B MATMUL C -> D<c,b>
def test_grader_scenario8(self):
fwd_cases = _reader.get_case('scenario8_fwd')
assert(len(fwd_cases) > 0)
for inps, out in fwd_cases:
(data) = inps
data = np.array(data)
var = tc.variable(data, 'var')
res = tc.api.matmul(
tc.api.extend(var, 1, [10]),
tc.api.permute(tc.api.extend(var, 1, [3]), [1, 0]))
self._array_close(np.array(out), res.get())
bwd_cases = _reader.get_case('scenario8_bwd')
assert(len(bwd_cases) > 0)
for inps, out in bwd_cases:
(data) = inps
data = np.array(data)
var = tc.variable(data, 'var')
var2 = tc.variable(data, 'var2')
data0 = np.zeros(data.shape, dtype=np.float32)
res = tc.api.matmul(
tc.api.extend(var, 1, [10]),
tc.api.permute(tc.api.extend(var, 1, [3]), [1, 0]))
zero, ex = tuple(tc.derive(res, [var2, var]))
self._array_close(np.array(out), ex.get())
self._array_eq(data0, zero.get())
def test_grader_scenario9(self):
    """Gradient checks for the chained-matmul graph

        d = a @ b;  e = c @ d;  f = transpose(d) @ transpose(c);  res = e @ f

    differentiated w.r.t. each of the leaves a, b and c in turn.

    Improvements over the previous version: the unused local
    ``g = tc.variable(data, 'g')`` in the db/dc loops was removed, and the
    three byte-identical loop bodies were factored into one nested helper.
    """
    def check_gradient(case_name, wrt_index):
        # Rebuild the graph for every recorded case and compare the
        # derivative w.r.t. leaves[wrt_index] against the expectation.
        cases = _reader.get_case(case_name)
        assert len(cases) > 0
        for (raw_a, raw_b, raw_c), expect in cases:
            a = tc.variable(np.array(raw_a), 'a')
            b = tc.variable(np.array(raw_b), 'b')
            c = tc.variable(np.array(raw_c), 'c')
            d = tc.api.matmul(a, b)
            e = tc.api.matmul(c, d)
            f = tc.api.matmul(tc.api.transpose(d), tc.api.transpose(c))
            res = tc.api.matmul(e, f)
            leaf = (a, b, c)[wrt_index]
            ex = tc.derive(res, [leaf])[0]
            self._array_close(np.array(expect), ex.get())
    check_gradient('scenario9_da', 0)
    check_gradient('scenario9_db', 1)
    check_gradient('scenario9_dc', 2)
if __name__ == "__main__":
    # Load the template describing which test cases / config pools to
    # generate; close the file as soon as the JSON has been parsed.
    with open('testutil/ead_template.json', encoding='utf-8') as json_data:
        test_template = json.load(json_data)
    # Validate with real exceptions rather than `assert`, which is
    # silently stripped when Python runs with -O.
    for required_key in ('test_cases', 'config_pools'):
        if required_key not in test_template:
            raise KeyError(
                'ead_template.json is missing required key: ' + required_key)
    # log to file
    logging.basicConfig(filename='/tmp/ead_ptest.log', level=logging.DEBUG)
    logging.info("running ptest for tc")
    _test_data = generate_testcases(
        test_template['test_cases'],
        test_template['config_pools'])
    unittest.main()
| [
"testutil.generate_testcases.generate_testcases",
"tenncor.api.clip_by_l2norm",
"tenncor.api.nn.max_pool2d",
"numpy.ones",
"tenncor.api.nn.mean_pool2d",
"tenncor.api.nn.conv2d",
"glob.glob",
"tenncor.api.softmax",
"unittest.main",
"tenncor.variable",
"tenncor.api.reduce_min_1d",
"tenncor.api.r... | [((354, 399), 'glob.glob', 'glob', (['"""models/test/testcases/tf_cases_*.json"""'], {}), "('models/test/testcases/tf_cases_*.json')\n", (358, 399), False, 'from glob import glob\n'), ((428, 450), 'testutil.compare_testcase.joint_json', 'joint_json', (['_testcases'], {}), '(_testcases)\n', (438, 450), False, 'from testutil.compare_testcase import joint_json\n'), ((538, 553), 'tenncor.api.round', 'tc.api.round', (['x'], {}), '(x)\n', (550, 553), True, 'import tenncor as tc\n'), ((31292, 31363), 'logging.basicConfig', 'logging.basicConfig', ([], {'filename': '"""/tmp/ead_ptest.log"""', 'level': 'logging.DEBUG'}), "(filename='/tmp/ead_ptest.log', level=logging.DEBUG)\n", (31311, 31363), False, 'import logging\n'), ((31367, 31403), 'logging.info', 'logging.info', (['"""running ptest for tc"""'], {}), "('running ptest for tc')\n", (31379, 31403), False, 'import logging\n'), ((31422, 31500), 'testutil.generate_testcases.generate_testcases', 'generate_testcases', (["test_template['test_cases']", "test_template['config_pools']"], {}), "(test_template['test_cases'], test_template['config_pools'])\n", (31440, 31500), False, 'from testutil.generate_testcases import generate_testcases\n'), ((31523, 31538), 'unittest.main', 'unittest.main', ([], {}), '()\n', (31536, 31538), False, 'import unittest\n'), ((18840, 18866), 'tenncor.variable', 'tc.variable', (['data', '"""image"""'], {}), "(data, 'image')\n", (18851, 18866), True, 'import tenncor as tc\n'), ((18888, 18919), 'tenncor.api.stride', 'tc.api.stride', (['image', '[1, 2, 2]'], {}), '(image, [1, 2, 2])\n', (18901, 18919), True, 'import tenncor as tc\n'), ((31156, 31176), 'json.load', 'json.load', (['json_data'], {}), '(json_data)\n', (31165, 31176), False, 'import json\n'), ((795, 809), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (803, 809), True, 'import numpy as np\n'), ((830, 845), 'numpy.array', 'np.array', (['data2'], {}), '(data2)\n', (838, 845), True, 'import numpy as np\n'), ((865, 
896), 'tenncor.variable', 'tc.variable', (['data', '"""var_target"""'], {}), "(data, 'var_target')\n", (876, 896), True, 'import tenncor as tc\n'), ((915, 947), 'tenncor.variable', 'tc.variable', (['data2', '"""var_source"""'], {}), "(data2, 'var_source')\n", (926, 947), True, 'import tenncor as tc\n'), ((1215, 1229), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (1223, 1229), True, 'import numpy as np\n'), ((1250, 1265), 'numpy.array', 'np.array', (['data2'], {}), '(data2)\n', (1258, 1265), True, 'import numpy as np\n'), ((1286, 1318), 'tenncor.variable', 'tc.variable', (['data', '"""var_target2"""'], {}), "(data, 'var_target2')\n", (1297, 1318), True, 'import tenncor as tc\n'), ((1338, 1371), 'tenncor.variable', 'tc.variable', (['data2', '"""var_source2"""'], {}), "(data2, 'var_source2')\n", (1349, 1371), True, 'import tenncor as tc\n'), ((1686, 1700), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (1694, 1700), True, 'import numpy as np\n'), ((1719, 1743), 'tenncor.variable', 'tc.variable', (['data', '"""var"""'], {}), "(data, 'var')\n", (1730, 1743), True, 'import tenncor as tc\n'), ((1997, 2011), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (2005, 2011), True, 'import numpy as np\n'), ((2061, 2085), 'tenncor.variable', 'tc.variable', (['data', '"""var"""'], {}), "(data, 'var')\n", (2072, 2085), True, 'import tenncor as tc\n'), ((2133, 2158), 'tenncor.variable', 'tc.variable', (['data', '"""var2"""'], {}), "(data, 'var2')\n", (2144, 2158), True, 'import tenncor as tc\n'), ((2542, 2556), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (2550, 2556), True, 'import numpy as np\n'), ((2575, 2599), 'tenncor.variable', 'tc.variable', (['data', '"""var"""'], {}), "(data, 'var')\n", (2586, 2599), True, 'import tenncor as tc\n'), ((2901, 2915), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (2909, 2915), True, 'import numpy as np\n'), ((2936, 2951), 'numpy.array', 'np.array', (['data2'], {}), '(data2)\n', (2944, 2951), True, 
'import numpy as np\n'), ((2970, 2994), 'tenncor.variable', 'tc.variable', (['data', '"""var"""'], {}), "(data, 'var')\n", (2981, 2994), True, 'import tenncor as tc\n'), ((3014, 3040), 'tenncor.variable', 'tc.variable', (['data2', '"""var2"""'], {}), "(data2, 'var2')\n", (3025, 3040), True, 'import tenncor as tc\n'), ((3305, 3319), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (3313, 3319), True, 'import numpy as np\n'), ((3338, 3362), 'tenncor.variable', 'tc.variable', (['data', '"""var"""'], {}), "(data, 'var')\n", (3349, 3362), True, 'import tenncor as tc\n'), ((3631, 3645), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (3639, 3645), True, 'import numpy as np\n'), ((3664, 3688), 'tenncor.variable', 'tc.variable', (['data', '"""var"""'], {}), "(data, 'var')\n", (3675, 3688), True, 'import tenncor as tc\n'), ((3957, 3971), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (3965, 3971), True, 'import numpy as np\n'), ((3990, 4014), 'tenncor.variable', 'tc.variable', (['data', '"""var"""'], {}), "(data, 'var')\n", (4001, 4014), True, 'import tenncor as tc\n'), ((4290, 4304), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (4298, 4304), True, 'import numpy as np\n'), ((4325, 4340), 'numpy.array', 'np.array', (['data2'], {}), '(data2)\n', (4333, 4340), True, 'import numpy as np\n'), ((4359, 4383), 'tenncor.variable', 'tc.variable', (['data', '"""var"""'], {}), "(data, 'var')\n", (4370, 4383), True, 'import tenncor as tc\n'), ((4403, 4429), 'tenncor.variable', 'tc.variable', (['data2', '"""var2"""'], {}), "(data2, 'var2')\n", (4414, 4429), True, 'import tenncor as tc\n'), ((4449, 4474), 'tenncor.variable', 'tc.variable', (['data', '"""var3"""'], {}), "(data, 'var3')\n", (4460, 4474), True, 'import tenncor as tc\n'), ((4496, 4534), 'numpy.zeros', 'np.zeros', (['data.shape'], {'dtype': 'np.float32'}), '(data.shape, dtype=np.float32)\n', (4504, 4534), True, 'import numpy as np\n'), ((4912, 4926), 'numpy.array', 'np.array', (['data'], {}), 
'(data)\n', (4920, 4926), True, 'import numpy as np\n'), ((4947, 4962), 'numpy.array', 'np.array', (['data2'], {}), '(data2)\n', (4955, 4962), True, 'import numpy as np\n'), ((4981, 5005), 'tenncor.variable', 'tc.variable', (['data', '"""var"""'], {}), "(data, 'var')\n", (4992, 5005), True, 'import tenncor as tc\n'), ((5025, 5051), 'tenncor.variable', 'tc.variable', (['data2', '"""var2"""'], {}), "(data2, 'var2')\n", (5036, 5051), True, 'import tenncor as tc\n'), ((5071, 5096), 'tenncor.variable', 'tc.variable', (['data', '"""var3"""'], {}), "(data, 'var3')\n", (5082, 5096), True, 'import tenncor as tc\n'), ((5118, 5156), 'numpy.zeros', 'np.zeros', (['data.shape'], {'dtype': 'np.float32'}), '(data.shape, dtype=np.float32)\n', (5126, 5156), True, 'import numpy as np\n'), ((5528, 5542), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (5536, 5542), True, 'import numpy as np\n'), ((5561, 5585), 'tenncor.variable', 'tc.variable', (['data', '"""var"""'], {}), "(data, 'var')\n", (5572, 5585), True, 'import tenncor as tc\n'), ((5899, 5913), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (5907, 5913), True, 'import numpy as np\n'), ((5932, 5956), 'tenncor.variable', 'tc.variable', (['data', '"""var"""'], {}), "(data, 'var')\n", (5943, 5956), True, 'import tenncor as tc\n'), ((6270, 6284), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (6278, 6284), True, 'import numpy as np\n'), ((6303, 6327), 'tenncor.variable', 'tc.variable', (['data', '"""var"""'], {}), "(data, 'var')\n", (6314, 6327), True, 'import tenncor as tc\n'), ((6665, 6697), 'numpy.ones', 'np.ones', (['shape'], {'dtype': 'np.float32'}), '(shape, dtype=np.float32)\n', (6672, 6697), True, 'import numpy as np\n'), ((6718, 6751), 'numpy.zeros', 'np.zeros', (['shape'], {'dtype': 'np.float32'}), '(shape, dtype=np.float32)\n', (6726, 6751), True, 'import numpy as np\n'), ((6818, 6842), 'tenncor.variable', 'tc.variable', (['data', '"""var"""'], {}), "(data, 'var')\n", (6829, 6842), True, 'import 
tenncor as tc\n'), ((7095, 7120), 'tenncor.variable', 'tc.variable', (['data', '"""var2"""'], {}), "(data, 'var2')\n", (7106, 7120), True, 'import tenncor as tc\n'), ((12964, 12978), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (12972, 12978), True, 'import numpy as np\n'), ((12999, 13014), 'numpy.array', 'np.array', (['data2'], {}), '(data2)\n', (13007, 13014), True, 'import numpy as np\n'), ((13033, 13057), 'tenncor.variable', 'tc.variable', (['data', '"""var"""'], {}), "(data, 'var')\n", (13044, 13057), True, 'import tenncor as tc\n'), ((13077, 13103), 'tenncor.variable', 'tc.variable', (['data2', '"""var2"""'], {}), "(data2, 'var2')\n", (13088, 13103), True, 'import tenncor as tc\n'), ((13123, 13147), 'tenncor.api.matmul', 'tc.api.matmul', (['var', 'var2'], {}), '(var, var2)\n', (13136, 13147), True, 'import tenncor as tc\n'), ((13384, 13398), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (13392, 13398), True, 'import numpy as np\n'), ((13419, 13434), 'numpy.array', 'np.array', (['data2'], {}), '(data2)\n', (13427, 13434), True, 'import numpy as np\n'), ((13453, 13477), 'tenncor.variable', 'tc.variable', (['data', '"""var"""'], {}), "(data, 'var')\n", (13464, 13477), True, 'import tenncor as tc\n'), ((13497, 13523), 'tenncor.variable', 'tc.variable', (['data2', '"""var2"""'], {}), "(data2, 'var2')\n", (13508, 13523), True, 'import tenncor as tc\n'), ((13543, 13568), 'tenncor.variable', 'tc.variable', (['data', '"""var3"""'], {}), "(data, 'var3')\n", (13554, 13568), True, 'import tenncor as tc\n'), ((13590, 13628), 'numpy.zeros', 'np.zeros', (['data.shape'], {'dtype': 'np.float32'}), '(data.shape, dtype=np.float32)\n', (13598, 13628), True, 'import numpy as np\n'), ((13647, 13671), 'tenncor.api.matmul', 'tc.api.matmul', (['var', 'var2'], {}), '(var, var2)\n', (13660, 13671), True, 'import tenncor as tc\n'), ((14018, 14032), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (14026, 14032), True, 'import numpy as np\n'), ((14053, 14068), 
'numpy.array', 'np.array', (['data2'], {}), '(data2)\n', (14061, 14068), True, 'import numpy as np\n'), ((14087, 14111), 'tenncor.variable', 'tc.variable', (['data', '"""var"""'], {}), "(data, 'var')\n", (14098, 14111), True, 'import tenncor as tc\n'), ((14131, 14157), 'tenncor.variable', 'tc.variable', (['data2', '"""var2"""'], {}), "(data2, 'var2')\n", (14142, 14157), True, 'import tenncor as tc\n'), ((14177, 14202), 'tenncor.variable', 'tc.variable', (['data', '"""var3"""'], {}), "(data, 'var3')\n", (14188, 14202), True, 'import tenncor as tc\n'), ((14224, 14262), 'numpy.zeros', 'np.zeros', (['data.shape'], {'dtype': 'np.float32'}), '(data.shape, dtype=np.float32)\n', (14232, 14262), True, 'import numpy as np\n'), ((14281, 14305), 'tenncor.api.matmul', 'tc.api.matmul', (['var', 'var2'], {}), '(var, var2)\n', (14294, 14305), True, 'import tenncor as tc\n'), ((14610, 14624), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (14618, 14624), True, 'import numpy as np\n'), ((14643, 14667), 'tenncor.variable', 'tc.variable', (['data', '"""var"""'], {}), "(data, 'var')\n", (14654, 14667), True, 'import tenncor as tc\n'), ((14687, 14710), 'tenncor.api.matmul', 'tc.api.matmul', (['var', 'var'], {}), '(var, var)\n', (14700, 14710), True, 'import tenncor as tc\n'), ((14904, 14918), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (14912, 14918), True, 'import numpy as np\n'), ((14937, 14961), 'tenncor.variable', 'tc.variable', (['data', '"""var"""'], {}), "(data, 'var')\n", (14948, 14961), True, 'import tenncor as tc\n'), ((14981, 15004), 'tenncor.api.matmul', 'tc.api.matmul', (['var', 'var'], {}), '(var, var)\n', (14994, 15004), True, 'import tenncor as tc\n'), ((15326, 15340), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (15334, 15340), True, 'import numpy as np\n'), ((15361, 15376), 'numpy.array', 'np.array', (['data2'], {}), '(data2)\n', (15369, 15376), True, 'import numpy as np\n'), ((15395, 15419), 'tenncor.variable', 'tc.variable', (['data', 
'"""var"""'], {}), "(data, 'var')\n", (15406, 15419), True, 'import tenncor as tc\n'), ((15442, 15471), 'tenncor.variable', 'tc.variable', (['data2', '"""vkernel"""'], {}), "(data2, 'vkernel')\n", (15453, 15471), True, 'import tenncor as tc\n'), ((15783, 15797), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (15791, 15797), True, 'import numpy as np\n'), ((15818, 15833), 'numpy.array', 'np.array', (['data2'], {}), '(data2)\n', (15826, 15833), True, 'import numpy as np\n'), ((15852, 15876), 'tenncor.variable', 'tc.variable', (['data', '"""var"""'], {}), "(data, 'var')\n", (15863, 15876), True, 'import tenncor as tc\n'), ((15899, 15928), 'tenncor.variable', 'tc.variable', (['data2', '"""vkernel"""'], {}), "(data2, 'vkernel')\n", (15910, 15928), True, 'import tenncor as tc\n'), ((15948, 15973), 'tenncor.variable', 'tc.variable', (['data', '"""var3"""'], {}), "(data, 'var3')\n", (15959, 15973), True, 'import tenncor as tc\n'), ((15995, 16033), 'numpy.zeros', 'np.zeros', (['data.shape'], {'dtype': 'np.float32'}), '(data.shape, dtype=np.float32)\n', (16003, 16033), True, 'import numpy as np\n'), ((16455, 16469), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (16463, 16469), True, 'import numpy as np\n'), ((16490, 16505), 'numpy.array', 'np.array', (['data2'], {}), '(data2)\n', (16498, 16505), True, 'import numpy as np\n'), ((16524, 16548), 'tenncor.variable', 'tc.variable', (['data', '"""var"""'], {}), "(data, 'var')\n", (16535, 16548), True, 'import tenncor as tc\n'), ((16571, 16600), 'tenncor.variable', 'tc.variable', (['data2', '"""vkernel"""'], {}), "(data2, 'vkernel')\n", (16582, 16600), True, 'import tenncor as tc\n'), ((16620, 16645), 'tenncor.variable', 'tc.variable', (['data', '"""var3"""'], {}), "(data, 'var3')\n", (16631, 16645), True, 'import tenncor as tc\n'), ((16667, 16705), 'numpy.zeros', 'np.zeros', (['data.shape'], {'dtype': 'np.float32'}), '(data.shape, dtype=np.float32)\n', (16675, 16705), True, 'import numpy as np\n'), ((17146, 
17160), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (17154, 17160), True, 'import numpy as np\n'), ((17181, 17196), 'numpy.array', 'np.array', (['data2'], {}), '(data2)\n', (17189, 17196), True, 'import numpy as np\n'), ((17217, 17243), 'tenncor.variable', 'tc.variable', (['data', '"""image"""'], {}), "(data, 'image')\n", (17228, 17243), True, 'import tenncor as tc\n'), ((17265, 17293), 'tenncor.variable', 'tc.variable', (['data2', '"""kernel"""'], {}), "(data2, 'kernel')\n", (17276, 17293), True, 'import tenncor as tc\n'), ((17313, 17344), 'tenncor.api.nn.conv2d', 'tc.api.nn.conv2d', (['image', 'kernel'], {}), '(image, kernel)\n', (17329, 17344), True, 'import tenncor as tc\n'), ((17583, 17597), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (17591, 17597), True, 'import numpy as np\n'), ((17618, 17633), 'numpy.array', 'np.array', (['data2'], {}), '(data2)\n', (17626, 17633), True, 'import numpy as np\n'), ((17654, 17680), 'tenncor.variable', 'tc.variable', (['data', '"""image"""'], {}), "(data, 'image')\n", (17665, 17680), True, 'import tenncor as tc\n'), ((17702, 17730), 'tenncor.variable', 'tc.variable', (['data2', '"""kernel"""'], {}), "(data2, 'kernel')\n", (17713, 17730), True, 'import tenncor as tc\n'), ((17750, 17775), 'tenncor.variable', 'tc.variable', (['data', '"""var3"""'], {}), "(data, 'var3')\n", (17761, 17775), True, 'import tenncor as tc\n'), ((17797, 17835), 'numpy.zeros', 'np.zeros', (['data.shape'], {'dtype': 'np.float32'}), '(data.shape, dtype=np.float32)\n', (17805, 17835), True, 'import numpy as np\n'), ((17854, 17885), 'tenncor.api.nn.conv2d', 'tc.api.nn.conv2d', (['image', 'kernel'], {}), '(image, kernel)\n', (17870, 17885), True, 'import tenncor as tc\n'), ((18237, 18251), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (18245, 18251), True, 'import numpy as np\n'), ((18272, 18287), 'numpy.array', 'np.array', (['data2'], {}), '(data2)\n', (18280, 18287), True, 'import numpy as np\n'), ((18308, 18334), 
'tenncor.variable', 'tc.variable', (['data', '"""image"""'], {}), "(data, 'image')\n", (18319, 18334), True, 'import tenncor as tc\n'), ((18356, 18384), 'tenncor.variable', 'tc.variable', (['data2', '"""kernel"""'], {}), "(data2, 'kernel')\n", (18367, 18384), True, 'import tenncor as tc\n'), ((18404, 18429), 'tenncor.variable', 'tc.variable', (['data', '"""var3"""'], {}), "(data, 'var3')\n", (18415, 18429), True, 'import tenncor as tc\n'), ((18451, 18489), 'numpy.zeros', 'np.zeros', (['data.shape'], {'dtype': 'np.float32'}), '(data.shape, dtype=np.float32)\n', (18459, 18489), True, 'import numpy as np\n'), ((18508, 18539), 'tenncor.api.nn.conv2d', 'tc.api.nn.conv2d', (['image', 'kernel'], {}), '(image, kernel)\n', (18524, 18539), True, 'import tenncor as tc\n'), ((18990, 19019), 'tenncor.derive', 'tc.derive', (['strideout', '[image]'], {}), '(strideout, [image])\n', (18999, 19019), True, 'import tenncor as tc\n'), ((19541, 19555), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (19549, 19555), True, 'import numpy as np\n'), ((19576, 19591), 'numpy.array', 'np.array', (['data2'], {}), '(data2)\n', (19584, 19591), True, 'import numpy as np\n'), ((19610, 19634), 'tenncor.variable', 'tc.variable', (['data', '"""var"""'], {}), "(data, 'var')\n", (19621, 19634), True, 'import tenncor as tc\n'), ((19654, 19680), 'tenncor.variable', 'tc.variable', (['data2', '"""var2"""'], {}), "(data2, 'var2')\n", (19665, 19680), True, 'import tenncor as tc\n'), ((19995, 20009), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (20003, 20009), True, 'import numpy as np\n'), ((20030, 20045), 'numpy.array', 'np.array', (['data2'], {}), '(data2)\n', (20038, 20045), True, 'import numpy as np\n'), ((20064, 20088), 'tenncor.variable', 'tc.variable', (['data', '"""var"""'], {}), "(data, 'var')\n", (20075, 20088), True, 'import tenncor as tc\n'), ((20108, 20134), 'tenncor.variable', 'tc.variable', (['data2', '"""var2"""'], {}), "(data2, 'var2')\n", (20119, 20134), True, 'import 
tenncor as tc\n'), ((20154, 20179), 'tenncor.variable', 'tc.variable', (['data', '"""var3"""'], {}), "(data, 'var3')\n", (20165, 20179), True, 'import tenncor as tc\n'), ((20201, 20239), 'numpy.zeros', 'np.zeros', (['data.shape'], {'dtype': 'np.float32'}), '(data.shape, dtype=np.float32)\n', (20209, 20239), True, 'import numpy as np\n'), ((20709, 20723), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (20717, 20723), True, 'import numpy as np\n'), ((20744, 20759), 'numpy.array', 'np.array', (['data2'], {}), '(data2)\n', (20752, 20759), True, 'import numpy as np\n'), ((20778, 20802), 'tenncor.variable', 'tc.variable', (['data', '"""var"""'], {}), "(data, 'var')\n", (20789, 20802), True, 'import tenncor as tc\n'), ((20822, 20848), 'tenncor.variable', 'tc.variable', (['data2', '"""var2"""'], {}), "(data2, 'var2')\n", (20833, 20848), True, 'import tenncor as tc\n'), ((21148, 21162), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (21156, 21162), True, 'import numpy as np\n'), ((21183, 21198), 'numpy.array', 'np.array', (['data2'], {}), '(data2)\n', (21191, 21198), True, 'import numpy as np\n'), ((21217, 21241), 'tenncor.variable', 'tc.variable', (['data', '"""var"""'], {}), "(data, 'var')\n", (21228, 21241), True, 'import tenncor as tc\n'), ((21261, 21287), 'tenncor.variable', 'tc.variable', (['data2', '"""var2"""'], {}), "(data2, 'var2')\n", (21272, 21287), True, 'import tenncor as tc\n'), ((21307, 21332), 'tenncor.variable', 'tc.variable', (['data', '"""var3"""'], {}), "(data, 'var3')\n", (21318, 21332), True, 'import tenncor as tc\n'), ((21354, 21392), 'numpy.zeros', 'np.zeros', (['data.shape'], {'dtype': 'np.float32'}), '(data.shape, dtype=np.float32)\n', (21362, 21392), True, 'import numpy as np\n'), ((21848, 21862), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (21856, 21862), True, 'import numpy as np\n'), ((21883, 21898), 'numpy.array', 'np.array', (['data2'], {}), '(data2)\n', (21891, 21898), True, 'import numpy as np\n'), ((21917, 
21941), 'tenncor.variable', 'tc.variable', (['data', '"""var"""'], {}), "(data, 'var')\n", (21928, 21941), True, 'import tenncor as tc\n'), ((21961, 21987), 'tenncor.variable', 'tc.variable', (['data2', '"""var2"""'], {}), "(data2, 'var2')\n", (21972, 21987), True, 'import tenncor as tc\n'), ((22287, 22301), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (22295, 22301), True, 'import numpy as np\n'), ((22322, 22337), 'numpy.array', 'np.array', (['data2'], {}), '(data2)\n', (22330, 22337), True, 'import numpy as np\n'), ((22356, 22380), 'tenncor.variable', 'tc.variable', (['data', '"""var"""'], {}), "(data, 'var')\n", (22367, 22380), True, 'import tenncor as tc\n'), ((22400, 22426), 'tenncor.variable', 'tc.variable', (['data2', '"""var2"""'], {}), "(data2, 'var2')\n", (22411, 22426), True, 'import tenncor as tc\n'), ((22446, 22471), 'tenncor.variable', 'tc.variable', (['data', '"""var3"""'], {}), "(data, 'var3')\n", (22457, 22471), True, 'import tenncor as tc\n'), ((22493, 22531), 'numpy.zeros', 'np.zeros', (['data.shape'], {'dtype': 'np.float32'}), '(data.shape, dtype=np.float32)\n', (22501, 22531), True, 'import numpy as np\n'), ((22993, 23007), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (23001, 23007), True, 'import numpy as np\n'), ((23028, 23043), 'numpy.array', 'np.array', (['data2'], {}), '(data2)\n', (23036, 23043), True, 'import numpy as np\n'), ((23064, 23079), 'numpy.array', 'np.array', (['data3'], {}), '(data3)\n', (23072, 23079), True, 'import numpy as np\n'), ((23098, 23122), 'tenncor.variable', 'tc.variable', (['data', '"""var"""'], {}), "(data, 'var')\n", (23109, 23122), True, 'import tenncor as tc\n'), ((23142, 23168), 'tenncor.variable', 'tc.variable', (['data2', '"""var2"""'], {}), "(data2, 'var2')\n", (23153, 23168), True, 'import tenncor as tc\n'), ((23188, 23214), 'tenncor.variable', 'tc.variable', (['data3', '"""var3"""'], {}), "(data3, 'var3')\n", (23199, 23214), True, 'import tenncor as tc\n'), ((23519, 23533), 
'numpy.array', 'np.array', (['data'], {}), '(data)\n', (23527, 23533), True, 'import numpy as np\n'), ((23554, 23569), 'numpy.array', 'np.array', (['data2'], {}), '(data2)\n', (23562, 23569), True, 'import numpy as np\n'), ((23590, 23605), 'numpy.array', 'np.array', (['data3'], {}), '(data3)\n', (23598, 23605), True, 'import numpy as np\n'), ((23624, 23648), 'tenncor.variable', 'tc.variable', (['data', '"""var"""'], {}), "(data, 'var')\n", (23635, 23648), True, 'import tenncor as tc\n'), ((23668, 23694), 'tenncor.variable', 'tc.variable', (['data2', '"""var2"""'], {}), "(data2, 'var2')\n", (23679, 23694), True, 'import tenncor as tc\n'), ((23714, 23740), 'tenncor.variable', 'tc.variable', (['data3', '"""var3"""'], {}), "(data3, 'var3')\n", (23725, 23740), True, 'import tenncor as tc\n'), ((23760, 23785), 'tenncor.variable', 'tc.variable', (['data', '"""var4"""'], {}), "(data, 'var4')\n", (23771, 23785), True, 'import tenncor as tc\n'), ((23807, 23845), 'numpy.zeros', 'np.zeros', (['data.shape'], {'dtype': 'np.float32'}), '(data.shape, dtype=np.float32)\n', (23815, 23845), True, 'import numpy as np\n'), ((24308, 24322), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (24316, 24322), True, 'import numpy as np\n'), ((24343, 24358), 'numpy.array', 'np.array', (['data2'], {}), '(data2)\n', (24351, 24358), True, 'import numpy as np\n'), ((24379, 24394), 'numpy.array', 'np.array', (['data3'], {}), '(data3)\n', (24387, 24394), True, 'import numpy as np\n'), ((24413, 24437), 'tenncor.variable', 'tc.variable', (['data', '"""var"""'], {}), "(data, 'var')\n", (24424, 24437), True, 'import tenncor as tc\n'), ((24457, 24483), 'tenncor.variable', 'tc.variable', (['data2', '"""var2"""'], {}), "(data2, 'var2')\n", (24468, 24483), True, 'import tenncor as tc\n'), ((24503, 24529), 'tenncor.variable', 'tc.variable', (['data3', '"""var3"""'], {}), "(data3, 'var3')\n", (24514, 24529), True, 'import tenncor as tc\n'), ((24837, 24851), 'numpy.array', 'np.array', (['data'], {}), 
'(data)\n', (24845, 24851), True, 'import numpy as np\n'), ((24872, 24887), 'numpy.array', 'np.array', (['data2'], {}), '(data2)\n', (24880, 24887), True, 'import numpy as np\n'), ((24908, 24923), 'numpy.array', 'np.array', (['data3'], {}), '(data3)\n', (24916, 24923), True, 'import numpy as np\n'), ((24942, 24966), 'tenncor.variable', 'tc.variable', (['data', '"""var"""'], {}), "(data, 'var')\n", (24953, 24966), True, 'import tenncor as tc\n'), ((24986, 25012), 'tenncor.variable', 'tc.variable', (['data2', '"""var2"""'], {}), "(data2, 'var2')\n", (24997, 25012), True, 'import tenncor as tc\n'), ((25032, 25058), 'tenncor.variable', 'tc.variable', (['data3', '"""var3"""'], {}), "(data3, 'var3')\n", (25043, 25058), True, 'import tenncor as tc\n'), ((25078, 25103), 'tenncor.variable', 'tc.variable', (['data', '"""var4"""'], {}), "(data, 'var4')\n", (25089, 25103), True, 'import tenncor as tc\n'), ((25125, 25163), 'numpy.zeros', 'np.zeros', (['data.shape'], {'dtype': 'np.float32'}), '(data.shape, dtype=np.float32)\n', (25133, 25163), True, 'import numpy as np\n'), ((25622, 25636), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (25630, 25636), True, 'import numpy as np\n'), ((25657, 25672), 'numpy.array', 'np.array', (['data2'], {}), '(data2)\n', (25665, 25672), True, 'import numpy as np\n'), ((25691, 25715), 'tenncor.variable', 'tc.variable', (['data', '"""var"""'], {}), "(data, 'var')\n", (25702, 25715), True, 'import tenncor as tc\n'), ((25735, 25761), 'tenncor.variable', 'tc.variable', (['data2', '"""var2"""'], {}), "(data2, 'var2')\n", (25746, 25761), True, 'import tenncor as tc\n'), ((26079, 26093), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (26087, 26093), True, 'import numpy as np\n'), ((26114, 26129), 'numpy.array', 'np.array', (['data2'], {}), '(data2)\n', (26122, 26129), True, 'import numpy as np\n'), ((26148, 26172), 'tenncor.variable', 'tc.variable', (['data', '"""var"""'], {}), "(data, 'var')\n", (26159, 26172), True, 'import tenncor as 
tc\n'), ((26192, 26218), 'tenncor.variable', 'tc.variable', (['data2', '"""var2"""'], {}), "(data2, 'var2')\n", (26203, 26218), True, 'import tenncor as tc\n'), ((26238, 26263), 'tenncor.variable', 'tc.variable', (['data', '"""var3"""'], {}), "(data, 'var3')\n", (26249, 26263), True, 'import tenncor as tc\n'), ((26285, 26323), 'numpy.zeros', 'np.zeros', (['data.shape'], {'dtype': 'np.float32'}), '(data.shape, dtype=np.float32)\n', (26293, 26323), True, 'import numpy as np\n'), ((26799, 26813), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (26807, 26813), True, 'import numpy as np\n'), ((26834, 26849), 'numpy.array', 'np.array', (['data2'], {}), '(data2)\n', (26842, 26849), True, 'import numpy as np\n'), ((26868, 26892), 'tenncor.variable', 'tc.variable', (['data', '"""var"""'], {}), "(data, 'var')\n", (26879, 26892), True, 'import tenncor as tc\n'), ((26912, 26938), 'tenncor.variable', 'tc.variable', (['data2', '"""var2"""'], {}), "(data2, 'var2')\n", (26923, 26938), True, 'import tenncor as tc\n'), ((27242, 27256), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (27250, 27256), True, 'import numpy as np\n'), ((27277, 27292), 'numpy.array', 'np.array', (['data2'], {}), '(data2)\n', (27285, 27292), True, 'import numpy as np\n'), ((27311, 27335), 'tenncor.variable', 'tc.variable', (['data', '"""var"""'], {}), "(data, 'var')\n", (27322, 27335), True, 'import tenncor as tc\n'), ((27355, 27381), 'tenncor.variable', 'tc.variable', (['data2', '"""var2"""'], {}), "(data2, 'var2')\n", (27366, 27381), True, 'import tenncor as tc\n'), ((27401, 27426), 'tenncor.variable', 'tc.variable', (['data', '"""var3"""'], {}), "(data, 'var3')\n", (27412, 27426), True, 'import tenncor as tc\n'), ((27448, 27486), 'numpy.zeros', 'np.zeros', (['data.shape'], {'dtype': 'np.float32'}), '(data.shape, dtype=np.float32)\n', (27456, 27486), True, 'import numpy as np\n'), ((28031, 28045), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (28039, 28045), True, 'import numpy as 
np\n'), ((28064, 28088), 'tenncor.variable', 'tc.variable', (['data', '"""var"""'], {}), "(data, 'var')\n", (28075, 28088), True, 'import tenncor as tc\n'), ((28464, 28478), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (28472, 28478), True, 'import numpy as np\n'), ((28497, 28521), 'tenncor.variable', 'tc.variable', (['data', '"""var"""'], {}), "(data, 'var')\n", (28508, 28521), True, 'import tenncor as tc\n'), ((28541, 28566), 'tenncor.variable', 'tc.variable', (['data', '"""var2"""'], {}), "(data, 'var2')\n", (28552, 28566), True, 'import tenncor as tc\n'), ((28588, 28626), 'numpy.zeros', 'np.zeros', (['data.shape'], {'dtype': 'np.float32'}), '(data.shape, dtype=np.float32)\n', (28596, 28626), True, 'import numpy as np\n'), ((29151, 29165), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (29159, 29165), True, 'import numpy as np\n'), ((29186, 29201), 'numpy.array', 'np.array', (['data2'], {}), '(data2)\n', (29194, 29201), True, 'import numpy as np\n'), ((29222, 29237), 'numpy.array', 'np.array', (['data3'], {}), '(data3)\n', (29230, 29237), True, 'import numpy as np\n'), ((29254, 29276), 'tenncor.variable', 'tc.variable', (['data', '"""a"""'], {}), "(data, 'a')\n", (29265, 29276), True, 'import tenncor as tc\n'), ((29293, 29316), 'tenncor.variable', 'tc.variable', (['data2', '"""b"""'], {}), "(data2, 'b')\n", (29304, 29316), True, 'import tenncor as tc\n'), ((29333, 29356), 'tenncor.variable', 'tc.variable', (['data3', '"""c"""'], {}), "(data3, 'c')\n", (29344, 29356), True, 'import tenncor as tc\n'), ((29374, 29393), 'tenncor.api.matmul', 'tc.api.matmul', (['a', 'b'], {}), '(a, b)\n', (29387, 29393), True, 'import tenncor as tc\n'), ((29410, 29429), 'tenncor.api.matmul', 'tc.api.matmul', (['c', 'd'], {}), '(c, d)\n', (29423, 29429), True, 'import tenncor as tc\n'), ((29520, 29539), 'tenncor.api.matmul', 'tc.api.matmul', (['e', 'f'], {}), '(e, f)\n', (29533, 29539), True, 'import tenncor as tc\n'), ((29817, 29831), 'numpy.array', 'np.array', 
(['data'], {}), '(data)\n', (29825, 29831), True, 'import numpy as np\n'), ((29852, 29867), 'numpy.array', 'np.array', (['data2'], {}), '(data2)\n', (29860, 29867), True, 'import numpy as np\n'), ((29888, 29903), 'numpy.array', 'np.array', (['data3'], {}), '(data3)\n', (29896, 29903), True, 'import numpy as np\n'), ((29920, 29942), 'tenncor.variable', 'tc.variable', (['data', '"""a"""'], {}), "(data, 'a')\n", (29931, 29942), True, 'import tenncor as tc\n'), ((29959, 29982), 'tenncor.variable', 'tc.variable', (['data2', '"""b"""'], {}), "(data2, 'b')\n", (29970, 29982), True, 'import tenncor as tc\n'), ((29999, 30022), 'tenncor.variable', 'tc.variable', (['data3', '"""c"""'], {}), "(data3, 'c')\n", (30010, 30022), True, 'import tenncor as tc\n'), ((30039, 30061), 'tenncor.variable', 'tc.variable', (['data', '"""g"""'], {}), "(data, 'g')\n", (30050, 30061), True, 'import tenncor as tc\n'), ((30079, 30098), 'tenncor.api.matmul', 'tc.api.matmul', (['a', 'b'], {}), '(a, b)\n', (30092, 30098), True, 'import tenncor as tc\n'), ((30115, 30134), 'tenncor.api.matmul', 'tc.api.matmul', (['c', 'd'], {}), '(c, d)\n', (30128, 30134), True, 'import tenncor as tc\n'), ((30225, 30244), 'tenncor.api.matmul', 'tc.api.matmul', (['e', 'f'], {}), '(e, f)\n', (30238, 30244), True, 'import tenncor as tc\n'), ((30522, 30536), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (30530, 30536), True, 'import numpy as np\n'), ((30557, 30572), 'numpy.array', 'np.array', (['data2'], {}), '(data2)\n', (30565, 30572), True, 'import numpy as np\n'), ((30593, 30608), 'numpy.array', 'np.array', (['data3'], {}), '(data3)\n', (30601, 30608), True, 'import numpy as np\n'), ((30625, 30647), 'tenncor.variable', 'tc.variable', (['data', '"""a"""'], {}), "(data, 'a')\n", (30636, 30647), True, 'import tenncor as tc\n'), ((30664, 30687), 'tenncor.variable', 'tc.variable', (['data2', '"""b"""'], {}), "(data2, 'b')\n", (30675, 30687), True, 'import tenncor as tc\n'), ((30704, 30727), 'tenncor.variable', 
'tc.variable', (['data3', '"""c"""'], {}), "(data3, 'c')\n", (30715, 30727), True, 'import tenncor as tc\n'), ((30744, 30766), 'tenncor.variable', 'tc.variable', (['data', '"""g"""'], {}), "(data, 'g')\n", (30755, 30766), True, 'import tenncor as tc\n'), ((30784, 30803), 'tenncor.api.matmul', 'tc.api.matmul', (['a', 'b'], {}), '(a, b)\n', (30797, 30803), True, 'import tenncor as tc\n'), ((30820, 30839), 'tenncor.api.matmul', 'tc.api.matmul', (['c', 'd'], {}), '(c, d)\n', (30833, 30839), True, 'import tenncor as tc\n'), ((30930, 30949), 'tenncor.api.matmul', 'tc.api.matmul', (['e', 'f'], {}), '(e, f)\n', (30943, 30949), True, 'import tenncor as tc\n'), ((1012, 1025), 'numpy.array', 'np.array', (['out'], {}), '(out)\n', (1020, 1025), True, 'import numpy as np\n'), ((1401, 1417), 'tenncor.api.neg', 'tc.api.neg', (['src2'], {}), '(src2)\n', (1411, 1417), True, 'import tenncor as tc\n'), ((1450, 1463), 'numpy.array', 'np.array', (['out'], {}), '(out)\n', (1458, 1463), True, 'import numpy as np\n'), ((1802, 1815), 'numpy.array', 'np.array', (['out'], {}), '(out)\n', (1810, 1815), True, 'import numpy as np\n'), ((2188, 2215), 'tenncor.derive', 'tc.derive', (['res', '[var, var2]'], {}), '(res, [var, var2])\n', (2197, 2215), True, 'import tenncor as tc\n'), ((2245, 2278), 'numpy.zeros', 'np.zeros', (['shape'], {'dtype': 'np.float32'}), '(shape, dtype=np.float32)\n', (2253, 2278), True, 'import numpy as np\n'), ((2322, 2335), 'numpy.array', 'np.array', (['out'], {}), '(out)\n', (2330, 2335), True, 'import numpy as np\n'), ((2658, 2671), 'numpy.array', 'np.array', (['out'], {}), '(out)\n', (2666, 2671), True, 'import numpy as np\n'), ((3106, 3119), 'numpy.array', 'np.array', (['out'], {}), '(out)\n', (3114, 3119), True, 'import numpy as np\n'), ((3427, 3440), 'numpy.array', 'np.array', (['out'], {}), '(out)\n', (3435, 3440), True, 'import numpy as np\n'), ((3753, 3766), 'numpy.array', 'np.array', (['out'], {}), '(out)\n', (3761, 3766), True, 'import numpy as np\n'), ((4079, 
4092), 'numpy.array', 'np.array', (['out'], {}), '(out)\n', (4087, 4092), True, 'import numpy as np\n'), ((4597, 4624), 'tenncor.derive', 'tc.derive', (['res', '[var3, var]'], {}), '(res, [var3, var])\n', (4606, 4624), True, 'import tenncor as tc\n'), ((4657, 4670), 'numpy.array', 'np.array', (['out'], {}), '(out)\n', (4665, 4670), True, 'import numpy as np\n'), ((5219, 5247), 'tenncor.derive', 'tc.derive', (['res', '[var3, var2]'], {}), '(res, [var3, var2])\n', (5228, 5247), True, 'import tenncor as tc\n'), ((5280, 5293), 'numpy.array', 'np.array', (['out'], {}), '(out)\n', (5288, 5293), True, 'import numpy as np\n'), ((5636, 5657), 'tenncor.derive', 'tc.derive', (['res', '[var]'], {}), '(res, [var])\n', (5645, 5657), True, 'import tenncor as tc\n'), ((5692, 5705), 'numpy.array', 'np.array', (['out'], {}), '(out)\n', (5700, 5705), True, 'import numpy as np\n'), ((6007, 6028), 'tenncor.derive', 'tc.derive', (['res', '[var]'], {}), '(res, [var])\n', (6016, 6028), True, 'import tenncor as tc\n'), ((6063, 6076), 'numpy.array', 'np.array', (['out'], {}), '(out)\n', (6071, 6076), True, 'import numpy as np\n'), ((6378, 6399), 'tenncor.derive', 'tc.derive', (['res', '[var]'], {}), '(res, [var])\n', (6387, 6399), True, 'import tenncor as tc\n'), ((6434, 6447), 'numpy.array', 'np.array', (['out'], {}), '(out)\n', (6442, 6447), True, 'import numpy as np\n'), ((6771, 6793), 'numpy.random.rand', 'np.random.rand', (['*shape'], {}), '(*shape)\n', (6785, 6793), True, 'import numpy as np\n'), ((7151, 7178), 'tenncor.derive', 'tc.derive', (['var', '[var, var2]'], {}), '(var, [var, var2])\n', (7160, 7178), True, 'import tenncor as tc\n'), ((8807, 8840), 'tenncor.api.clip_by_range', 'tc.api.clip_by_range', (['x', '(0.3)', '(0.6)'], {}), '(x, 0.3, 0.6)\n', (8827, 8840), True, 'import tenncor as tc\n'), ((8945, 8972), 'tenncor.api.clip_by_l2norm', 'tc.api.clip_by_l2norm', (['x', '(5)'], {}), '(x, 5)\n', (8966, 8972), True, 'import tenncor as tc\n'), ((9066, 9104), 
'tenncor.api.softmax', 'tc.api.softmax', (['arr'], {'offset': '(0)', 'ndims': '(1)'}), '(arr, offset=0, ndims=1)\n', (9080, 9104), True, 'import tenncor as tc\n'), ((9169, 9207), 'tenncor.api.softmax', 'tc.api.softmax', (['arr'], {'offset': '(1)', 'ndims': '(1)'}), '(arr, offset=1, ndims=1)\n', (9183, 9207), True, 'import tenncor as tc\n'), ((11225, 11244), 'tenncor.api.n_dims', 'tc.api.n_dims', (['x', '(0)'], {}), '(x, 0)\n', (11238, 11244), True, 'import tenncor as tc\n'), ((11321, 11345), 'tenncor.api.extend', 'tc.api.extend', (['x', '(1)', '[3]'], {}), '(x, 1, [3])\n', (11334, 11345), True, 'import tenncor as tc\n'), ((11436, 11462), 'tenncor.api.reduce_sum_1d', 'tc.api.reduce_sum_1d', (['x', '(1)'], {}), '(x, 1)\n', (11456, 11462), True, 'import tenncor as tc\n'), ((11555, 11582), 'tenncor.api.reduce_prod_1d', 'tc.api.reduce_prod_1d', (['x', '(1)'], {}), '(x, 1)\n', (11576, 11582), True, 'import tenncor as tc\n'), ((11673, 11699), 'tenncor.api.reduce_min_1d', 'tc.api.reduce_min_1d', (['x', '(1)'], {}), '(x, 1)\n', (11693, 11699), True, 'import tenncor as tc\n'), ((11790, 11816), 'tenncor.api.reduce_max_1d', 'tc.api.reduce_max_1d', (['x', '(1)'], {}), '(x, 1)\n', (11810, 11816), True, 'import tenncor as tc\n'), ((11956, 11986), 'tenncor.api.reduce_sum', 'tc.api.reduce_sum', (['x'], {'offset': '(1)'}), '(x, offset=1)\n', (11973, 11986), True, 'import tenncor as tc\n'), ((12130, 12161), 'tenncor.api.reduce_prod', 'tc.api.reduce_prod', (['x'], {'offset': '(1)'}), '(x, offset=1)\n', (12148, 12161), True, 'import tenncor as tc\n'), ((12301, 12331), 'tenncor.api.reduce_min', 'tc.api.reduce_min', (['x'], {'offset': '(1)'}), '(x, offset=1)\n', (12318, 12331), True, 'import tenncor as tc\n'), ((12471, 12501), 'tenncor.api.reduce_max', 'tc.api.reduce_max', (['x'], {'offset': '(1)'}), '(x, offset=1)\n', (12488, 12501), True, 'import tenncor as tc\n'), ((13179, 13192), 'numpy.array', 'np.array', (['out'], {}), '(out)\n', (13187, 13192), True, 'import numpy as np\n'), 
((13703, 13730), 'tenncor.derive', 'tc.derive', (['res', '[var3, var]'], {}), '(res, [var3, var])\n', (13712, 13730), True, 'import tenncor as tc\n'), ((13763, 13776), 'numpy.array', 'np.array', (['out'], {}), '(out)\n', (13771, 13776), True, 'import numpy as np\n'), ((14337, 14365), 'tenncor.derive', 'tc.derive', (['res', '[var3, var2]'], {}), '(res, [var3, var2])\n', (14346, 14365), True, 'import tenncor as tc\n'), ((14398, 14411), 'numpy.array', 'np.array', (['out'], {}), '(out)\n', (14406, 14411), True, 'import numpy as np\n'), ((14742, 14755), 'numpy.array', 'np.array', (['out'], {}), '(out)\n', (14750, 14755), True, 'import numpy as np\n'), ((15087, 15100), 'numpy.array', 'np.array', (['out'], {}), '(out)\n', (15095, 15100), True, 'import numpy as np\n'), ((15571, 15584), 'numpy.array', 'np.array', (['out'], {}), '(out)\n', (15579, 15584), True, 'import numpy as np\n'), ((16132, 16159), 'tenncor.derive', 'tc.derive', (['res', '[var3, var]'], {}), '(res, [var3, var])\n', (16141, 16159), True, 'import tenncor as tc\n'), ((16192, 16205), 'numpy.array', 'np.array', (['out'], {}), '(out)\n', (16200, 16205), True, 'import numpy as np\n'), ((16804, 16835), 'tenncor.derive', 'tc.derive', (['res', '[var3, vkernel]'], {}), '(res, [var3, vkernel])\n', (16813, 16835), True, 'import tenncor as tc\n'), ((16868, 16881), 'numpy.array', 'np.array', (['out'], {}), '(out)\n', (16876, 16881), True, 'import numpy as np\n'), ((17376, 17389), 'numpy.array', 'np.array', (['out'], {}), '(out)\n', (17384, 17389), True, 'import numpy as np\n'), ((17917, 17946), 'tenncor.derive', 'tc.derive', (['res', '[var3, image]'], {}), '(res, [var3, image])\n', (17926, 17946), True, 'import tenncor as tc\n'), ((17979, 17992), 'numpy.array', 'np.array', (['out'], {}), '(out)\n', (17987, 17992), True, 'import numpy as np\n'), ((18571, 18601), 'tenncor.derive', 'tc.derive', (['res', '[var3, kernel]'], {}), '(res, [var3, kernel])\n', (18580, 18601), True, 'import tenncor as tc\n'), ((18634, 18647), 
'numpy.array', 'np.array', (['out'], {}), '(out)\n', (18642, 18647), True, 'import numpy as np\n'), ((18782, 18804), 'numpy.random.rand', 'np.random.rand', (['*shape'], {}), '(*shape)\n', (18796, 18804), True, 'import numpy as np\n'), ((19154, 19186), 'tenncor.api.nn.mean_pool2d', 'tc.api.nn.mean_pool2d', (['x', '[1, 2]'], {}), '(x, [1, 2])\n', (19175, 19186), True, 'import tenncor as tc\n'), ((19277, 19308), 'tenncor.api.nn.max_pool2d', 'tc.api.nn.max_pool2d', (['x', '[1, 2]'], {}), '(x, [1, 2])\n', (19297, 19308), True, 'import tenncor as tc\n'), ((19711, 19752), 'tenncor.api.reduce_sum', 'tc.api.reduce_sum', (['var'], {'offset': '(1)', 'ndims': '(1)'}), '(var, offset=1, ndims=1)\n', (19728, 19752), True, 'import tenncor as tc\n'), ((19791, 19804), 'numpy.array', 'np.array', (['out'], {}), '(out)\n', (19799, 19804), True, 'import numpy as np\n'), ((20269, 20310), 'tenncor.api.reduce_sum', 'tc.api.reduce_sum', (['var'], {'offset': '(1)', 'ndims': '(1)'}), '(var, offset=1, ndims=1)\n', (20286, 20310), True, 'import tenncor as tc\n'), ((20347, 20374), 'tenncor.derive', 'tc.derive', (['res', '[var3, var]'], {}), '(res, [var3, var])\n', (20356, 20374), True, 'import tenncor as tc\n'), ((20407, 20420), 'numpy.array', 'np.array', (['out'], {}), '(out)\n', (20415, 20420), True, 'import numpy as np\n'), ((20879, 20905), 'tenncor.api.extend', 'tc.api.extend', (['var', '(1)', '[3]'], {}), '(var, 1, [3])\n', (20892, 20905), True, 'import tenncor as tc\n'), ((20944, 20957), 'numpy.array', 'np.array', (['out'], {}), '(out)\n', (20952, 20957), True, 'import numpy as np\n'), ((21422, 21448), 'tenncor.api.extend', 'tc.api.extend', (['var', '(1)', '[3]'], {}), '(var, 1, [3])\n', (21435, 21448), True, 'import tenncor as tc\n'), ((21485, 21512), 'tenncor.derive', 'tc.derive', (['res', '[var3, var]'], {}), '(res, [var3, var])\n', (21494, 21512), True, 'import tenncor as tc\n'), ((21545, 21558), 'numpy.array', 'np.array', (['out'], {}), '(out)\n', (21553, 21558), True, 'import numpy 
as np\n'), ((22018, 22045), 'tenncor.api.permute', 'tc.api.permute', (['var', '[1, 0]'], {}), '(var, [1, 0])\n', (22032, 22045), True, 'import tenncor as tc\n'), ((22083, 22096), 'numpy.array', 'np.array', (['out'], {}), '(out)\n', (22091, 22096), True, 'import numpy as np\n'), ((22561, 22588), 'tenncor.api.permute', 'tc.api.permute', (['var', '[1, 0]'], {}), '(var, [1, 0])\n', (22575, 22588), True, 'import tenncor as tc\n'), ((22624, 22651), 'tenncor.derive', 'tc.derive', (['res', '[var3, var]'], {}), '(res, [var3, var])\n', (22633, 22651), True, 'import tenncor as tc\n'), ((22684, 22697), 'numpy.array', 'np.array', (['out'], {}), '(out)\n', (22692, 22697), True, 'import numpy as np\n'), ((23245, 23269), 'tenncor.api.matmul', 'tc.api.matmul', (['var', 'var2'], {}), '(var, var2)\n', (23258, 23269), True, 'import tenncor as tc\n'), ((23308, 23321), 'numpy.array', 'np.array', (['out'], {}), '(out)\n', (23316, 23321), True, 'import numpy as np\n'), ((23875, 23899), 'tenncor.api.matmul', 'tc.api.matmul', (['var', 'var2'], {}), '(var, var2)\n', (23888, 23899), True, 'import tenncor as tc\n'), ((23936, 23963), 'tenncor.derive', 'tc.derive', (['res', '[var4, var]'], {}), '(res, [var4, var])\n', (23945, 23963), True, 'import tenncor as tc\n'), ((23996, 24009), 'numpy.array', 'np.array', (['out'], {}), '(out)\n', (24004, 24009), True, 'import numpy as np\n'), ((24563, 24587), 'tenncor.api.matmul', 'tc.api.matmul', (['var', 'var2'], {}), '(var, var2)\n', (24576, 24587), True, 'import tenncor as tc\n'), ((24626, 24639), 'numpy.array', 'np.array', (['out'], {}), '(out)\n', (24634, 24639), True, 'import numpy as np\n'), ((25196, 25220), 'tenncor.api.matmul', 'tc.api.matmul', (['var', 'var2'], {}), '(var, var2)\n', (25209, 25220), True, 'import tenncor as tc\n'), ((25257, 25284), 'tenncor.derive', 'tc.derive', (['res', '[var4, var]'], {}), '(res, [var4, var])\n', (25266, 25284), True, 'import tenncor as tc\n'), ((25317, 25330), 'numpy.array', 'np.array', (['out'], {}), 
'(out)\n', (25325, 25330), True, 'import numpy as np\n'), ((25795, 25836), 'tenncor.api.reduce_sum', 'tc.api.reduce_sum', (['var'], {'offset': '(2)', 'ndims': '(1)'}), '(var, offset=2, ndims=1)\n', (25812, 25836), True, 'import tenncor as tc\n'), ((25875, 25888), 'numpy.array', 'np.array', (['out'], {}), '(out)\n', (25883, 25888), True, 'import numpy as np\n'), ((26356, 26397), 'tenncor.api.reduce_sum', 'tc.api.reduce_sum', (['var'], {'offset': '(2)', 'ndims': '(1)'}), '(var, offset=2, ndims=1)\n', (26373, 26397), True, 'import tenncor as tc\n'), ((26434, 26461), 'tenncor.derive', 'tc.derive', (['res', '[var3, var]'], {}), '(res, [var3, var])\n', (26443, 26461), True, 'import tenncor as tc\n'), ((26494, 26507), 'numpy.array', 'np.array', (['out'], {}), '(out)\n', (26502, 26507), True, 'import numpy as np\n'), ((26972, 26999), 'tenncor.api.extend', 'tc.api.extend', (['var', '(1)', '[10]'], {}), '(var, 1, [10])\n', (26985, 26999), True, 'import tenncor as tc\n'), ((27038, 27051), 'numpy.array', 'np.array', (['out'], {}), '(out)\n', (27046, 27051), True, 'import numpy as np\n'), ((27519, 27546), 'tenncor.api.extend', 'tc.api.extend', (['var', '(1)', '[10]'], {}), '(var, 1, [10])\n', (27532, 27546), True, 'import tenncor as tc\n'), ((27583, 27610), 'tenncor.derive', 'tc.derive', (['res', '[var3, var]'], {}), '(res, [var3, var])\n', (27592, 27610), True, 'import tenncor as tc\n'), ((27643, 27656), 'numpy.array', 'np.array', (['out'], {}), '(out)\n', (27651, 27656), True, 'import numpy as np\n'), ((28139, 28166), 'tenncor.api.extend', 'tc.api.extend', (['var', '(1)', '[10]'], {}), '(var, 1, [10])\n', (28152, 28166), True, 'import tenncor as tc\n'), ((28267, 28280), 'numpy.array', 'np.array', (['out'], {}), '(out)\n', (28275, 28280), True, 'import numpy as np\n'), ((28676, 28703), 'tenncor.api.extend', 'tc.api.extend', (['var', '(1)', '[10]'], {}), '(var, 1, [10])\n', (28689, 28703), True, 'import tenncor as tc\n'), ((28802, 28829), 'tenncor.derive', 'tc.derive', (['res', 
'[var2, var]'], {}), '(res, [var2, var])\n', (28811, 28829), True, 'import tenncor as tc\n'), ((28862, 28875), 'numpy.array', 'np.array', (['out'], {}), '(out)\n', (28870, 28875), True, 'import numpy as np\n'), ((29460, 29479), 'tenncor.api.transpose', 'tc.api.transpose', (['d'], {}), '(d)\n', (29476, 29479), True, 'import tenncor as tc\n'), ((29481, 29500), 'tenncor.api.transpose', 'tc.api.transpose', (['c'], {}), '(c)\n', (29497, 29500), True, 'import tenncor as tc\n'), ((29557, 29576), 'tenncor.derive', 'tc.derive', (['res', '[a]'], {}), '(res, [a])\n', (29566, 29576), True, 'import tenncor as tc\n'), ((29611, 29624), 'numpy.array', 'np.array', (['out'], {}), '(out)\n', (29619, 29624), True, 'import numpy as np\n'), ((30165, 30184), 'tenncor.api.transpose', 'tc.api.transpose', (['d'], {}), '(d)\n', (30181, 30184), True, 'import tenncor as tc\n'), ((30186, 30205), 'tenncor.api.transpose', 'tc.api.transpose', (['c'], {}), '(c)\n', (30202, 30205), True, 'import tenncor as tc\n'), ((30262, 30281), 'tenncor.derive', 'tc.derive', (['res', '[b]'], {}), '(res, [b])\n', (30271, 30281), True, 'import tenncor as tc\n'), ((30316, 30329), 'numpy.array', 'np.array', (['out'], {}), '(out)\n', (30324, 30329), True, 'import numpy as np\n'), ((30870, 30889), 'tenncor.api.transpose', 'tc.api.transpose', (['d'], {}), '(d)\n', (30886, 30889), True, 'import tenncor as tc\n'), ((30891, 30910), 'tenncor.api.transpose', 'tc.api.transpose', (['c'], {}), '(c)\n', (30907, 30910), True, 'import tenncor as tc\n'), ((30967, 30986), 'tenncor.derive', 'tc.derive', (['res', '[c]'], {}), '(res, [c])\n', (30976, 30986), True, 'import tenncor as tc\n'), ((31021, 31034), 'numpy.array', 'np.array', (['out'], {}), '(out)\n', (31029, 31034), True, 'import numpy as np\n'), ((12612, 12642), 'tenncor.api.argmax', 'tc.api.argmax', (['x'], {'return_dim': '(1)'}), '(x, return_dim=1)\n', (12625, 12642), True, 'import tenncor as tc\n'), ((15030, 15051), 'tenncor.derive', 'tc.derive', (['res', '[var]'], {}), 
'(res, [var])\n', (15039, 15051), True, 'import tenncor as tc\n'), ((28199, 28225), 'tenncor.api.extend', 'tc.api.extend', (['var', '(1)', '[3]'], {}), '(var, 1, [3])\n', (28212, 28225), True, 'import tenncor as tc\n'), ((28736, 28762), 'tenncor.api.extend', 'tc.api.extend', (['var', '(1)', '[3]'], {}), '(var, 1, [3])\n', (28749, 28762), True, 'import tenncor as tc\n')] |
# Make a plot of age vs J_z for Kepler-TGAS.
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import h5py
import os
from gyro import gyro_age
from actions import action
from granola import get_properties
# Global matplotlib styling applied to every figure this script produces
# (large axis labels/ticks, LaTeX text rendering).
plotpar = {'axes.labelsize': 18,
           'font.size': 10,
           'legend.fontsize': 13,
           'xtick.labelsize': 18,
           'ytick.labelsize': 18,
           'text.usetex': True}
plt.rcParams.update(plotpar)
def calc_dispersion(age, jz, nbins):
    """
    Bin the sample by age and measure the jz spread in each bin.

    Returns the histogram bin edges, the per-bin jz variance (via RMS,
    which — despite its name — returns std**2), the per-bin star counts,
    and the per-bin median jz.  A leading entry for age < edges[0] is
    always included; since edges[0] == min(age) that slice is empty and
    its dispersion/median evaluate to NaN.
    """
    _, edges = np.histogram(age, nbins)
    dispersions, counts, medians = [], [], []
    # Leading "below the first edge" bin (empty by construction — see above).
    below = age < edges[0]
    dispersions.append(RMS(jz[below]))
    medians.append(np.median(jz[below]))
    counts.append(len(age[below]))
    # One entry per histogram bin, skipping bins with no stars.
    for lo, hi in zip(edges[:-1], edges[1:]):
        in_bin = (lo < age) & (age < hi)
        if in_bin.any():
            dispersions.append(RMS(jz[in_bin]))
            counts.append(len(age[in_bin]))
            medians.append(np.median(jz[in_bin]))
    return edges, np.array(dispersions), np.array(counts), np.array(medians)
def RMS(x):
    """Return std(x)**2, i.e. the (population) variance of x.

    NOTE: despite the name this is NOT a root-mean-square; a true RMS
    variant is left commented out below for reference.
    """
    # return (np.median(x**2))**.5
    spread = np.std(x)
    return spread ** 2
def dispersion(ages, Jzs, minage, maxage):
    """
    Jz variance for stars in a single age window.

    Selects stars with minage < age < maxage (exclusive at both ends) and
    returns their RMS (= variance, see RMS) together with the star count.
    """
    in_window = (minage < ages) & (ages < maxage)
    return RMS(Jzs[in_window]), len(ages[in_window])
def x_and_y(ages, Jzs):
    """
    Sliding-window dispersion curve.

    Evaluates the Jz variance in a +/- 0.5 window around 1000 points
    spanning the age range; returns the grid and the variances.
    """
    xs = np.linspace(min(ages), max(ages), 1000)
    ys = [dispersion(ages, Jzs, x - .5, x + .5)[0] for x in xs]
    return xs, ys
if __name__ == "__main__":
    DATA_DIR = "/Users/ruthangus/granola/granola/data"  # NOTE: currently unused

    # Load the Barnes-model ages/actions and keep physically sensible ages.
    d = pd.read_csv("data/ages_and_actions.csv")
    # d = pd.read_csv("ages_and_actions_vansaders.csv")
    m = (d.age.values > 0) * (d.age.values < 14)
    df = d.iloc[m]

    # --- Linear-age dispersion plot -------------------------------------
    ages, dispersions, Ns, means = calc_dispersion(df.age.values,
                                                   df.Jz.values, 8)
    d_err = dispersions / (2 * Ns - 2)**.5
    print(dispersions[:10], means[:10])
    # BUGFIX: removed a leftover debug `assert 0` that halted the script
    # here, so none of the plots below were ever produced.

    plt.clf()
    plt.step(ages, means, color="k")
    plt.errorbar(ages - .5*(ages[1] - ages[0]), np.array(means),
                 yerr=d_err, fmt="k.", capsize=0, ms=.1)
    # Raw strings so the LaTeX backslashes are not (invalid) escapes.
    plt.xlabel(r"$\mathrm{Age~Gyr}$")
    plt.ylabel(r"$\sigma J_z~(\mathrm{Kpc~kms}^{-1})$")
    plt.savefig("linear_age_dispersion.pdf")
    # plt.savefig("linear_age_dispersion_vansaders.pdf")

    # --- Log-age dispersion plot ----------------------------------------
    # NB: the age cut uses the natural log but the binning uses log10,
    # as in the original analysis.
    m = np.log(df.age.values) > -1
    lnages, dispersions, Ns, means = calc_dispersion(np.log10(df.age.values[m]),
                                                     df.Jz.values[m], 8)
    d_err = dispersions / (2 * Ns - 2)**.5
    plt.clf()
    plt.errorbar(lnages - .5*(lnages[1] - lnages[0]), np.array(dispersions),
                 yerr=d_err, fmt="k.", capsize=0, ms=.1)
    plt.step(lnages, dispersions, color="k")
    plt.xlabel(r"$\log_{10}(\mathrm{Age,~Gyr})$")
    plt.ylabel(r"$\sigma J_z~(\mathrm{Kpc~kms}^{-1})$")
    plt.subplots_adjust(left=.15, bottom=.15)
    plt.savefig("log_age_dispersion.pdf")
    # plt.savefig("log_age_dispersion_vansaders.pdf")

    # --- Continuous (sliding-window) dispersion plot --------------------
    m = np.log(df.age.values) > -1
    x, y = x_and_y(np.log(df.age.values[m]), df.Jz.values[m])
    plt.clf()
    plt.plot(x, y)
    plt.savefig("cont_age_dispersion.pdf")
    # plt.savefig("cont_age_dispersion_vansaders.pdf")

    # --- van Saders model vs Barnes model on the same axes --------------
    d1 = pd.read_csv("data/ages_and_actions.csv")
    d2 = pd.read_csv("data/ages_and_actions_vansaders.csv")
    print(np.shape(d1), np.shape(d2))
    d = pd.merge(d1, d2, on="KIC", how="inner", suffixes=("", "_vs"))
    m1 = (d.age.values > 0) * (d.age.values < 14)
    d1 = d.iloc[m1]
    m2 = (d.age_vs.values > 0) * (d.age_vs.values < 14)
    d2 = d.iloc[m2]

    m1 = np.log(d1.age.values) > -1
    # BUGFIX: calc_dispersion returns FOUR values (edges, dispersions, Ns,
    # means); the original 3-way unpack raised ValueError.  Also use
    # dispersions1 (not the stale `dispersions` from above) in d_err1.
    lnages1, dispersions1, Ns1, means1 = calc_dispersion(
        np.log10(d1.age.values[m1]), d1.Jz.values[m1], 8)
    d_err1 = dispersions1 / (2 * Ns1 - 2)**.5
    m2 = (np.log(d2.age_vs.values) > -1)
    lnages2, dispersions2, Ns2, means2 = calc_dispersion(
        np.log10(d2.age_vs.values[m2]), d2.Jz_vs.values[m2], 8)
    d_err2 = dispersions2 / (2 * Ns2 - 2)**.5

    plt.clf()
    plt.errorbar(lnages1 - .5*(lnages1[1] - lnages1[0]),
                 np.array(dispersions1), yerr=d_err1, ms=5, fmt="o",
                 capsize=0, color="cornflowerblue",
                 label=r"$\mathrm{Model}~1$")
    plt.errorbar(lnages2 - .5*(lnages2[1] - lnages2[0]),
                 np.array(dispersions2), yerr=d_err2, ms=5, fmt="o",
                 capsize=0, color="orange", label=r"$\mathrm{Model}~2$")
    plt.legend(loc="lower right")
    plt.xlabel(r"$\log_{10}(\mathrm{Age,~Gyr})$")
    plt.ylabel(r"$\sigma J_z~(\mathrm{Kpc~kms}^{-1})$")
    plt.subplots_adjust(left=.15, bottom=.15)
    plt.ylim(0, 8)
    dw = pd.read_csv("data/dwarf.txt")
    plt.plot(np.log10(dw.age.values), dw.jz.values, ".7", ls="--")
    plt.savefig("log_age_dispersion_both.pdf")
    plt.savefig("log_age_dispersion_both")

    # --- Jz histograms for the two models -------------------------------
    plt.clf()
    plt.hist(d1.Jz.values[m1], 20, alpha=.5, edgecolor="k")
    plt.hist(d2.Jz_vs.values[m2], 1000, alpha=.5, edgecolor="k")
    plt.xlim(0, 100)
    plt.savefig("jz_hist_compare")
| [
"matplotlib.pyplot.clf",
"pandas.read_csv",
"numpy.shape",
"matplotlib.pyplot.step",
"numpy.histogram",
"numpy.std",
"pandas.merge",
"matplotlib.pyplot.rcParams.update",
"numpy.log10",
"matplotlib.pyplot.ylim",
"numpy.median",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.subplots_adjust",
... | [((425, 453), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (['plotpar'], {}), '(plotpar)\n', (444, 453), True, 'import matplotlib.pyplot as plt\n'), ((514, 538), 'numpy.histogram', 'np.histogram', (['age', 'nbins'], {}), '(age, nbins)\n', (526, 538), True, 'import numpy as np\n'), ((1542, 1582), 'pandas.read_csv', 'pd.read_csv', (['"""data/ages_and_actions.csv"""'], {}), "('data/ages_and_actions.csv')\n", (1553, 1582), True, 'import pandas as pd\n'), ((1892, 1901), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (1899, 1901), True, 'import matplotlib.pyplot as plt\n'), ((2083, 2115), 'matplotlib.pyplot.step', 'plt.step', (['ages', 'means'], {'color': '"""k"""'}), "(ages, means, color='k')\n", (2091, 2115), True, 'import matplotlib.pyplot as plt\n'), ((2242, 2275), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\mathrm{Age~Gyr}$"""'], {}), "('$\\\\mathrm{Age~Gyr}$')\n", (2252, 2275), True, 'import matplotlib.pyplot as plt\n'), ((2279, 2331), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\sigma J_z~(\\\\mathrm{Kpc~kms}^{-1})$"""'], {}), "('$\\\\sigma J_z~(\\\\mathrm{Kpc~kms}^{-1})$')\n", (2289, 2331), True, 'import matplotlib.pyplot as plt\n'), ((2334, 2374), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""linear_age_dispersion.pdf"""'], {}), "('linear_age_dispersion.pdf')\n", (2345, 2374), True, 'import matplotlib.pyplot as plt\n'), ((2656, 2665), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (2663, 2665), True, 'import matplotlib.pyplot as plt\n'), ((2804, 2844), 'matplotlib.pyplot.step', 'plt.step', (['lnages', 'dispersions'], {'color': '"""k"""'}), "(lnages, dispersions, color='k')\n", (2812, 2844), True, 'import matplotlib.pyplot as plt\n'), ((3022, 3068), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\log_{10}(\\\\mathrm{Age,~Gyr})$"""'], {}), "('$\\\\log_{10}(\\\\mathrm{Age,~Gyr})$')\n", (3032, 3068), True, 'import matplotlib.pyplot as plt\n'), ((3071, 3123), 'matplotlib.pyplot.ylabel', 'plt.ylabel', 
(['"""$\\\\sigma J_z~(\\\\mathrm{Kpc~kms}^{-1})$"""'], {}), "('$\\\\sigma J_z~(\\\\mathrm{Kpc~kms}^{-1})$')\n", (3081, 3123), True, 'import matplotlib.pyplot as plt\n'), ((3150, 3193), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.15)', 'bottom': '(0.15)'}), '(left=0.15, bottom=0.15)\n', (3169, 3193), True, 'import matplotlib.pyplot as plt\n'), ((3196, 3233), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""log_age_dispersion.pdf"""'], {}), "('log_age_dispersion.pdf')\n", (3207, 3233), True, 'import matplotlib.pyplot as plt\n'), ((3392, 3401), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (3399, 3401), True, 'import matplotlib.pyplot as plt\n'), ((3406, 3420), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {}), '(x, y)\n', (3414, 3420), True, 'import matplotlib.pyplot as plt\n'), ((3425, 3463), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""cont_age_dispersion.pdf"""'], {}), "('cont_age_dispersion.pdf')\n", (3436, 3463), True, 'import matplotlib.pyplot as plt\n'), ((3661, 3701), 'pandas.read_csv', 'pd.read_csv', (['"""data/ages_and_actions.csv"""'], {}), "('data/ages_and_actions.csv')\n", (3672, 3701), True, 'import pandas as pd\n'), ((3711, 3761), 'pandas.read_csv', 'pd.read_csv', (['"""data/ages_and_actions_vansaders.csv"""'], {}), "('data/ages_and_actions_vansaders.csv')\n", (3722, 3761), True, 'import pandas as pd\n'), ((3808, 3869), 'pandas.merge', 'pd.merge', (['d1', 'd2'], {'on': '"""KIC"""', 'how': '"""inner"""', 'suffixes': "('', '_vs')"}), "(d1, d2, on='KIC', how='inner', suffixes=('', '_vs'))\n", (3816, 3869), True, 'import pandas as pd\n'), ((4686, 4695), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (4693, 4695), True, 'import matplotlib.pyplot as plt\n'), ((6449, 6478), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""'}), "(loc='lower right')\n", (6459, 6478), True, 'import matplotlib.pyplot as plt\n'), ((6506, 6552), 'matplotlib.pyplot.xlabel', 'plt.xlabel', 
(['"""$\\\\log_{10}(\\\\mathrm{Age,~Gyr})$"""'], {}), "('$\\\\log_{10}(\\\\mathrm{Age,~Gyr})$')\n", (6516, 6552), True, 'import matplotlib.pyplot as plt\n'), ((6555, 6607), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\sigma J_z~(\\\\mathrm{Kpc~kms}^{-1})$"""'], {}), "('$\\\\sigma J_z~(\\\\mathrm{Kpc~kms}^{-1})$')\n", (6565, 6607), True, 'import matplotlib.pyplot as plt\n'), ((6610, 6653), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.15)', 'bottom': '(0.15)'}), '(left=0.15, bottom=0.15)\n', (6629, 6653), True, 'import matplotlib.pyplot as plt\n'), ((6681, 6695), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(8)'], {}), '(0, 8)\n', (6689, 6695), True, 'import matplotlib.pyplot as plt\n'), ((6706, 6735), 'pandas.read_csv', 'pd.read_csv', (['"""data/dwarf.txt"""'], {}), "('data/dwarf.txt')\n", (6717, 6735), True, 'import pandas as pd\n'), ((6808, 6850), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""log_age_dispersion_both.pdf"""'], {}), "('log_age_dispersion_both.pdf')\n", (6819, 6850), True, 'import matplotlib.pyplot as plt\n'), ((6855, 6893), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""log_age_dispersion_both"""'], {}), "('log_age_dispersion_both')\n", (6866, 6893), True, 'import matplotlib.pyplot as plt\n'), ((6899, 6908), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (6906, 6908), True, 'import matplotlib.pyplot as plt\n'), ((6913, 6969), 'matplotlib.pyplot.hist', 'plt.hist', (['d1.Jz.values[m1]', '(20)'], {'alpha': '(0.5)', 'edgecolor': '"""k"""'}), "(d1.Jz.values[m1], 20, alpha=0.5, edgecolor='k')\n", (6921, 6969), True, 'import matplotlib.pyplot as plt\n'), ((6973, 7034), 'matplotlib.pyplot.hist', 'plt.hist', (['d2.Jz_vs.values[m2]', '(1000)'], {'alpha': '(0.5)', 'edgecolor': '"""k"""'}), "(d2.Jz_vs.values[m2], 1000, alpha=0.5, edgecolor='k')\n", (6981, 7034), True, 'import matplotlib.pyplot as plt\n'), ((7038, 7054), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(100)'], {}), '(0, 100)\n', (7046, 
7054), True, 'import matplotlib.pyplot as plt\n'), ((7059, 7089), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""jz_hist_compare"""'], {}), "('jz_hist_compare')\n", (7070, 7089), True, 'import matplotlib.pyplot as plt\n'), ((671, 687), 'numpy.median', 'np.median', (['jz[m]'], {}), '(jz[m])\n', (680, 687), True, 'import numpy as np\n'), ((959, 980), 'numpy.array', 'np.array', (['dispersions'], {}), '(dispersions)\n', (967, 980), True, 'import numpy as np\n'), ((982, 994), 'numpy.array', 'np.array', (['Ns'], {}), '(Ns)\n', (990, 994), True, 'import numpy as np\n'), ((996, 1011), 'numpy.array', 'np.array', (['means'], {}), '(means)\n', (1004, 1011), True, 'import numpy as np\n'), ((1072, 1081), 'numpy.std', 'np.std', (['x'], {}), '(x)\n', (1078, 1081), True, 'import numpy as np\n'), ((2164, 2179), 'numpy.array', 'np.array', (['means'], {}), '(means)\n', (2172, 2179), True, 'import numpy as np\n'), ((2441, 2462), 'numpy.log', 'np.log', (['df.age.values'], {}), '(df.age.values)\n', (2447, 2462), True, 'import numpy as np\n'), ((2522, 2548), 'numpy.log10', 'np.log10', (['df.age.values[m]'], {}), '(df.age.values[m])\n', (2530, 2548), True, 'import numpy as np\n'), ((2720, 2741), 'numpy.array', 'np.array', (['dispersions'], {}), '(dispersions)\n', (2728, 2741), True, 'import numpy as np\n'), ((3297, 3318), 'numpy.log', 'np.log', (['df.age.values'], {}), '(df.age.values)\n', (3303, 3318), True, 'import numpy as np\n'), ((3344, 3368), 'numpy.log', 'np.log', (['df.age.values[m]'], {}), '(df.age.values[m])\n', (3350, 3368), True, 'import numpy as np\n'), ((3772, 3784), 'numpy.shape', 'np.shape', (['d1'], {}), '(d1)\n', (3780, 3784), True, 'import numpy as np\n'), ((3786, 3798), 'numpy.shape', 'np.shape', (['d2'], {}), '(d2)\n', (3794, 3798), True, 'import numpy as np\n'), ((4027, 4048), 'numpy.log', 'np.log', (['d1.age.values'], {}), '(d1.age.values)\n', (4033, 4048), True, 'import numpy as np\n'), ((4104, 4131), 'numpy.log10', 'np.log10', (['d1.age.values[m1]'], {}), 
'(d1.age.values[m1])\n', (4112, 4131), True, 'import numpy as np\n'), ((4259, 4283), 'numpy.log', 'np.log', (['d2.age_vs.values'], {}), '(d2.age_vs.values)\n', (4265, 4283), True, 'import numpy as np\n'), ((4380, 4410), 'numpy.log10', 'np.log10', (['d2.age_vs.values[m2]'], {}), '(d2.age_vs.values[m2])\n', (4388, 4410), True, 'import numpy as np\n'), ((5911, 5933), 'numpy.array', 'np.array', (['dispersions1'], {}), '(dispersions1)\n', (5919, 5933), True, 'import numpy as np\n'), ((6223, 6245), 'numpy.array', 'np.array', (['dispersions2'], {}), '(dispersions2)\n', (6231, 6245), True, 'import numpy as np\n'), ((6749, 6772), 'numpy.log10', 'np.log10', (['dw.age.values'], {}), '(dw.age.values)\n', (6757, 6772), True, 'import numpy as np\n'), ((924, 940), 'numpy.median', 'np.median', (['jz[m]'], {}), '(jz[m])\n', (933, 940), True, 'import numpy as np\n')] |
"""This module generates a pruned set of the data set. The procedure of pruning is as follows:
1. Import the data and have a low level energy associated with each sample.
2. Run a clustering algorithm. The centres of each cluster are what is kept as a point in the pruned data set.
3. Run a furthest first traversal algorithm to keep the samples that are as different from each other as possible.
4. Calculate the energy at a higher level of theory and calculate the energy difference for each point in the pruned
data set. Plot the distribution of the errors and decide what to do with the outliers.
"""
import numpy as np
from sklearn.cluster import KMeans
from sklearn.cluster import MiniBatchKMeans
from sklearn.metrics.pairwise import euclidean_distances
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
class Pruning():
    """
    This class contains functions that help pruning the data that will then be used to fit a neural network.
    It takes as an input the X and the y part of the data. The X part can contain only the coordinates (not the atom labels).
    The y part contains the energies.
    It has been created with the idea that it will be used in a jupyter notebook.

    :X: array of shape (n_samples, dim x n_atoms)
    :y: numpy array of shape (n_samples,)
    """

    def __init__(self, X, y):
        self.X = X
        self.y = y
        self.dim = X.shape[1]  # number of features per sample

    def elbow(self, n_centres):
        """
        This function does the elbow procedure to work out the best number of centres to use. The n_centres parameter
        contains a list with a list of values to try. It makes a plot to let the user decide the best number of centres
        to use.

        :n_centres: list of int
        """
        tot_sum_of_sq = []
        for i in n_centres:
            kmeans = KMeans(n_clusters=i).fit(self.X)
            clusters_idx = kmeans.predict(self.X)  # indices of which cluster each point belongs to
            centres = kmeans.cluster_centers_
            sum_of_squares = 0
            for j, item in enumerate(clusters_idx):
                # BUG FIX: the original indexed self.X with the *outer* loop
                # variable `i` (a cluster count) instead of the sample index
                # `j`, and reshaped to (-1, 1), which compared the feature
                # components as separate 1-D points.  Compute the proper
                # sample-to-centre Euclidean distance instead.
                dist = euclidean_distances(self.X[j].reshape(1, -1),
                                            centres[item].reshape(1, -1))[0][0]
                sum_of_squares = sum_of_squares + dist ** 2
            tot_sum_of_sq.append(sum_of_squares)
        self.__plot_elbow(n_centres, tot_sum_of_sq)

    def get_X(self):
        """Return the raw feature matrix X."""
        return self.X

    def clustering(self, n_clusters):
        """
        This function clusters the data into n_clusters and returns the indexes of the data points in each cluster that
        are closest to the centre.

        :n_clusters: int
        :return: array of int
        """
        # MiniBatchKMeans scales much better once the cluster count is large.
        if n_clusters < 5000:
            kmeans = KMeans(n_clusters=n_clusters).fit(self.X)
        else:
            kmeans = MiniBatchKMeans(n_clusters=n_clusters).fit(self.X)
        clusters_idx = kmeans.predict(self.X)  # indices of which cluster each point belongs to
        self.centres = kmeans.cluster_centers_
        dist_mat = kmeans.transform(self.X)  # (n_samples, n_clusters) matrix of distances of each sample to each centre
        # For every cluster keep the sample that lies closest to its centre.
        self.idx_clust = np.zeros((n_clusters,))
        for i in range(n_clusters):
            self.idx_clust[i] = np.argmin(dist_mat[:, i])
        self.idx_clust = self.idx_clust.astype(int)
        self.X_cl = np.zeros((len(self.idx_clust), self.X.shape[1]))
        for i, item in enumerate(self.idx_clust):
            self.X_cl[i, :] = self.X[item, :]
        if self.dim == 2:
            self.__plot_centres()
        return self.idx_clust

    def __plot_centres(self):
        """Scatter-plot the data, the cluster centres and the kept samples (2-D data only)."""
        fig, ax = plt.subplots(figsize=(6, 6))
        ax.scatter(self.X[:, 0], self.X[:, 1], label="Points", color="yellow")
        ax.scatter(self.centres[:, 0], self.centres[:, 1], label="Centres", color="black")
        ax.scatter(self.X_cl[:, 0], self.X_cl[:, 1], label="Points to keep", color="red")
        ax.set_xlabel('x1')
        ax.set_ylabel('x2')
        ax.legend()
        plt.show()

    def __plot_fft(self):
        """Scatter-plot the samples kept by the furthest-first traversal (2-D data only)."""
        fig, ax = plt.subplots(figsize=(6, 6))
        ax.scatter(self.X[:, 0], self.X[:, 1], label="Points", color="yellow")
        ax.scatter(self.centres[:, 0], self.centres[:, 1], label="Centres", color="black")
        ax.scatter(self.X_fft[:, 0], self.X_fft[:, 1], label="Points to keep", color="red")
        ax.set_xlabel('x1')
        ax.set_ylabel('x2')
        ax.legend()
        plt.show()

    def __plot_elbow(self, n_centres, tot_sum_of_sq):
        """Plot the within-cluster sum of squares against the number of clusters."""
        k_df = pd.DataFrame()
        k_df['n of clusters'] = n_centres
        k_df['sum of squares'] = tot_sum_of_sq
        fig, ax = plt.subplots(figsize=(10, 7))
        sns.pointplot(x="n of clusters", y="sum of squares", data=k_df)
        ax.set_ylabel('Sum of distance squares')
        ax.set_xlabel('Number of clusters')
        plt.show()

    def fft_idx(self, n_points, save=False):
        """
        This function goes through all the points in the data set and returns the **indices** of the samples to put into
        the training set. n_points is the number of points that should be put into the training set. There is the option
        of printing to a file the indexes of the samples to keep.

        :n_points: int (smaller than n_samples)
        :save: bool
        :return: list of int
        """
        # Pairwise distances between the retained cluster representatives.
        dist_mat = euclidean_distances(self.X_cl, self.X_cl)
        n_samples = self.X_cl.shape[0]
        self.idx_fft = np.zeros((n_points, )).astype(int)
        # Seed the traversal with a random sample.
        idx = np.int32(np.random.uniform(n_samples))
        self.idx_fft[0] = idx
        # Running minimum distance from every sample to the selected set.
        # Updating it incrementally gives O(n_points * n_samples) instead of
        # the original O(n_points**2 * n_samples) rescan, with identical picks.
        min_dist = dist_mat[idx, :].copy()
        for i in range(1, n_points):
            dist_idx = np.int32(np.argmax(min_dist))
            self.idx_fft[i] = dist_idx
            min_dist = np.minimum(min_dist, dist_mat[dist_idx, :])
        self.X_fft = np.zeros((len(self.idx_fft), self.dim))
        for i, item in enumerate(self.idx_fft):
            self.X_fft[i, :] = self.X_cl[item, :]
        if save == True:
            np.save("idx_fft.npy", self.idx_fft)
        if self.dim == 2:
            self.__plot_fft()
        return self.idx_fft
if __name__ == "__main__":
    # Smoke-test the pruning pipeline on three synthetic Gaussian blobs.
    from sklearn.datasets.samples_generator import make_blobs
    blob_centres = [[18, 18], [-18, -18], [18, -18]]
    n_clusters = len(blob_centres)
    X, y = make_blobs(n_samples=8000, centers=blob_centres, cluster_std=5)
    pruner = Pruning(X, y)
    # pruner.elbow(range(1, 30))
    n_clusters = 6000
    idx_clust = pruner.clustering(n_clusters=n_clusters)
    idx_fft = pruner.fft_idx(50)
| [
"pandas.DataFrame",
"numpy.random.uniform",
"sklearn.cluster.MiniBatchKMeans",
"numpy.save",
"matplotlib.pyplot.show",
"numpy.amin",
"numpy.argmax",
"seaborn.pointplot",
"sklearn.cluster.KMeans",
"numpy.zeros",
"sklearn.metrics.pairwise.euclidean_distances",
"numpy.argmin",
"sklearn.datasets... | [((6474, 6532), 'sklearn.datasets.samples_generator.make_blobs', 'make_blobs', ([], {'n_samples': '(8000)', 'centers': 'centers', 'cluster_std': '(5)'}), '(n_samples=8000, centers=centers, cluster_std=5)\n', (6484, 6532), False, 'from sklearn.datasets.samples_generator import make_blobs\n'), ((3164, 3187), 'numpy.zeros', 'np.zeros', (['(n_clusters,)'], {}), '((n_clusters,))\n', (3172, 3187), True, 'import numpy as np\n'), ((3643, 3671), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(6, 6)'}), '(figsize=(6, 6))\n', (3655, 3671), True, 'import matplotlib.pyplot as plt\n'), ((4010, 4020), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4018, 4020), True, 'import matplotlib.pyplot as plt\n'), ((4067, 4095), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(6, 6)'}), '(figsize=(6, 6))\n', (4079, 4095), True, 'import matplotlib.pyplot as plt\n'), ((4439, 4449), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4447, 4449), True, 'import matplotlib.pyplot as plt\n'), ((4519, 4533), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (4531, 4533), True, 'import pandas as pd\n'), ((4641, 4670), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(10, 7)'}), '(figsize=(10, 7))\n', (4653, 4670), True, 'import matplotlib.pyplot as plt\n'), ((4679, 4742), 'seaborn.pointplot', 'sns.pointplot', ([], {'x': '"""n of clusters"""', 'y': '"""sum of squares"""', 'data': 'k_df'}), "(x='n of clusters', y='sum of squares', data=k_df)\n", (4692, 4742), True, 'import seaborn as sns\n'), ((4844, 4854), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4852, 4854), True, 'import matplotlib.pyplot as plt\n'), ((5398, 5439), 'sklearn.metrics.pairwise.euclidean_distances', 'euclidean_distances', (['self.X_cl', 'self.X_cl'], {}), '(self.X_cl, self.X_cl)\n', (5417, 5439), False, 'from sklearn.metrics.pairwise import euclidean_distances\n'), ((3257, 3282), 'numpy.argmin', 'np.argmin', (['dist_mat[:, i]'], 
{}), '(dist_mat[:, i])\n', (3266, 3282), True, 'import numpy as np\n'), ((5613, 5641), 'numpy.random.uniform', 'np.random.uniform', (['n_samples'], {}), '(n_samples)\n', (5630, 5641), True, 'import numpy as np\n'), ((5853, 5879), 'numpy.amin', 'np.amin', (['dist_list'], {'axis': '(0)'}), '(dist_list, axis=0)\n', (5860, 5879), True, 'import numpy as np\n'), ((6171, 6207), 'numpy.save', 'np.save', (['"""idx_fft.npy"""', 'self.idx_fft'], {}), "('idx_fft.npy', self.idx_fft)\n", (6178, 6207), True, 'import numpy as np\n'), ((5504, 5525), 'numpy.zeros', 'np.zeros', (['(n_points,)'], {}), '((n_points,))\n', (5512, 5525), True, 'import numpy as np\n'), ((5912, 5931), 'numpy.argmax', 'np.argmax', (['dist_set'], {}), '(dist_set)\n', (5921, 5931), True, 'import numpy as np\n'), ((1844, 1864), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'i'}), '(n_clusters=i)\n', (1850, 1864), False, 'from sklearn.cluster import KMeans\n'), ((2745, 2774), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'n_clusters'}), '(n_clusters=n_clusters)\n', (2751, 2774), False, 'from sklearn.cluster import KMeans\n'), ((2822, 2860), 'sklearn.cluster.MiniBatchKMeans', 'MiniBatchKMeans', ([], {'n_clusters': 'n_clusters'}), '(n_clusters=n_clusters)\n', (2837, 2860), False, 'from sklearn.cluster import MiniBatchKMeans\n')] |
# -*- coding: utf-8 -*-
""" Authors: <NAME>, <NAME> """
import logging
import pickle
import numpy as np
import plotly.graph_objs as go
import plotly.plotly as py
import sys
from pathlib import Path
from models import *
import datetime
project_dir = Path(__file__).resolve().parents[2]
def main():
    """
    Create, train, and save 4 closed form models and
    4 gradient descent models for the following versions:
        1. 'no_text': no text features
        2. '60': top 60 words
        3. '160': top 160 words (basic, with stop words like 'the' or 'a' included)
        4. Top 160 words + newly added features included
    """
    logger = logging.getLogger(__name__)
    logger.info('training models')

    features_path = project_dir / 'src' / 'features'
    output_path = project_dir / 'models'
    predictions_path = project_dir / 'reports'

    X_train, \
    X_train_160, \
    X_train_60, \
    X_train_no_text, \
    Y_train = get_XY_train(features_path)

    # BUG FIX: `[ClosedForm()] * 4` bound the SAME instance to all four names
    # (likewise for GradientDescent); build four independent models instead.
    closedForm, closedForm160, closedForm60, closedFormNoText = \
        [ClosedForm() for _ in range(4)]
    gradientDescent, gradientDescent160, gradientDescent60, gradientDescentNoText = \
        [GradientDescent() for _ in range(4)]

    """
    Train closed form top 160, top 60, no text and full models.
    The full model is:
        top 160 words + newly added features included
    We'll pick 42 as the degree of freedom for the stem feature
    (see notebook `3.0-lnh-task-3-experimentation`)
    """
    optimal_size = 42

    # Reducing stem vector's size of X_train
    X_train = reduce_stem(X_train, optimal_size)

    # Train models
    model_X_pairs = (
        (closedForm, X_train),
        (closedForm160, X_train_160),
        (closedForm60, X_train_60),
        (closedFormNoText, X_train_no_text),
    )
    closedForm, \
    closedForm160, \
    closedForm60, \
    closedFormNoText = train_models(model_X_pairs, Y_train)

    # Save models
    model_filename_pairs = (
        (closedForm, 'ClosedForm.pkl'),
        (closedForm160, 'ClosedForm_160.pkl'),
        (closedForm60, 'ClosedForm_60.pkl'),
        (closedFormNoText, 'ClosedForm_no_text.pkl'),
    )
    save_models(model_filename_pairs, output_path)
    logger.info('finished closed form no text, top 160, top 60 models training')

    """
    Train gradient descent models
    """
    model_X_pairs = (
        (gradientDescent, X_train),
        (gradientDescent160, X_train_160),
        (gradientDescent60, X_train_60),
        (gradientDescentNoText, X_train_no_text),
    )
    # Hyperparameters
    hparams = {
        'beta': 1e-4,   # prof: < 1e-3
        'eta_0': 1e-5,  # prof: < 1e-5
        'eps': 1e-6,
    }
    gradientDescent, \
    gradientDescent160, \
    gradientDescent60, \
    gradientDescentNoText = train_models(model_X_pairs, Y_train, hparams=hparams)

    model_filename_pairs = (
        (gradientDescent, 'GradientDescent.pkl'),
        (gradientDescent160, 'GradientDescent_160.pkl'),
        (gradientDescent60, 'GradientDescent_60.pkl'),
        (gradientDescentNoText, 'GradientDescent_no_text.pkl'),
    )
    save_models(model_filename_pairs, output_path)
    logger.info('finished gradient descent models training')
def train_models(model_X_pairs, Y, hparams=None):
    """
    Train (model, X) pairs with target Y.
    Return trained models.

    :param model_X_pairs: iterable of (model, X) tuples; models are ClosedForm
        instances when hparams is None, GradientDescent instances otherwise.
    :param Y: target vector shared by all models.
    :param hparams: dict of gradient-descent hyperparameters, or None to use
        the closed-form training path.  The caller's dict is not modified.
    :return: list of freshly constructed trained models, in input order.
    """
    models = []
    for model, X in model_X_pairs:
        if hparams is None:
            time_start = datetime.datetime.now()
            model.train(X, Y)
            time_end = datetime.datetime.now()
            model = ClosedForm(w=model.w)
        else:
            # Work on a copy so the caller's hparams dict is not mutated.
            params = dict(hparams)
            params['w_0'] = np.zeros((X.shape[1], 1))
            time_start = datetime.datetime.now()
            model.train(X, Y, **params)
            time_end = datetime.datetime.now()
            model = GradientDescent(
                w=model.w,
                hparams=model.get_hyperparams(),
                num_iterations=model.num_iterations)
        # BUG FIX: `.microseconds` only yields the sub-second component (and
        # was labelled "ms"); report the true wall-clock time in milliseconds.
        elapsed_ms = (time_end - time_start).total_seconds() * 1000.0
        print('time:', '%.3f' % elapsed_ms, 'ms')
        models.append(model)
    return models
def get_XY_train(features_path):
    """
    Load the pickled training feature matrices and target vector.

    :param features_path: directory (Path) containing the feature pickles.
    :return: list [X_train, X_train_160, X_train_60, X_train_no_text, Y_train].
    """
    files = [
        'training_X.pkl',
        'training_X_160.pkl',
        'training_X_60.pkl',
        'training_X_no_text.pkl',
        'training_y.pkl',
    ]
    XY_train = []
    for file in files:
        # Context manager closes each file handle deterministically (the
        # original `pickle.load(open(...))` leaked the handle).
        with open(features_path / file, 'rb') as fh:
            XY_train.append(pickle.load(fh))
    return XY_train
def reduce_stem(X, stem_size):
    """
    Truncate the stem-feature block of X and re-append a bias column.

    Structure of a row of X_train:
        index  0: is_root
               1: controversiality
               2: children
               3: length
               4-163: x_counts
               164-323: stem
               324: bias term

    :param X: array of shape (n_samples, 325).
    :param stem_size: number of stem components to keep.
    :return: array of shape (n_samples, 164 + stem_size + 1) whose last
        column is the bias term 1.
    """
    stem_start = 164
    # The first two per-row slices of the original loop are contiguous, so a
    # single vectorized slice + concatenate replaces the Python-level loop.
    bias = np.ones((X.shape[0], 1), dtype=int)  # same dtype promotion as np.array([1])
    return np.concatenate((X[:, :stem_start + stem_size], bias), axis=1)
def save_models(model_filename_pairs, output_path):
    """
    Pickle each trained model into output_path under its given filename.

    Models whose ``is_trained()`` is falsy are skipped silently.

    :param model_filename_pairs: iterable of (model, filename) tuples.
    :param output_path: destination directory (Path).
    """
    for model, name in model_filename_pairs:
        if model.is_trained():
            # Context manager flushes and closes the handle (the original
            # `pickle.dump(model, open(...))` never closed it explicitly).
            with open(output_path / name, 'wb') as fh:
                pickle.dump(model, fh)
if __name__ == '__main__':
    # Configure root logging before kicking off the training pipeline.
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    )
    main()
| [
"logging.basicConfig",
"numpy.zeros",
"pathlib.Path",
"numpy.array",
"datetime.datetime.now",
"logging.getLogger"
] | [((649, 676), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (666, 676), False, 'import logging\n'), ((5308, 5363), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': 'log_fmt'}), '(level=logging.INFO, format=log_fmt)\n', (5327, 5363), False, 'import logging\n'), ((3405, 3428), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3426, 3428), False, 'import datetime\n'), ((3482, 3505), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3503, 3505), False, 'import datetime\n'), ((3591, 3616), 'numpy.zeros', 'np.zeros', (['(X.shape[1], 1)'], {}), '((X.shape[1], 1))\n', (3599, 3616), True, 'import numpy as np\n'), ((3642, 3665), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3663, 3665), False, 'import datetime\n'), ((3730, 3753), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3751, 3753), False, 'import datetime\n'), ((250, 264), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (254, 264), False, 'from pathlib import Path\n'), ((4883, 4896), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (4891, 4896), True, 'import numpy as np\n')] |
#!/usr/bin/env python
#print "*** FM PIANO VERSION WITH NOTE CACHING ***"
"""
##########################################################################
# * * * PySynth * * *
# A very basic audio synthesizer in Python (www.python.org)
#
# <NAME>, 2012-11-29 (<EMAIL>)
##########################################################################
# Based on a program by <NAME> (tyler at tylereaves.com) found at
# http://mail.python.org/pipermail/python-list/2000-August/041308.html
##########################################################################
# 'song' is a Python list (or tuple) in which the song is defined,
# the format is [['note', value]]
# Notes are 'a' through 'g' of course,
# optionally with '#' or 'b' appended for sharps or flats.
# Finally the octave number (defaults to octave 4 if not given).
# An asterisk at the end makes the note a little louder (useful for the beat).
# 'r' is a rest.
# Note value is a number:
# 1=Whole Note; 2=Half Note; 4=Quarter Note, etc.
# Dotted notes can be written in two ways:
# 1.33 = -2 = dotted half
# 2.66 = -4 = dotted quarter
# 5.33 = -8 = dotted eighth
"""
import wave, struct
import numpy as np
from math import sin, cos, pi, log, exp
from mixfiles import mix_files
from demosongs import *
from mkfreq import getfreq
pitchhz, keynum = getfreq()
# Harmonic intensities (dB) for selected piano keys,
# measured with output from a Yamaha P-85
harmo = (
(1, -15.8, -3., -15.3, -22.8, -40.7),
(16, -15.8, -3., -15.3, -22.8, -40.7),
(28, -5.7, -4.4, -17.7, -16., -38.7),
(40, -6.8, -17.2, -22.4, -16.8, -75.6),
(52, -8.4, -19.7, -23.5, -21.6, -76.8),
(64, -9.3, -20.8, -37.2, -36.3, -76.4),
(76, -18., -64.5, -74.4, -77.3, -80.8),
(88, -24.8, -53.8, -77.2, -80.8, -90.),
)
def linint(arr, x):
    """Interpolate an (X, Y) array linearly at position x.

    If x coincides with one of the X values, its Y is returned directly;
    otherwise the two nearest bracketing points are used.  x must lie within
    [min(X), max(X)] (AssertionError otherwise, as before).
    """
    for vx, vy in arr:
        if vx == x:
            return vy
    xvals = [v[0] for v in arr]
    ux = max(xvals)
    lx = min(xvals)
    assert lx <= x <= ux, (lx, x, ux)
    uy = ly = None
    for vx, vy in arr:
        # Nearest point strictly above x ...
        if x < vx <= ux:
            ux, uy = vx, vy
        # ... and nearest point strictly below x.  BUG FIX: the original
        # condition (x - v[0] >= lx - x) was always true for any point below
        # x, so it kept the *last* lower point seen — only correct when the
        # input happened to be sorted ascending.
        if lx <= vx < x:
            lx, ly = vx, vy
    return (float(x) - lx) / (ux - lx) * (uy - ly) + ly
# Build an 88x20 table: harmtab[key, harmonic] holds the intensity of each
# harmonic for each piano key, interpolated over the measured keys in `harmo`.
harmtab = np.zeros((88, 20))
for h in range(1, len(harmo[0])):
    dat = []
    for n in range(len(harmo)):
        dat.append((float(harmo[n][0]), harmo[n][h]))
    for h2 in range(88):
        harmtab[h2,h] = linint(dat, h2+1)
#print harmtab[keynum['c4'],:]
# Convert the dB values to linear amplitudes relative to the first harmonic.
for h2 in range(88):
    for n in range(20):
        # NOTE(review): harmtab[h2,1] is overwritten to 1.0 on the n == 1
        # pass, so for n >= 2 `ref` is no longer the original dB reference.
        # This matches the code as shipped; confirm against upstream PySynth
        # before changing, since "fixing" it would alter the synthesized sound.
        ref = harmtab[h2,1]
        harmtab[h2,n] = 10.**((harmtab[h2,n] - ref)/20.)
#print harmtab[keynum['c4'],:]
##########################################################################
#### Main program starts below
##########################################################################
# Some parameters:
# Beats (quarters) per minute
# e.g. bpm = 95
# Octave shift (neg. integer -> lower; pos. integer -> higher)
# e.g. transpose = 0
# Playing style (e.g., 0.8 = very legato and e.g., 0.3 = very staccato)
# e.g. leg_stac = 0.6
# Volume boost for asterisk notes (1. = no boost)
# e.g. boost = 1.2
# Output file name
#fn = 'pysynth_output.wav'
# Other parameters:
# Influences the decay of harmonics over frequency. Lowering the
# value eliminates even more harmonics at high frequencies.
# Suggested range: between 3. and 5., depending on the frequency response
# of speakers/headphones used
harm_max = 5.
##########################################################################
def make_wav(song,bpm=120,transpose=0,leg_stac=.9,boost=1.1,repeat=0,fn="out.wav", silent=False):
    """Synthesize `song` into a 16-bit mono 44.1 kHz WAV file named `fn`.

    `song` is a sequence of (note, value) pairs as described in the module
    docstring.  bpm is the tempo in quarter notes per minute; transpose is an
    octave shift; leg_stac controls legato (near 1.) vs. staccato (near 0.);
    boost multiplies the volume of notes marked with '*'; repeat plays the
    whole song that many extra times; silent suppresses progress printing.
    """
    data = []
    note_cache = {}   # note name -> rendered raw waveform (repeated notes only)
    cache_this = {}   # note name -> number of occurrences in the song
    f=wave.open(fn,'w')
    f.setnchannels(1)
    f.setsampwidth(2)
    f.setframerate(44100)
    f.setcomptype('NONE','Not Compressed')
    bpmfac = 120./bpm
    def length(l):
        # Note value (1=whole, 2=half, ...) -> duration in samples.
        return 88200./l*bpmfac
    def waves2(hz,l):
        # Period length in samples, and rounded period count for hz over l samples.
        a=44100./hz
        b=float(l)/44100.*hz
        return [a,round(b)]
    # Attack envelopes and pitch-dependent decay constants.
    att_len = 3000
    att_bass = np.zeros(att_len)
    att_treb = np.zeros(att_len)
    for n in range(att_len):
        att_treb[n] = linint(((0,0.), (100, .2), (300, .7), (400, .6), (600, .25), (800, .9), (1000, 1.25), (2000,1.15), (3000, 1.)), n)
        att_bass[n] = linint(((0,0.), (100, .1), (300, .2), (400, .15), (600, .1), (800, .9), (1000, 1.25), (2000,1.15), (3000, 1.)), n)
    decay = np.zeros(1000)
    for n in range(900):
        decay[n] = exp(linint(( (0,log(3)), (3,log(5)), (5, log(1.)), (6, log(.8)), (9,log(.1)) ), n/100.))
    def zz(a):
        # Clamp negative envelope values to zero, in place.
        for q in range(len(a)):
            if a[q] < 0: a[q] = 0
    def render2(a, b, vol, pos, knum, note):
        # Render one note (period `a` samples, duration `b` samples, volume
        # `vol`) into `data` starting at sample `pos`, using simple FM synthesis.
        l=waves2(a, b)
        q=int(l[0]*l[1])
        lf = log(a)
        snd_len = max(int(3.1*q), 44100)
        raw_note = 12*44100
        if note not in list(note_cache.keys()):
            x2 = np.arange(raw_note)
            sina = 2. * pi * x2 / float(l[0])
            sina14 = 14. * 2. * pi * x2 / float(l[0])
            # Linearly decaying amplitude envelopes for the FM operators.
            amp1 = 1. - (x2/snd_len)
            amp2 = 1. - (4*x2/snd_len)
            amp_3to6 = 1. - (.25*x2/snd_len)
            zz(amp1)
            zz(amp2)
            zz(amp_3to6)
            new = (
                amp1 * np.sin(sina+.58*amp2*np.sin(sina14))
                + amp_3to6 * np.sin(sina+.89*amp_3to6*np.sin(sina))
                + amp_3to6 * np.sin(sina+.79*amp_3to6*np.sin(sina))
                )
            # Exponential decay whose rate depends on the note's pitch.
            new *= np.exp(-x2/decay[int(lf*100)]/44100.)
            if cache_this[note] > 1:
                # Cache only notes that occur more than once in the song.
                note_cache[note] = new.copy()
        else:
            new = note_cache[note].copy()
        # Dampen the tail after the legato/staccato cutoff point.
        dec_ind = int(leg_stac*q)
        new[dec_ind:] *= np.exp(-np.arange(raw_note-dec_ind)/3000.)
        if snd_len > raw_note:
            print("Warning, note too long:", snd_len, raw_note)
            snd_len = raw_note
        data[pos:pos+snd_len] += ( new[:snd_len] * vol )
    ex_pos = 0.
    t_len = 0
    # First pass: total song length in samples and per-note occurrence counts.
    for y, x in song:
        if x < 0:
            t_len+=length(-2.*x/3.)
        else:
            t_len+=length(x)
        if y[-1] == '*':
            y = y[:-1]
        if not y[-1].isdigit():
            y += '4'
        cache_this[y] = cache_this.get(y, 0) + 1
    #print "Note frequencies in song:", cache_this
    data = np.zeros(int((repeat+1)*t_len + 441000))
    #print len(data)/44100., "s allocated"
    # Second pass: render every note (rests just advance the write position).
    for rp in range(repeat+1):
        for nn, x in enumerate(song):
            if not nn % 4 and silent == False:
                print("[%u/%u]\t" % (nn+1,len(song)))
            if x[0]!='r':
                if x[0][-1] == '*':
                    vol = boost
                    note = x[0][:-1]
                else:
                    vol = 1.
                    note = x[0]
                if not note[-1].isdigit():
                    note += '4'		# default to fourth octave
                a=pitchhz[note]
                kn = keynum[note]
                a = a * 2**transpose
                if x[1] < 0:
                    # Negative note values denote dotted notes (1.5x duration).
                    b=length(-2.*x[1]/3.)
                else:
                    b=length(x[1])
                render2(a, b, vol, int(ex_pos), kn, note)
                ex_pos = ex_pos + b
            if x[0]=='r':
                b=length(x[1])
                ex_pos = ex_pos + b
    ##########################################################################
    # Write to output file (in WAV format)
    ##########################################################################
    if silent == False:
        print("Writing to file", fn)
    # Normalise to half full scale and convert to 16-bit signed integers.
    data = data / (data.max() * 2.)
    out_len = int(2. * 44100. + ex_pos+.5)
    data2 = np.zeros(out_len, np.short)
    data2[:] = 32000. * data[:out_len]
    # NOTE(review): ndarray.tostring() was removed in NumPy 2.0; tobytes()
    # is the drop-in replacement if this file needs to run on modern NumPy.
    f.writeframes(data2.tostring())
    f.close()
    print()
##########################################################################
# Synthesize demo songs
##########################################################################
if __name__ == '__main__':
    print("*** FM PIANO VERSION WITH NOTE CACHING ***")
    print()
    print("Creating Demo Songs... (this might take about a minute)")
    print()
    #make_wav((('c', 4), ('e', 4), ('g', 4), ('c5', 1)))
    #make_wav(song1, fn = "pysynth_scale.wav")
    # Demo 1: every C across the keyboard plus assorted lengths and a boosted note.
    make_wav((('c1', 1), ('r', 1),('c2', 1), ('r', 1),('c3', 1), ('r', 1), ('c4', 1), ('r', 1),('c5', 1), ('r', 1),('c6', 1), ('r', 1),('c7', 1), ('r', 1),('c8', 1), ('r', 1), ('r', 1), ('r', 1), ('c4', 1),('r', 1), ('c4*', 1), ('r', 1), ('r', 1), ('r', 1), ('c4', 16), ('r', 1), ('c4', 8), ('r', 1),('c4', 4), ('r', 1),('c4', 1), ('r', 1),('c4', 1), ('r', 1)), fn = "all_cs.wav")
    # Demo 2: render right and left hand of the Bach piece separately, then mix.
    make_wav(song4_rh, bpm = 130, transpose = 1, boost = 1.15, repeat = 1, fn = "pysynth_bach_rh.wav")
    make_wav(song4_lh, bpm = 130, transpose = 1, boost = 1.15, repeat = 1, fn = "pysynth_bach_lh.wav")
    mix_files("pysynth_bach_rh.wav", "pysynth_bach_lh.wav", "pysynth_bach.wav")
    # Demo 3: Chopin piece at half tempo with legato phrasing.
    make_wav(song3, bpm = 132/2, leg_stac = 0.9, boost = 1.1, fn = "pysynth_chopin.wav")
| [
"wave.open",
"mixfiles.mix_files",
"numpy.zeros",
"mkfreq.getfreq",
"numpy.sin",
"numpy.arange",
"math.log"
] | [((1350, 1359), 'mkfreq.getfreq', 'getfreq', ([], {}), '()\n', (1357, 1359), False, 'from mkfreq import getfreq\n'), ((2262, 2280), 'numpy.zeros', 'np.zeros', (['(88, 20)'], {}), '((88, 20))\n', (2270, 2280), True, 'import numpy as np\n'), ((3673, 3691), 'wave.open', 'wave.open', (['fn', '"""w"""'], {}), "(fn, 'w')\n", (3682, 3691), False, 'import wave, struct\n'), ((3975, 3992), 'numpy.zeros', 'np.zeros', (['att_len'], {}), '(att_len)\n', (3983, 3992), True, 'import numpy as np\n'), ((4005, 4022), 'numpy.zeros', 'np.zeros', (['att_len'], {}), '(att_len)\n', (4013, 4022), True, 'import numpy as np\n'), ((4320, 4334), 'numpy.zeros', 'np.zeros', (['(1000)'], {}), '(1000)\n', (4328, 4334), True, 'import numpy as np\n'), ((6862, 6889), 'numpy.zeros', 'np.zeros', (['out_len', 'np.short'], {}), '(out_len, np.short)\n', (6870, 6889), True, 'import numpy as np\n'), ((7998, 8073), 'mixfiles.mix_files', 'mix_files', (['"""pysynth_bach_rh.wav"""', '"""pysynth_bach_lh.wav"""', '"""pysynth_bach.wav"""'], {}), "('pysynth_bach_rh.wav', 'pysynth_bach_lh.wav', 'pysynth_bach.wav')\n", (8007, 8073), False, 'from mixfiles import mix_files\n'), ((4609, 4615), 'math.log', 'log', (['a'], {}), '(a)\n', (4612, 4615), False, 'from math import sin, cos, pi, log, exp\n'), ((4724, 4743), 'numpy.arange', 'np.arange', (['raw_note'], {}), '(raw_note)\n', (4733, 4743), True, 'import numpy as np\n'), ((5372, 5401), 'numpy.arange', 'np.arange', (['(raw_note - dec_ind)'], {}), '(raw_note - dec_ind)\n', (5381, 5401), True, 'import numpy as np\n'), ((4386, 4392), 'math.log', 'log', (['(3)'], {}), '(3)\n', (4389, 4392), False, 'from math import sin, cos, pi, log, exp\n'), ((4398, 4404), 'math.log', 'log', (['(5)'], {}), '(5)\n', (4401, 4404), False, 'from math import sin, cos, pi, log, exp\n'), ((4411, 4419), 'math.log', 'log', (['(1.0)'], {}), '(1.0)\n', (4414, 4419), False, 'from math import sin, cos, pi, log, exp\n'), ((4425, 4433), 'math.log', 'log', (['(0.8)'], {}), '(0.8)\n', (4428, 4433), 
False, 'from math import sin, cos, pi, log, exp\n'), ((4438, 4446), 'math.log', 'log', (['(0.1)'], {}), '(0.1)\n', (4441, 4446), False, 'from math import sin, cos, pi, log, exp\n'), ((5141, 5153), 'numpy.sin', 'np.sin', (['sina'], {}), '(sina)\n', (5147, 5153), True, 'import numpy as np\n'), ((5003, 5017), 'numpy.sin', 'np.sin', (['sina14'], {}), '(sina14)\n', (5009, 5017), True, 'import numpy as np\n'), ((5073, 5085), 'numpy.sin', 'np.sin', (['sina'], {}), '(sina)\n', (5079, 5085), True, 'import numpy as np\n')] |
"""
MIT License
Copyright (c) 2021 martinpflaum
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
#%%
import torch
import numpy as np
import torch
import torch.nn as nn
import random
import matplotlib.pyplot as plt
def calc_random_metric(valid_ds, learn, num_iter=10, loss_func=nn.MSELoss()):
    """Estimate a chance-level baseline loss on valid_ds.

    Pairs each model prediction with a randomly chosen target (by shuffling
    the dataset indices) and averages the loss over ``num_iter`` shuffles.

    :param valid_ds: indexable dataset of (input, target) pairs.
    :param learn: object exposing a ``model`` attribute; moved to cuda:0.
    :param num_iter: number of random shuffles to average over.
    :param loss_func: loss comparing prediction and target.
    :return: formatted summary string (also printed).
    """
    learn.model = learn.model.to("cuda:0")
    learn.model.eval()
    rand_loss = 0
    # BUG FIX: the original re-assigned `num_iter = 10` here, silently
    # ignoring the caller-supplied value; the parameter is now honoured.
    idx = np.arange(len(valid_ds))
    for z in range(num_iter):
        #print(f"iteration {z} of {num_iter}")
        loss_one_iter = 0
        np.random.shuffle(idx)
        for k in range(len(valid_ds)):
            with torch.no_grad():
                input = valid_ds[k][0]
                pred_scene_depth = learn.model(input[None].to("cuda:0"))[0]
                # Random pairing: target taken at the shuffled index.
                target = valid_ds[idx[k]][1]
                target = target.to("cuda:0")
                loss = loss_func(pred_scene_depth, target)
                loss = loss / len(valid_ds)
                loss_one_iter += loss
        loss_one_iter = loss_one_iter / num_iter
        rand_loss += loss_one_iter
    rand_loss = rand_loss.detach().cpu().numpy()
    print("rand_loss", rand_loss)
    return f"rand_loss {rand_loss}\n"
def calc_perc_metric(valid_ds,learn):
    """Retrieval-style percentile metric.

    For every sample, rank its ground-truth target among ALL model
    predictions by mean squared error, and count the fraction of samples
    whose own prediction is the closest (top-1), among the 5 closest (top-5)
    or among the 10 closest (top-10).

    :param valid_ds: indexable dataset of (input, target) pairs.
    :param learn: object exposing a ``model`` attribute; moved to cuda:0.
    :return: formatted "top1 ... top5 ... top10 ..." string (also printed).
    """
    learn.model = learn.model.to("cuda:0")
    learn.model.eval()
    preds = []
    # Precompute every prediction once; results are moved back to the CPU.
    for k in range(len(valid_ds)):
        with torch.no_grad():
            input = valid_ds[k][0]
            pred_scene_depth = learn.model(input[None].to("cuda:0"))[0].to("cpu")
            preds += [pred_scene_depth[None]]
    preds = torch.cat(preds,dim = 0)
    #print(preds.shape)
    N = preds.shape[0]
    top1 = 0
    top5 = 0
    top10 = 0
    for k in range(len(valid_ds)):
        target = valid_ds[k][1]
        target = target.to("cpu")[None]#.reshape(1,1,64,64)
        #print(target.shape)
        # Mean squared error between this target and every prediction.
        diff = torch.mean(((preds-target)**2).reshape(N,-1),dim = -1)
        #print(diff.shape)
        argmins = torch.argsort(diff)
        argmins = list(argmins)
        # Rank (0-based) of sample k's own prediction among all predictions.
        # list.index relies on tensor == int comparison here.
        percentil = argmins.index(k)
        if percentil < 10:
            top10 += 1/len(valid_ds)
        if percentil < 5:
            top5 += 1/len(valid_ds)
        if percentil < 1:
            top1 += 1/len(valid_ds)
    #top1 = top1.detach().cpu().numpy()
    #top5 = top5.detach().cpu().numpy()
    #top10 = top10.detach().cpu().numpy()
    print("top1",top1,"top5",top5,"top10",top10)
    return f"top1 {top1} top5 {top5} top10 {top10}\n"
def get_mean(valid_ds):
    """Return the element-wise average of all targets in ``valid_ds``."""
    n = len(valid_ds)
    average = torch.zeros_like(valid_ds[0][1])
    for idx in range(n):
        with torch.no_grad():
            average += valid_ds[idx][1] / n
    return average
def get_mean_loss(valid_ds, loss_func=nn.MSELoss()):
    """Loss of the best constant predictor: the average loss of the dataset
    mean against every target.  Prints and returns a formatted summary line."""
    mean_target = get_mean(valid_ds)
    n = len(valid_ds)
    total = 0
    for idx in range(n):
        with torch.no_grad():
            total += loss_func(valid_ds[idx][1], mean_target) / n
    total = total.detach().cpu().numpy()
    print("constant loss of dset: ", total)
    return f"constant loss of dset: {total}\n"
def save_single(save_folder,learn,valid_ds,test_name,k,save_k,reverse_func):
    """Render sample ``k`` of ``valid_ds`` through the model and save both the
    prediction (``<test_name>_<save_k>.png``) and the ground-truth target
    (``target_<save_k>.png``) into ``save_folder``.

    :param reverse_func: maps a tensor to a displayable HWC numpy array.
    """
    learn.model = learn.model.to("cuda:0")
    learn.model.eval()
    with torch.no_grad():
        input = valid_ds[k][0]
        pred_scene_depth = learn.model(input[None].to("cuda:0"))[0]
    img_vis = reverse_func(pred_scene_depth)
    img_vis = np.clip(img_vis,0,1)
    if img_vis.shape[2] == 1:
        # Drop the trailing channel axis so imsave treats it as grayscale.
        img_vis = img_vis[:,:,0]
    img = valid_ds[k][1]
    scene_depth = img[None]
    img_vis_target = reverse_func(scene_depth[0])
    img_vis_target = np.clip(img_vis_target,0,1)
    if img_vis_target.shape[2] == 1:
        img_vis_target = img_vis_target[:,:,0]
    plt.imsave(f"{save_folder}/{test_name}_{save_k}.png",img_vis)
    plt.imsave(f"{save_folder}/target_{save_k}.png",img_vis_target)
def post_load_reverse_identity(img):
    """Identity post-processing: move a CHW tensor to the CPU, reorder it to
    HWC and return it as a numpy array."""
    hwc = img.cpu().permute(1, 2, 0)
    return np.array(hwc)
def save_images(save_folder, learn, valid_ds, test_name, idx=[124, 168, 3, 4], reverse_func=post_load_reverse_identity):
    """Render and save prediction/target image pairs for the dataset entries
    listed in ``idx``; output files are numbered by position within ``idx``."""
    for out_number, sample_idx in enumerate(idx):
        save_single(save_folder, learn, valid_ds, test_name, sample_idx, out_number, reverse_func)
def eval_model(val_ds, learn, loss_func=nn.MSELoss()):
    """Average per-sample validation loss of ``learn.model`` over ``val_ds``.

    The model is moved to cuda:0 and put in eval mode; the mean loss is
    printed and returned as a formatted report line."""
    net = learn.model.to("cuda:0").eval()
    n_samples = len(val_ds)
    total = 0
    for sample, target in val_ds:
        prediction = net(sample.to("cuda:0")[None]).detach()
        total += loss_func(prediction, target.to("cuda:0")) / n_samples
    total = total.detach().cpu().numpy()
    print("model val loss: ", total)
    return f"model val loss: {total}\n"
def run_all_metrics(save_folder, learn, valid_ds, file_name, reverse_func=post_load_reverse_identity):
    """Run the full evaluation suite: save sample images, then concatenate the
    constant-baseline, percentile, random-pairing and model-loss report lines
    into a single string (metrics run in that order)."""
    save_images(save_folder, learn, valid_ds, file_name, reverse_func=reverse_func)
    report_lines = [
        get_mean_loss(valid_ds),
        calc_perc_metric(valid_ds, learn),
        calc_random_metric(valid_ds, learn),
        eval_model(valid_ds, learn),
    ]
    return "".join(report_lines)
| [
"torch.nn.MSELoss",
"torch.zeros_like",
"torch.argsort",
"torch.cat",
"numpy.clip",
"numpy.array",
"matplotlib.pyplot.imsave",
"torch.no_grad",
"numpy.random.shuffle"
] | [((1261, 1273), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (1271, 1273), True, 'import torch.nn as nn\n'), ((2533, 2556), 'torch.cat', 'torch.cat', (['preds'], {'dim': '(0)'}), '(preds, dim=0)\n', (2542, 2556), False, 'import torch\n'), ((3474, 3506), 'torch.zeros_like', 'torch.zeros_like', (['valid_ds[0][1]'], {}), '(valid_ds[0][1])\n', (3490, 3506), False, 'import torch\n'), ((3733, 3745), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (3743, 3745), True, 'import torch.nn as nn\n'), ((4480, 4502), 'numpy.clip', 'np.clip', (['img_vis', '(0)', '(1)'], {}), '(img_vis, 0, 1)\n', (4487, 4502), True, 'import numpy as np\n'), ((4689, 4718), 'numpy.clip', 'np.clip', (['img_vis_target', '(0)', '(1)'], {}), '(img_vis_target, 0, 1)\n', (4696, 4718), True, 'import numpy as np\n'), ((4811, 4873), 'matplotlib.pyplot.imsave', 'plt.imsave', (['f"""{save_folder}/{test_name}_{save_k}.png"""', 'img_vis'], {}), "(f'{save_folder}/{test_name}_{save_k}.png', img_vis)\n", (4821, 4873), True, 'import matplotlib.pyplot as plt\n'), ((4877, 4941), 'matplotlib.pyplot.imsave', 'plt.imsave', (['f"""{save_folder}/target_{save_k}.png"""', 'img_vis_target'], {}), "(f'{save_folder}/target_{save_k}.png', img_vis_target)\n", (4887, 4941), True, 'import matplotlib.pyplot as plt\n'), ((5029, 5042), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (5037, 5042), True, 'import numpy as np\n'), ((5316, 5328), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (5326, 5328), True, 'import torch.nn as nn\n'), ((1525, 1547), 'numpy.random.shuffle', 'np.random.shuffle', (['idx'], {}), '(idx)\n', (1542, 1547), True, 'import numpy as np\n'), ((2917, 2936), 'torch.argsort', 'torch.argsort', (['diff'], {}), '(diff)\n', (2930, 2936), False, 'import torch\n'), ((4305, 4320), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4318, 4320), False, 'import torch\n'), ((2339, 2354), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2352, 2354), False, 'import torch\n'), ((3556, 3571), 
'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3569, 3571), False, 'import torch\n'), ((3844, 3859), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3857, 3859), False, 'import torch\n'), ((1605, 1620), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1618, 1620), False, 'import torch\n')] |
#!/usr/bin/env python
# -*- mode: python; coding: utf-8 -*-
##################################################################
# Documentation
##################################################################
# Imports
from __future__ import absolute_import, unicode_literals, print_function
try:
from cPickle import dump, load
except ImportError:
from _pickle import dump, load
from collections import Counter
from copy import deepcopy
from keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard
from keras.layers.embeddings import Embedding
from keras.models import load_model
from keras.preprocessing.sequence import pad_sequences
from keras.regularizers import l2
from keras.utils import to_categorical
from six import iteritems
from sklearn.utils.class_weight import compute_class_weight
from tempfile import mkstemp
import abc
import numpy as np
import os
from cgsa.base import BaseAnalyzer
from cgsa.utils.common import LOGGER, is_relevant, normlex
from .layers import CUSTOM_OBJECTS, DFLT_INITIALIZER, EMPTY_IDX, UNK_IDX
from .layers.word2vec import Word2Vec
from .utils import ModelMGPU, N_GPUS
##################################################################
# Variables and Constants
# default dimensionality for task-specific vectors
DFLT_VDIM = 100  # fallback embedding dimensionality when no pretrained vectors are used
DFLT_N_EPOCHS = 24  # maximum number of training epochs (early stopping may end sooner)
EMPTY_TOK = "%EMPTY%"  # padding token
UNK_TOK = "%UNK%"  # out-of-vocabulary token
DICT_OFFSET = 1
UNK_PROB = 1e-4  # probability of mapping a rare training word to UNK
L2_COEFF = 1e-4  # L2 regularization weight for the embedding layer
EMB_INDICES_NAME = "embedding_indices"
# LBA Results for Different Optimizers:
# sgd: Macro: 10.33%; Micro: 36.2623%;
# rmsprop: Macro: 30.84%; Micro: 44.5902%;
# adagrad: Macro: 35.45%; Micro: 61.5738%;
# adadelta: 30.84%; Micro: 44.5902%;
# adam: Macro: 30.84%; Micro: 44.5902%;
# nadam: 30.84%; Micro: 44.5902%;
# adagrad performed best in the experiments above, hence the default below
DFLT_TRAIN_PARAMS = {"optimizer": "adagrad",
                     "metrics": ["categorical_accuracy"],
                     "loss": "categorical_hinge"}
##################################################################
# Methods
##################################################################
# Class
class DLBaseAnalyzer(BaseAnalyzer):
    """Abstract base class for deep-learning-based sentiment analysis.

    Subclasses build the concrete Keras model in `_init_nn`; this class
    provides the shared training loop, vocabulary/embedding handling, and
    (de)serialization machinery.

    Attributes:
    """
    def __init__(self, w2v=False, lstsq=False, embeddings=None, **kwargs):
        """Class constructor.

        Args:
            w2v (bool): use word2vec embeddings
            lstsq (bool): use the least squares method
            embeddings (cgsa.utils.word2vec.Word2Vec or None): pretrained
                embeddings
        """
        super(DLBaseAnalyzer, self).__init__()
        self.name = "DLBaseAnalyzer"
        # boolean flags indicating whether to use external embeddings
        self._w2v = w2v
        self._lstsq = lstsq
        # actual external embeddings
        self._embeddings = embeddings
        # mapping from words to their embedding indices in `self._embs` or
        # `self.W_EMB`
        self._w2i = {EMPTY_TOK: EMPTY_IDX, UNK_TOK: UNK_IDX}
        self._pad_value = EMPTY_IDX
        # mapping from words to their embeddings (will be initialized after
        # training the network, if `w2v` or `lstsq` are true)
        self._embs = None
        # least squares matrix (will be initialized after training the network,
        # if true)
        self._lstsq_mtx = None
        self.ndim = -1 # vector dimensionality will be initialized later
        self.intm_dim = -1
        self._model = None
        self._model_path = None
        self._trained = False
        self._n_epochs = DFLT_N_EPOCHS
        # reserved embedding rows (presumably EMPTY_IDX/UNK_IDX) — confirm
        self._aux_keys = set((0, 1))
        self._max_seq_len = -1
        self._min_width = 0
        self._n_y = 0
        self._train_params = deepcopy(DFLT_TRAIN_PARAMS)
        self._fit_params = {}
        # variables needed for training
        self._w_stat = self._pred_class = None
        self.W_EMB = self._cost = self._dev_cost = None
        # initialize functions to None
        self._reset_funcs()
        # set up functions for obtaining word embeddings at train and test
        # times
        self._init_wemb_funcs()
    def train(self, train_x, train_y, dev_x, dev_y,
              a_grid_search, a_multi_gpu):
        """Train the network on digitized message sequences.

        Args:
            train_x, train_y: training instances and labels
            dev_x, dev_y: development instances and labels (a split of the
                training set is used when `dev_x` is empty)
            a_grid_search: unused in this implementation
            a_multi_gpu (bool): wrap the model in `ModelMGPU` for
                multi-GPU training

        Returns:
            void:
        """
        self._start_training()
        self._logger.debug("Training %s...", self.name)
        self._logger.debug("Preparing dataset...")
        train_x, train_y, dev_x, dev_y = self._prepare_data(
            train_x, train_y, dev_x, dev_y
        )
        self._logger.debug("Dataset ready...")
        # initialize the network
        self._logger.debug("Initializing the network...")
        # self._update_fit_params(train_y)
        self._init_nn()
        self._logger.debug("Network ready...")
        # checkpoint weights go to a temporary file which is removed after
        # the best weights have been loaded back into the model
        _, ofname = mkstemp(suffix=".hdf5", prefix=self.name + '.')
        try:
            early_stop = EarlyStopping(patience=3, verbose=1)
            chck_point = ModelCheckpoint(
                filepath=ofname, monitor="val_categorical_accuracy",
                mode="auto", verbose=1,
                save_weights_only=True,
                save_best_only=True
            )
            tensorboard = TensorBoard(
                log_dir=os.environ.get("TENSORBOARD_DIR", "/tmp"),
                histogram_freq=1, batch_size=32,
                write_graph=True, write_grads=True
            )
            if a_multi_gpu:
                train_model = ModelMGPU(self._model)
                self._fit_params["batch_size"] = 32 * N_GPUS
                train_model.compile(**self._train_params)
            else:
                train_model = self._model
            train_model.fit(train_x, train_y,
                            validation_data=(dev_x, dev_y),
                            epochs=self._n_epochs,
                            callbacks=[early_stop, chck_point, tensorboard],
                            **self._fit_params)
            self._model.load_weights(ofname)
            self._finish_training()
        finally:
            os.remove(ofname)
        self._logger.debug("%s trained", self.name)
    def predict_proba(self, msg, yvec):
        """Predict class probabilities for a single message.

        Args:
            msg: input message (iterable of tokens)
            yvec (np.array): output vector, filled in place with the
                model's class scores

        Returns:
            void:
        """
        wseq = self._tweet2wseq(msg)
        embs = np.array(
            self._pad(len(wseq), self._pad_value)
            + [self.get_test_w_emb(w) for w in wseq], dtype="int32")
        ret = self._model.predict(np.asarray([embs]),
                                  batch_size=1,
                                  verbose=2)
        yvec[:] = ret[0]
    def predict_proba_raw(self, messages):
        """Predict class probabilities for a batch of messages.

        Args:
            messages (iterable): input messages

        Returns:
            np.array: matrix of shape (n_messages, n_classes)
        """
        yvecs = np.zeros((len(messages), self._n_y))
        for i, msg_i in enumerate(messages):
            self.predict_proba(msg_i, yvecs[i])
        return yvecs
    def restore(self, embs):
        """Restore members which could not be serialized.

        Args:
            embs (cgsa.utils.word2vec.Word2Vec or None): pretrained
                embeddings
        """
        self._embeddings = embs
        self._logger = LOGGER
        self._init_wemb_funcs()
    def reset(self):
        """Remove members which cannot be serialized.
        """
        # set functions to None
        self._reset_funcs()
        self._embeddings = None
        self.W_EMB = None
        super(DLBaseAnalyzer, self).reset()
    def save(self, path):
        """Dump model to disc.

        Args:
            path (str): file path at which to store the model (the Keras
                model itself is written next to it with an ".h5" suffix)

        Returns:
            void:
        """
        model_path = path + ".h5"
        self._model.save(model_path)
        # all paths are relative so the serialized analyzer stays portable
        self._model_path = os.path.basename(model_path)
        # temporarily detach the Keras model: it cannot be pickled
        model = self._model
        self._model = None
        with open(path, "wb") as ofile:
            dump(self, ofile)
        self._model = model
    def _load(self, a_path):
        """Reload the Keras model stored next to the pickled analyzer.

        Args:
            a_path (str): directory containing the serialized model
        """
        super(DLBaseAnalyzer, self)._load(a_path)
        self._model = load_model(
            os.path.join(a_path, self._model_path),
            custom_objects=CUSTOM_OBJECTS
        )
    @abc.abstractmethod
    def _init_nn(self):
        """Initialize neural network.
        """
        raise NotImplementedError
    def _extract_feats(self, a_tweet):
        # no-op: DL analyzers operate on raw token sequences; presumably
        # kept to satisfy the BaseAnalyzer interface — confirm
        pass
    def _start_training(self):
        """Prepare for training.
        """
        self._trained = False
    def _finish_training(self):
        """Finalize the trained network.
        """
        self._logger.info("Finalizing network")
        if self._lstsq or self._w2v:
            emb_layer_idx = self._get_layer_idx()
            if self._lstsq:
                # Extract embeddings from the network
                task_embs = self._model.layers[emb_layer_idx].get_weights()
                assert len(task_embs) == 1, \
                    ("Unmatching number of trained paramaters:"
                     " {:d} instead of {:d}").format(
                         len(task_embs), 1)
                task_embs = task_embs[0]
                # extract only embeddings of known words
                START_IDX = UNK_IDX + 1
                w2v_embs = self._embs
                # Compute the least square matrix
                self._logger.info("Computing transform matrix for"
                                  " task-specific embeddings.")
                self._lstsq_mtx, res, rank, _ = np.linalg.lstsq(
                    w2v_embs[START_IDX:], task_embs[START_IDX:]
                )
                self._logger.info("Transform matrix computed"
                                  " (rank: %d, residuals: %f).",
                                  rank, sum(res))
                self._embs = task_embs
            # pop embedding layer and modify the first layer coming after it to
            # accept plain embeddings as input
            self._recompile_model(emb_layer_idx)
            self._pad_value = self._embs[EMPTY_IDX]
        self._logger.info("Network finalized")
        self._trained = True
    def _get_layer_idx(self):
        """Return the index of embedding layer in the model.

        Args:
            name (str): name of the layer (IGNORED)

        Returns:
            int: index of embedding layer
        """
        return 0
    def _recompile_model(self, emb_layer_idx):
        """Change model by removing the embedding layer.

        Args:
            emb_layer_idx (int): index of the embedding layer

        Returns:
            void:

        Note:
            modifies `self._model` in place
        """
        layers = self._model.layers
        emb_layer = layers.pop(emb_layer_idx)
        # the layer that used to consume embedding indices now has to accept
        # dense embedding vectors directly
        first_layer = layers.pop(emb_layer_idx)
        layer_config = first_layer.get_config()
        layer_config["input_shape"] = (None, emb_layer.output_dim)
        new_layer = first_layer.__class__.from_config(
            layer_config
        )
        new_layer.build((emb_layer.input_dim, emb_layer.output_dim))
        new_layer.set_weights(first_layer.get_weights())
        layers.insert(emb_layer_idx, new_layer)
        self._model = self._model.__class__(layers=layers)
        self._model.compile(**self._train_params)
    def _init_wemb_funcs(self):
        """Initialize functions for obtaining word embeddings.
        """
        if self.ndim < 0:
            self.ndim = DFLT_VDIM
        if self._w2v:
            self._embeddings.load()
            self.ndim = self._embeddings.ndim
            self.init_w_emb = self._init_w2v_emb
            self.get_train_w_emb_i = self._get_train_w2v_emb_i
            if self._trained:
                self.get_test_w_emb = self._get_test_w2v_emb
            else:
                self.get_test_w_emb = self._get_train_w2v_emb_i
        elif self._lstsq:
            self._embeddings.load()
            self.ndim = self._embeddings.ndim
            self.init_w_emb = self._init_w2v_emb
            self.get_train_w_emb_i = self._get_train_w2v_emb_i
            if self._trained:
                self.get_test_w_emb = self._get_test_w2v_lstsq_emb
            else:
                self.get_test_w_emb = self._get_train_w2v_emb_i
        else:
            # checked
            self.init_w_emb = self._init_w_emb
            self.get_train_w_emb_i = self._get_train_w_emb_i
            self.get_test_w_emb = self._get_test_w_emb_i
    def _reset_funcs(self):
        """Set all compiled theano functions to None.

        Note:
            modifies instance variables in place
        """
        self.get_train_w_emb_i = None
        self.get_test_w_emb_i = None
        self.init_w_emb = None
    def _init_w_emb(self):
        """Initialize task-specific word embeddings.
        """
        self.W_EMB = Embedding(len(self._w2i), self.ndim,
                               embeddings_initializer=DFLT_INITIALIZER,
                               embeddings_regularizer=l2(L2_COEFF))
    def _init_w2v_emb(self):
        """Initialize word2vec embedding matrix.
        """
        self._embeddings.load()
        self.ndim = self._embeddings.ndim
        self._embs = np.empty((len(self._w2i), self.ndim))
        self._embs[EMPTY_IDX, :] *= 0
        self._embs[UNK_IDX, :] = 1e-2 # prevent zeros in this row
        for w, i in iteritems(self._w2i):
            if i == EMPTY_IDX or i == UNK_IDX:
                continue
            self._embs[i] = self._embeddings[w]
        # initialize custom keras layer
        self.W_EMB = Word2Vec(self._embs, trainable=self._lstsq)
        # We unload embeddings every time before the training to free more
        # memory. Feel free to comment the line below, if you have plenty of
        # RAM.
        self._embeddings.unload()
    def _get_train_w_emb_i(self, a_word):
        """Obtain embedding index for the given word.

        Args:
            a_word (str):
                word whose embedding index should be retrieved

        Returns:
            int:
                embedding index of the given word
        """
        a_word = normlex(a_word)
        if a_word in self._w2i:
            return self._w2i[a_word]
        # occasionally map rare (frequency < 2) words to UNK so the model
        # learns a useful UNK representation
        elif self._w_stat[a_word] < 2 and np.random.binomial(1, UNK_PROB):
            return UNK_IDX
        else:
            i = self._w2i[a_word] = len(self._w2i)
            return i
    def _get_test_w_emb_i(self, a_word):
        """Obtain embedding index for the given word.

        Args:
            a_word (str):
                word whose embedding index should be retrieved

        Returns:
            int:
                embedding index of the given word
        """
        a_word = normlex(a_word)
        return self._w2i.get(a_word, UNK_IDX)
    def _get_train_w2v_emb_i(self, a_word):
        """Obtain embedding index for the given word.

        Args:
            a_word (str):
                word whose embedding index should be retrieved

        Returns:
            int: embedding index of the given word
        """
        a_word = normlex(a_word)
        if a_word in self._w2i:
            return self._w2i[a_word]
        elif a_word in self._embeddings:
            i = self._w2i[a_word] = len(self._w2i)
            return i
        else:
            return UNK_IDX
    def _get_test_w2v_emb(self, a_word):
        """Obtain embedding index for the given word.

        Args:
            a_word (str):
                word whose embedding index should be retrieved

        Returns:
            np.array:
                embedding of the input word
        """
        a_word = normlex(a_word)
        emb_i = self._w2i.get(a_word)
        if emb_i is None:
            # fall back to the pretrained vector for words unseen at
            # training time
            if a_word in self._embeddings:
                return self._embeddings[a_word]
            return self._embs[UNK_IDX]
        return self._embs[emb_i]
    def _get_test_w2v_lstsq_emb(self, a_word):
        """Obtain embedding index for the given word.

        Args:
            a_word (str):
                word whose embedding index should be retrieved

        Returns:
            np.array:
                embedding of the input word
        """
        a_word = normlex(a_word)
        emb_i = self._w2i.get(a_word)
        if emb_i is None:
            # project the pretrained vector into the task-specific space via
            # the least-squares transform computed in `_finish_training`
            if a_word in self._embeddings:
                return np.dot(self._embeddings[a_word],
                              self._lstsq_mtx)
            return self._embs[UNK_IDX]
        return self._embs[emb_i]
    def _prepare_data(self, train_x, train_y, dev_x, dev_y):
        """Provide train/test split and digitize the data.
        """
        if not dev_x:
            # hold out roughly 1/15 of the training set for validation
            n = len(train_x)
            n_dev = int(n / 15)
            idcs = list(range(n))
            np.random.shuffle(idcs)
            def get_split(data, idcs):
                return [data[i] for i in idcs]
            dev_x = get_split(train_x, idcs[:n_dev])
            dev_y = get_split(train_y, idcs[:n_dev])
            train_x = get_split(train_x, idcs[n_dev:])
            train_y = get_split(train_y, idcs[n_dev:])
        # convert tweets to word indices
        train_x, dev_x = self._digitize_data(train_x, dev_x)
        self._n_y = len(set(train_y) | set(dev_y))
        train_y = to_categorical(np.asarray(train_y))
        dev_y = to_categorical(np.asarray(dev_y))
        return (train_x, train_y, dev_x, dev_y)
    def _compute_w_stat(self, train_x):
        """Compute word frequencies on the corpus.

        Args:
            train_x (list[list[str]]): training instances

        Returns:
            void:

        Note:
            modifies instance variables in place
        """
        self._w_stat = Counter(w for t in train_x for w in t)
    def _digitize_data(self, train_x, dev_x):
        """Convert sequences of words to sequences of word indices.

        Args:
            train_x (list[list[str]]): training set
            dev_x (list[list[str]]): development set

        Returns:
            2-tuple[list, list]: digitized training and development sets
        """
        train_x = [self._tweet2wseq(x) for x in train_x]
        dev_x = [self._tweet2wseq(x) for x in dev_x]
        self._compute_w_stat(train_x)
        self._wseq2emb_ids(train_x, self.get_train_w_emb_i)
        self._wseq2emb_ids(dev_x, self.get_test_w_emb)
        train_x = self._pad_sequences(train_x)
        dev_x = self._pad_sequences(dev_x)
        return (train_x, dev_x)
    def _pad(self, xlen, pad_value=EMPTY_IDX):
        """Add indices or vectors of empty words to match minimum filter length.

        Args:
            xlen (int): length of the input instance
        """
        return [pad_value] * max(0, self._min_width - xlen)
    def _pad_sequences(self, x):
        """Make all input instances of equal length.

        Args:
            x (list[np.array]): list of embedding indices

        Returns:
            x: list of embedding indices of equal lengths
        """
        return pad_sequences(x)
    def _tweet2wseq(self, msg):
        """Convert tweet to a sequence of word lemmas if these words are informative.

        Args:
            msg (cgsa.data.Tweet): input message

        Return:
            list: lemmas of informative words
        """
        return [normlex(w.lemma)
                for w in msg if is_relevant(w.form)]
    def _wseq2emb_ids(self, data, w2i):
        """Convert sequence of words to embedding indices.

        Args:
            data (list[str]): list of input words
            w2i (func): function to convert words to embedding indices

        Return:
            list[int]: list of embedding indices
        """
        for i, inst_i in enumerate(data):
            data[i] = np.asarray(
                self._pad(len(inst_i))
                + [w2i(w) for w in inst_i], dtype="int32")
    def _update_fit_params(self, train_y):
        """Add class weights to the training parameters.

        Args:
            train_y (list[np.array]): labels of training instances

        Returns:
            void:

        Note:
            modifies `self._train_params` in place
        """
        return
        # NOTE(review): the early return above deliberately disables the
        # class weighting below (its call site in `train` is commented out
        # as well); the remaining code is dead and kept only for reference.
        y_labels = np.argmax(train_y, axis=-1)
        class_weights = compute_class_weight("balanced",
                                             np.unique(y_labels),
                                             y_labels)
        sample_weights = np.array([class_weights[y_i]
                                   for y_i in y_labels])
        self._fit_params["sample_weight"] = sample_weights
        self._logger.debug("Class weights: %r", class_weights)
| [
"keras.regularizers.l2",
"os.remove",
"numpy.argmax",
"cgsa.utils.common.normlex",
"keras.preprocessing.sequence.pad_sequences",
"six.iteritems",
"os.path.join",
"numpy.unique",
"collections.Counter",
"numpy.random.shuffle",
"copy.deepcopy",
"numpy.random.binomial",
"os.path.basename",
"ke... | [((3673, 3700), 'copy.deepcopy', 'deepcopy', (['DFLT_TRAIN_PARAMS'], {}), '(DFLT_TRAIN_PARAMS)\n', (3681, 3700), False, 'from copy import deepcopy\n'), ((4716, 4763), 'tempfile.mkstemp', 'mkstemp', ([], {'suffix': '""".hdf5"""', 'prefix': "(self.name + '.')"}), "(suffix='.hdf5', prefix=self.name + '.')\n", (4723, 4763), False, 'from tempfile import mkstemp\n'), ((7489, 7517), 'os.path.basename', 'os.path.basename', (['model_path'], {}), '(model_path)\n', (7505, 7517), False, 'import os\n'), ((13046, 13066), 'six.iteritems', 'iteritems', (['self._w2i'], {}), '(self._w2i)\n', (13055, 13066), False, 'from six import iteritems\n'), ((13799, 13814), 'cgsa.utils.common.normlex', 'normlex', (['a_word'], {}), '(a_word)\n', (13806, 13814), False, 'from cgsa.utils.common import LOGGER, is_relevant, normlex\n'), ((14375, 14390), 'cgsa.utils.common.normlex', 'normlex', (['a_word'], {}), '(a_word)\n', (14382, 14390), False, 'from cgsa.utils.common import LOGGER, is_relevant, normlex\n'), ((14731, 14746), 'cgsa.utils.common.normlex', 'normlex', (['a_word'], {}), '(a_word)\n', (14738, 14746), False, 'from cgsa.utils.common import LOGGER, is_relevant, normlex\n'), ((15272, 15287), 'cgsa.utils.common.normlex', 'normlex', (['a_word'], {}), '(a_word)\n', (15279, 15287), False, 'from cgsa.utils.common import LOGGER, is_relevant, normlex\n'), ((15823, 15838), 'cgsa.utils.common.normlex', 'normlex', (['a_word'], {}), '(a_word)\n', (15830, 15838), False, 'from cgsa.utils.common import LOGGER, is_relevant, normlex\n'), ((17313, 17351), 'collections.Counter', 'Counter', (['(w for t in train_x for w in t)'], {}), '(w for t in train_x for w in t)\n', (17320, 17351), False, 'from collections import Counter\n'), ((18599, 18615), 'keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['x'], {}), '(x)\n', (18612, 18615), False, 'from keras.preprocessing.sequence import pad_sequences\n'), ((19766, 19793), 'numpy.argmax', 'np.argmax', (['train_y'], {'axis': '(-1)'}), '(train_y, 
axis=-1)\n', (19775, 19793), True, 'import numpy as np\n'), ((19997, 20047), 'numpy.array', 'np.array', (['[class_weights[y_i] for y_i in y_labels]'], {}), '([class_weights[y_i] for y_i in y_labels])\n', (20005, 20047), True, 'import numpy as np\n'), ((4802, 4838), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'patience': '(3)', 'verbose': '(1)'}), '(patience=3, verbose=1)\n', (4815, 4838), False, 'from keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard\n'), ((4864, 5006), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', ([], {'filepath': 'ofname', 'monitor': '"""val_categorical_accuracy"""', 'mode': '"""auto"""', 'verbose': '(1)', 'save_weights_only': '(True)', 'save_best_only': '(True)'}), "(filepath=ofname, monitor='val_categorical_accuracy', mode=\n 'auto', verbose=1, save_weights_only=True, save_best_only=True)\n", (4879, 5006), False, 'from keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard\n'), ((5952, 5969), 'os.remove', 'os.remove', (['ofname'], {}), '(ofname)\n', (5961, 5969), False, 'import os\n'), ((6278, 6296), 'numpy.asarray', 'np.asarray', (['[embs]'], {}), '([embs])\n', (6288, 6296), True, 'import numpy as np\n'), ((7658, 7675), '_pickle.dump', 'dump', (['self', 'ofile'], {}), '(self, ofile)\n', (7662, 7675), False, 'from _pickle import dump, load\n'), ((7830, 7868), 'os.path.join', 'os.path.join', (['a_path', 'self._model_path'], {}), '(a_path, self._model_path)\n', (7842, 7868), False, 'import os\n'), ((16384, 16407), 'numpy.random.shuffle', 'np.random.shuffle', (['idcs'], {}), '(idcs)\n', (16401, 16407), True, 'import numpy as np\n'), ((16899, 16918), 'numpy.asarray', 'np.asarray', (['train_y'], {}), '(train_y)\n', (16909, 16918), True, 'import numpy as np\n'), ((16951, 16968), 'numpy.asarray', 'np.asarray', (['dev_y'], {}), '(dev_y)\n', (16961, 16968), True, 'import numpy as np\n'), ((18887, 18903), 'cgsa.utils.common.normlex', 'normlex', (['w.lemma'], {}), '(w.lemma)\n', (18894, 18903), False, 
'from cgsa.utils.common import LOGGER, is_relevant, normlex\n'), ((19896, 19915), 'numpy.unique', 'np.unique', (['y_labels'], {}), '(y_labels)\n', (19905, 19915), True, 'import numpy as np\n'), ((9210, 9270), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['w2v_embs[START_IDX:]', 'task_embs[START_IDX:]'], {}), '(w2v_embs[START_IDX:], task_embs[START_IDX:])\n', (9225, 9270), True, 'import numpy as np\n'), ((12682, 12694), 'keras.regularizers.l2', 'l2', (['L2_COEFF'], {}), '(L2_COEFF)\n', (12684, 12694), False, 'from keras.regularizers import l2\n'), ((13926, 13957), 'numpy.random.binomial', 'np.random.binomial', (['(1)', 'UNK_PROB'], {}), '(1, UNK_PROB)\n', (13944, 13957), True, 'import numpy as np\n'), ((15969, 16018), 'numpy.dot', 'np.dot', (['self._embeddings[a_word]', 'self._lstsq_mtx'], {}), '(self._embeddings[a_word], self._lstsq_mtx)\n', (15975, 16018), True, 'import numpy as np\n'), ((18936, 18955), 'cgsa.utils.common.is_relevant', 'is_relevant', (['w.form'], {}), '(w.form)\n', (18947, 18955), False, 'from cgsa.utils.common import LOGGER, is_relevant, normlex\n'), ((5143, 5184), 'os.environ.get', 'os.environ.get', (['"""TENSORBOARD_DIR"""', '"""/tmp"""'], {}), "('TENSORBOARD_DIR', '/tmp')\n", (5157, 5184), False, 'import os\n')] |
import model
import time
import numpy as np
def run_epoch(session, epoch_model, input_batches, target_batches, eval_op=None, verbose=False):
    """Run one full epoch over the given batches and return the perplexity.

    :type epoch_model: model.WordRNN
    """
    start = time.time()
    total_cost = 0.0
    seen = 0
    state = session.run(epoch_model.initial_state)
    fetches = {
        "cost": epoch_model.cost,
        "final_state": epoch_model.final_state,
    }
    if eval_op is not None:
        fetches["eval_op"] = eval_op
    epoch_size = len(input_batches)
    for step in range(epoch_size):
        # feed this batch plus the recurrent state carried over from the
        # previous batch
        feed_dict = {
            epoch_model.input_data: input_batches[step],
            epoch_model.targets: target_batches[step],
        }
        for layer, (c, h) in enumerate(epoch_model.initial_state):
            feed_dict[c] = state[layer].c
            feed_dict[h] = state[layer].h
        vals = session.run(fetches, feed_dict)
        total_cost += vals["cost"]
        state = vals["final_state"]
        seen += epoch_model.config.batch_size
        if verbose and step % (epoch_size // 10) == 10:
            print("%.3f perplexity: %.3f speed: %.0f wps" %
                  (step * 1.0 / epoch_size, np.exp(total_cost / seen),
                   seen * epoch_model.config.batch_size / (time.time() - start)))
    return np.exp(total_cost / seen)
| [
"numpy.exp",
"time.time"
] | [((247, 258), 'time.time', 'time.time', ([], {}), '()\n', (256, 258), False, 'import time\n'), ((1344, 1365), 'numpy.exp', 'np.exp', (['(costs / iters)'], {}), '(costs / iters)\n', (1350, 1365), True, 'import numpy as np\n'), ((1221, 1242), 'numpy.exp', 'np.exp', (['(costs / iters)'], {}), '(costs / iters)\n', (1227, 1242), True, 'import numpy as np\n'), ((1304, 1315), 'time.time', 'time.time', ([], {}), '()\n', (1313, 1315), False, 'import time\n')] |
"""
> Modules for processing training/validation data
> Maintainer: https://github.com/xahidbuffon
"""
import os
import glob
import random
import numpy as np
from PIL import Image
from torch.utils.data import Dataset
import torchvision.transforms as transforms
class GetTrainingPairs(Dataset):
    """ Common data pipeline to organize and generate
        training pairs (input A / ground-truth B) for various datasets
    """
    def __init__(self, root, dataset_name, transforms_=None):
        self.transform = transforms.Compose(transforms_)
        self.filesA, self.filesB, self.files_ann = self.get_file_paths(root, dataset_name)
        self.len = min(len(self.filesA), len(self.filesB))

    def __getitem__(self, index):
        """Return a paired sample as {"A": tensor, "B": tensor}."""
        img_A = Image.open(self.filesA[index % self.len])
        img_B = Image.open(self.filesB[index % self.len])
        # apply the same random horizontal flip to both images (p=0.5)
        if np.random.random() < 0.5:
            img_A = Image.fromarray(np.array(img_A)[:, ::-1, :], "RGB")
            img_B = Image.fromarray(np.array(img_B)[:, ::-1, :], "RGB")
        img_A = self.transform(img_A)
        img_B = self.transform(img_B)
        return {"A": img_A, "B": img_B}

    def __len__(self):
        return self.len

    def get_file_paths(self, root, dataset_name):
        """Collect (filesA, filesB, annotations) for the requested dataset.

        Bug fix: `annotations` was only bound in the 'Diver' branch, so the
        unconditional `return filesA, filesB, annotations` raised NameError
        for 'EUVP' and 'UFO-120' even though __init__ always unpacks three
        values.  All three lists are now initialized up front; datasets
        without annotations simply yield an empty list.
        """
        filesA, filesB, annotations = [], [], []
        if dataset_name == 'EUVP':
            sub_dirs = ['underwater_imagenet', 'underwater_dark', 'underwater_scenes']
            for sd in sub_dirs:
                filesA += sorted(glob.glob(os.path.join(root, sd, 'trainA') + "/*.*"))
                filesB += sorted(glob.glob(os.path.join(root, sd, 'trainB') + "/*.*"))
        elif dataset_name == 'UFO-120':
            filesA = sorted(glob.glob(os.path.join(root, 'lrd') + "/*.*"))
            filesB = sorted(glob.glob(os.path.join(root, 'hr') + "/*.*"))
        elif dataset_name == 'Diver':
            sub_dirs = ['underwater_imagenet', 'underwater_dark', 'underwater_scenes']
            for sd in sub_dirs:
                filesA += sorted(glob.glob(os.path.join(root, sd, 'trainA') + "/*.*"))
                filesB += sorted(glob.glob(os.path.join(root, sd, 'trainB') + "/*.*"))
                annotations += sorted(glob.glob(os.path.join(root, sd, 'train_annotations') + "/*.*"))
        return filesA, filesB, annotations
class GetValImage(Dataset):
    """ Common data pipeline to organize and generate
        validation samples for various datasets
    """
    def __init__(self, root, dataset_name, transforms_=None, sub_dir='validation'):
        self.transform = transforms.Compose(transforms_)
        self.files = self.get_file_paths(root, dataset_name)
        self.len = len(self.files)

    def __getitem__(self, index):
        """Return a single validation sample as {"val": tensor}."""
        img_val = Image.open(self.files[index % self.len])
        img_val = self.transform(img_val)
        return {"val": img_val}

    def __len__(self):
        return self.len

    def get_file_paths(self, root, dataset_name):
        """Collect validation image paths for the requested dataset.

        Robustness fix: `files` was unbound for any dataset name other than
        'EUVP'/'UFO-120', which made the trailing `return files` raise
        NameError; it is now initialized up front, so unknown dataset names
        yield an empty list instead of crashing.
        """
        files = []
        if dataset_name == 'EUVP':
            sub_dirs = ['underwater_imagenet', 'underwater_dark', 'underwater_scenes']
            for sd in sub_dirs:
                files += sorted(glob.glob(os.path.join(root, sd, 'validation') + "/*.*"))
        elif dataset_name == 'UFO-120':
            files = sorted(glob.glob(os.path.join(root, 'lrd') + "/*.*"))
        return files
| [
"PIL.Image.open",
"numpy.random.random",
"torchvision.transforms.Compose",
"numpy.array",
"os.path.join"
] | [((498, 529), 'torchvision.transforms.Compose', 'transforms.Compose', (['transforms_'], {}), '(transforms_)\n', (516, 529), True, 'import torchvision.transforms as transforms\n'), ((731, 772), 'PIL.Image.open', 'Image.open', (['self.filesA[index % self.len]'], {}), '(self.filesA[index % self.len])\n', (741, 772), False, 'from PIL import Image\n'), ((789, 830), 'PIL.Image.open', 'Image.open', (['self.filesB[index % self.len]'], {}), '(self.filesB[index % self.len])\n', (799, 830), False, 'from PIL import Image\n'), ((2565, 2596), 'torchvision.transforms.Compose', 'transforms.Compose', (['transforms_'], {}), '(transforms_)\n', (2583, 2596), True, 'import torchvision.transforms as transforms\n'), ((2746, 2786), 'PIL.Image.open', 'Image.open', (['self.files[index % self.len]'], {}), '(self.files[index % self.len])\n', (2756, 2786), False, 'from PIL import Image\n'), ((842, 860), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (858, 860), True, 'import numpy as np\n'), ((904, 919), 'numpy.array', 'np.array', (['img_A'], {}), '(img_A)\n', (912, 919), True, 'import numpy as np\n'), ((976, 991), 'numpy.array', 'np.array', (['img_B'], {}), '(img_B)\n', (984, 991), True, 'import numpy as np\n'), ((1458, 1490), 'os.path.join', 'os.path.join', (['root', 'sd', '"""trainA"""'], {}), "(root, sd, 'trainA')\n", (1470, 1490), False, 'import os\n'), ((1545, 1577), 'os.path.join', 'os.path.join', (['root', 'sd', '"""trainB"""'], {}), "(root, sd, 'trainB')\n", (1557, 1577), False, 'import os\n'), ((1669, 1694), 'os.path.join', 'os.path.join', (['root', '"""lrd"""'], {}), "(root, 'lrd')\n", (1681, 1694), False, 'import os\n'), ((1748, 1772), 'os.path.join', 'os.path.join', (['root', '"""hr"""'], {}), "(root, 'hr')\n", (1760, 1772), False, 'import os\n'), ((3177, 3213), 'os.path.join', 'os.path.join', (['root', 'sd', '"""validation"""'], {}), "(root, sd, 'validation')\n", (3189, 3213), False, 'import os\n'), ((3300, 3325), 'os.path.join', 'os.path.join', (['root', 
'"""lrd"""'], {}), "(root, 'lrd')\n", (3312, 3325), False, 'import os\n'), ((2035, 2067), 'os.path.join', 'os.path.join', (['root', 'sd', '"""trainA"""'], {}), "(root, sd, 'trainA')\n", (2047, 2067), False, 'import os\n'), ((2122, 2154), 'os.path.join', 'os.path.join', (['root', 'sd', '"""trainB"""'], {}), "(root, sd, 'trainB')\n", (2134, 2154), False, 'import os\n'), ((2214, 2257), 'os.path.join', 'os.path.join', (['root', 'sd', '"""train_annotations"""'], {}), "(root, sd, 'train_annotations')\n", (2226, 2257), False, 'import os\n')] |
import json
import numpy as np
# Sentinel token ids substituted for special characters/markers in tokenized
# tweets.
# NOTE(review): every id literal below except `line_token` appears redacted
# to '<PASSWORD>' in this copy of the file — the original numeric token ids
# must be restored before this module can work correctly.
HTTPS_token = '<PASSWORD>'
RT_token = '<PASSWORD>'
at_token = '<PASSWORD>'
hashtag_token = '<PASSWORD>'
gt_token = '<PASSWORD>'
lt_token = '<PASSWORD>'
amp_token = '<PASSWORD>'
question_token = '<PASSWORD>'
esclamation_token = '<PASSWORD>'
period_token = '<PASSWORD>'
two_periods_token = '<PASSWORD>'
coma_token = '<PASSWORD>'
dollar_token = '<PASSWORD>'
period_coma_token = '<PASSWORD>'
parenthesis_open_token = '<PASSWORD>'
parenthesis_closed_token = '<PASSWORD>'
star_token = '<PASSWORD>'
slash_token = '<PASSWORD>'
line_token = '118'
underscore_token = '<PASSWORD>'
tilde_token = '<PASSWORD>'
virgolette_token = '<PASSWORD>'
square_parenthesis_open_token = '<PASSWORD>'
square_parenthesis_closed_token = '<PASSWORD>'
unk_token = '<PASSWORD>'
others_tokens = ['<PASSWORD>', '<PASSWORD>']
# flat list of all special token ids (used e.g. for membership tests)
special_tokens_list = [
    at_token,
    hashtag_token,
    gt_token,
    lt_token,
    amp_token,
    question_token,
    esclamation_token,
    period_token,
    coma_token,
    dollar_token,
    period_coma_token,
    two_periods_token,
    parenthesis_open_token,
    parenthesis_closed_token,
    star_token,
    slash_token,
    line_token,
    underscore_token,
    tilde_token,
    virgolette_token,
    square_parenthesis_open_token,
    square_parenthesis_closed_token,
    unk_token
]
# mapping from the literal character/marker to its token id
special_tokens = {
    'https': HTTPS_token,
    'RT': RT_token,
    '@': at_token,
    '>': gt_token,
    '<': lt_token,
    '&': amp_token,
    '?': question_token,
    '!': esclamation_token,
    '.': period_token,
    ':': two_periods_token,
    '#': hashtag_token,
    ',': coma_token,
    '$': dollar_token,
    ';': period_coma_token,
    '(': parenthesis_open_token,
    ')': parenthesis_closed_token,
    '*': star_token,
    '/': slash_token,
    '-': line_token,
    '_': underscore_token,
    '~': tilde_token,
    '"': virgolette_token,
    '[': square_parenthesis_open_token,
    ']': square_parenthesis_closed_token,
    '[UNK]': unk_token
}
# convert a tab-separated string of numbers to a list of ints
# (PEP 8 E731: plain functions instead of lambda assignments)
def f_to_int(x):
    """Convert a single string token to int."""
    return int(x)


def f_int(x):
    """Split a tab-separated string and convert each field to int."""
    return [f_to_int(t) for t in x.split('\t')]
# persist one tweet as "<tweet_id>,<tok1>\t<tok2>...\n"
def save_tweet(identifier, text_tokens, output_file):
    """Write a tweet id and its tab-joined token ids as one CSV line."""
    joined = '\t'.join(str(tok) for tok in text_tokens)
    output_file.write(identifier + ',' + joined + '\n')
# save mentions, hashtags or links
def save(identifier, text_tokens, text, mapped, count, output_file):
    """Serialize mentions/hashtags/links of one tweet as a \\x01-separated line.

    Each entity's token list is joined with "\\t"; entities are joined
    with ";".  Note: `text_tokens` is flattened in place (each inner list
    is replaced by its tab-joined string) before writing.
    """
    for idx in range(len(text_tokens)):
        text_tokens[idx] = '\t'.join(map(str, text_tokens[idx]))
    fields = [
        identifier,
        str(count),
        ';'.join(text_tokens),
        ''.join(text),
        '\t'.join(map(str, mapped)),
    ]
    output_file.write('\x01'.join(fields) + '\n')
def save_tweet_length(tweet_id, length, output_file):
    """Write "<tweet_id>,<length>" as one CSV line."""
    output_file.write("{},{}\n".format(tweet_id, length))
def split_line(l):
    """Parse "<tweet_id>,<tok1>\\t<tok2>..." into (tweet_id, token strings)."""
    fields = l.split(',')
    return fields[0], fields[1].split('\t')
def load_mapping(path):
    """Load a JSON word->id mapping from *path*.

    Returns:
        tuple: (mapping dict, highest id in use, word carrying that id)
    """
    with open(path, 'r') as json_file:
        mapping_dict = json.load(json_file)
    key, current_mapping = max(mapping_dict.items(), key=lambda item: item[1])
    print("Loaded mapping : ", path)
    return mapping_dict, current_mapping, key
def save_mapping(path, mapping_dict):
    """Serialize a word->id mapping to *path* as JSON."""
    with open(path, 'w+') as out_file:
        out_file.write(json.dumps(mapping_dict))
    print("Saved mapping : ", path)
def map_to_unique_ids(_list, _dict, _current_mapping):
    """Map every element to a stable integer id.

    Unseen elements are added to *_dict* (mutated in place) with
    consecutive ids starting at *_current_mapping*.

    Returns:
        tuple: (list of ids, next unused id)
    """
    mapped = []
    for elem in _list:
        assigned = _dict.get(elem)
        if assigned is None:
            assigned = _current_mapping
            _dict[elem] = assigned
            _current_mapping += 1
        mapped.append(assigned)
    return mapped, _current_mapping
def read_sentences(input_file, lines_num, header_first_line):
    """Read up to *lines_num* lines of "<tweet_id>,<tok>\\t<tok>..." CSV.

    Args:
        input_file (str): path of the tokenized-tweet CSV file
        lines_num (int): stop after this many data rows (-1 or any value
            never reached reads the whole file)
        header_first_line (bool): skip the first line (CSV header)

    Returns:
        tuple: (list of tweet ids, list of int token-id lists)
    """
    tweet_ids = []
    sentences = []
    row = 0
    with open(input_file, "r", encoding='utf-8') as reader:
        if header_first_line:
            reader.readline() # ignore the first line since it contains the CSV header
        while True:
            #if row % 100000 == 0:
            #    print("\tReading line: ", row)
            # early exit once the requested number of rows has been read
            if row == lines_num:
                print("\tLines : ", row)
                return tweet_ids, sentences
            line = reader.readline()
            if not line:
                break
            # produce embeddings not for all the rows
            #if row % 100 == 0:
            line = line.strip().split(',')
            tweet_id = line[0]
            input_ids = f_int(line[1])
            tweet_ids.append(tweet_id)
            sentences.append(input_ids)
            row += 1
    print("\tLines : ", row)
    return tweet_ids, sentences
# to reconstruct hashtags and mentions text
# return one space-free string per hashtag/mention
# @param _list : list containing lists of tokens (one list per each hashtag/mention)
def decode_hashtags_mentions(tokenizer, _list):
    """Decode each token-id list back to text with all spaces removed."""
    return [tokenizer.decode(elem).replace(' ', '') for elem in _list]
def replace_escaped_chars(line):
gt_string = "111\t175\t10123\t132" # "& gt ;"
lt_string = "111\t43900\t132" # "& lt ;"
amp_string = "111\t10392\t10410\t132" # "& amp ;"
if gt_string in line:
line = line.replace(gt_string, gt_token)
if lt_string in line:
line = line.replace(lt_string, lt_token)
if amp_string in line:
line = line.replace(amp_string, amp_token)
return line
# return text_tokens, mentions_list, mentions_count
# in case the tweet is a retweet
def get_RT_mentions(tokens, mentions):
length = len(tokens)-1
i = 2 # exclude CLS and the 56898 ('RT') token
while tokens[i] != special_tokens[':'] and i < length:
i += 1
#print('i: ' + str(i))
mentions.append(tokens[2:i])
#mentions.append('102\n') # append SEP \n
tokens = tokens[i+1:]
tokens.insert(0, '101') # insert CLS at beginning
return tokens, mentions
def get_remove_mentions_hashtags(tokenizer, tokens, mentions, hashtags):
found_initial = False
mask = []
initial_index = 0
final_index = 0
is_mention = False
for i in range(len(tokens)):
t = tokens[i]
if found_initial and i == initial_index+1:
mask.append(False)
elif found_initial and i > initial_index+1:
decoded_t = tokenizer.convert_tokens_to_strings([t])[0]
if '##' in decoded_t:
mask.append(False)
elif '_' == decoded_t:
mask.append(False)
elif tokenizer.convert_tokens_to_strings([tokens[i-1]])[0] == '_':
mask.append(False)
else:
final_index = i
if is_mention:
mentions.append(tokens[initial_index:final_index])
else:
hashtags.append(tokens[initial_index:final_index])
found_initial = False
# mask.append(True)
if not found_initial and (t == special_tokens['@'] or t == special_tokens['#']):
if t == special_tokens['@']:
is_mention = True
elif t == special_tokens['#']:
is_mention = False
initial_index = i
found_initial = True
mask.append(False)
elif not found_initial:
mask.append(True)
#print(decoded_t)
tokens_arr = np.array(tokens)
tokens_arr = tokens_arr[mask]
tokens = tokens_arr.tolist()
return tokens, mentions, hashtags
def get_remove_links(tokenizer, tokens):
links_strings = []
encoded_links = []
if special_tokens['https'] in tokens:
decoded_tokens = tokenizer.decode(tokens).split(' ')
mask = []
length = len(decoded_tokens)
finished = False
index = 0
i = 0
while i < length and not finished:
dec_t = decoded_tokens[i]
if dec_t == 'https':
try:
index = i + 7
links_strings.append(decoded_tokens[index])
for j in range(8):
mask.append(False) # link splittato in 8 elementi tutti da rimuovere
i += 8
#print(initial_index, final_index)
except:
#print(decoded_tokens)
#print(i)
#print(index)
#print(decoded_tokens[i])
for j in range(i, length):
mask.append(False)
finished = True
else:
mask.append(True)
i += 1
#print(decoded_tokens)
#print(len(decoded_tokens))
#print(mask)
#print(len(mask))
tokens_arr = np.array(decoded_tokens)
tokens_arr = tokens_arr[mask]
decoded_tokens = tokens_arr.tolist()
tokens = tokenizer.encode(' '.join(decoded_tokens))
tokens = tokens[1:-1] # ignore CLS ans SEP (they are duplicated)
#print(tokenizer.decode(tokens))
# encode only the last string in each link (ignore the "https://t.co")
for l in range(len(links_strings)):
if links_strings[l] == '[SEP]':
links_strings.pop(l)
else:
links_strings[l] = links_strings[l].replace(',', '').replace('.','') # a link cointained ',' (in un csv spostava tutte le colonne)
enc_l = tokenizer.encode(links_strings[l])
encoded_links.append(enc_l[1:-1]) # ignore CLS ans SEP
#print(links_strings)
else:
tokens[-1] = '102' # remove the last "\n" (tokens ends with "102\n")
return tokens, encoded_links, links_strings
def reduce_num_special_tokens(tokens_list):
for special_token in special_tokens_list:
if special_token in tokens_list:
#print('special token: ' + special_token)
count = 0
index = 0
old_token = '101'
for token in tokens_list:
if token != old_token and count > 1:
#print('index: ' + str(index))
#print('count: ' + str(count))
#print('current token: ' + token)
#print('old token: ' + old_token)
tokens_list[index:index+count-1] = [special_token]
count = 0
elif token == special_token:
count += 1
else:
index += 1
count = 0
old_token = token
return tokens_list | [
"numpy.array",
"json.dumps"
] | [((3448, 3472), 'json.dumps', 'json.dumps', (['mapping_dict'], {}), '(mapping_dict)\n', (3458, 3472), False, 'import json\n'), ((7881, 7897), 'numpy.array', 'np.array', (['tokens'], {}), '(tokens)\n', (7889, 7897), True, 'import numpy as np\n'), ((9308, 9332), 'numpy.array', 'np.array', (['decoded_tokens'], {}), '(decoded_tokens)\n', (9316, 9332), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Showcases gamut section plotting examples.
"""
import numpy as np
from matplotlib.lines import Line2D
import colour.plotting
from colour.plotting import (
colour_style,
plot_visible_spectrum_section,
plot_RGB_colourspace_section,
)
from colour.utilities import message_box
message_box('Gamut Section Plots')
colour_style()
message_box('Plotting a "Visible Spectrum" section at 50% "Lightness" in the '
'"CIE Luv" colourspace.')
plot_visible_spectrum_section(
model='CIE Luv',
origin=0.5,
)
print('\n')
message_box('Plotting a "Visible Spectrum" section at 50% "Lightness" in the '
'"CIE Luv" colourspace and customising the section styling.')
plot_visible_spectrum_section(
model='CIE Luv', origin=0.5, section_colours='RGB', section_opacity=0.15)
print('\n')
message_box('Plotting a "Visible Spectrum" section at 50% "Lightness" in the '
'"CIE Luv" colourspace.')
plot_visible_spectrum_section(model='CIE Luv', origin=0.5)
print('\n')
message_box(
'Plotting a "Visible Spectrum" section at 25% along the "u" axis in the '
'"CIE Luv" colourspace.')
plot_visible_spectrum_section(
model='CIE Luv',
axis='+x',
origin=0.25,
section_colours='RGB',
section_opacity=0.15)
print('\n')
message_box('Plotting a "sRGB" colourspace section at 50% "Lightness" in the '
'"ICtCp" colourspace using section normalisation.')
plot_RGB_colourspace_section(
colourspace='sRGB', model='ICtCp', origin=0.5, normalise=True)
print('\n')
message_box(
'Combining multiple hull sections together at 25% "Lightness" in the '
'"Oklab" colourspace.')
figure, axes = plot_visible_spectrum_section(
model='Oklab', origin=0.25, section_opacity=0.15, standalone=False)
plot_RGB_colourspace_section(
colourspace='sRGB',
model='Oklab',
origin=0.25,
section_colours='RGB',
section_opacity=0.15,
contour_colours='RGB',
axes=axes)
print('\n')
message_box(
'Combining multiple hull sections together at varying "Lightness" in the '
'"DIN99" colourspace.')
figure, axes = plot_visible_spectrum_section(
model='DIN99', origin=0.5, section_opacity=0.15, standalone=False)
bounding_box = [
axes.get_xlim()[0],
axes.get_xlim()[1],
axes.get_ylim()[0],
axes.get_ylim()[1]
]
section_colours = colour.notation.HEX_to_RGB(
colour.plotting.CONSTANTS_COLOUR_STYLE.colour.cycle[:4])
origins = []
legend_lines = []
for i, RGB in zip(np.arange(0.5, 0.9, 0.1), section_colours):
origins.append(i * 100)
plot_RGB_colourspace_section(
colourspace='sRGB',
model='DIN99',
origin=i,
section_colours=RGB,
section_opacity=0.15,
contour_colours=RGB,
axes=axes,
standalone=False)
legend_lines.append(
Line2D([0], [0], color=RGB, label='{0}%'.format(i * 100)))
axes.legend(handles=legend_lines)
colour.plotting.render(
title='Visible Spectrum - 50% - sRGB Sections - {0}% - DIN99'.format(
origins),
axes=axes,
bounding_box=bounding_box)
| [
"colour.utilities.message_box",
"colour.plotting.colour_style",
"colour.plotting.plot_visible_spectrum_section",
"numpy.arange",
"colour.plotting.plot_RGB_colourspace_section"
] | [((316, 350), 'colour.utilities.message_box', 'message_box', (['"""Gamut Section Plots"""'], {}), "('Gamut Section Plots')\n", (327, 350), False, 'from colour.utilities import message_box\n'), ((352, 366), 'colour.plotting.colour_style', 'colour_style', ([], {}), '()\n', (364, 366), False, 'from colour.plotting import colour_style, plot_visible_spectrum_section, plot_RGB_colourspace_section\n'), ((368, 479), 'colour.utilities.message_box', 'message_box', (['"""Plotting a "Visible Spectrum" section at 50% "Lightness" in the "CIE Luv" colourspace."""'], {}), '(\n \'Plotting a "Visible Spectrum" section at 50% "Lightness" in the "CIE Luv" colourspace.\'\n )\n', (379, 479), False, 'from colour.utilities import message_box\n'), ((486, 544), 'colour.plotting.plot_visible_spectrum_section', 'plot_visible_spectrum_section', ([], {'model': '"""CIE Luv"""', 'origin': '(0.5)'}), "(model='CIE Luv', origin=0.5)\n", (515, 544), False, 'from colour.plotting import colour_style, plot_visible_spectrum_section, plot_RGB_colourspace_section\n'), ((570, 717), 'colour.utilities.message_box', 'message_box', (['"""Plotting a "Visible Spectrum" section at 50% "Lightness" in the "CIE Luv" colourspace and customising the section styling."""'], {}), '(\n \'Plotting a "Visible Spectrum" section at 50% "Lightness" in the "CIE Luv" colourspace and customising the section styling.\'\n )\n', (581, 717), False, 'from colour.utilities import message_box\n'), ((724, 832), 'colour.plotting.plot_visible_spectrum_section', 'plot_visible_spectrum_section', ([], {'model': '"""CIE Luv"""', 'origin': '(0.5)', 'section_colours': '"""RGB"""', 'section_opacity': '(0.15)'}), "(model='CIE Luv', origin=0.5, section_colours=\n 'RGB', section_opacity=0.15)\n", (753, 832), False, 'from colour.plotting import colour_style, plot_visible_spectrum_section, plot_RGB_colourspace_section\n'), ((847, 958), 'colour.utilities.message_box', 'message_box', (['"""Plotting a "Visible Spectrum" section at 50% "Lightness" in 
the "CIE Luv" colourspace."""'], {}), '(\n \'Plotting a "Visible Spectrum" section at 50% "Lightness" in the "CIE Luv" colourspace.\'\n )\n', (858, 958), False, 'from colour.utilities import message_box\n'), ((965, 1023), 'colour.plotting.plot_visible_spectrum_section', 'plot_visible_spectrum_section', ([], {'model': '"""CIE Luv"""', 'origin': '(0.5)'}), "(model='CIE Luv', origin=0.5)\n", (994, 1023), False, 'from colour.plotting import colour_style, plot_visible_spectrum_section, plot_RGB_colourspace_section\n'), ((1038, 1156), 'colour.utilities.message_box', 'message_box', (['"""Plotting a "Visible Spectrum" section at 25% along the "u" axis in the "CIE Luv" colourspace."""'], {}), '(\n \'Plotting a "Visible Spectrum" section at 25% along the "u" axis in the "CIE Luv" colourspace.\'\n )\n', (1049, 1156), False, 'from colour.utilities import message_box\n'), ((1160, 1279), 'colour.plotting.plot_visible_spectrum_section', 'plot_visible_spectrum_section', ([], {'model': '"""CIE Luv"""', 'axis': '"""+x"""', 'origin': '(0.25)', 'section_colours': '"""RGB"""', 'section_opacity': '(0.15)'}), "(model='CIE Luv', axis='+x', origin=0.25,\n section_colours='RGB', section_opacity=0.15)\n", (1189, 1279), False, 'from colour.plotting import colour_style, plot_visible_spectrum_section, plot_RGB_colourspace_section\n'), ((1311, 1448), 'colour.utilities.message_box', 'message_box', (['"""Plotting a "sRGB" colourspace section at 50% "Lightness" in the "ICtCp" colourspace using section normalisation."""'], {}), '(\n \'Plotting a "sRGB" colourspace section at 50% "Lightness" in the "ICtCp" colourspace using section normalisation.\'\n )\n', (1322, 1448), False, 'from colour.utilities import message_box\n'), ((1455, 1550), 'colour.plotting.plot_RGB_colourspace_section', 'plot_RGB_colourspace_section', ([], {'colourspace': '"""sRGB"""', 'model': '"""ICtCp"""', 'origin': '(0.5)', 'normalise': '(True)'}), "(colourspace='sRGB', model='ICtCp', origin=0.5,\n normalise=True)\n", (1483, 1550), 
False, 'from colour.plotting import colour_style, plot_visible_spectrum_section, plot_RGB_colourspace_section\n'), ((1566, 1679), 'colour.utilities.message_box', 'message_box', (['"""Combining multiple hull sections together at 25% "Lightness" in the "Oklab" colourspace."""'], {}), '(\n \'Combining multiple hull sections together at 25% "Lightness" in the "Oklab" colourspace.\'\n )\n', (1577, 1679), False, 'from colour.utilities import message_box\n'), ((1698, 1800), 'colour.plotting.plot_visible_spectrum_section', 'plot_visible_spectrum_section', ([], {'model': '"""Oklab"""', 'origin': '(0.25)', 'section_opacity': '(0.15)', 'standalone': '(False)'}), "(model='Oklab', origin=0.25, section_opacity=\n 0.15, standalone=False)\n", (1727, 1800), False, 'from colour.plotting import colour_style, plot_visible_spectrum_section, plot_RGB_colourspace_section\n'), ((1801, 1964), 'colour.plotting.plot_RGB_colourspace_section', 'plot_RGB_colourspace_section', ([], {'colourspace': '"""sRGB"""', 'model': '"""Oklab"""', 'origin': '(0.25)', 'section_colours': '"""RGB"""', 'section_opacity': '(0.15)', 'contour_colours': '"""RGB"""', 'axes': 'axes'}), "(colourspace='sRGB', model='Oklab', origin=0.25,\n section_colours='RGB', section_opacity=0.15, contour_colours='RGB',\n axes=axes)\n", (1829, 1964), False, 'from colour.plotting import colour_style, plot_visible_spectrum_section, plot_RGB_colourspace_section\n'), ((2000, 2117), 'colour.utilities.message_box', 'message_box', (['"""Combining multiple hull sections together at varying "Lightness" in the "DIN99" colourspace."""'], {}), '(\n \'Combining multiple hull sections together at varying "Lightness" in the "DIN99" colourspace.\'\n )\n', (2011, 2117), False, 'from colour.utilities import message_box\n'), ((2136, 2237), 'colour.plotting.plot_visible_spectrum_section', 'plot_visible_spectrum_section', ([], {'model': '"""DIN99"""', 'origin': '(0.5)', 'section_opacity': '(0.15)', 'standalone': '(False)'}), "(model='DIN99', origin=0.5, 
section_opacity=\n 0.15, standalone=False)\n", (2165, 2237), False, 'from colour.plotting import colour_style, plot_visible_spectrum_section, plot_RGB_colourspace_section\n'), ((2511, 2535), 'numpy.arange', 'np.arange', (['(0.5)', '(0.9)', '(0.1)'], {}), '(0.5, 0.9, 0.1)\n', (2520, 2535), True, 'import numpy as np\n'), ((2587, 2762), 'colour.plotting.plot_RGB_colourspace_section', 'plot_RGB_colourspace_section', ([], {'colourspace': '"""sRGB"""', 'model': '"""DIN99"""', 'origin': 'i', 'section_colours': 'RGB', 'section_opacity': '(0.15)', 'contour_colours': 'RGB', 'axes': 'axes', 'standalone': '(False)'}), "(colourspace='sRGB', model='DIN99', origin=i,\n section_colours=RGB, section_opacity=0.15, contour_colours=RGB, axes=\n axes, standalone=False)\n", (2615, 2762), False, 'from colour.plotting import colour_style, plot_visible_spectrum_section, plot_RGB_colourspace_section\n')] |
import sys
import numpy as np
import matplotlib.pyplot as plt
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument("-f", "--file", dest="filename", help="f0 file", metavar="FILE")
args = parser.parse_args()
if args.filename is not None:
with open(args.filename, 'r') as f:
data = np.loadtxt(f, dtype=np.float32)
else:
data = np.loadtxt(sys.stdin, dtype=np.float32)
v = data[data >= -100]
data[data >= -100] = np.power(np.e, v)
data[data < -100] = 0
np.savetxt(sys.stdout, data, fmt='%.10f')
| [
"numpy.loadtxt",
"numpy.power",
"numpy.savetxt",
"argparse.ArgumentParser"
] | [((109, 125), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (123, 125), False, 'from argparse import ArgumentParser\n'), ((458, 475), 'numpy.power', 'np.power', (['np.e', 'v'], {}), '(np.e, v)\n', (466, 475), True, 'import numpy as np\n'), ((498, 539), 'numpy.savetxt', 'np.savetxt', (['sys.stdout', 'data'], {'fmt': '"""%.10f"""'}), "(sys.stdout, data, fmt='%.10f')\n", (508, 539), True, 'import numpy as np\n'), ((373, 412), 'numpy.loadtxt', 'np.loadtxt', (['sys.stdin'], {'dtype': 'np.float32'}), '(sys.stdin, dtype=np.float32)\n', (383, 412), True, 'import numpy as np\n'), ((324, 355), 'numpy.loadtxt', 'np.loadtxt', (['f'], {'dtype': 'np.float32'}), '(f, dtype=np.float32)\n', (334, 355), True, 'import numpy as np\n')] |
from typing import List, Tuple, Dict, Any, Optional, Union, Callable
import copy
from datetime import datetime
from pathos.multiprocessing import ProcessingPool as Pool
import numpy as np
from scipy.special import expit
import matplotlib.pyplot as plt
import matplotlib.cm as plt_cm
import matplotlib.colors as plt_colors
import plotly.graph_objects as go
import gym
from gym import spaces
from gym.utils import seeding
from ..simulator import MtSimulator, OrderType
class MtEnv(gym.Env):
metadata = {'render.modes': ['human', 'simple_figure', 'advanced_figure']}
def __init__(
self, original_simulator: MtSimulator, trading_symbols: List[str],
window_size: int, time_points: Optional[List[datetime]]=None,
hold_threshold: float=0.5, close_threshold: float=0.5,
fee: Union[float, Callable[[str], float]]=0.0005,
symbol_max_orders: int=1, multiprocessing_processes: Optional[int]=None
) -> None:
# validations
assert len(original_simulator.symbols_data) > 0, "no data available"
assert len(original_simulator.symbols_info) > 0, "no data available"
assert len(trading_symbols) > 0, "no trading symbols provided"
assert 0. <= hold_threshold <= 1., "'hold_threshold' must be in range [0., 1.]"
if not original_simulator.hedge:
symbol_max_orders = 1
for symbol in trading_symbols:
assert symbol in original_simulator.symbols_info, f"symbol '{symbol}' not found"
currency_profit = original_simulator.symbols_info[symbol].currency_profit
assert original_simulator._get_unit_symbol_info(currency_profit) is not None, \
f"unit symbol for '{currency_profit}' not found"
if time_points is None:
time_points = original_simulator.symbols_data[trading_symbols[0]].index.to_pydatetime().tolist()
assert len(time_points) > window_size, "not enough time points provided"
# attributes
self.seed()
self.original_simulator = original_simulator
self.trading_symbols = trading_symbols
self.window_size = window_size
self.time_points = time_points
self.hold_threshold = hold_threshold
self.close_threshold = close_threshold
self.fee = fee
self.symbol_max_orders = symbol_max_orders
self.multiprocessing_pool = Pool(multiprocessing_processes) if multiprocessing_processes else None
self.prices = self._get_prices()
self.signal_features = self._process_data()
self.features_shape = (window_size, self.signal_features.shape[1])
# spaces
self.action_space = spaces.Box(
low=-np.inf, high=np.inf,
shape=(len(self.trading_symbols) * (self.symbol_max_orders + 2),)
) # symbol -> [close_order_i(logit), hold(logit), volume]
self.observation_space = spaces.Dict({
'balance': spaces.Box(low=-np.inf, high=np.inf, shape=(1,)),
'equity': spaces.Box(low=-np.inf, high=np.inf, shape=(1,)),
'margin': spaces.Box(low=-np.inf, high=np.inf, shape=(1,)),
'features': spaces.Box(low=-np.inf, high=np.inf, shape=self.features_shape),
'orders': spaces.Box(
low=-np.inf, high=np.inf,
shape=(len(self.trading_symbols), self.symbol_max_orders, 3)
) # symbol, order_i -> [entry_price, volume, profit]
})
# episode
self._start_tick = self.window_size - 1
self._end_tick = len(self.time_points) - 1
self._done: bool = NotImplemented
self._current_tick: int = NotImplemented
self.simulator: MtSimulator = NotImplemented
self.history: List[Dict[str, Any]] = NotImplemented
def seed(self, seed: Optional[int]=None) -> List[int]:
self.np_random, seed = seeding.np_random(seed)
return [seed]
def reset(self) -> Dict[str, np.ndarray]:
self._done = False
self._current_tick = self._start_tick
self.simulator = copy.deepcopy(self.original_simulator)
self.simulator.current_time = self.time_points[self._current_tick]
self.history = [self._create_info()]
return self._get_observation()
def step(self, action: np.ndarray) -> Tuple[Dict[str, np.ndarray], float, bool, Dict[str, Any]]:
orders_info, closed_orders_info = self._apply_action(action)
self._current_tick += 1
if self._current_tick == self._end_tick:
self._done = True
dt = self.time_points[self._current_tick] - self.time_points[self._current_tick - 1]
self.simulator.tick(dt)
step_reward = self._calculate_reward()
info = self._create_info(
orders=orders_info, closed_orders=closed_orders_info, step_reward=step_reward
)
observation = self._get_observation()
self.history.append(info)
return observation, step_reward, self._done, info
def _apply_action(self, action: np.ndarray) -> Tuple[Dict, Dict]:
orders_info = {}
closed_orders_info = {symbol: [] for symbol in self.trading_symbols}
k = self.symbol_max_orders + 2
for i, symbol in enumerate(self.trading_symbols):
symbol_action = action[k*i:k*(i+1)]
close_orders_logit = symbol_action[:-2]
hold_logit = symbol_action[-2]
volume = symbol_action[-1]
close_orders_probability = expit(close_orders_logit)
hold_probability = expit(hold_logit)
hold = bool(hold_probability > self.hold_threshold)
modified_volume = self._get_modified_volume(symbol, volume)
symbol_orders = self.simulator.symbol_orders(symbol)
orders_to_close_index = np.where(
close_orders_probability[:len(symbol_orders)] > self.close_threshold
)[0]
orders_to_close = np.array(symbol_orders)[orders_to_close_index]
for j, order in enumerate(orders_to_close):
self.simulator.close_order(order)
closed_orders_info[symbol].append(dict(
order_id=order.id, symbol=order.symbol, order_type=order.type,
volume=order.volume, fee=order.fee,
margin=order.margin, profit=order.profit,
close_probability=close_orders_probability[orders_to_close_index][j],
))
orders_capacity = self.symbol_max_orders - (len(symbol_orders) - len(orders_to_close))
orders_info[symbol] = dict(
order_id=None, symbol=symbol, hold_probability=hold_probability,
hold=hold, volume=volume, capacity=orders_capacity, order_type=None,
modified_volume=modified_volume, fee=float('nan'), margin=float('nan'),
error='',
)
if self.simulator.hedge and orders_capacity == 0:
orders_info[symbol].update(dict(
error="cannot add more orders"
))
elif not hold:
order_type = OrderType.Buy if volume > 0. else OrderType.Sell
fee = self.fee if type(self.fee) is float else self.fee(symbol)
try:
order = self.simulator.create_order(order_type, symbol, modified_volume, fee)
new_info = dict(
order_id=order.id, order_type=order_type,
fee=fee, margin=order.margin,
)
except ValueError as e:
new_info = dict(error=str(e))
orders_info[symbol].update(new_info)
return orders_info, closed_orders_info
def _get_prices(self, keys: List[str]=['Close', 'Open']) -> Dict[str, np.ndarray]:
prices = {}
for symbol in self.trading_symbols:
get_price_at = lambda time: \
self.original_simulator.price_at(symbol, time)[keys]
if self.multiprocessing_pool is None:
p = list(map(get_price_at, self.time_points))
else:
p = self.multiprocessing_pool.map(get_price_at, self.time_points)
prices[symbol] = np.array(p)
return prices
def _process_data(self) -> np.ndarray:
data = self.prices
signal_features = np.column_stack(list(data.values()))
return signal_features
def _get_observation(self) -> Dict[str, np.ndarray]:
features = self.signal_features[(self._current_tick-self.window_size+1):(self._current_tick+1)]
orders = np.zeros(self.observation_space['orders'].shape)
for i, symbol in enumerate(self.trading_symbols):
symbol_orders = self.simulator.symbol_orders(symbol)
for j, order in enumerate(symbol_orders):
orders[i, j] = [order.entry_price, order.volume, order.profit]
observation = {
'balance': np.array([self.simulator.balance]),
'equity': np.array([self.simulator.equity]),
'margin': np.array([self.simulator.margin]),
'features': features,
'orders': orders,
}
return observation
def _calculate_reward(self) -> float:
prev_equity = self.history[-1]['equity']
current_equity = self.simulator.equity
step_reward = current_equity - prev_equity
return step_reward
def _create_info(self, **kwargs: Any) -> Dict[str, Any]:
info = {k: v for k, v in kwargs.items()}
info['balance'] = self.simulator.balance
info['equity'] = self.simulator.equity
info['margin'] = self.simulator.margin
info['free_margin'] = self.simulator.free_margin
info['margin_level'] = self.simulator.margin_level
return info
def _get_modified_volume(self, symbol: str, volume: float) -> float:
si = self.simulator.symbols_info[symbol]
v = abs(volume)
v = np.clip(v, si.volume_min, si.volume_max)
v = round(v / si.volume_step) * si.volume_step
return v
def render(self, mode: str='human', **kwargs: Any) -> Any:
if mode == 'simple_figure':
return self._render_simple_figure(**kwargs)
if mode == 'advanced_figure':
return self._render_advanced_figure(**kwargs)
return self.simulator.get_state(**kwargs)
def _render_simple_figure(
self, figsize: Tuple[float, float]=(14, 6), return_figure: bool=False
) -> Any:
fig, ax = plt.subplots(figsize=figsize, facecolor='white')
cmap_colors = np.array(plt_cm.tab10.colors)[[0, 1, 4, 5, 6, 8]]
cmap = plt_colors.LinearSegmentedColormap.from_list('mtsim', cmap_colors)
symbol_colors = cmap(np.linspace(0, 1, len(self.trading_symbols)))
for j, symbol in enumerate(self.trading_symbols):
close_price = self.prices[symbol][:, 0]
symbol_color = symbol_colors[j]
ax.plot(self.time_points, close_price, c=symbol_color, marker='.', label=symbol)
buy_ticks = []
buy_error_ticks = []
sell_ticks = []
sell_error_ticks = []
close_ticks = []
for i in range(1, len(self.history)):
tick = self._start_tick + i - 1
order = self.history[i]['orders'].get(symbol, {})
if order and not order['hold']:
if order['order_type'] == OrderType.Buy:
if order['error']:
buy_error_ticks.append(tick)
else:
buy_ticks.append(tick)
else:
if order['error']:
sell_error_ticks.append(tick)
else:
sell_ticks.append(tick)
closed_orders = self.history[i]['closed_orders'].get(symbol, [])
if len(closed_orders) > 0:
close_ticks.append(tick)
tp = np.array(self.time_points)
ax.plot(tp[buy_ticks], close_price[buy_ticks], '^', color='green')
ax.plot(tp[buy_error_ticks], close_price[buy_error_ticks], '^', color='gray')
ax.plot(tp[sell_ticks], close_price[sell_ticks], 'v', color='red')
ax.plot(tp[sell_error_ticks], close_price[sell_error_ticks], 'v', color='gray')
ax.plot(tp[close_ticks], close_price[close_ticks], '|', color='black')
ax.tick_params(axis='y', labelcolor=symbol_color)
ax.yaxis.tick_left()
if j < len(self.trading_symbols) - 1:
ax = ax.twinx()
fig.suptitle(
f"Balance: {self.simulator.balance:.6f} {self.simulator.unit} ~ "
f"Equity: {self.simulator.equity:.6f} ~ "
f"Margin: {self.simulator.margin:.6f} ~ "
f"Free Margin: {self.simulator.free_margin:.6f} ~ "
f"Margin Level: {self.simulator.margin_level:.6f}"
)
fig.legend(loc='right')
if return_figure:
return fig
plt.show()
def _render_advanced_figure(
self, figsize: Tuple[float, float]=(1400, 600), time_format: str="%Y-%m-%d %H:%m",
return_figure: bool=False
) -> Any:
fig = go.Figure()
cmap_colors = np.array(plt_cm.tab10.colors)[[0, 1, 4, 5, 6, 8]]
cmap = plt_colors.LinearSegmentedColormap.from_list('mtsim', cmap_colors)
symbol_colors = cmap(np.linspace(0, 1, len(self.trading_symbols)))
get_color_string = lambda color: "rgba(%s, %s, %s, %s)" % tuple(color)
extra_info = [
f"balance: {h['balance']:.6f} {self.simulator.unit}<br>"
f"equity: {h['equity']:.6f}<br>"
f"margin: {h['margin']:.6f}<br>"
f"free margin: {h['free_margin']:.6f}<br>"
f"margin level: {h['margin_level']:.6f}"
for h in self.history
]
extra_info = [extra_info[0]] * (self.window_size - 1) + extra_info
for j, symbol in enumerate(self.trading_symbols):
close_price = self.prices[symbol][:, 0]
symbol_color = symbol_colors[j]
fig.add_trace(
go.Scatter(
x=self.time_points,
y=close_price,
mode='lines+markers',
line_color=get_color_string(symbol_color),
opacity=1.0,
hovertext=extra_info,
name=symbol,
yaxis=f'y{j+1}',
legendgroup=f'g{j+1}',
),
)
fig.update_layout(**{
f'yaxis{j+1}': dict(
tickfont=dict(color=get_color_string(symbol_color * [1, 1, 1, 0.8])),
overlaying='y' if j > 0 else None,
# position=0.035*j
),
})
trade_ticks = []
trade_markers = []
trade_colors = []
trade_sizes = []
trade_extra_info = []
trade_max_volume = max([
h.get('orders', {}).get(symbol, {}).get('modified_volume') or 0
for h in self.history
])
close_ticks = []
close_extra_info = []
for i in range(1, len(self.history)):
tick = self._start_tick + i - 1
order = self.history[i]['orders'].get(symbol)
if order and not order['hold']:
marker = None
color = None
size = 8 + 22 * (order['modified_volume'] / trade_max_volume)
info = (
f"order id: {order['order_id'] or ''}<br>"
f"hold probability: {order['hold_probability']:.4f}<br>"
f"hold: {order['hold']}<br>"
f"volume: {order['volume']:.6f}<br>"
f"modified volume: {order['modified_volume']:.4f}<br>"
f"fee: {order['fee']:.6f}<br>"
f"margin: {order['margin']:.6f}<br>"
f"error: {order['error']}"
)
if order['order_type'] == OrderType.Buy:
marker = 'triangle-up'
color = 'gray' if order['error'] else 'green'
else:
marker = 'triangle-down'
color = 'gray' if order['error'] else 'red'
trade_ticks.append(tick)
trade_markers.append(marker)
trade_colors.append(color)
trade_sizes.append(size)
trade_extra_info.append(info)
closed_orders = self.history[i]['closed_orders'].get(symbol, [])
if len(closed_orders) > 0:
info = []
for order in closed_orders:
info_i = (
f"order id: {order['order_id']}<br>"
f"order type: {order['order_type'].name}<br>"
f"close probability: {order['close_probability']:.4f}<br>"
f"margin: {order['margin']:.6f}<br>"
f"profit: {order['profit']:.6f}"
)
info.append(info_i)
info = '<br>---------------------------------<br>'.join(info)
close_ticks.append(tick)
close_extra_info.append(info)
fig.add_trace(
go.Scatter(
x=np.array(self.time_points)[trade_ticks],
y=close_price[trade_ticks],
mode='markers',
hovertext=trade_extra_info,
marker_symbol=trade_markers,
marker_color=trade_colors,
marker_size=trade_sizes,
name=symbol,
yaxis=f'y{j+1}',
showlegend=False,
legendgroup=f'g{j+1}',
),
)
fig.add_trace(
go.Scatter(
x=np.array(self.time_points)[close_ticks],
y=close_price[close_ticks],
mode='markers',
hovertext=close_extra_info,
marker_symbol='line-ns',
marker_color='black',
marker_size=7,
marker_line_width=1.5,
name=symbol,
yaxis=f'y{j+1}',
showlegend=False,
legendgroup=f'g{j+1}',
),
)
title = (
f"Balance: {self.simulator.balance:.6f} {self.simulator.unit} ~ "
f"Equity: {self.simulator.equity:.6f} ~ "
f"Margin: {self.simulator.margin:.6f} ~ "
f"Free Margin: {self.simulator.free_margin:.6f} ~ "
f"Margin Level: {self.simulator.margin_level:.6f}"
)
fig.update_layout(
title=title,
xaxis_tickformat=time_format,
width=figsize[0],
height=figsize[1],
)
if return_figure:
return fig
fig.show()
def close(self) -> None:
plt.close()
| [
"matplotlib.colors.LinearSegmentedColormap.from_list",
"copy.deepcopy",
"matplotlib.pyplot.show",
"plotly.graph_objects.Figure",
"matplotlib.pyplot.close",
"pathos.multiprocessing.ProcessingPool",
"numpy.zeros",
"numpy.clip",
"scipy.special.expit",
"numpy.array",
"gym.spaces.Box",
"matplotlib.... | [((3895, 3918), 'gym.utils.seeding.np_random', 'seeding.np_random', (['seed'], {}), '(seed)\n', (3912, 3918), False, 'from gym.utils import seeding\n'), ((4087, 4125), 'copy.deepcopy', 'copy.deepcopy', (['self.original_simulator'], {}), '(self.original_simulator)\n', (4100, 4125), False, 'import copy\n'), ((8669, 8717), 'numpy.zeros', 'np.zeros', (["self.observation_space['orders'].shape"], {}), "(self.observation_space['orders'].shape)\n", (8677, 8717), True, 'import numpy as np\n'), ((10042, 10082), 'numpy.clip', 'np.clip', (['v', 'si.volume_min', 'si.volume_max'], {}), '(v, si.volume_min, si.volume_max)\n', (10049, 10082), True, 'import numpy as np\n'), ((10601, 10649), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'figsize', 'facecolor': '"""white"""'}), "(figsize=figsize, facecolor='white')\n", (10613, 10649), True, 'import matplotlib.pyplot as plt\n'), ((10738, 10804), 'matplotlib.colors.LinearSegmentedColormap.from_list', 'plt_colors.LinearSegmentedColormap.from_list', (['"""mtsim"""', 'cmap_colors'], {}), "('mtsim', cmap_colors)\n", (10782, 10804), True, 'import matplotlib.colors as plt_colors\n'), ((13199, 13209), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (13207, 13209), True, 'import matplotlib.pyplot as plt\n'), ((13411, 13422), 'plotly.graph_objects.Figure', 'go.Figure', ([], {}), '()\n', (13420, 13422), True, 'import plotly.graph_objects as go\n'), ((13511, 13577), 'matplotlib.colors.LinearSegmentedColormap.from_list', 'plt_colors.LinearSegmentedColormap.from_list', (['"""mtsim"""', 'cmap_colors'], {}), "('mtsim', cmap_colors)\n", (13555, 13577), True, 'import matplotlib.colors as plt_colors\n'), ((19538, 19549), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (19547, 19549), True, 'import matplotlib.pyplot as plt\n'), ((2416, 2447), 'pathos.multiprocessing.ProcessingPool', 'Pool', (['multiprocessing_processes'], {}), '(multiprocessing_processes)\n', (2420, 2447), True, 'from 
pathos.multiprocessing import ProcessingPool as Pool\n'), ((5512, 5537), 'scipy.special.expit', 'expit', (['close_orders_logit'], {}), '(close_orders_logit)\n', (5517, 5537), False, 'from scipy.special import expit\n'), ((5569, 5586), 'scipy.special.expit', 'expit', (['hold_logit'], {}), '(hold_logit)\n', (5574, 5586), False, 'from scipy.special import expit\n'), ((8287, 8298), 'numpy.array', 'np.array', (['p'], {}), '(p)\n', (8295, 8298), True, 'import numpy as np\n'), ((9022, 9056), 'numpy.array', 'np.array', (['[self.simulator.balance]'], {}), '([self.simulator.balance])\n', (9030, 9056), True, 'import numpy as np\n'), ((9080, 9113), 'numpy.array', 'np.array', (['[self.simulator.equity]'], {}), '([self.simulator.equity])\n', (9088, 9113), True, 'import numpy as np\n'), ((9137, 9170), 'numpy.array', 'np.array', (['[self.simulator.margin]'], {}), '([self.simulator.margin])\n', (9145, 9170), True, 'import numpy as np\n'), ((10673, 10702), 'numpy.array', 'np.array', (['plt_cm.tab10.colors'], {}), '(plt_cm.tab10.colors)\n', (10681, 10702), True, 'import numpy as np\n'), ((12134, 12160), 'numpy.array', 'np.array', (['self.time_points'], {}), '(self.time_points)\n', (12142, 12160), True, 'import numpy as np\n'), ((13446, 13475), 'numpy.array', 'np.array', (['plt_cm.tab10.colors'], {}), '(plt_cm.tab10.colors)\n', (13454, 13475), True, 'import numpy as np\n'), ((2968, 3016), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(-np.inf)', 'high': 'np.inf', 'shape': '(1,)'}), '(low=-np.inf, high=np.inf, shape=(1,))\n', (2978, 3016), False, 'from gym import spaces\n'), ((3040, 3088), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(-np.inf)', 'high': 'np.inf', 'shape': '(1,)'}), '(low=-np.inf, high=np.inf, shape=(1,))\n', (3050, 3088), False, 'from gym import spaces\n'), ((3112, 3160), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(-np.inf)', 'high': 'np.inf', 'shape': '(1,)'}), '(low=-np.inf, high=np.inf, shape=(1,))\n', (3122, 3160), False, 'from gym import spaces\n'), ((3186, 
3249), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(-np.inf)', 'high': 'np.inf', 'shape': 'self.features_shape'}), '(low=-np.inf, high=np.inf, shape=self.features_shape)\n', (3196, 3249), False, 'from gym import spaces\n'), ((5967, 5990), 'numpy.array', 'np.array', (['symbol_orders'], {}), '(symbol_orders)\n', (5975, 5990), True, 'import numpy as np\n'), ((17824, 17850), 'numpy.array', 'np.array', (['self.time_points'], {}), '(self.time_points)\n', (17832, 17850), True, 'import numpy as np\n'), ((18400, 18426), 'numpy.array', 'np.array', (['self.time_points'], {}), '(self.time_points)\n', (18408, 18426), True, 'import numpy as np\n')] |
# Remove constant variables from the AUSLAN multivariate time-series dataset:
# for each 45-step series, if any of the 22 variables is constant (zero
# variance) the whole series is dropped; surviving series are copied verbatim
# into *_CONST_ELIMINATED files.
import os, sys, numpy
variables_size = 22          # number of variables per time step
dataset_name = "AUSLAN_CUTTED"
ts_len = 45                  # number of time steps per series
#initializations
dirname = os.path.abspath(os.path.dirname(sys.argv[0]))
train_test_str = ["_TRAIN","_TEST"]
serie_counter = 0            # time steps collected for the current series
line_saver_vector = []       # raw lines of the current series (emptied to drop it)
variable_test = []           # per-variable value buffers for the current series
for i in range(0,variables_size):
    variable_test.append([])
for i in range(0,2):
    file_location = dirname + "/" + dataset_name + train_test_str[i]
    with open(file_location) as fin:
        newfile_name = dataset_name + "_CONST_ELIMINATED" + train_test_str[i]
        with open(newfile_name, "w") as newfile:
            for line in fin:
                aux_line = line.split()
                #file not standardized
                if len(aux_line) <= 4:
                    continue
                #end of ts reached, analyse
                if serie_counter == ts_len:
                    for j in range(0,variables_size):
                        # NOTE(review): numpy.float is removed in numpy>=1.20;
                        # use float here when upgrading — TODO confirm numpy version.
                        if numpy.var(numpy.array(variable_test[j]).astype(numpy.float)) == 0:
                            line_saver_vector = []
                    if len(line_saver_vector) != 0:
                        for j in line_saver_vector:
                            newfile.write(j)
                    for w in range(0,variables_size):
                        variable_test[w]=[]
                    line_saver_vector = []
                    serie_counter =0
                line_saver_vector.append(line)
                for j in range(0,variables_size):
                    # data values start at column 3 of each record line
                    variable_test[j].append(aux_line[3+j])
                serie_counter = serie_counter +1
            # flush the last buffered series of the file.
            # NOTE(review): this uses numpy.mean where the in-loop check uses
            # numpy.var — looks inconsistent; verify intended criterion.
            for j in range(0,variables_size):
                if numpy.mean(numpy.array(variable_test[j]).astype(numpy.float)) == 0:
                    line_saver_vector = []
            if len(line_saver_vector) != 0:
                for j in line_saver_vector:
                    newfile.write(j)
            for w in range(0,variables_size):
                variable_test[w]=[]
            line_saver_vector = []
            serie_counter =0
| [
"os.path.dirname",
"numpy.array"
] | [((130, 158), 'os.path.dirname', 'os.path.dirname', (['sys.argv[0]'], {}), '(sys.argv[0])\n', (145, 158), False, 'import os, sys, numpy\n'), ((1649, 1678), 'numpy.array', 'numpy.array', (['variable_test[j]'], {}), '(variable_test[j])\n', (1660, 1678), False, 'import os, sys, numpy\n'), ((931, 960), 'numpy.array', 'numpy.array', (['variable_test[j]'], {}), '(variable_test[j])\n', (942, 960), False, 'import os, sys, numpy\n')] |
"""Demonstrate the concordance index (c-index) ranking measure.

The c-index is a pairwise ranking measure; for bi-partite ranking
problems it is equivalent to AUC.
"""
import numpy as np
from rlscore.measure import cindex
# Bi-partite labels with real-valued predictions.
labels_a = [-1, -1, -1, 1, 1]
preds_a = [-5, 2, -1, 1, 3.2]
score_a = cindex(labels_a, preds_a)
print("My cindex is %f" % score_a)
# The measure also accepts real-valued label values.
labels_b = [-2.2, -1.3, -0.2, 0.5, 1.1]
# Ranking is almost correct; only the last two predictions are inverted.
preds_b = [-2.7, -1.1, 0.3, 0.6, 0.5]
score_b = cindex(labels_b, preds_b)
print("My cindex is %f" % score_b)
# For multi-target problems most performance measures average over columns.
Y_big = np.vstack((labels_a, labels_b)).T
P_big = np.vstack((preds_a, preds_b)).T
print(Y_big)
print(P_big)
print("(cind1+cind2)/2 %f" % ((score_a + score_b) / 2.))
print("is the same as cindex(Y_big, P_big) %f" % cindex(Y_big, P_big))
| [
"rlscore.measure.cindex",
"numpy.vstack"
] | [((213, 225), 'rlscore.measure.cindex', 'cindex', (['Y', 'P'], {}), '(Y, P)\n', (219, 225), False, 'from rlscore.measure import cindex\n'), ((422, 436), 'rlscore.measure.cindex', 'cindex', (['Y2', 'P2'], {}), '(Y2, P2)\n', (428, 436), False, 'from rlscore.measure import cindex\n'), ((564, 582), 'numpy.vstack', 'np.vstack', (['(Y, Y2)'], {}), '((Y, Y2))\n', (573, 582), True, 'import numpy as np\n'), ((593, 611), 'numpy.vstack', 'np.vstack', (['(P, P2)'], {}), '((P, P2))\n', (602, 611), True, 'import numpy as np\n'), ((736, 756), 'rlscore.measure.cindex', 'cindex', (['Y_big', 'P_big'], {}), '(Y_big, P_big)\n', (742, 756), False, 'from rlscore.measure import cindex\n')] |
#!/usr/bin/python3
#-*- coding:utf-8 -*-
'''
created on Feb.,2020
@Author:<NAME>
'''
import numpy as np
import os
import cv2
from cv2 import VideoWriter,VideoWriter_fourcc,imread,resize
from moviepy.editor import *
from moviepy.audio.fx import all
from PIL import Image
from pydub import AudioSegment
import time
# f = open('FolderName.csv', 'r')
# csvreader = csv.reader(f)
# FNs = list(csvreader)
# FolderName = FNs[0]
# Working directory expected to contain picture/, audio/ and video/ subfolders.
FolderName = 'course'
# Counts of slide images and audio clips; they drive the video timing below.
numP = len(os.listdir('{}/picture/'.format(FolderName)))
numA = len(os.listdir('{}/audio/'.format(FolderName)))
# global numP,numA
def AudioConcat():
    """Concatenate audio/0.mp3 .. audio/(numA-1).mp3 into video/result.mp3.

    Returns the string '0' (legacy sentinel, kept for compatibility) when
    there are no audio clips; otherwise writes the combined mp3 and
    returns None.
    """
    if not numA:
        return '0'
    clips = [AudioSegment.from_mp3('{}/audio/{}.mp3'.format(FolderName, k))
             for k in range(numA)]
    combined = clips[0]
    for clip in clips[1:]:
        combined += clip
    combined.export('{}/video/result.mp3'.format(FolderName), format='mp3')
def vframe():
    """Build a 1-fps slideshow video whose slide durations follow the audio clips.

    Each picture i is shown for as many seconds as separate the cumulative
    audio times of slide i and slide i+1, then the concatenated audio is
    muxed in via moviepy. Assumes picture filenames are '<index>.ext' where
    index selects the matching audio offset — TODO confirm naming scheme.
    """
    img_list=os.listdir('{}/picture/'.format(FolderName))
    # sort numerically by the filename stem (strip the 4-char extension)
    img_list.sort()
    img_list.sort(key = lambda x: int(x[0:-4]))
    cc = cv2.VideoWriter_fourcc(*'mp4v')
    # first image fixes the output frame size
    image = Image.open('{}/picture/'.format(FolderName) + img_list[0])
    videoWriter = cv2.VideoWriter('{}/video/course_video.mp4'.format(FolderName),cc,1,image.size)
    # audio_t[i] = cumulative duration (whole seconds) of audio clips 0..i
    audio_t = np.zeros(numA,dtype='float')
    audio_t[0]=int(len(AudioSegment.from_mp3('{}/audio/{}.mp3'.format(FolderName,0)))/1000)
    for i in range(1,numA):
        audio_t[i] = audio_t[i-1] + int(len(AudioSegment.from_mp3('{}/audio/{}.mp3'.format(FolderName,i)))/1000)
    # lead-in: repeat the first slide until its audio offset is reached
    for m in range(int(audio_t[int(img_list[0][0:-4])])):
        img_name = '{}/picture/'.format(FolderName)+img_list[0]
        frame = cv2.imread(img_name)
        videoWriter.write(frame)
    # one frame per second per slide; the last slide runs to the total duration
    for i in range(numP):
        for k in range(int(audio_t[int(img_list[int(i+1)*(i<numP-1)][0:-4])]*(i<numP-1)+audio_t[-1]*(i==numP-1) - audio_t[int(img_list[i][0:-4])])):
            img_name = '{}/picture/'.format(FolderName)+img_list[i]
            frame = cv2.imread(img_name)
            videoWriter.write(frame)
    videoWriter.release()
    # attach the concatenated audio track using a dummy (empty) text clip
    audio_clip = AudioFileClip('{}/video/{}.mp3'.format(FolderName,'result'))
    text_clip0 = TextClip('', fontsize = 13, color = 'black')
    text_clip0 = text_clip0.set_position('bottom')
    text_clip0 = text_clip0.set_duration(1).set_audio(audio_clip)
    CompositeVideoClip(text_clip0).write_videofile('{}/video/{}.mp4'.format(FolderName,'course_video'),codec = 'mp4v', fps = 1)
if __name__ == '__main__':
    # Time the video build; audio concatenation (AudioConcat) is assumed
    # to have been run already, so only the frame/mux step executes here.
    t_begin = time.time()
    vframe()
    t_end = time.time()
    print('running time:', t_end - t_begin, 's')
| [
"cv2.imread",
"numpy.zeros",
"cv2.VideoWriter_fourcc",
"time.time"
] | [((1006, 1037), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'mp4v'"], {}), "(*'mp4v')\n", (1028, 1037), False, 'import cv2\n'), ((1212, 1241), 'numpy.zeros', 'np.zeros', (['numA'], {'dtype': '"""float"""'}), "(numA, dtype='float')\n", (1220, 1241), True, 'import numpy as np\n'), ((2353, 2364), 'time.time', 'time.time', ([], {}), '()\n', (2362, 2364), False, 'import time\n'), ((2402, 2413), 'time.time', 'time.time', ([], {}), '()\n', (2411, 2413), False, 'import time\n'), ((1585, 1605), 'cv2.imread', 'cv2.imread', (['img_name'], {}), '(img_name)\n', (1595, 1605), False, 'import cv2\n'), ((1869, 1889), 'cv2.imread', 'cv2.imread', (['img_name'], {}), '(img_name)\n', (1879, 1889), False, 'import cv2\n')] |
from parallel_env import ParallelEnv
import parl
import argparse
import numpy as np
from parl.utils import logger
# example of ParallelEnv
def parallel_env():
    """Run a random policy on a batch of remote Atari environments.

    Connects to a local xparl cluster, seeds `args.env_num` parallel
    environments, then steps them with sampled actions until
    `args.max_episodes` episodes have finished, logging each episode.
    """
    logger.info("Running example of RemoteEnv in atari_env: {}".format(
        args.atari_env))
    parl.connect('localhost')
    envs = ParallelEnv(env_name=args.atari_env, env_num=args.env_num)
    envs.seed([np.random.randint(0, 100) for _ in range(args.env_num)])
    obs_list = envs.reset()
    # per-environment accumulators
    returns = [0] * args.env_num
    steps = [0] * args.env_num
    finished = 0
    # Run episodes with a random policy
    while finished < args.max_episodes:
        actions = envs.sample_actions()
        next_obs_list, rewards, dones, infos = envs.step(actions)
        for i in range(args.env_num):
            steps[i] += 1
            returns[i] += rewards[i]
            if dones[i]:
                finished += 1
                logger.info(
                    'Env{} done, total_steps {}, episode_reward {}'.format(
                        i, steps[i], returns[i]))
        obs_list = next_obs_list
def main():
    """Entry point: run the parallel-environment demo."""
    parallel_env()
if __name__ == '__main__':
    # Command-line configuration; `args` is a module-level name read by
    # parallel_env() above.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--atari_env",
        default='BreakoutNoFrameskip-v4',
        help='OpenAI gym/atari environment name')
    parser.add_argument(
        "--env_num", default=2, type=int, help='number of environment')
    parser.add_argument(
        "--max_episodes", default=2, type=int, help='episode of running')
    args = parser.parse_args()
    main()
| [
"parl.connect",
"numpy.random.randint",
"argparse.ArgumentParser",
"parallel_env.ParallelEnv"
] | [((263, 288), 'parl.connect', 'parl.connect', (['"""localhost"""'], {}), "('localhost')\n", (275, 288), False, 'import parl\n'), ((304, 362), 'parallel_env.ParallelEnv', 'ParallelEnv', ([], {'env_name': 'args.atari_env', 'env_num': 'args.env_num'}), '(env_name=args.atari_env, env_num=args.env_num)\n', (315, 362), False, 'from parallel_env import ParallelEnv\n'), ((1286, 1311), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1309, 1311), False, 'import argparse\n'), ((381, 406), 'numpy.random.randint', 'np.random.randint', (['(0)', '(100)'], {}), '(0, 100)\n', (398, 406), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
'''
Internal simulation of steering-wheel robots
'''
import rclpy
from rclpy.node import Node
from sensor_msgs.msg import JointState
from std_msgs.msg import Float32MultiArray
from geometry_msgs.msg import Twist
import numpy as np
# Integration time step [s] shared by every joint model in this module.
dt = 0.05
class Wheel:
    """Spinning wheel joint: integrates the wheel angle from its spin rate."""
    def __init__(self, joint):
        # +1/-1 depending on the joint axis orientation (0 marks a steering axis)
        self.sign = joint.wheel_sign
    def move(self, val, w):
        """Return the wheel angle one time step after `val`, spinning at rate w."""
        return val + w * dt * self.sign
class SteeringJoint:
    """Steering joint: integrates the steering angle, saturating at its limits."""
    def __init__(self, joint):
        # position limits; both None when the joint is unlimited
        self.low = joint.low
        self.up = joint.up
    def move(self, val, w):
        """Return the steering angle one time step after `val` at rate w,
        clamped into [low, up] when limits are defined."""
        angle = val + w*dt
        if self.low is not None:
            if angle > self.up:
                return self.up
            if angle < self.low:
                return self.low
        return angle
class Robot(Node):
    """Base ROS2 node simulating a wheeled robot's joint kinematics.

    Publishes JointState on 'joint_states' at the module rate `dt`,
    optionally republishes the resulting body Twist on 'cmd_vel'
    (parameter 'pub_cmd'), and listens for actuator commands on 'cmd'
    when the subclass declares a non-zero command dimension.
    Subclasses must implement update().
    """
    def __init__(self, joints, cmd_dim):
        super().__init__('kinematics')
        # joint state message reused on every publish; positions start at 0
        self.state = JointState()
        self.state.name = joints
        self.state.position = [0. for _ in range(len(joints))]
        self.joint_pub = self.create_publisher(JointState, 'joint_states', 5)
        # latest actuator command vector (length cmd_dim)
        self.cmd = [0. for _ in range(cmd_dim)]
        self.cmd_vel = Twist()
        if self.declare_parameter('pub_cmd', False).value:
            self.cmd_vel_pub = self.create_publisher(Twist, 'cmd_vel', 5)
        else:
            self.cmd_vel_pub = None
        if cmd_dim:
            self.cmd_sub = self.create_subscription(Float32MultiArray, 'cmd', self.cmd_callback, 5)
        self.timer = self.create_timer(dt, self.timer_callback)
    def timer_callback(self):
        """Advance the simulation one step and publish the result."""
        self.update()
        self.publish()
    def cmd_callback(self, msg):
        """Store the latest actuator command in place."""
        self.cmd[:] = msg.data
    def publish(self):
        """Publish the current joint state (and Twist, when enabled)."""
        self.state.header.stamp = self.get_clock().now().to_msg()
        self.joint_pub.publish(self.state)
        if self.cmd_vel_pub is not None:
            self.cmd_vel_pub.publish(self.cmd_vel)
    def set_params(self, names):
        """Expose the given instance attributes as declared ROS parameters."""
        for name in names:
            self.declare_parameter(name, getattr(self, name))
class Unicycle(Robot):
    """Differential-drive robot, commanded directly through cmd_vel."""
    def __init__(self, names, left, right, b, r):
        super().__init__(names, 0)
        # half track width and wheel radius
        self.b = b
        self.r = r
        self.set_params(['b','r'])
        # wheel joint models
        self.left = left
        self.right = right
        # unicycle actually subscribes to cmd_vel
        self.cmd_vel_sub = self.create_subscription(Twist, 'cmd_vel', self.cmd_vel_callback, 5)
    def cmd_vel_callback(self, msg):
        """Keep the most recent velocity command."""
        self.cmd_vel = msg
    def update(self):
        """Integrate both wheels one step from the latest Twist command."""
        v = self.cmd_vel.linear.x
        w = self.cmd_vel.angular.z
        spin_rates = ((0, self.left, (v + self.b*w)/self.r),
                      (1, self.right, (v - self.b*w)/self.r))
        for idx, joint, rate in spin_rates:
            self.state.position[idx] = joint.move(self.state.position[idx], rate)
class Bicycle(Robot):
    """Bicycle-model robot: steered driven front wheel, passive rear wheel.

    Commands on 'cmd' are (front wheel velocity, steering angle rate).
    """
    def __init__(self, names, front, rear, beta, L, r):
        super().__init__(names, 2)
        # wheelbase and wheel radius
        self.L = L
        self.r = r
        self.set_params(['L','r'])
        # joint models: two spinning wheels and the steering joint
        self.front = front
        self.rear = rear
        self.beta = beta
    def update(self):
        """Integrate one step from the latest (v, beta_dot) command."""
        v, beta_rate = self.cmd
        steer = self.state.position[2]
        # body twist induced by the steered front wheel
        vx = v * np.cos(steer)
        self.cmd_vel.linear.x = vx
        self.cmd_vel.angular.z = v * np.sin(steer) / self.L
        # wheel spin rates from the linear velocities
        rates = ((0, self.front, v/self.r),
                 (1, self.rear, vx/self.r),
                 (2, self.beta, beta_rate))
        for idx, joint, rate in rates:
            self.state.position[idx] = joint.move(self.state.position[idx], rate)
class TwoSteering(Robot):
    """Robot with two steerable driven wheels (front and rear).

    Commands on 'cmd' are (front wheel velocity, front steering rate,
    rear steering rate).
    """
    def __init__(self, names, front, rear, beta1, beta2, L, r):
        super().__init__(names, 3)
        # wheelbase and wheel radius
        self.L = L
        self.r = r
        self.set_params(['L','r'])
        # joint models
        self.front = front
        self.rear = rear
        self.beta1 = beta1
        self.beta2 = beta2
    def update(self):
        """Integrate one step from the latest (v1, b1_dot, b2_dot) command."""
        v1, steer1_rate, steer2_rate = self.cmd
        steer1, steer2 = self.state.position[2:4]
        c1, s1 = np.cos(steer1), np.sin(steer1)
        c2, s2 = np.cos(steer2), np.sin(steer2)
        # rear wheel velocity follows from the rolling constraint
        v2 = v1*c1/c2
        # body twist
        self.cmd_vel.linear.x = v1 * c1
        self.cmd_vel.linear.y = v2 * s2
        self.cmd_vel.angular.z = (v1*s1 - v2*s2) / self.L
        # integrate spinning wheels and steering joints
        for idx, joint, rate in ((0, self.front, v1/self.r),
                                 (1, self.rear, v2/self.r),
                                 (2, self.beta1, steer1_rate),
                                 (3, self.beta2, steer2_rate)):
            self.state.position[idx] = joint.move(self.state.position[idx], rate)
def create_robot():
    """Fetch the URDF from robot_state_publisher and build the matching Robot.

    Classifies the revolute/continuous joints into wheels vs steering axes
    from their axis orientation, then returns a Unicycle, Bicycle or
    TwoSteering node accordingly. Raises RuntimeError when the joint
    layout matches none of the three supported types.
    """
    # get model xml
    from rcl_interfaces.srv import GetParameters
    from urdf_parser_py.urdf import URDF
    node = Node('rsp_client')
    client = node.create_client(GetParameters, 'robot_state_publisher/get_parameters')
    client.wait_for_service()
    req = GetParameters.Request()
    req.names = ['robot_description']
    res = client.call_async(req)
    while rclpy.ok():
        rclpy.spin_once(node)
        if res.done():
            model = URDF.from_xml_string(res.result().values[0].string_value)
            break
    node.destroy_node()
    root = model.get_root()
    # skew-symmetric (cross-product) matrix of a 3-vector
    def sk(u):
        return np.matrix([[0,-u[2],u[1]],[u[2],0,-u[0]],[-u[1],u[0],0]])
    # rotation matrix of angle theta about unit axis u (Rodrigues formula)
    def Rot(theta,u):
        uuT = np.dot(np.reshape(u,(3,1)), np.reshape(u, (1,3)))
        return np.cos(theta)*np.eye(3) + np.sin(theta)*sk(u) + (1-np.cos(theta))*uuT
    # 4x4 homogeneous transform of a URDF joint origin (xyz + rpy)
    def Homogeneous(joint):
        t = np.array(joint.origin.position).reshape(3,1)
        rpy = joint.origin.rotation
        R = Rot(rpy[2],[0,0,1])*Rot(rpy[1],[0,1,0])*Rot(rpy[0],[1,0,0])
        return np.matrix(np.vstack((np.hstack((R,t)), [0,0,0,1])))
    # flatten a 1-row/1-column np.matrix to a plain list
    def to_list(t):
        return t.flatten().tolist()[0]
    class Joint:
        """A movable URDF joint with its world pose and wheel/steering role."""
        @staticmethod
        def identify(joints, prop):
            # order a pair of joints by decreasing value of attribute `prop`
            j1, j2 = joints
            if getattr(j2, prop) < getattr(j1, prop):
                return j1, j2
            return j2, j1
        def __init__(self, joint):
            self.name = joint.name
            self.low = self.up = None
            if joint.limit is not None:
                self.low = joint.limit.lower
                self.up = joint.limit.upper
                if self.low == self.up:
                    # degenerate limits are treated as no limits
                    self.low = self.up = None
            # get chain to root
            joints = [joint]
            while True:
                for prev in model.joints:
                    if prev.child == joints[-1].parent:
                        joints.append(prev)
                        break
                if joints[-1].parent == root:
                    break
            # compose transforms from root down to this joint
            M = np.matrix(np.eye(4))
            for j in reversed(joints):
                M *= Homogeneous(j)
            # get position
            self.pos = to_list(M[:3,3])
            self.x, self.y, self.z = self.pos
            # get horizontal (wheel + sign) / vertical
            self.axis = to_list(M[:3,:3] * np.matrix(joint.axis).T)
            # round the y-component of the axis to -1/0/+1: nonzero = wheel
            self.wheel_sign = min((-1,0,1), key = lambda y: abs(y-self.axis[1]))
    # parse model, guess role of each joint
    joints = [Joint(joint) for joint in model.joints if joint.type in ('continuous','revolute')]
    # identify wheels vs steering joints
    wheels = [joint for joint in joints if joint.wheel_sign]
    steering = [joint for joint in joints if not joint.wheel_sign]
    if len(wheels) != 2 or len(steering) not in (0,1,2):
        msg = ['Cannot identify robot type from its joints']
        for j in joints:
            msg.append(f' - {j.name} at {j.pos} with axis {j.axis}')
        raise(RuntimeError('\n'.join(msg)))
    # we assume the wheels have the same radius
    # (wheel height above the ground plane is taken as the radius)
    r = 0.5* (wheels[0].z + wheels[1].z)
    if len(steering) == 0:
        # unicycle
        left, right = Joint.identify(wheels, 'y')
        return Unicycle([left.name, right.name],
                        Wheel(left),
                        Wheel(right),
                        left.y - right.y,
                        r)
    front, rear = Joint.identify(wheels, 'x')
    if len(steering) == 1:
        return Bicycle([front.name, rear.name, steering[0].name],
                       Wheel(front),
                       Wheel(rear),
                       SteeringJoint(steering[0]),
                       front.x-rear.x,
                       r)
    # two-steering
    beta1, beta2 = Joint.identify(steering, 'x')
    return TwoSteering([front.name, rear.name, beta1.name, beta2.name],
                       Wheel(front),
                       Wheel(rear),
                       SteeringJoint(beta1),
                       SteeringJoint(beta2),
                       front.x-rear.x,
                       r)
# Node entry: build the simulated robot and spin until shutdown.
rclpy.init()
# guess one of the 3 robot types
robot = create_robot()
rclpy.spin(robot)
# NOTE(review): rclpy.shutdown() is not called after spin returns — confirm
# whether the launch environment handles context teardown.
robot.destroy_node()
| [
"numpy.matrix",
"rclpy.spin_once",
"rclpy.spin",
"rclpy.node.Node",
"rcl_interfaces.srv.GetParameters.Request",
"rclpy.init",
"geometry_msgs.msg.Twist",
"numpy.hstack",
"numpy.sin",
"sensor_msgs.msg.JointState",
"numpy.reshape",
"numpy.cos",
"numpy.array",
"rclpy.ok",
"numpy.eye"
] | [((9371, 9383), 'rclpy.init', 'rclpy.init', ([], {}), '()\n', (9381, 9383), False, 'import rclpy\n'), ((9442, 9459), 'rclpy.spin', 'rclpy.spin', (['robot'], {}), '(robot)\n', (9452, 9459), False, 'import rclpy\n'), ((5161, 5179), 'rclpy.node.Node', 'Node', (['"""rsp_client"""'], {}), "('rsp_client')\n", (5165, 5179), False, 'from rclpy.node import Node\n'), ((5307, 5330), 'rcl_interfaces.srv.GetParameters.Request', 'GetParameters.Request', ([], {}), '()\n', (5328, 5330), False, 'from rcl_interfaces.srv import GetParameters\n'), ((5412, 5422), 'rclpy.ok', 'rclpy.ok', ([], {}), '()\n', (5420, 5422), False, 'import rclpy\n'), ((892, 904), 'sensor_msgs.msg.JointState', 'JointState', ([], {}), '()\n', (902, 904), False, 'from sensor_msgs.msg import JointState\n'), ((1151, 1158), 'geometry_msgs.msg.Twist', 'Twist', ([], {}), '()\n', (1156, 1158), False, 'from geometry_msgs.msg import Twist\n'), ((5432, 5453), 'rclpy.spin_once', 'rclpy.spin_once', (['node'], {}), '(node)\n', (5447, 5453), False, 'import rclpy\n'), ((5669, 5734), 'numpy.matrix', 'np.matrix', (['[[0, -u[2], u[1]], [u[2], 0, -u[0]], [-u[1], u[0], 0]]'], {}), '([[0, -u[2], u[1]], [u[2], 0, -u[0]], [-u[1], u[0], 0]])\n', (5678, 5734), True, 'import numpy as np\n'), ((3420, 3432), 'numpy.cos', 'np.cos', (['beta'], {}), '(beta)\n', (3426, 3432), True, 'import numpy as np\n'), ((4352, 4365), 'numpy.cos', 'np.cos', (['beta1'], {}), '(beta1)\n', (4358, 4365), True, 'import numpy as np\n'), ((4367, 4380), 'numpy.sin', 'np.sin', (['beta1'], {}), '(beta1)\n', (4373, 4380), True, 'import numpy as np\n'), ((4397, 4410), 'numpy.cos', 'np.cos', (['beta2'], {}), '(beta2)\n', (4403, 4410), True, 'import numpy as np\n'), ((4412, 4425), 'numpy.sin', 'np.sin', (['beta2'], {}), '(beta2)\n', (4418, 4425), True, 'import numpy as np\n'), ((5771, 5792), 'numpy.reshape', 'np.reshape', (['u', '(3, 1)'], {}), '(u, (3, 1))\n', (5781, 5792), True, 'import numpy as np\n'), ((5792, 5813), 'numpy.reshape', 'np.reshape', (['u', '(1, 
3)'], {}), '(u, (1, 3))\n', (5802, 5813), True, 'import numpy as np\n'), ((3470, 3482), 'numpy.sin', 'np.sin', (['beta'], {}), '(beta)\n', (3476, 3482), True, 'import numpy as np\n'), ((5944, 5975), 'numpy.array', 'np.array', (['joint.origin.position'], {}), '(joint.origin.position)\n', (5952, 5975), True, 'import numpy as np\n'), ((7230, 7239), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (7236, 7239), True, 'import numpy as np\n'), ((5829, 5842), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (5835, 5842), True, 'import numpy as np\n'), ((5843, 5852), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (5849, 5852), True, 'import numpy as np\n'), ((5855, 5868), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (5861, 5868), True, 'import numpy as np\n'), ((5880, 5893), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (5886, 5893), True, 'import numpy as np\n'), ((6133, 6150), 'numpy.hstack', 'np.hstack', (['(R, t)'], {}), '((R, t))\n', (6142, 6150), True, 'import numpy as np\n'), ((7553, 7574), 'numpy.matrix', 'np.matrix', (['joint.axis'], {}), '(joint.axis)\n', (7562, 7574), True, 'import numpy as np\n')] |
r"""X-Path Factory :mod:`abelfunctions.complex_path_factory`
=================================================
Module for computing the monodromy group of the set of discriminant points
of a complex plane algebraic curve.
"""
import numpy
import scipy
from numpy import double, complex, floor, angle
from sage.all import infinity, QQbar, Graphics, scatter_plot
from sage.functions.other import real_part, imag_part
from abelfunctions.complex_path import (
ComplexLine,
ComplexArc,
ComplexPath,
)
class ComplexPathFactory(object):
    r"""Factory for computing complex paths on the x-projection of a Riemann surface
    determined by an algebraic curve :math:`C : f(x,y) = 0`.
    Since paths on a Riemann surface are computed via analytic continuation
    care needs to be taken when the x-part of the path gets close to a
    discriminant point of the algebraic curve from which the Riemann surface is
    derived. This is because some of the y-sheets of the curve, when considered
    as a covering of the complex x-plane, coalesce at the discriminant points.
    Therefore, "bounding circles" need to be computed at each discriminant
    point.
    Attributes
    ----------
    f : polynomial
        The plane algebraic curve defining the Riemann surface.
    base_point : complex
        If a base point isn't provided, one will be chosen.
    discriminant_points
        The discriminant points of the curve.
    discriminant_points_complex
        Floating point approximations of the discriminant points. Used for
        computational efficiency since converting from QQbar to CDF is slow
    radii
        Bounding circle radii, ordered like the discriminant points. The
        radii are scaled by a factor kappa in (0.5, 1.0] given at
        construction time.
    Methods
    -------
    .. autosummary::
      closest_discriminant_point
      radius
      intersecting_discriminant_points
      intersects_discriminant_point
      intersection_points
      path_to_discriminant_point
      path
      monodromy_path
      monodromy_path_infinity
      show_paths
    """
    @property
    def base_point(self):
        """The base x-point from which monodromy paths start."""
        return self._base_point
    @property
    def discriminant_points(self):
        """The (exact) discriminant points, ordered from the base point."""
        return self._discriminant_points
    @property
    def discriminant_points_complex(self):
        """Floating point approximations of the discriminant points."""
        return self._discriminant_points_complex
    @property
    def radii(self):
        """Bounding circle radii, ordered like `discriminant_points`."""
        return self._radii
    def __init__(self, f, base_point=None, kappa=3./5.):
        """Initialize a complex path factory.
        Complex path factories require a base point from which most complex
        paths begin on a Riemann surface. In particular, this base point is
        used as the base point in constructing the monodromy group of the
        Riemann surface.
        Parameters
        ----------
        f : polynomial
            The plane algebraic curve defining the Riemann surface.
        base_point : complex
            The base point of the factory and of the monodromy group of the
            Riemann surface. If not provided one will be chosen based on the
            discriminant point placement.
        kappa : double
            A scaling factor used to determine the radii of the "bounding
            circles" around each discriminant point. `kappa = 1.0` means the
            bounding circles are made as large as possible, resulting in
            possibly touching circles between two or more discriminant points.
        """
        self.f = f
        # compute the discriminant points and determine a base point if none
        # was provided
        b,d,dc = self._compute_discriminant_points(base_point)
        self._base_point = b
        self._discriminant_points = d
        self._discriminant_points_complex = dc
        # compute the bounding circle radii from the discriminant points;
        # raises ValueError if the base point falls inside a bounding circle
        r = self._compute_radii(kappa)
        self._radii = r
    def _compute_discriminant_points(self, base_point):
        r"""Computes and stores the discriminant points of the underlying curve.
        A discriminant point :math:`x=b` is an x-point where at least one
        y-root lying above has multiplicity greater than one. A
        :class:`PuiseuxTSeries` is required to represent a place on the Riemann
        surface whose x-projection is a discriminant point. These kinds of
        places are of type :class:`DiscriminantPlace`.
        .. note::
            The ordering of the discriminant points is important for the
            purposes of computing the monodromy group, which is done in the
            :class:`RiemannSurfacePathFactory` attribute, `PathFactory`.
        Parameters
        ----------
        base_point : complex or None
            A user-provided base point, or None to choose one automatically.
        Returns
        -------
        tuple
            The base point, the ordered exact discriminant points, and their
            floating point approximations.
        """
        # compute the symbolic and numerical discriminant points:
        # roots of Res_y(f, df/dy) as algebraic numbers (QQbar)
        f = self.f
        x,y = f.parent().gens()
        res = f.resultant(f.derivative(y), y).univariate_polynomial()
        rts = res.roots(ring=QQbar, multiplicities=False)
        discriminant_points = numpy.array(rts)
        discriminant_points_complex = numpy.array(rts, dtype=complex)
        # determine a base_point, if not specified: an integer strictly to the
        # left of the left-most discriminant point
        if not base_point:
            a = min(complex(bi).real for bi in discriminant_points)
            a = a - 1
            aint = complex(floor(a))
            base_point = aint
        # sort the discriminant points first by argument with the base point
        # and then by distance from the base point. the points need to be exact
        centered_points = discriminant_points_complex - base_point
        distances = abs(centered_points)
        arguments = angle(centered_points)
        sort_index = numpy.lexsort((distances, arguments))
        # sort and return
        discriminant_points = discriminant_points[sort_index]
        discriminant_points_complex = discriminant_points_complex[sort_index]
        return base_point, discriminant_points, discriminant_points_complex
def closest_discriminant_point(self, x, exact=True):
r"""Returns the closest discriminant point to a point x.
An often-used helper function by several components of
:class:`RiemannSurface`.
Parameters
----------
x : complex
A complex x-point.
exact : boolean
If `True`, returns a `sympy.Expr` representing the discriminant
point exactly. Otherwise, returns a numerical approximation.
Returns
-------
complex or sympy.Expr
The discriminant point, either exact or numerical.
"""
# use floating points approximations for performance
b = self.discriminant_points_complex
x = complex(x)
idx = numpy.argmin(abs(b - x))
if exact:
return self.discriminant_points[idx]
return self.discriminant_points_complex[idx]
    def _compute_radii(self, kappa):
        """Returns the radii of the bounding circles.
        Parameters
        ----------
        kappa : double
            A scaling factor between 0.5 and 1.0. `kappa = 1.0` means that the
            bounding circles are taken to be as large as possible without
            overlapping.
        Returns
        -------
        radii : array
            An ordered list of radii. The radius at index `k` is associated
            with the discriminant point at index `k` in
            `self.discriminant_points`.
        Raises
        ------
        ValueError
            If the base point lies inside any bounding circle.
        """
        # special case when there is only one finite discriminant point: take
        # the distance from the base point to the discriminant point (scaled by
        # kappa, of course)
        if len(self.discriminant_points_complex) == 1:
            b = self.discriminant_points_complex[0]
            radius = numpy.abs(self.base_point - b)
            radius *= kappa/2.0
            radii = numpy.array([radius], dtype=double)
            return radii
        # when there is more than one discriminant point we scale distances
        # accordingly. coerce to numerical.
        radii = []
        b = self.discriminant_points_complex
        for bi in b:
            # half the distance to the nearest other discriminant point,
            # scaled by kappa
            dists = [abs(bi - bj) for bj in self.discriminant_points_complex
                     if bi != bj]
            rho = min(dists)
            radius = rho*kappa/2.0
            radii.append(radius)
        radii = numpy.array(radii, dtype=double)
        # final check: assert that the base point is sufficiently far away from
        # the discriminant points
        dists = [abs(bi - self.base_point) for bi in b]
        dists = numpy.array(dists, dtype=double) - radii
        if any(dists < 0):
            raise ValueError('Base point lies in the bounding circles of the '
                             'discriminant points. Use different base point or '
                             'circle scaling factor kappa.')
        return radii
def radius(self, bi):
"""Returns the raidus of the bounding circle around `bi`.
Parameters
----------
bi : complex
A discriminant point of the algebraic curve.
Returns
-------
radius : double
The radius of the bounding circle.
"""
# find the index where bi appears in the list of discriminant points.
# it's done numerically in case a numerical approximation bi is given
bi = complex(bi)
index = 0
for z in self.discriminant_points_complex:
if abs(z-bi) < 1e-14:
break
index += 1
# raise an error if not found
if index == len(self.discriminant_points_complex):
raise ValueError('%s is not a discriminant point of %s'%(bi,f))
radius = self.radii[index]
return radius
def intersecting_discriminant_points(self, z0, z1, exact=False):
r"""Return the discriminant points which are too close to the line from
`z0` to `z1` along with the corresponding orientations.
Parameters
----------
z0 : complex
Line start.
z1 : complex
Line end.
Returns
-------
"""
if exact:
points = [bi for bi in self.discriminant_points
if self.intersects_discriminant_point(z0, z1, bi)]
else:
points = [bi for bi in self.discriminant_points_complex
if self.intersects_discriminant_point(z0, z1, bi)]
return points
    def intersects_discriminant_point(self, z0, z1, bi):
        """Returns `True` if the line from `z0` to `z1` intersects the bounding circle
        around the discriminant point `bi`.
        Parameters
        ----------
        z0 : complex
            Line starting point.
        z1 : complex
            Line ending point.
        bi : complex
            A discriminant point.
        Returns
        -------
        is_intersecting : bool
            `True` if the line from `z0` to `z1` gets too close to `bi`.
        """
        # first check the perpendicular distance from bi to the line
        # passing through z0 and z1
        z0 = complex(z0)
        z1 = complex(z1)
        bi = complex(bi)
        # direction: which side of the line z0-z1 the point bi lies on,
        # measured by the difference in arguments from z0
        direction = numpy.sign(angle(z1-z0) - angle(bi-z0))
        normv = abs(z1-z0)
        # v: the line direction rotated 90 degrees toward bi
        v = 1.0j*direction*(z1 - z0)
        r = z0 - bi
        # degenerate case: the line through z0 and z1 crosses bi. in this case
        # just check if the branch point lies in between
        if direction == 0:
            if (abs(bi - z0) <= normv) and (abs(bi - z1) <= normv):
                return True
            return False
        # return False if the distance from the _line_ passing through
        # z0 and z1 to bi is greater than the radius of the bounding
        # circle.
        distance = (v.real*r.real + v.imag*r.imag)
        distance = distance / normv
        if distance > self.radius(bi):
            return False
        # also need to check if bi "lies between" the _line segment_
        # between z0 and z1. use the distance vector w = d*v/|v|. the
        # distance from vtilde to z0 and z1 should be less than the
        # distance between z0 and z1
        w = distance*v/normv + bi
        if (abs(w - z0) <= normv) and (abs(w - z1) <= normv):
            return True
        return False
    def intersection_points(self, z0, z1, b, R):
        """Returns the complex points `w0,w1` where the line from `z0` to `z1`
        intersects the bounding circle around `b`.
        Parameters
        ----------
        z0 : complex
            Line starting point.
        z1 : complex
            Line ending point.
        b : complex
            A discriminant point.
        R : double
            The radius of the circle around `b`.
        Returns
        -------
        w0, w1 : complex
            Points on the bounding circle of `b` where the line z0-z1
            intersects, ordered by increasing distance from `z0`.
        """
        # special case when z1 = b: return (twice) the single point where the
        # segment from z0 toward b meets the bounding circle
        if abs(z1 - b) < 1e-14:
            R = self.radius(b)
            b = complex(b)
            l = lambda s: z0 + (b - z0)*s
            s = 1.0 - R/abs(z0 - b)
            z = l(s)
            return z,z
        # construct the polynomial giving the distance from the line l(t),
        # parameterized by t in [0,1], to bi.
        z0 = complex(z0)
        z1 = complex(z1)
        b = complex(b)
        R = double(R)
        v = z1 - z0
        w = z0 - b
        p2 = v.real**2 + v.imag**2
        p1 = 2*(v.real*w.real + v.imag*w.imag)
        p0 = w.real**2 + w.imag**2 - R**2 # solving |l(t) - bi| = Ri
        # find the roots of this polynomial and sort by increasing t
        p = numpy.poly1d([p2, p1, p0])
        t = numpy.roots(p)
        t.sort()
        # compute ordered intersection points
        w0 = v*t[0] + z0 # first intersection point
        w1 = v*t[1] + z0 # second intersection point
        return w0,w1
    def path_to_discriminant_point(self, bi):
        r"""Returns the complex path to the bounding circle around `bi` which avoids
        other discriminant points.
        This is a specific implementation of the routine used in
        :meth:`path_to_point`. Although similar, this routine takes branch
        point ordering into account when determining whether to go above or
        below intersecting discriminant points. (See
        :meth:`intersecting_discriminant_points`)
        Parameters
        ----------
        bi : complex
            A discriminant / branch point of the curve.
        Returns
        -------
        gamma : ComplexPath
            The corresponding monodromy path.
        Raises
        ------
        ValueError
            If `bi` is not (numerically) a discriminant point of the curve.
        See Also
        --------
        intersecting_discriminant_points
        path_to_point
        """
        # make sure we have the discriminant point exactly
        point = self.closest_discriminant_point(bi, exact=True)
        if abs(complex(point) - complex(bi)) > 1e-4:
            raise ValueError('%s is not a discriminant point of %s'%(bi,self.f))
        bi = point
        Ri = self.radius(bi)
        # compute the list points we need to stay sufficiently away from and
        # sort them in increasing distance from the base point
        z0 = self.base_point
        _,z1 = self.intersection_points(z0, complex(bi), bi, Ri)
        points_to_avoid = self.intersecting_discriminant_points(z0, z1, exact=False)
        points_to_avoid.sort(key=lambda bj: abs(bj-z0))
        # determine the relative orientations of the avoiding discriminant
        # points with the point bi. recall that the ordering of discriminant
        # points establishes the orientation. (points earlier in the list lie
        # below those later in the list.)
        #
        # positive/negative orientation with a given bj means we need to go
        # above/below bj, respectively.
        orientations = []
        i = numpy.argwhere(self.discriminant_points == bi).item(0)
        for bj in points_to_avoid:
            j = numpy.argwhere(self.discriminant_points_complex == bj).item(0)
            if i < j:
                orientations.append(-1)
            else:
                orientations.append(1)
        # we now have sorted orientations and points to avoid. for each such
        # point:
        #
        # 1. determine the points of intersection with the bounding circle
        # 2. determine the appropriate arc along the bounding circle
        # 3. construct the path segment using a line (if necessary) and the arc
        segments = []
        for j in range(len(points_to_avoid)):
            bj = points_to_avoid[j]
            oj = orientations[j]
            Rj = self.radius(bj)
            w0,w1 = self.intersection_points(z0,z1,bj,Rj)
            arc = self.avoiding_arc(w0,w1,bj,Rj,orientation=oj)
            # a connecting line is only needed when z0 is not already on the
            # bounding circle of bj
            if abs(z0-w0) > 1e-14:
                segments.append(ComplexLine(z0,w0))
            segments.append(arc)
            # repeat by setting the new "start point" to be w1, the last point
            # reached on the arc.
            z0 = w1
        # build the avoiding path from the segments
        segments.append(ComplexLine(z0,z1))
        if len(segments) == 1:
            path = segments[0]
        else:
            path = ComplexPath(segments)
        return path
def path(self, z0, z1):
r"""Returns the complex path to the bounding circle around `bi` which avoids
other discriminant points.
This is a specific implementation of the routine used in :meth:`path`.
Although similar, this routine takes branch point ordering into account
when determining whether to go above or below intersecting discriminant
points. (See :meth:`intersecting_discriminant_points`)
Parameters
----------
bi : complex
A discriminant / branch point of the curve.
Returns
-------
gamma : ComplexPath
The corresponding monodromy path.
See Also
--------
intersecting_discriminant_points
path
"""
# compute the list points we need to stay sufficiently away from and
# sort them in increasing distance from the base point
points_to_avoid = self.intersecting_discriminant_points(z0, z1, exact=False)
points_to_avoid.sort(key=lambda bj: abs(bj-z0))
# for each points we want to avoid
#
# 1. determine the points of intersection with the bounding circle
# 2. determine the appropriate arc along the bounding circle
# 3. construct the path segment using a line (if necessary) and the arc
segments = []
for j in range(len(points_to_avoid)):
bj = points_to_avoid[j]
Rj = self.radius(bj)
w0,w1 = self.intersection_points(z0,z1,bj,Rj)
arc = self.avoiding_arc(w0,w1,bj,Rj)
if abs(z0-w0) > 1e-14:
segments.append(ComplexLine(z0,w0))
segments.append(arc)
# repeat by setting the new "start point" to be w1, the last point
# reached on the arc.
z0 = w1
# append the final line and build the avoiding path from the segments
segments.append(ComplexLine(z0,z1))
if len(segments) == 1:
path = segments[0]
else:
path = ComplexPath(segments)
return path
def monodromy_path(self, bi, nrots=1):
"""Returns the complex path starting from the base point, going around the
discriminant point `bi` `nrots` times, and returning to the base
x-point.
The sign of `nrots` indicates the sign of the direction.
Parameters
----------
bi : complex
A discriminant point.
nrots : integer (default `1`)
A number of rotations around this discriminant point.
Returns
-------
path : ComplexPath
A complex path representing the monodromy path with `nrots`
rotations about the discriminant point `bi`.
"""
if bi in [infinity, numpy.Infinity, 'oo']:
return self.monodromy_path_infinity(nrots=nrots)
path_to_bi = self.path_to_discriminant_point(bi)
# determine the rotational path around the discriminant point
z = path_to_bi(1.0)
bi = complex(bi)
Ri = self.radius(bi)
theta = angle(z - bi)
dtheta = numpy.pi if nrots > 0 else -numpy.pi
circle = ComplexArc(Ri, bi, theta, dtheta) + \
ComplexArc(Ri, bi, theta + dtheta, dtheta)
path_around_bi = circle
for _ in range(abs(nrots)-1):
path_around_bi += circle
# the monodromy path is the sum of the path to the point, the
# rotational part, and the return path to the base point
path = path_to_bi + path_around_bi + path_to_bi.reverse()
return path
def monodromy_path_infinity(self, nrots=1):
"""Returns the complex path starting at the base point, going around
infinity `nrots` times, and returning to the base point.
This path is sure to not only encircle all of the discriminant
points but also stay sufficiently outside the bounding circles
of the points.
Parameters
----------
nrots : integer, (default `1`)
The number of rotations around infinity.
Returns
-------
RiemannSurfacePath
The complex path encircling infinity.
"""
path = []
# determine the radius R of the circle, centered at the origin,
# encircling all of the discriminant points and the bounding circles
b = self.discriminant_points
R = numpy.abs(self.base_point)
for bi in b:
radius = self.radius(bi)
Ri = numpy.abs(bi) + 2*radius # to be safely away
R = Ri if Ri > R else R
# the path begins with a line starting at the base point and ending at
# the point -R (where the circle will begin)
path = ComplexLine(self.base_point, -R)
# the positive direction around infinity is equal to the
# negative direction around the origin
dtheta = -numpy.pi if nrots > 0 else numpy.pi
for _ in range(abs(nrots)):
path += ComplexArc(R, 0, numpy.pi, dtheta)
path += ComplexArc(R, 0, 0, dtheta)
# return to the base point
path += ComplexLine(-R, self.base_point)
# determine if the circle actually touches the base point. this occurs
# when the base point is further away from the origin than the bounding
# circles of discriminant points. in this case, the path only consists
# of the arcs defining the circle
if abs(self.base_point + R) < 1e-15:
path = ComplexPath(path.segments[1:-1])
return path
def show_paths(self, *args, **kwds):
"""Plots all of the monodromy paths of the curve.
Returns
-------
None
"""
# fill the bounding circles around each discriminant point
a = complex(self.base_point)
b = numpy.array(self.discriminant_points, dtype=complex)
# plot the base point and the discriminant points
pts = [(a.real, a.imag)]
plt = scatter_plot(pts, facecolor='red', **kwds)
pts = zip(b.real, b.imag)
plt += scatter_plot(pts, facecolor='black', **kwds)
# plot the monodromy paths
for bi in b:
path = self.monodromy_path(bi)
plt += path.plot(**kwds)
return plt
    def avoiding_arc(self, w0, w1, b, R, orientation=None):
        """Returns a `ComplexArc` from `w0` to `w1` along the bounding circle
        of radius `R` around the discriminant point `b`.
        The arc is constructed in such a way so that the monodromy properties
        of the path are conserved.
        Parameters
        ----------
        w0 : complex
            The starting point of the arc on the bounding circle of `b`.
        w1 : complex
            The ending point of the arc on the bounding circle of `b`.
        b : complex
            The discriminant point to avoid.
        R : double
            The radius of the bounding circle.
        orientation : int, optional
            If given, +1/-1 forces the arc to pass above/below `b`,
            overriding the geometric default.
        Returns
        -------
        arc : ComplexArc
            An arc from `w0` to `w1` around `b`.
        """
        w0 = complex(w0)
        w1 = complex(w1)
        b = complex(b)
        R = double(R)
        # ASSUMPTION: Re(w0) < Re(w1)
        if w0.real >= w1.real:
            raise ValueError('Cannot construct avoiding arc: all paths must '
                             'travel from left to right unless "reversed".')
        # ASSERTION: w0 and w1 lie on the circle of radius R centered at b
        R0 = abs(w0 - b)
        R1 = abs(w1 - b)
        if abs(R0 - R) > 1e-13 or abs(R1 - R) > 1e-13:
            raise ValueError('Cannot construct avoiding arc: '
                             '%s and %s must lie on the bounding circle of '
                             'radius %s centered at %s'%(w0,w1,R,b))
        # degenerate case: w0, b, w1 are co-linear
        #
        # if no orientation is provided then go above. otherwise, adhere to the
        # orientation: orientation = +1/-1 means the path goes above/below
        phi_w0_w1 = numpy.angle(w1-w0)
        phi_w0_b = numpy.angle(b-w0)
        if abs(phi_w0_w1 - phi_w0_b) < 1e-13:
            theta0 = numpy.angle(w0-b)
            dtheta = -numpy.pi # default above
            if not orientation is None:
                dtheta *= orientation
            return ComplexArc(R, b, theta0, dtheta)
        # otherwise: w0, b, w1 are not co-linear
        #
        # first determine if the line from w0 to w1 is above or below the
        # branch point b. this will determine if dtheta is negative or
        # positive, respectively
        if phi_w0_b <= phi_w0_w1:
            dtheta_sign = -1
        else:
            dtheta_sign = 1
        # now determine the angle between w0 and w1 on the circle. since w0,
        # b, and w1 are not colinear this angle must be normalized to be in
        # the interval (-pi,pi)
        theta0 = numpy.angle(w0 - b)
        theta1 = numpy.angle(w1 - b)
        dtheta = theta1 - theta0
        if dtheta > numpy.pi:
            dtheta = 2*numpy.pi - dtheta
        elif dtheta < -numpy.pi:
            dtheta = 2*numpy.pi + dtheta
        # sanity check: |dtheta| should be less than pi
        if abs(dtheta) >= numpy.pi:
            raise ValueError('Cannot construct avoiding arc: '
                             '|dtheta| must be less than pi.')
        dtheta = dtheta_sign * abs(dtheta)
        # finally, take orientation into account. orientation is a stronger
        # condition than the above computations.
        #
        # in the case when the signs of the orientation and the dtheta are
        # opposite then do nothing since: orientation = +1/-1 implies go
        # above/below implies dtheta negative/positive.
        #
        # when the signs are same then make adjustments:
        if not orientation is None:
            if orientation == 1 and dtheta > 0:
                dtheta = dtheta - 2*numpy.pi
            elif orientation == -1 and dtheta < 0:
                dtheta = 2*numpy.pi + dtheta
        # add the path from w0 to w1 going around b
        arc = ComplexArc(R, b, theta0, dtheta)
        return arc
| [
"numpy.poly1d",
"numpy.roots",
"numpy.abs",
"abelfunctions.complex_path.ComplexLine",
"numpy.double",
"abelfunctions.complex_path.ComplexArc",
"numpy.angle",
"numpy.lexsort",
"numpy.floor",
"sage.all.scatter_plot",
"abelfunctions.complex_path.ComplexPath",
"numpy.array",
"numpy.argwhere",
... | [((5105, 5121), 'numpy.array', 'numpy.array', (['rts'], {}), '(rts)\n', (5116, 5121), False, 'import numpy\n'), ((5160, 5191), 'numpy.array', 'numpy.array', (['rts'], {'dtype': 'complex'}), '(rts, dtype=complex)\n', (5171, 5191), False, 'import numpy\n'), ((5714, 5736), 'numpy.angle', 'angle', (['centered_points'], {}), '(centered_points)\n', (5719, 5736), False, 'from numpy import double, complex, floor, angle\n'), ((5758, 5795), 'numpy.lexsort', 'numpy.lexsort', (['(distances, arguments)'], {}), '((distances, arguments))\n', (5771, 5795), False, 'import numpy\n'), ((6778, 6788), 'numpy.complex', 'complex', (['x'], {}), '(x)\n', (6785, 6788), False, 'from numpy import double, complex, floor, angle\n'), ((8408, 8440), 'numpy.array', 'numpy.array', (['radii'], {'dtype': 'double'}), '(radii, dtype=double)\n', (8419, 8440), False, 'import numpy\n'), ((9433, 9444), 'numpy.complex', 'complex', (['bi'], {}), '(bi)\n', (9440, 9444), False, 'from numpy import double, complex, floor, angle\n'), ((11195, 11206), 'numpy.complex', 'complex', (['z0'], {}), '(z0)\n', (11202, 11206), False, 'from numpy import double, complex, floor, angle\n'), ((11220, 11231), 'numpy.complex', 'complex', (['z1'], {}), '(z1)\n', (11227, 11231), False, 'from numpy import double, complex, floor, angle\n'), ((11245, 11256), 'numpy.complex', 'complex', (['bi'], {}), '(bi)\n', (11252, 11256), False, 'from numpy import double, complex, floor, angle\n'), ((13381, 13392), 'numpy.complex', 'complex', (['z0'], {}), '(z0)\n', (13388, 13392), False, 'from numpy import double, complex, floor, angle\n'), ((13406, 13417), 'numpy.complex', 'complex', (['z1'], {}), '(z1)\n', (13413, 13417), False, 'from numpy import double, complex, floor, angle\n'), ((13430, 13440), 'numpy.complex', 'complex', (['b'], {}), '(b)\n', (13437, 13440), False, 'from numpy import double, complex, floor, angle\n'), ((13453, 13462), 'numpy.double', 'double', (['R'], {}), '(R)\n', (13459, 13462), False, 'from numpy import double, 
complex, floor, angle\n'), ((13737, 13763), 'numpy.poly1d', 'numpy.poly1d', (['[p2, p1, p0]'], {}), '([p2, p1, p0])\n', (13749, 13763), False, 'import numpy\n'), ((13776, 13790), 'numpy.roots', 'numpy.roots', (['p'], {}), '(p)\n', (13787, 13790), False, 'import numpy\n'), ((20380, 20391), 'numpy.complex', 'complex', (['bi'], {}), '(bi)\n', (20387, 20391), False, 'from numpy import double, complex, floor, angle\n'), ((20437, 20450), 'numpy.angle', 'angle', (['(z - bi)'], {}), '(z - bi)\n', (20442, 20450), False, 'from numpy import double, complex, floor, angle\n'), ((21777, 21803), 'numpy.abs', 'numpy.abs', (['self.base_point'], {}), '(self.base_point)\n', (21786, 21803), False, 'import numpy\n'), ((22109, 22141), 'abelfunctions.complex_path.ComplexLine', 'ComplexLine', (['self.base_point', '(-R)'], {}), '(self.base_point, -R)\n', (22120, 22141), False, 'from abelfunctions.complex_path import ComplexLine, ComplexArc, ComplexPath\n'), ((22500, 22532), 'abelfunctions.complex_path.ComplexLine', 'ComplexLine', (['(-R)', 'self.base_point'], {}), '(-R, self.base_point)\n', (22511, 22532), False, 'from abelfunctions.complex_path import ComplexLine, ComplexArc, ComplexPath\n'), ((23169, 23193), 'numpy.complex', 'complex', (['self.base_point'], {}), '(self.base_point)\n', (23176, 23193), False, 'from numpy import double, complex, floor, angle\n'), ((23206, 23258), 'numpy.array', 'numpy.array', (['self.discriminant_points'], {'dtype': 'complex'}), '(self.discriminant_points, dtype=complex)\n', (23217, 23258), False, 'import numpy\n'), ((23365, 23407), 'sage.all.scatter_plot', 'scatter_plot', (['pts'], {'facecolor': '"""red"""'}), "(pts, facecolor='red', **kwds)\n", (23377, 23407), False, 'from sage.all import infinity, QQbar, Graphics, scatter_plot\n'), ((23457, 23501), 'sage.all.scatter_plot', 'scatter_plot', (['pts'], {'facecolor': '"""black"""'}), "(pts, facecolor='black', **kwds)\n", (23469, 23501), False, 'from sage.all import infinity, QQbar, Graphics, scatter_plot\n'), 
((24470, 24481), 'numpy.complex', 'complex', (['w0'], {}), '(w0)\n', (24477, 24481), False, 'from numpy import double, complex, floor, angle\n'), ((24495, 24506), 'numpy.complex', 'complex', (['w1'], {}), '(w1)\n', (24502, 24506), False, 'from numpy import double, complex, floor, angle\n'), ((24519, 24529), 'numpy.complex', 'complex', (['b'], {}), '(b)\n', (24526, 24529), False, 'from numpy import double, complex, floor, angle\n'), ((24542, 24551), 'numpy.double', 'double', (['R'], {}), '(R)\n', (24548, 24551), False, 'from numpy import double, complex, floor, angle\n'), ((25407, 25427), 'numpy.angle', 'numpy.angle', (['(w1 - w0)'], {}), '(w1 - w0)\n', (25418, 25427), False, 'import numpy\n'), ((25445, 25464), 'numpy.angle', 'numpy.angle', (['(b - w0)'], {}), '(b - w0)\n', (25456, 25464), False, 'import numpy\n'), ((26275, 26294), 'numpy.angle', 'numpy.angle', (['(w0 - b)'], {}), '(w0 - b)\n', (26286, 26294), False, 'import numpy\n'), ((26312, 26331), 'numpy.angle', 'numpy.angle', (['(w1 - b)'], {}), '(w1 - b)\n', (26323, 26331), False, 'import numpy\n'), ((27471, 27503), 'abelfunctions.complex_path.ComplexArc', 'ComplexArc', (['R', 'b', 'theta0', 'dtheta'], {}), '(R, b, theta0, dtheta)\n', (27481, 27503), False, 'from abelfunctions.complex_path import ComplexLine, ComplexArc, ComplexPath\n'), ((7833, 7863), 'numpy.abs', 'numpy.abs', (['(self.base_point - b)'], {}), '(self.base_point - b)\n', (7842, 7863), False, 'import numpy\n'), ((7916, 7951), 'numpy.array', 'numpy.array', (['[radius]'], {'dtype': 'double'}), '([radius], dtype=double)\n', (7927, 7951), False, 'import numpy\n'), ((8628, 8660), 'numpy.array', 'numpy.array', (['dists'], {'dtype': 'double'}), '(dists, dtype=double)\n', (8639, 8660), False, 'import numpy\n'), ((13113, 13123), 'numpy.complex', 'complex', (['b'], {}), '(b)\n', (13120, 13123), False, 'from numpy import double, complex, floor, angle\n'), ((15324, 15335), 'numpy.complex', 'complex', (['bi'], {}), '(bi)\n', (15331, 15335), False, 'from 
numpy import double, complex, floor, angle\n'), ((17164, 17183), 'abelfunctions.complex_path.ComplexLine', 'ComplexLine', (['z0', 'z1'], {}), '(z0, z1)\n', (17175, 17183), False, 'from abelfunctions.complex_path import ComplexLine, ComplexArc, ComplexPath\n'), ((17279, 17300), 'abelfunctions.complex_path.ComplexPath', 'ComplexPath', (['segments'], {}), '(segments)\n', (17290, 17300), False, 'from abelfunctions.complex_path import ComplexLine, ComplexArc, ComplexPath\n'), ((19258, 19277), 'abelfunctions.complex_path.ComplexLine', 'ComplexLine', (['z0', 'z1'], {}), '(z0, z1)\n', (19269, 19277), False, 'from abelfunctions.complex_path import ComplexLine, ComplexArc, ComplexPath\n'), ((19373, 19394), 'abelfunctions.complex_path.ComplexPath', 'ComplexPath', (['segments'], {}), '(segments)\n', (19384, 19394), False, 'from abelfunctions.complex_path import ComplexLine, ComplexArc, ComplexPath\n'), ((20522, 20555), 'abelfunctions.complex_path.ComplexArc', 'ComplexArc', (['Ri', 'bi', 'theta', 'dtheta'], {}), '(Ri, bi, theta, dtheta)\n', (20532, 20555), False, 'from abelfunctions.complex_path import ComplexLine, ComplexArc, ComplexPath\n'), ((20577, 20619), 'abelfunctions.complex_path.ComplexArc', 'ComplexArc', (['Ri', 'bi', '(theta + dtheta)', 'dtheta'], {}), '(Ri, bi, theta + dtheta, dtheta)\n', (20587, 20619), False, 'from abelfunctions.complex_path import ComplexLine, ComplexArc, ComplexPath\n'), ((22365, 22399), 'abelfunctions.complex_path.ComplexArc', 'ComplexArc', (['R', '(0)', 'numpy.pi', 'dtheta'], {}), '(R, 0, numpy.pi, dtheta)\n', (22375, 22399), False, 'from abelfunctions.complex_path import ComplexLine, ComplexArc, ComplexPath\n'), ((22420, 22447), 'abelfunctions.complex_path.ComplexArc', 'ComplexArc', (['R', '(0)', '(0)', 'dtheta'], {}), '(R, 0, 0, dtheta)\n', (22430, 22447), False, 'from abelfunctions.complex_path import ComplexLine, ComplexArc, ComplexPath\n'), ((22878, 22910), 'abelfunctions.complex_path.ComplexPath', 'ComplexPath', (['path.segments[1:-1]'], 
{}), '(path.segments[1:-1])\n', (22889, 22910), False, 'from abelfunctions.complex_path import ComplexLine, ComplexArc, ComplexPath\n'), ((25530, 25549), 'numpy.angle', 'numpy.angle', (['(w0 - b)'], {}), '(w0 - b)\n', (25541, 25549), False, 'import numpy\n'), ((25693, 25725), 'abelfunctions.complex_path.ComplexArc', 'ComplexArc', (['R', 'b', 'theta0', 'dtheta'], {}), '(R, b, theta0, dtheta)\n', (25703, 25725), False, 'from abelfunctions.complex_path import ComplexLine, ComplexArc, ComplexPath\n'), ((5388, 5396), 'numpy.floor', 'floor', (['a'], {}), '(a)\n', (5393, 5396), False, 'from numpy import double, complex, floor, angle\n'), ((11288, 11302), 'numpy.angle', 'angle', (['(z1 - z0)'], {}), '(z1 - z0)\n', (11293, 11302), False, 'from numpy import double, complex, floor, angle\n'), ((11303, 11317), 'numpy.angle', 'angle', (['(bi - z0)'], {}), '(bi - z0)\n', (11308, 11317), False, 'from numpy import double, complex, floor, angle\n'), ((15923, 15969), 'numpy.argwhere', 'numpy.argwhere', (['(self.discriminant_points == bi)'], {}), '(self.discriminant_points == bi)\n', (15937, 15969), False, 'import numpy\n'), ((21879, 21892), 'numpy.abs', 'numpy.abs', (['bi'], {}), '(bi)\n', (21888, 21892), False, 'import numpy\n'), ((14943, 14957), 'numpy.complex', 'complex', (['point'], {}), '(point)\n', (14950, 14957), False, 'from numpy import double, complex, floor, angle\n'), ((14960, 14971), 'numpy.complex', 'complex', (['bi'], {}), '(bi)\n', (14967, 14971), False, 'from numpy import double, complex, floor, angle\n'), ((16029, 16083), 'numpy.argwhere', 'numpy.argwhere', (['(self.discriminant_points_complex == bj)'], {}), '(self.discriminant_points_complex == bj)\n', (16043, 16083), False, 'import numpy\n'), ((16900, 16919), 'abelfunctions.complex_path.ComplexLine', 'ComplexLine', (['z0', 'w0'], {}), '(z0, w0)\n', (16911, 16919), False, 'from abelfunctions.complex_path import ComplexLine, ComplexArc, ComplexPath\n'), ((18968, 18987), 'abelfunctions.complex_path.ComplexLine', 
'ComplexLine', (['z0', 'w0'], {}), '(z0, w0)\n', (18979, 18987), False, 'from abelfunctions.complex_path import ComplexLine, ComplexArc, ComplexPath\n'), ((5291, 5302), 'numpy.complex', 'complex', (['bi'], {}), '(bi)\n', (5298, 5302), False, 'from numpy import double, complex, floor, angle\n')] |
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import tempfile
import six
import os
import json
import numpy as np
from pyspark import RDD
from bigdl.nn.criterion import Criterion
from bigdl.nn.layer import Model as BModel
from bigdl.nn.layer import Layer
from bigdl.util.common import to_list, callBigDlFunc, get_spark_context, \
JavaValue, get_node_and_core_number
from zoo.common import Sample, JTensor
from zoo.feature.image import ImageSet
from zoo.pipeline.api.keras.engine.topology import ZooKerasLayer, KerasNet
from bigdl.optim.optimizer import Optimizer, EveryEpoch
from bigdl.optim.optimizer import MaxEpoch
# Python 3 removed the `long` and `unicode` builtins; alias them so code
# written for Python 2 keeps working on Python 3.
# FIX: compare sys.version_info (a tuple) instead of the version *string* --
# the string comparison "10.x" >= '3' would be False for a two-digit major.
if sys.version_info[0] >= 3:
    long = int
    unicode = str
class GraphNet(BModel):
    """A BigDL graph model with helpers for inspecting submodules, creating
    sub-graphs, freezing layers and converting to a Keras-style model."""
    def __init__(self, input, output, jvalue=None, bigdl_type="float", **kwargs):
        # NOTE: super(BModel, self) deliberately resolves the MRO *after*
        # BModel, bypassing BModel's own __init__.
        super(BModel, self).__init__(jvalue,
                                     to_list(input),
                                     to_list(output),
                                     bigdl_type,
                                     **kwargs)
    def flattened_layers(self, include_container=False):
        """Return the flattened list of submodules of this graph.

        :param include_container: whether container modules are included.
        :return: a list of Layer.
        """
        jvalues = callBigDlFunc(self.bigdl_type, "getFlattenSubModules", self,
                                include_container)
        return [Layer.of(jv) for jv in jvalues]
    @property
    def layers(self):
        """The direct submodules of this graph as a list of Layer."""
        jvalues = callBigDlFunc(self.bigdl_type, "getSubModules", self)
        return [Layer.of(jv) for jv in jvalues]
    @staticmethod
    def from_jvalue(jvalue, bigdl_type="float"):
        """
        Create a Python Model based on the given java value.
        :param jvalue: Java object created by Py4j
        :return: A Python Model
        """
        model = GraphNet([], [], jvalue=jvalue, bigdl_type=bigdl_type)
        model.value = jvalue
        return model
    def new_graph(self, outputs):
        """
        Specify a list of nodes as output and return a new graph using the
        existing nodes.
        :param outputs: A list of nodes specified
        :return: A graph model
        """
        jvalue = callBigDlFunc(self.bigdl_type, "newGraph", self.value, outputs)
        return self.from_jvalue(jvalue, self.bigdl_type)
    def freeze_up_to(self, names):
        """
        Freeze the model from the bottom up to the layers specified by names
        (inclusive). This is useful for finetuning a model.
        :param names: A list of module names to be frozen
        :return: current graph model
        """
        callBigDlFunc(self.bigdl_type, "freezeUpTo", self.value, names)
    def unfreeze(self, names=None):
        """
        "unfreeze" modules, i.e. make the module parameters (weight/bias, if
        they exist) trainable again. If `names` is a non-empty list, only
        layers matching the given names are unfrozen.
        :param names: list of module names to be unfrozen. Default is None.
        :return: current graph model
        """
        callBigDlFunc(self.bigdl_type, "unFreeze", self.value, names)
    def to_keras(self):
        """Convert this graph into a Keras-style ZooKerasLayer."""
        jvalue = callBigDlFunc(self.bigdl_type, "netToKeras", self.value)
        return ZooKerasLayer.of(jvalue, self.bigdl_type)
class Net:
    """Factory namespace for loading pre-trained models in various formats
    (BigDL, Analytics Zoo Keras-style, Torch, TensorFlow, Caffe, Keras)."""
    @staticmethod
    def load_bigdl(model_path, weight_path=None, bigdl_type="float"):
        """
        Load a pre-trained BigDL model.
        :param model_path: The path to the pre-trained model.
        :param weight_path: The path to the weights of the pre-trained model.
               Default is None.
        :return: A pre-trained model.
        """
        return GraphNet.from_jvalue(
            callBigDlFunc(bigdl_type, "netLoadBigDL", model_path, weight_path))
    @staticmethod
    def load(model_path, weight_path=None, bigdl_type="float"):
        """
        Load an existing Analytics Zoo model defined in Keras-style (with
        weights).
        :param model_path: The path to load the saved model.
               Local file system, HDFS and Amazon S3 are supported.
               HDFS path should be like 'hdfs://[host]:[port]/xxx'.
               Amazon S3 path should be like 's3a://bucket/xxx'.
        :param weight_path: The path for pre-trained weights if any.
               Default is None.
        :return: An Analytics Zoo model.
        """
        return KerasNet.of(
            callBigDlFunc(bigdl_type, "netLoad", model_path, weight_path),
            bigdl_type)
    @staticmethod
    def load_torch(path, bigdl_type="float"):
        """
        Load a pre-trained Torch model.
        :param path: The path containing the pre-trained model.
        :return: A pre-trained model.
        """
        return GraphNet.from_jvalue(
            callBigDlFunc(bigdl_type, "netLoadTorch", path), bigdl_type)
    @staticmethod
    def load_tf(path, inputs=None, outputs=None, byte_order="little_endian",
                bin_file=None, bigdl_type="float"):
        """
        Load a pre-trained TensorFlow model.
        :param path: The path containing the pre-trained model.
               OR alternatively, the exported folder path from `export_tf`.
               In this case, path should contain 'frozen_inference_graph.pb' and
               'graph_meta.json'. You don't need to specify inputs and outputs.
        :param inputs: The input nodes of this graph.
        :param outputs: The output nodes of this graph.
        :param byte_order: Byte_order of the file, `little_endian` or `big_endian`.
        :param bin_file: Optional bin file produced by bigdl dump_model util
               function to store the weights. Default is None.
        :return: A pre-trained model.
        """
        if inputs or outputs:
            jmodel = callBigDlFunc(bigdl_type, "netLoadTF", path, inputs,
                                   outputs, byte_order, bin_file)
        else:
            # neither inputs nor outputs given: treat `path` as an export
            # folder produced by `export_tf`
            if not os.path.isdir(path):
                raise ValueError("load_tf from exported folder requires path to be a folder")
            jmodel = callBigDlFunc(bigdl_type, "netLoadTF", path)
        return GraphNet.from_jvalue(jmodel, bigdl_type)
    @staticmethod
    def load_caffe(def_path, model_path, bigdl_type="float"):
        """
        Load a pre-trained Caffe model.
        :param def_path: The path containing the caffe model definition.
        :param model_path: The path containing the pre-trained caffe model.
        :return: A pre-trained model.
        """
        return GraphNet.from_jvalue(
            callBigDlFunc(bigdl_type, "netLoadCaffe", def_path, model_path),
            bigdl_type)
    @staticmethod
    def load_keras(json_path=None, hdf5_path=None, by_name=False):
        """
        Load a pre-trained Keras model.
        :param json_path: The json path containing the keras model definition.
               Default is None.
        :param hdf5_path: The HDF5 path containing the pre-trained keras model
               weights with or without the model architecture. Default is None.
        :param by_name: by default the architecture should be unchanged.
               If set as True, only layers with the same name will be loaded.
        :return: A BigDL model.
        """
        return BModel.load_keras(json_path, hdf5_path, by_name)
def to_sample_rdd(x, y, sc, num_slices=None):
    """Convert feature and label ndarrays into an RDD of Sample.

    :param x: ndarray whose first dimension is the batch dimension
    :param y: ndarray whose first dimension is the batch dimension
    :param sc: SparkContext used to parallelize the data
    :param num_slices: number of partitions; None lets Spark decide
    :return: RDD[Sample]
    """
    features = sc.parallelize(x, num_slices)
    labels = sc.parallelize(y, num_slices)
    # pair each feature with its label, then wrap the pair in a Sample
    return features.zip(labels).map(
        lambda pair: Sample.from_ndarray(pair[0], pair[1]))
class TFNet(Layer):
    """A BigDL Layer wrapping a frozen TensorFlow graph for inference.

    Construct either from an export folder produced by `export_tf` (no node
    names needed) or from a model file plus explicit input/output node names.
    """
    def __init__(self, path, input_names=None, output_names=None, bigdl_type="float"):
        # with no node names, `path` is assumed to be an export folder
        # (see `from_export_folder` / `export_tf`)
        if input_names is None and output_names is None:
            super(TFNet, self).__init__(None, bigdl_type,
                                        path)
        else:
            # accept a single name in place of a list for convenience
            if isinstance(input_names, six.string_types):
                input_names = [input_names]
            if isinstance(output_names, six.string_types):
                output_names = [output_names]
            super(TFNet, self).__init__(None, bigdl_type,
                                        path,
                                        input_names,
                                        output_names)
    @staticmethod
    def check_input(input):
        """
        Normalize prediction input to a list of JTensor.

        :param input: ndarray or list of ndarray or JTensor or list of JTensor.
        :return: (list of JTensor, isTable) -- isTable is True when the input
                 was a (non-empty) list.
        """
        def to_jtensor(i):
            # convert a single ndarray to JTensor; pass JTensor through
            if isinstance(i, np.ndarray):
                return JTensor.from_ndarray(i)
            elif isinstance(i, JTensor):
                return i
            else:
                raise Exception("Error unknown input type %s" % type(i))
        if type(input) is list:
            if len(input) == 0:
                raise Exception('Error when checking: empty input')
            return list(map(lambda i: to_jtensor(i), input)), True
        else:
            return [to_jtensor(input)], False
    def predict(self, x, batch_per_thread=-1, distributed=True):
        """
        Use a model to do prediction.

        :param x: an ImageSet, an ndarray, a list of ndarrays, or an RDD.
        :param batch_per_thread: batch size used on each worker thread.
        :param distributed: whether to predict on the cluster; ignored when
               `x` is an ImageSet. Local mode accepts ndarray or list only.
        """
        # ImageSet input: delegate entirely to the JVM and wrap the result
        if isinstance(x, ImageSet):
            results = callBigDlFunc(self.bigdl_type, "zooPredict",
                                    self.value,
                                    x,
                                    batch_per_thread)
            return ImageSet(results)
        if distributed:
            # ndarrays are zipped with dummy zero labels to form Samples
            if isinstance(x, np.ndarray):
                data_rdd = to_sample_rdd(x, np.zeros([x.shape[0]]), get_spark_context())
            elif isinstance(x, RDD):
                data_rdd = x
            else:
                raise TypeError("Unsupported prediction data type: %s" % type(x))
            results = callBigDlFunc(self.bigdl_type, "zooPredict",
                                    self.value,
                                    data_rdd,
                                    batch_per_thread)
            return results.map(lambda result: Layer.convert_output(result))
        else:
            # local prediction: ndarray or list of ndarrays only
            if isinstance(x, np.ndarray) or isinstance(x, list):
                results = callBigDlFunc(self.bigdl_type, "zooPredict",
                                        self.value,
                                        self._to_jtensors(x),
                                        batch_per_thread)
                return [Layer.convert_output(result) for result in results]
            else:
                raise TypeError("Unsupported prediction data type: %s" % type(x))
    @staticmethod
    def from_export_folder(folder):
        # load a TFNet from a folder previously produced by `export_tf`
        if not os.path.isdir(folder):
            raise ValueError(folder + " does not exist")
        return TFNet(folder)
    @staticmethod
    def from_session(sess, inputs, outputs,
                     generate_backward=False, allow_non_differentiable_input=True):
        # export the session's graph to a temporary folder, build a TFNet
        # from it, and clean the folder up afterwards
        from zoo.util.tf import export_tf
        temp = tempfile.mkdtemp()
        try:
            export_tf(sess, temp, inputs, outputs,
                      generate_backward, allow_non_differentiable_input)
            net = TFNet.from_export_folder(temp)
        finally:
            import shutil
            shutil.rmtree(temp)
        return net
def _find_placeholders(grads):
    """Find all placeholder tensors that the tensors in `grads` depend on.

    Performs a breadth-first traversal over each tensor's op inputs and
    collects every tensor whose op type starts with "Placeholder".

    :param grads: list of tensors; entries may be None (non-differentiable
        inputs)
    :return: list of the placeholder tensors found
    """
    import sys
    # Queue was renamed to queue in Python 3
    if sys.version[0] == '2':
        import Queue as queue_module
    else:
        import queue as queue_module
    pending = queue_module.Queue()
    for grad in grads:
        pending.put(grad)
    placeholders = set()
    visited = set()
    while not pending.empty():
        tensor = pending.get()
        # non-differentiable inputs show up as None; skip them
        if tensor is None:
            continue
        visited.add(tensor.name)
        if tensor.op.type.startswith("Placeholder"):
            placeholders.add(tensor)
            continue
        for input_tensor in tensor.op.inputs:
            # the visited check guards against cycles in the graph,
            # e.g. those introduced by tf.while_loop
            if input_tensor.name not in visited:
                pending.put(input_tensor)
    return list(placeholders)
class IdentityCriterion(Criterion):
    # A no-op criterion: the actual loss is computed inside the exported
    # TensorFlow graph, so BigDL's optimizer only needs a pass-through.
    def __init__(self):
        super(IdentityCriterion, self).__init__(None, "float")
class TFTrainingHelper(Layer):
    # Thin Layer wrapper around the training graph exported to `path`
    # (the folder written by `export_tf` in TFOptimizer.__init__).
    def __init__(self, path):
        super(TFTrainingHelper, self).__init__(None, "float", path)
class TFValidationMethod(JavaValue):
    # Wraps a BigDL validation method so it can split a model's flat output
    # into `output_length` predictions and `target_length` labels.
    def __init__(self, val_method, output_length, target_length):
        JavaValue.__init__(self, None, "float",
                           val_method, output_length, target_length)
class TFOptimizer:
def __init__(self, loss, optim_method, sess=None,
val_outputs=None, val_labels=None, val_method=None):
import tensorflow as tf
from zoo.util.tf import export_tf
'''
TFOptimizer is used for distributed training of tensorflow
on Spark/BigDL.
:param loss: The loss tensor of the tensorflow model, should be a scalar
:param optim_method: the optimization method to be used, such as bigdl.optim.optimizer.Adam
:param sess: the current tensorflow Session, if you want to used a pre-trained model, you
should use the Session to load the pre-trained variables and pass it to TFOptimizer.
'''
self.optim_method = optim_method
if sess is None:
self.sess = tf.Session()
self.sess.run(tf.global_variables_initializer())
else:
self.sess = sess
grads_vars = tf.train.GradientDescentOptimizer(0).compute_gradients(loss)
variables = []
grads = []
from zoo.util.tf import process_grad
for (grad, var) in grads_vars:
variables.append(var)
grad = process_grad(grad)
grads.append(grad)
self.export_dir = tempfile.mkdtemp()
all_required_inputs = _find_placeholders([loss])
self.dataset = tf.get_collection(all_required_inputs[0].name)[0]
if self.dataset.batch_size <= 0:
raise ValueError("You should set batch_size instead of batch_per_thread for training")
self.inputs = self.dataset.tensors
_check_the_same(all_required_inputs, self.inputs)
if val_outputs is not None and val_labels is not None:
outputs = val_outputs + val_labels + [loss]
else:
outputs = [loss]
export_tf(self.sess, self.export_dir,
inputs=self.inputs,
outputs=grads + outputs)
variable_names = [v.name for v in variables]
grad_names = [g.name for g in grads]
output_names = [o.name for o in outputs]
meta = {
"input_names": [i.name for i in self.inputs],
"output_names": output_names,
"variables": variable_names,
"grad_variables": grad_names
}
with open(os.path.join(self.export_dir, "training_meta.json"), "w") as f:
f.write(json.dumps(meta))
self.training_helper_layer = TFTrainingHelper(self.export_dir)
self.variable_placeholders = []
assigns = []
for v in variables:
p = tf.placeholder(dtype=tf.float32, shape=v.shape)
a = tf.assign(v, p)
self.variable_placeholders.append(p)
assigns.append(a)
self.assign = tf.group(*assigns)
data = self.dataset.rdd
batch_size = self.dataset.batch_size
sample_rdd = data.map(lambda t: Sample.from_ndarray(t, [np.array([0.0])]))
self.optimizer = Optimizer.create(self.training_helper_layer,
sample_rdd,
IdentityCriterion(),
batch_size=batch_size,
optim_method=self.optim_method)
if val_outputs is not None and val_labels is not None:
val_sample_rdd = self.dataset.val_rdd\
.map(lambda t: Sample.from_ndarray(t, [np.array([0.0])]))
val_method = TFValidationMethod(val_method, len(val_outputs), len(val_labels))
self.optimizer.set_validation(self.dataset.batch_size,
val_sample_rdd,
EveryEpoch(),
val_method)
def set_train_summary(self, summary):
    """Forward the training summary object to the wrapped optimizer.

    :param summary: a summary object accepted by the underlying optimizer's
        ``set_train_summary`` (used to record training metrics).
    """
    self.optimizer.set_train_summary(summary)
def set_val_summary(self, summary):
    """Forward the validation summary object to the wrapped optimizer.

    :param summary: a summary object accepted by the underlying optimizer's
        ``set_val_summary`` (used to record validation metrics).
    """
    self.optimizer.set_val_summary(summary)
def optimize(self, end_trigger=None):
    """Run distributed training, then copy the trained weights back into
    the TensorFlow session.

    :param end_trigger: a trigger deciding when training stops; defaults
        to one full epoch (``MaxEpoch(1)``) when omitted.
    """
    trigger = MaxEpoch(1) if end_trigger is None else end_trigger
    self.optimizer.set_end_when(trigger)
    self.optimizer.optimize()
    # Pull the trained weights out of the helper layer and assign them to
    # the TF variables via the pre-built placeholder/assign ops.
    trained_weights = self.training_helper_layer.get_weights()
    assign_feed = dict(zip(self.variable_placeholders, trained_weights))
    self.sess.run(self.assign, feed_dict=assign_feed)
class TFDataset:
    """A distributed collection of elements (lists of numpy.ndarray) to be fed
    into a TensorFlow graph; must be used with TFOptimizer or TFPredictor."""

    def __init__(self, rdd, names, shapes, types, batch_size,
                 batch_per_thread, hard_code_batch_size=False, val_rdd=None):
        """
        :param rdd: an RDD whose records are lists of numpy.ndarray, each list
            representing the tensors fed into the graph on one iteration
        :param names: names of the resulting tensors, a list of str
        :param shapes: shapes of the resulting tensors, a list of list of int
        :param types: dtypes of the resulting tensors, a list of tf.dtype
        :param batch_size: total batch size used for training; must be a
            multiple of the total core number
        :param batch_per_thread: per-thread batch size used for inference
        :param hard_code_batch_size: whether to hard-code the batch size into
            the graph. If True, the static first dimension of each tensor is
            batch_size // total_core_num (training) or batch_per_thread
            (inference); if False it is None (dynamic).
        :param val_rdd: optional RDD of validation data in the same format as rdd
        :raises ValueError: if both batch_size and batch_per_thread are set, or
            batch_size is not a multiple of the total core number
        """
        import tensorflow as tf

        if batch_size > 0 and batch_per_thread > 0:
            # Fixed typo in the message: "bath_size" -> "batch_size".
            raise ValueError("batch_size and batch_per_thread should not be set simultaneously")

        node_num, core_num = get_node_and_core_number()
        self.total_core_num = node_num * core_num
        if batch_size > 0 and batch_size % self.total_core_num != 0:
            raise ValueError("batch_size should be a multiple " +
                             "of total core number, but got batch_size: " +
                             "%s where total core number is %s" % (batch_size,
                                                                   self.total_core_num))
        if batch_size <= 0 and batch_per_thread <= 0:
            # Neither was supplied: default to one record per thread.
            batch_per_thread = 1
            batch_size = self.total_core_num
        self.batch_size = batch_size
        self.batch_per_thread = batch_per_thread

        # Static first dimension of every input placeholder (None = dynamic).
        if not hard_code_batch_size:
            first_dim = None
        elif batch_per_thread > 0:
            first_dim = batch_per_thread
        else:
            # BUG FIX: integer division -- "/" yields a float under Python 3,
            # which is not a valid placeholder dimension. Divisibility is
            # guaranteed by the multiple-of-core-number check above.
            first_dim = batch_size // self.total_core_num
        self.tensors = [tf.placeholder(name=names[i],
                                       dtype=types[i],
                                       shape=[first_dim] + shapes[i])
                        for i in range(len(names))]

        self.val_rdd = val_rdd
        # Drop any extra columns the graph does not consume.
        self.rdd = rdd.map(lambda arr: arr[:len(names)])
        self.input_names = names
        # Register this dataset under each tensor's name so TFOptimizer /
        # TFPredictor can recover it from a placeholder.
        for i in range(len(self.tensors)):
            tf.add_to_collection(self.tensors[i].name, self)

    @staticmethod
    def from_rdd(rdd, names=None, shapes=None, types=None,
                 batch_size=-1, batch_per_thread=-1,
                 hard_code_batch_size=False, val_rdd=None):
        """Convenience constructor with defaults: names ["features", "labels"]
        and tf.float32 dtypes."""
        import tensorflow as tf
        if not names:
            names = ["features", "labels"]
        if not shapes:
            # NOTE(review): a None entry makes "[first_dim] + shapes[i]" raise
            # in __init__; presumably callers always pass shapes -- TODO confirm.
            shapes = [None] * len(names)
        if not types:
            types = [tf.float32] * len(names)
        return TFDataset(rdd, names, shapes, types,
                         batch_size, batch_per_thread,
                         hard_code_batch_size, val_rdd)
def _check_the_same(all_required_inputs, inputs_in_datasets):
inputs_not_in_dataset = [i for i in all_required_inputs if i not in inputs_in_datasets]
if inputs_not_in_dataset:
raise ValueError("You should not use any placeholder that are not defined in dataset, " +
"found %s" % inputs_not_in_dataset)
if len(inputs_in_datasets) != len(all_required_inputs):
inputs_not_require_by_loss = [i for i in inputs_in_datasets if i not in all_required_inputs]
raise ValueError("You should use all the placeholders that are defined in dataset, " +
"%s are not used" % inputs_not_require_by_loss)
class TFPredictor:
    """Feeds every element of a TFDataset through a TensorFlow graph and
    returns the model outputs as a Spark RDD."""

    def __init__(self, sess, outputs):
        """
        :param sess: the current tensorflow Session; load the trained
            variables into it before constructing the TFPredictor
        :param outputs: the output tensors of the tensorflow model
        :raises ValueError: if the dataset was configured with batch_size
            instead of batch_per_thread
        """
        import tensorflow as tf
        self.sess = sess
        # Every placeholder the outputs depend on must come from one TFDataset,
        # which registered itself under its tensors' names.
        all_required_inputs = _find_placeholders(outputs)
        self.dataset = tf.get_collection(all_required_inputs[0].name)[0]
        self.inputs = self.dataset.tensors
        _check_the_same(all_required_inputs, self.inputs)
        self.tfnet = TFNet.from_session(sess, self.inputs, outputs)
        if self.dataset.batch_per_thread <= 0:
            # BUG FIX: the two fragments previously concatenated to
            # "...TFDatasetinstead of..." -- added the missing space.
            raise ValueError("You should set batch_per_thread on TFDataset " +
                             "instead of batch_size for prediction")

    def predict(self):
        """Run inference over the whole dataset.

        :return: an RDD with one prediction per input element.
        """
        rdd = self.dataset.rdd
        # Wrap each record in a Sample with a dummy label, as required by TFNet.
        sample_rdd = rdd.map(lambda x: Sample.from_ndarray(x, np.array([0.0])))
        return self.tfnet.predict(sample_rdd, self.dataset.batch_per_thread)
| [
"bigdl.util.common.callBigDlFunc",
"tensorflow.get_collection",
"json.dumps",
"tensorflow.assign",
"shutil.rmtree",
"os.path.join",
"bigdl.nn.layer.Layer.convert_output",
"zoo.util.tf.process_grad",
"queue.get",
"tensorflow.placeholder",
"tempfile.mkdtemp",
"bigdl.nn.layer.Model.load_keras",
... | [((12279, 12292), 'queue.Queue', 'queue.Queue', ([], {}), '()\n', (12290, 12292), True, 'import queue as queue\n'), ((1670, 1749), 'bigdl.util.common.callBigDlFunc', 'callBigDlFunc', (['self.bigdl_type', '"""getFlattenSubModules"""', 'self', 'include_container'], {}), "(self.bigdl_type, 'getFlattenSubModules', self, include_container)\n", (1683, 1749), False, 'from bigdl.util.common import to_list, callBigDlFunc, get_spark_context, JavaValue, get_node_and_core_number\n'), ((1885, 1938), 'bigdl.util.common.callBigDlFunc', 'callBigDlFunc', (['self.bigdl_type', '"""getSubModules"""', 'self'], {}), "(self.bigdl_type, 'getSubModules', self)\n", (1898, 1938), False, 'from bigdl.util.common import to_list, callBigDlFunc, get_spark_context, JavaValue, get_node_and_core_number\n'), ((2621, 2684), 'bigdl.util.common.callBigDlFunc', 'callBigDlFunc', (['self.bigdl_type', '"""newGraph"""', 'self.value', 'outputs'], {}), "(self.bigdl_type, 'newGraph', self.value, outputs)\n", (2634, 2684), False, 'from bigdl.util.common import to_list, callBigDlFunc, get_spark_context, JavaValue, get_node_and_core_number\n'), ((3042, 3105), 'bigdl.util.common.callBigDlFunc', 'callBigDlFunc', (['self.bigdl_type', '"""freezeUpTo"""', 'self.value', 'names'], {}), "(self.bigdl_type, 'freezeUpTo', self.value, names)\n", (3055, 3105), False, 'from bigdl.util.common import to_list, callBigDlFunc, get_spark_context, JavaValue, get_node_and_core_number\n'), ((3504, 3565), 'bigdl.util.common.callBigDlFunc', 'callBigDlFunc', (['self.bigdl_type', '"""unFreeze"""', 'self.value', 'names'], {}), "(self.bigdl_type, 'unFreeze', self.value, names)\n", (3517, 3565), False, 'from bigdl.util.common import to_list, callBigDlFunc, get_spark_context, JavaValue, get_node_and_core_number\n'), ((3607, 3663), 'bigdl.util.common.callBigDlFunc', 'callBigDlFunc', (['self.bigdl_type', '"""netToKeras"""', 'self.value'], {}), "(self.bigdl_type, 'netToKeras', self.value)\n", (3620, 3663), False, 'from bigdl.util.common 
import to_list, callBigDlFunc, get_spark_context, JavaValue, get_node_and_core_number\n'), ((3679, 3719), 'zoo.pipeline.api.keras.engine.topology.ZooKerasLayer.of', 'ZooKerasLayer.of', (['value', 'self.bigdl_type'], {}), '(value, self.bigdl_type)\n', (3695, 3719), False, 'from zoo.pipeline.api.keras.engine.topology import ZooKerasLayer, KerasNet\n'), ((4099, 4165), 'bigdl.util.common.callBigDlFunc', 'callBigDlFunc', (['bigdl_type', '"""netLoadBigDL"""', 'model_path', 'weight_path'], {}), "(bigdl_type, 'netLoadBigDL', model_path, weight_path)\n", (4112, 4165), False, 'from bigdl.util.common import to_list, callBigDlFunc, get_spark_context, JavaValue, get_node_and_core_number\n'), ((4840, 4901), 'bigdl.util.common.callBigDlFunc', 'callBigDlFunc', (['bigdl_type', '"""netLoad"""', 'model_path', 'weight_path'], {}), "(bigdl_type, 'netLoad', model_path, weight_path)\n", (4853, 4901), False, 'from bigdl.util.common import to_list, callBigDlFunc, get_spark_context, JavaValue, get_node_and_core_number\n'), ((4917, 4948), 'zoo.pipeline.api.keras.engine.topology.KerasNet.of', 'KerasNet.of', (['jmodel', 'bigdl_type'], {}), '(jmodel, bigdl_type)\n', (4928, 4948), False, 'from zoo.pipeline.api.keras.engine.topology import ZooKerasLayer, KerasNet\n'), ((5198, 5245), 'bigdl.util.common.callBigDlFunc', 'callBigDlFunc', (['bigdl_type', '"""netLoadTorch"""', 'path'], {}), "(bigdl_type, 'netLoadTorch', path)\n", (5211, 5245), False, 'from bigdl.util.common import to_list, callBigDlFunc, get_spark_context, JavaValue, get_node_and_core_number\n'), ((7051, 7114), 'bigdl.util.common.callBigDlFunc', 'callBigDlFunc', (['bigdl_type', '"""netLoadCaffe"""', 'def_path', 'model_path'], {}), "(bigdl_type, 'netLoadCaffe', def_path, model_path)\n", (7064, 7114), False, 'from bigdl.util.common import to_list, callBigDlFunc, get_spark_context, JavaValue, get_node_and_core_number\n'), ((7793, 7841), 'bigdl.nn.layer.Model.load_keras', 'BModel.load_keras', (['json_path', 'hdf5_path', 'by_name'], {}), 
'(json_path, hdf5_path, by_name)\n', (7810, 7841), True, 'from bigdl.nn.layer import Model as BModel\n'), ((11628, 11646), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (11644, 11646), False, 'import tempfile\n'), ((12324, 12339), 'queue.put', 'queue.put', (['grad'], {}), '(grad)\n', (12333, 12339), True, 'import queue as queue\n'), ((12400, 12413), 'queue.empty', 'queue.empty', ([], {}), '()\n', (12411, 12413), True, 'import queue as queue\n'), ((12432, 12443), 'queue.get', 'queue.get', ([], {}), '()\n', (12441, 12443), True, 'import queue as queue\n'), ((13383, 13468), 'bigdl.util.common.JavaValue.__init__', 'JavaValue.__init__', (['self', 'None', '"""float"""', 'val_method', 'output_length', 'target_length'], {}), "(self, None, 'float', val_method, output_length,\n target_length)\n", (13401, 13468), False, 'from bigdl.util.common import to_list, callBigDlFunc, get_spark_context, JavaValue, get_node_and_core_number\n'), ((14744, 14762), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (14760, 14762), False, 'import tempfile\n'), ((15307, 15393), 'zoo.util.tf.export_tf', 'export_tf', (['self.sess', 'self.export_dir'], {'inputs': 'self.inputs', 'outputs': '(grads + outputs)'}), '(self.sess, self.export_dir, inputs=self.inputs, outputs=grads +\n outputs)\n', (15316, 15393), False, 'from zoo.util.tf import export_tf\n'), ((16264, 16282), 'tensorflow.group', 'tf.group', (['*assigns'], {}), '(*assigns)\n', (16272, 16282), True, 'import tensorflow as tf\n'), ((19472, 19498), 'bigdl.util.common.get_node_and_core_number', 'get_node_and_core_number', ([], {}), '()\n', (19496, 19498), False, 'from bigdl.util.common import to_list, callBigDlFunc, get_spark_context, JavaValue, get_node_and_core_number\n'), ((1428, 1442), 'bigdl.util.common.to_list', 'to_list', (['input'], {}), '(input)\n', (1435, 1442), False, 'from bigdl.util.common import to_list, callBigDlFunc, get_spark_context, JavaValue, get_node_and_core_number\n'), ((1481, 1496), 
'bigdl.util.common.to_list', 'to_list', (['output'], {}), '(output)\n', (1488, 1496), False, 'from bigdl.util.common import to_list, callBigDlFunc, get_spark_context, JavaValue, get_node_and_core_number\n'), ((1768, 1784), 'bigdl.nn.layer.Layer.of', 'Layer.of', (['jlayer'], {}), '(jlayer)\n', (1776, 1784), False, 'from bigdl.nn.layer import Layer\n'), ((1957, 1973), 'bigdl.nn.layer.Layer.of', 'Layer.of', (['jlayer'], {}), '(jlayer)\n', (1965, 1973), False, 'from bigdl.nn.layer import Layer\n'), ((6446, 6490), 'bigdl.util.common.callBigDlFunc', 'callBigDlFunc', (['bigdl_type', '"""netLoadTF"""', 'path'], {}), "(bigdl_type, 'netLoadTF', path)\n", (6459, 6490), False, 'from bigdl.util.common import to_list, callBigDlFunc, get_spark_context, JavaValue, get_node_and_core_number\n'), ((6526, 6613), 'bigdl.util.common.callBigDlFunc', 'callBigDlFunc', (['bigdl_type', '"""netLoadTF"""', 'path', 'inputs', 'outputs', 'byte_order', 'bin_file'], {}), "(bigdl_type, 'netLoadTF', path, inputs, outputs, byte_order,\n bin_file)\n", (6539, 6613), False, 'from bigdl.util.common import to_list, callBigDlFunc, get_spark_context, JavaValue, get_node_and_core_number\n'), ((8258, 8295), 'zoo.common.Sample.from_ndarray', 'Sample.from_ndarray', (['item[0]', 'item[1]'], {}), '(item[0], item[1])\n', (8277, 8295), False, 'from zoo.common import Sample, JTensor\n'), ((9912, 9989), 'bigdl.util.common.callBigDlFunc', 'callBigDlFunc', (['self.bigdl_type', '"""zooPredict"""', 'self.value', 'x', 'batch_per_thread'], {}), "(self.bigdl_type, 'zooPredict', self.value, x, batch_per_thread)\n", (9925, 9989), False, 'from bigdl.util.common import to_list, callBigDlFunc, get_spark_context, JavaValue, get_node_and_core_number\n'), ((10117, 10134), 'zoo.feature.image.ImageSet', 'ImageSet', (['results'], {}), '(results)\n', (10125, 10134), False, 'from zoo.feature.image import ImageSet\n'), ((10478, 10566), 'bigdl.util.common.callBigDlFunc', 'callBigDlFunc', (['self.bigdl_type', '"""zooPredict"""', 
'self.value', 'data_rdd', 'batch_per_thread'], {}), "(self.bigdl_type, 'zooPredict', self.value, data_rdd,\n batch_per_thread)\n", (10491, 10566), False, 'from bigdl.util.common import to_list, callBigDlFunc, get_spark_context, JavaValue, get_node_and_core_number\n'), ((11315, 11336), 'os.path.isdir', 'os.path.isdir', (['folder'], {}), '(folder)\n', (11328, 11336), False, 'import os\n'), ((11672, 11765), 'zoo.util.tf.export_tf', 'export_tf', (['sess', 'temp', 'inputs', 'outputs', 'generate_backward', 'allow_non_differentiable_input'], {}), '(sess, temp, inputs, outputs, generate_backward,\n allow_non_differentiable_input)\n', (11681, 11765), False, 'from zoo.util.tf import export_tf\n'), ((11888, 11907), 'shutil.rmtree', 'shutil.rmtree', (['temp'], {}), '(temp)\n', (11901, 11907), False, 'import shutil\n'), ((14290, 14302), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (14300, 14302), True, 'import tensorflow as tf\n'), ((14668, 14686), 'zoo.util.tf.process_grad', 'process_grad', (['grad'], {}), '(grad)\n', (14680, 14686), False, 'from zoo.util.tf import process_grad\n'), ((14843, 14889), 'tensorflow.get_collection', 'tf.get_collection', (['all_required_inputs[0].name'], {}), '(all_required_inputs[0].name)\n', (14860, 14889), True, 'import tensorflow as tf\n'), ((16083, 16130), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.float32', 'shape': 'v.shape'}), '(dtype=tf.float32, shape=v.shape)\n', (16097, 16130), True, 'import tensorflow as tf\n'), ((16147, 16162), 'tensorflow.assign', 'tf.assign', (['v', 'p'], {}), '(v, p)\n', (16156, 16162), True, 'import tensorflow as tf\n'), ((17569, 17580), 'bigdl.optim.optimizer.MaxEpoch', 'MaxEpoch', (['(1)'], {}), '(1)\n', (17577, 17580), False, 'from bigdl.optim.optimizer import MaxEpoch\n'), ((21257, 21305), 'tensorflow.add_to_collection', 'tf.add_to_collection', (['self.tensors[i].name', 'self'], {}), '(self.tensors[i].name, self)\n', (21277, 21305), True, 'import tensorflow as tf\n'), ((23294, 
23340), 'tensorflow.get_collection', 'tf.get_collection', (['all_required_inputs[0].name'], {}), '(all_required_inputs[0].name)\n', (23311, 23340), True, 'import tensorflow as tf\n'), ((6310, 6329), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (6323, 6329), False, 'import os\n'), ((9286, 9309), 'zoo.common.JTensor.from_ndarray', 'JTensor.from_ndarray', (['i'], {}), '(i)\n', (9306, 9309), False, 'from zoo.common import Sample, JTensor\n'), ((14329, 14362), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (14360, 14362), True, 'import tensorflow as tf\n'), ((14428, 14464), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', (['(0)'], {}), '(0)\n', (14461, 14464), True, 'import tensorflow as tf\n'), ((15803, 15854), 'os.path.join', 'os.path.join', (['self.export_dir', '"""training_meta.json"""'], {}), "(self.export_dir, 'training_meta.json')\n", (15815, 15854), False, 'import os\n'), ((15887, 15903), 'json.dumps', 'json.dumps', (['meta'], {}), '(meta)\n', (15897, 15903), False, 'import json\n'), ((17218, 17230), 'bigdl.optim.optimizer.EveryEpoch', 'EveryEpoch', ([], {}), '()\n', (17228, 17230), False, 'from bigdl.optim.optimizer import Optimizer, EveryEpoch\n'), ((20240, 20311), 'tensorflow.placeholder', 'tf.placeholder', ([], {'name': 'names[i]', 'dtype': 'types[i]', 'shape': '([None] + shapes[i])'}), '(name=names[i], dtype=types[i], shape=[None] + shapes[i])\n', (20254, 20311), True, 'import tensorflow as tf\n'), ((10245, 10267), 'numpy.zeros', 'np.zeros', (['[x.shape[0]]'], {}), '([x.shape[0]])\n', (10253, 10267), True, 'import numpy as np\n'), ((10269, 10288), 'bigdl.util.common.get_spark_context', 'get_spark_context', ([], {}), '()\n', (10286, 10288), False, 'from bigdl.util.common import to_list, callBigDlFunc, get_spark_context, JavaValue, get_node_and_core_number\n'), ((10717, 10745), 'bigdl.nn.layer.Layer.convert_output', 'Layer.convert_output', (['result'], {}), 
'(result)\n', (10737, 10745), False, 'from bigdl.nn.layer import Layer\n'), ((11093, 11121), 'bigdl.nn.layer.Layer.convert_output', 'Layer.convert_output', (['result'], {}), '(result)\n', (11113, 11121), False, 'from bigdl.nn.layer import Layer\n'), ((12958, 12981), 'queue.put', 'queue.put', (['input_tensor'], {}), '(input_tensor)\n', (12967, 12981), True, 'import queue as queue\n'), ((20537, 20624), 'tensorflow.placeholder', 'tf.placeholder', ([], {'name': 'names[i]', 'dtype': 'types[i]', 'shape': '([batch_per_thread] + shapes[i])'}), '(name=names[i], dtype=types[i], shape=[batch_per_thread] +\n shapes[i])\n', (20551, 20624), True, 'import tensorflow as tf\n'), ((20825, 20929), 'tensorflow.placeholder', 'tf.placeholder', ([], {'name': 'names[i]', 'dtype': 'types[i]', 'shape': '([batch_size / self.total_core_num] + shapes[i])'}), '(name=names[i], dtype=types[i], shape=[batch_size / self.\n total_core_num] + shapes[i])\n', (20839, 20929), True, 'import tensorflow as tf\n'), ((23824, 23839), 'numpy.array', 'np.array', (['[0.0]'], {}), '([0.0])\n', (23832, 23839), True, 'import numpy as np\n'), ((16425, 16440), 'numpy.array', 'np.array', (['[0.0]'], {}), '([0.0])\n', (16433, 16440), True, 'import numpy as np\n'), ((16941, 16956), 'numpy.array', 'np.array', (['[0.0]'], {}), '([0.0])\n', (16949, 16956), True, 'import numpy as np\n')] |
import pickle, os
import pylab as pl
import numpy as np
import math
# Output directory for generated figures and input directory for the pickled
# optimization statistics.
PATH_TO_PLOTS = "./Plots/"
PATH_TO_DATA = "./Optimization_Stats/"
# The optimization algorithms being compared.
ALGORITHMS = ["AMPGO", "Anneal", "BSA", "Cuckoo", "qABC"]
# The benchmark objective functions each algorithm was run on.
FUNCTIONS = ["Adjiman", "Beale", "EggHolder", "GoldsteinPrice",
             "Langermann", "Shubert01", "SixHumpCamel", "UrsemWaves",
             "DropWave", "Whitley", "MieleCantrell", "Weierstrass",
             "Rastrigin", "Katsuura", "Salomon", "Deceptive",
             "Giunta", "Griewank", "Trigonometric02", "Paviani",
             "Sargan", "ZeroSum", "Plateau", "Michalewicz",
             "Mishra11", "OddSquare", "Qing", "Rosenbrock",
             "Alpine01", "Bohachevsky", "Easom", "Levy03",
             "MultiModal", "Penalty02", "Quintic", "Vincent",
             "Ackley", "CosineMixture", "Wavy", "NeedleEye",
             "Pathological", "Rana", "Schwefel22",
             "DeflectedCorrugatedSpring", "Mishra02", "Penalty01",
             "Exponential", "Ripple01", "Schwefel26", "SineEnvelope"]
# X-axis values shared by all plots (presumably one point per objective
# function execution -- the x-labels below say "executions").
X = list(range(5000))
# Set the line styles for each algorithm with pyplot (on,off) syntax
LINE_STYLES = {"qABC": (None, None),
               "AMPGO": (6, 2),
               "Anneal": (1, 1),
               "BSA": (3, 6),
               "Cuckoo": (5, 1, 2, 1, 5, 1)}
# Display names for the statistics stored in the pickled records.
NAME_FIXER = {"mean": "Mean",
              "min": "Absolute Min",
              "max": "Absolute Max",
              "stdev": "Standard Deviation"}
# Load all algorithm specific files
# ===========================================
# Map each algorithm name to the basenames of its data files, discovered by
# globbing the data directory for names containing the algorithm's name.
algorithm_files = {}
for alg_name in ALGORITHMS:
    listing = os.popen("ls %s*%s*" % (PATH_TO_DATA, alg_name)).readlines()
    algorithm_files[alg_name] = [entry.strip().split("/")[-1] for entry in listing]
# Create Plots of Min, Max, Stdev, Mean per benchmark function
# ==================================================================
for fun_name in FUNCTIONS:
    print("[%0.1f%%] Plotting results..."%(100.0 * FUNCTIONS.index(fun_name)
                                           /len(FUNCTIONS)),end="\r")
    # Gather every algorithm's statistics record for this function.
    stats = {alg_name: {} for alg_name in ALGORITHMS}
    for alg_name in ALGORITHMS:
        file_name = [fn for fn in algorithm_files[alg_name]
                     if fun_name in fn][0]
        with open(PATH_TO_DATA + file_name, "rb") as f:
            raw_data = pickle.load(f)
        stats[alg_name].update(raw_data)
    # One figure per statistic that has a display name.
    for stat in stats[ALGORITHMS[0]]:
        if stat not in NAME_FIXER:
            continue
        plot_name = fun_name + " " + NAME_FIXER[stat]
        for alg_name in ALGORITHMS:
            pl.plot(X, stats[alg_name][stat],
                    dashes=LINE_STYLES[alg_name], label=alg_name,
                    linewidth=1)
        pl.title(plot_name)
        # Typo fix ("exectuions"), consistent with the labels used below.
        pl.xlabel("Objective function executions")
        pl.ylabel(NAME_FIXER[stat])
        pl.legend(loc="best")
        pl.savefig(PATH_TO_PLOTS + plot_name.replace(" ", "_") + ".svg")
        pl.clf()
# Calculate average number of trials
# ============================================
# Sum the first histogram of every stats file; report average and maximum.
trial_lengths = []
for alg_name in ALGORITHMS:
    pct = 100.0 * ALGORITHMS.index(alg_name) / len(ALGORITHMS)
    for file_name in algorithm_files[alg_name]:
        print("[%0.1f%%] Loading %s..." % (pct, file_name), end="\r")
        with open(PATH_TO_DATA + file_name, "rb") as f:
            record = pickle.load(f)
        trial_lengths.append(sum(record["hist"][0]))
print("Average length: %i" % (sum(trial_lengths) / len(trial_lengths)))
print("Max length: %i" % (max(trial_lengths)))
# Calculate 1% best algorithms
# ======================================
# For every iteration index, count on how many benchmark functions each
# algorithm's best-so-far value was within 1% (of the function's observed
# value range) of the best value achieved by ANY algorithm.
in_the_best = {alg_name: [] for alg_name in ALGORITHMS}
for fun_name in FUNCTIONS:
    print("[%0.1f%%] Recording best.."%(FUNCTIONS.index(fun_name) /
                                        len(FUNCTIONS)*100.0), end="\r")
    abs_best = []
    abs_min = float("inf")
    abs_max = -float("inf")
    # PERF: cache each algorithm's "min" curve so the second pass below does
    # not re-open and re-unpickle every file (the original read each twice).
    min_series = {}
    # First pass: per-algorithm best-so-far curves, overall min/max, and the
    # element-wise best curve across algorithms.
    for alg_name in ALGORITHMS:
        # Get the file specific to this function for this algorithm
        file_name = [fn for fn in algorithm_files[alg_name]
                     if fun_name in fn][0]
        with open(PATH_TO_DATA + file_name, "rb") as f:
            raw_data = pickle.load(f)
        min_series[alg_name] = raw_data["min"]
        abs_min = min(abs_min, min(raw_data["min"]))
        abs_max = max(abs_max, max(raw_data["max"]))
        if not abs_best:
            abs_best = list(raw_data["min"])
        else:
            abs_best = [min(a, b) for a, b in
                        zip(abs_best, raw_data["min"])]
    # Second pass: count, per iteration, whether each algorithm was within
    # one percent of the best, using the cached curves.
    one_percent = 0.01 * (abs_max - abs_min)
    for alg_name in ALGORITHMS:
        if not in_the_best[alg_name]:
            in_the_best[alg_name] = [0] * len(abs_best)
        for i in range(len(abs_best)):
            if abs(min_series[alg_name][i] - abs_best[i]) < one_percent:
                in_the_best[alg_name][i] += 1
# Load all comparison metric files
# ==========================================
loaded = 0
rank_average = {}       # per-algorithm running mean of the rank-0 probability curves
profile_tols = []       # sorted tolerances found in the data profile records
profile_average = {}    # tol -> alg_name -> running mean of the data profile curves
function_files = [line.strip().split("/")[-1] for line in
                  os.popen("ls %s*Compare*" % PATH_TO_DATA)]
# Tau for selecting data profile plot to create
T = [10**(-i) for i in (1, 3, 5, 7)][0]
for file_name in function_files:
    print("[%0.1f%%] Loading %s..." % (100.0 * function_files.index(file_name)
                                       / len(function_files), file_name), end="\r")
    with open(PATH_TO_DATA + file_name, "rb") as f:
        raw_data = pickle.load(f)
    loaded += 1
    # To plot Rank 0 Probability instead of the data profile, plot
    # raw_data["rank0prob"][alg_name] and set stat = "Rank 0 Probability".
    stat = "Data Profile T-%0.7f" % T
    for alg_name in ALGORITHMS:
        # Plot Data Profiles
        # ============================
        pl.plot(X, raw_data["dataprofile"][alg_name][T],
                dashes=LINE_STYLES[alg_name], label=alg_name,
                linewidth=1)
        # Rank 0 Probability running average.
        # BUG FIX: the original tested `.get(alg_name, None) == None`; once
        # the stored value is a numpy array, `== None` compares elementwise
        # and `if` on the resulting array raises ValueError.
        if rank_average.get(alg_name) is None:
            rank_average[alg_name] = np.array(raw_data["rank0prob"][alg_name])
        else:
            # Incremental mean: avg += (x - avg) / n
            rank_average[alg_name] += (
                np.array(raw_data["rank0prob"][alg_name]) -
                rank_average[alg_name]) / loaded
        # Data profile running averages, one per tolerance.
        if not profile_tols:
            profile_tols = sorted(raw_data["dataprofile"][alg_name].keys())
        for tol in profile_tols:
            if tol not in profile_average:
                profile_average[tol] = {}
            if alg_name not in profile_average[tol]:
                profile_average[tol][alg_name] = np.array(
                    raw_data["dataprofile"][alg_name][tol])
            else:
                profile_average[tol][alg_name] += (
                    np.array(raw_data["dataprofile"][alg_name][tol]) -
                    profile_average[tol][alg_name]) / loaded
    plot_name = file_name.split("_")[0] + " " + stat
    pl.title(plot_name)
    # Typo fix ("exectuions"), consistent with the labels used below.
    pl.xlabel("Objective function executions")
    pl.ylabel(NAME_FIXER.get(stat, stat))
    pl.legend(loc="best")
    pl.savefig(PATH_TO_PLOTS + plot_name.replace(" ", "_") + ".svg")
    pl.clf()
# Plot results
# ======================
pl.rcParams.update({'font.size': 11, 'font.family': 'serif'})

# Plot the percentage best results
# ==========================================
for alg_name in ALGORITHMS:
    # Convert counters into fractions of benchmark functions.
    in_the_best[alg_name] = np.array(in_the_best[alg_name]) / float(len(FUNCTIONS))
    pl.plot(X, in_the_best[alg_name] * 100.0,
            dashes=LINE_STYLES[alg_name], label=alg_name,
            linewidth=1)
pl.xlabel("Executions of objective function")
pl.ylabel("Probability of being able to achieve the best 1%")
pl.legend(loc="best")
pl.savefig(PATH_TO_PLOTS + "Average_Prob_Best.png")
# BUG FIX: clear the figure -- without this the rank-0 plot below was drawn
# on top of the curves (and legend) of this figure.
pl.clf()

# Plot and save Rank 0 Probability results
# ==================================================
RP_BOX_LOC = (0.995, 0.478)  # legend anchor for the rank-0 plot
DP_BOX_LOC = (0.995, 0.46)   # legend anchor for the data-profile plots
for alg_name in ALGORITHMS:
    pl.plot(X, rank_average[alg_name] * 100,
            dashes=LINE_STYLES[alg_name], label=alg_name,
            linewidth=1)
pl.xlabel("Executions of objective function")
pl.ylabel("Probability of being rank 0")
pl.ylim((0.0, 60.0))
pl.legend(bbox_to_anchor=RP_BOX_LOC)
pl.savefig(PATH_TO_PLOTS + "Average_Rank_0_Probability.png")

# Plot and save Data Profile results (one figure per tolerance)
# ============================================
pl.rcParams.update({'font.size': 14, 'font.family': 'serif'})
for tol in profile_tols:
    pl.clf()
    for alg_name in ALGORITHMS:
        pl.plot(X, profile_average[tol][alg_name] * 100,
                dashes=LINE_STYLES[alg_name], label=alg_name,
                linewidth=2)
    # Use a separate name instead of clobbering the loop variable `tol`.
    exponent = round(math.log10(tol))
    pl.xlabel("Executions of objective function")
    pl.ylabel("Percent successfully converged")
    pl.legend(bbox_to_anchor=DP_BOX_LOC)
    pl.savefig(PATH_TO_PLOTS + "Average_Data_Profile_T%i.png" % exponent)
| [
"pylab.title",
"pylab.rcParams.update",
"pylab.ylabel",
"os.popen",
"pylab.plot",
"pylab.savefig",
"math.log10",
"pickle.load",
"numpy.array",
"pylab.ylim",
"pylab.xlabel",
"pylab.clf",
"pylab.legend"
] | [((8594, 8655), 'pylab.rcParams.update', 'pl.rcParams.update', (["{'font.size': 11, 'font.family': 'serif'}"], {}), "({'font.size': 11, 'font.family': 'serif'})\n", (8612, 8655), True, 'import pylab as pl\n'), ((9037, 9082), 'pylab.xlabel', 'pl.xlabel', (['"""Executions of objective function"""'], {}), "('Executions of objective function')\n", (9046, 9082), True, 'import pylab as pl\n'), ((9083, 9144), 'pylab.ylabel', 'pl.ylabel', (['"""Probability of being able to achieve the best 1%"""'], {}), "('Probability of being able to achieve the best 1%')\n", (9092, 9144), True, 'import pylab as pl\n'), ((9145, 9166), 'pylab.legend', 'pl.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (9154, 9166), True, 'import pylab as pl\n'), ((9167, 9218), 'pylab.savefig', 'pl.savefig', (["(PATH_TO_PLOTS + 'Average_Prob_Best.png')"], {}), "(PATH_TO_PLOTS + 'Average_Prob_Best.png')\n", (9177, 9218), True, 'import pylab as pl\n'), ((9662, 9707), 'pylab.xlabel', 'pl.xlabel', (['"""Executions of objective function"""'], {}), "('Executions of objective function')\n", (9671, 9707), True, 'import pylab as pl\n'), ((9708, 9748), 'pylab.ylabel', 'pl.ylabel', (['"""Probability of being rank 0"""'], {}), "('Probability of being rank 0')\n", (9717, 9748), True, 'import pylab as pl\n'), ((9749, 9769), 'pylab.ylim', 'pl.ylim', (['(0.0, 60.0)'], {}), '((0.0, 60.0))\n', (9756, 9769), True, 'import pylab as pl\n'), ((9772, 9808), 'pylab.legend', 'pl.legend', ([], {'bbox_to_anchor': 'RP_BOX_LOC'}), '(bbox_to_anchor=RP_BOX_LOC)\n', (9781, 9808), True, 'import pylab as pl\n'), ((9809, 9869), 'pylab.savefig', 'pl.savefig', (["(PATH_TO_PLOTS + 'Average_Rank_0_Probability.png')"], {}), "(PATH_TO_PLOTS + 'Average_Rank_0_Probability.png')\n", (9819, 9869), True, 'import pylab as pl\n'), ((9975, 10036), 'pylab.rcParams.update', 'pl.rcParams.update', (["{'font.size': 14, 'font.family': 'serif'}"], {}), "({'font.size': 14, 'font.family': 'serif'})\n", (9993, 10036), True, 'import pylab as pl\n'), 
((8802, 8833), 'numpy.array', 'np.array', (['in_the_best[alg_name]'], {}), '(in_the_best[alg_name])\n', (8810, 8833), True, 'import numpy as np\n'), ((8912, 9016), 'pylab.plot', 'pl.plot', (['X', '(in_the_best[alg_name] * 100.0)'], {'dashes': 'LINE_STYLES[alg_name]', 'label': 'alg_name', 'linewidth': '(1)'}), '(X, in_the_best[alg_name] * 100.0, dashes=LINE_STYLES[alg_name],\n label=alg_name, linewidth=1)\n', (8919, 9016), True, 'import pylab as pl\n'), ((9496, 9599), 'pylab.plot', 'pl.plot', (['X', '(rank_average[alg_name] * 100)'], {'dashes': 'LINE_STYLES[alg_name]', 'label': 'alg_name', 'linewidth': '(1)'}), '(X, rank_average[alg_name] * 100, dashes=LINE_STYLES[alg_name],\n label=alg_name, linewidth=1)\n', (9503, 9599), True, 'import pylab as pl\n'), ((10065, 10073), 'pylab.clf', 'pl.clf', ([], {}), '()\n', (10071, 10073), True, 'import pylab as pl\n'), ((10342, 10387), 'pylab.xlabel', 'pl.xlabel', (['"""Executions of objective function"""'], {}), "('Executions of objective function')\n", (10351, 10387), True, 'import pylab as pl\n'), ((10392, 10435), 'pylab.ylabel', 'pl.ylabel', (['"""Percent successfully converged"""'], {}), "('Percent successfully converged')\n", (10401, 10435), True, 'import pylab as pl\n'), ((10440, 10476), 'pylab.legend', 'pl.legend', ([], {'bbox_to_anchor': 'DP_BOX_LOC'}), '(bbox_to_anchor=DP_BOX_LOC)\n', (10449, 10476), True, 'import pylab as pl\n'), ((10481, 10545), 'pylab.savefig', 'pl.savefig', (["(PATH_TO_PLOTS + 'Average_Data_Profile_T%i.png' % tol)"], {}), "(PATH_TO_PLOTS + 'Average_Data_Profile_T%i.png' % tol)\n", (10491, 10545), True, 'import pylab as pl\n'), ((2761, 2780), 'pylab.title', 'pl.title', (['plot_name'], {}), '(plot_name)\n', (2769, 2780), True, 'import pylab as pl\n'), ((2789, 2831), 'pylab.xlabel', 'pl.xlabel', (['"""Objective function exectuions"""'], {}), "('Objective function exectuions')\n", (2798, 2831), True, 'import pylab as pl\n'), ((2840, 2867), 'pylab.ylabel', 'pl.ylabel', (['NAME_FIXER[stat]'], {}), 
'(NAME_FIXER[stat])\n', (2849, 2867), True, 'import pylab as pl\n'), ((2876, 2897), 'pylab.legend', 'pl.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (2885, 2897), True, 'import pylab as pl\n'), ((2974, 2982), 'pylab.clf', 'pl.clf', ([], {}), '()\n', (2980, 2982), True, 'import pylab as pl\n'), ((5730, 5771), 'os.popen', 'os.popen', (["('ls %s*Compare*' % PATH_TO_DATA)"], {}), "('ls %s*Compare*' % PATH_TO_DATA)\n", (5738, 5771), False, 'import pickle, os\n'), ((6151, 6165), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (6162, 6165), False, 'import pickle, os\n'), ((8311, 8330), 'pylab.title', 'pl.title', (['plot_name'], {}), '(plot_name)\n', (8319, 8330), True, 'import pylab as pl\n'), ((8339, 8381), 'pylab.xlabel', 'pl.xlabel', (['"""Objective function exectuions"""'], {}), "('Objective function exectuions')\n", (8348, 8381), True, 'import pylab as pl\n'), ((8435, 8456), 'pylab.legend', 'pl.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (8444, 8456), True, 'import pylab as pl\n'), ((8533, 8541), 'pylab.clf', 'pl.clf', ([], {}), '()\n', (8539, 8541), True, 'import pylab as pl\n'), ((10114, 10226), 'pylab.plot', 'pl.plot', (['X', '(profile_average[tol][alg_name] * 100)'], {'dashes': 'LINE_STYLES[alg_name]', 'label': 'alg_name', 'linewidth': '(2)'}), '(X, profile_average[tol][alg_name] * 100, dashes=LINE_STYLES[\n alg_name], label=alg_name, linewidth=2)\n', (10121, 10226), True, 'import pylab as pl\n'), ((10270, 10285), 'math.log10', 'math.log10', (['tol'], {}), '(tol)\n', (10280, 10285), False, 'import math\n'), ((2380, 2394), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (2391, 2394), False, 'import pickle, os\n'), ((2620, 2717), 'pylab.plot', 'pl.plot', (['X', 'stats[alg_name][stat]'], {'dashes': 'LINE_STYLES[alg_name]', 'label': 'alg_name', 'linewidth': '(1)'}), '(X, stats[alg_name][stat], dashes=LINE_STYLES[alg_name], label=\n alg_name, linewidth=1)\n', (2627, 2717), True, 'import pylab as pl\n'), ((3436, 3450), 'pickle.load', 
'pickle.load', (['f'], {}), '(f)\n', (3447, 3450), False, 'import pickle, os\n'), ((4316, 4330), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (4327, 4330), False, 'import pickle, os\n'), ((5298, 5312), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (5309, 5312), False, 'import pickle, os\n'), ((6861, 6973), 'pylab.plot', 'pl.plot', (['X', "raw_data['dataprofile'][alg_name][T]"], {'dashes': 'LINE_STYLES[alg_name]', 'label': 'alg_name', 'linewidth': '(1)'}), "(X, raw_data['dataprofile'][alg_name][T], dashes=LINE_STYLES[\n alg_name], label=alg_name, linewidth=1)\n", (6868, 6973), True, 'import pylab as pl\n'), ((7193, 7234), 'numpy.array', 'np.array', (["raw_data['rank0prob'][alg_name]"], {}), "(raw_data['rank0prob'][alg_name])\n", (7201, 7234), True, 'import numpy as np\n'), ((1661, 1709), 'os.popen', 'os.popen', (["('ls %s*%s*' % (PATH_TO_DATA, alg_name))"], {}), "('ls %s*%s*' % (PATH_TO_DATA, alg_name))\n", (1669, 1709), False, 'import pickle, os\n'), ((7952, 8000), 'numpy.array', 'np.array', (["raw_data['dataprofile'][alg_name][tol]"], {}), "(raw_data['dataprofile'][alg_name][tol])\n", (7960, 8000), True, 'import numpy as np\n'), ((7380, 7421), 'numpy.array', 'np.array', (["raw_data['rank0prob'][alg_name]"], {}), "(raw_data['rank0prob'][alg_name])\n", (7388, 7421), True, 'import numpy as np\n'), ((8128, 8176), 'numpy.array', 'np.array', (["raw_data['dataprofile'][alg_name][tol]"], {}), "(raw_data['dataprofile'][alg_name][tol])\n", (8136, 8176), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains the entity resolver component of the MindMeld natural language processor.
"""
import copy
import hashlib
import json
import logging
import os
import pickle
import re
from abc import ABC, abstractmethod
from collections import OrderedDict
from string import punctuation
import numpy as np
import scipy
from elasticsearch.exceptions import ConnectionError as EsConnectionError
from elasticsearch.exceptions import ElasticsearchException, TransportError
from sklearn.feature_extraction.text import TfidfVectorizer
from tqdm.autonotebook import trange
from ._config import (
DEFAULT_ES_SYNONYM_MAPPING,
PHONETIC_ES_SYNONYM_MAPPING,
get_app_namespace,
get_classifier_config,
)
from ._elasticsearch_helpers import (
INDEX_TYPE_KB,
INDEX_TYPE_SYNONYM,
DOC_TYPE,
create_es_client,
delete_index,
does_index_exist,
get_field_names,
get_scoped_index_name,
load_index,
resolve_es_config_for_version,
)
from ._util import _is_module_available, _get_module_or_attr as _getattr
from .. import path
from ..core import Entity, Bunch
from ..exceptions import EntityResolverConnectionError, EntityResolverError
from ..resource_loader import ResourceLoader, Hasher
logger = logging.getLogger(__name__)
def _correct_deprecated_er_config(er_config):
"""
for backwards compatability
if `er_config` is supplied in deprecated format, its format is corrected and returned,
else it is not modified and returned as-is
deprecated usage
>>> er_config = {
"model_type": "text_relevance",
"model_settings": {
...
}
}
new usage
>>> er_config = {
"model_type": "resolver",
"model_settings": {
"resolver_type": "text_relevance"
...
}
}
"""
if not er_config.get("model_settings", {}).get("resolver_type", None):
model_type = er_config.get("model_type")
if model_type == "resolver":
raise Exception("Could not find `resolver_type` in `model_settings` of entity resolver")
else:
logger.warning("DeprecationWarning: Use latest format of configs for entity resolver. "
"See https://www.mindmeld.com/docs/userguide/entity_resolver.html "
"for more details.")
er_config = copy.deepcopy(er_config)
model_settings = er_config.get("model_settings", {})
model_settings.update({"resolver_type": model_type})
er_config["model_settings"] = model_settings
er_config["model_type"] = "resolver"
return er_config
def _torch(op, *args, sub="", **kwargs):
    """Resolve attribute ``op`` from ``torch`` (or ``torch.<sub>``) and call it."""
    module_path = "torch" if not sub else "torch." + sub
    resolved = _getattr(module_path, op)
    return resolved(*args, **kwargs)
class BertEmbedder:
    """
    Encoder class for bert models based on https://github.com/UKPLab/sentence-transformers
    """
    # class variable to cache bert model(s);
    # helps to mitigate keeping duplicate sets of large weight matrices
    CACHE_MODELS = {}
    @staticmethod
    def _batch_to_device(batch, target_device):
        """
        send a pytorch batch to a device (CPU/GPU)

        Only torch.Tensor values inside the batch dict are moved; any other
        entries (e.g. plain metadata) are left as-is.
        """
        tensor = _getattr("torch", "Tensor")
        for key in batch:
            if isinstance(batch[key], tensor):
                batch[key] = batch[key].to(target_device)
        return batch
    @staticmethod
    def _num_layers(model):
        """
        Finds the number of layers in a given transformers model

        Raises:
            ValueError: if the model exposes none of the known layer attributes
        """
        if hasattr(model, "n_layers"):  # eg. xlm
            num_layers = model.n_layers
        elif hasattr(model, "layer"):  # eg. xlnet
            num_layers = len(model.layer)
        elif hasattr(model, "encoder"):  # eg. bert
            num_layers = len(model.encoder.layer)
        elif hasattr(model, "transformer"):  # eg. sentence_transformers models
            num_layers = len(model.transformer.layer)
        else:
            raise ValueError(f"Not supported model {model} to obtain number of layers")
        return num_layers
    @property
    def device(self):
        # prefer GPU whenever torch reports CUDA availability
        return "cuda" if _torch("is_available", sub="cuda") else "cpu"
    @staticmethod
    def get_hashid(config):
        # deterministic hash of the config dict (keys sorted) used as the model cache key
        string = json.dumps(config, sort_keys=True)
        return Hasher(algorithm="sha1").hash(string=string)
    @staticmethod
    def get_sentence_transformers_encoder(name_or_path,
                                          output_type="mean",
                                          quantize=True,
                                          return_components=False):
        """
        Retrieves a sentence-transformer model and returns it along with its transformer and
        pooling components.
        Args:
            name_or_path: name or path to load a huggingface model
            output_type: type of pooling required ("mean" or "cls")
            quantize: if the model needs to be quantized or not
            return_components: if True, returns the Transformer and Pooling components of the
                sentence-bert model in a Bunch data type,
                else just returns the sentence-bert model
        Returns:
            Union[
                sentence_transformers.SentenceTransformer,
                Bunch(sentence_transformers.Transformer,
                      sentence_transformers.Pooling,
                      sentence_transformers.SentenceTransformer)
            ]
        """
        strans_models = _getattr("sentence_transformers.models")
        strans = _getattr("sentence_transformers", "SentenceTransformer")
        # output_hidden_states=True keeps all layer outputs, enabling last-n-layer concat later
        transformer_model = strans_models.Transformer(name_or_path,
                                                      model_args={"output_hidden_states": True})
        pooling_model = strans_models.Pooling(transformer_model.get_word_embedding_dimension(),
                                              pooling_mode_cls_token=output_type == "cls",
                                              pooling_mode_max_tokens=False,
                                              pooling_mode_mean_tokens=output_type == "mean",
                                              pooling_mode_mean_sqrt_len_tokens=False)
        sbert_model = strans(modules=[transformer_model, pooling_model])
        if quantize:
            if not _is_module_available("torch"):
                raise ImportError("`torch` library required to quantize models") from None
            torch_qint8 = _getattr("torch", "qint8")
            torch_nn_linear = _getattr("torch.nn", "Linear")
            torch_quantize_dynamic = _getattr("torch.quantization", "quantize_dynamic")
            # dynamic int8 quantization of Linear layers shrinks model size for CPU inference
            transformer_model = torch_quantize_dynamic(
                transformer_model, {torch_nn_linear}, dtype=torch_qint8
            ) if transformer_model else None
            pooling_model = torch_quantize_dynamic(
                pooling_model, {torch_nn_linear}, dtype=torch_qint8
            ) if pooling_model else None
            sbert_model = torch_quantize_dynamic(
                sbert_model, {torch_nn_linear}, dtype=torch_qint8
            ) if sbert_model else None
        if return_components:
            return Bunch(
                transformer_model=transformer_model,
                pooling_model=pooling_model,
                sbert_model=sbert_model
            )
        return sbert_model
    def _init_sentence_transformers_encoder(self, model_configs):
        # Loads (or reuses from the class-level cache) the sentence-bert model described by
        # `model_configs` and attaches its components to this instance.
        sbert_model = None
        sbert_model_hashid = self.get_hashid(model_configs)
        sbert_model_name = model_configs["pretrained_name_or_abspath"]
        sbert_output_type = model_configs["bert_output_type"]
        sbert_quantize_model = model_configs["quantize_model"]
        if sbert_model_hashid not in BertEmbedder.CACHE_MODELS:
            info_msg = ""
            # try the "sentence-transformers/" hub namespace first, then the raw name/path
            for name in [f"sentence-transformers/{sbert_model_name}", sbert_model_name]:
                try:
                    sbert_model = (
                        self.get_sentence_transformers_encoder(name,
                                                               output_type=sbert_output_type,
                                                               quantize=sbert_quantize_model,
                                                               return_components=True)
                    )
                    info_msg += f"Successfully initialized name/path `{name}` directly through " \
                                f"huggingface-transformers. "
                except OSError:
                    info_msg += f"Could not initialize name/path `{name}` directly through " \
                                f"huggingface-transformers. "
                if sbert_model:
                    break
            logger.info(info_msg)
            if not sbert_model:
                msg = f"Could not resolve the name/path `{sbert_model_name}`. " \
                      f"Please check the model name and retry."
                raise Exception(msg)
            BertEmbedder.CACHE_MODELS.update({sbert_model_hashid: sbert_model})
        sbert_model = BertEmbedder.CACHE_MODELS.get(sbert_model_hashid)
        self.transformer_model = sbert_model.transformer_model
        self.pooling_model = sbert_model.pooling_model
        self.sbert_model = sbert_model.sbert_model
    def _encode_local(self,
                      sentences,
                      batch_size,
                      show_progress_bar,
                      output_value,
                      convert_to_numpy,
                      convert_to_tensor,
                      device,
                      concat_last_n_layers,
                      normalize_token_embs):
        """
        Computes sentence embeddings (Note: Method largely derived from Sentence Transformers
        library to improve flexibility in encoding and pooling. Notably, `is_pretokenized` and
        `num_workers` are ignored due to deprecation in their library, retrieved 23-Feb-2021)
        """
        if concat_last_n_layers != 1:
            assert 1 <= concat_last_n_layers <= self._num_layers(self.transformer_model.auto_model)
        self.transformer_model.eval()
        if show_progress_bar is None:
            # default: show the bar only at verbose logging levels
            show_progress_bar = (
                logger.getEffectiveLevel() == logging.INFO or
                logger.getEffectiveLevel() == logging.DEBUG
            )
        if convert_to_tensor:
            convert_to_numpy = False
        input_is_string = isinstance(sentences, str)
        if input_is_string:  # Cast an individual sentence to a list with length 1
            sentences = [sentences]
        self.transformer_model.to(device)
        self.pooling_model.to(device)
        all_embeddings = []
        # sort by length so each batch holds similarly-sized inputs (less padding waste)
        length_sorted_idx = np.argsort([len(sen) for sen in sentences])
        sentences_sorted = [sentences[idx] for idx in length_sorted_idx]
        for start_index in trange(0, len(sentences), batch_size, desc="Batches",
                                  disable=not show_progress_bar):
            sentences_batch = sentences_sorted[start_index:start_index + batch_size]
            features = self.transformer_model.tokenize(sentences_batch)
            features = self._batch_to_device(features, device)
            with _torch("no_grad"):
                out_features_transformer = self.transformer_model.forward(features)
                token_embeddings = out_features_transformer["token_embeddings"]
                if concat_last_n_layers > 1:
                    # concatenate the hidden states of the last n layers along the feature dim
                    _all_layer_embs = out_features_transformer["all_layer_embeddings"]
                    token_embeddings = _torch(
                        "cat", _all_layer_embs[-concat_last_n_layers:], dim=-1)
                if normalize_token_embs:
                    _norm_token_embeddings = _torch(
                        "norm", token_embeddings, sub="linalg", dim=2, keepdim=True)
                    token_embeddings = token_embeddings.div(_norm_token_embeddings)
                out_features_transformer.update({"token_embeddings": token_embeddings})
                out_features = self.pooling_model.forward(out_features_transformer)
                embeddings = out_features[output_value]
                if output_value == 'token_embeddings':
                    # Set token embeddings to 0 for padding tokens
                    input_mask = out_features['attention_mask']
                    input_mask_expanded = input_mask.unsqueeze(-1).expand(embeddings.size()).float()
                    embeddings = embeddings * input_mask_expanded
                embeddings = embeddings.detach()
                if convert_to_numpy:
                    embeddings = embeddings.cpu()
                all_embeddings.extend(embeddings)
        # restore the caller's original input ordering
        all_embeddings = [all_embeddings[idx] for idx in np.argsort(length_sorted_idx)]
        if convert_to_tensor:
            all_embeddings = _torch("stack", all_embeddings)
        elif convert_to_numpy:
            all_embeddings = np.asarray([emb.numpy() for emb in all_embeddings])
        if input_is_string:
            all_embeddings = all_embeddings[0]
        return all_embeddings
    def encode(self, phrases, **kwargs):
        """Encodes input text(s) into embeddings, one vector for each phrase
        Args:
            phrases (str, list[str]): textual inputs that are to be encoded using sentence \
                transformers' model
            batch_size (int): the batch size used for the computation
            show_progress_bar (bool): Output a progress bar when encode sentences
            output_value (str): Default sentence_embedding, to get sentence embeddings.
                Can be set to token_embeddings to get wordpiece token embeddings.
            convert_to_numpy (bool): If true, the output is a list of numpy vectors. Else, it is a
                list of pytorch tensors.
            convert_to_tensor (bool): If true, you get one large tensor as return. Overwrites any
                setting from convert_to_numpy
            device: Which torch.device to use for the computation
            concat_last_n_layers (int): number of hidden outputs to concat starting from last layer
            normalize_token_embs (bool): if the (sub-)token embs are to be individually normalized
        Returns:
            (Union[List[Tensor], ndarray, Tensor]): By default, a list of tensors is returned.
                If convert_to_tensor, a stacked tensor is returned. If convert_to_numpy, a numpy
                matrix is returned.
        """
        if not phrases:
            return []
        if not isinstance(phrases, (str, list)):
            raise TypeError(f"argument phrases must be of type str or list, not {type(phrases)}")
        batch_size = kwargs.get("batch_size", 16)
        _len_phrases = len(phrases) if isinstance(phrases, list) else 1
        # no point showing a progress bar for a single phrase
        show_progress_bar = kwargs.get("show_progress_bar", False) and _len_phrases > 1
        output_value = kwargs.get("output_value", 'sentence_embedding')
        convert_to_numpy = kwargs.get("convert_to_numpy", True)
        convert_to_tensor = kwargs.get("convert_to_tensor", False)
        device = kwargs.get("device", self.device)
        concat_last_n_layers = kwargs.get("concat_last_n_layers", 1)
        normalize_token_embs = kwargs.get("normalize_token_embs", False)
        # `False` for first call but might not for the subsequent calls
        _use_sbert_model = getattr(self, "_use_sbert_model", False)
        if not _use_sbert_model:
            try:
                # this snippet is to reduce dependency on sentence-transformers library
                # note that currently, the dependency is not fully eliminated due to backwards
                # compatability issues in huggingface-transformers between older (python 3.6)
                # and newer (python >=3.7) versions which needs more conditions to be implemented
                # in `_encode_local` and hence will be addressed in future work
                # TODO: eliminate depedency on sentence-transformers library
                results = self._encode_local(phrases,
                                             batch_size=batch_size,
                                             show_progress_bar=show_progress_bar,
                                             output_value=output_value,
                                             convert_to_numpy=convert_to_numpy,
                                             convert_to_tensor=convert_to_tensor,
                                             device=device,
                                             concat_last_n_layers=concat_last_n_layers,
                                             normalize_token_embs=normalize_token_embs)
                setattr(self, "_use_sbert_model", False)
            except TypeError as e:
                # local encoding failed (e.g. API mismatch); fall back permanently to
                # sentence-transformers' own encode() for this instance
                logger.error(e)
                if concat_last_n_layers != 1 or normalize_token_embs:
                    msg = f"{'concat_last_n_layers,' if concat_last_n_layers != 1 else ''} " \
                          f"{'normalize_token_embs' if normalize_token_embs else ''} " \
                          f"ignored as resorting to using encode methods from sentence-transformers"
                    logger.warning(msg)
                setattr(self, "_use_sbert_model", True)
        if getattr(self, "_use_sbert_model"):
            results = self.sbert_model.encode(phrases,
                                              batch_size=batch_size,
                                              show_progress_bar=show_progress_bar,
                                              output_value=output_value,
                                              convert_to_numpy=convert_to_numpy,
                                              convert_to_tensor=convert_to_tensor,
                                              device=device)
        return results
class EntityResolverFactory:
    """Factory that builds the entity resolver implementation selected by the config."""
    @staticmethod
    def _validate_resolver_type(name):
        """Validates a resolver type name and its optional dependency.

        Raises when the name is not a known resolver type, or when the bert-based
        resolver is requested without the `sentence_transformers` extra installed.
        """
        if name not in ENTITY_RESOLVER_MODEL_TYPES:
            raise Exception(f"Expected 'resolver_type' in ENTITY_RESOLVER_CONFIG "
                            f"among {ENTITY_RESOLVER_MODEL_TYPES}")
        requires_sbert = name == "sbert_cosine_similarity"
        if requires_sbert and not _is_module_available("sentence_transformers"):
            raise ImportError(
                "Must install the extra [bert] by running `pip install mindmeld[bert]` "
                "to use the built in embbedder for entity resolution.")
    @classmethod
    def create_resolver(cls, app_path, entity_type, **kwargs):
        """
        Identifies appropriate entity resolver based on input config and
        returns it.
        Args:
            app_path (str): The application path.
            entity_type (str): The entity type associated with this entity resolver.
            resource_loader (ResourceLoader): An object which can load resources for the resolver.
            er_config (dict): A classifier config
            es_host (str): The Elasticsearch host server.
            es_client (Elasticsearch): The Elasticsearch client.
        """
        er_config = kwargs.pop("er_config", None)
        if not er_config:
            er_config = get_classifier_config("entity_resolution", app_path=app_path)
        er_config = _correct_deprecated_er_config(er_config)
        resolver_type = er_config["model_settings"]["resolver_type"]
        cls._validate_resolver_type(resolver_type)
        # NOTE: the default loader is constructed eagerly, mirroring the eager default
        # evaluation of the original kwargs.pop(...) call
        default_loader = ResourceLoader.create_resource_loader(app_path=app_path)
        resource_loader = kwargs.pop("resource_loader", default_loader)
        resolver_class = ENTITY_RESOLVER_MODEL_MAPPINGS.get(resolver_type)
        return resolver_class(app_path, entity_type, er_config, resource_loader, **kwargs)
class EntityResolverBase(ABC):
    """
    Base class for Entity Resolvers
    """
    def __init__(self, app_path, entity_type, resource_loader=None):
        """Initializes an entity resolver

        Args:
            app_path (str): The application path.
            entity_type (str): The entity type associated with this entity resolver.
            resource_loader (ResourceLoader, optional): An object which can load resources
                for the resolver; a default loader is created from ``app_path`` when omitted.
        """
        self.app_path = app_path
        self.type = entity_type
        self._resource_loader = (
            resource_loader or ResourceLoader.create_resource_loader(app_path=self.app_path)
        )
        self._is_system_entity = Entity.is_system_entity(self.type)
        self._no_trainable_canonical_entity_map = False
        self.dirty = False  # bool, True if exists any unsaved generated data that can be saved
        self.ready = False  # bool, True if the model is fit by calling .fit()
    @staticmethod
    def _process_entity_map(entity_type,
                            entity_map,
                            normalizer=None,
                            augment_lower_case=False,
                            augment_title_case=False,
                            augment_normalized=False,
                            normalize_aliases=False):
        """
        Loads in the mapping.json file and stores the synonym mappings in a item_map
        and a synonym_map

        Args:
            entity_type (str): The entity type associated with this entity resolver
            entity_map (dict): The loaded mapping.json file for the given entity type
            normalizer (callable): The normalizer to use, if provided, used to normalize synonyms
            augment_lower_case (bool): If to extend the synonyms list with their lower-cased values
            augment_title_case (bool): If to extend the synonyms list with their title-cased values
            augment_normalized (bool): If to extend the synonyms list with their normalized values,
                uses the provided normalizer
            normalize_aliases (bool): If to normalize every alias with the provided normalizer

        Returns:
            dict: with keys "items" (cname -> list of entity dicts) and
                "synonyms" (alias -> list of cnames)

        Raises:
            ValueError: if the same non-empty item id appears more than once
        """
        do_mutate_strings = any([augment_lower_case, augment_title_case, augment_normalized])
        if do_mutate_strings:
            msg = "Adding additional form of the whitelist and cnames to list of possible synonyms"
            logger.info(msg)
        item_map = {}
        syn_map = {}
        seen_ids = set()  # set gives O(1) duplicate-id checks (was a list with O(n) lookups)
        for item in entity_map.get("entities", []):
            cname = item["cname"]
            item_id = item.get("id")
            if cname in item_map:
                msg = "Canonical name %s specified in %s entity map multiple times"
                logger.debug(msg, cname, entity_type)
            if item_id and item_id in seen_ids:
                msg = "Item id {!r} specified in {!r} entity map multiple times"
                raise ValueError(msg.format(item_id, entity_type))
            seen_ids.add(item_id)
            # the canonical name itself always counts as one of its own aliases
            aliases = [cname] + item.pop("whitelist", [])
            if do_mutate_strings:
                new_aliases = []
                if augment_lower_case:
                    new_aliases.extend([string.lower() for string in aliases])
                if augment_title_case:
                    new_aliases.extend([string.title() for string in aliases])
                if augment_normalized and normalizer:
                    new_aliases.extend([normalizer(string) for string in aliases])
                aliases = set([*aliases, *new_aliases])
            if normalize_aliases:
                # NOTE(review): assumes a normalizer is provided whenever
                # normalize_aliases=True — confirm against callers
                alias_normalizer = normalizer
                aliases = [alias_normalizer(alias) for alias in aliases]
            items_for_cname = item_map.get(cname, [])
            items_for_cname.append(item)
            item_map[cname] = items_for_cname
            for alias in aliases:
                if alias in syn_map:
                    msg = "Synonym %s specified in %s entity map multiple times"
                    # bug fix: log the colliding alias, not the canonical name
                    logger.debug(msg, alias, entity_type)
                cnames_for_syn = syn_map.get(alias, [])
                cnames_for_syn.append(cname)
                syn_map[alias] = list(set(cnames_for_syn))
        return {"items": item_map, "synonyms": syn_map}
    def _load_entity_map(self, force_reload=False):
        """Loads the mapping.json data for this resolver's entity type."""
        return self._resource_loader.get_entity_map(self.type, force_reload=force_reload)
    @abstractmethod
    def _fit(self, clean, entity_map):
        """Fits the entity resolver model

        Args:
            clean (bool): If ``True``, deletes and recreates the index from scratch instead of
                updating the existing index with synonyms in the mapping.json.
            entity_map (json): json data loaded from `mapping.json` file for the entity type
        """
        raise NotImplementedError
    def fit(self, clean=False):
        """Fits the resolver model, if required

        Args:
            clean (bool, optional): If ``True``, deletes and recreates the index from scratch
                with synonyms in the mapping.json.
        """
        msg = f"Fitting {self.__class__.__name__} entity resolver for entity_type {self.type}"
        logger.info(msg)
        if (not clean) and self.ready:
            # already fit and no rebuild requested
            return
        if self._is_system_entity:
            # system entities come pre-resolved; nothing to train
            self._no_trainable_canonical_entity_map = True
            self.ready = True
            return
        # load data: list of canonical entities and their synonyms
        entity_map = self._load_entity_map()
        if not entity_map.get("entities", []):
            self._no_trainable_canonical_entity_map = True
            self.ready = True
            return
        self._fit(clean, entity_map)
        self.ready = True
        return
    @abstractmethod
    def _predict(self, nbest_entities, top_n):
        """Resolves the given n-best entities; implemented by each concrete resolver."""
        raise NotImplementedError
    def predict(self, entity, top_n: int = 20):
        """Predicts the resolved value(s) for the given entity using the loaded entity map or the
        trained entity resolution model.

        Args:
            entity (Entity, tuple[Entity], str, tuple[str]): An entity found in an input query,
                or a list of n-best entity objects.
            top_n (int): maximum number of results to populate

        Returns:
            (list): The top n resolved values for the provided entity.
        """
        if not self.ready:
            msg = "Resolver not ready, model must be built (.fit()) or loaded (.load()) first."
            logger.error(msg)
        nbest_entities = entity
        if not isinstance(nbest_entities, (list, tuple)):
            nbest_entities = tuple([nbest_entities])
        # wrap raw strings into Entity objects of this resolver's type
        nbest_entities = tuple(
            [Entity(e, self.type) if isinstance(e, str) else e for e in nbest_entities]
        )
        top_entity = nbest_entities[0]
        if self._is_system_entity:
            # system entities are already resolved
            return [top_entity.value]
        if self._no_trainable_canonical_entity_map:
            return []
        results = self._predict(nbest_entities, top_n)
        if not results:
            return None
        results = results[:top_n]
        if len(results) < top_n:
            logger.info(
                "Retrieved only %d entity resolutions instead of asked number %d for "
                "entity %r of type %r",
                len(results), top_n, nbest_entities[0].text, self.type,
            )
        return results
    @abstractmethod
    def _load(self):
        """Loads any persisted resolver artifacts; implemented by each concrete resolver."""
        raise NotImplementedError
    def load(self):
        """If available, loads embeddings of synonyms that are previously dumped
        """
        self._load()
    def __repr__(self):
        msg = "<{} ready: {!r}, dirty: {!r}>"
        return msg.format(self.__class__.__name__, self.ready, self.dirty)
class ElasticsearchEntityResolver(EntityResolverBase):
    """
    Resolver class based on Elastic Search
    """
    # prefix for Elasticsearch indices used to store synonyms for entity resolution
    ES_SYNONYM_INDEX_PREFIX = "synonym"
    """The prefix of the ES index."""
    def __init__(self, app_path, entity_type, er_config, resource_loader, **kwargs):
        super().__init__(app_path, entity_type, resource_loader=resource_loader)
        self._es_host = kwargs.get("es_host", None)
        # the creating process id is stored so forked workers get a fresh client (see _es_client)
        self._es_config = {"client": kwargs.get("es_client", None), "pid": os.getpid()}
        self._use_double_metaphone = "double_metaphone" in (
            er_config.get("model_settings", {}).get("phonetic_match_types", [])
        )
        self._app_namespace = get_app_namespace(self.app_path)
    @property
    def _es_index_name(self):
        # index name scoped to this entity type, e.g. "synonym_<entity_type>"
        return f"{ElasticsearchEntityResolver.ES_SYNONYM_INDEX_PREFIX}_{self.type}"
    @property
    def _es_client(self):
        # Lazily connect to Elasticsearch. Make sure each subprocess gets it's own connection
        if self._es_config["client"] is None or self._es_config["pid"] != os.getpid():
            self._es_config = {"pid": os.getpid(), "client": create_es_client()}
        return self._es_config["client"]
    @staticmethod
    def ingest_synonym(
        app_namespace,
        index_name,
        index_type=INDEX_TYPE_SYNONYM,
        field_name=None,
        data=None,
        es_host=None,
        es_client=None,
        use_double_metaphone=False,
    ):
        """Loads synonym documents from the mapping.json data into the
        specified index. If an index with the specified name doesn't exist, a
        new index with that name will be created.

        Args:
            app_namespace (str): The namespace of the app. Used to prevent
                collisions between the indices of this app and those of other
                apps.
            index_name (str): The name of the new index to be created.
            index_type (str): specify whether to import to synonym index or
                knowledge base object index. INDEX_TYPE_SYNONYM is the default
                which indicates the synonyms to be imported to synonym index,
                while INDEX_TYPE_KB indicates that the synonyms should be
                imported into existing knowledge base index.
            field_name (str): specify name of the knowledge base field that the
                synonym list corresponds to when index_type is
                INDEX_TYPE_SYNONYM.
            data (list): A list of documents to be loaded into the index.
            es_host (str): The Elasticsearch host server.
            es_client (Elasticsearch): The Elasticsearch client.
            use_double_metaphone (bool): Whether to use the phonetic mapping or not.
        """
        data = data or []
        def _action_generator(docs):
            # yields one ES bulk action per entity document
            for doc in docs:
                action = {}
                # id
                if doc.get("id"):
                    action["_id"] = doc["id"]
                else:
                    # generate hash from canonical name as ID
                    action["_id"] = hashlib.sha256(
                        doc.get("cname").encode("utf-8")
                    ).hexdigest()
                # synonym whitelist
                whitelist = doc["whitelist"]
                syn_list = []
                syn_list.append({"name": doc["cname"]})
                for syn in whitelist:
                    syn_list.append({"name": syn})
                # If index type is INDEX_TYPE_KB we import the synonym into knowledge base object
                # index by updating the knowledge base object with additional synonym whitelist
                # field. Otherwise, by default we import to synonym index in ES.
                if index_type == INDEX_TYPE_KB and field_name:
                    syn_field = field_name + "$whitelist"
                    action["_op_type"] = "update"
                    action["doc"] = {syn_field: syn_list}
                else:
                    action.update(doc)
                    action["whitelist"] = syn_list
                yield action
        # the phonetic mapping adds double-metaphone analyzed fields to the index
        mapping = (
            PHONETIC_ES_SYNONYM_MAPPING
            if use_double_metaphone
            else DEFAULT_ES_SYNONYM_MAPPING
        )
        es_client = es_client or create_es_client(es_host)
        mapping = resolve_es_config_for_version(mapping, es_client)
        load_index(
            app_namespace,
            index_name,
            _action_generator(data),
            len(data),
            mapping,
            DOC_TYPE,
            es_host,
            es_client,
        )
    def _fit(self, clean, entity_map):
        """Loads an entity mapping file to Elasticsearch for text relevance based entity resolution.
        In addition, the synonyms in entity mapping are imported to knowledge base indexes if the
        corresponding knowledge base object index and field name are specified for the entity type.
        The synonym info is then used by Question Answerer for text relevance matches.
        """
        try:
            if clean:
                delete_index(
                    self._app_namespace, self._es_index_name, self._es_host, self._es_client
                )
        except ValueError as e:  # when `clean = True` but no index to delete
            logger.error(e)
        entities = entity_map.get("entities", [])
        # create synonym index and import synonyms
        logger.info("Importing synonym data to synonym index '%s'", self._es_index_name)
        self.ingest_synonym(
            app_namespace=self._app_namespace,
            index_name=self._es_index_name,
            data=entities,
            es_host=self._es_host,
            es_client=self._es_client,
            use_double_metaphone=self._use_double_metaphone,
        )
        # It's supported to specify the KB object type and field name that the NLP entity type
        # corresponds to in the mapping.json file. In this case the synonym whitelist is also
        # imported to KB object index and the synonym info will be used when using Question Answerer
        # for text relevance matches.
        kb_index = entity_map.get("kb_index_name")
        kb_field = entity_map.get("kb_field_name")
        # if KB index and field name is specified then also import synonyms into KB object index.
        if kb_index and kb_field:
            # validate the KB index and field are valid.
            # TODO: this validation can probably be in some other places like resource loader.
            if not does_index_exist(
                self._app_namespace, kb_index, self._es_host, self._es_client
            ):
                raise ValueError(
                    "Cannot import synonym data to knowledge base. The knowledge base "
                    "index name '{}' is not valid.".format(kb_index)
                )
            if kb_field not in get_field_names(
                self._app_namespace, kb_index, self._es_host, self._es_client
            ):
                raise ValueError(
                    "Cannot import synonym data to knowledge base. The knowledge base "
                    "field name '{}' is not valid.".format(kb_field)
                )
            if entities and not entities[0].get("id"):
                raise ValueError(
                    "Knowledge base index and field cannot be specified for entities "
                    "without ID."
                )
            logger.info("Importing synonym data to knowledge base index '%s'", kb_index)
            ElasticsearchEntityResolver.ingest_synonym(
                app_namespace=self._app_namespace,
                index_name=kb_index,
                index_type="kb",
                field_name=kb_field,
                data=entities,
                es_host=self._es_host,
                es_client=self._es_client,
                use_double_metaphone=self._use_double_metaphone,
            )
    def _predict(self, nbest_entities, top_n):
        """Predicts the resolved value(s) for the given entity using the loaded entity map or the
        trained entity resolution model.

        Args:
            nbest_entities (tuple): List of one entity object found in an input query, or a list \
                of n-best entity objects.

        Returns:
            (list): The resolved values for the provided entity.
        """
        top_entity = nbest_entities[0]
        # linearly decaying weights: the top hypothesis counts the most, later ones less
        weight_factors = [1 - float(i) / len(nbest_entities) for i in range(len(nbest_entities))]
        def _construct_match_query(entity, weight=1):
            # full set of cname matchers (keyword, raw and char-ngram) for the top hypothesis
            return [
                {
                    "match": {
                        "cname.normalized_keyword": {
                            "query": entity.text,
                            "boost": 10 * weight,
                        }
                    }
                },
                {"match": {"cname.raw": {"query": entity.text, "boost": 10 * weight}}},
                {
                    "match": {
                        "cname.char_ngram": {"query": entity.text, "boost": weight}
                    }
                },
            ]
        def _construct_nbest_match_query(entity, weight=1):
            # lighter matcher used for the 2nd..nth hypotheses
            return [
                {
                    "match": {
                        "cname.normalized_keyword": {
                            "query": entity.text,
                            "boost": weight,
                        }
                    }
                }
            ]
        def _construct_phonetic_match_query(entity, weight=1):
            # phonetic matcher over the double-metaphone analyzed cname field
            return [
                {
                    "match": {
                        "cname.double_metaphone": {
                            "query": entity.text,
                            "boost": 2 * weight,
                        }
                    }
                }
            ]
        def _construct_whitelist_query(entity, weight=1, use_phons=False):
            # nested query over the synonym whitelist; score_mode "max" keeps the best synonym,
            # inner_hits exposes which synonym matched
            query = {
                "nested": {
                    "path": "whitelist",
                    "score_mode": "max",
                    "query": {
                        "bool": {
                            "should": [
                                {
                                    "match": {
                                        "whitelist.name.normalized_keyword": {
                                            "query": entity.text,
                                            "boost": 10 * weight,
                                        }
                                    }
                                },
                                {
                                    "match": {
                                        "whitelist.name": {
                                            "query": entity.text,
                                            "boost": weight,
                                        }
                                    }
                                },
                                {
                                    "match": {
                                        "whitelist.name.char_ngram": {
                                            "query": entity.text,
                                            "boost": weight,
                                        }
                                    }
                                },
                            ]
                        }
                    },
                    "inner_hits": {},
                }
            }
            if use_phons:
                query["nested"]["query"]["bool"]["should"].append(
                    {
                        "match": {
                            "whitelist.double_metaphone": {
                                "query": entity.text,
                                "boost": 3 * weight,
                            }
                        }
                    }
                )
            return query
        # function_score adds a popularity boost from the optional "sort_factor" field;
        # log1p dampens large values, "missing": 0 keeps docs without the field scoreable
        text_relevance_query = {
            "query": {
                "function_score": {
                    "query": {"bool": {"should": []}},
                    "field_value_factor": {
                        "field": "sort_factor",
                        "modifier": "log1p",
                        "factor": 10,
                        "missing": 0,
                    },
                    "boost_mode": "sum",
                    "score_mode": "sum",
                }
            }
        }
        match_query = []
        top_transcript = True
        for e, weight in zip(nbest_entities, weight_factors):
            if top_transcript:
                match_query.extend(_construct_match_query(e, weight))
                top_transcript = False
            else:
                match_query.extend(_construct_nbest_match_query(e, weight))
            if self._use_double_metaphone:
                match_query.extend(_construct_phonetic_match_query(e, weight))
        text_relevance_query["query"]["function_score"]["query"]["bool"][
            "should"
        ].append({"bool": {"should": match_query}})
        # the whitelist (synonym) query only uses the top hypothesis
        whitelist_query = _construct_whitelist_query(
            top_entity, use_phons=self._use_double_metaphone
        )
        text_relevance_query["query"]["function_score"]["query"]["bool"][
            "should"
        ].append(whitelist_query)
        try:
            index = get_scoped_index_name(self._app_namespace, self._es_index_name)
            response = self._es_client.search(index=index, body=text_relevance_query)
        except EsConnectionError as ex:
            logger.error(
                "Unable to connect to Elasticsearch: %s details: %s", ex.error, ex.info
            )
            raise EntityResolverConnectionError(es_host=self._es_client.transport.hosts) from ex
        except TransportError as ex:
            logger.error(
                "Unexpected error occurred when sending requests to Elasticsearch: %s "
                "Status code: %s details: %s",
                ex.error,
                ex.status_code,
                ex.info,
            )
            raise EntityResolverError(
                "Unexpected error occurred when sending requests to "
                "Elasticsearch: {} Status code: {} details: "
                "{}".format(ex.error, ex.status_code, ex.info)
            ) from ex
        except ElasticsearchException as ex:
            raise EntityResolverError from ex
        else:
            hits = response["hits"]["hits"]
            results = []
            for hit in hits:
                # with phonetic matching over multiple hypotheses, drop low aggregate scores
                if self._use_double_metaphone and len(nbest_entities) > 1:
                    if hit["_score"] < 0.5 * len(nbest_entities):
                        continue
                top_synonym = None
                synonym_hits = hit["inner_hits"]["whitelist"]["hits"]["hits"]
                if synonym_hits:
                    top_synonym = synonym_hits[0]["_source"]["name"]
                result = {
                    "cname": hit["_source"]["cname"],
                    "score": hit["_score"],
                    "top_synonym": top_synonym,
                }
                if hit["_source"].get("id"):
                    result["id"] = hit["_source"].get("id")
                if hit["_source"].get("sort_factor"):
                    result["sort_factor"] = hit["_source"].get("sort_factor")
                results.append(result)
            return results
    def _load(self):
        """Loads the trained entity resolution model from disk."""
        try:
            scoped_index_name = get_scoped_index_name(
                self._app_namespace, self._es_index_name
            )
            if not self._es_client.indices.exists(index=scoped_index_name):
                # index missing: (re)build it from the entity mapping data
                self.fit()
        except EsConnectionError as e:
            logger.error(
                "Unable to connect to Elasticsearch: %s details: %s", e.error, e.info
            )
            raise EntityResolverConnectionError(es_host=self._es_client.transport.hosts) from e
        except TransportError as e:
            logger.error(
                "Unexpected error occurred when sending requests to Elasticsearch: %s "
                "Status code: %s details: %s",
                e.error,
                e.status_code,
                e.info,
            )
            raise EntityResolverError from e
        except ElasticsearchException as e:
            raise EntityResolverError from e
class ExactMatchEntityResolver(EntityResolverBase):
    """Entity resolver that matches normalized entity text exactly against known synonyms."""
    def __init__(self, app_path, entity_type, er_config, resource_loader, **_kwargs):
        super().__init__(app_path, entity_type, resource_loader=resource_loader)
        model_settings = er_config.get("model_settings", {})
        self._augment_lower_case = model_settings.get("augment_lower_case", False)
        # alias -> cnames lookup table, populated by _fit()
        self._processed_entity_map = None
    def _fit(self, clean, entity_map):
        """Builds the normalized synonym lookup table from the entity mapping data."""
        if clean:
            logger.info(
                "clean=True ignored while fitting ExactMatchEntityResolver"
            )
        normalize = self._resource_loader.query_factory.normalize
        self._processed_entity_map = self._process_entity_map(
            self.type,
            entity_map,
            normalizer=normalize,
            augment_lower_case=self._augment_lower_case,
            normalize_aliases=True
        )
    def _predict(self, nbest_entities, top_n):
        """Looks for exact name in the synonyms data
        """
        top_entity = nbest_entities[0]
        normalized_text = self._resource_loader.query_factory.normalize(top_entity.text)
        try:
            cnames = self._processed_entity_map["synonyms"][normalized_text]
        except (KeyError, TypeError):
            logger.warning(
                "Failed to resolve entity %r for type %r", top_entity.text, top_entity.type
            )
            return None
        if len(cnames) > 1:
            logger.info(
                "Multiple possible canonical names for %r entity for type %r",
                top_entity.text,
                top_entity.type,
            )
        resolved_values = []
        for cname in cnames:
            for item in self._processed_entity_map["items"][cname]:
                # shallow-copy so popping "whitelist" doesn't mutate the stored item
                value = copy.copy(item)
                value.pop("whitelist", None)
                resolved_values.append(value)
        return resolved_values
    def _load(self):
        """Rebuilds the lookup table; exact matching keeps no separate on-disk artifacts."""
        self.fit()
class SentenceBertCosSimEntityResolver(EntityResolverBase, BertEmbedder):
    """
    Resolver class for bert models as described here:
    https://github.com/UKPLab/sentence-transformers

    Synonyms from the entity mapping are embedded once with a
    sentence-transformers model and cached on disk; at prediction time the
    input entity text is embedded and ranked against the cached synonym
    embeddings by cosine similarity.
    """
    def __init__(self, app_path, entity_type, er_config, resource_loader, **_kwargs):
        """
        Args:
            app_path (str): The application path.
            entity_type (str): The entity type served by this resolver.
            er_config (dict): Resolver configuration; missing model settings
                are filled in from ``default_er_config``.
            resource_loader (ResourceLoader): Resource loader for the app.
        """
        super().__init__(app_path, entity_type, resource_loader=resource_loader)
        # Fill in any missing model settings from the defaults so the cache
        # path below is derived from a complete, stable configuration.
        # BUGFIX: use setdefault so user-supplied settings are kept instead of
        # being unconditionally overwritten by the defaults.
        er_config.setdefault("model_settings", {})
        for key, value in self.default_er_config.get("model_settings", {}).items():
            er_config["model_settings"].setdefault(key, value)
        self.batch_size = er_config["model_settings"]["batch_size"]
        # Settings that select/configure the underlying encoder model.
        _model_configs = {
            "pretrained_name_or_abspath": er_config["model_settings"]["pretrained_name_or_abspath"],
            "bert_output_type": er_config["model_settings"]["bert_output_type"],
            "quantize_model": er_config["model_settings"]["quantize_model"],
        }
        # Settings that affect how embeddings are computed/augmented.
        self._runtime_configs = {
            "concat_last_n_layers": er_config["model_settings"]["concat_last_n_layers"],
            "normalize_token_embs": er_config["model_settings"]["normalize_token_embs"],
            "augment_lower_case": er_config["model_settings"]["augment_lower_case"],
            "augment_average_synonyms_embeddings":
                er_config["model_settings"]["augment_average_synonyms_embeddings"],
        }
        # The cache file is keyed on every setting that changes the embeddings,
        # so a configuration change never reuses stale vectors.
        self.cache_path = self.get_cache_path(
            app_path=self.app_path,
            er_config={**_model_configs, **self._runtime_configs},
            entity_type=self.type
        )
        self._processed_entity_map = None
        self._synonyms = None  # OrderedDict: synonym text -> row index in the embeddings array
        self._synonyms_embs = None  # 2d array, one embedding row per synonym
        self._init_sentence_transformers_encoder(_model_configs)
    @property
    def default_er_config(self):
        """Default model settings used to fill unspecified configuration keys."""
        defaults = {
            "model_settings": {
                "pretrained_name_or_abspath": "distilbert-base-nli-stsb-mean-tokens",
                "batch_size": 16,
                "concat_last_n_layers": 4,
                "normalize_token_embs": True,
                "bert_output_type": "mean",
                "augment_lower_case": False,
                "quantize_model": True,
                "augment_average_synonyms_embeddings": True
            }
        }
        return defaults
    @staticmethod
    def get_cache_path(app_path, er_config, entity_type):
        """Obtains and return a unique cache path for saving synonyms' embeddings
        Args:
            er_config: the er_config dictionary of the resolver class
            entity_type: entity type of the class instance, for unique path identification
        Return:
            str: path with a .pkl extension to cache embeddings
        """
        # Hash the full config so that any change to it maps to a new cache file.
        string = json.dumps(er_config, sort_keys=True)
        hashid = Hasher(algorithm="sha1").hash(string=string)
        hashid = f"{hashid}$synonym_{entity_type}"
        return path.get_entity_resolver_cache_file_path(app_path, hashid)
    @staticmethod
    def _compute_cosine_similarity(synonyms,
                                   synonyms_encodings,
                                   entity_emb,
                                   top_n,
                                   return_as_dict=False):
        """Uses cosine similarity metric on synonym embeddings to sort most relevant ones
        for entity resolution
        Args:
            synonyms (dict): a dict of synonym and its corresponding embedding's row index
                in synonyms_encodings
            synonyms_encodings (np.array): a 2d array of embedding of the synonyms; an array of
                size equal to number of synonyms
            entity_emb (np.array): a 2d array of embedding of the input entity text(s)
            top_n (int): maximum number of results to populate
        Returns:
            Union[dict, list[tuple]]: if return_as_dict, returns a dictionary of synonyms and their
                scores, else a list of sorted synonym names, paired with
                their similarity scores (descending)
        """
        n_entities = len(entity_emb)
        is_single = n_entities == 1
        # [n_syns, emd_dim] -> [n_entities, n_syns, emd_dim]
        t_syn_enc = _torch("as_tensor", synonyms_encodings)
        t_syn_enc = t_syn_enc.expand([n_entities, *t_syn_enc.shape])
        # [n_entities, emd_dim] -> [n_entities, n_syns, emd_dim]
        t_entity_emb = _torch("as_tensor", entity_emb)
        t_entity_emb = t_entity_emb.unsqueeze(dim=1).expand_as(t_syn_enc)
        # returns -> [n_entities, n_syns]
        similarity_scores_2d = _torch(
            "cosine_similarity", t_syn_enc, t_entity_emb, dim=-1).numpy()
        results = []
        for similarity_scores in similarity_scores_2d:
            similarity_scores = similarity_scores.reshape(-1)
            # rounding stabilizes ordering between near-identical matches
            similarity_scores = np.around(similarity_scores, decimals=2)
            if return_as_dict:
                results.append(dict(zip(synonyms.keys(), similarity_scores)))
            else:
                # results in descending scores
                n_scores = len(similarity_scores)
                if n_scores > top_n:
                    # argpartition is O(n); only the top_n winners are then sorted
                    top_inds = similarity_scores.argpartition(n_scores - top_n)[-top_n:]
                    result = sorted(
                        zip(np.asarray([*synonyms.keys()])[top_inds], similarity_scores[top_inds]),
                        key=lambda x: x[1], reverse=True)
                else:
                    result = sorted(zip(synonyms.keys(), similarity_scores), key=lambda x: x[1],
                                    reverse=True)
                results.append(result)
        if is_single:
            return results[0]
        return results
    def _fit(self, clean, entity_map):
        """Fits the resolver: computes (or loads cached) synonym embeddings.

        Args:
            clean (bool): If True, discards any cached embeddings first.
            entity_map (dict): The entity mapping data (mapping.json contents).
        """
        if clean and os.path.exists(self.cache_path):
            os.remove(self.cache_path)
        # load mapping.json data and process it
        augment_lower_case = self._runtime_configs["augment_lower_case"]
        self._processed_entity_map = self._process_entity_map(
            self.type,
            entity_map,
            augment_lower_case=augment_lower_case
        )
        # load embeddings from cache if exists, encode any other synonyms if required
        synonyms, synonyms_embs = OrderedDict(), np.empty(0)
        if os.path.exists(self.cache_path):
            logger.info("Cached embs exists for entity %s. "
                        "Loading existing data from: %s",
                        self.type, self.cache_path)
            cached_data = self._load_embeddings(self.cache_path)
            synonyms, synonyms_embs = cached_data["synonyms"], cached_data["synonyms_embs"]
        new_synonyms_to_encode = [syn for syn in self._processed_entity_map["synonyms"] if
                                  syn not in synonyms]
        if new_synonyms_to_encode:
            new_synonyms_encodings = (
                self.encode(
                    new_synonyms_to_encode,
                    batch_size=self.batch_size,
                    concat_last_n_layers=self._runtime_configs["concat_last_n_layers"],
                    normalize_token_embs=self._runtime_configs["normalize_token_embs"],
                )
            )
            synonyms_embs = new_synonyms_encodings if not synonyms else np.concatenate(
                [synonyms_embs, new_synonyms_encodings])
            synonyms.update(
                OrderedDict(zip(
                    new_synonyms_to_encode,
                    np.arange(len(synonyms), len(synonyms) + len(new_synonyms_to_encode)))
                )
            )
        # encode artificial synonyms if required
        # BUGFIX: initialize these lists unconditionally, because ``do_dump``
        # below references them even when augmentation is disabled (previously
        # a NameError when augment_average_synonyms_embeddings was False).
        dummy_new_synonyms_to_encode, dummy_new_synonyms_encodings = [], []
        if self._runtime_configs["augment_average_synonyms_embeddings"]:
            # obtain cnames to synonyms mapping
            entity_mapping_synonyms = self._processed_entity_map["synonyms"]
            cnames2synonyms = {}
            for syn, cnames in entity_mapping_synonyms.items():
                for cname in cnames:
                    items = cnames2synonyms.get(cname, [])
                    items.append(syn)
                    cnames2synonyms[cname] = items
            # assert dummy synonyms
            for cname, syns in cnames2synonyms.items():
                dummy_synonym = f"{cname} - SYNONYMS AVERAGE"
                # update synonyms map 'cause such synonyms don't actually exist in mapping.json file
                dummy_synonym_mappings = entity_mapping_synonyms.get(dummy_synonym, [])
                dummy_synonym_mappings.append(cname)
                entity_mapping_synonyms[dummy_synonym] = dummy_synonym_mappings
                # check if needs to be encoded
                if dummy_synonym in synonyms:
                    continue
                # if required, obtain dummy encoding (mean of the cname's synonym
                # embeddings) and update collections
                dummy_encoding = np.mean([synonyms_embs[synonyms[syn]] for syn in syns], axis=0)
                dummy_new_synonyms_to_encode.append(dummy_synonym)
                dummy_new_synonyms_encodings.append(dummy_encoding)
            if dummy_new_synonyms_encodings:
                dummy_new_synonyms_encodings = np.vstack(dummy_new_synonyms_encodings)
            if dummy_new_synonyms_to_encode:
                synonyms_embs = dummy_new_synonyms_encodings if not synonyms else np.concatenate(
                    [synonyms_embs, dummy_new_synonyms_encodings])
                synonyms.update(
                    OrderedDict(zip(
                        dummy_new_synonyms_to_encode,
                        np.arange(len(synonyms), len(synonyms) + len(dummy_new_synonyms_to_encode)))
                    )
                )
        # dump embeddings if required
        self._synonyms, self._synonyms_embs = synonyms, synonyms_embs
        do_dump = (
            new_synonyms_to_encode or
            dummy_new_synonyms_to_encode or
            not os.path.exists(self.cache_path)
        )
        if do_dump:
            data_dump = {"synonyms": self._synonyms, "synonyms_embs": self._synonyms_embs}
            self._dump_embeddings(self.cache_path, data_dump)
        self.dirty = False  # never True with the current logic, kept for consistency purpose
    def _predict(self, nbest_entities, top_n):
        """Predicts the resolved value(s) for the given entity using cosine similarity.
        Args:
            nbest_entities (tuple): List of one entity object found in an input query, or a list \
                of n-best entity objects.
        Returns:
            (list): The resolved values for the provided entity.
        """
        synonyms, synonyms_encodings = self._synonyms, self._synonyms_embs
        # encode input entity
        # TODO: Use all provided entities (i.e all nbest_entities) like elastic search
        top_entity = nbest_entities[0]  # top_entity
        existing_index = synonyms.get(top_entity.text, None)
        # BUGFIX: compare against None — index 0 is a valid cached position and
        # must not be treated as a cache miss.
        if existing_index is not None:
            top_entity_emb = synonyms_encodings[existing_index]
        else:
            top_entity_emb = (
                self.encode(
                    top_entity.text,
                    concat_last_n_layers=self._runtime_configs["concat_last_n_layers"],
                    normalize_token_embs=self._runtime_configs["normalize_token_embs"],
                )
            )
        top_entity_emb = top_entity_emb.reshape(1, -1)
        try:
            sorted_items = self._compute_cosine_similarity(
                synonyms, synonyms_encodings, top_entity_emb, top_n
            )
            values = []
            for synonym, score in sorted_items:
                cnames = self._processed_entity_map["synonyms"][synonym]
                for cname in cnames:
                    for item in self._processed_entity_map["items"][cname]:
                        item_value = copy.copy(item)
                        item_value.pop("whitelist", None)
                        item_value.update({"score": score})
                        item_value.update({"top_synonym": synonym})
                        values.append(item_value)
        except KeyError:
            logger.warning(
                "Failed to resolve entity %r for type %r; "
                "set 'clean=True' for computing embeddings of newly added items in mappings.json",
                top_entity.text, top_entity.type
            )
            return None
        except TypeError:
            logger.warning(
                "Failed to resolve entity %r for type %r", top_entity.text, top_entity.type
            )
            return None
        return values
    def _load(self):
        """(Re)computes or loads the synonym embeddings when the resolver is loaded."""
        self.fit()
    @staticmethod
    def _load_embeddings(cache_path):
        """Loads embeddings for all synonyms, previously dumped into a .pkl file
        """
        with open(cache_path, "rb") as fp:
            _cached_embs = pickle.load(fp)
        return _cached_embs
    def _dump_embeddings(self, cache_path, data):
        """Dumps embeddings of synonyms into a .pkl file when the .fit() method is called
        """
        # FIX: removed the duplicated word in the log message ("are are").
        msg = f"bert embeddings are being cached for entity_type: `{self.type}` " \
              f"for quicker entity resolution; consumes some disk space"
        logger.info(msg)
        folder = os.path.split(cache_path)[0]
        if folder and not os.path.exists(folder):
            os.makedirs(folder)
        with open(cache_path, "wb") as fp:
            pickle.dump(data, fp)
    def _predict_batch(self, nbest_entities_list, batch_size, top_n):
        """Batched variant of ``_predict``: resolves many entities in one pass.

        Args:
            nbest_entities_list (list): one tuple of entity objects per input.
            batch_size (int): number of entities encoded/scored per chunk.
            top_n (int): maximum number of results per input.
        Returns:
            (list | None): one list of resolved values per input, or None on failure.
        """
        synonyms, synonyms_encodings = self._synonyms, self._synonyms_embs
        # encode input entity
        top_entity_list = [i[0] for i in nbest_entities_list]  # top_entity
        top_entity_emb_list = []
        for st_idx in trange(0, len(top_entity_list), batch_size, disable=False):
            batch = [top_entity.text for top_entity in top_entity_list[st_idx:st_idx + batch_size]]
            top_entity_emb_list.append(
                self.encode(
                    batch,
                    show_progress_bar=False,
                    batch_size=self.batch_size,
                    concat_last_n_layers=self._runtime_configs["concat_last_n_layers"],
                    normalize_token_embs=self._runtime_configs["normalize_token_embs"],
                )
            )
        top_entity_emb_list = np.vstack(top_entity_emb_list)
        try:
            # w/o batch, [ nsyms x 768*4 ] x [ 1 x 768*4 ] --> [ nsyms x 1 ]
            # w/ batch,  [ nsyms x 768*4 ] x [ k x 768*4 ] --> [ nsyms x k ]
            sorted_items_list = []
            for st_idx in trange(0, len(top_entity_emb_list), batch_size, disable=False):
                batch = top_entity_emb_list[st_idx:st_idx + batch_size]
                result = self._compute_cosine_similarity(synonyms, synonyms_encodings, batch, top_n)
                # _compute_cosine_similarity unwraps single-element batches
                if len(batch) == 1:
                    result = [result]
                sorted_items_list.extend(result)
            values_list = []
            for sorted_items in sorted_items_list:
                values = []
                for synonym, score in sorted_items:
                    cnames = self._processed_entity_map["synonyms"][synonym]
                    for cname in cnames:
                        for item in self._processed_entity_map["items"][cname]:
                            item_value = copy.copy(item)
                            item_value.pop("whitelist", None)
                            item_value.update({"score": score})
                            item_value.update({"top_synonym": synonym})
                            values.append(item_value)
                values_list.append(values)
        except (KeyError, TypeError) as e:
            logger.error(e)
            return None
        return values_list
    def predict_batch(self, entity_list, top_n: int = 20, batch_size: int = 16):
        """Resolves a list of entities in batches.

        Args:
            entity_list (list): entities (or n-best tuples of entities) to resolve.
            top_n (int): maximum number of results kept per entity.
            batch_size (int): encoding/scoring chunk size.
        Returns:
            (list | None): one list of resolved values per input entity.
        """
        if self._no_trainable_canonical_entity_map:
            return [[] for _ in entity_list]
        nbest_entities_list = []
        results_list = []
        for entity in entity_list:
            if isinstance(entity, (list, tuple)):
                top_entity = entity[0]
                nbest_entities = tuple(entity)
            else:
                top_entity = entity
                nbest_entities = tuple([entity])
            nbest_entities_list.append(nbest_entities)
            if self._is_system_entity:
                # system entities are already resolved
                results_list.append(top_entity.value)
        if self._is_system_entity:
            return results_list
        # guard against an empty batch (np.vstack of no chunks would raise)
        if not nbest_entities_list:
            return []
        results_list = self._predict_batch(nbest_entities_list, batch_size, top_n)
        # BUGFIX: _predict_batch returns None on failure; previously this fell
        # through to enumerate(None) and raised an unrelated TypeError.
        if results_list is None:
            return None
        for i, results in enumerate(results_list):
            if results:
                results_list[i] = results[:top_n]
        return results_list
class TfIdfSparseCosSimEntityResolver(EntityResolverBase):
    """
    a tf-idf based entity resolver using sparse matrices. ref:
    scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.TfidfVectorizer.html
    """
    def __init__(self, app_path, entity_type, er_config, resource_loader, **_kwargs):
        """Initializes the resolver and reads augmentation settings from er_config."""
        super().__init__(app_path, entity_type, resource_loader=resource_loader)
        # Augmentation flags: each adds extra surface forms of the synonyms to
        # the processed entity map before vectorization.
        self._aug_lower_case = er_config.get("model_settings", {}).get("augment_lower_case", True)
        self._aug_title_case = er_config.get("model_settings", {}).get("augment_title_case", False)
        self._aug_normalized = er_config.get("model_settings", {}).get("augment_normalized", False)
        # When True, adds one artificial per-cname vector built from the
        # element-wise max of that cname's synonym vectors (see _fit).
        self._aug_max_syn_embs = (
            er_config.get("model_settings", {}).get("augment_max_synonyms_embeddings", True)
        )
        self._processed_entity_map = None
        self.ngram_length = 5  # max number of character ngrams to consider
        self._vectorizer = \
            TfidfVectorizer(analyzer=self._char_ngram_and_word_analyzer, lowercase=False)
        self._syn_tfidf_matrix = None  # sparse tf-idf matrix, one row per synonym
        self._unique_synonyms = []  # row order of _syn_tfidf_matrix
    def _char_ngram_and_word_analyzer(self, string):
        """Tokenizes ``string`` into character n-grams plus whole words."""
        results = self._char_ngram_analyzer(string)
        # add words
        words = re.split(r'[\s{}]+'.format(re.escape(punctuation)), string.strip())
        results.extend(words)
        return results
    def _char_ngram_analyzer(self, string):
        """Returns the set of character n-grams (n <= ngram_length) of ``string``."""
        results = []
        # give more importance to starting and ending characters of a word
        string = f" {string.strip()} "
        for n in range(self.ngram_length + 1):
            results.extend([''.join(gram) for gram in zip(*[string[i:] for i in range(n)])])
        results = list(set(results))
        results.remove(' ')
        # adding lowercased single characters might add more noise
        results = [r for r in results if not (len(r) == 1 and r.islower())]
        return results
    def _fit(self, clean, entity_map):
        """Fits the tf-idf vectorizer on all (augmented) synonyms."""
        # No cached artifacts exist for this resolver, so ``clean`` is a no-op.
        if clean:
            logger.info(
                "clean=True ignored while fitting tf-idf algo for entity resolution"
            )
        # load mappings.json data
        self._processed_entity_map = self._process_entity_map(
            self.type,
            entity_map,
            normalizer=self._resource_loader.query_factory.normalize,
            augment_lower_case=self._aug_lower_case,
            augment_title_case=self._aug_title_case,
            augment_normalized=self._aug_normalized,
        )
        # obtain sparse matrix
        # synonyms: synonym text -> row index into the tf-idf matrix
        synonyms = {v: k for k, v in
                    dict(enumerate(set(self._processed_entity_map["synonyms"]))).items()}
        synonyms_embs = self._vectorizer.fit_transform([*synonyms.keys()])
        # encode artificial synonyms if required
        if self._aug_max_syn_embs:
            # obtain cnames to synonyms mapping
            entity_mapping_synonyms = self._processed_entity_map["synonyms"]
            cnames2synonyms = {}
            for syn, cnames in entity_mapping_synonyms.items():
                for cname in cnames:
                    items = cnames2synonyms.get(cname, [])
                    items.append(syn)
                    cnames2synonyms[cname] = items
            dummy_new_synonyms_to_encode, dummy_new_synonyms_encodings = [], []
            # assert dummy synonyms
            for cname, syns in cnames2synonyms.items():
                dummy_synonym = f"{cname} - SYNONYMS AVERAGE"
                # update synonyms map 'cause such synonyms don't actually exist in mapping.json file
                dummy_synonym_mappings = entity_mapping_synonyms.get(dummy_synonym, [])
                dummy_synonym_mappings.append(cname)
                entity_mapping_synonyms[dummy_synonym] = dummy_synonym_mappings
                # check if needs to be encoded
                if dummy_synonym in synonyms:
                    continue
                # if required, obtain dummy encoding (element-wise max over the
                # cname's synonym vectors) and update collections
                dummy_encoding = scipy.sparse.csr_matrix(
                    np.max([synonyms_embs[synonyms[syn]].toarray() for syn in syns], axis=0)
                )
                dummy_new_synonyms_to_encode.append(dummy_synonym)
                dummy_new_synonyms_encodings.append(dummy_encoding)
            if dummy_new_synonyms_encodings:
                dummy_new_synonyms_encodings = scipy.sparse.vstack(dummy_new_synonyms_encodings)
            if dummy_new_synonyms_to_encode:
                synonyms_embs = (
                    dummy_new_synonyms_encodings if not synonyms else scipy.sparse.vstack(
                        [synonyms_embs, dummy_new_synonyms_encodings])
                )
                synonyms.update(
                    OrderedDict(zip(
                        dummy_new_synonyms_to_encode,
                        np.arange(len(synonyms), len(synonyms) + len(dummy_new_synonyms_to_encode)))
                    )
                )
        # returns a sparse matrix
        self._unique_synonyms = [*synonyms.keys()]
        self._syn_tfidf_matrix = synonyms_embs
    def _predict(self, nbest_entities, top_n):
        """Predicts the resolved value(s) for the given entity using cosine similarity.
        Args:
            nbest_entities (tuple): List of one entity object found in an input query, or a list \
                of n-best entity objects.
        Returns:
            (list): The resolved values for the provided entity.
        """
        # encode input entity
        # TODO: Use all provided entities (i.e all nbest_entities) like elastic search
        top_entity = nbest_entities[0]  # top_entity
        top_entity_vector = self._vectorizer.transform([top_entity.text])
        # tf-idf rows are L2-normalized, so the dot product is cosine similarity
        similarity_scores = self._syn_tfidf_matrix.dot(top_entity_vector.T).toarray().reshape(-1)
        # Rounding sometimes helps to bring correct answers on to the top score as other
        # non-correct resolutions
        similarity_scores = np.around(similarity_scores, decimals=4)
        sorted_items = sorted(list(zip(self._unique_synonyms, similarity_scores)),
                              key=lambda x: x[1], reverse=True)
        try:
            values = []
            for synonym, score in sorted_items:
                cnames = self._processed_entity_map["synonyms"][synonym]
                for cname in cnames:
                    for item in self._processed_entity_map["items"][cname]:
                        item_value = copy.copy(item)
                        item_value.pop("whitelist", None)
                        item_value.update({"score": score})
                        item_value.update({"top_synonym": synonym})
                        values.append(item_value)
        except (TypeError, KeyError):
            # TypeError: resolver was never fitted; KeyError: unknown synonym.
            logger.warning(
                "Failed to resolve entity %r for type %r", top_entity.text, top_entity.type
            )
            return None
        return values
    def _load(self):
        """(Re)fits the vectorizer when the resolver is loaded."""
        self.fit()
class EntityResolver:
    """Deprecated shim kept for backwards compatibility.

    Old usage::

        entity_resolver = EntityResolver(app_path, self.resource_loader, entity_type)

    Preferred usage::

        entity_resolver = EntityResolverFactory.create_resolver(app_path, entity_type)
        # or, with an explicit resource loader:
        entity_resolver = EntityResolverFactory.create_resolver(
            app_path, entity_type, resource_loader=self.resource_loader
        )
    """

    def __new__(cls, app_path, resource_loader, entity_type, es_host=None, es_client=None):
        # Instantiating this class never yields an EntityResolver object: the
        # call is forwarded to the factory, which returns the configured
        # resolver implementation instead.
        logger.warning(
            "DeprecationWarning: Entity Resolver should now be loaded using EntityResolverFactory. "
            "See https://www.mindmeld.com/docs/userguide/entity_resolver.html for more details.")
        return EntityResolverFactory.create_resolver(
            app_path,
            entity_type,
            resource_loader=resource_loader,
            es_host=es_host,
            es_client=es_client,
        )
# Registry mapping the `model_type` configuration value to the resolver
# implementation that serves it.
ENTITY_RESOLVER_MODEL_MAPPINGS = {
    "exact_match": ExactMatchEntityResolver,
    "text_relevance": ElasticsearchEntityResolver,
    "sbert_cosine_similarity": SentenceBertCosSimEntityResolver,
    "tfidf_cosine_similarity": TfIdfSparseCosSimEntityResolver
}
# Names of all supported resolver model types (the registry's keys).
ENTITY_RESOLVER_MODEL_TYPES = [*ENTITY_RESOLVER_MODEL_MAPPINGS]
| [
"os.remove",
"pickle.dump",
"sklearn.feature_extraction.text.TfidfVectorizer",
"numpy.empty",
"json.dumps",
"numpy.argsort",
"numpy.around",
"pickle.load",
"numpy.mean",
"os.path.exists",
"re.escape",
"copy.deepcopy",
"numpy.concatenate",
"numpy.vstack",
"os.getpid",
"os.makedirs",
"... | [((1880, 1907), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1897, 1907), False, 'import logging\n'), ((4977, 5011), 'json.dumps', 'json.dumps', (['config'], {'sort_keys': '(True)'}), '(config, sort_keys=True)\n', (4987, 5011), False, 'import json\n'), ((49364, 49401), 'json.dumps', 'json.dumps', (['er_config'], {'sort_keys': '(True)'}), '(er_config, sort_keys=True)\n', (49374, 49401), False, 'import json\n'), ((53011, 53042), 'os.path.exists', 'os.path.exists', (['self.cache_path'], {}), '(self.cache_path)\n', (53025, 53042), False, 'import os\n'), ((61116, 61146), 'numpy.vstack', 'np.vstack', (['top_entity_emb_list'], {}), '(top_entity_emb_list)\n', (61125, 61146), True, 'import numpy as np\n'), ((64661, 64738), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {'analyzer': 'self._char_ngram_and_word_analyzer', 'lowercase': '(False)'}), '(analyzer=self._char_ngram_and_word_analyzer, lowercase=False)\n', (64676, 64738), False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n'), ((69659, 69699), 'numpy.around', 'np.around', (['similarity_scores'], {'decimals': '(4)'}), '(similarity_scores, decimals=4)\n', (69668, 69699), True, 'import numpy as np\n'), ((3107, 3131), 'copy.deepcopy', 'copy.deepcopy', (['er_config'], {}), '(er_config)\n', (3120, 3131), False, 'import copy\n'), ((28704, 28715), 'os.getpid', 'os.getpid', ([], {}), '()\n', (28713, 28715), False, 'import os\n'), ((51554, 51594), 'numpy.around', 'np.around', (['similarity_scores'], {'decimals': '(2)'}), '(similarity_scores, decimals=2)\n', (51563, 51594), True, 'import numpy as np\n'), ((52488, 52519), 'os.path.exists', 'os.path.exists', (['self.cache_path'], {}), '(self.cache_path)\n', (52502, 52519), False, 'import os\n'), ((52533, 52559), 'os.remove', 'os.remove', (['self.cache_path'], {}), '(self.cache_path)\n', (52542, 52559), False, 'import os\n'), ((52973, 52986), 'collections.OrderedDict', 'OrderedDict', ([], {}), 
'()\n', (52984, 52986), False, 'from collections import OrderedDict\n'), ((52988, 52999), 'numpy.empty', 'np.empty', (['(0)'], {}), '(0)\n', (52996, 52999), True, 'import numpy as np\n'), ((59585, 59600), 'pickle.load', 'pickle.load', (['fp'], {}), '(fp)\n', (59596, 59600), False, 'import pickle\n'), ((59986, 60011), 'os.path.split', 'os.path.split', (['cache_path'], {}), '(cache_path)\n', (59999, 60011), False, 'import os\n'), ((60077, 60096), 'os.makedirs', 'os.makedirs', (['folder'], {}), '(folder)\n', (60088, 60096), False, 'import os\n'), ((60152, 60173), 'pickle.dump', 'pickle.dump', (['data', 'fp'], {}), '(data, fp)\n', (60163, 60173), False, 'import pickle\n'), ((13549, 13578), 'numpy.argsort', 'np.argsort', (['length_sorted_idx'], {}), '(length_sorted_idx)\n', (13559, 13578), True, 'import numpy as np\n'), ((29271, 29282), 'os.getpid', 'os.getpid', ([], {}), '()\n', (29280, 29282), False, 'import os\n'), ((29322, 29333), 'os.getpid', 'os.getpid', ([], {}), '()\n', (29331, 29333), False, 'import os\n'), ((46432, 46447), 'copy.copy', 'copy.copy', (['item'], {}), '(item)\n', (46441, 46447), False, 'import copy\n'), ((53993, 54048), 'numpy.concatenate', 'np.concatenate', (['[synonyms_embs, new_synonyms_encodings]'], {}), '([synonyms_embs, new_synonyms_encodings])\n', (54007, 54048), True, 'import numpy as np\n'), ((55612, 55675), 'numpy.mean', 'np.mean', (['[synonyms_embs[synonyms[syn]] for syn in syns]'], {'axis': '(0)'}), '([synonyms_embs[synonyms[syn]] for syn in syns], axis=0)\n', (55619, 55675), True, 'import numpy as np\n'), ((55903, 55942), 'numpy.vstack', 'np.vstack', (['dummy_new_synonyms_encodings'], {}), '(dummy_new_synonyms_encodings)\n', (55912, 55942), True, 'import numpy as np\n'), ((56645, 56676), 'os.path.exists', 'os.path.exists', (['self.cache_path'], {}), '(self.cache_path)\n', (56659, 56676), False, 'import os\n'), ((60041, 60063), 'os.path.exists', 'os.path.exists', (['folder'], {}), '(folder)\n', (60055, 60063), False, 'import os\n'), 
((64981, 65003), 're.escape', 're.escape', (['punctuation'], {}), '(punctuation)\n', (64990, 65003), False, 'import re\n'), ((68069, 68118), 'scipy.sparse.vstack', 'scipy.sparse.vstack', (['dummy_new_synonyms_encodings'], {}), '(dummy_new_synonyms_encodings)\n', (68088, 68118), False, 'import scipy\n'), ((56070, 56131), 'numpy.concatenate', 'np.concatenate', (['[synonyms_embs, dummy_new_synonyms_encodings]'], {}), '([synonyms_embs, dummy_new_synonyms_encodings])\n', (56084, 56131), True, 'import numpy as np\n'), ((68268, 68334), 'scipy.sparse.vstack', 'scipy.sparse.vstack', (['[synonyms_embs, dummy_new_synonyms_encodings]'], {}), '([synonyms_embs, dummy_new_synonyms_encodings])\n', (68287, 68334), False, 'import scipy\n'), ((58566, 58581), 'copy.copy', 'copy.copy', (['item'], {}), '(item)\n', (58575, 58581), False, 'import copy\n'), ((70156, 70171), 'copy.copy', 'copy.copy', (['item'], {}), '(item)\n', (70165, 70171), False, 'import copy\n'), ((62195, 62210), 'copy.copy', 'copy.copy', (['item'], {}), '(item)\n', (62204, 62210), False, 'import copy\n')] |
import numpy as np
from matplotlib import pyplot as plt
from datetime import datetime
def data_ft(data, plotting=True):
    """Compute (and optionally plot) the FFT amplitude spectrum of time-series data.

    Parameters
    ----------
    data : ndarray, shape (n_samples, 1 + n_channels)
        Column 0 is time (assumed uniformly sampled); the remaining columns
        are signal channels. The first and last 5 samples are discarded as
        edge artifacts, so at least 11 rows are required.
    plotting : bool
        When True, plot the non-negative half of the spectrum via ``plot_ft``
        and return the saved figure path.

    Returns
    -------
    str or None
        Path of the saved plot when ``plotting`` is True, otherwise None
        (previously this raised NameError because ``fpath`` was undefined).
    """
    t = data[5:-5, 0]
    x = data[5:-5, 1:]
    # Normalize each channel by its mean. Use an out-of-place division: the
    # slice above is a view into `data`, so `x /= ...` would silently mutate
    # the caller's array.
    x = x / np.mean(x, axis=0)
    # Frequency grid from the (assumed constant) sample spacing.
    f = np.fft.fftfreq(t.size, t[1] - t[0])
    x_fft_abs = np.abs(np.fft.fft(x, axis=0))
    # Real input -> symmetric spectrum; keep only non-negative frequencies.
    f_sel = f >= 0
    if plotting:
        return plot_ft(f[f_sel], x_fft_abs[f_sel, :])
    return None
def plot_ft(f, x_fft, channels=None, f_max=250):
    """Plot FFT amplitude spectra for the selected channels and save to disk.

    Parameters
    ----------
    f : ndarray
        Frequency grid, Hz.
    x_fft : ndarray, shape (len(f), n_channels)
        Amplitude spectra, one column per channel.
    channels : sequence of int, optional
        Channel columns to plot; defaults to the first channel only.
        (Replaces the mutable default argument ``[0]``.)
    f_max : float or None
        Upper frequency limit of the plot; None means use ``f.max()``.

    Returns
    -------
    str
        Path of the saved PNG file.
    """
    if channels is None:
        channels = [0]
    if f_max is None:
        f_max = f.max()
    # Band used only to set the y-axis limit; frequencies <= 5 Hz are excluded
    # so a large DC / low-frequency component does not dominate the scale.
    # FIX: uses f_max here — the previous hard-coded 250 ignored a
    # user-supplied frequency limit.
    f_norm = (f > 5) & (f < f_max)
    plt.ioff()
    fig = plt.figure()
    fig.set_tight_layout(True)
    plt.plot(f, x_fft[:, channels], lw=0.5)
    plt.xlim(0, f_max)
    plt.ylim(0, x_fft[f_norm, channels].max())
    plt.xlabel('frequency, Hz')
    plt.ylabel('amplitude')
    # Timestamped file name; ':' replaced for filesystem safety and the
    # microseconds suffix trimmed off.
    fpath = '/nsls2/xf08id/log/diagnostics/' + 'vibration_diagnostics_' + str(datetime.now()).replace(':', '-')[:-7] + '.png'
    plt.savefig(fpath, dpi=300)
    plt.ion()
    plt.close(fig)
    return fpath
| [
"matplotlib.pyplot.xlim",
"numpy.abs",
"matplotlib.pyplot.ioff",
"matplotlib.pyplot.plot",
"numpy.fft.fft",
"matplotlib.pyplot.close",
"matplotlib.pyplot.ion",
"numpy.fft.fftfreq",
"numpy.mean",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"datetime.dat... | [((175, 193), 'numpy.mean', 'np.mean', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (182, 193), True, 'import numpy as np\n'), ((202, 237), 'numpy.fft.fftfreq', 'np.fft.fftfreq', (['t.size', '(t[1] - t[0])'], {}), '(t.size, t[1] - t[0])\n', (216, 237), True, 'import numpy as np\n'), ((279, 300), 'numpy.fft.fft', 'np.fft.fft', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (289, 300), True, 'import numpy as np\n'), ((317, 330), 'numpy.abs', 'np.abs', (['x_fft'], {}), '(x_fft)\n', (323, 330), True, 'import numpy as np\n'), ((631, 641), 'matplotlib.pyplot.ioff', 'plt.ioff', ([], {}), '()\n', (639, 641), True, 'from matplotlib import pyplot as plt\n'), ((652, 664), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (662, 664), True, 'from matplotlib import pyplot as plt\n'), ((700, 739), 'matplotlib.pyplot.plot', 'plt.plot', (['f', 'x_fft[:, channels]'], {'lw': '(0.5)'}), '(f, x_fft[:, channels], lw=0.5)\n', (708, 739), True, 'from matplotlib import pyplot as plt\n'), ((744, 762), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', 'f_max'], {}), '(0, f_max)\n', (752, 762), True, 'from matplotlib import pyplot as plt\n'), ((814, 841), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""frequency, Hz"""'], {}), "('frequency, Hz')\n", (824, 841), True, 'from matplotlib import pyplot as plt\n'), ((846, 869), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""amplitude"""'], {}), "('amplitude')\n", (856, 869), True, 'from matplotlib import pyplot as plt\n'), ((1000, 1027), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fpath'], {'dpi': '(300)'}), '(fpath, dpi=300)\n', (1011, 1027), True, 'from matplotlib import pyplot as plt\n'), ((1032, 1041), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (1039, 1041), True, 'from matplotlib import pyplot as plt\n'), ((1046, 1060), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (1055, 1060), True, 'from matplotlib import pyplot as plt\n'), ((948, 962), 'datetime.datetime.now', 'datetime.now', ([], {}), 
'()\n', (960, 962), False, 'from datetime import datetime\n')] |
"""Contains integration measures."""
import abc
from typing import Optional, Tuple, Union
import numpy as np
import scipy.stats
from probnum.randvars import Normal
from probnum.typing import FloatArgType, IntArgType
class IntegrationMeasure(abc.ABC):
    """An abstract class for a measure against which a target function is integrated.

    Child classes implement specific integration measures and, if available,
    make use of random variables for sampling and evaluation of the density
    function.

    Parameters
    ----------
    dim :
        Dimension of the integration domain.
    domain :
        Tuple which contains two arrays which define the start and end points,
        respectively, of the rectangular integration domain.
    """

    def __init__(
        self,
        dim: IntArgType,
        domain: Tuple[Union[np.ndarray, FloatArgType], Union[np.ndarray, FloatArgType]],
    ) -> None:
        self._set_dimension_domain(dim, domain)

    def __call__(self, points: Union[float, np.floating, np.ndarray]) -> np.ndarray:
        """Evaluate the density function of the integration measure.

        Parameters
        ----------
        points :
            *shape=(n_points,) or (n_points, dim)* -- Input locations.

        Returns
        -------
        density_evals :
            *shape=(n_points,)* -- Density evaluated at given locations.
        """
        # pylint: disable=no-member
        densities = self.random_variable.pdf(points)
        return densities.squeeze()

    def sample(
        self,
        rng: np.random.Generator,
        n_sample: IntArgType,
    ) -> np.ndarray:
        """Sample ``n_sample`` points from the integration measure.

        Parameters
        ----------
        rng :
            Random number generator
        n_sample :
            Number of points to be sampled

        Returns
        -------
        points :
            *shape=(n_sample,) or (n_sample,dim)* -- Sampled points
        """
        # pylint: disable=no-member
        raw_samples = self.random_variable.sample(rng=rng, size=n_sample)
        return np.reshape(raw_samples, newshape=(n_sample, self.dim))

    def _set_dimension_domain(
        self,
        dim: IntArgType,
        domain: Tuple[Union[np.ndarray, FloatArgType], Union[np.ndarray, FloatArgType]],
    ) -> None:
        """Sets the integration domain and dimension.

        When ``dim`` is None it is inferred from the domain limits: equal-length
        limits fix the dimension directly, and a scalar limit is broadcast to
        match the other one. When ``dim`` is given, the limits must either be
        scalars (broadcast to length ``dim``) or already have length ``dim``.
        Any other combination raises ``ValueError``, as do non-positive
        dimensions and empty domains.
        """
        n_lower = np.size(domain[0])
        n_upper = np.size(domain[1])
        limits_dim = np.max([n_lower, n_upper])

        # Determine the dimension and whether the limit lengths are consistent.
        if dim is None:
            if n_lower == n_upper:
                dim = n_lower
                consistent = True
            elif n_lower == 1 or n_upper == 1:
                dim = limits_dim
                consistent = True
            else:
                consistent = False
        else:
            consistent = not ((n_lower > 1 or n_upper > 1) and dim != limits_dim)

        if not consistent:
            raise ValueError(
                "Domain limits must have the same length or at least "
                "one of them has to be one-dimensional."
            )
        if dim < 1:
            raise ValueError(f"Domain dimension dim = {dim} must be positive.")

        # Broadcast scalar limits to constant vectors of length ``dim``.
        lower = np.full((dim,), domain[0]) if n_lower == 1 else domain[0]
        upper = np.full((dim,), domain[1]) if n_upper == 1 else domain[1]

        # The domain must have positive extent in every dimension.
        if not np.all(lower < upper):
            raise ValueError("Domain must be non-empty.")

        self.dim = dim
        self.domain = (lower, upper)
class LebesgueMeasure(IntegrationMeasure):
"""Lebesgue measure on a hyper-rectangle.
Parameters
----------
dim :
Dimension of the integration domain
domain :
Tuple which contains two arrays which define the start and end points,
respectively, of the rectangular integration domain.
normalized :
Boolean which controls whether or not the measure is normalized (i.e.,
integral over the domain is one).
"""
def __init__(
self,
domain: Tuple[Union[np.ndarray, FloatArgType], Union[np.ndarray, FloatArgType]],
dim: Optional[IntArgType] = None,
normalized: Optional[bool] = False,
) -> None:
super().__init__(dim=dim, domain=domain)
# Set normalization constant
self.normalized = normalized
if self.normalized:
self.normalization_constant = 1.0 / np.prod(self.domain[1] - self.domain[0])
else:
self.normalization_constant = 1.0
if self.normalization_constant in [0, np.Inf, -np.Inf]:
raise ValueError(
"Normalization constant is too small or too large. "
"Consider setting normalized = False."
)
# Use scipy's uniform random variable since uniform random variables are not
# yet implemented in probnum
self.random_variable = scipy.stats.uniform(
loc=self.domain[0], scale=self.domain[1] - self.domain[0]
)
def __call__(self, points: Union[float, np.floating, np.ndarray]) -> np.ndarray:
num_dat = np.atleast_1d(points).shape[0]
return np.full(() if num_dat == 1 else (num_dat,), self.normalization_constant)
def sample(
self,
rng: np.random.Generator,
n_sample: IntArgType,
) -> np.ndarray:
return self.random_variable.rvs(size=(n_sample, self.dim), random_state=rng)
# pylint: disable=too-few-public-methods
class GaussianMeasure(IntegrationMeasure):
"""Gaussian measure on Euclidean space with given mean and covariance.
If ``mean`` and ``cov`` are scalars but ``dim`` is larger than one, ``mean`` and
``cov`` are extended to a constant vector and diagonal matrix, respectively,
of appropriate dimensions.
Parameters
----------
mean :
*shape=(dim,)* -- Mean of the Gaussian measure.
cov :
*shape=(dim, dim)* -- Covariance matrix of the Gaussian measure.
dim :
Dimension of the integration domain.
"""
def __init__(
self,
mean: Union[float, np.floating, np.ndarray],
cov: Union[float, np.floating, np.ndarray],
dim: Optional[IntArgType] = None,
) -> None:
# Extend scalar mean and covariance to higher dimensions if dim has been
# supplied by the user
# pylint: disable=fixme
# TODO: This needs to be modified to account for cases where only either the
# mean or covariance is given in scalar form
if (
(np.isscalar(mean) or mean.size == 1)
and (np.isscalar(cov) or cov.size == 1)
and dim is not None
):
mean = np.full((dim,), mean)
cov = cov * np.eye(dim)
# Set dimension based on the mean vector
if np.isscalar(mean):
dim = 1
else:
dim = mean.size
# If cov has been given as a vector of variances, transform to diagonal matrix
if isinstance(cov, np.ndarray) and np.squeeze(cov).ndim == 1 and dim > 1:
cov = np.diag(np.squeeze(cov))
# Exploit random variables to carry out mean and covariance checks
self.random_variable = Normal(mean=np.squeeze(mean), cov=np.squeeze(cov))
self.mean = self.random_variable.mean
self.cov = self.random_variable.cov
# Set diagonal_covariance flag
if dim == 1:
self.diagonal_covariance = True
else:
self.diagonal_covariance = (
np.count_nonzero(self.cov - np.diag(np.diagonal(self.cov))) == 0
)
super().__init__(
dim=dim,
domain=(np.full((dim,), -np.Inf), np.full((dim,), np.Inf)),
)
| [
"numpy.full",
"numpy.size",
"numpy.isscalar",
"numpy.prod",
"numpy.max",
"numpy.squeeze",
"numpy.eye",
"numpy.atleast_1d",
"numpy.all",
"numpy.diagonal"
] | [((3491, 3509), 'numpy.size', 'np.size', (['domain[0]'], {}), '(domain[0])\n', (3498, 3509), True, 'import numpy as np\n'), ((3533, 3551), 'numpy.size', 'np.size', (['domain[1]'], {}), '(domain[1])\n', (3540, 3551), True, 'import numpy as np\n'), ((6584, 6656), 'numpy.full', 'np.full', (['(() if num_dat == 1 else (num_dat,))', 'self.normalization_constant'], {}), '(() if num_dat == 1 else (num_dat,), self.normalization_constant)\n', (6591, 6656), True, 'import numpy as np\n'), ((8236, 8253), 'numpy.isscalar', 'np.isscalar', (['mean'], {}), '(mean)\n', (8247, 8253), True, 'import numpy as np\n'), ((4529, 4555), 'numpy.full', 'np.full', (['(dim,)', 'domain[0]'], {}), '((dim,), domain[0])\n', (4536, 4555), True, 'import numpy as np\n'), ((4656, 4682), 'numpy.full', 'np.full', (['(dim,)', 'domain[1]'], {}), '((dim,), domain[1])\n', (4663, 4682), True, 'import numpy as np\n'), ((4791, 4818), 'numpy.all', 'np.all', (['(domain_a < domain_b)'], {}), '(domain_a < domain_b)\n', (4797, 4818), True, 'import numpy as np\n'), ((8117, 8138), 'numpy.full', 'np.full', (['(dim,)', 'mean'], {}), '((dim,), mean)\n', (8124, 8138), True, 'import numpy as np\n'), ((5845, 5885), 'numpy.prod', 'np.prod', (['(self.domain[1] - self.domain[0])'], {}), '(self.domain[1] - self.domain[0])\n', (5852, 5885), True, 'import numpy as np\n'), ((6538, 6559), 'numpy.atleast_1d', 'np.atleast_1d', (['points'], {}), '(points)\n', (6551, 6559), True, 'import numpy as np\n'), ((7966, 7983), 'numpy.isscalar', 'np.isscalar', (['mean'], {}), '(mean)\n', (7977, 7983), True, 'import numpy as np\n'), ((8020, 8036), 'numpy.isscalar', 'np.isscalar', (['cov'], {}), '(cov)\n', (8031, 8036), True, 'import numpy as np\n'), ((8163, 8174), 'numpy.eye', 'np.eye', (['dim'], {}), '(dim)\n', (8169, 8174), True, 'import numpy as np\n'), ((8513, 8528), 'numpy.squeeze', 'np.squeeze', (['cov'], {}), '(cov)\n', (8523, 8528), True, 'import numpy as np\n'), ((8649, 8665), 'numpy.squeeze', 'np.squeeze', (['mean'], {}), 
'(mean)\n', (8659, 8665), True, 'import numpy as np\n'), ((8671, 8686), 'numpy.squeeze', 'np.squeeze', (['cov'], {}), '(cov)\n', (8681, 8686), True, 'import numpy as np\n'), ((3826, 3862), 'numpy.max', 'np.max', (['[domain_a_dim, domain_b_dim]'], {}), '([domain_a_dim, domain_b_dim])\n', (3832, 3862), True, 'import numpy as np\n'), ((3996, 4032), 'numpy.max', 'np.max', (['[domain_a_dim, domain_b_dim]'], {}), '([domain_a_dim, domain_b_dim])\n', (4002, 4032), True, 'import numpy as np\n'), ((8448, 8463), 'numpy.squeeze', 'np.squeeze', (['cov'], {}), '(cov)\n', (8458, 8463), True, 'import numpy as np\n'), ((9101, 9125), 'numpy.full', 'np.full', (['(dim,)', '(-np.Inf)'], {}), '((dim,), -np.Inf)\n', (9108, 9125), True, 'import numpy as np\n'), ((9127, 9150), 'numpy.full', 'np.full', (['(dim,)', 'np.Inf'], {}), '((dim,), np.Inf)\n', (9134, 9150), True, 'import numpy as np\n'), ((8990, 9011), 'numpy.diagonal', 'np.diagonal', (['self.cov'], {}), '(self.cov)\n', (9001, 9011), True, 'import numpy as np\n')] |
import argparse
import numpy as np
import os
import pandas as pd
import pickle
import util
from datetime import datetime
from scipy.sparse import save_npz, vstack
from sklearn.preprocessing import LabelEncoder
if __name__ == '__main__':
# Commandline arguments
parser = argparse.ArgumentParser()
parser.add_argument('dir', type = str, help = 'Directory containing the data')
parser.add_argument('--test_users', type = int, default = 10000)
args = parser.parse_args()
# Fix seed for reproducibility
np.random.seed(42)
# Load rating data
print(datetime.now(), 'Loading in ratings...')
ratings = pd.read_csv(args.dir + 'preprocessed_pref.csv')
ratings.columns = ['user', 'item']
print('\t{0:8} ratings'.format(ratings.shape[0]))
print('\t{0:8} unique users, {1:8} unique items'.format(ratings['user'].nunique(), ratings['item'].nunique()))
# Load side info
print(datetime.now(), 'Loading in side-info...')
###########################
# ARTISTS - GENRES - TAGS #
###########################
# Load in data
artists = pd.read_csv(args.dir + 'preprocessed_artists.csv')
artists.columns = ['item', 'artist']
genres = pd.read_csv(args.dir + 'preprocessed_genres.csv')
genres.columns = ['item', 'genre']
tags = pd.read_csv(args.dir + 'preprocessed_tags.csv')
tags.columns = ['item', 'tag']
# Drop those not appearing in preference data
artists = artists.merge(ratings[['item']].drop_duplicates(), how = 'right').dropna()
genres = genres.merge(ratings[['item']].drop_duplicates(), how = 'right').dropna()
tags = tags.merge(ratings[['item']].drop_duplicates(), how = 'right').dropna()
# Ensure proper integer identifiers
user_enc = LabelEncoder()
item_enc = LabelEncoder()
ratings['user'] = user_enc.fit_transform(ratings['user'])
ratings['item'] = item_enc.fit_transform(ratings['item'])
artists['item'] = item_enc.transform(artists['item'])
genres['item'] = item_enc.transform(genres['item'])
tags['item'] = item_enc.transform(tags['item'])
# Generate Metadata-to-item mapping
X_artists = util.generate_csr_matrix(artists, 'artist', ratings['item'].max() + 1)
X_genres = util.generate_csr_matrix(genres, 'genre', ratings['item'].max() + 1)
X_tags = util.generate_csr_matrix(tags, 'tag', ratings['item'].max() + 1)
X_meta = vstack((X_artists, X_genres, X_tags))
# Check whether output directory already exists - make it if necessary
if not os.path.exists(args.dir + 'preprocessed/'):
os.makedirs(args.dir + 'preprocessed/')
# Write out metadata-item matrix
print(datetime.now(), 'Writing out metadata-item matrix...')
save_npz(args.dir + 'preprocessed/X_meta.npz', X_meta)
# Train - validation - test split
print(datetime.now(), 'Train-validation-test split...')
X_train, X_val, val_dict, X_test, test_dict = util.train_val_test_split_Jebara(ratings, n_test_users = args.test_users)
# Write out validation and test data
print(datetime.now(), 'Writing out validation and test data...')
save_npz(args.dir + 'preprocessed/X_val.npz', X_val)
with open(args.dir + 'preprocessed/val_dict.pkl', 'wb') as handle:
pickle.dump(val_dict, handle)
save_npz(args.dir + 'preprocessed/X_test.npz', X_test)
with open(args.dir + 'preprocessed/test_dict.pkl', 'wb') as handle:
pickle.dump(test_dict, handle)
# Write out full user-item training matrix
print(datetime.now(), 'Writing out train data...')
save_npz(args.dir + 'preprocessed/X_train.npz', X_train)
# Subsample training data on a user-level
print(datetime.now(), 'Subsampling training users...')
train_users = np.unique(X_train.nonzero()[0])
np.random.shuffle(train_users)
for frac_train_users in [0.01, .05, .1, .25, .5]:
train_users[:int(frac_train_users * len(train_users))]
pd.DataFrame(train_users[:int(frac_train_users * len(train_users))], columns = ['user']).to_csv(args.dir + 'preprocessed/train_users_{}.csv'.format(frac_train_users), index = False)
print(datetime.now(), 'Finished!')
| [
"pickle.dump",
"numpy.random.seed",
"argparse.ArgumentParser",
"scipy.sparse.vstack",
"os.makedirs",
"pandas.read_csv",
"os.path.exists",
"sklearn.preprocessing.LabelEncoder",
"util.train_val_test_split_Jebara",
"scipy.sparse.save_npz",
"datetime.datetime.now",
"numpy.random.shuffle"
] | [((279, 304), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (302, 304), False, 'import argparse\n'), ((528, 546), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (542, 546), True, 'import numpy as np\n'), ((636, 683), 'pandas.read_csv', 'pd.read_csv', (["(args.dir + 'preprocessed_pref.csv')"], {}), "(args.dir + 'preprocessed_pref.csv')\n", (647, 683), True, 'import pandas as pd\n'), ((1096, 1146), 'pandas.read_csv', 'pd.read_csv', (["(args.dir + 'preprocessed_artists.csv')"], {}), "(args.dir + 'preprocessed_artists.csv')\n", (1107, 1146), True, 'import pandas as pd\n'), ((1201, 1250), 'pandas.read_csv', 'pd.read_csv', (["(args.dir + 'preprocessed_genres.csv')"], {}), "(args.dir + 'preprocessed_genres.csv')\n", (1212, 1250), True, 'import pandas as pd\n'), ((1301, 1348), 'pandas.read_csv', 'pd.read_csv', (["(args.dir + 'preprocessed_tags.csv')"], {}), "(args.dir + 'preprocessed_tags.csv')\n", (1312, 1348), True, 'import pandas as pd\n'), ((1750, 1764), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (1762, 1764), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((1780, 1794), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (1792, 1794), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((2388, 2425), 'scipy.sparse.vstack', 'vstack', (['(X_artists, X_genres, X_tags)'], {}), '((X_artists, X_genres, X_tags))\n', (2394, 2425), False, 'from scipy.sparse import save_npz, vstack\n'), ((2716, 2770), 'scipy.sparse.save_npz', 'save_npz', (["(args.dir + 'preprocessed/X_meta.npz')", 'X_meta'], {}), "(args.dir + 'preprocessed/X_meta.npz', X_meta)\n", (2724, 2770), False, 'from scipy.sparse import save_npz, vstack\n'), ((2920, 2991), 'util.train_val_test_split_Jebara', 'util.train_val_test_split_Jebara', (['ratings'], {'n_test_users': 'args.test_users'}), '(ratings, n_test_users=args.test_users)\n', (2952, 2991), False, 'import util\n'), ((3110, 3162), 
'scipy.sparse.save_npz', 'save_npz', (["(args.dir + 'preprocessed/X_val.npz')", 'X_val'], {}), "(args.dir + 'preprocessed/X_val.npz', X_val)\n", (3118, 3162), False, 'from scipy.sparse import save_npz, vstack\n'), ((3276, 3330), 'scipy.sparse.save_npz', 'save_npz', (["(args.dir + 'preprocessed/X_test.npz')", 'X_test'], {}), "(args.dir + 'preprocessed/X_test.npz', X_test)\n", (3284, 3330), False, 'from scipy.sparse import save_npz, vstack\n'), ((3549, 3605), 'scipy.sparse.save_npz', 'save_npz', (["(args.dir + 'preprocessed/X_train.npz')", 'X_train'], {}), "(args.dir + 'preprocessed/X_train.npz', X_train)\n", (3557, 3605), False, 'from scipy.sparse import save_npz, vstack\n'), ((3766, 3796), 'numpy.random.shuffle', 'np.random.shuffle', (['train_users'], {}), '(train_users)\n', (3783, 3796), True, 'import numpy as np\n'), ((581, 595), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (593, 595), False, 'from datetime import datetime\n'), ((924, 938), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (936, 938), False, 'from datetime import datetime\n'), ((2517, 2559), 'os.path.exists', 'os.path.exists', (["(args.dir + 'preprocessed/')"], {}), "(args.dir + 'preprocessed/')\n", (2531, 2559), False, 'import os\n'), ((2569, 2608), 'os.makedirs', 'os.makedirs', (["(args.dir + 'preprocessed/')"], {}), "(args.dir + 'preprocessed/')\n", (2580, 2608), False, 'import os\n'), ((2657, 2671), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2669, 2671), False, 'from datetime import datetime\n'), ((2820, 2834), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2832, 2834), False, 'from datetime import datetime\n'), ((3047, 3061), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3059, 3061), False, 'from datetime import datetime\n'), ((3242, 3271), 'pickle.dump', 'pickle.dump', (['val_dict', 'handle'], {}), '(val_dict, handle)\n', (3253, 3271), False, 'import pickle\n'), ((3411, 3441), 'pickle.dump', 'pickle.dump', (['test_dict', 
'handle'], {}), '(test_dict, handle)\n', (3422, 3441), False, 'import pickle\n'), ((3500, 3514), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3512, 3514), False, 'from datetime import datetime\n'), ((3663, 3677), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3675, 3677), False, 'from datetime import datetime\n'), ((4114, 4128), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4126, 4128), False, 'from datetime import datetime\n')] |
from sklearn import datasets
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.preprocessing import LabelEncoder
from keras.utils import np_utils
import pandas as pd
import numpy as np
import matrix_plot
import matplotlib.pyplot as plt
from sklearn.svm import SVC
import data_augmentation
seed = 8
np.random.seed(seed)
def feature_normalize(dataset):
return (dataset - np.mean(dataset, axis=0))/np.std(dataset, axis=0)
df1 = pd.read_excel('data/female_session_1.xlsx', header=None)
input1 = df1.as_matrix()
df2 = pd.read_excel('data/female_session_2.xlsx', header=None)
input2 = df2.as_matrix()
df3 = pd.read_excel('data/male_session_1.xlsx', header=None)
input3 = df3.as_matrix()
df4 = pd.read_excel('data/male_session_2.xlsx', header=None)
input4 = df4.as_matrix()
Y1 = np.ones((144,1), np.float32)
for i in range(0,Y1.shape[0],48):
if (i == 0):
Y1[0:48] = Y1[0:48]*0
if (i == 0):
Y1[96:] = Y1[96:]*2
Y2 = np.ones((144,1), np.float32)
for i in range(0,Y2.shape[0],48):
if (i == 0):
Y2[0:48] = Y2[0:48]*0
if (i == 0):
Y2[96:] = Y2[96:]*2
Y3 = np.ones((144,1), np.float32)
for i in range(0,Y3.shape[0],48):
if (i == 0):
Y3[0:48] = Y3[0:48]*0
if (i == 0):
Y3[96:] = Y3[96:]*2
Y4 = np.ones((144,1), np.float32)
for i in range(0,Y4.shape[0],48):
if (i == 0):
Y4[0:48] = Y4[0:48]*0
if (i == 0):
Y4[96:] = Y4[96:]*2
X_aug_1, Y_aug_1 = data_augmentation.get_augmented_input_1()
X_aug_2, Y_aug_2 = data_augmentation.get_augmented_input_2()
X_aug_3, Y_aug_3 = data_augmentation.get_augmented_input_3()
X_aug_4, Y_aug_4 = data_augmentation.get_augmented_input_4()
Y_o = np.vstack([Y1, Y2, Y3, Y4])
Y = np.vstack([Y_o, Y_aug_1, Y_aug_2, Y_aug_3, Y_aug_4]).reshape((2832))
X_input_o = np.vstack([input1, input2, input3, input4])
X_input = np.vstack([X_input_o, X_aug_1, X_aug_2, X_aug_3, X_aug_4])
X_norm = feature_normalize(X_input).reshape((2832, 25))
X = X_norm
class_names = ['eye','man','hand']
x_train, x_val, y_train, y_val = train_test_split(X, Y, test_size=0.2, random_state=4)
x_test, x_dev, y_test, y_dev = train_test_split(x_val, y_val, test_size=0.5, random_state=4)
svm_model_linear = SVC(kernel = 'rbf', C = 1).fit(x_train, y_train)
svm_predictions = svm_model_linear.predict(x_test)
accuracy = svm_model_linear.score(x_test, y_test)
cm = confusion_matrix(y_test, svm_predictions)
matrix_plot.plot_confusion_matrix(y_test, svm_predictions, classes=class_names, normalize=True,
title='SVM Confusion Matrix on Augmented Data')
plt.show()
print("Train Count: ", y_train.shape)
print("============================")
print("Test Count: ", y_test.shape)
print("============================")
print("Results: ", svm_predictions)
print("============================")
print("True Values: ", y_test)
print("============================")
print("accuracy: ", accuracy*100)
print("============================")
accuracy = svm_model_linear.score(x_train, y_train)
print("train accuracy: ", accuracy*100)
print("============================") | [
"numpy.random.seed",
"matplotlib.pyplot.show",
"numpy.std",
"data_augmentation.get_augmented_input_1",
"sklearn.model_selection.train_test_split",
"numpy.ones",
"matrix_plot.plot_confusion_matrix",
"data_augmentation.get_augmented_input_2",
"pandas.read_excel",
"data_augmentation.get_augmented_inp... | [((435, 455), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (449, 455), True, 'import numpy as np\n'), ((574, 630), 'pandas.read_excel', 'pd.read_excel', (['"""data/female_session_1.xlsx"""'], {'header': 'None'}), "('data/female_session_1.xlsx', header=None)\n", (587, 630), True, 'import pandas as pd\n'), ((667, 723), 'pandas.read_excel', 'pd.read_excel', (['"""data/female_session_2.xlsx"""'], {'header': 'None'}), "('data/female_session_2.xlsx', header=None)\n", (680, 723), True, 'import pandas as pd\n'), ((760, 814), 'pandas.read_excel', 'pd.read_excel', (['"""data/male_session_1.xlsx"""'], {'header': 'None'}), "('data/male_session_1.xlsx', header=None)\n", (773, 814), True, 'import pandas as pd\n'), ((851, 905), 'pandas.read_excel', 'pd.read_excel', (['"""data/male_session_2.xlsx"""'], {'header': 'None'}), "('data/male_session_2.xlsx', header=None)\n", (864, 905), True, 'import pandas as pd\n'), ((941, 970), 'numpy.ones', 'np.ones', (['(144, 1)', 'np.float32'], {}), '((144, 1), np.float32)\n', (948, 970), True, 'import numpy as np\n'), ((1111, 1140), 'numpy.ones', 'np.ones', (['(144, 1)', 'np.float32'], {}), '((144, 1), np.float32)\n', (1118, 1140), True, 'import numpy as np\n'), ((1279, 1308), 'numpy.ones', 'np.ones', (['(144, 1)', 'np.float32'], {}), '((144, 1), np.float32)\n', (1286, 1308), True, 'import numpy as np\n'), ((1449, 1478), 'numpy.ones', 'np.ones', (['(144, 1)', 'np.float32'], {}), '((144, 1), np.float32)\n', (1456, 1478), True, 'import numpy as np\n'), ((1631, 1672), 'data_augmentation.get_augmented_input_1', 'data_augmentation.get_augmented_input_1', ([], {}), '()\n', (1670, 1672), False, 'import data_augmentation\n'), ((1693, 1734), 'data_augmentation.get_augmented_input_2', 'data_augmentation.get_augmented_input_2', ([], {}), '()\n', (1732, 1734), False, 'import data_augmentation\n'), ((1755, 1796), 'data_augmentation.get_augmented_input_3', 
'data_augmentation.get_augmented_input_3', ([], {}), '()\n', (1794, 1796), False, 'import data_augmentation\n'), ((1817, 1858), 'data_augmentation.get_augmented_input_4', 'data_augmentation.get_augmented_input_4', ([], {}), '()\n', (1856, 1858), False, 'import data_augmentation\n'), ((1868, 1895), 'numpy.vstack', 'np.vstack', (['[Y1, Y2, Y3, Y4]'], {}), '([Y1, Y2, Y3, Y4])\n', (1877, 1895), True, 'import numpy as np\n'), ((1985, 2028), 'numpy.vstack', 'np.vstack', (['[input1, input2, input3, input4]'], {}), '([input1, input2, input3, input4])\n', (1994, 2028), True, 'import numpy as np\n'), ((2040, 2098), 'numpy.vstack', 'np.vstack', (['[X_input_o, X_aug_1, X_aug_2, X_aug_3, X_aug_4]'], {}), '([X_input_o, X_aug_1, X_aug_2, X_aug_3, X_aug_4])\n', (2049, 2098), True, 'import numpy as np\n'), ((2244, 2297), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'Y'], {'test_size': '(0.2)', 'random_state': '(4)'}), '(X, Y, test_size=0.2, random_state=4)\n', (2260, 2297), False, 'from sklearn.model_selection import train_test_split\n'), ((2330, 2391), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x_val', 'y_val'], {'test_size': '(0.5)', 'random_state': '(4)'}), '(x_val, y_val, test_size=0.5, random_state=4)\n', (2346, 2391), False, 'from sklearn.model_selection import train_test_split\n'), ((2586, 2627), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_test', 'svm_predictions'], {}), '(y_test, svm_predictions)\n', (2602, 2627), False, 'from sklearn.metrics import confusion_matrix\n'), ((2631, 2784), 'matrix_plot.plot_confusion_matrix', 'matrix_plot.plot_confusion_matrix', (['y_test', 'svm_predictions'], {'classes': 'class_names', 'normalize': '(True)', 'title': '"""SVM Confusion Matrix on Augmented Data"""'}), "(y_test, svm_predictions, classes=\n class_names, normalize=True, title='SVM Confusion Matrix on Augmented Data'\n )\n", (2664, 2784), False, 'import matrix_plot\n'), ((2799, 2809), 'matplotlib.pyplot.show', 
'plt.show', ([], {}), '()\n', (2807, 2809), True, 'import matplotlib.pyplot as plt\n'), ((540, 563), 'numpy.std', 'np.std', (['dataset'], {'axis': '(0)'}), '(dataset, axis=0)\n', (546, 563), True, 'import numpy as np\n'), ((1901, 1953), 'numpy.vstack', 'np.vstack', (['[Y_o, Y_aug_1, Y_aug_2, Y_aug_3, Y_aug_4]'], {}), '([Y_o, Y_aug_1, Y_aug_2, Y_aug_3, Y_aug_4])\n', (1910, 1953), True, 'import numpy as np\n'), ((2415, 2437), 'sklearn.svm.SVC', 'SVC', ([], {'kernel': '"""rbf"""', 'C': '(1)'}), "(kernel='rbf', C=1)\n", (2418, 2437), False, 'from sklearn.svm import SVC\n'), ((514, 538), 'numpy.mean', 'np.mean', (['dataset'], {'axis': '(0)'}), '(dataset, axis=0)\n', (521, 538), True, 'import numpy as np\n')] |
"""GAN implementation for name generation."""
import time
import json
import os
import tensorflow as tf
from tensorflow.keras.layers import (
Dense,
BatchNormalization,
Reshape,
Conv1D,
Conv1DTranspose,
Flatten,
Dropout,
)
from tensorflow.keras.models import Sequential
import numpy as np
from IPython import display
EPOCHS = 5000
MULTIPLIER = 5
CHECKPOINT_DIR = "./training_checkpoints"
def create_letter_list(names):
return list(sorted(set("".join(set(names)))))
def make_generator_model():
model = Sequential()
model.add(Dense(input_dim, use_bias=False, input_shape=(input_dim,),))
## print(model.output_shape)
# model.add(BatchNormalization())
# Dense(max_length * 2, use_bias=False)
## print(model.output_shape)
# model.add(BatchNormalization())
# Dense(max_length, activation="tanh", use_bias=False)
# print(model.output_shape)
# model.add(BatchNormalization())
model.add(Reshape((1, input_dim)))
print(model.output_shape)
model.add(
Conv1DTranspose(
input_dim,
(4),
activation="elu",
strides=(1),
padding="same",
use_bias=False,
)
)
# print(model.output_shape)
model.add(BatchNormalization())
model.add(
Conv1DTranspose(
max_length * 4,
(4),
activation="elu",
strides=(1),
padding="same",
use_bias=False,
)
)
# print(model.output_shape)
model.add(BatchNormalization(momentum=0.8))
model.add(
Conv1DTranspose(
max_length * 2,
(4),
strides=(1),
activation="elu",
padding="same",
use_bias=False,
)
)
# print(model.output_shape)
model.add(BatchNormalization(momentum=0.8))
model.add(Dense(max_length, activation="tanh", use_bias=False))
model.add(Reshape((max_length,)))
print("output_shape", model.output_shape)
assert model.output_shape == (None, max_length,) # Note: None is the batch size
return model
def make_discriminator_model():
model = Sequential()
# model.add(Dense(max_length * 8, use_bias=False, input_shape=(input_dim, 1),))
# print(model.output_shape)
# Dense(max_length * 4, use_bias=False)
# print(model.output_shape)
# Dense(max_length * 2, activation="elu", use_bias=False)
model.add(
Conv1D(
30 * max_length,
(4),
kernel_initializer="he_uniform",
strides=(1),
activation="elu",
padding="same",
input_shape=[max_length, 1],
)
)
# print(model.output_shape)
model.add(Dropout(0.3))
model.add(BatchNormalization())
model.add(
Conv1D(
5 * max_length,
(4),
kernel_initializer="he_uniform",
activation="elu",
strides=(1),
padding="same",
)
)
# print(model.output_shape)
model.add(Dropout(0.3))
model.add(Flatten())
model.add(Dense(1))
print(model.output_shape)
return model
def discriminator_loss(real_output, fake_output):
real_loss = cross_entropy(tf.ones_like(real_output), real_output)
fake_loss = cross_entropy(tf.zeros_like(fake_output), fake_output)
total_loss = real_loss + fake_loss
return total_loss
def generator_loss(fake_output):
return cross_entropy(tf.ones_like(fake_output), fake_output)
def train(dataset, epochs):
for epoch in range(epochs):
start = time.time()
for name_batch in dataset:
name_batch = tf.expand_dims(name_batch, -1)
# print(name_batch)
train_step(name_batch)
seed = tf.random.normal([1, input_dim])
generate_and_save_names(generator, seed)
# print(seed)
# Save the model every x epochs
if (epoch + 1) % int(EPOCHS / 4) == 0:
checkpoint.save(file_prefix=checkpoint_prefix)
print("Time for epoch {} is {} sec".format(epoch + 1, time.time() - start))
generate_and_save_names(generator, seed)
display.clear_output(wait=True)
def generate_and_save_names(model, test_input):
# Notice `training` is set to False.
# This is so all layers run in inference mode (batchnorm).
predictions = model(test_input, training=False)
with open("data/predictions.txt", mode="a+") as pred_file:
for pred in predictions:
new_name_int = np.array(pred)
new_name_int = (new_name_int * (len(letters) / 2)) + (len(letters) / 2)
decision = discriminator(tf.expand_dims(predictions, -1))
print("decision", decision.numpy()[0][0])
try:
new_name = "".join(
[letters[int(np.round(number)) - 1] for number in new_name_int]
)
pred_file.write(f"{new_name, decision.numpy()[0][0]}\n")
except IndexError:
pred_file.write(f"Out of bounds... {decision.numpy()[0][0]}\n")
cross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)
generator_optimizer = tf.keras.optimizers.Adam(1e-4)
discriminator_optimizer = tf.keras.optimizers.Adam(1e-4)
names = []
with open("data/names.json", "r") as name_file:
name_dict = json.load(name_file)
names = list(name_dict.keys())[9000:10000]
max_length = max(len(name) for name in names)
print(max_length)
string_shape = (max_length, 1)
standard_names = [name + "_" * (max_length - len(name)) for name in names]
letters = create_letter_list(standard_names)
print(letters)
name_ints = []
for standard_name in standard_names:
name_int = [letters.index(character) + 1 for character in standard_name]
name_int = list((np.array(name_int) - len(letters) / 2) / (len(letters) / 2))
name_ints.append(name_int)
letter_ints = list(set(letter_int for name in name_ints for letter_int in name))
print(min(letter_ints), max(letter_ints))
batch_size = int(len(name_ints) / 100)
buffer_size = len(name_ints)
input_dim = max_length * MULTIPLIER
train_dataset = (
tf.data.Dataset.from_tensor_slices(name_ints).shuffle(buffer_size).batch(batch_size)
)
generator = make_generator_model()
noise = tf.random.normal([1, input_dim])
print("noise", noise.shape)
generated_name = generator(noise, training=False)
print("name", generated_name.shape)
discriminator = make_discriminator_model()
decision = discriminator(tf.expand_dims(generated_name, -1))
print("disc", tf.expand_dims(generated_name, -1).shape)
print("decision", decision.numpy()[0][0])
checkpoint_prefix = os.path.join(CHECKPOINT_DIR, "ckpt")
checkpoint = tf.train.Checkpoint(
generator_optimizer=generator_optimizer,
discriminator_optimizer=discriminator_optimizer,
generator=generator,
discriminator=discriminator,
)
checkpoint.restore(tf.train.latest_checkpoint(CHECKPOINT_DIR))
num_examples_to_generate = 16
# You will reuse this seed overtime (so it's easier)
# to visualize progress in the animated GIF)
seed = tf.random.normal([1, input_dim])
# seed = tf.random.normal([1, 15], int(len(letters)/2), 1)
# Notice the use of `tf.function`
# This annotation causes the function to be "compiled".
@tf.function
def train_step(names):
noise = tf.random.normal([batch_size, input_dim])
# print("noise", noise.shape)
with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
generated_names = tf.expand_dims(generator(noise, training=True), -1)
# print("gen:", generated_names.shape)
# print("names", names.shape)
real_output = discriminator(names, training=True)
fake_output = discriminator(generated_names, training=True)
gen_loss = generator_loss(fake_output)
disc_loss = discriminator_loss(real_output, fake_output)
gradients_of_generator = gen_tape.gradient(gen_loss, generator.trainable_variables)
gradients_of_discriminator = disc_tape.gradient(
disc_loss, discriminator.trainable_variables
)
generator_optimizer.apply_gradients(
zip(gradients_of_generator, generator.trainable_variables)
)
discriminator_optimizer.apply_gradients(
zip(gradients_of_discriminator, discriminator.trainable_variables)
)
train(train_dataset, EPOCHS)
| [
"tensorflow.keras.layers.Reshape",
"tensorflow.keras.layers.Dense",
"tensorflow.zeros_like",
"tensorflow.train.latest_checkpoint",
"tensorflow.keras.models.Sequential",
"os.path.join",
"numpy.round",
"tensorflow.keras.layers.Flatten",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.train... | [((5157, 5209), 'tensorflow.keras.losses.BinaryCrossentropy', 'tf.keras.losses.BinaryCrossentropy', ([], {'from_logits': '(True)'}), '(from_logits=True)\n', (5191, 5209), True, 'import tensorflow as tf\n'), ((5233, 5265), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', (['(0.0001)'], {}), '(0.0001)\n', (5257, 5265), True, 'import tensorflow as tf\n'), ((5290, 5322), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', (['(0.0001)'], {}), '(0.0001)\n', (5314, 5322), True, 'import tensorflow as tf\n'), ((6316, 6348), 'tensorflow.random.normal', 'tf.random.normal', (['[1, input_dim]'], {}), '([1, input_dim])\n', (6332, 6348), True, 'import tensorflow as tf\n'), ((6688, 6724), 'os.path.join', 'os.path.join', (['CHECKPOINT_DIR', '"""ckpt"""'], {}), "(CHECKPOINT_DIR, 'ckpt')\n", (6700, 6724), False, 'import os\n'), ((6738, 6905), 'tensorflow.train.Checkpoint', 'tf.train.Checkpoint', ([], {'generator_optimizer': 'generator_optimizer', 'discriminator_optimizer': 'discriminator_optimizer', 'generator': 'generator', 'discriminator': 'discriminator'}), '(generator_optimizer=generator_optimizer,\n discriminator_optimizer=discriminator_optimizer, generator=generator,\n discriminator=discriminator)\n', (6757, 6905), True, 'import tensorflow as tf\n'), ((7117, 7149), 'tensorflow.random.normal', 'tf.random.normal', (['[1, input_dim]'], {}), '([1, input_dim])\n', (7133, 7149), True, 'import tensorflow as tf\n'), ((546, 558), 'tensorflow.keras.models.Sequential', 'Sequential', ([], {}), '()\n', (556, 558), False, 'from tensorflow.keras.models import Sequential\n'), ((2186, 2198), 'tensorflow.keras.models.Sequential', 'Sequential', ([], {}), '()\n', (2196, 2198), False, 'from tensorflow.keras.models import Sequential\n'), ((4216, 4247), 'IPython.display.clear_output', 'display.clear_output', ([], {'wait': '(True)'}), '(wait=True)\n', (4236, 4247), False, 'from IPython import display\n'), ((5397, 5417), 'json.load', 'json.load', 
(['name_file'], {}), '(name_file)\n', (5406, 5417), False, 'import json\n'), ((6532, 6566), 'tensorflow.expand_dims', 'tf.expand_dims', (['generated_name', '(-1)'], {}), '(generated_name, -1)\n', (6546, 6566), True, 'import tensorflow as tf\n'), ((6936, 6978), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['CHECKPOINT_DIR'], {}), '(CHECKPOINT_DIR)\n', (6962, 6978), True, 'import tensorflow as tf\n'), ((7348, 7389), 'tensorflow.random.normal', 'tf.random.normal', (['[batch_size, input_dim]'], {}), '([batch_size, input_dim])\n', (7364, 7389), True, 'import tensorflow as tf\n'), ((573, 631), 'tensorflow.keras.layers.Dense', 'Dense', (['input_dim'], {'use_bias': '(False)', 'input_shape': '(input_dim,)'}), '(input_dim, use_bias=False, input_shape=(input_dim,))\n', (578, 631), False, 'from tensorflow.keras.layers import Dense, BatchNormalization, Reshape, Conv1D, Conv1DTranspose, Flatten, Dropout\n'), ((970, 993), 'tensorflow.keras.layers.Reshape', 'Reshape', (['(1, input_dim)'], {}), '((1, input_dim))\n', (977, 993), False, 'from tensorflow.keras.layers import Dense, BatchNormalization, Reshape, Conv1D, Conv1DTranspose, Flatten, Dropout\n'), ((1049, 1143), 'tensorflow.keras.layers.Conv1DTranspose', 'Conv1DTranspose', (['input_dim', '(4)'], {'activation': '"""elu"""', 'strides': '(1)', 'padding': '"""same"""', 'use_bias': '(False)'}), "(input_dim, 4, activation='elu', strides=1, padding='same',\n use_bias=False)\n", (1064, 1143), False, 'from tensorflow.keras.layers import Dense, BatchNormalization, Reshape, Conv1D, Conv1DTranspose, Flatten, Dropout\n'), ((1279, 1299), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (1297, 1299), False, 'from tensorflow.keras.layers import Dense, BatchNormalization, Reshape, Conv1D, Conv1DTranspose, Flatten, Dropout\n'), ((1324, 1424), 'tensorflow.keras.layers.Conv1DTranspose', 'Conv1DTranspose', (['(max_length * 4)', '(4)'], {'activation': '"""elu"""', 'strides': '(1)', 
'padding': '"""same"""', 'use_bias': '(False)'}), "(max_length * 4, 4, activation='elu', strides=1, padding=\n 'same', use_bias=False)\n", (1339, 1424), False, 'from tensorflow.keras.layers import Dense, BatchNormalization, Reshape, Conv1D, Conv1DTranspose, Flatten, Dropout\n'), ((1559, 1591), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'momentum': '(0.8)'}), '(momentum=0.8)\n', (1577, 1591), False, 'from tensorflow.keras.layers import Dense, BatchNormalization, Reshape, Conv1D, Conv1DTranspose, Flatten, Dropout\n'), ((1616, 1716), 'tensorflow.keras.layers.Conv1DTranspose', 'Conv1DTranspose', (['(max_length * 2)', '(4)'], {'strides': '(1)', 'activation': '"""elu"""', 'padding': '"""same"""', 'use_bias': '(False)'}), "(max_length * 2, 4, strides=1, activation='elu', padding=\n 'same', use_bias=False)\n", (1631, 1716), False, 'from tensorflow.keras.layers import Dense, BatchNormalization, Reshape, Conv1D, Conv1DTranspose, Flatten, Dropout\n'), ((1851, 1883), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'momentum': '(0.8)'}), '(momentum=0.8)\n', (1869, 1883), False, 'from tensorflow.keras.layers import Dense, BatchNormalization, Reshape, Conv1D, Conv1DTranspose, Flatten, Dropout\n'), ((1899, 1951), 'tensorflow.keras.layers.Dense', 'Dense', (['max_length'], {'activation': '"""tanh"""', 'use_bias': '(False)'}), "(max_length, activation='tanh', use_bias=False)\n", (1904, 1951), False, 'from tensorflow.keras.layers import Dense, BatchNormalization, Reshape, Conv1D, Conv1DTranspose, Flatten, Dropout\n'), ((1967, 1989), 'tensorflow.keras.layers.Reshape', 'Reshape', (['(max_length,)'], {}), '((max_length,))\n', (1974, 1989), False, 'from tensorflow.keras.layers import Dense, BatchNormalization, Reshape, Conv1D, Conv1DTranspose, Flatten, Dropout\n'), ((2482, 2619), 'tensorflow.keras.layers.Conv1D', 'Conv1D', (['(30 * max_length)', '(4)'], {'kernel_initializer': '"""he_uniform"""', 'strides': '(1)', 'activation': 
'"""elu"""', 'padding': '"""same"""', 'input_shape': '[max_length, 1]'}), "(30 * max_length, 4, kernel_initializer='he_uniform', strides=1,\n activation='elu', padding='same', input_shape=[max_length, 1])\n", (2488, 2619), False, 'from tensorflow.keras.layers import Dense, BatchNormalization, Reshape, Conv1D, Conv1DTranspose, Flatten, Dropout\n'), ((2767, 2779), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.3)'], {}), '(0.3)\n', (2774, 2779), False, 'from tensorflow.keras.layers import Dense, BatchNormalization, Reshape, Conv1D, Conv1DTranspose, Flatten, Dropout\n'), ((2795, 2815), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (2813, 2815), False, 'from tensorflow.keras.layers import Dense, BatchNormalization, Reshape, Conv1D, Conv1DTranspose, Flatten, Dropout\n'), ((2840, 2947), 'tensorflow.keras.layers.Conv1D', 'Conv1D', (['(5 * max_length)', '(4)'], {'kernel_initializer': '"""he_uniform"""', 'activation': '"""elu"""', 'strides': '(1)', 'padding': '"""same"""'}), "(5 * max_length, 4, kernel_initializer='he_uniform', activation='elu',\n strides=1, padding='same')\n", (2846, 2947), False, 'from tensorflow.keras.layers import Dense, BatchNormalization, Reshape, Conv1D, Conv1DTranspose, Flatten, Dropout\n'), ((3083, 3095), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.3)'], {}), '(0.3)\n', (3090, 3095), False, 'from tensorflow.keras.layers import Dense, BatchNormalization, Reshape, Conv1D, Conv1DTranspose, Flatten, Dropout\n'), ((3112, 3121), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (3119, 3121), False, 'from tensorflow.keras.layers import Dense, BatchNormalization, Reshape, Conv1D, Conv1DTranspose, Flatten, Dropout\n'), ((3137, 3145), 'tensorflow.keras.layers.Dense', 'Dense', (['(1)'], {}), '(1)\n', (3142, 3145), False, 'from tensorflow.keras.layers import Dense, BatchNormalization, Reshape, Conv1D, Conv1DTranspose, Flatten, Dropout\n'), ((3277, 3302), 'tensorflow.ones_like', 'tf.ones_like', 
(['real_output'], {}), '(real_output)\n', (3289, 3302), True, 'import tensorflow as tf\n'), ((3347, 3373), 'tensorflow.zeros_like', 'tf.zeros_like', (['fake_output'], {}), '(fake_output)\n', (3360, 3373), True, 'import tensorflow as tf\n'), ((3509, 3534), 'tensorflow.ones_like', 'tf.ones_like', (['fake_output'], {}), '(fake_output)\n', (3521, 3534), True, 'import tensorflow as tf\n'), ((3627, 3638), 'time.time', 'time.time', ([], {}), '()\n', (3636, 3638), False, 'import time\n'), ((3824, 3856), 'tensorflow.random.normal', 'tf.random.normal', (['[1, input_dim]'], {}), '([1, input_dim])\n', (3840, 3856), True, 'import tensorflow as tf\n'), ((6582, 6616), 'tensorflow.expand_dims', 'tf.expand_dims', (['generated_name', '(-1)'], {}), '(generated_name, -1)\n', (6596, 6616), True, 'import tensorflow as tf\n'), ((7437, 7454), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (7452, 7454), True, 'import tensorflow as tf\n'), ((7468, 7485), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (7483, 7485), True, 'import tensorflow as tf\n'), ((3700, 3730), 'tensorflow.expand_dims', 'tf.expand_dims', (['name_batch', '(-1)'], {}), '(name_batch, -1)\n', (3714, 3730), True, 'import tensorflow as tf\n'), ((4577, 4591), 'numpy.array', 'np.array', (['pred'], {}), '(pred)\n', (4585, 4591), True, 'import numpy as np\n'), ((4713, 4744), 'tensorflow.expand_dims', 'tf.expand_dims', (['predictions', '(-1)'], {}), '(predictions, -1)\n', (4727, 4744), True, 'import tensorflow as tf\n'), ((5842, 5860), 'numpy.array', 'np.array', (['name_int'], {}), '(name_int)\n', (5850, 5860), True, 'import numpy as np\n'), ((6185, 6230), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['name_ints'], {}), '(name_ints)\n', (6219, 6230), True, 'import tensorflow as tf\n'), ((4144, 4155), 'time.time', 'time.time', ([], {}), '()\n', (4153, 4155), False, 'import time\n'), ((4886, 4902), 'numpy.round', 'np.round', (['number'], {}), '(number)\n', 
(4894, 4902), True, 'import numpy as np\n')] |
# Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING, Dict, Mapping, Optional, Sequence, Union
import numpy as np
from monai.apps.utils import get_logger
from monai.config import DtypeLike, NdarrayOrTensor, PathLike
from monai.data.utils import affine_to_spacing, ensure_tuple, ensure_tuple_rep, orientation_ras_lps, to_affine_nd
from monai.transforms.spatial.array import Resize, SpatialResample
from monai.transforms.utils_pytorch_numpy_unification import ascontiguousarray, moveaxis
from monai.utils import (
GridSampleMode,
GridSamplePadMode,
InterpolateMode,
OptionalImportError,
convert_data_type,
look_up_option,
optional_import,
require_pkg,
)
# Format string applied to this module's log records.
DEFAULT_FMT = "%(asctime)s %(levelname)s %(filename)s:%(lineno)d - %(message)s"
# Registry key that matches any filename extension (fallback writers).
EXT_WILDCARD = "*"
logger = get_logger(module_name=__name__, fmt=DEFAULT_FMT)
# Backends are imported lazily via ``optional_import`` so this module can be imported
# even when itk/nibabel/PIL are not installed; static type checkers see the real imports.
if TYPE_CHECKING:
    import itk
    import nibabel as nib
    from PIL import Image as PILImage
else:
    itk, _ = optional_import("itk", allow_namespace_pkg=True)
    nib, _ = optional_import("nibabel")
    PILImage, _ = optional_import("PIL.Image")
__all__ = [
    "ImageWriter",
    "ITKWriter",
    "NibabelWriter",
    "PILWriter",
    "SUPPORTED_WRITERS",
    "register_writer",
    "resolve_writer",
    "logger",
]
# Maps a lower-case filename extension to a tuple of registered ImageWriter classes;
# populated by ``register_writer`` and consumed/cached by ``resolve_writer``.
SUPPORTED_WRITERS: Dict = {}
def register_writer(ext_name, *im_writers):
    """
    Register one or more ``ImageWriter`` classes for the filename extension ``ext_name``,
    so that the extension can later be resolved to a tuple of candidate writers.

    Custom writers can be registered with:

    .. code-block:: python

        from monai.data import register_writer
        # `MyWriter` must implement `ImageWriter` interface
        register_writer("nii", MyWriter)

    Args:
        ext_name: filename extension of the image; used (lower-cased) as the registry key.
        im_writers: one or multiple ImageWriter classes, highest priority first.
    """
    key = f"{ext_name}".lower()
    # drop a single leading dot so ".nii" and "nii" map to the same entry
    if key[:1] == ".":
        key = key[1:]
    previously_registered = look_up_option(key, SUPPORTED_WRITERS, default=())
    # new writers take precedence over any previously registered ones
    SUPPORTED_WRITERS[key] = im_writers + previously_registered
def resolve_writer(ext_name, error_if_not_found=True) -> Sequence:
    """
    Look up the tuple of available ``ImageWriter`` classes registered in
    ``SUPPORTED_WRITERS`` for the filename extension ``ext_name``.

    Args:
        ext_name: filename extension of the image; used (lower-cased) as the registry key.
        error_if_not_found: whether to raise an error when no suitable writer is found.
            If ``True``, raise an ``OptionalImportError``; otherwise return an empty tuple.
            Default is ``True``.
    """
    if not SUPPORTED_WRITERS:
        init()
    key = f"{ext_name}".lower()
    # drop a single leading dot so ".nii" and "nii" map to the same entry
    if key[:1] == ".":
        key = key[1:]
    fallback = SUPPORTED_WRITERS.get(EXT_WILDCARD, ())
    usable = []
    for candidate in look_up_option(key, SUPPORTED_WRITERS, default=fallback):
        try:
            # instantiation triggers `monai.utils.module.require_pkg` availability check
            candidate()
        except OptionalImportError:
            continue  # backend package is missing; skip this writer
        except Exception:
            usable.append(candidate)  # other init errors indicate the backend exists
        else:
            usable.append(candidate)
    if not usable and error_if_not_found:
        raise OptionalImportError(f"No ImageWriter backend found for {key}.")
    resolved = ensure_tuple(usable)
    SUPPORTED_WRITERS[key] = resolved  # cache the availability-filtered result
    return resolved
class ImageWriter:
    """
    The class is a collection of utilities to write images to disk.
    Main aspects to be considered are:
    - dimensionality of the data array, arrangements of spatial dimensions and channel/time dimensions
    - ``convert_to_channel_last()``
    - metadata of the current affine and output affine, the data array should be converted accordingly
    - ``get_meta_info()``
    - ``resample_if_needed()``
    - data type handling of the output image (as part of ``resample_if_needed()``)
    Subclasses of this class should implement the backend-specific functions:
    - ``set_data_array()`` to set the data array (input must be numpy array or torch tensor)
    - this method sets the backend object's data part
    - ``set_metadata()`` to set the metadata and output affine
    - this method sets the metadata including affine handling and image resampling
    - backend-specific data object ``create_backend_obj()``
    - backend-specific writing function ``write()``
    The primary usage of subclasses of ``ImageWriter`` is:
    .. code-block:: python
        writer = MyWriter()  # subclass of ImageWriter
        writer.set_data_array(data_array)
        writer.set_metadata(meta_dict)
        writer.write(filename)
    This creates an image writer object based on ``data_array`` and ``meta_dict`` and write to ``filename``.
    It supports up to three spatial dimensions (with the resampling step supports for both 2D and 3D).
    When saving multiple time steps or multiple channels `data_array`, time
    and/or modality axes should be the at the `channel_dim`. For example,
    the shape of a 2D eight-class and ``channel_dim=0``, the segmentation
    probabilities to be saved could be `(8, 64, 64)`; in this case
    ``data_array`` will be converted to `(64, 64, 1, 8)` (the third
    dimension is reserved as a spatial dimension).
    The ``metadata`` could optionally have the following keys:
    - ``'original_affine'``: for data original affine, it will be the
      affine of the output object, defaulting to an identity matrix.
    - ``'affine'``: it should specify the current data affine, defaulting to an identity matrix.
    - ``'spatial_shape'``: for data output spatial shape.
    When ``metadata`` is specified, the saver will may resample data from the space defined by
    `"affine"` to the space defined by `"original_affine"`, for more details, please refer to the
    ``resample_if_needed`` method.
    """

    def __init__(self, **kwargs):
        """
        The constructor supports adding new instance members.
        The current member in the base class is ``self.data_obj``, the subclasses can add more members,
        so that necessary meta information can be stored in the object and shared among the class methods.
        """
        self.data_obj = None
        for k, v in kwargs.items():
            setattr(self, k, v)

    def set_data_array(self, data_array, **kwargs):
        raise NotImplementedError(f"Subclasses of {self.__class__.__name__} must implement this method.")

    def set_metadata(self, meta_dict: Optional[Mapping], **options):
        raise NotImplementedError(f"Subclasses of {self.__class__.__name__} must implement this method.")

    def write(self, filename: PathLike, verbose: bool = True, **kwargs):
        """subclass should implement this method to call the backend-specific writing APIs."""
        if verbose:
            # log the actual output filename (the previous message was a constant
            # "(unknown)" because the f-string carried no placeholder)
            logger.info(f"writing: {filename}")

    @classmethod
    def create_backend_obj(cls, data_array: NdarrayOrTensor, **kwargs) -> np.ndarray:
        """
        Subclass should implement this method to return a backend-specific data representation object.
        This method is used by ``cls.write`` and the input ``data_array`` is assumed 'channel-last'.
        """
        return convert_data_type(data_array, np.ndarray)[0]

    @classmethod
    def resample_if_needed(
        cls,
        data_array: NdarrayOrTensor,
        affine: Optional[NdarrayOrTensor] = None,
        target_affine: Optional[NdarrayOrTensor] = None,
        output_spatial_shape: Union[Sequence[int], int, None] = None,
        mode: Union[GridSampleMode, str] = GridSampleMode.BILINEAR,
        padding_mode: Union[GridSamplePadMode, str] = GridSamplePadMode.BORDER,
        align_corners: bool = False,
        dtype: DtypeLike = np.float64,
    ):
        """
        Convert the ``data_array`` into the coordinate system specified by
        ``target_affine``, from the current coordinate definition of ``affine``.
        If the transform between ``affine`` and ``target_affine`` could be
        achieved by simply transposing and flipping ``data_array``, no resampling
        will happen. Otherwise, this function resamples ``data_array`` using the
        transformation computed from ``affine`` and ``target_affine``.
        This function assumes the NIfTI dimension notations. Spatially it
        supports up to three dimensions, that is, H, HW, HWD for 1D, 2D, 3D
        respectively. When saving multiple time steps or multiple channels,
        time and/or modality axes should be appended after the first three
        dimensions. For example, shape of 2D eight-class segmentation
        probabilities to be saved could be `(64, 64, 1, 8)`. Also, data in
        shape `(64, 64, 8)` or `(64, 64, 8, 1)` will be considered as a
        single-channel 3D image. The ``convert_to_channel_last`` method can be
        used to convert the data to the format described here.
        Note that the shape of the resampled ``data_array`` may subject to some
        rounding errors. For example, resampling a 20x20 pixel image from pixel
        size (1.5, 1.5)-mm to (3.0, 3.0)-mm space will return a 10x10-pixel
        image. However, resampling a 20x20-pixel image from pixel size (2.0,
        2.0)-mm to (3.0, 3.0)-mm space will output a 14x14-pixel image, where
        the image shape is rounded from 13.333x13.333 pixels. In this case
        ``output_spatial_shape`` could be specified so that this function
        writes image data to a designated shape.
        Args:
            data_array: input data array to be converted.
            affine: the current affine of ``data_array``. Defaults to identity
            target_affine: the designated affine of ``data_array``.
                The actual output affine might be different from this value due to precision changes.
            output_spatial_shape: spatial shape of the output image.
                This option is used when resampling is needed.
            mode: available options are {``"bilinear"``, ``"nearest"``, ``"bicubic"``}.
                This option is used when resampling is needed.
                Interpolation mode to calculate output values. Defaults to ``"bilinear"``.
                See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample
            padding_mode: available options are {``"zeros"``, ``"border"``, ``"reflection"``}.
                This option is used when resampling is needed.
                Padding mode for outside grid values. Defaults to ``"border"``.
                See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample
            align_corners: boolean option of ``grid_sample`` to handle the corner convention.
                See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample
            dtype: data type for resampling computation. Defaults to
                ``np.float64`` for best precision. If ``None``, use the data type of input data.
                The output data type of this method is always ``np.float32``.
        """
        resampler = SpatialResample(mode=mode, padding_mode=padding_mode, align_corners=align_corners, dtype=dtype)
        # SpatialResample expects a leading channel dim; add it for the call and strip it from the result
        output_array, target_affine = resampler(
            data_array[None], src_affine=affine, dst_affine=target_affine, spatial_size=output_spatial_shape
        )
        return output_array[0], target_affine

    @classmethod
    def convert_to_channel_last(
        cls,
        data: NdarrayOrTensor,
        channel_dim: Union[None, int, Sequence[int]] = 0,
        squeeze_end_dims: bool = True,
        spatial_ndim: Optional[int] = 3,
        contiguous: bool = False,
    ):
        """
        Rearrange the data array axes to make the `channel_dim`-th dim the last
        dimension and ensure there are ``spatial_ndim`` number of spatial
        dimensions.
        When ``squeeze_end_dims`` is ``True``, a postprocessing step will be
        applied to remove any trailing singleton dimensions.
        Args:
            data: input data to be converted to "channel-last" format.
            channel_dim: specifies the channel axes of the data array to move to the last.
                ``None`` indicates no channel dimension, a new axis will be appended as the channel dimension.
                a sequence of integers indicates multiple non-spatial dimensions.
            squeeze_end_dims: if ``True``, any trailing singleton dimensions will be removed (after the channel
                has been moved to the end). So if input is `(H,W,D,C)` and C==1, then it will be saved as `(H,W,D)`.
                If D is also 1, it will be saved as `(H,W)`. If ``False``, image will always be saved as `(H,W,D,C)`.
            spatial_ndim: modifying the spatial dims if needed, so that output to have at least
                this number of spatial dims. If ``None``, the output will have the same number of
                spatial dimensions as the input.
            contiguous: if ``True``, the output will be contiguous.
        """
        # change data to "channel last" format
        if channel_dim is not None:
            _chns = ensure_tuple(channel_dim)
            data = moveaxis(data, _chns, tuple(range(-len(_chns), 0)))
        else:  # adds a channel dimension
            data = data[..., None]
        # To ensure at least ``spatial_ndim`` number of spatial dims
        if spatial_ndim:
            while len(data.shape) < spatial_ndim + 1:  # assuming the data has spatial + channel dims
                data = data[..., None, :]
            while len(data.shape) > spatial_ndim + 1:
                data = data[..., 0, :]
        # if desired, remove trailing singleton dimensions
        while squeeze_end_dims and data.shape[-1] == 1:
            data = np.squeeze(data, -1)
        if contiguous:
            data = ascontiguousarray(data)
        return data

    @classmethod
    def get_meta_info(cls, metadata: Optional[Mapping] = None):
        """
        Extracts relevant meta information from the metadata object (using ``.get``).
        Optional keys are ``"spatial_shape"``, ``"affine"``, ``"original_affine"``.
        """
        if not metadata:
            metadata = {"original_affine": None, "affine": None, "spatial_shape": None}
        original_affine = metadata.get("original_affine")
        affine = metadata.get("affine")
        spatial_shape = metadata.get("spatial_shape")
        return original_affine, affine, spatial_shape
@require_pkg(pkg_name="itk")
class ITKWriter(ImageWriter):
    """
    Write data and metadata into files on disk using ITK-python.
    .. code-block:: python
        import numpy as np
        from monai.data import ITKWriter
        np_data = np.arange(48).reshape(3, 4, 4)
        # write as 3d spatial image no channel
        writer = ITKWriter(output_dtype=np.float32)
        writer.set_data_array(np_data, channel_dim=None)
        # optionally set metadata affine
        writer.set_metadata({"affine": np.eye(4), "original_affine": -1 * np.eye(4)})
        writer.write("test1.nii.gz")
        # write as 2d image, channel-first
        writer = ITKWriter(output_dtype=np.uint8)
        writer.set_data_array(np_data, channel_dim=0)
        writer.set_metadata({"spatial_shape": (5, 5)})
        writer.write("test1.png")
    """
    def __init__(self, output_dtype: DtypeLike = np.float32, **kwargs):
        """
        Args:
            output_dtype: output data type.
            kwargs: keyword arguments passed to ``ImageWriter``.
        The constructor will create ``self.output_dtype`` internally.
        ``affine`` and ``channel_dim`` are initialized as instance members (default ``None``, ``0``):
        - user-specified ``affine`` should be set in ``set_metadata``,
        - user-specified ``channel_dim`` should be set in ``set_data_array``.
        """
        super().__init__(output_dtype=output_dtype, affine=None, channel_dim=0, **kwargs)
    def set_data_array(
        self, data_array: NdarrayOrTensor, channel_dim: Optional[int] = 0, squeeze_end_dims: bool = True, **kwargs
    ):
        """
        Convert ``data_array`` into 'channel-last' numpy ndarray.
        Args:
            data_array: input data array with the channel dimension specified by ``channel_dim``.
            channel_dim: channel dimension of the data array. Defaults to 0.
                ``None`` indicates data without any channel dimension.
            squeeze_end_dims: if ``True``, any trailing singleton dimensions will be removed.
            kwargs: keyword arguments passed to ``self.convert_to_channel_last``,
                currently support ``spatial_ndim`` and ``contiguous``, defauting to ``3`` and ``False`` respectively.
        """
        # rank of the input; used below to detect whether the channel axis survived conversion
        _r = len(data_array.shape)
        self.data_obj = self.convert_to_channel_last(
            data=data_array,
            channel_dim=channel_dim,
            squeeze_end_dims=squeeze_end_dims,
            spatial_ndim=kwargs.pop("spatial_ndim", 3),
            contiguous=kwargs.pop("contiguous", True),
        )
        # if the rank did not shrink, the (moved) channel axis is still present at the end;
        # otherwise the channel dim was squeezed away and the data is treated as channel-less
        self.channel_dim = channel_dim if len(self.data_obj.shape) >= _r else None  # channel dim is at the end
    def set_metadata(self, meta_dict: Optional[Mapping] = None, resample: bool = True, **options):
        """
        Resample ``self.dataobj`` if needed.  This method assumes ``self.data_obj`` is a 'channel-last' ndarray.
        Args:
            meta_dict: a metadata dictionary for affine, original affine and spatial shape information.
                Optional keys are ``"spatial_shape"``, ``"affine"``, ``"original_affine"``.
            resample: if ``True``, the data will be resampled to the original affine (specified in ``meta_dict``).
            options: keyword arguments passed to ``self.resample_if_needed``,
                currently support ``mode``, ``padding_mode``, ``align_corners``, and ``dtype``,
                defaulting to ``bilinear``, ``border``, ``False``, and ``np.float64`` respectively.
        """
        original_affine, affine, spatial_shape = self.get_meta_info(meta_dict)
        self.data_obj, self.affine = self.resample_if_needed(
            data_array=self.data_obj,
            affine=affine,
            target_affine=original_affine if resample else None,
            output_spatial_shape=spatial_shape if resample else None,
            mode=options.pop("mode", GridSampleMode.BILINEAR),
            padding_mode=options.pop("padding_mode", GridSamplePadMode.BORDER),
            align_corners=options.pop("align_corners", False),
            dtype=options.pop("dtype", np.float64),
        )
    def write(self, filename: PathLike, verbose: bool = False, **kwargs):
        """
        Create an ITK object from ``self.create_backend_obj(self.obj, ...)`` and call ``itk.imwrite``.
        Args:
            filename: filename or PathLike object.
            verbose: if ``True``, log the progress.
            kwargs: keyword arguments passed to ``itk.imwrite``,
                currently support ``compression`` and ``imageio``.
        See also:
            - https://github.com/InsightSoftwareConsortium/ITK/blob/v5.2.1/Wrapping/Generators/Python/itk/support/extras.py#L809
        """
        super().write(filename, verbose=verbose)
        self.data_obj = self.create_backend_obj(
            self.data_obj, channel_dim=self.channel_dim, affine=self.affine, dtype=self.output_dtype, **kwargs  # type: ignore
        )
        itk.imwrite(
            self.data_obj, filename, compression=kwargs.pop("compression", False), imageio=kwargs.pop("imageio", None)
        )
    @classmethod
    def create_backend_obj(
        cls,
        data_array: NdarrayOrTensor,
        channel_dim: Optional[int] = 0,
        affine: Optional[NdarrayOrTensor] = None,
        dtype: DtypeLike = np.float32,
        **kwargs,
    ):
        """
        Create an ITK object from ``data_array``. This method assumes a 'channel-last' ``data_array``.
        Args:
            data_array: input data array.
            channel_dim: channel dimension of the data array. This is used to create a Vector Image if it is not ``None``.
            affine: affine matrix of the data array. This is used to compute `spacing`, `direction` and `origin`.
            dtype: output data type.
            kwargs: keyword arguments. Current `itk.GetImageFromArray` will read ``ttype`` from this dictionary.
        see also:
            - https://github.com/InsightSoftwareConsortium/ITK/blob/v5.2.1/Wrapping/Generators/Python/itk/support/extras.py#L389
        """
        data_array = super().create_backend_obj(data_array)
        # a non-None channel_dim means the trailing axis holds channels -> build an ITK Vector Image
        _is_vec = channel_dim is not None
        if _is_vec:
            data_array = np.moveaxis(data_array, -1, 0)  # from channel last to channel first
        # transpose to reverse the axis order (ITK indexes in the opposite convention to numpy)
        data_array = data_array.T.astype(dtype, copy=True, order="C")
        itk_obj = itk.GetImageFromArray(data_array, is_vector=_is_vec, ttype=kwargs.pop("ttype", None))
        d = len(itk.size(itk_obj))  # number of spatial dimensions of the created image
        if affine is None:
            affine = np.eye(d + 1, dtype=np.float64)  # default to an identity affine
        _affine = convert_data_type(affine, np.ndarray)[0]
        # convert the affine from RAS to ITK's LPS coordinate convention
        _affine = orientation_ras_lps(to_affine_nd(d, _affine))
        spacing = affine_to_spacing(_affine, r=d)
        # direction cosines: the rotation part of the affine with spacing factored out
        _direction: np.ndarray = np.diag(1 / spacing)
        _direction = _affine[:d, :d] @ _direction
        itk_obj.SetSpacing(spacing.tolist())
        itk_obj.SetOrigin(_affine[:d, -1].tolist())
        itk_obj.SetDirection(itk.GetMatrixFromArray(_direction))
        return itk_obj
@require_pkg(pkg_name="nibabel")
class NibabelWriter(ImageWriter):
    """
    Write data and metadata into files on disk using Nibabel.
    .. code-block:: python
        import numpy as np
        from monai.data import NibabelWriter
        np_data = np.arange(48).reshape(3, 4, 4)
        writer = NibabelWriter()
        writer.set_data_array(np_data, channel_dim=None)
        writer.set_metadata({"affine": np.eye(4), "original_affine": np.eye(4)})
        writer.write("test1.nii.gz", verbose=True)
    """

    def __init__(self, output_dtype: DtypeLike = np.float32, **kwargs):
        """
        Args:
            output_dtype: output data type.
            kwargs: keyword arguments passed to ``ImageWriter``.
        The constructor will create ``self.output_dtype`` internally.
        ``affine`` is initialized as instance members (default ``None``),
        user-specified ``affine`` should be set in ``set_metadata``.
        """
        super().__init__(output_dtype=output_dtype, affine=None, **kwargs)

    def set_data_array(
        self, data_array: NdarrayOrTensor, channel_dim: Optional[int] = 0, squeeze_end_dims: bool = True, **kwargs
    ):
        """
        Convert ``data_array`` into 'channel-last' numpy ndarray.
        Args:
            data_array: input data array with the channel dimension specified by ``channel_dim``.
            channel_dim: channel dimension of the data array. Defaults to 0.
                ``None`` indicates data without any channel dimension.
            squeeze_end_dims: if ``True``, any trailing singleton dimensions will be removed.
            kwargs: keyword arguments passed to ``self.convert_to_channel_last``,
                currently support ``spatial_ndim``, defauting to ``3``.
        """
        self.data_obj = self.convert_to_channel_last(
            data=data_array,
            channel_dim=channel_dim,
            squeeze_end_dims=squeeze_end_dims,
            spatial_ndim=kwargs.pop("spatial_ndim", 3),
        )

    def set_metadata(self, meta_dict: Optional[Mapping] = None, resample: bool = True, **options):
        """
        Resample ``self.dataobj`` if needed.  This method assumes ``self.data_obj`` is a 'channel-last' ndarray.
        Args:
            meta_dict: an optional metadata dictionary for affine, original affine and spatial shape information.
                Optional keys are ``"spatial_shape"``, ``"affine"``, ``"original_affine"``.
                Defaults to ``None`` (consistent with the other writers' ``set_metadata`` signatures).
            resample: if ``True``, the data will be resampled to the original affine (specified in ``meta_dict``).
            options: keyword arguments passed to ``self.resample_if_needed``,
                currently support ``mode``, ``padding_mode``, ``align_corners``, and ``dtype``,
                defaulting to ``bilinear``, ``border``, ``False``, and ``np.float64`` respectively.
        """
        original_affine, affine, spatial_shape = self.get_meta_info(meta_dict)
        self.data_obj, self.affine = self.resample_if_needed(
            data_array=self.data_obj,
            affine=affine,
            target_affine=original_affine if resample else None,
            output_spatial_shape=spatial_shape if resample else None,
            mode=options.pop("mode", GridSampleMode.BILINEAR),
            padding_mode=options.pop("padding_mode", GridSamplePadMode.BORDER),
            align_corners=options.pop("align_corners", False),
            dtype=options.pop("dtype", np.float64),
        )

    def write(self, filename: PathLike, verbose: bool = False, **obj_kwargs):
        """
        Create a Nibabel object from ``self.create_backend_obj(self.obj, ...)`` and call ``nib.save``.
        Args:
            filename: filename or PathLike object.
            verbose: if ``True``, log the progress.
            obj_kwargs: keyword arguments passed to ``self.create_backend_obj``,
        See also:
            - https://nipy.org/nibabel/reference/nibabel.nifti1.html#nibabel.nifti1.save
        """
        super().write(filename, verbose=verbose)
        self.data_obj = self.create_backend_obj(
            self.data_obj, affine=self.affine, dtype=self.output_dtype, **obj_kwargs  # type: ignore
        )
        nib.save(self.data_obj, filename)

    @classmethod
    def create_backend_obj(
        cls, data_array: NdarrayOrTensor, affine: Optional[NdarrayOrTensor] = None, dtype: DtypeLike = None, **kwargs
    ):
        """
        Create an Nifti1Image object from ``data_array``. This method assumes a 'channel-last' ``data_array``.
        Args:
            data_array: input data array.
            affine: affine matrix of the data array; defaults to an identity when not provided.
            dtype: output data type.
            kwargs: keyword arguments. Current ``nib.nifti1.Nifti1Image`` will read
                ``header``, ``extra``, ``file_map`` from this dictionary.
        See also:
            - https://nipy.org/nibabel/reference/nibabel.nifti1.html#nibabel.nifti1.Nifti1Image
        """
        data_array = super().create_backend_obj(data_array)
        if dtype is not None:
            data_array = data_array.astype(dtype, copy=False)
        if affine is None:
            # default to an identity affine, consistent with ``ITKWriter.create_backend_obj``
            affine = np.eye(4, dtype=np.float64)
        affine = convert_data_type(affine, np.ndarray)[0]
        affine = to_affine_nd(r=3, affine=affine)
        return nib.nifti1.Nifti1Image(
            data_array,
            affine,
            header=kwargs.pop("header", None),
            extra=kwargs.pop("extra", None),
            file_map=kwargs.pop("file_map", None),
        )
@require_pkg(pkg_name="PIL")
class PILWriter(ImageWriter):
    """
    Write image data into files on disk using pillow.

    It's based on the Image module in PIL library:
    https://pillow.readthedocs.io/en/stable/reference/Image.html

    .. code-block:: python

        import numpy as np
        from monai.data import PILWriter

        np_data = np.arange(48).reshape(3, 4, 4)
        writer = PILWriter(np.uint8)
        writer.set_data_array(np_data, channel_dim=0)
        writer.write("test1.png", verbose=True)
    """

    def __init__(
        self, output_dtype: DtypeLike = np.float32, channel_dim: Optional[int] = 0, scale: Optional[int] = 255, **kwargs
    ):
        """
        Args:
            output_dtype: output data type.
            channel_dim: channel dimension of the data array. Defaults to 0.
                ``None`` indicates data without any channel dimension.
            scale: {``255``, ``65535``} postprocess data by clipping to [0, 1] and scaling to
                [0, 255] (uint8) or [0, 65535] (uint16). Defaults to ``255``; ``None`` disables scaling.
            kwargs: keyword arguments passed to ``ImageWriter``.
        """
        super().__init__(output_dtype=output_dtype, channel_dim=channel_dim, scale=scale, **kwargs)

    def set_data_array(
        self,
        data_array: NdarrayOrTensor,
        channel_dim: Optional[int] = 0,
        squeeze_end_dims: bool = True,
        contiguous: bool = False,
        **kwargs,
    ):
        """
        Convert ``data_array`` into 'channel-last' numpy ndarray.

        Args:
            data_array: input data array with the channel dimension specified by ``channel_dim``.
            channel_dim: channel dimension of the data array. Defaults to 0.
                ``None`` indicates data without any channel dimension.
            squeeze_end_dims: if ``True``, any trailing singleton dimensions will be removed.
            contiguous: if ``True``, the data array will be converted to a contiguous array. Default is ``False``.
            kwargs: keyword arguments passed to ``self.convert_to_channel_last``,
                currently support ``spatial_ndim``, defaulting to ``2``.
        """
        self.data_obj = self.convert_to_channel_last(
            data=data_array,
            channel_dim=channel_dim,
            squeeze_end_dims=squeeze_end_dims,
            spatial_ndim=kwargs.pop("spatial_ndim", 2),
            contiguous=contiguous,
        )

    def set_metadata(self, meta_dict: Optional[Mapping] = None, resample: bool = True, **options):
        """
        Resample ``self.data_obj`` if needed. This method assumes ``self.data_obj`` is a 'channel-last' ndarray.

        Args:
            meta_dict: a metadata dictionary for affine, original affine and spatial shape information.
                Optional key is ``"spatial_shape"``.
            resample: if ``True``, the data will be resampled to the spatial shape specified in ``meta_dict``.
            options: keyword arguments passed to ``self.resample_if_needed``,
                currently support ``mode``, defaulting to ``bicubic``.
        """
        spatial_shape = self.get_meta_info(meta_dict)
        self.data_obj = self.resample_and_clip(
            data_array=self.data_obj,
            output_spatial_shape=spatial_shape if resample else None,
            mode=options.pop("mode", InterpolateMode.BICUBIC),
        )

    def write(self, filename: PathLike, verbose: bool = False, **kwargs):
        """
        Create a PIL image object from ``self.create_backend_obj(self.obj, ...)`` and call ``save``.

        Args:
            filename: filename or PathLike object.
            verbose: if ``True``, log the progress.
            kwargs: optional keyword arguments passed to ``self.create_backend_obj``
                currently support ``reverse_indexing``, ``image_mode``, defaulting to ``True``, ``None`` respectively.
                Any remaining kwargs are also forwarded to ``PIL.Image.Image.save``.

        See also:
            - https://pillow.readthedocs.io/en/stable/reference/Image.html#PIL.Image.Image.save
        """
        super().write(filename, verbose=verbose)
        self.data_obj = self.create_backend_obj(
            data_array=self.data_obj,
            dtype=self.output_dtype,  # type: ignore
            reverse_indexing=kwargs.pop("reverse_indexing", True),
            image_mode=kwargs.pop("image_mode", None),
            scale=self.scale,  # type: ignore
            **kwargs,
        )
        # note: leftover kwargs (after the pops above) go to PIL's ``save`` as well
        self.data_obj.save(filename, **kwargs)

    @classmethod
    def get_meta_info(cls, metadata: Optional[Mapping] = None):
        """Return the ``"spatial_shape"`` entry of ``metadata`` if available, otherwise ``None``."""
        return None if not metadata else metadata.get("spatial_shape")

    @classmethod
    def resample_and_clip(
        cls,
        data_array: NdarrayOrTensor,
        output_spatial_shape: Optional[Sequence[int]] = None,
        mode: Union[InterpolateMode, str] = InterpolateMode.BICUBIC,
    ):
        """
        Resample ``data_array`` to ``output_spatial_shape`` if needed.

        Args:
            data_array: input data array. This method assumes the 'channel-last' format.
            output_spatial_shape: output spatial shape.
            mode: interpolation mode, default is ``InterpolateMode.BICUBIC``.
        """
        data: np.ndarray = convert_data_type(data_array, np.ndarray)[0]
        if output_spatial_shape is not None:
            output_spatial_shape_ = ensure_tuple_rep(output_spatial_shape, 2)
            mode = look_up_option(mode, InterpolateMode)
            # nearest/area interpolation does not accept align_corners
            align_corners = None if mode in (InterpolateMode.NEAREST, InterpolateMode.AREA) else False
            xform = Resize(spatial_size=output_spatial_shape_, mode=mode, align_corners=align_corners)
            _min, _max = np.min(data), np.max(data)
            if len(data.shape) == 3:
                data = np.moveaxis(data, -1, 0)  # to channel first
                data = xform(data)  # type: ignore
                data = np.moveaxis(data, 0, -1)
            else:  # (H, W)
                data = np.expand_dims(data, 0)  # make a channel
                data = xform(data)[0]  # type: ignore
            if mode != InterpolateMode.NEAREST:
                # interpolation may overshoot; clip back to the original value range
                data = np.clip(data, _min, _max)
        return data

    @classmethod
    def create_backend_obj(
        cls,
        data_array: NdarrayOrTensor,
        dtype: DtypeLike = None,
        scale: Optional[int] = 255,
        reverse_indexing: bool = True,
        **kwargs,
    ):
        """
        Create a PIL object from ``data_array``.

        Args:
            data_array: input data array.
            dtype: output data type.
            scale: {``255``, ``65535``} postprocess data by clipping to [0, 1] and scaling to
                [0, 255] (uint8) or [0, 65535] (uint16). ``None``/``0`` disables scaling.
            reverse_indexing: if ``True``, the data array's first two dimensions will be swapped.
            kwargs: keyword arguments. Currently ``PILImage.fromarray`` will read
                ``image_mode`` from this dictionary, defaults to ``None``.

        See also:
            - https://pillow.readthedocs.io/en/stable/reference/Image.html
        """
        data: np.ndarray = super().create_backend_obj(data_array)
        if scale:
            # scale the data to be in an integer range
            data = np.clip(data, 0.0, 1.0)  # type: ignore # png writer only can scale data in range [0, 1]
            if scale == np.iinfo(np.uint8).max:
                data = (scale * data).astype(np.uint8, copy=False)
            elif scale == np.iinfo(np.uint16).max:
                data = (scale * data).astype(np.uint16, copy=False)
            else:
                raise ValueError(f"Unsupported scale: {scale}, available options are [255, 65535].")
        if dtype is not None:
            data = data.astype(dtype, copy=False)
        if reverse_indexing:
            data = np.moveaxis(data, 0, 1)
        return PILImage.fromarray(data, mode=kwargs.pop("image_mode", None))
def init():
    """
    Initialize the image writer modules according to the filename extension.

    Registers the default writer backends (first entry is tried first):
    pillow for the common bitmap formats, nibabel/ITK for NIfTI, ITK for nrrd,
    and an ITK-first fallback for any other extension.
    """
    for ext in ("png", "jpg", "jpeg", "bmp", "tiff", "tif"):
        register_writer(ext, PILWriter)  # TODO: test 16-bit
    for ext in ("nii.gz", "nii"):
        register_writer(ext, NibabelWriter, ITKWriter)
    register_writer("nrrd", ITKWriter, NibabelWriter)
    # bug fix: ITKWriter was listed twice in the wildcard registration; deduplicated
    register_writer(EXT_WILDCARD, ITKWriter, NibabelWriter)
| [
"numpy.moveaxis",
"monai.utils.OptionalImportError",
"numpy.iinfo",
"numpy.clip",
"itk.size",
"numpy.diag",
"itk.GetMatrixFromArray",
"nibabel.save",
"numpy.max",
"monai.data.utils.ensure_tuple_rep",
"monai.transforms.spatial.array.SpatialResample",
"monai.utils.require_pkg",
"monai.data.uti... | [((1349, 1398), 'monai.apps.utils.get_logger', 'get_logger', ([], {'module_name': '__name__', 'fmt': 'DEFAULT_FMT'}), '(module_name=__name__, fmt=DEFAULT_FMT)\n', (1359, 1398), False, 'from monai.apps.utils import get_logger\n'), ((15368, 15395), 'monai.utils.require_pkg', 'require_pkg', ([], {'pkg_name': '"""itk"""'}), "(pkg_name='itk')\n", (15379, 15395), False, 'from monai.utils import GridSampleMode, GridSamplePadMode, InterpolateMode, OptionalImportError, convert_data_type, look_up_option, optional_import, require_pkg\n'), ((22454, 22485), 'monai.utils.require_pkg', 'require_pkg', ([], {'pkg_name': '"""nibabel"""'}), "(pkg_name='nibabel')\n", (22465, 22485), False, 'from monai.utils import GridSampleMode, GridSamplePadMode, InterpolateMode, OptionalImportError, convert_data_type, look_up_option, optional_import, require_pkg\n'), ((27890, 27917), 'monai.utils.require_pkg', 'require_pkg', ([], {'pkg_name': '"""PIL"""'}), "(pkg_name='PIL')\n", (27901, 27917), False, 'from monai.utils import GridSampleMode, GridSamplePadMode, InterpolateMode, OptionalImportError, convert_data_type, look_up_option, optional_import, require_pkg\n'), ((1516, 1564), 'monai.utils.optional_import', 'optional_import', (['"""itk"""'], {'allow_namespace_pkg': '(True)'}), "('itk', allow_namespace_pkg=True)\n", (1531, 1564), False, 'from monai.utils import GridSampleMode, GridSamplePadMode, InterpolateMode, OptionalImportError, convert_data_type, look_up_option, optional_import, require_pkg\n'), ((1578, 1604), 'monai.utils.optional_import', 'optional_import', (['"""nibabel"""'], {}), "('nibabel')\n", (1593, 1604), False, 'from monai.utils import GridSampleMode, GridSamplePadMode, InterpolateMode, OptionalImportError, convert_data_type, look_up_option, optional_import, require_pkg\n'), ((1623, 1651), 'monai.utils.optional_import', 'optional_import', (['"""PIL.Image"""'], {}), "('PIL.Image')\n", (1638, 1651), False, 'from monai.utils import GridSampleMode, 
GridSamplePadMode, InterpolateMode, OptionalImportError, convert_data_type, look_up_option, optional_import, require_pkg\n'), ((2640, 2690), 'monai.utils.look_up_option', 'look_up_option', (['fmt', 'SUPPORTED_WRITERS'], {'default': '()'}), '(fmt, SUPPORTED_WRITERS, default=())\n', (2654, 2690), False, 'from monai.utils import GridSampleMode, GridSamplePadMode, InterpolateMode, OptionalImportError, convert_data_type, look_up_option, optional_import, require_pkg\n'), ((3568, 3631), 'monai.utils.look_up_option', 'look_up_option', (['fmt', 'SUPPORTED_WRITERS'], {'default': 'default_writers'}), '(fmt, SUPPORTED_WRITERS, default=default_writers)\n', (3582, 3631), False, 'from monai.utils import GridSampleMode, GridSamplePadMode, InterpolateMode, OptionalImportError, convert_data_type, look_up_option, optional_import, require_pkg\n'), ((4113, 4140), 'monai.data.utils.ensure_tuple', 'ensure_tuple', (['avail_writers'], {}), '(avail_writers)\n', (4125, 4140), False, 'from monai.data.utils import affine_to_spacing, ensure_tuple, ensure_tuple_rep, orientation_ras_lps, to_affine_nd\n'), ((4030, 4093), 'monai.utils.OptionalImportError', 'OptionalImportError', (['f"""No ImageWriter backend found for {fmt}."""'], {}), "(f'No ImageWriter backend found for {fmt}.')\n", (4049, 4093), False, 'from monai.utils import GridSampleMode, GridSamplePadMode, InterpolateMode, OptionalImportError, convert_data_type, look_up_option, optional_import, require_pkg\n'), ((11972, 12072), 'monai.transforms.spatial.array.SpatialResample', 'SpatialResample', ([], {'mode': 'mode', 'padding_mode': 'padding_mode', 'align_corners': 'align_corners', 'dtype': 'dtype'}), '(mode=mode, padding_mode=padding_mode, align_corners=\n align_corners, dtype=dtype)\n', (11987, 12072), False, 'from monai.transforms.spatial.array import Resize, SpatialResample\n'), ((22130, 22161), 'monai.data.utils.affine_to_spacing', 'affine_to_spacing', (['_affine'], {'r': 'd'}), '(_affine, r=d)\n', (22147, 22161), False, 'from 
monai.data.utils import affine_to_spacing, ensure_tuple, ensure_tuple_rep, orientation_ras_lps, to_affine_nd\n'), ((22195, 22215), 'numpy.diag', 'np.diag', (['(1 / spacing)'], {}), '(1 / spacing)\n', (22202, 22215), True, 'import numpy as np\n'), ((26630, 26663), 'nibabel.save', 'nib.save', (['self.data_obj', 'filename'], {}), '(self.data_obj, filename)\n', (26638, 26663), True, 'import nibabel as nib\n'), ((27618, 27650), 'monai.data.utils.to_affine_nd', 'to_affine_nd', ([], {'r': '(3)', 'affine': 'affine'}), '(r=3, affine=affine)\n', (27630, 27650), False, 'from monai.data.utils import affine_to_spacing, ensure_tuple, ensure_tuple_rep, orientation_ras_lps, to_affine_nd\n'), ((8122, 8163), 'monai.utils.convert_data_type', 'convert_data_type', (['data_array', 'np.ndarray'], {}), '(data_array, np.ndarray)\n', (8139, 8163), False, 'from monai.utils import GridSampleMode, GridSamplePadMode, InterpolateMode, OptionalImportError, convert_data_type, look_up_option, optional_import, require_pkg\n'), ((14024, 14049), 'monai.data.utils.ensure_tuple', 'ensure_tuple', (['channel_dim'], {}), '(channel_dim)\n', (14036, 14049), False, 'from monai.data.utils import affine_to_spacing, ensure_tuple, ensure_tuple_rep, orientation_ras_lps, to_affine_nd\n'), ((14663, 14683), 'numpy.squeeze', 'np.squeeze', (['data', '(-1)'], {}), '(data, -1)\n', (14673, 14683), True, 'import numpy as np\n'), ((14726, 14749), 'monai.transforms.utils_pytorch_numpy_unification.ascontiguousarray', 'ascontiguousarray', (['data'], {}), '(data)\n', (14743, 14749), False, 'from monai.transforms.utils_pytorch_numpy_unification import ascontiguousarray, moveaxis\n'), ((21630, 21660), 'numpy.moveaxis', 'np.moveaxis', (['data_array', '(-1)', '(0)'], {}), '(data_array, -1, 0)\n', (21641, 21660), True, 'import numpy as np\n'), ((21890, 21907), 'itk.size', 'itk.size', (['itk_obj'], {}), '(itk_obj)\n', (21898, 21907), False, 'import itk\n'), ((21957, 21988), 'numpy.eye', 'np.eye', (['(d + 1)'], {'dtype': 
'np.float64'}), '(d + 1, dtype=np.float64)\n', (21963, 21988), True, 'import numpy as np\n'), ((22007, 22044), 'monai.utils.convert_data_type', 'convert_data_type', (['affine', 'np.ndarray'], {}), '(affine, np.ndarray)\n', (22024, 22044), False, 'from monai.utils import GridSampleMode, GridSamplePadMode, InterpolateMode, OptionalImportError, convert_data_type, look_up_option, optional_import, require_pkg\n'), ((22086, 22110), 'monai.data.utils.to_affine_nd', 'to_affine_nd', (['d', '_affine'], {}), '(d, _affine)\n', (22098, 22110), False, 'from monai.data.utils import affine_to_spacing, ensure_tuple, ensure_tuple_rep, orientation_ras_lps, to_affine_nd\n'), ((22392, 22426), 'itk.GetMatrixFromArray', 'itk.GetMatrixFromArray', (['_direction'], {}), '(_direction)\n', (22414, 22426), False, 'import itk\n'), ((27560, 27597), 'monai.utils.convert_data_type', 'convert_data_type', (['affine', 'np.ndarray'], {}), '(affine, np.ndarray)\n', (27577, 27597), False, 'from monai.utils import GridSampleMode, GridSamplePadMode, InterpolateMode, OptionalImportError, convert_data_type, look_up_option, optional_import, require_pkg\n'), ((33115, 33156), 'monai.utils.convert_data_type', 'convert_data_type', (['data_array', 'np.ndarray'], {}), '(data_array, np.ndarray)\n', (33132, 33156), False, 'from monai.utils import GridSampleMode, GridSamplePadMode, InterpolateMode, OptionalImportError, convert_data_type, look_up_option, optional_import, require_pkg\n'), ((33241, 33282), 'monai.data.utils.ensure_tuple_rep', 'ensure_tuple_rep', (['output_spatial_shape', '(2)'], {}), '(output_spatial_shape, 2)\n', (33257, 33282), False, 'from monai.data.utils import affine_to_spacing, ensure_tuple, ensure_tuple_rep, orientation_ras_lps, to_affine_nd\n'), ((33302, 33339), 'monai.utils.look_up_option', 'look_up_option', (['mode', 'InterpolateMode'], {}), '(mode, InterpolateMode)\n', (33316, 33339), False, 'from monai.utils import GridSampleMode, GridSamplePadMode, InterpolateMode, OptionalImportError, 
convert_data_type, look_up_option, optional_import, require_pkg\n'), ((33463, 33550), 'monai.transforms.spatial.array.Resize', 'Resize', ([], {'spatial_size': 'output_spatial_shape_', 'mode': 'mode', 'align_corners': 'align_corners'}), '(spatial_size=output_spatial_shape_, mode=mode, align_corners=\n align_corners)\n', (33469, 33550), False, 'from monai.transforms.spatial.array import Resize, SpatialResample\n'), ((35154, 35177), 'numpy.clip', 'np.clip', (['data', '(0.0)', '(1.0)'], {}), '(data, 0.0, 1.0)\n', (35161, 35177), True, 'import numpy as np\n'), ((35724, 35747), 'numpy.moveaxis', 'np.moveaxis', (['data', '(0)', '(1)'], {}), '(data, 0, 1)\n', (35735, 35747), True, 'import numpy as np\n'), ((33571, 33583), 'numpy.min', 'np.min', (['data'], {}), '(data)\n', (33577, 33583), True, 'import numpy as np\n'), ((33585, 33597), 'numpy.max', 'np.max', (['data'], {}), '(data)\n', (33591, 33597), True, 'import numpy as np\n'), ((33658, 33682), 'numpy.moveaxis', 'np.moveaxis', (['data', '(-1)', '(0)'], {}), '(data, -1, 0)\n', (33669, 33682), True, 'import numpy as np\n'), ((33777, 33801), 'numpy.moveaxis', 'np.moveaxis', (['data', '(0)', '(-1)'], {}), '(data, 0, -1)\n', (33788, 33801), True, 'import numpy as np\n'), ((33853, 33876), 'numpy.expand_dims', 'np.expand_dims', (['data', '(0)'], {}), '(data, 0)\n', (33867, 33876), True, 'import numpy as np\n'), ((34020, 34045), 'numpy.clip', 'np.clip', (['data', '_min', '_max'], {}), '(data, _min, _max)\n', (34027, 34045), True, 'import numpy as np\n'), ((35267, 35285), 'numpy.iinfo', 'np.iinfo', (['np.uint8'], {}), '(np.uint8)\n', (35275, 35285), True, 'import numpy as np\n'), ((35384, 35403), 'numpy.iinfo', 'np.iinfo', (['np.uint16'], {}), '(np.uint16)\n', (35392, 35403), True, 'import numpy as np\n')] |
from collections import namedtuple
import numpy as np
import talib
from jesse.helpers import get_candle_source
# Return type of ``rsmk``: ``indicator`` is the RSMK line, ``signal`` its moving-average signal line.
RSMK = namedtuple('RSMK', ['indicator', 'signal'])
def rsmk(candles: np.ndarray, candles_compare: np.ndarray, lookback: int = 90, period: int = 3, signal_period: int = 20,
         matype: int = 1,
         signal_matype: int = 1, source_type: str = "close", sequential: bool = False) -> RSMK:
    """
    RSMK - Relative Strength

    Compares ``candles`` against ``candles_compare`` via the log price ratio,
    takes its ``lookback``-period momentum, smooths it into the indicator line
    and derives a signal line from it.

    :param candles: np.ndarray
    :param candles_compare: np.ndarray - candles of the comparison instrument
    :param lookback: int - default: 90 - momentum lookback of the log ratio
    :param period: int - default: 3 - smoothing period of the indicator line
    :param signal_period: int - default: 20 - smoothing period of the signal line
    :param matype: int - default: 1 - moving average type for the indicator
    :param signal_matype: int - default: 1 - moving average type for the signal
    :param source_type: str - default: "close"
    :param sequential: bool - default=False

    :return: RSMK(indicator, signal)
    """
    # in non-sequential mode only the most recent 240 candles are needed
    if not sequential and len(candles) > 240:
        candles, candles_compare = candles[-240:], candles_compare[-240:]

    base = get_candle_source(candles, source_type=source_type)
    comp = get_candle_source(candles_compare, source_type=source_type)

    log_ratio = np.log(base / comp)
    momentum = talib.MOM(log_ratio, timeperiod=lookback)
    indicator = 100 * talib.MA(momentum, timeperiod=period, matype=matype)
    signal_line = talib.MA(indicator, timeperiod=signal_period, matype=signal_matype)

    if sequential:
        return RSMK(indicator, signal_line)

    last_ind = indicator[-1]
    last_sig = signal_line[-1]
    # NaN (warm-up region) is reported as None for single-value output
    return RSMK(None if np.isnan(last_ind) else last_ind, None if np.isnan(last_sig) else last_sig)
| [
"numpy.log",
"jesse.helpers.get_candle_source",
"talib.MOM",
"talib.MA",
"numpy.isnan",
"collections.namedtuple"
] | [((121, 164), 'collections.namedtuple', 'namedtuple', (['"""RSMK"""', "['indicator', 'signal']"], {}), "('RSMK', ['indicator', 'signal'])\n", (131, 164), False, 'from collections import namedtuple\n'), ((828, 879), 'jesse.helpers.get_candle_source', 'get_candle_source', (['candles'], {'source_type': 'source_type'}), '(candles, source_type=source_type)\n', (845, 879), False, 'from jesse.helpers import get_candle_source\n'), ((901, 960), 'jesse.helpers.get_candle_source', 'get_candle_source', (['candles_compare'], {'source_type': 'source_type'}), '(candles_compare, source_type=source_type)\n', (918, 960), False, 'from jesse.helpers import get_candle_source\n'), ((970, 1001), 'numpy.log', 'np.log', (['(source / source_compare)'], {}), '(source / source_compare)\n', (976, 1001), True, 'import numpy as np\n'), ((1010, 1043), 'talib.MOM', 'talib.MOM', (['a'], {'timeperiod': 'lookback'}), '(a, timeperiod=lookback)\n', (1019, 1043), False, 'import talib\n'), ((1121, 1182), 'talib.MA', 'talib.MA', (['res'], {'timeperiod': 'signal_period', 'matype': 'signal_matype'}), '(res, timeperiod=signal_period, matype=signal_matype)\n', (1129, 1182), False, 'import talib\n'), ((1055, 1100), 'talib.MA', 'talib.MA', (['b'], {'timeperiod': 'period', 'matype': 'matype'}), '(b, timeperiod=period, matype=matype)\n', (1063, 1100), False, 'import talib\n'), ((1274, 1291), 'numpy.isnan', 'np.isnan', (['res[-1]'], {}), '(res[-1])\n', (1282, 1291), True, 'import numpy as np\n'), ((1314, 1334), 'numpy.isnan', 'np.isnan', (['signal[-1]'], {}), '(signal[-1])\n', (1322, 1334), True, 'import numpy as np\n')] |
from . import postprocessor as postp
from ..shared_code import preprocessor as prep
from tensorflow import make_tensor_proto, make_ndarray
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2_grpc
from time import time
import azure.functions as func
import cv2
import grpc
import logging
import numpy as np
import os
from PIL import Image
# Back-end inference server endpoint, read from environment variables.
# NOTE(review): ``os.environ.get`` returns ``None`` when unset — presumably the
# deployment guarantees these are configured; verify against the app settings.
_HOST = os.environ.get("HUMANSEGMENTATION_IPADDRESS")
_PORT = os.environ.get("HUMANSEGMENTATION_PORT")
def main(req: func.HttpRequest, context: func.Context) -> func.HttpResponse:
    """
    HTTP-triggered Azure Function: human segmentation on an uploaded JPEG.

    The posted image is preprocessed, sent over gRPC to a TensorFlow Serving
    model named 'semantic-segmentation-adas', and the resulting mask is used to
    composite a grayscale version of the picture with the original.

    Args:
        req: incoming HTTP request; must be a POST carrying an 'image' file
            part with content type ``image/jpeg``.
        context: Azure Functions invocation context (provides the invocation id).

    Returns:
        func.HttpResponse: the processed JPEG (200) or an error status
        (400 bad request, 408 gRPC timeout, 500 server error).
    """
    _NAME = 'image'
    event_id = context.invocation_id
    logging.info(
        f"Python humanpose function start process.\nID:{event_id}\nBack-end server host: {_HOST}:{_PORT}")
    # Pre-bind so the generic error handler below never hits an unbound local.
    method = None
    url = None
    try:
        method = req.method
        url = req.url
        # .get() instead of indexing: a request without the file part should reach
        # the "no image files" 400 branch below rather than raise KeyError (-> 500).
        files = req.files.get(_NAME)
        if method != 'POST':
            # bug fix: the original logged files.content_type here instead of the HTTP method
            logging.warning(
                f'ID:{event_id},the method was {method}.refused.')
            return func.HttpResponse('only accept POST method', status_code=400)
        if files:
            if files.content_type != 'image/jpeg':
                logging.warning(
                    f'ID:{event_id},the file type was {files.content_type}.refused.')
                return func.HttpResponse('only accept jpeg images', status_code=400)
            # pre processing: decode the uploaded bytes into a PIL image
            img_bin = files.read()
            img = prep.to_pil_image(img_bin)
            # rotate image with orientation value (for iOS, iPadOS)
            img = prep.rotate_image(img)
            # original width/height, needed later to build the mask
            w, h = img.size
            # resize to the fixed input resolution of the model
            img_np = prep.resize(img, w=2048, h=1024)
            img_np = np.array(img_np).astype(np.float32)
            # hwc -> bchw; presumably [1, 3, 1024, 2048] after the resize above — TODO confirm
            img_np = prep.transpose(img_np)
            # build the TF Serving request for the semantic segmentation model
            request = predict_pb2.PredictRequest()
            request.model_spec.name = 'semantic-segmentation-adas'
            request.inputs["data"].CopyFrom(make_tensor_proto(img_np))
            # send to infer model by grpc
            start = time()
            options = [('grpc.max_receive_message_length', 8388653)]
            channel = grpc.insecure_channel("{}:{}".format(_HOST, _PORT), options=options)
            stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
            result = stub.Predict(request, timeout=10.0)
            logging.warning(f'OutputType:{type(result)}')
            output = make_ndarray(result.outputs['4656.1'])
            # -----------------------------------------------------------
            # grayscale copy of the input, human mask, then composite:
            gray = img.convert('L')
            mask = postp.segmentation(output, w, h)
            image = Image.composite(gray, img, mask)
            image = cv2.cvtColor(np.asarray(image), cv2.COLOR_RGB2BGR)
            # -----------------------------------------------------------
            timecost = time() - start
            logging.info(f"Inference complete,Takes{timecost}")
            imgbytes = cv2.imencode('.jpg', image)[1].tobytes()
            MIMETYPE = 'image/jpeg'
            return func.HttpResponse(body=imgbytes, status_code=200, mimetype=MIMETYPE, charset='utf-8')
        else:
            logging.warning(f'ID:{event_id},Failed to get image,down.')
            return func.HttpResponse('no image files', status_code=400)
    except grpc.RpcError as e:
        status_code = e.code()
        if "DEADLINE_EXCEEDED" in status_code.name:
            logging.error(e)
            return func.HttpResponse('the grpc request timeout', status_code=408)
        else:
            logging.error(f"grpcError:{e}")
            return func.HttpResponse('Failed to get grpcResponse', status_code=500)
    except Exception as e:
        logging.error(f"Error:{e}\nurl:{url}\nmethod:{method}\n")
        return func.HttpResponse('Service Error.check the log.', status_code=500)
"logging.error",
"tensorflow.make_tensor_proto",
"tensorflow_serving.apis.predict_pb2.PredictRequest",
"logging.warning",
"PIL.Image.composite",
"numpy.asarray",
"tensorflow.make_ndarray",
"time.time",
"os.environ.get",
"logging.info",
"tensorflow_serving.apis.prediction_service_pb2_grpc.Predict... | [((402, 447), 'os.environ.get', 'os.environ.get', (['"""HUMANSEGMENTATION_IPADDRESS"""'], {}), "('HUMANSEGMENTATION_IPADDRESS')\n", (416, 447), False, 'import os\n'), ((456, 496), 'os.environ.get', 'os.environ.get', (['"""HUMANSEGMENTATION_PORT"""'], {}), "('HUMANSEGMENTATION_PORT')\n", (470, 496), False, 'import os\n'), ((638, 761), 'logging.info', 'logging.info', (['f"""Python humanpose function start process.\nID:{event_id}\nBack-end server host: {_HOST}:{_PORT}"""'], {}), '(\n f"""Python humanpose function start process.\nID:{event_id}\nBack-end server host: {_HOST}:{_PORT}"""\n )\n', (650, 761), False, 'import logging\n'), ((894, 972), 'logging.warning', 'logging.warning', (['f"""ID:{event_id},the method was {files.content_type}.refused."""'], {}), "(f'ID:{event_id},the method was {files.content_type}.refused.')\n", (909, 972), False, 'import logging\n'), ((1009, 1071), 'azure.functions.HttpResponse', 'func.HttpResponse', (['f"""only accept POST method"""'], {'status_code': '(400)'}), "(f'only accept POST method', status_code=400)\n", (1026, 1071), True, 'import azure.functions as func\n'), ((1808, 1824), 'numpy.array', 'np.array', (['img_np'], {}), '(img_np)\n', (1816, 1824), True, 'import numpy as np\n'), ((2016, 2044), 'tensorflow_serving.apis.predict_pb2.PredictRequest', 'predict_pb2.PredictRequest', ([], {}), '()\n', (2042, 2044), False, 'from tensorflow_serving.apis import predict_pb2\n'), ((2246, 2252), 'time.time', 'time', ([], {}), '()\n', (2250, 2252), False, 'from time import time\n'), ((2434, 2492), 'tensorflow_serving.apis.prediction_service_pb2_grpc.PredictionServiceStub', 'prediction_service_pb2_grpc.PredictionServiceStub', (['channel'], {}), '(channel)\n', (2483, 2492), False, 'from tensorflow_serving.apis import prediction_service_pb2_grpc\n'), ((2681, 2719), 'tensorflow.make_ndarray', 'make_ndarray', (["result.outputs['4656.1']"], {}), "(result.outputs['4656.1'])\n", (2693, 
2719), False, 'from tensorflow import make_tensor_proto, make_ndarray\n'), ((3024, 3056), 'PIL.Image.composite', 'Image.composite', (['gray', 'img', 'mask'], {}), '(gray, img, mask)\n', (3039, 3056), False, 'from PIL import Image\n'), ((3253, 3304), 'logging.info', 'logging.info', (['f"""Inference complete,Takes{timecost}"""'], {}), "(f'Inference complete,Takes{timecost}')\n", (3265, 3304), False, 'import logging\n'), ((3470, 3559), 'azure.functions.HttpResponse', 'func.HttpResponse', ([], {'body': 'imgbytes', 'status_code': '(200)', 'mimetype': 'MIMETYPE', 'charset': '"""utf-8"""'}), "(body=imgbytes, status_code=200, mimetype=MIMETYPE,\n charset='utf-8')\n", (3487, 3559), True, 'import azure.functions as func\n'), ((3583, 3642), 'logging.warning', 'logging.warning', (['f"""ID:{event_id},Failed to get image,down."""'], {}), "(f'ID:{event_id},Failed to get image,down.')\n", (3598, 3642), False, 'import logging\n'), ((3662, 3715), 'azure.functions.HttpResponse', 'func.HttpResponse', (['f"""no image files"""'], {'status_code': '(400)'}), "(f'no image files', status_code=400)\n", (3679, 3715), True, 'import azure.functions as func\n'), ((4122, 4238), 'logging.error', 'logging.error', (['f"""Error:{e}\n url:{url}\n method:{method}\n"""'], {}), '(\n f"""Error:{e}\n url:{url}\n method:{method}\n"""\n )\n', (4135, 4238), False, 'import logging\n'), ((4247, 4314), 'azure.functions.HttpResponse', 'func.HttpResponse', (['f"""Service Error.check the log."""'], {'status_code': '(500)'}), "(f'Service Error.check the log.', status_code=500)\n", (4264, 4314), True, 'import azure.functions as func\n'), ((1158, 1244), 'logging.warning', 'logging.warning', (['f"""ID:{event_id},the file type was {files.content_type}.refused."""'], {}), "(\n f'ID:{event_id},the file type was {files.content_type}.refused.')\n", (1173, 1244), False, 'import logging\n'), ((1284, 1346), 'azure.functions.HttpResponse', 'func.HttpResponse', (['f"""only accept jpeg images"""'], {'status_code': '(400)'}), 
"(f'only accept jpeg images', status_code=400)\n", (1301, 1346), True, 'import azure.functions as func\n'), ((2156, 2181), 'tensorflow.make_tensor_proto', 'make_tensor_proto', (['img_np'], {}), '(img_np)\n', (2173, 2181), False, 'from tensorflow import make_tensor_proto, make_ndarray\n'), ((3091, 3108), 'numpy.asarray', 'np.asarray', (['image'], {}), '(image)\n', (3101, 3108), True, 'import numpy as np\n'), ((3228, 3234), 'time.time', 'time', ([], {}), '()\n', (3232, 3234), False, 'from time import time\n'), ((3843, 3859), 'logging.error', 'logging.error', (['e'], {}), '(e)\n', (3856, 3859), False, 'import logging\n'), ((3879, 3942), 'azure.functions.HttpResponse', 'func.HttpResponse', (['f"""the grpc request timeout"""'], {'status_code': '(408)'}), "(f'the grpc request timeout', status_code=408)\n", (3896, 3942), True, 'import azure.functions as func\n'), ((3969, 4000), 'logging.error', 'logging.error', (['f"""grpcError:{e}"""'], {}), "(f'grpcError:{e}')\n", (3982, 4000), False, 'import logging\n'), ((4020, 4085), 'azure.functions.HttpResponse', 'func.HttpResponse', (['f"""Failed to get grpcResponse"""'], {'status_code': '(500)'}), "(f'Failed to get grpcResponse', status_code=500)\n", (4037, 4085), True, 'import azure.functions as func\n'), ((3329, 3356), 'cv2.imencode', 'cv2.imencode', (['""".jpg"""', 'image'], {}), "('.jpg', image)\n", (3341, 3356), False, 'import cv2\n')] |
"""
UTILITY DESCRIBTION:
--------------------
As the name states this is a utility with various plot tools to make it easier to make nice plots. This
utility has the following overview:
"""
# Numpy:
import numpy as np
# Python functions:
import time, sys, pylab, math, julian
# Matplotlib:
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from matplotlib import rc, cm, gridspec
from matplotlib.ticker import FormatStrFormatter
from matplotlib.patches import Rectangle
import matplotlib.patches as patches
# Axes tools:
from mpl_toolkits.mplot3d import axes3d, Axes3D
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes, mark_inset
from mpl_toolkits.axes_grid1 import make_axes_locatable
###########################################################################################################
# GENERAL PLOTS #
###########################################################################################################
def PLOT(data, mark, xlab, ylab, title=None, subplot=0, legend=1, axis=[1,1]):
"""
General function to make fast plots in one command line:
--------INPUT:
data (array) : Data structure e.g. [data0, data1]; data0 and data1 have a x, y coloumn.
mark (list) : If one have 2 datasets use e.g. ['b-', 'k.']
xlab, ylab (string) : Labels on x and y
title (string) : Title
legpos (float) : This can be 1, 2, 3, and 4 corresponding to each quadrant.
subplot (float) : Different types of subplots.
axis (list) : Procentage edge-space in x and y. E.g. [1, 5] to 1% in x and 5% in y.
"""
# Type of subplot:
if subplot is not 0: plot_subplot(subplot)
# Plot data:
if legend is 1:
for i in range(len(data)):
plt.plot(data[i][:,0], data[i][:,1], mark[i])
plot_settings(xlab, ylab, title)
if legend is not 1:
for i in range(len(data)):
plt.plot(data[i][:,0], data[i][:,1], mark[i], label=legend[i+1])
plot_settings(xlab, ylab, title, legend[0])
# Axes setting:
plot_axis(data[0][:,0], data[0][:,1], axis[0], axis[1])
plt.show()
def SURF(x, y, z, xlab, ylab, zlab, title=None):
# Find (x, y) value for maximum peak:
z_max = np.max(z)
z_max_i = np.where(z==z_max)
print('Best Period: {:.6f} days'.format(x[z_max_i[0][0]]))
print('Best Phase : {:.6f} days'.format(y[z_max_i[1][0]]))
# 3D plot:
y, x = np.meshgrid(y, x)
fig = plt.figure()
ax = fig.gca(projection='3d')
surf = ax.plot_surface(x, y, z, rstride=1, cstride=1, cmap='bwr', linewidth=20, antialiased=False)
cset = ax.contour(X, Y, Z, zdir='z', offset=-100, cmap=cm.coolwarm)
# cm.coolwarm
# Axes labels and title:
ax.set_xlabel(xlab, fontsize=13); ax.tick_params(axis='x', labelsize=13)
ax.set_ylabel(ylab, fontsize=13); ax.tick_params(axis='y', labelsize=13)
ax.set_zlabel(zlab, fontsize=13); ax.tick_params(axis='z', labelsize=13)
if title is not None: plt.title(title, fontsize=15)
# Extra settings:
# ax.invert_xaxis() # Invert x-axis
# ax.view_init(30, 45) # Viewing angle
# fig.colorbar(surf, shrink=0.5, aspect=8) # Colorbar
plt.show()
def HIST(hist, bins, xlab, ylab, title=None):
plt.hist(hist, bins, edgecolor='k', alpha=1, log=True)
#plot_settings(xlab, ylab, title)
def linear(img, sigma=2, img_min=None, img_max=None):
    """Linearly scale a numpy array into the range [0, 1].

    Parameters
    ----------
    img     : array of pixel values.
    sigma   : half-width of the clip range in standard deviations, used
              when img_min/img_max are not given.
    img_min : lower clip value; defaults to mean - sigma*std.
    img_max : upper clip value; defaults to mean + sigma*std.

    Returns
    -------
    New array of the same shape, clipped and rescaled to [0, 1].
    """
    # BUGFIX: img_min/img_max arguments were previously accepted but
    # unconditionally recomputed (i.e. ignored); respect them when given.
    if img_min is None:
        img_min = img.mean() - sigma * img.std()
    if img_max is None:
        img_max = img.mean() + sigma * img.std()
    imageData = np.array(img, copy=True)
    imageData = imageData.clip(min=img_min, max=img_max)
    imageData = (imageData - img_min) / (img_max - img_min)
    # Guard against round-off pushing values just outside [0, 1]:
    imageData[np.where(imageData < 0)] = 0.0
    imageData[np.where(imageData > 1)] = 1.0
    return imageData
def FITS(img, sigma=2, xlab=None, ylab=None, colorbar=None):
    """Easy plot of pixel array data with linear scaling.

    Parameters
    ----------
    img      : 2D image array.
    sigma    : clip half-width in standard deviations, passed to linear().
    xlab, ylab : axis labels; both default to pixel labels when omitted.
    colorbar : if truthy, attach a colorbar. BUGFIX: this argument was
               previously accepted but silently ignored.
    """
    plt.figure()
    im = plt.imshow(linear(img, sigma), cmap='Blues', origin='lower')
    # Labels:
    if xlab is None and ylab is None: xlab, ylab = r'$x$ (pixel)', r'$y$ (pixel)'
    plt.xlabel(xlab)
    plt.ylabel(ylab)
    if colorbar:
        plt.colorbar(im)
    plt.show()
def FITS_colorbar_multi(img, xlab, ylab, ds=None):
    """Plot a row of images, each with its own linearly-scaled colorbar.

    Parameters
    ----------
    img  : sequence of 2D image arrays (one subplot per image).
    xlab : x-axis label applied to every subplot.
    ylab : y-axis label applied to every subplot.
    ds   : clip half-width in standard deviations (default 2).
    """
    from mpl_toolkits.axes_grid1 import make_axes_locatable
    # BUGFIX: was `if ds==None` (identity check is the correct idiom):
    if ds is None:
        ds = 2
    fig, axes = plt.subplots(nrows=1, ncols=len(img))
    # BUGFIX: `axes.flat` is an iterator attribute, not a callable, and the
    # original loop body referenced an undefined index `i`; use enumerate.
    for i, ax in enumerate(axes.flat):
        img_min = img[i].mean() - ds * img[i].std()
        img_max = img[i].mean() + ds * img[i].std()
        # BUGFIX: the scale selector referenced undefined names (scale,
        # sqrt, log, asinh); only the linear stretch defined in this
        # module is available, so use it directly.
        im = ax.imshow(linear(img[i], img_min=img_min, img_max=img_max),
                       cmap='Blues', origin='lower')
        # Create an axes on the right side of ax; its width is 5% of ax
        # and the padding between cax and ax is fixed at 0.05 inch:
        divider = make_axes_locatable(ax)
        cax = divider.append_axes('right', size='5%', pad=0.05)
        cbar = plt.colorbar(im, cax=cax)
        ax.set_xlabel(xlab)
        ax.set_ylabel(ylab)
###########################################################################################################
# BlueSONG PIPELINES #
###########################################################################################################
def plot_image_reduction(BF, DF, FF, TA, SF):
    """Print noise statistics for the calibration frames and display each.

    BF/DF/FF are the bias, dark, and flat frames; TA and SF are the two
    remaining frames, shown without statistics.
    """
    # Summary statistics for the bias, dark, and flat frames:
    print('Bias -- std: {:.5g}, mean: {:.5g}'.format(np.std(BF), np.mean(BF)))
    print('Dark -- std: {:.5g}, mean: {:.5g}'.format(np.std(DF), np.mean(DF)))
    print('Flat -- std: {:.5g}, mean: {:.5g}'.format(np.std(FF), np.mean(FF)))
    # Display every calibration image in turn:
    xlab = 'Dispersion (pixels)'
    ylab = 'Cross Dispersion (pixels)'
    for frame in (BF, DF, FF, TA, SF):
        FITS(frame)
#-----------------------------------------------------------------------------------------------------------
def plot_find_ref_cen_pos(center_rows_median, center_row_median_convolved, len_cross, smooth_win, ref_cen_pos):
    """ This plot shows how the central reference point are found in each order """
    # Offset the convolved curve by half the smoothing window along the
    # cross axis and by 0.01 in normalized flux, so both curves stay visible:
    len_cross = len(center_rows_median)
    half_win = np.ones(len_cross) * int(smooth_win / 2)
    curve_median = np.vstack([center_rows_median / (max(center_rows_median)),
                              range(len_cross)]).T
    curve_conv = np.vstack([center_row_median_convolved / (max(center_row_median_convolved)) + 0.01,
                            range(len_cross) - half_win]).T
    # Draw both collapsed profiles:
    plt.figure()
    plt.plot(curve_median[:, 0], curve_median[:, 1], 'b-', label='Collapsed: Median-filter', alpha=0.5)
    plt.plot(curve_conv[10:, 0], curve_conv[10:, 1], 'g--', label='Convolved: Sum-filter')
    # Mark every reference order center (first one carries the legend label):
    for pos in ref_cen_pos:
        plt.plot([0, 1.03], [pos, pos], 'k:')
    plt.plot([0, 1], [ref_cen_pos[0], ref_cen_pos[0]], 'k:', label='Ref Orders center')
    # Labels:
    plt.xlabel('Normalized Counts'); plt.ylabel('Cross Dispersion (pixels)')
    plt.legend(loc='lower right')
    plt.show()
def plot_trace_order(ridge_pos_disp, ridge_pos_cross, order_trace, order_traced, \
                     order_trace_disp, cen_disp, ref_cen_pos):
    """ This plot shows how the order are traced and fitted using sigma-clipping and a polynomial fit """
    # Raw ridge points found during tracing (faint blue dots):
    plt.figure()
    plt.plot(ridge_pos_disp, ridge_pos_cross, 'b.', alpha=0.2)
    # Surviving (sigma-clipped) trace points per order, keyed 'order_0'..'order_3':
    for i in range(4): plt.plot(order_trace['order_{}'.format(i)][0],\
                                order_trace['order_{}'.format(i)][1], 'k.')
    # Polynomial fit of each order evaluated along the dispersion axis:
    for i in range(4): plt.plot(order_trace_disp, np.polyval(order_traced['order_{}'.format(i)],\
                                order_trace_disp), 'c-', linewidth='1.5')
    # Appearence of labels (re-plot one artist of each kind to get legend entries):
    plt.plot(ridge_pos_disp[0], ridge_pos_cross[0], 'b.', alpha=0.3, label='Traced ridge')
    plt.plot(order_trace['order_0'][0], order_trace['order_0'][1], 'k.', label='Final ridge')
    plt.plot(order_trace_disp, np.polyval(order_traced['order_{}'.format(0)],\
                               order_trace_disp), 'c-', linewidth='1.5', label='Polynomial fit')
    # Reference center positions at the central dispersion column:
    plt.plot(ref_cen_pos*0 + cen_disp, ref_cen_pos, 'r*', label='Ref posisions', markersize='7')
    plt.xlabel('Dispersion (pixels)'); plt.ylabel('Cross Dispersion (pixels)')
    plt.legend(loc='lower right', ncol=2)
    plt.ylim(0, 450)
    plt.show()
#-----------------------------------------------------------------------------------------------------------
def plot_optimal_width(widths, order, blaze_max, index_max, flux_inter, snr, optimal_order_width):
    """Plot the S/N ratio vs. trial extraction width (left panel) next to a
    Gaussian fit of the brightest spatial profile (right panel), with the
    candidate width choices marked as vertical line pairs.

    Parameters
    ----------
    widths              : trial spatial widths (pixels).
    order               : 2D order data; order[index_max] is the brightest profile.
    blaze_max           : amplitude guess for the Gaussian fit.
    index_max           : index of the profile with maximum flux.
    flux_inter          : inter-order (background) flux subtracted from the profile.
    snr                 : S/N ratio for each trial width.
    optimal_order_width : width that maximizes the S/N ratio.
    """
    # Find best FWHM of Gauss fit to highest S/N ratio width.
    # NOTE(review): lmfit is a third-party package imported locally here.
    from lmfit.models import GaussianModel
    gmodel = GaussianModel()
    # Make Gauss fit (background and half the order minimum removed):
    x = np.arange(len(order[index_max]))
    profile = order[index_max] - flux_inter - np.min(order)/2
    result = gmodel.fit(profile, x=x, amplitude=blaze_max, center=optimal_order_width)
    fit = result.best_fit
    fwhm = result.params['fwhm'].value
    print(result.fit_report())
    print(optimal_order_width)
    # PLOT:
    fig, ax = plt.subplots(1, 2, sharex=True)
    # Plot S/N ratio with the optimum marked:
    ax1 = plt.subplot(121)
    ax1.plot(widths[0:-1-1], snr[0:-1-1], 'mo--')
    ax1.axvline(optimal_order_width, color='b', linestyle='--')
    ax1.set_xlabel('Spatial width (pixels)')
    ax1.set_ylabel('S/N ratio')
    # Plot gauss fit (x shifted so the profile is centered near zero):
    xoff = (x[-1]-x[0])/2
    ax2 = plt.subplot(122)
    ax2.plot(x-xoff, profile/max(profile), 'r+')
    ax2.plot(x-xoff, fit/max(profile), 'k-', label='Gauss fit')
    # Each pair of vertical lines below marks one candidate width,
    # centered on the fitted peak position:
    ax2.axvline(x=(np.nanargmax(fit)-fwhm/2-xoff), color='orange', linestyle='--', label='FWHM')
    ax2.axvline(x=(np.nanargmax(fit)+fwhm/2-xoff), color='orange', linestyle='--')
    #--------
    ax2.axvline(x=(np.nanargmax(fit)-optimal_order_width/2-xoff), color='b', linestyle='--', \
                label=r'(S/N)$_{\text{max}}$')
    ax2.axvline(x=(np.nanargmax(fit)+optimal_order_width/2-xoff), color='b', linestyle='--')
    #--------
    ax2.axvline(x=(np.nanargmax(fit)-optimal_order_width/1.5-xoff), color='g', linestyle='--', \
                label=r'$4/3 \times \text{(S/N)}_{\text{max}}$')
    ax2.axvline(x=(np.nanargmax(fit)+optimal_order_width/1.5-xoff), color='g', linestyle='--')
    #--------
    ax2.axvline(x=(np.nanargmax(fit)-(optimal_order_width-3)/2-xoff), color='r', linestyle='--', \
                label=r'Final width')
    ax2.axvline(x=(np.nanargmax(fit)+(optimal_order_width-3)/2-xoff), color='r', linestyle='--')
    #--------
    ax2.set_xlabel('Central width (pixels)')
    ax2.set_ylabel('Norm. counts')
    # Extra settings:
    ax2.legend(loc='upper left', fontsize=11, ncol=5, bbox_to_anchor=(-1.2, 1.15))
    plt.subplots_adjust(wspace=0.2, hspace=0.0)
    plt.show()
def plot_inter_order_mask(data, inter_order_mask):
    """Overlay the inter-order mask on the spectral frame, with a colorbar.

    Parameters
    ----------
    data             : 2D spectral image.
    inter_order_mask : mask array of the same shape selecting the
                       inter-order regions.
    """
    # BUGFIX: make_axes_locatable was used below without being imported
    # (it is only imported locally inside other functions in this module):
    from mpl_toolkits.axes_grid1 import make_axes_locatable
    mask = data.T*inter_order_mask.T
    # Plot the full frame faintly and the masked regions on top:
    fig, ax = plt.subplots(1,1)
    im = ax.imshow(linear(data.T), cmap='Blues', origin='lower', alpha=1.0)
    im = ax.imshow(linear(mask), cmap='Blues', origin='lower', alpha=0.73)
    # Settings:
    ax.set_xlabel('Dispersion (pixels)')
    ax.set_ylabel('Cross (pixels)')
    # Colorbar in a narrow axes appended to the right of the image:
    divider = make_axes_locatable(ax)
    cax = divider.append_axes('right', size='1.5%', pad=0.0)
    cbar = plt.colorbar(im, cax=cax)
    plt.ylabel('Norm. Counts')
    plt.show()
def plot_background_fits(s_disp, s_cros, poly_order_y, poly_order_x, \
                         yy, yi0, yi1, yi2, ym0, ym1, ym2, yfit0, yfit1, yfit2, \
                         xx, xi0, xi1, xi2, xm0, xm1, xm2, xfit0, xfit1, xfit2):
    """Plot three background cuts with their polynomial fits: along the
    cross-dispersion axis (left panel) and the dispersion axis (right panel).

    yi*/ym* are the sample positions/values of three cross-dispersion cuts
    and yfit* their fits evaluated over yy; xi*/xm*/xfit*/xx are the
    corresponding quantities along the dispersion axis. s_disp/s_cros are
    the cut positions (only referenced by the commented-out labels).
    """
    font1, font2 = 12.5, 15
    # Sizes for flat:
    m1, m2, m3, m4, m5, m6 = 5, 3, 3, 6, 4, 4
    a1, a2, a3, a4, a5, a6 = 0.2, 0.5, 0.5, 0.01, 0.05, 0.1
    # Sizes for star.
    # NOTE(review): these overwrite the "flat" sizes above unconditionally --
    # apparently a manual toggle; comment one block out to switch.
    m1, m2, m3, m4, m5, m6 = 5, 3, 3, 4, 3, 3
    a1, a2, a3, a4, a5, a6 = 0.4, 0.5, 0.5, 0.1, 0.1, 0.2
    #---------------------
    grid = plt.GridSpec(7, 4, wspace=0.0, hspace=0.0)
    #---------------------
    # Left panel: three cross-dispersion cuts and their polynomial fits:
    ax1 = plt.subplot(grid[:6,0])
    ax1.plot(yi0, ym0, 'b.', alpha=a1, markersize=m1)#, label='Row {}'.format(s_disp[0])
    ax1.plot(yi1, ym1, 'g^', alpha=a2, markersize=m2)#, label='Row {}'.format(s_disp[1])
    ax1.plot(yi2, ym2, 'rd', alpha=a3, markersize=m3)#, label='Row {}'.format(s_disp[2])
    ax1.plot(yy, yfit0, 'k-', linewidth=1.5, label='{}. order polyfit'.format(poly_order_y))
    ax1.plot(yy, yfit1, 'k--', linewidth=1.5)#, label='{}. order polyfit'.format(poly_order_y))
    ax1.plot(yy, yfit2, 'k:', linewidth=1.5)#, label='{}. order polyfit'.format(poly_order_y))
    #---------------------
    # Right panel: three dispersion cuts and their polynomial fits:
    ax2 = plt.subplot(grid[:6,1])
    ax2.plot(xi0, xm0, 'b.', alpha=a4, markersize=m4) # Not used for the labels
    ax2.plot(xi1, xm1, 'g^', alpha=a5, markersize=m5)
    ax2.plot(xi2, xm2, 'rd', alpha=a6, markersize=m6)
    ax2.plot(xx, xfit0, 'k-', linewidth=1.5, label='{}. order polyfit'.format(poly_order_x))
    ax2.plot(xx, xfit1, 'k--', linewidth=1.5)#, label='{}. order polyfit'.format(poly_order_x))
    ax2.plot(xx, xfit2, 'k:', linewidth=1.5)#, label='{}. order polyfit'.format(poly_order_x))
    # Settings:
    ax1.legend(loc='upper left', fontsize=font1, ncol=1, bbox_to_anchor=(-0.035, 1.15))
    ax2.legend(loc='upper left', fontsize=font1, ncol=1, bbox_to_anchor=(-0.035, 1.15))
    ax1.set_xlabel('Cross (pixels)', fontsize=font2)
    ax2.set_xlabel('')
    ax2.set_xlabel('Dispersion (pixels)', fontsize=font2)
    ax1.set_ylabel('Counts', fontsize=font2)
    ax1.tick_params(axis='both', which='both', labelsize=font2)
    ax2.tick_params(axis='both', which='both', labelsize=font2)
    ax2.set_yticklabels([''])
    ax1.set_xticklabels(['0', '0', '100', '', ''])
    # Pad both panels' y-limits by 5% around the extreme sample values:
    ymin1, ymax1 = min(min(ym0), min(ym1), min(ym2)), max(max(ym0), max(ym1), max(ym2))
    ymin2, ymax2 = min(min(xm0), min(xm1), min(xm2)), max(max(xm0), max(xm1), max(xm2))
    ax1.set_ylim(ymin1-ymin1*0.05, ymax1+ymax1*0.05)
    ax2.set_ylim(ymin2-ymin2*0.05, ymax2+ymax2*0.05)
    # Extra:
    # ax2.plot(xi0[0], xm0[0], 'b.', alpha=0.4, markersize=mark2)#, label='Column {}'.format(s_cros[0])
    # ax2.plot(xi1[0], xm1[0], 'g^', alpha=0.4, markersize=mark1)#, label='Column {}'.format(s_cros[1])
    # ax2.plot(xi2[0], xm2[0], 'rd', alpha=0.4, markersize=mark1)#, label='Column {}'.format(s_cros[2])
    plt.show()
def plot_background(data):
    """Display the fitted background frame (transposed) with a colorbar.

    Parameters
    ----------
    data : 2D background array.
    """
    # BUGFIX: make_axes_locatable was used below without being imported
    # (it is only imported locally inside other functions in this module):
    from mpl_toolkits.axes_grid1 import make_axes_locatable
    # Plot:
    fig, ax = plt.subplots()
    im = ax.imshow(linear(data.T), cmap='Blues', origin='lower')
    # Settings:
    ax.set_xlabel('Dispersion (pixels)')
    ax.set_xticklabels([])
    ax.set_ylabel(r'Cross Dispersion (pixels)')
    # Colorbar in a narrow axes appended to the right of the image:
    divider = make_axes_locatable(ax)
    cax = divider.append_axes('right', size='3%', pad=0.0)
    cbar = plt.colorbar(im, cax=cax)
    plt.ylabel(r'Counts')
    plt.show()
def plot_background_residuals(F_back, S_back, S):
    """Plot the normalized star background (top panel) and the difference
    between the normalized flat and star backgrounds (bottom panel), each
    with its own colorbar.

    Parameters
    ----------
    F_back : flat-frame background image.
    S_back : star-frame background image.
    S      : shadowed immediately below; kept for call compatibility.
    """
    # Normalize both backgrounds to their own maxima before differencing:
    F = F_back/np.max(F_back)
    S = S_back/np.max(S_back)
    I = F - S
    fig, ax = plt.subplots(2,1)
    # Plot:
    im0 = ax[0].imshow(linear(S.T), cmap='Blues', origin='lower')
    im1 = ax[1].imshow(I.T, vmin=np.min(I), vmax=np.max(I), cmap='Blues', origin='lower')
    # Settings (negative hspace pulls the two panels together):
    fig.subplots_adjust(hspace=-0.50)
    ax[0].set_xticklabels([])
    ax[0].set_yticklabels(['', '', '200', '400'])
    # Manually placed colorbar axes, one per panel:
    cbar0 = fig.add_axes([0.9, 0.510, 0.015, 0.227])
    cbar1 = fig.add_axes([0.9, 0.253, 0.015, 0.228])
    fig.colorbar(im0, cax=cbar0)
    fig.colorbar(im1, cax=cbar1)
    # Labels:
    ax[0].annotate('(a)', xy=(50,280), fontsize=15)
    ax[1].annotate('(b)', xy=(50,280), fontsize=15)
    ax[1].set_xlabel('Dispersion (pixels)')
    ax[0].set_ylabel(r'Cross Dispersion (pixels)\qquad\qquad\qquad\tiny.')
    cbar0.set_ylabel('Norm. Counts')
    cbar1.set_ylabel('Residuals')
    plt.show()
#-----------------------------------------------------------------------------------------------------------
def plot_optimal_extraction(img):
    """Illustrate optimal extraction: the order image with two marked
    columns (left panel) and their normalized cross-order profiles
    (right panel), sharing a horizontal colorbar.

    Parameters
    ----------
    img : 2D image of the extracted order.
    """
    # BUGFIX: Rectangle and make_axes_locatable were used below without
    # being imported anywhere in scope:
    from matplotlib.patches import Rectangle
    from mpl_toolkits.axes_grid1 import make_axes_locatable
    #------------
    # 1. SUBPLOT:
    #------------
    ax1 = plt.subplot2grid((2,3), (0,0), colspan=2)
    # Plot pixel image:
    im = ax1.imshow(linear(img.T), cmap='Blues', origin='lower')
    # Red and green boxes marking the two sampled columns
    # (given as corner point plus width and height):
    ax1.add_patch(Rectangle((7.5, -0.4), 1, len(img[:,0])-0.3, fill=None, edgecolor='r', linewidth=1.2))
    ax1.add_patch(Rectangle((29.5, -0.4), 1, len(img[:,0])-0.3, fill=None, edgecolor='g', linewidth=1.2))
    # Labels:
    ax1.set_xlabel(r'$\lambda$ (pixel)')
    ax1.set_ylabel(r'$x$ (pixel)')
    #------------
    # 2. SUBPLOT:
    #------------
    ax2 = plt.subplot2grid((9,3), (0,2), colspan=1, rowspan=5)
    # Cross cuts in the image at the two marked columns:
    l1 = img[:,8]; l2 = img[:,10]
    # Plot step plots:
    ax2.step(l1/np.max(img), range(len(l1)), color='r')
    ax2.step(l2/np.max(img), range(len(l2)), color='g')
    # Remove ticks and numbers from both axes:
    ax2.set_xticklabels([])
    ax2.set_yticklabels([])
    # Plot the profile-normalization equation inside the panel:
    ax2.text(-0.05, 6.5, r'$\sum\limits_x \text{P}_{x\lambda}=1$', fontsize=15)
    # Put colorbar from first plot as the x label (as they are shared):
    divider = make_axes_locatable(ax2)
    cax = divider.append_axes('bottom', size='10%', pad=0.0)
    cbar = plt.colorbar(im, cax=cax, orientation='horizontal')
    cbar.ax.set_xlabel('Normalized Counts')
    # Plot figure with small ajustment:
    plt.subplots_adjust(wspace=0, hspace=-0.03)
    plt.show()
#-----------------------------------------------------------------------------------------------------------
def plot_arc_peak(s_neg, s_conv, peaks_limit_dex, peaks_limit_val, \
                  peaks_all_dex, peaks_all_val, peak, limits):
    """Diagnostic plot of arc-line peak detection around a reference line."""
    fig, ax = plt.subplots()
    # Raw spectrum and every peak found on it:
    ax.plot(s_neg, '-', color='grey', label='Normal spectrum')
    ax.plot(peaks_all_dex, peaks_all_val, 'x', color='orange', label='Normal peaks')
    # Convolved spectrum, its search limits, and the peaks inside them:
    ax.plot(s_conv, 'k', color='k' , label='Convolved spectrum')
    for lim in (limits[0], limits[1]):
        ax.axvline(lim, color='deeppink', linestyle='-.')
    ax.plot(peaks_limit_dex, peaks_limit_val, '^', color='deeppink', markeredgecolor='k', markersize=7, \
            label='Smooth peaks')
    ax.axvline(x=peak, color='b', linestyle='--', label='Line center reference')
    # Axis labels:
    ax.set_xlabel(r'$\lambda$ (pixel)')
    ax.set_ylabel('Norm. counts')
    # Reorder the legend entries so the two spectra come first:
    handles, labels = ax.get_legend_handles_labels()
    order = [0, 2, 1, 3, 4]
    ax.legend([handles[j] for j in order], [labels[j] for j in order], loc='best')
    ax.set_xlim((0, len(s_neg)))
    # Center the y-limits around the median flux level:
    hk_median = np.median(s_neg)
    ax.set_ylim((hk_median - 1.5 * abs(hk_median), hk_median + 2.5 * abs(hk_median)))
    plt.show()
def plot_arc_fit(l_obs, l_teo, coefs, poly_order, residuals, chi2r, sigma, text):
    """Plot the wavelength-calibration polynomial fit and its residuals.

    Parameters
    ----------
    l_obs      : observed line positions.
    l_teo      : atlas (reference) wavelengths.
    coefs      : polynomial coefficients, highest order first.
    poly_order : degree of the fitted polynomial (legend text only).
    residuals  : fit residuals per line.
    chi2r      : reduced chi-square (sequence; first entry is annotated).
    sigma      : standard deviation of the fit.
    text       : plot title.
    """
    # Calculate fit parameters. BUGFIX: np.linspace requires an integer
    # sample count; the former 1e3 (a float) raises TypeError in modern numpy.
    p = np.poly1d(coefs)
    xp = np.linspace(min(l_obs), max(l_obs), 1000)
    # Plot fit:
    fig = plt.figure()
    ax0 = plt.subplot2grid((4,1), (0,0), rowspan=3)
    ax0.plot(xp, p(xp), 'r-', label='{}. order polyfit'.format(poly_order))
    ax0.plot(l_obs, l_teo, 'k+', label='ThAr lines')
    # Plot residuals:
    ax1 = plt.subplot(414)
    ax1.plot(l_obs, residuals, 'k+')
    ax1.plot(xp, np.zeros(len(xp)), 'r--')
    # Annotate the fit statistics 20% in from the right edge:
    xanopos = max(xp)-(max(xp)-min(xp))*0.2
    yanopos = (max(l_obs)-min(l_obs))
    ax0.annotate(r'$\chi^2_r$ = {:.4f}'.format(chi2r[0]), (xanopos, max(l_teo)-yanopos*0.8))
    ax0.annotate(r'$\sigma$ = {:.4f}'.format(sigma), (xanopos, max(l_teo)-yanopos*0.9))
    ax0.set_title(text)
    ax0.set_xlabel('')
    ax0.set_xticklabels('')
    ax0.set_ylabel(r'$\lambda_{\text{atlas}}$ (Å)')
    ax1.set_xlabel(r'$\lambda_{\text{obs}}$ (Å)')
    ax1.set_ylabel('Residuals (Å)')
    #ax1.set_ylim([-0.025, 0.025])
    fig.subplots_adjust(hspace=0)
    ax0.legend(loc='best')
    plt.show()
def plot_arc_scale(l_obs0, l_teo0, l_obs1, l_teo1, l, obs_results):
    """Full-screen overview of the wavelength-scale solution.

    Top: collapsed ThAr spectrum with observed (red) and atlas (blue) line
    positions; dashed = first set, solid = second set. Bottom: the raw arc
    image with detected line centers boxed.
    (Figure made to span the entire screen.)
    """
    # Unpack results from peak_finder:
    text, img, COF, radius = obs_results[0], obs_results[1], obs_results[2], obs_results[3]
    #----------
    # SUBPLOTS:
    #----------
    font = 17
    fig = plt.figure()
    # 1. subplot: one vertical marker per line position:
    ax1 = fig.add_subplot(4,1,(1,3))
    for i in range(len(l_obs0)): ax1.axvline(x=l_obs0[i], ymax=0.85, color='r', linestyle='--', alpha=1)
    for i in range(len(l_obs1)): ax1.axvline(x=l_obs1[i], ymax=0.85, color='r', linestyle='-', alpha=1)
    for i in range(len(l_teo0)): ax1.axvline(x=l_teo0[i], ymax=0.85, color='b', linestyle='--', alpha=1)
    for i in range(len(l_teo1)): ax1.axvline(x=l_teo1[i], ymax=0.85, color='b', linestyle='-', alpha=1)
    ax1.plot(l, img.sum(axis=1)/np.max(img.sum(axis=1)), 'k-', label='ThAr spectrum')
    # 2. subplot: the arc image with detected line centers boxed:
    ax2 = fig.add_subplot(313)
    ax2.imshow(linear(img.T), cmap='Blues')
    ax2.scatter(COF[:,0], COF[:,1], s=radius*12, facecolors='none', edgecolors='r', marker='s')
    # Annotation: atlas wavelength written (rotated) above each line:
    for i in range(len(l_teo0)):
        ax1.annotate(l_teo0[i], (l_teo0[i]-0.5, 1.15), rotation=45, fontsize=11, color='b')
    # Labels:
    ax1.set_ylabel('Normalized Counts', fontsize=font)
    ax1.set_xlabel(r'$\lambda$ (Å)', fontsize=font)
    ax1.tick_params(labelsize=font)
    #------
    #ax1.set_xticklabels([])
    #ax2.set_yticklabels(['0', ''])
    ax2.set_xlabel(r'$\lambda$ (pixel)', fontsize=font)
    ax2.set_ylabel(r'$x$ (pixel)', fontsize=font)
    ax2.tick_params(labelsize=font)
    # Axes:
    ax1.set_xlim((min(l), max(l)))
    ax1.set_ylim((0, 1.2))
    ax2.set_xlim((0, 2749))
    ax2.invert_yaxis()
    plt.show()
def plot_arc_check(l, img, l_ca, text):
    """Overlay two collapsed arc spectra with the Ca line positions marked."""
    fig = plt.figure()
    ax1 = fig.add_subplot(4,1,(1,3))
    # Mark every Ca reference wavelength:
    for line in l_ca:
        ax1.axvline(x=line, ymax=1, color='g', linestyle='-.')
    # Collapse each image along its second axis and normalize to its peak:
    for wave, image, style in ((l[0], img[0], 'b-'), (l[1], img[1], 'k-')):
        collapsed = image.sum(axis=1)
        ax1.plot(wave, collapsed / np.max(collapsed), style)
    # Labels:
    plt.title(text)
    plt.xlabel(r'$\lambda_{\text{obs}}$ (Å)')
    plt.ylabel(r'Counts')
    plt.show()
def plot_arc_illustration(l_obs0, l_teo0, l_obs1, l_teo1, l, obs_results):
    """Publication-style illustration of the arc-line identification:
    collapsed spectrum with COF/atlas/final line markers on top, the raw
    arc image with boxed line centers below.
    (Figure made to span the entire screen.)
    """
    # Unpack results from peak_finder:
    text, img, COF, radius = obs_results[0], obs_results[1], obs_results[2], obs_results[3]
    #----------
    # SUBPLOTS:
    #----------
    font = 12
    fig = plt.figure()
    h = 0.85
    # 1. subplot: COF positions (red), atlas lines (blue), final lines (green):
    ax1 = fig.add_subplot(10,1,(1,9))
    for i in range(len(l_obs0)): ax1.axvline(x=l_obs0[i], ymax=h, color='r', linestyle=':', alpha=1)
    #for i in range(len(l_obs1)): ax1.axvline(x=l_obs1[i], ymax=0.85, color='r', linestyle='-', alpha=1)
    for i in range(len(l_teo0)): ax1.axvline(x=l_teo0[i], ymax=h, color='b', linestyle=':', alpha=1)
    for i in range(len(l_teo1)): ax1.axvline(x=l_teo1[i], ymax=h, color='limegreen', linestyle='-', alpha=1)
    ax1.plot(l, img.sum(axis=1)/np.max(img.sum(axis=1)), 'k-', label='ThAr spectrum')
    # 2. subplot: the arc image with detected line centers boxed:
    ax2 = fig.add_subplot(10,1,10)
    ax2.imshow(linear(img.T), cmap='Blues')
    ax2.scatter(COF[:,0], COF[:,1], s=radius*12, facecolors='none', edgecolors='r', marker='s')
    #-----------------
    # GLOBAL SETTINGS:
    #-----------------
    # Legend: one dummy artist per marker style.
    # NOTE(review): the two lines below reuse the leftover loop variable `i`
    # (last index of l_teo1 from the loops above) -- fragile, but only a
    # legend dummy; verify before refactoring.
    ax1.axvline(x=l_obs0[0], ymax=h, color='r', linestyle='--', alpha=0.4, label='COF')
    ax1.axvline(x=l_teo0[i], ymax=h, color='b', linestyle=':', alpha=1.0, label='Atlas lines')
    ax1.axvline(x=l_teo1[i], ymax=h, color='limegreen', linestyle='-', alpha=0.2, label='Final lines')
    ax1.legend(bbox_to_anchor=(0.93, 1.14), ncol=4, fontsize=font)
    # Annotation: atlas wavelengths written (rotated) above the lines:
    for i in range(len(l_teo0)):
        ax1.annotate(l_teo0[i], (l_teo0[i]-0.5, 1.15), rotation=45, fontsize=8, color='darkblue')
    # Labels:
    ax1.set_ylabel('Norm. Counts', fontsize=font)
    ax1.tick_params(labelsize=font)
    ax1.set_xticklabels([])
    ax1.set_yticklabels(['', '', '0.2', '0.4', '0.6', '0.8', '1.0', '1.2'])
    #ax2.set_yticklabels(['0', ''])
    ax2.set_xlabel(r'$\lambda$ (pixel)', fontsize=font)
    ax2.set_ylabel(r'$\Delta x$', fontsize=font)
    ax2.tick_params(labelsize=font)
    # Axes (zoomed to the pixel range 820-1850):
    ax1.set_xlim((l[820], l[1850]))
    ax2.set_xlim((820, 1850))
    ax1.set_ylim((-0.02, 1.2))
    ax2.invert_yaxis()
    fig.subplots_adjust(hspace=-0.35)
    plt.show()
#-----------------------------------------------------------------------------------------------------------
def plot_blaze(s_orders, f_orders, f_lincor, dif_max):
    """Plot two spectral orders together with their blaze-function estimates,
    the upper order shifted vertically so both fit in one panel."""
    # Vertical offset separating the two orders, and the resulting y-span:
    yoff = 2500
    dy = np.max(f_lincor[1] / dif_max[1]) + yoff
    plt.figure()
    # Raw spectra (the upper one still contains cosmic-ray hits):
    plt.plot(s_orders[1] + yoff, 'k-', linewidth=0.5, label='Object with cosmics')
    plt.plot(s_orders[0], 'k-', linewidth=0.5)
    # Blaze estimates before the linear correction:
    plt.plot(f_orders[1] / dif_max[1] + yoff, 'r-', label='Blaze hot pixels')
    plt.plot(f_orders[0] / dif_max[0], 'r-')
    # Blaze estimates after the linear correction:
    plt.plot(f_lincor[1] / dif_max[1] + yoff, 'b-', label='Blaze Ca order')
    plt.plot(f_lincor[0] / dif_max[0], 'c-', label='Blaze order below')
    # Labels and axis limits (10% padding above and below):
    plt.legend(loc='best')
    plt.xlabel(r'$\lambda$ (pixel)')
    plt.ylabel('Counts')
    plt.ylim(-dy*0.1, dy+dy*0.1)
    plt.show()
def plot_deblaze(s_blazecor):
    """Show two blaze-corrected orders, each normalized by its median and
    plotted in its own panel with the unity level marked."""
    plt.figure()
    grid = plt.GridSpec(3, 1, wspace=0.0, hspace=0.0)
    panels = []
    # One panel per order (#57 on top, #58 below):
    for row, spectrum in enumerate((s_blazecor[1], s_blazecor[0])):
        ax = plt.subplot(grid[row, :])
        ax.plot(spectrum / np.median(spectrum), 'k-')
        ax.axhline(1, color='b', linestyle='--')
        ax.set_ylim(-0.2, 3)
        panels.append(ax)
    # Shared labels placed on the lower panel:
    panels[1].set_xlabel(r'$\lambda$ (pixel)')
    panels[1].set_ylabel('Normalized Counts', y = 1)
    plt.show()
#-----------------------------------------------------------------------------------------------------------
def plot_merge(s, l, l_ca):
    """Plot the merged spectrum with the Ca lines and zero level marked."""
    plt.figure()
    plt.plot(l, s, '-', color='k', linewidth=0.5)
    # Mark each Ca reference wavelength:
    for line in l_ca:
        plt.axvline(x=line, ymax=1, color='g', linestyle='-.', alpha=1.0)
    # Zero-flux reference line:
    plt.axhline(0, color='r', linestyle='--')
    # Labels:
    plt.xlabel(r'$\lambda_{\text{obs}}$ (Å)')
    plt.ylabel(r'Counts')
    #plt.ylim(-0.001, 0.002)
    plt.show()
#-----------------------------------------------------------------------------------------------------------
def plot_continuum_norm(l, s, l_points, s_points, s_norm, poly, l_ca):
    """Show the continuum fit and the resulting normalized spectrum."""
    # Reference level used to scale everything shown:
    level = np.median(poly(l))
    plt.figure()
    # Spectrum before normalization (grey), with the Ca lines marked:
    plt.plot(l, s / level, '-', color='darkgrey', linewidth=0.5)
    for line in l_ca:
        plt.axvline(x=line, color='g', linestyle='-.')
    # Continuum polynomial and the anchor points it was fitted to:
    plt.plot(l, poly(l) / level, 'r--')
    plt.plot(l_points, s_points / level, 'ro')
    # Normalized spectrum, shifted down onto the zero line:
    plt.plot(l, s_norm - 1, 'k-', linewidth=0.5)
    plt.axhline(0, color='r', linestyle=':')
    # Labels:
    plt.xlabel(r'$\lambda$ (Å)')
    plt.ylabel(r'Norm. Counts')
    plt.show()
def plot_continuum_norm_all(l, s, l_points, s_points, s_norm, poly, l_ca):
    """Compare three continuum-normalization variants (point, peak, mean)
    on one spectrum; variant 1 (the peak fit) is the common reference.

    Parameters
    ----------
    l, s     : wavelength grid and raw spectrum.
    l_points : per-variant wavelengths of the continuum anchor points.
    s_points : per-variant fluxes of the continuum anchor points.
    s_norm   : per-variant normalized spectra.
    poly     : per-variant continuum polynomials (callables).
    l_ca     : wavelengths of the two Ca lines to mark.
    """
    # Median continuum level per variant (pref0/pref2 are computed but
    # unused; only pref1, the peak-fit level, scales the plot):
    pref0 = np.median(poly[0](l))
    pref1 = np.median(poly[1](l))
    pref2 = np.median(poly[2](l))
    # Plot:
    fig, ax = plt.subplots()
    plt.plot(l, s/pref1, '-', color='darkgrey', linewidth=0.5, label='Spectrum before')
    #-------
    # The three continuum variants and their anchor points:
    plt.plot(l, poly[1](l)/pref1, 'r--', label='Continuum peak')
    plt.plot(l, poly[0](l)/pref1, 'r-.', label='Continuum point', alpha=0.28)
    plt.plot(l, poly[2](l)/pref1, 'r:', label='Continuum mean', alpha=0.40)
    plt.plot(l_points[0], s_points[0]/pref1, 'ro', ms=4, alpha=0.3)
    plt.plot(l_points[1], s_points[1]/pref1, 'ro', ms=4)
    plt.plot(l_points[2], s_points[2]/pref1, 'ro', ms=4, alpha=0.3)
    #-------
    # Normalized result shifted onto the zero line, plus reference markers:
    plt.plot(l, s_norm[1]-1, 'k-', linewidth=0.5, label='Spectrum after')
    plt.axhline(0, color='g', linestyle='--', label='Continuum')
    plt.axvline(x=l_ca[0], color='g', linestyle=':', label='Ca lines')
    plt.axvline(x=l_ca[1], color='g', linestyle=':')
    # Legend (handles reordered so the continuum variants come first):
    handles,labels = ax.get_legend_handles_labels()
    handles = [handles[1], handles[2], handles[3], handles[0], handles[4], handles[5], handles[6]]
    labels = [ labels[1], labels[2], labels[3], labels[0], labels[4], labels[5], labels[6]]
    ax.legend(handles, labels, loc='best', ncol=1, bbox_to_anchor=(1, 0.8))
    # Settings:
    plt.xlabel(r'$\lambda$ (Å)')
    plt.ylabel(r'Norm. Counts')
    plt.ylim(-1, 1.4)
    plt.show()
#-----------------------------------------------------------------------------------------------------------
def plot_sindex_scatter(l, s_dif, s_std0, s_std, bands):
    """Plot the per-pixel scatter and its smoothed uncertainty, in percent,
    with the four S-index band edges marked."""
    plt.figure()
    # Raw scatter and its two smoothing stages, converted to percent:
    plt.plot(l, s_dif * 100, '-', color='lightgrey', label=r'scatter($i$)')
    plt.plot(l, s_std0 * 100, 'k-', linewidth=1.0, label=r'$\sigma_i$')
    plt.plot(l, s_std * 100, 'r-', linewidth=1.2, label=r'$\mu_i(\sigma_i)$')
    plt.axhline(0, linestyle=':', color='k')
    # Band markers: first band blue, middle two green, last band red:
    for edge, colour in zip((bands[0], bands[1], bands[2], bands[3]), ('b', 'g', 'g', 'r')):
        plt.axvline(edge, linestyle=':', color=colour)
    # Labels and limits:
    plt.xlabel(r'$\lambda$ (Å)')
    plt.ylabel(r'Uncertainty (\%)')
    plt.legend(loc='best', ncol=1)
    plt.xlim(min(l), max(l))
    plt.ylim(-10, 50)
    plt.show()
def plot_sindex_bands(l, s, s_tri_K, s_tri_H, K2_indices, H2_indices, K2_fluxes, H2_fluxes, \
                      l_k1_inter, l_k2_inter, l_h1_inter, l_h2_inter, \
                      s_k1_inter, s_k2_inter, s_h1_inter, s_h2_inter, \
                      Kp_wave, Hp_wave, Kp_fluxes, Hp_fluxes, Km_indices, Hm_indices,\
                      K, H, K1_indices, H1_indices):
    """Zoomed view of the Ca II K (top) and H (bottom) S-index bandpasses,
    showing the triangular bandpass, the sampled flux grids, and the
    intersection points. K and H are the band-center wavelengths; the
    *_indices select band pixels in l, the *_inter values are the
    bandpass/spectrum intersections.
    """
    s_scale = 1.2
    y = 11
    grid = plt.GridSpec(y, 4, wspace=0.0, hspace=0.0)
    #---------------------
    # K zoom subplot (upper half of the grid):
    ax0 = plt.subplot(grid[:int(y/2),:3])
    ax0.plot(l, s, 'k-', lw=0.5, label='Spectrum')
    ax0.axvline(l[K1_indices[0]], c='g', ls='--', label='Bandpass 1.09 Å')
    ax0.axvline(l[K1_indices[-1]], c='g', ls='--')
    ax0.plot(l[K2_indices], s_tri_K, 'g:', lw=1.3, label='Bandpass triangle')
    ax0.plot(l[K2_indices], K2_fluxes, c='deeppink', ls='-', lw=1.2, label='Triangle grid')
    ax0.plot(Kp_wave, Kp_fluxes, c='b', ls='-', lw=1.2, label='Polygon grid')
    #ax0.plot(l[Km_indices], s[Km_indices], c='deeppink', lw=1.2, label='Mean grid')
    ax0.plot(l_k1_inter, s_k1_inter, 'ro', alpha=0.3, label='Intersections')
    ax0.plot(l_k2_inter, s_k2_inter, 'ro', alpha=0.3)
    # H zoom Subplots (lower half of the grid, same elements as for K):
    ax1 = plt.subplot(grid[int(y/2)+1:,:3])
    ax1.axvline(l[H1_indices[0]], c='g', ls='--')
    ax1.axvline(l[H1_indices[-1]], c='g', ls='--')
    ax1.plot(l, s, 'k-', lw=0.5)
    ax1.plot(l[H2_indices], s_tri_H, 'g:', lw=1.3)
    ax1.plot(l[H2_indices], H2_fluxes, c='deeppink', ls='-', lw=1.2)
    ax1.plot(Hp_wave, Hp_fluxes, c='b', ls='-', lw=1.2)
    #ax1.plot(l[Hm_indices], s[Hm_indices], c='deeppink', lw=1.2)
    ax1.plot(l_h1_inter, s_h1_inter, 'ro', alpha=0.3)
    ax1.plot(l_h2_inter, s_h2_inter, 'ro', alpha=0.3)
    # Settings (both panels zoomed to +-1.3 Å around the band center):
    ax0.set_ylim([0, 0.2])
    ax0.set_xlim([K-1.3, K+1.3])
    ax1.set_ylim([0, 0.2])
    ax1.set_xlim([H-1.3, H+1.3])
    #ax0.set_xticklabels([])
    ax0.annotate('K bandpass', (K-0.2, 0.02))
    ax1.annotate('H bandpass', (H-0.2, 0.02))
    ax0.set_ylabel('Norma. Counts')
    ax1.set_ylabel('Norma. Counts')
    ax1.set_xlabel(r'$\lambda$ (Å)')
    ax0.legend(loc='upper left', fontsize=12, ncol=1, bbox_to_anchor=(1.0, 0.5))
    plt.show()
def plot_sindex_fluxes(l, s, band_indices, fluxes, X):
    """Overview of the four S-index bands (V, K, H, R) on the spectrum,
    with the band fluxes shaded.

    Parameters
    ----------
    l, s         : wavelength grid and normalized spectrum.
    band_indices : per-band index arrays into l (see usage below).
    fluxes       : per-band flux arrays; entries 8 and 9 hold (wave, flux)
                   pairs for the triangular K and H bandpasses.
    X            : the four band-center wavelengths (V, K, H, R).
    """
    V, K, H, R = X[0], X[1], X[2], X[3]
    # Plot:
    fig, ax = plt.subplots()
    s_scale = 1.2
    #------
    # Shaded band fluxes: V (blue), R (red), K and H triangles (green):
    ax.fill_between(l[band_indices[0]], fluxes[0], color='b', alpha=0.3)
    ax.fill_between(l[band_indices[1]], fluxes[1], color='r', alpha=0.3)
    ax.fill_between(fluxes[8][0], fluxes[8][1], color='g', alpha=0.5)
    ax.fill_between(fluxes[9][0], fluxes[9][1], color='g', alpha=0.5)
    #------
    # Dotted guide lines from the triangle-band edges up to their apexes:
    ax.plot([l[band_indices[6][ 0]], l[band_indices[8]]], [0, 1], c='g', ls=':')
    ax.plot([l[band_indices[6][-1]], l[band_indices[8]]], [0, 1], c='g', ls=':')
    ax.plot([l[band_indices[7][ 0]], l[band_indices[9]]], [0, 1], c='g', ls=':')
    ax.plot([l[band_indices[7][-1]], l[band_indices[9]]], [0, 1], c='g', ls=':')
    ax.axvline(V, ymax=1/s_scale, c='b', ls=':')
    ax.axvline(R, ymax=1/s_scale, c='r', ls=':')
    #------
    # Band-name annotations just above the spectrum:
    ax.annotate(r'$V$', (V-0.5, 1.07))
    ax.annotate(r'$K$', (K-0.5, 1.07))
    ax.annotate(r'$H$', (H-0.5, 1.07))
    ax.annotate(r'$R$', (R-0.5, 1.07))
    #------
    ax.plot(l, s, 'k-', label='Spectrum', lw=0.3)
    # Settings:
    fig.subplots_adjust(wspace=0, hspace=0.0)
    ax.set_xlabel(r'$\lambda$ (Å)')
    ax.set_ylabel( 'Norm. Counts')
    ax.set_ylim([0, s_scale])
    plt.show()
#########################################################################################################
# PLOTS FOR GENERAL UTILITIES #
#########################################################################################################
def plot_sky_background(image, disp, yfit_below, yfit_above, yfit_order, l_sky):
    """Plot the spectrum with inter-order fits, then the sky background.

    Parameters
    ----------
    image      : 2D spectral image (displayed transposed).
    disp       : dispersion-axis pixel coordinates.
    yfit_below : polynomial fit of the region below the order.
    yfit_above : polynomial fit of the region above the order.
    yfit_order : polynomial fit of the order itself (rounded for display).
    l_sky      : extracted sky background per dispersion pixel.
    """
    # Plot spectrum with inter-order fits.
    plt.figure()
    # BUGFIX: FITS(image.T, 'linear') passed the string 'linear' as the
    # numeric `sigma` parameter, crashing inside linear(); linear scaling
    # is already the default, so just pass the image.
    FITS(image.T)
    plt.plot(disp, yfit_below, 'r-', linewidth='1.5', label='Polynomial fit')
    plt.plot(disp, yfit_above, 'g-', linewidth='1.5', label='Polynomial fit')
    plt.plot(disp, yfit_order.round(), 'm-', linewidth='1.5', label='Polynomial fit')
    plt.xlabel('Dispersion (pixels)'); plt.ylabel('Cross Dispersion (pixels)')
    plt.show()
    # Plot final sky background as function of wavelength:
    plt.figure()
    plt.plot(l_sky, 'k-')
    plt.xlabel('Dispersion (pixels)'); plt.ylabel('Median Counts')
    plt.show()
def plot_locate(t, dif0, dif1, cut_off, n, above, below):
    """Plot outlier location: raw vs. corrected differences with the
    symmetric cutoff band, annotated with the n used."""
    upper = np.ones(len(t)) * cut_off
    lower = -upper
    plt.figure()
    # Raw (outlier-affected) and corrected difference series:
    plt.plot(t, dif0, '-', color='lightgrey', linewidth=1.3, label='Outliers')
    plt.plot(t, dif1, 'k-', label='Corrected', linewidth=0.8)
    # Symmetric cutoff band (only the lower line carries the label):
    plt.plot(t, upper, 'r--')
    plt.plot(t, lower, 'r--', label='Cutoff')
    #plt.plot(above, below, 'b+')
    # Annotate the n used, 10% in from the upper-left corner:
    xpos = t[0] + (t[-1] - t[0]) * 0.1
    ypos = max(dif0) - (max(dif0) - min(dif0)) * 0.1
    plt.text(xpos, ypos, '$n={}$'.format(n), fontsize=15)
    plt.legend(loc='best')
    plt.show()
#########################################################################################################
# PLOTS FOR TEST #
#########################################################################################################
def plot_blue_moves(x, y, time):
    """ This function makes a plot of the spectral displacement/scatter over time. """
    plt.figure()
    # Scatter colored by observation time, with a colorbar as the time key:
    points = plt.scatter(x, y, c=time, s=40, linewidth=0.5, edgecolor='k', cmap='rainbow')
    plt.colorbar(points)
    # Axis labels:
    plt.xlabel('Dispersion (pixel)')
    plt.ylabel('Cross Dispersion (pixel)')
    plt.show()
def plot_rv_stability(time, y, sigma):
    """ This function makes a plot of the spectral displacement/scatter over time. """
    # Find parameters:
    import scipy.constants
    import datetime
    #import pandas as pd
    #--------------
    # NOTE(review): `julian` is a third-party module not imported in this
    # function -- presumably imported at module level; verify.
    t = [julian.from_jd(i, fmt='jd') for i in time]
    # Convert the pixel quantities to wavelength units; the constant is
    # presumably the dispersion scale (Å/pixel) -- TODO confirm.
    lx, sx = 0.052783326947974274 * y, 0.052783326947974274 * sigma
    # Doppler shift relative to 4000 Å, converted to km/s:
    rv = lx/4000*scipy.constants.c*1e-3
    sigma = sx/4000*scipy.constants.c*1e-3
    # Plot (white points on ax1 keep the pixel-scale axis in sync):
    fig, ax1 = plt.subplots()
    ax2 = ax1.twinx()
    ax1.plot(t, y, 'wo')
    ax2.errorbar(t, rv, sigma, None, 'o', ms=3, lw=0.5, c='grey', mec='mediumvioletred', mfc='mediumvioletred')
    #ax2.plot(t, rv, 'o', ms=3.0, c='mediumvioletred')
    #ax2.plot(t, rv, '-', lw=0.3, c='mediumvioletred')
    # Settings:
    ax1.set_xlabel('Date')
    ax1.set_ylabel('Dispersion (pixel)')
    ax2.set_ylabel(r'RV drift (km s$^-1$)')
    plt.grid(color='lightgrey')
    #plt.xticks(rotation=-45)
    plt.show()
def plot_match_coordinates(array1, array2, indices1, indices2):
    """Plot two coordinate catalogues and highlight the matched entries.

    indices1 : array1 coordinates within threshold (unused here).
    indices2 : array2 coordinates matching array1 (the final list).
    """
    # Both catalogues as open circles, the matches as large red crosses:
    plt.scatter(array1[:,1], array1[:,0], marker='o', facecolors='none', edgecolors='darkgrey')
    plt.scatter(array2[:,1], array2[:,0], marker='o', facecolors='none', edgecolors='b')
    plt.scatter(array2[:,1][indices2], array2[:,0][indices2], s=1e2, marker='+', edgecolors='r')
    # Title summarizes how many lines were matched:
    n_match = len(indices2)
    plt.title('{} lines in common out of ({}, {}) available'.format(n_match, len(array1), len(array2)))
    # Axis labels:
    plt.xlabel(r'$x$ (pixel)')
    plt.ylabel(r'$y$ (pixel)')
    plt.show()
#########################################################################################################
# PRINT TO BASH #
#########################################################################################################
def loading(i, i_max):
    """Print an in-place loading percentage to the terminal.

    Parameters
    ----------
    i     : zero-based index of the current step.
    i_max : total number of steps.
    """
    # Move the cursor far left (column 0) so the line is overwritten in place:
    sys.stdout.write(u"\u001b[1000D")
    sys.stdout.flush()
    time.sleep(1)
    # BUGFIX: fixed the "Loding" typo and use i_max (previously ignored)
    # to report an actual percentage instead of a bare "%".
    percent = int((i + 1) / (i_max * 1.0) * 100)
    sys.stdout.write("Loading... {}%".format(percent))
    sys.stdout.flush()
def compilation(i, i_max, text):
    """ This function print out a compilation time menu bar in the terminal. """
    percent = (i + 1) / (i_max * 1.0) * 100
    # The bar body is 50 characters wide, hence progress is divided by 2:
    filled = int(percent/2)
    bar = "[" + "-" * filled + '>' + " " * (50 - filled) + "] {}% {}".format(int(percent), text)
    # Return the cursor to column 0 and redraw the bar in place:
    sys.stdout.write(u"\u001b[1000D" + bar)
    sys.stdout.flush()
# hdul = fits.open('file')
# data = hdul[0].data
# hdr = hdul[0].header
# hdr['targname'] = 'NGC121-a'
# hdr[27] = 99
# hdr.set('observer', 'Edwin Hubble')
# fits.writeto('{}TA_{}.fits'.format(self.path, self.date), TA, TA_hdu, overwrite=True)
#box = plt.gca()
#box.add_patch(Rectangle((V-VR/2, axes[0]), VR, axes_ylen, color='w', linestyle=':'))
#box.add_patch(Rectangle((H-HK/2, axes[0]), HK, axes_ylen, color='w', linestyle=':'))
########################################
# # Find the mean, unitless difference, and the std:
# s_mea = self.convolve(s[1], 'mean', 3)
# s_dif = s[1]/s_mea - 1
# # First make sure to remove obvious outliers:
# s_std0 = self.convolve(s_dif, 'std', 100)
# # Iterate to smoothen out sharp peaks:
# s_wei = self.convolve(s[1], 'mean', 200)
# s_std = self.convolve(s_std0, 'mean', 100) * s_wei/np.max(s_wei)
| [
"matplotlib.pyplot.title",
"sys.stdout.write",
"matplotlib.pyplot.subplot2grid",
"numpy.ones",
"matplotlib.pyplot.figure",
"numpy.mean",
"sys.stdout.flush",
"matplotlib.pyplot.axvline",
"numpy.meshgrid",
"julian.from_jd",
"Plot_Tools.linear",
"numpy.std",
"matplotlib.pyplot.colorbar",
"num... | [((2232, 2242), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2240, 2242), True, 'import matplotlib.pyplot as plt\n'), ((2350, 2359), 'numpy.max', 'np.max', (['z'], {}), '(z)\n', (2356, 2359), True, 'import numpy as np\n'), ((2374, 2394), 'numpy.where', 'np.where', (['(z == z_max)'], {}), '(z == z_max)\n', (2382, 2394), True, 'import numpy as np\n'), ((2545, 2562), 'numpy.meshgrid', 'np.meshgrid', (['y', 'x'], {}), '(y, x)\n', (2556, 2562), True, 'import numpy as np\n'), ((2574, 2586), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2584, 2586), True, 'import matplotlib.pyplot as plt\n'), ((3352, 3362), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3360, 3362), True, 'import matplotlib.pyplot as plt\n'), ((3415, 3469), 'matplotlib.pyplot.hist', 'plt.hist', (['hist', 'bins'], {'edgecolor': '"""k"""', 'alpha': '(1)', 'log': '(True)'}), "(hist, bins, edgecolor='k', alpha=1, log=True)\n", (3423, 3469), True, 'import matplotlib.pyplot as plt\n'), ((3719, 3743), 'numpy.array', 'np.array', (['img'], {'copy': '(True)'}), '(img, copy=True)\n', (3727, 3743), True, 'import numpy as np\n'), ((3984, 4007), 'numpy.where', 'np.where', (['(imageData < 0)'], {}), '(imageData < 0)\n', (3992, 4007), True, 'import numpy as np\n'), ((4051, 4074), 'numpy.where', 'np.where', (['(imageData > 1)'], {}), '(imageData > 1)\n', (4059, 4074), True, 'import numpy as np\n'), ((4234, 4246), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4244, 4246), True, 'import matplotlib.pyplot as plt\n'), ((4412, 4428), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xlab'], {}), '(xlab)\n', (4422, 4428), True, 'import matplotlib.pyplot as plt\n'), ((4433, 4449), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['ylab'], {}), '(ylab)\n', (4443, 4449), True, 'import matplotlib.pyplot as plt\n'), ((4454, 4464), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4462, 4464), True, 'import matplotlib.pyplot as plt\n'), ((7136, 7148), 
'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (7146, 7148), True, 'import matplotlib.pyplot as plt\n'), ((7153, 7242), 'matplotlib.pyplot.plot', 'plt.plot', (['data1[:, 0]', 'data1[:, 1]', '"""b-"""'], {'label': '"""Collapsed: Median-filter"""', 'alpha': '(0.5)'}), "(data1[:, 0], data1[:, 1], 'b-', label='Collapsed: Median-filter',\n alpha=0.5)\n", (7161, 7242), True, 'import matplotlib.pyplot as plt\n'), ((7246, 7322), 'matplotlib.pyplot.plot', 'plt.plot', (['data2[10:, 0]', 'data2[10:, 1]', '"""g--"""'], {'label': '"""Convolved: Sum-filter"""'}), "(data2[10:, 0], data2[10:, 1], 'g--', label='Convolved: Sum-filter')\n", (7254, 7322), True, 'import matplotlib.pyplot as plt\n'), ((7423, 7511), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, 1]', '[ref_cen_pos[0], ref_cen_pos[0]]', '"""k:"""'], {'label': '"""Ref Orders center"""'}), "([0, 1], [ref_cen_pos[0], ref_cen_pos[0]], 'k:', label=\n 'Ref Orders center')\n", (7431, 7511), True, 'import matplotlib.pyplot as plt\n'), ((7511, 7542), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Normalized Counts"""'], {}), "('Normalized Counts')\n", (7521, 7542), True, 'import matplotlib.pyplot as plt\n'), ((7544, 7583), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Cross Dispersion (pixels)"""'], {}), "('Cross Dispersion (pixels)')\n", (7554, 7583), True, 'import matplotlib.pyplot as plt\n'), ((7588, 7617), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""'}), "(loc='lower right')\n", (7598, 7617), True, 'import matplotlib.pyplot as plt\n'), ((7622, 7632), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7630, 7632), True, 'import matplotlib.pyplot as plt\n'), ((7890, 7902), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (7900, 7902), True, 'import matplotlib.pyplot as plt\n'), ((7907, 7965), 'matplotlib.pyplot.plot', 'plt.plot', (['ridge_pos_disp', 'ridge_pos_cross', '"""b."""'], {'alpha': '(0.2)'}), "(ridge_pos_disp, ridge_pos_cross, 'b.', alpha=0.2)\n", (7915, 7965), 
True, 'import matplotlib.pyplot as plt\n'), ((8317, 8408), 'matplotlib.pyplot.plot', 'plt.plot', (['ridge_pos_disp[0]', 'ridge_pos_cross[0]', '"""b."""'], {'alpha': '(0.3)', 'label': '"""Traced ridge"""'}), "(ridge_pos_disp[0], ridge_pos_cross[0], 'b.', alpha=0.3, label=\n 'Traced ridge')\n", (8325, 8408), True, 'import matplotlib.pyplot as plt\n'), ((8408, 8502), 'matplotlib.pyplot.plot', 'plt.plot', (["order_trace['order_0'][0]", "order_trace['order_0'][1]", '"""k."""'], {'label': '"""Final ridge"""'}), "(order_trace['order_0'][0], order_trace['order_0'][1], 'k.', label=\n 'Final ridge')\n", (8416, 8502), True, 'import matplotlib.pyplot as plt\n'), ((8689, 8788), 'matplotlib.pyplot.plot', 'plt.plot', (['(ref_cen_pos * 0 + cen_disp)', 'ref_cen_pos', '"""r*"""'], {'label': '"""Ref posisions"""', 'markersize': '"""7"""'}), "(ref_cen_pos * 0 + cen_disp, ref_cen_pos, 'r*', label=\n 'Ref posisions', markersize='7')\n", (8697, 8788), True, 'import matplotlib.pyplot as plt\n'), ((8786, 8819), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Dispersion (pixels)"""'], {}), "('Dispersion (pixels)')\n", (8796, 8819), True, 'import matplotlib.pyplot as plt\n'), ((8821, 8860), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Cross Dispersion (pixels)"""'], {}), "('Cross Dispersion (pixels)')\n", (8831, 8860), True, 'import matplotlib.pyplot as plt\n'), ((8865, 8902), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""', 'ncol': '(2)'}), "(loc='lower right', ncol=2)\n", (8875, 8902), True, 'import matplotlib.pyplot as plt\n'), ((8907, 8923), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(450)'], {}), '(0, 450)\n', (8915, 8923), True, 'import matplotlib.pyplot as plt\n'), ((8928, 8938), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8936, 8938), True, 'import matplotlib.pyplot as plt\n'), ((9267, 9282), 'lmfit.models.GaussianModel', 'GaussianModel', ([], {}), '()\n', (9280, 9282), False, 'from lmfit.models import GaussianModel\n'), ((9662, 9693), 
'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'sharex': '(True)'}), '(1, 2, sharex=True)\n', (9674, 9693), True, 'import matplotlib.pyplot as plt\n'), ((9726, 9742), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(121)'], {}), '(121)\n', (9737, 9742), True, 'import matplotlib.pyplot as plt\n'), ((9992, 10008), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(122)'], {}), '(122)\n', (10003, 10008), True, 'import matplotlib.pyplot as plt\n'), ((11287, 11330), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'wspace': '(0.2)', 'hspace': '(0.0)'}), '(wspace=0.2, hspace=0.0)\n', (11306, 11330), True, 'import matplotlib.pyplot as plt\n'), ((11335, 11345), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11343, 11345), True, 'import matplotlib.pyplot as plt\n'), ((11465, 11483), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (11477, 11483), True, 'import matplotlib.pyplot as plt\n'), ((11759, 11782), 'mpl_toolkits.axes_grid1.make_axes_locatable', 'make_axes_locatable', (['ax'], {}), '(ax)\n', (11778, 11782), False, 'from mpl_toolkits.axes_grid1 import make_axes_locatable\n'), ((11862, 11887), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['im'], {'cax': 'cax'}), '(im, cax=cax)\n', (11874, 11887), True, 'import matplotlib.pyplot as plt\n'), ((11892, 11918), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Norm. Counts"""'], {}), "('Norm. 
Counts')\n", (11902, 11918), True, 'import matplotlib.pyplot as plt\n'), ((11923, 11933), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11931, 11933), True, 'import matplotlib.pyplot as plt\n'), ((12490, 12532), 'matplotlib.pyplot.GridSpec', 'plt.GridSpec', (['(7)', '(4)'], {'wspace': '(0.0)', 'hspace': '(0.0)'}), '(7, 4, wspace=0.0, hspace=0.0)\n', (12502, 12532), True, 'import matplotlib.pyplot as plt\n'), ((12570, 12594), 'matplotlib.pyplot.subplot', 'plt.subplot', (['grid[:6, 0]'], {}), '(grid[:6, 0])\n', (12581, 12594), True, 'import matplotlib.pyplot as plt\n'), ((13184, 13208), 'matplotlib.pyplot.subplot', 'plt.subplot', (['grid[:6, 1]'], {}), '(grid[:6, 1])\n', (13195, 13208), True, 'import matplotlib.pyplot as plt\n'), ((14906, 14916), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (14914, 14916), True, 'import matplotlib.pyplot as plt\n'), ((14975, 14989), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (14987, 14989), True, 'import matplotlib.pyplot as plt\n'), ((15217, 15240), 'mpl_toolkits.axes_grid1.make_axes_locatable', 'make_axes_locatable', (['ax'], {}), '(ax)\n', (15236, 15240), False, 'from mpl_toolkits.axes_grid1 import make_axes_locatable\n'), ((15318, 15343), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['im'], {'cax': 'cax'}), '(im, cax=cax)\n', (15330, 15343), True, 'import matplotlib.pyplot as plt\n'), ((15348, 15368), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Counts"""'], {}), "('Counts')\n", (15358, 15368), True, 'import matplotlib.pyplot as plt\n'), ((15374, 15384), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (15382, 15384), True, 'import matplotlib.pyplot as plt\n'), ((15525, 15543), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {}), '(2, 1)\n', (15537, 15543), True, 'import matplotlib.pyplot as plt\n'), ((16354, 16364), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (16362, 16364), True, 'import matplotlib.pyplot as plt\n'), ((16558, 16581), 
'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(1)', '(3)'], {}), '(1, 3)\n', (16575, 16581), False, 'from matplotlib import rc, cm, gridspec\n'), ((16646, 16689), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(2, 3)', '(0, 0)'], {'colspan': '(2)'}), '((2, 3), (0, 0), colspan=2)\n', (16662, 16689), True, 'import matplotlib.pyplot as plt\n'), ((17249, 17303), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(9, 3)', '(0, 2)'], {'colspan': '(1)', 'rowspan': '(5)'}), '((9, 3), (0, 2), colspan=1, rowspan=5)\n', (17265, 17303), True, 'import matplotlib.pyplot as plt\n'), ((17826, 17850), 'mpl_toolkits.axes_grid1.make_axes_locatable', 'make_axes_locatable', (['ax2'], {}), '(ax2)\n', (17845, 17850), False, 'from mpl_toolkits.axes_grid1 import make_axes_locatable\n'), ((17955, 18006), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['im'], {'cax': 'cax', 'orientation': '"""horizontal"""'}), "(im, cax=cax, orientation='horizontal')\n", (17967, 18006), True, 'import matplotlib.pyplot as plt\n'), ((18095, 18138), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'wspace': '(0)', 'hspace': '(-0.03)'}), '(wspace=0, hspace=-0.03)\n', (18114, 18138), True, 'import matplotlib.pyplot as plt\n'), ((18143, 18153), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (18151, 18153), True, 'import matplotlib.pyplot as plt\n'), ((18422, 18436), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (18434, 18436), True, 'import matplotlib.pyplot as plt\n'), ((19445, 19461), 'numpy.median', 'np.median', (['s_neg'], {}), '(s_neg)\n', (19454, 19461), True, 'import numpy as np\n'), ((19544, 19554), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (19552, 19554), True, 'import matplotlib.pyplot as plt\n'), ((19693, 19709), 'numpy.poly1d', 'np.poly1d', (['coefs'], {}), '(coefs)\n', (19702, 19709), True, 'import numpy as np\n'), ((19793, 19805), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (19803, 19805), True, 
'import matplotlib.pyplot as plt\n'), ((19816, 19859), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(4, 1)', '(0, 0)'], {'rowspan': '(3)'}), '((4, 1), (0, 0), rowspan=3)\n', (19832, 19859), True, 'import matplotlib.pyplot as plt\n'), ((20022, 20038), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(414)'], {}), '(414)\n', (20033, 20038), True, 'import matplotlib.pyplot as plt\n'), ((20731, 20741), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (20739, 20741), True, 'import matplotlib.pyplot as plt\n'), ((21064, 21076), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (21074, 21076), True, 'import matplotlib.pyplot as plt\n'), ((22476, 22486), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (22484, 22486), True, 'import matplotlib.pyplot as plt\n'), ((22539, 22551), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (22549, 22551), True, 'import matplotlib.pyplot as plt\n'), ((22854, 22869), 'matplotlib.pyplot.title', 'plt.title', (['text'], {}), '(text)\n', (22863, 22869), True, 'import matplotlib.pyplot as plt\n'), ((22874, 22916), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\lambda_{\\\\text{obs}}$ (Å)"""'], {}), "('$\\\\lambda_{\\\\text{obs}}$ (Å)')\n", (22884, 22916), True, 'import matplotlib.pyplot as plt\n'), ((22920, 22940), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Counts"""'], {}), "('Counts')\n", (22930, 22940), True, 'import matplotlib.pyplot as plt\n'), ((22946, 22956), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (22954, 22956), True, 'import matplotlib.pyplot as plt\n'), ((23282, 23294), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (23292, 23294), True, 'import matplotlib.pyplot as plt\n'), ((25219, 25229), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (25227, 25229), True, 'import matplotlib.pyplot as plt\n'), ((25523, 25535), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (25533, 25535), True, 'import matplotlib.pyplot as plt\n'), ((25540, 
25618), 'matplotlib.pyplot.plot', 'plt.plot', (['(s_orders[1] + yoff)', '"""k-"""'], {'linewidth': '(0.5)', 'label': '"""Object with cosmics"""'}), "(s_orders[1] + yoff, 'k-', linewidth=0.5, label='Object with cosmics')\n", (25548, 25618), True, 'import matplotlib.pyplot as plt\n'), ((25621, 25663), 'matplotlib.pyplot.plot', 'plt.plot', (['s_orders[0]', '"""k-"""'], {'linewidth': '(0.5)'}), "(s_orders[0], 'k-', linewidth=0.5)\n", (25629, 25663), True, 'import matplotlib.pyplot as plt\n'), ((25673, 25746), 'matplotlib.pyplot.plot', 'plt.plot', (['(f_orders[1] / dif_max[1] + yoff)', '"""r-"""'], {'label': '"""Blaze hot pixels"""'}), "(f_orders[1] / dif_max[1] + yoff, 'r-', label='Blaze hot pixels')\n", (25681, 25746), True, 'import matplotlib.pyplot as plt\n'), ((25747, 25787), 'matplotlib.pyplot.plot', 'plt.plot', (['(f_orders[0] / dif_max[0])', '"""r-"""'], {}), "(f_orders[0] / dif_max[0], 'r-')\n", (25755, 25787), True, 'import matplotlib.pyplot as plt\n'), ((25795, 25866), 'matplotlib.pyplot.plot', 'plt.plot', (['(f_lincor[1] / dif_max[1] + yoff)', '"""b-"""'], {'label': '"""Blaze Ca order"""'}), "(f_lincor[1] / dif_max[1] + yoff, 'b-', label='Blaze Ca order')\n", (25803, 25866), True, 'import matplotlib.pyplot as plt\n'), ((25867, 25934), 'matplotlib.pyplot.plot', 'plt.plot', (['(f_lincor[0] / dif_max[0])', '"""c-"""'], {'label': '"""Blaze order below"""'}), "(f_lincor[0] / dif_max[0], 'c-', label='Blaze order below')\n", (25875, 25934), True, 'import matplotlib.pyplot as plt\n'), ((25942, 25964), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (25952, 25964), True, 'import matplotlib.pyplot as plt\n'), ((25969, 26001), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\lambda$ (pixel)"""'], {}), "('$\\\\lambda$ (pixel)')\n", (25979, 26001), True, 'import matplotlib.pyplot as plt\n'), ((26006, 26026), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Counts"""'], {}), "('Counts')\n", (26016, 26026), True, 'import 
matplotlib.pyplot as plt\n'), ((26031, 26065), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-dy * 0.1)', '(dy + dy * 0.1)'], {}), '(-dy * 0.1, dy + dy * 0.1)\n', (26039, 26065), True, 'import matplotlib.pyplot as plt\n'), ((26064, 26074), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (26072, 26074), True, 'import matplotlib.pyplot as plt\n'), ((26143, 26155), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (26153, 26155), True, 'import matplotlib.pyplot as plt\n'), ((26167, 26209), 'matplotlib.pyplot.GridSpec', 'plt.GridSpec', (['(3)', '(1)'], {'wspace': '(0.0)', 'hspace': '(0.0)'}), '(3, 1, wspace=0.0, hspace=0.0)\n', (26179, 26209), True, 'import matplotlib.pyplot as plt\n'), ((26237, 26260), 'matplotlib.pyplot.subplot', 'plt.subplot', (['grid[0, :]'], {}), '(grid[0, :])\n', (26248, 26260), True, 'import matplotlib.pyplot as plt\n'), ((26418, 26441), 'matplotlib.pyplot.subplot', 'plt.subplot', (['grid[1, :]'], {}), '(grid[1, :])\n', (26429, 26441), True, 'import matplotlib.pyplot as plt\n'), ((26680, 26690), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (26688, 26690), True, 'import matplotlib.pyplot as plt\n'), ((26834, 26846), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (26844, 26846), True, 'import matplotlib.pyplot as plt\n'), ((26851, 26896), 'matplotlib.pyplot.plot', 'plt.plot', (['l', 's', '"""-"""'], {'color': '"""k"""', 'linewidth': '(0.5)'}), "(l, s, '-', color='k', linewidth=0.5)\n", (26859, 26896), True, 'import matplotlib.pyplot as plt\n'), ((27014, 27055), 'matplotlib.pyplot.axhline', 'plt.axhline', (['(0)'], {'color': '"""r"""', 'linestyle': '"""--"""'}), "(0, color='r', linestyle='--')\n", (27025, 27055), True, 'import matplotlib.pyplot as plt\n'), ((27076, 27118), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\lambda_{\\\\text{obs}}$ (Å)"""'], {}), "('$\\\\lambda_{\\\\text{obs}}$ (Å)')\n", (27086, 27118), True, 'import matplotlib.pyplot as plt\n'), ((27122, 27142), 'matplotlib.pyplot.ylabel', 
'plt.ylabel', (['"""Counts"""'], {}), "('Counts')\n", (27132, 27142), True, 'import matplotlib.pyplot as plt\n'), ((27177, 27187), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (27185, 27187), True, 'import matplotlib.pyplot as plt\n'), ((27442, 27454), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (27452, 27454), True, 'import matplotlib.pyplot as plt\n'), ((27459, 27518), 'matplotlib.pyplot.plot', 'plt.plot', (['l', '(s / pref)', '"""-"""'], {'color': '"""darkgrey"""', 'linewidth': '(0.5)'}), "(l, s / pref, '-', color='darkgrey', linewidth=0.5)\n", (27467, 27518), True, 'import matplotlib.pyplot as plt\n'), ((27639, 27680), 'matplotlib.pyplot.plot', 'plt.plot', (['l_points', '(s_points / pref)', '"""ro"""'], {}), "(l_points, s_points / pref, 'ro')\n", (27647, 27680), True, 'import matplotlib.pyplot as plt\n'), ((27683, 27727), 'matplotlib.pyplot.plot', 'plt.plot', (['l', '(s_norm - 1)', '"""k-"""'], {'linewidth': '(0.5)'}), "(l, s_norm - 1, 'k-', linewidth=0.5)\n", (27691, 27727), True, 'import matplotlib.pyplot as plt\n'), ((27730, 27770), 'matplotlib.pyplot.axhline', 'plt.axhline', (['(0)'], {'color': '"""r"""', 'linestyle': '""":"""'}), "(0, color='r', linestyle=':')\n", (27741, 27770), True, 'import matplotlib.pyplot as plt\n'), ((27791, 27819), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\lambda$ (Å)"""'], {}), "('$\\\\lambda$ (Å)')\n", (27801, 27819), True, 'import matplotlib.pyplot as plt\n'), ((27824, 27850), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Norm. Counts"""'], {}), "('Norm. 
Counts')\n", (27834, 27850), True, 'import matplotlib.pyplot as plt\n'), ((27856, 27866), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (27864, 27866), True, 'import matplotlib.pyplot as plt\n'), ((28097, 28111), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (28109, 28111), True, 'import matplotlib.pyplot as plt\n'), ((28116, 28206), 'matplotlib.pyplot.plot', 'plt.plot', (['l', '(s / pref1)', '"""-"""'], {'color': '"""darkgrey"""', 'linewidth': '(0.5)', 'label': '"""Spectrum before"""'}), "(l, s / pref1, '-', color='darkgrey', linewidth=0.5, label=\n 'Spectrum before')\n", (28124, 28206), True, 'import matplotlib.pyplot as plt\n'), ((28438, 28503), 'matplotlib.pyplot.plot', 'plt.plot', (['l_points[0]', '(s_points[0] / pref1)', '"""ro"""'], {'ms': '(4)', 'alpha': '(0.3)'}), "(l_points[0], s_points[0] / pref1, 'ro', ms=4, alpha=0.3)\n", (28446, 28503), True, 'import matplotlib.pyplot as plt\n'), ((28506, 28560), 'matplotlib.pyplot.plot', 'plt.plot', (['l_points[1]', '(s_points[1] / pref1)', '"""ro"""'], {'ms': '(4)'}), "(l_points[1], s_points[1] / pref1, 'ro', ms=4)\n", (28514, 28560), True, 'import matplotlib.pyplot as plt\n'), ((28563, 28628), 'matplotlib.pyplot.plot', 'plt.plot', (['l_points[2]', '(s_points[2] / pref1)', '"""ro"""'], {'ms': '(4)', 'alpha': '(0.3)'}), "(l_points[2], s_points[2] / pref1, 'ro', ms=4, alpha=0.3)\n", (28571, 28628), True, 'import matplotlib.pyplot as plt\n'), ((28644, 28715), 'matplotlib.pyplot.plot', 'plt.plot', (['l', '(s_norm[1] - 1)', '"""k-"""'], {'linewidth': '(0.5)', 'label': '"""Spectrum after"""'}), "(l, s_norm[1] - 1, 'k-', linewidth=0.5, label='Spectrum after')\n", (28652, 28715), True, 'import matplotlib.pyplot as plt\n'), ((28718, 28778), 'matplotlib.pyplot.axhline', 'plt.axhline', (['(0)'], {'color': '"""g"""', 'linestyle': '"""--"""', 'label': '"""Continuum"""'}), "(0, color='g', linestyle='--', label='Continuum')\n", (28729, 28778), True, 'import matplotlib.pyplot as plt\n'), ((28791, 28857), 
'matplotlib.pyplot.axvline', 'plt.axvline', ([], {'x': 'l_ca[0]', 'color': '"""g"""', 'linestyle': '""":"""', 'label': '"""Ca lines"""'}), "(x=l_ca[0], color='g', linestyle=':', label='Ca lines')\n", (28802, 28857), True, 'import matplotlib.pyplot as plt\n'), ((28863, 28911), 'matplotlib.pyplot.axvline', 'plt.axvline', ([], {'x': 'l_ca[1]', 'color': '"""g"""', 'linestyle': '""":"""'}), "(x=l_ca[1], color='g', linestyle=':')\n", (28874, 28911), True, 'import matplotlib.pyplot as plt\n'), ((29272, 29300), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\lambda$ (Å)"""'], {}), "('$\\\\lambda$ (Å)')\n", (29282, 29300), True, 'import matplotlib.pyplot as plt\n'), ((29305, 29331), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Norm. Counts"""'], {}), "('Norm. Counts')\n", (29315, 29331), True, 'import matplotlib.pyplot as plt\n'), ((29337, 29354), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-1)', '(1.4)'], {}), '(-1, 1.4)\n', (29345, 29354), True, 'import matplotlib.pyplot as plt\n'), ((29359, 29369), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (29367, 29369), True, 'import matplotlib.pyplot as plt\n'), ((29554, 29566), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (29564, 29566), True, 'import matplotlib.pyplot as plt\n'), ((29571, 29641), 'matplotlib.pyplot.plot', 'plt.plot', (['l', '(s_dif * 100)', '"""-"""'], {'color': '"""lightgrey"""', 'label': '"""scatter($i$)"""'}), "(l, s_dif * 100, '-', color='lightgrey', label='scatter($i$)')\n", (29579, 29641), True, 'import matplotlib.pyplot as plt\n'), ((29646, 29713), 'matplotlib.pyplot.plot', 'plt.plot', (['l', '(s_std0 * 100)', '"""k-"""'], {'linewidth': '(1.0)', 'label': '"""$\\\\sigma_i$"""'}), "(l, s_std0 * 100, 'k-', linewidth=1.0, label='$\\\\sigma_i$')\n", (29654, 29713), True, 'import matplotlib.pyplot as plt\n'), ((29716, 29790), 'matplotlib.pyplot.plot', 'plt.plot', (['l', '(s_std * 100)', '"""r-"""'], {'linewidth': '(1.2)', 'label': '"""$\\\\mu_i(\\\\sigma_i)$"""'}), "(l, s_std * 
100, 'r-', linewidth=1.2, label='$\\\\mu_i(\\\\sigma_i)$')\n", (29724, 29790), True, 'import matplotlib.pyplot as plt\n'), ((29793, 29833), 'matplotlib.pyplot.axhline', 'plt.axhline', (['(0)'], {'linestyle': '""":"""', 'color': '"""k"""'}), "(0, linestyle=':', color='k')\n", (29804, 29833), True, 'import matplotlib.pyplot as plt\n'), ((29838, 29885), 'matplotlib.pyplot.axvline', 'plt.axvline', (['bands[0]'], {'linestyle': '""":"""', 'color': '"""b"""'}), "(bands[0], linestyle=':', color='b')\n", (29849, 29885), True, 'import matplotlib.pyplot as plt\n'), ((29890, 29937), 'matplotlib.pyplot.axvline', 'plt.axvline', (['bands[1]'], {'linestyle': '""":"""', 'color': '"""g"""'}), "(bands[1], linestyle=':', color='g')\n", (29901, 29937), True, 'import matplotlib.pyplot as plt\n'), ((29942, 29989), 'matplotlib.pyplot.axvline', 'plt.axvline', (['bands[2]'], {'linestyle': '""":"""', 'color': '"""g"""'}), "(bands[2], linestyle=':', color='g')\n", (29953, 29989), True, 'import matplotlib.pyplot as plt\n'), ((29994, 30041), 'matplotlib.pyplot.axvline', 'plt.axvline', (['bands[3]'], {'linestyle': '""":"""', 'color': '"""r"""'}), "(bands[3], linestyle=':', color='r')\n", (30005, 30041), True, 'import matplotlib.pyplot as plt\n'), ((30062, 30090), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\lambda$ (Å)"""'], {}), "('$\\\\lambda$ (Å)')\n", (30072, 30090), True, 'import matplotlib.pyplot as plt\n'), ((30095, 30126), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Uncertainty (\\\\%)"""'], {}), "('Uncertainty (\\\\%)')\n", (30105, 30126), True, 'import matplotlib.pyplot as plt\n'), ((30131, 30161), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""', 'ncol': '(1)'}), "(loc='best', ncol=1)\n", (30141, 30161), True, 'import matplotlib.pyplot as plt\n'), ((30195, 30212), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-10)', '(50)'], {}), '(-10, 50)\n', (30203, 30212), True, 'import matplotlib.pyplot as plt\n'), ((30217, 30227), 'matplotlib.pyplot.show', 'plt.show', 
([], {}), '()\n', (30225, 30227), True, 'import matplotlib.pyplot as plt\n'), ((30647, 30689), 'matplotlib.pyplot.GridSpec', 'plt.GridSpec', (['y', '(4)'], {'wspace': '(0.0)', 'hspace': '(0.0)'}), '(y, 4, wspace=0.0, hspace=0.0)\n', (30659, 30689), True, 'import matplotlib.pyplot as plt\n'), ((32443, 32453), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (32451, 32453), True, 'import matplotlib.pyplot as plt\n'), ((32580, 32594), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (32592, 32594), True, 'import matplotlib.pyplot as plt\n'), ((33742, 33752), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (33750, 33752), True, 'import matplotlib.pyplot as plt\n'), ((34204, 34216), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (34214, 34216), True, 'import matplotlib.pyplot as plt\n'), ((34249, 34322), 'matplotlib.pyplot.plot', 'plt.plot', (['disp', 'yfit_below', '"""r-"""'], {'linewidth': '"""1.5"""', 'label': '"""Polynomial fit"""'}), "(disp, yfit_below, 'r-', linewidth='1.5', label='Polynomial fit')\n", (34257, 34322), True, 'import matplotlib.pyplot as plt\n'), ((34327, 34400), 'matplotlib.pyplot.plot', 'plt.plot', (['disp', 'yfit_above', '"""g-"""'], {'linewidth': '"""1.5"""', 'label': '"""Polynomial fit"""'}), "(disp, yfit_above, 'g-', linewidth='1.5', label='Polynomial fit')\n", (34335, 34400), True, 'import matplotlib.pyplot as plt\n'), ((34491, 34524), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Dispersion (pixels)"""'], {}), "('Dispersion (pixels)')\n", (34501, 34524), True, 'import matplotlib.pyplot as plt\n'), ((34526, 34565), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Cross Dispersion (pixels)"""'], {}), "('Cross Dispersion (pixels)')\n", (34536, 34565), True, 'import matplotlib.pyplot as plt\n'), ((34570, 34580), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (34578, 34580), True, 'import matplotlib.pyplot as plt\n'), ((34644, 34656), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', 
(34654, 34656), True, 'import matplotlib.pyplot as plt\n'), ((34661, 34682), 'matplotlib.pyplot.plot', 'plt.plot', (['l_sky', '"""k-"""'], {}), "(l_sky, 'k-')\n", (34669, 34682), True, 'import matplotlib.pyplot as plt\n'), ((34687, 34720), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Dispersion (pixels)"""'], {}), "('Dispersion (pixels)')\n", (34697, 34720), True, 'import matplotlib.pyplot as plt\n'), ((34722, 34749), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Median Counts"""'], {}), "('Median Counts')\n", (34732, 34749), True, 'import matplotlib.pyplot as plt\n'), ((34754, 34764), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (34762, 34764), True, 'import matplotlib.pyplot as plt\n'), ((34886, 34898), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (34896, 34898), True, 'import matplotlib.pyplot as plt\n'), ((34903, 34977), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'dif0', '"""-"""'], {'color': '"""lightgrey"""', 'linewidth': '(1.3)', 'label': '"""Outliers"""'}), "(t, dif0, '-', color='lightgrey', linewidth=1.3, label='Outliers')\n", (34911, 34977), True, 'import matplotlib.pyplot as plt\n'), ((34982, 35039), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'dif1', '"""k-"""'], {'label': '"""Corrected"""', 'linewidth': '(0.8)'}), "(t, dif1, 'k-', label='Corrected', linewidth=0.8)\n", (34990, 35039), True, 'import matplotlib.pyplot as plt\n'), ((35044, 35070), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'cut_up', '"""r--"""'], {}), "(t, cut_up, 'r--')\n", (35052, 35070), True, 'import matplotlib.pyplot as plt\n'), ((35075, 35117), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'cut_dw', '"""r--"""'], {'label': '"""Cutoff"""'}), "(t, cut_dw, 'r--', label='Cutoff')\n", (35083, 35117), True, 'import matplotlib.pyplot as plt\n'), ((35314, 35336), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (35324, 35336), True, 'import matplotlib.pyplot as plt\n'), ((35341, 35351), 'matplotlib.pyplot.show', 
'plt.show', ([], {}), '()\n', (35349, 35351), True, 'import matplotlib.pyplot as plt\n'), ((35800, 35812), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (35810, 35812), True, 'import matplotlib.pyplot as plt\n'), ((35823, 35900), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'y'], {'c': 'time', 's': '(40)', 'linewidth': '(0.5)', 'edgecolor': '"""k"""', 'cmap': '"""rainbow"""'}), "(x, y, c=time, s=40, linewidth=0.5, edgecolor='k', cmap='rainbow')\n", (35834, 35900), True, 'import matplotlib.pyplot as plt\n'), ((35905, 35921), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['sc'], {}), '(sc)\n', (35917, 35921), True, 'import matplotlib.pyplot as plt\n'), ((35926, 35958), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Dispersion (pixel)"""'], {}), "('Dispersion (pixel)')\n", (35936, 35958), True, 'import matplotlib.pyplot as plt\n'), ((35963, 36001), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Cross Dispersion (pixel)"""'], {}), "('Cross Dispersion (pixel)')\n", (35973, 36001), True, 'import matplotlib.pyplot as plt\n'), ((36006, 36016), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (36014, 36016), True, 'import matplotlib.pyplot as plt\n'), ((36496, 36510), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (36508, 36510), True, 'import matplotlib.pyplot as plt\n'), ((36913, 36940), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'color': '"""lightgrey"""'}), "(color='lightgrey')\n", (36921, 36940), True, 'import matplotlib.pyplot as plt\n'), ((36975, 36985), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (36983, 36985), True, 'import matplotlib.pyplot as plt\n'), ((37361, 37458), 'matplotlib.pyplot.scatter', 'plt.scatter', (['array1[:, 1]', 'array1[:, 0]'], {'marker': '"""o"""', 'facecolors': '"""none"""', 'edgecolors': '"""darkgrey"""'}), "(array1[:, 1], array1[:, 0], marker='o', facecolors='none',\n edgecolors='darkgrey')\n", (37372, 37458), True, 'import matplotlib.pyplot as plt\n'), ((37457, 37547), 
'matplotlib.pyplot.scatter', 'plt.scatter', (['array2[:, 1]', 'array2[:, 0]'], {'marker': '"""o"""', 'facecolors': '"""none"""', 'edgecolors': '"""b"""'}), "(array2[:, 1], array2[:, 0], marker='o', facecolors='none',\n edgecolors='b')\n", (37468, 37547), True, 'import matplotlib.pyplot as plt\n'), ((37546, 37647), 'matplotlib.pyplot.scatter', 'plt.scatter', (['array2[:, 1][indices2]', 'array2[:, 0][indices2]'], {'s': '(100.0)', 'marker': '"""+"""', 'edgecolors': '"""r"""'}), "(array2[:, 1][indices2], array2[:, 0][indices2], s=100.0, marker\n ='+', edgecolors='r')\n", (37557, 37647), True, 'import matplotlib.pyplot as plt\n'), ((37823, 37848), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$x$ (pixel)"""'], {}), "('$x$ (pixel)')\n", (37833, 37848), True, 'import matplotlib.pyplot as plt\n'), ((37851, 37876), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$y$ (pixel)"""'], {}), "('$y$ (pixel)')\n", (37861, 37876), True, 'import matplotlib.pyplot as plt\n'), ((37882, 37892), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (37890, 37892), True, 'import matplotlib.pyplot as plt\n'), ((38299, 38330), 'sys.stdout.write', 'sys.stdout.write', (['u"""\x1b[1000D"""'], {}), "(u'\\x1b[1000D')\n", (38315, 38330), False, 'import time, sys, pylab, math, julian\n'), ((38337, 38355), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (38353, 38355), False, 'import time, sys, pylab, math, julian\n'), ((38360, 38373), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (38370, 38373), False, 'import time, sys, pylab, math, julian\n'), ((38427, 38445), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (38443, 38445), False, 'import time, sys, pylab, math, julian\n'), ((38798, 38835), 'sys.stdout.write', 'sys.stdout.write', (["(u'\\x1b[1000D' + bar)"], {}), "(u'\\x1b[1000D' + bar)\n", (38814, 38835), False, 'import time, sys, pylab, math, julian\n'), ((38843, 38861), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (38859, 38861), False, 'import time, 
sys, pylab, math, julian\n'), ((3102, 3131), 'matplotlib.pyplot.title', 'plt.title', (['title'], {'fontsize': '(15)'}), '(title, fontsize=15)\n', (3111, 3131), True, 'import matplotlib.pyplot as plt\n'), ((4262, 4280), 'Plot_Tools.linear', 'linear', (['img', 'sigma'], {}), '(img, sigma)\n', (4268, 4280), False, 'from Plot_Tools import linear\n'), ((5293, 5316), 'mpl_toolkits.axes_grid1.make_axes_locatable', 'make_axes_locatable', (['ax'], {}), '(ax)\n', (5312, 5316), False, 'from mpl_toolkits.axes_grid1 import make_axes_locatable\n'), ((5487, 5512), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['im'], {'cax': 'cax'}), '(im, cax=cax)\n', (5499, 5512), True, 'import matplotlib.pyplot as plt\n'), ((6834, 6852), 'numpy.ones', 'np.ones', (['len_cross'], {}), '(len_cross)\n', (6841, 6852), True, 'import numpy as np\n'), ((7359, 7418), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, 1.03]', '[ref_cen_pos[i], ref_cen_pos[i]]', '"""k:"""'], {}), "([0, 1.03], [ref_cen_pos[i], ref_cen_pos[i]], 'k:')\n", (7367, 7418), True, 'import matplotlib.pyplot as plt\n'), ((11502, 11516), 'Plot_Tools.linear', 'linear', (['data.T'], {}), '(data.T)\n', (11508, 11516), False, 'from Plot_Tools import linear\n'), ((11578, 11590), 'Plot_Tools.linear', 'linear', (['mask'], {}), '(mask)\n', (11584, 11590), False, 'from Plot_Tools import linear\n'), ((15009, 15023), 'Plot_Tools.linear', 'linear', (['data.T'], {}), '(data.T)\n', (15015, 15023), False, 'from Plot_Tools import linear\n'), ((15452, 15466), 'numpy.max', 'np.max', (['F_back'], {}), '(F_back)\n', (15458, 15466), True, 'import numpy as np\n'), ((15482, 15496), 'numpy.max', 'np.max', (['S_back'], {}), '(S_back)\n', (15488, 15496), True, 'import numpy as np\n'), ((15578, 15589), 'Plot_Tools.linear', 'linear', (['S.T'], {}), '(S.T)\n', (15584, 15589), False, 'from Plot_Tools import linear\n'), ((16762, 16775), 'Plot_Tools.linear', 'linear', (['img.T'], {}), '(img.T)\n', (16768, 16775), False, 'from Plot_Tools import linear\n'), ((21702, 
21715), 'Plot_Tools.linear', 'linear', (['img.T'], {}), '(img.T)\n', (21708, 21715), False, 'from Plot_Tools import linear\n'), ((23935, 23948), 'Plot_Tools.linear', 'linear', (['img.T'], {}), '(img.T)\n', (23941, 23948), False, 'from Plot_Tools import linear\n'), ((25448, 25480), 'numpy.max', 'np.max', (['(f_lincor[1] / dif_max[1])'], {}), '(f_lincor[1] / dif_max[1])\n', (25454, 25480), True, 'import numpy as np\n'), ((26936, 27004), 'matplotlib.pyplot.axvline', 'plt.axvline', ([], {'x': 'l_ca[i]', 'ymax': '(1)', 'color': '"""g"""', 'linestyle': '"""-."""', 'alpha': '(1.0)'}), "(x=l_ca[i], ymax=1, color='g', linestyle='-.', alpha=1.0)\n", (26947, 27004), True, 'import matplotlib.pyplot as plt\n'), ((27548, 27597), 'matplotlib.pyplot.axvline', 'plt.axvline', ([], {'x': 'l_ca[i]', 'color': '"""g"""', 'linestyle': '"""-."""'}), "(x=l_ca[i], color='g', linestyle='-.')\n", (27559, 27597), True, 'import matplotlib.pyplot as plt\n'), ((36268, 36295), 'julian.from_jd', 'julian.from_jd', (['i'], {'fmt': '"""jd"""'}), "(i, fmt='jd')\n", (36282, 36295), False, 'import time, sys, pylab, math, julian\n'), ((1868, 1915), 'matplotlib.pyplot.plot', 'plt.plot', (['data[i][:, 0]', 'data[i][:, 1]', 'mark[i]'], {}), '(data[i][:, 0], data[i][:, 1], mark[i])\n', (1876, 1915), True, 'import matplotlib.pyplot as plt\n'), ((2031, 2099), 'matplotlib.pyplot.plot', 'plt.plot', (['data[i][:, 0]', 'data[i][:, 1]', 'mark[i]'], {'label': 'legend[i + 1]'}), '(data[i][:, 0], data[i][:, 1], mark[i], label=legend[i + 1])\n', (2039, 2099), True, 'import matplotlib.pyplot as plt\n'), ((6062, 6072), 'numpy.std', 'np.std', (['BF'], {}), '(BF)\n', (6068, 6072), True, 'import numpy as np\n'), ((6074, 6085), 'numpy.mean', 'np.mean', (['BF'], {}), '(BF)\n', (6081, 6085), True, 'import numpy as np\n'), ((6141, 6151), 'numpy.std', 'np.std', (['DF'], {}), '(DF)\n', (6147, 6151), True, 'import numpy as np\n'), ((6153, 6164), 'numpy.mean', 'np.mean', (['DF'], {}), '(DF)\n', (6160, 6164), True, 'import numpy as 
np\n'), ((6220, 6230), 'numpy.std', 'np.std', (['FF'], {}), '(FF)\n', (6226, 6230), True, 'import numpy as np\n'), ((6232, 6243), 'numpy.mean', 'np.mean', (['FF'], {}), '(FF)\n', (6239, 6243), True, 'import numpy as np\n'), ((9398, 9411), 'numpy.min', 'np.min', (['order'], {}), '(order)\n', (9404, 9411), True, 'import numpy as np\n'), ((15654, 15663), 'numpy.min', 'np.min', (['I'], {}), '(I)\n', (15660, 15663), True, 'import numpy as np\n'), ((15670, 15679), 'numpy.max', 'np.max', (['I'], {}), '(I)\n', (15676, 15679), True, 'import numpy as np\n'), ((17427, 17438), 'numpy.max', 'np.max', (['img'], {}), '(img)\n', (17433, 17438), True, 'import numpy as np\n'), ((17483, 17494), 'numpy.max', 'np.max', (['img'], {}), '(img)\n', (17489, 17494), True, 'import numpy as np\n'), ((26287, 26311), 'numpy.median', 'np.median', (['s_blazecor[1]'], {}), '(s_blazecor[1])\n', (26296, 26311), True, 'import numpy as np\n'), ((26468, 26492), 'numpy.median', 'np.median', (['s_blazecor[0]'], {}), '(s_blazecor[0])\n', (26477, 26492), True, 'import numpy as np\n'), ((10155, 10172), 'numpy.nanargmax', 'np.nanargmax', (['fit'], {}), '(fit)\n', (10167, 10172), True, 'import numpy as np\n'), ((10252, 10269), 'numpy.nanargmax', 'np.nanargmax', (['fit'], {}), '(fit)\n', (10264, 10269), True, 'import numpy as np\n'), ((10349, 10366), 'numpy.nanargmax', 'np.nanargmax', (['fit'], {}), '(fit)\n', (10361, 10366), True, 'import numpy as np\n'), ((10491, 10508), 'numpy.nanargmax', 'np.nanargmax', (['fit'], {}), '(fit)\n', (10503, 10508), True, 'import numpy as np\n'), ((10598, 10615), 'numpy.nanargmax', 'np.nanargmax', (['fit'], {}), '(fit)\n', (10610, 10615), True, 'import numpy as np\n'), ((10760, 10777), 'numpy.nanargmax', 'np.nanargmax', (['fit'], {}), '(fit)\n', (10772, 10777), True, 'import numpy as np\n'), ((10869, 10886), 'numpy.nanargmax', 'np.nanargmax', (['fit'], {}), '(fit)\n', (10881, 10886), True, 'import numpy as np\n'), ((11006, 11023), 'numpy.nanargmax', 'np.nanargmax', (['fit'], 
{}), '(fit)\n', (11018, 11023), True, 'import numpy as np\n')] |
import collections
import logging
import os
import tempfile
import numpy
from svviz.utilities import mean, stddev
from svviz.kde import gaussian_kde
from svviz import plotting
def removeOutliers(data, m = 10.):
    """Trim high outliers from a list/array using outlier-safe statistics.

    The center is estimated by the median and the spread by the median
    absolute deviation (MAD); values whose signed deviation exceeds ``m``
    MADs are dropped.  Because the deviation is signed, only the upper
    tail is removed, not the lower tail.

    Args:
        data: sequence of numbers; inputs with fewer than 2 elements are
            returned unchanged
        m: cutoff, in MAD units

    Returns:
        a 1-d numpy array with upper-tail outliers removed (or the input
        as-is when it has fewer than 2 elements)
    """
    if len(data) < 2:
        return data

    data = numpy.array(data)
    center = numpy.median(data)
    mdev = numpy.median(numpy.abs(data - center))
    if not mdev:
        # zero spread (at least half the values sit exactly on the median):
        # nothing can be called an outlier.  The original code computed a
        # scalar s = 0. here, and data[s < m] then indexed with a plain bool,
        # silently adding a leading axis to the result.
        return data
    s = (data - center) / mdev
    return data[s < m]
def chooseOrientation(orientations):
    """Pick the dominant read-pair orientation(s) from the observed counts.

    Candidates are walked from most to least common; each one is kept as
    long as the previously kept orientation is less than twice as common.
    Returns the string "any" when unpaired reads dominate, otherwise a
    list of two-character strings such as "+-" (one per kept orientation,
    "+" = forward strand, "-" = reverse strand).
    """
    logging.info("  counts +/-:{:<6} -/+:{:<6} +/+:{:<6} -/-:{:<6} unpaired:{:<6}".format(
        orientations[False, True],
        orientations[True, False],
        orientations[True, True],
        orientations[False, False],
        orientations["unpaired"]))

    by_count = sorted(orientations, key=lambda key: orientations[key])
    kept = [by_count.pop()]
    while by_count:
        nxt = by_count.pop()
        if orientations[kept[-1]] >= 2 * orientations[nxt]:
            break
        kept.append(nxt)

    if kept[0] == "unpaired":
        return "any"
    strand = {False: "+", True: "-"}
    return ["".join(strand[flag] for flag in pair) for pair in kept]
def getSearchRegions(bam, minLength=0):
    """Yield (chromosome, start, end) windows to sample reads from.

    References no longer than ``minLength`` are skipped.  A fixed interior
    window (2.5Mb-50Mb) is yielded first for every chromosome (sorted
    lexicographically by name), followed by a whole-chromosome
    (None, None) pass.
    """
    usable = [bam.getrname(refID) for refID in range(bam.nreferences)
              if bam.lengths[refID] > minLength]

    for window_start, window_end in [(2500000, 50000000), (None, None)]:
        for name in sorted(usable):
            yield name, window_start, window_end
def sampleInsertSizes(bam, maxreads=50000, skip=0, minmapq=40, maxExpectedSize=20000, keepReads=False):
    """Sample the insert size distribution from well-behaved read pairs.

    Gets the insert size distribution, cutting off the tail at the high end
    (via removeOutliers) and removing most oddly mapping pairs.  50,000 reads
    seems to be sufficient to get a nice distribution, and higher values
    don't tend to change the distribution much.

    Args:
        bam: an open alignment file supporting .fetch() and read records
            with pysam-style attributes -- TODO confirm pysam is the backend
        maxreads: stop after this many accepted pairs
        skip: number of initial reads to discard before sampling
        minmapq: minimum mapping quality for a pair to be counted
        maxExpectedSize: NOTE(review) -- currently unused; see the
            commented-out size filter below
        keepReads: when True, also collect the accepted reads themselves

    Returns:
        (insertSizes, reads, chosenOrientations, readLengths): insert sizes
        with the upper tail trimmed, the kept reads ([] unless keepReads),
        the orientation(s) chosen by chooseOrientation(), and a numpy array
        of sampled read lengths.
    """
    inserts = []
    readLengths = []
    count = 0
    reads = []  # only populated when keepReads is True
    orientations = collections.Counter()
    # per-read mismatch / indel rates; accumulated here but not returned
    NMs = []
    INDELs = []
    for chrom, start, end in getSearchRegions(bam):
        for read in bam.fetch(chrom, start, end):
            if skip > 0:
                skip -= 1
                continue
            try:
                # mismatch rate: edit distance (NM tag) per base
                NMs.append(read.opt("NM")/float(len(read.seq)))
            except KeyError:
                pass
            try:
                # NOTE(review): on Python 3, zip() returns an iterator and is
                # not subscriptable, so this always raises TypeError and
                # INDELs stays empty -- confirm whether this Python-2-era
                # statistic is still wanted
                INDELs.append(sum(1 for i in zip(*read.cigartuples)[1] if i in[1,2])/float(len(read.seq)))
            except TypeError:
                pass
            if orientations["unpaired"] > 2500 and count < 1000:
                # bail out early if it looks like it's single-ended
                break
            # keep only clean, same-chromosome proper pairs, counted once
            # via read1 of each pair
            if not read.is_paired:
                orientations["unpaired"] += 1
                readLengths.append(len(read.seq))
                continue
            if not read.is_read1:
                continue
            if not read.is_proper_pair:
                continue
            if read.is_unmapped or read.mate_is_unmapped:
                continue
            if read.tid != read.rnext:
                continue
            if read.mapq < minmapq:
                continue
            # if abs(read.isize) < 20000:
            inserts.append(abs(read.isize))
            # record the orientation leftmost-mate-first: if this read
            # starts after its mate, flip both strand flags
            curOrient = (read.is_reverse, read.mate_is_reverse)
            if read.reference_start > read.next_reference_start:
                curOrient = not curOrient[0], not curOrient[1]
            orientations[curOrient] += 1
            readLengths.append(len(read.seq))
            if keepReads:
                reads.append(read)
            count += 1
            if count > maxreads:
                break
        if count >= maxreads:
            break
    chosenOrientations = chooseOrientation(orientations)
    return removeOutliers(inserts), reads, chosenOrientations, numpy.array(readLengths)
class ReadStatistics(object):
    """Insert-size, read-length and orientation statistics sampled from a BAM.

    Construction samples read pairs via sampleInsertSizes(); the accessor
    methods below return None (or 0 for scoreInsertSize) when too few
    observations were collected to be meaningful.
    """
    def __init__(self, bam, keepReads=False):
        self.insertSizes = []
        self.readLengths = []
        self.orientations = []
        self.singleEnded = False

        self._insertSizeKDE = None
        self._insertSizeScores = {}  # cache: memoized KDE scores keyed by |isize|

        try:
            (self.insertSizes, self.reads,
             self.orientations, self.readLengths) = sampleInsertSizes(bam, keepReads=keepReads)
            if len(self.insertSizes) > 1:
                logging.info("  insert size mean: {:.2f} std: {:.2f}".format(numpy.mean(self.insertSizes), numpy.std(self.insertSizes)))
        except ValueError as e:
            print("*"*100, "here")
            print("ERROR:", e)

    def hasInsertSizeDistribution(self):
        """True when enough pairs were sampled to trust the insert sizes."""
        return len(self.insertSizes) > 1000

    def meanInsertSize(self):
        return mean(self.insertSizes) if self.hasInsertSizeDistribution() else None

    def stddevInsertSize(self):
        return stddev(self.insertSizes) if self.hasInsertSizeDistribution() else None

    def scoreInsertSize(self, isize):
        """Density of |isize| under a KDE of the sampled insert sizes (0 if unavailable)."""
        if not self.hasInsertSizeDistribution():
            return 0

        if self._insertSizeKDE is None:
            self._insertSizeKDE = gaussian_kde(self.insertSizes)

        # the gaussian kde call is pretty slow with ~50,000 data points in
        # it, so cache each distinct |isize| that gets scored
        key = abs(isize)
        if key not in self._insertSizeScores:
            self._insertSizeScores[key] = self._insertSizeKDE(key)
        return self._insertSizeScores[key]

    def hasReadLengthDistribution(self):
        """True when enough reads were sampled to trust the read lengths."""
        return len(self.readLengths) > 1000

    def meanReadLength(self):
        return mean(self.readLengths) if self.hasReadLengthDistribution() else None

    def stddevReadLength(self):
        return stddev(self.readLengths) if self.hasReadLengthDistribution() else None

    def readLengthUpperQuantile(self):
        return numpy.percentile(self.readLengths, 99) if self.hasReadLengthDistribution() else None
def plotInsertSizeDistribution(isd, sampleName, dataHub):
    """Render an ECDF plot (via rpy2/R) of the sampled insert-size
    distribution against the per-allele distributions for one sample.

    Args:
        isd: ReadStatistics-like object providing .insertSizes
        sampleName: sample label; also used as the output file name
        dataHub: provides .samples[sampleName].chosenSets(allele)

    Returns:
        the PNG file contents, or None when rpy2 is not installed.
    """
    try:
        from rpy2 import robjects as ro

        # NOTE(review): the temporary directory is never removed, so each
        # call leaks one directory on disk
        d = tempfile.mkdtemp()
        filename = os.path.join(d, sampleName)
        if not filename.endswith(".png"):
            filename += ".png"
        ro.r.png(filename, res=250, width=1200, height=1200)

        alleles = ["alt", "ref", "amb"]
        others = [[len(chosenSet) for chosenSet in dataHub.samples[sampleName].chosenSets(allele)] for allele in alleles]

        plotting.ecdf([isd.insertSizes]+others, ["average"]+alleles, xlab="Insert size (bp)", main=sampleName, legendWhere="bottomright", lwd=2)

        ro.r["dev.off"]()

        # close the handle deterministically (the original open().read()
        # leaked it).  NOTE(review): PNG is binary data; on Python 3 this
        # should probably use mode "rb" -- confirm how callers consume it.
        with open(filename) as plotFile:
            data = plotFile.read()
        return data
    except ImportError:
        return None
| [
"numpy.median",
"numpy.std",
"svviz.kde.gaussian_kde",
"rpy2.robjects.r.png",
"numpy.percentile",
"tempfile.mkdtemp",
"numpy.array",
"svviz.utilities.stddev",
"numpy.mean",
"collections.Counter",
"svviz.plotting.ecdf",
"os.path.join",
"svviz.utilities.mean"
] | [((459, 476), 'numpy.array', 'numpy.array', (['data'], {}), '(data)\n', (470, 476), False, 'import numpy\n'), ((571, 590), 'numpy.median', 'numpy.median', (['d_abs'], {}), '(d_abs)\n', (583, 590), False, 'import numpy\n'), ((2523, 2544), 'collections.Counter', 'collections.Counter', ([], {}), '()\n', (2542, 2544), False, 'import collections\n'), ((541, 559), 'numpy.median', 'numpy.median', (['data'], {}), '(data)\n', (553, 559), False, 'import numpy\n'), ((4426, 4450), 'numpy.array', 'numpy.array', (['readLengths'], {}), '(readLengths)\n', (4437, 4450), False, 'import numpy\n'), ((6788, 6806), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (6804, 6806), False, 'import tempfile\n'), ((6826, 6853), 'os.path.join', 'os.path.join', (['d', 'sampleName'], {}), '(d, sampleName)\n', (6838, 6853), False, 'import os\n'), ((6937, 6989), 'rpy2.robjects.r.png', 'ro.r.png', (['filename'], {'res': '(250)', 'width': '(1200)', 'height': '(1200)'}), '(filename, res=250, width=1200, height=1200)\n', (6945, 6989), True, 'from rpy2 import robjects as ro\n'), ((7161, 7306), 'svviz.plotting.ecdf', 'plotting.ecdf', (['([isd.insertSizes] + others)', "(['average'] + alleles)"], {'xlab': '"""Insert size (bp)"""', 'main': 'sampleName', 'legendWhere': '"""bottomright"""', 'lwd': '(2)'}), "([isd.insertSizes] + others, ['average'] + alleles, xlab=\n 'Insert size (bp)', main=sampleName, legendWhere='bottomright', lwd=2)\n", (7174, 7306), False, 'from svviz import plotting\n'), ((506, 524), 'numpy.median', 'numpy.median', (['data'], {}), '(data)\n', (518, 524), False, 'import numpy\n'), ((5371, 5393), 'svviz.utilities.mean', 'mean', (['self.insertSizes'], {}), '(self.insertSizes)\n', (5375, 5393), False, 'from svviz.utilities import mean, stddev\n'), ((5511, 5535), 'svviz.utilities.stddev', 'stddev', (['self.insertSizes'], {}), '(self.insertSizes)\n', (5517, 5535), False, 'from svviz.utilities import mean, stddev\n'), ((5741, 5771), 'svviz.kde.gaussian_kde', 'gaussian_kde', 
(['self.insertSizes'], {}), '(self.insertSizes)\n', (5753, 5771), False, 'from svviz.kde import gaussian_kde\n'), ((6318, 6340), 'svviz.utilities.mean', 'mean', (['self.readLengths'], {}), '(self.readLengths)\n', (6322, 6340), False, 'from svviz.utilities import mean, stddev\n'), ((6458, 6482), 'svviz.utilities.stddev', 'stddev', (['self.readLengths'], {}), '(self.readLengths)\n', (6464, 6482), False, 'from svviz.utilities import mean, stddev\n'), ((6607, 6645), 'numpy.percentile', 'numpy.percentile', (['self.readLengths', '(99)'], {}), '(self.readLengths, 99)\n', (6623, 6645), False, 'import numpy\n'), ((4990, 5018), 'numpy.mean', 'numpy.mean', (['self.insertSizes'], {}), '(self.insertSizes)\n', (5000, 5018), False, 'import numpy\n'), ((5020, 5047), 'numpy.std', 'numpy.std', (['self.insertSizes'], {}), '(self.insertSizes)\n', (5029, 5047), False, 'import numpy\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sat Apr 18 15:08:47 2015
@author: Ben
"""
# Demo: plot two curves on shared axes and write the figure to
# 'two_curves-legend.png' with a legend entry per curve.
import clearplot.plot_functions as pf
import numpy as np
# first curve: square root sampled on [0, 10) every 0.01
xa = np.arange(0,10,0.01)
ya = np.sqrt(xa)
# second curve: cubic polynomial of the normalized abscissa x/10
xb = xa
yb = 3.0 * (xb/10)**3 - 2.0 * (xb/10)**2
# labels use TeX-style markup; x_label/y_label are presumably
# [label, units] pairs ('in', 'lbf') -- verify against the clearplot docs
[fig, ax, curves] = pf.plot('two_curves-legend.png', [xa, xb], [ya, yb], \
    labels = ['\kappa_y', '\kappa_z'], \
    x_label = ['\zeta_{yz}', 'in'], y_label = ['\kappa_j', 'lbf']) | [
"numpy.arange",
"clearplot.plot_functions.plot",
"numpy.sqrt"
] | [((145, 167), 'numpy.arange', 'np.arange', (['(0)', '(10)', '(0.01)'], {}), '(0, 10, 0.01)\n', (154, 167), True, 'import numpy as np\n'), ((171, 182), 'numpy.sqrt', 'np.sqrt', (['xa'], {}), '(xa)\n', (178, 182), True, 'import numpy as np\n'), ((253, 405), 'clearplot.plot_functions.plot', 'pf.plot', (['"""two_curves-legend.png"""', '[xa, xb]', '[ya, yb]'], {'labels': "['\\\\kappa_y', '\\\\kappa_z']", 'x_label': "['\\\\zeta_{yz}', 'in']", 'y_label': "['\\\\kappa_j', 'lbf']"}), "('two_curves-legend.png', [xa, xb], [ya, yb], labels=['\\\\kappa_y',\n '\\\\kappa_z'], x_label=['\\\\zeta_{yz}', 'in'], y_label=['\\\\kappa_j', 'lbf'])\n", (260, 405), True, 'import clearplot.plot_functions as pf\n')] |
# Evaluate a trained pose-estimation network on per-category test splits
# (IntraDataset = ObjectNet3D, InterDataset = Pascal3D; see below).
import numpy as np
import os, sys
from os.path import join, dirname
import argparse
from tqdm import tqdm
import pandas as pd
import pickle
import matplotlib
matplotlib.use('agg') # use matplotlib without GUI support
sys.path.append('./auxiliary/')
from auxiliary.model import PoseEstimator
from auxiliary.dataset import Pascal3D
from auxiliary.utils import load_checkpoint
from evaluation import test_category
# =================PARAMETERS=============================== #
parser = argparse.ArgumentParser()
parser.add_argument('--model', type=str, default=None, help='model path')
parser.add_argument('--class_data', type=str, default=None, help='offline computed mean class data path')
parser.add_argument('--output', type=str, default='results', help='testing results save path')
parser.add_argument('--setting', type=str, default=None, choices=['IntraDataset', 'InterDataset'])
parser.add_argument('--root_dir', type=str, default=None, help='dataset directory')
parser.add_argument('--input_dim', type=int, default=224, help='input image dimension')
parser.add_argument('--point_num', type=int, default=2500, help='number of points used in each sample')
parser.add_argument('--img_feature_dim', type=int, default=512, help='feature dimension for images')
parser.add_argument('--shape_feature_dim', type=int, default=512, help='feature dimension for shapes')
parser.add_argument('--bin_size', type=int, default=15, help='bin size for the euler angle classification')
opt = parser.parse_args()
print(opt)
# ========================================================== #
# ================CREATE NETWORK============================ #
# one classification bin per `bin_size` degrees: azimuth and in-plane
# rotation span 360 degrees, elevation spans 180 degrees
azi_classes, ele_classes, inp_classes = int(360 / opt.bin_size), int(180 / opt.bin_size), int(360 / opt.bin_size)
model = PoseEstimator(shape_feature_dim=opt.shape_feature_dim, img_feature_dim=opt.img_feature_dim,
                      azi_classes=azi_classes, ele_classes=ele_classes, inp_classes=inp_classes)
# NOTE(review): assumes a CUDA-capable GPU is available
model.cuda()
load_checkpoint(model, opt.model)
# ========================================================== #
# ========================================================== #
# =============DEFINE stuff for logs ======================= #
# write basic information into the log file
# makedirs(exist_ok=True) is race-free and also creates missing parents,
# unlike the original isdir()+mkdir() pair
os.makedirs(opt.output, exist_ok=True)
logname = os.path.join(opt.output, 'testing.txt')
# truncate/create the log file; the context manager guarantees the handle
# is closed even if the write fails (the original left it open on failure)
with open(logname, mode='w') as f:
    f.write('\n')
# ========================================================== #
# pick the annotation file and the list of categories to evaluate
# according to the chosen protocol
if opt.setting == 'IntraDataset':
    annotation_file = 'ObjectNet3D.txt'
    test_cls = ['bed', 'bookshelf', 'calculator', 'cellphone', 'computer', 'door', 'filing_cabinet', 'guitar', 'iron',
                'knife', 'microwave', 'pen', 'pot', 'rifle', 'shoe', 'slipper', 'stove', 'toilet', 'tub', 'wheelchair']
elif opt.setting == 'InterDataset':
    annotation_file = 'Pascal3D.txt'
    test_cls = ['aeroplane', 'bicycle', 'boat', 'bottle', 'bus', 'car',
                'chair', 'diningtable', 'motorbike', 'sofa', 'train', 'tvmonitor']
else:
    sys.exit('Wrong setting!')
mean_class_data = pickle.load(open(opt.class_data, 'rb'))
Err_All = []
for cls in test_cls:
    class_data = mean_class_data[cls]
    # test split restricted to a single category
    dataset_test = Pascal3D(train=False, root_dir=opt.root_dir, annotation_file=annotation_file,
                            cls_choice=[cls], input_dim=opt.input_dim, point_num=opt.point_num)
    # per-category evaluation: accuracy, median error, and the per-image
    # errors (accumulated for the cross-category summary below)
    Acc, Med, Errs = test_category(dataset_test, model, opt.bin_size, cls, opt.output, logname, class_data)
    Err_All.extend(Errs)
    print('Acc30 is {:.2f} and MerErr is {:.2f} for {} images in class {}\n'.format(Acc, Med, len(dataset_test), cls))
# Acc_pi/6 = fraction of images with error <= 30; Med_Err = median error
print('Performance across all classes: Acc_pi/6 is {:.2f} and Med_Err is {:.2f}'.format(
    np.mean(np.array(Err_All) <= 30), np.median(np.array(Err_All))
)) | [
"sys.path.append",
"os.mkdir",
"argparse.ArgumentParser",
"os.path.isdir",
"evaluation.test_category",
"auxiliary.utils.load_checkpoint",
"auxiliary.dataset.Pascal3D",
"auxiliary.model.PoseEstimator",
"matplotlib.use",
"numpy.array",
"os.path.join",
"sys.exit"
] | [((159, 180), 'matplotlib.use', 'matplotlib.use', (['"""agg"""'], {}), "('agg')\n", (173, 180), False, 'import matplotlib\n'), ((220, 251), 'sys.path.append', 'sys.path.append', (['"""./auxiliary/"""'], {}), "('./auxiliary/')\n", (235, 251), False, 'import os, sys\n'), ((487, 512), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (510, 512), False, 'import argparse\n'), ((1765, 1940), 'auxiliary.model.PoseEstimator', 'PoseEstimator', ([], {'shape_feature_dim': 'opt.shape_feature_dim', 'img_feature_dim': 'opt.img_feature_dim', 'azi_classes': 'azi_classes', 'ele_classes': 'ele_classes', 'inp_classes': 'inp_classes'}), '(shape_feature_dim=opt.shape_feature_dim, img_feature_dim=opt.\n img_feature_dim, azi_classes=azi_classes, ele_classes=ele_classes,\n inp_classes=inp_classes)\n', (1778, 1940), False, 'from auxiliary.model import PoseEstimator\n'), ((1967, 2000), 'auxiliary.utils.load_checkpoint', 'load_checkpoint', (['model', 'opt.model'], {}), '(model, opt.model)\n', (1982, 2000), False, 'from auxiliary.utils import load_checkpoint\n'), ((2242, 2281), 'os.path.join', 'os.path.join', (['opt.output', '"""testing.txt"""'], {}), "(opt.output, 'testing.txt')\n", (2254, 2281), False, 'import os, sys\n'), ((2180, 2205), 'os.path.isdir', 'os.path.isdir', (['opt.output'], {}), '(opt.output)\n', (2193, 2205), False, 'import os, sys\n'), ((2211, 2231), 'os.mkdir', 'os.mkdir', (['opt.output'], {}), '(opt.output)\n', (2219, 2231), False, 'import os, sys\n'), ((3131, 3286), 'auxiliary.dataset.Pascal3D', 'Pascal3D', ([], {'train': '(False)', 'root_dir': 'opt.root_dir', 'annotation_file': 'annotation_file', 'cls_choice': '[cls]', 'input_dim': 'opt.input_dim', 'point_num': 'opt.point_num'}), '(train=False, root_dir=opt.root_dir, annotation_file=\n annotation_file, cls_choice=[cls], input_dim=opt.input_dim, point_num=\n opt.point_num)\n', (3139, 3286), False, 'from auxiliary.dataset import Pascal3D\n'), ((3326, 3416), 'evaluation.test_category', 
'test_category', (['dataset_test', 'model', 'opt.bin_size', 'cls', 'opt.output', 'logname', 'class_data'], {}), '(dataset_test, model, opt.bin_size, cls, opt.output, logname,\n class_data)\n', (3339, 3416), False, 'from evaluation import test_category\n'), ((2953, 2979), 'sys.exit', 'sys.exit', (['"""Wrong setting!"""'], {}), "('Wrong setting!')\n", (2961, 2979), False, 'import os, sys\n'), ((3695, 3712), 'numpy.array', 'np.array', (['Err_All'], {}), '(Err_All)\n', (3703, 3712), True, 'import numpy as np\n'), ((3659, 3676), 'numpy.array', 'np.array', (['Err_All'], {}), '(Err_All)\n', (3667, 3676), True, 'import numpy as np\n')] |
#
# Created on Thu Dec 09 2021 6:44:55 AM
# Author: <NAME> (<EMAIL>)
# Objective: Gaussian Sparse Parity Experiment
#
# import standard libraries
import numpy as np
from tensorflow import keras
from keras import layers
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
# importing internal libraries
from kdg.utils import gaussian_sparse_parity
from kdg.kdn import *
# define the experimental setup
p = 20 # total dimensions of the data vector
p_star = 3 # number of signal dimensions of the data vector
sample_size = [1000, 5000, 10000, 50000] # sample size under consideration
n_test = 1000 # test set size
reps = 5 # number of replicates
# accumulators for per-replicate results (collected into `df` after training)
df = pd.DataFrame()
reps_list = []
accuracy_kdn = []
accuracy_kdn_ = [] # NOTE(review): never appended to in this script
accuracy_nn = []
accuracy_nn_ = [] # NOTE(review): never appended to in this script
sample_list = []
# fixed validation split shared by every network fit (drives early stopping)
X_val, y_val = gaussian_sparse_parity(1000)
# NN params
compile_kwargs = {
    "loss": "binary_crossentropy",
    "optimizer": keras.optimizers.Adam(3e-4),
}
# stop training once validation loss has not improved for 10 epochs
callback = keras.callbacks.EarlyStopping(monitor="val_loss", patience=10, verbose=False)
fit_kwargs = {
    "epochs": 200,
    "batch_size": 64,
    "verbose": False,
    "validation_data": (X_val, keras.utils.to_categorical(y_val)),
    "callbacks": [callback],
}
# network architecture
def getNN():
    """Build and compile the small baseline feed-forward classifier.

    Two hidden ReLU layers of width 5 on a 20-d input, followed by a
    2-way softmax output; compiled with the shared ``compile_kwargs``.
    """
    net = keras.Sequential([
        layers.Dense(5, activation="relu", input_shape=(20,)),
        layers.Dense(5, activation="relu"),
        layers.Dense(units=2, activation="softmax"),
    ])
    net.compile(**compile_kwargs)
    return net
# run experiment
for sample in sample_size:
    print("Doing sample %d" % sample)
    for ii in range(reps):
        # fresh train/test draws for every replicate
        X, y = gaussian_sparse_parity(sample, p_star=p_star, p=p)
        X_test, y_test = gaussian_sparse_parity(n_test, p_star=p_star, p=p)
        # train Vanilla NN
        vanilla_nn = getNN()
        vanilla_nn.fit(X, keras.utils.to_categorical(y), **fit_kwargs)
        # train KDN (`kdn` is provided by the `from kdg.kdn import *` above)
        model_kdn = kdn(
            network=vanilla_nn,
            polytope_compute_method="all",
            k=1e-6,
            weighting_method="lin",
            T=2,
            c=2,
            verbose=False,
        )
        model_kdn.fit(X, y)
        # the KDN predicts labels directly, while the NN outputs per-class
        # probabilities, hence the argmax before comparing with y_test
        accuracy_kdn.append(np.mean(model_kdn.predict(X_test) == y_test))
        accuracy_nn.append(
            np.mean(np.argmax(vanilla_nn.predict(X_test), axis=1) == y_test)
        )
        reps_list.append(ii)
        sample_list.append(sample)
# collect the per-replicate results into the DataFrame
df["accuracy kdn"] = accuracy_kdn
df["accuracy nn"] = accuracy_nn
df["reps"] = reps_list
df["sample"] = sample_list
# save the results (CHANGE HERE)
df.to_csv("results/gsp.csv")
# plot
# Specify which results to plot (CHANGE HERE)
filename = "results/gsp.csv"
df = pd.read_csv(filename)
sample_size = [1000, 5000, 10000, 50000]
# per-sample-size summaries of the error (1 - accuracy): the median and
# the inter-quartile band for each model
err_nn_med = []
err_nn_25_quantile = []
err_nn_75_quantile = []
err_kdn_med = []
err_kdn_25_quantile = []
err_kdn_75_quantile = []
for sample in sample_size:
    err_nn = 1 - df["accuracy nn"][df["sample"] == sample]
    err_kdn = 1 - df["accuracy kdn"][df["sample"] == sample]
    err_nn_med.append(np.median(err_nn))
    err_nn_25_quantile.append(np.quantile(err_nn, [0.25])[0])
    err_nn_75_quantile.append(np.quantile(err_nn, [0.75])[0])
    err_kdn_med.append(np.median(err_kdn))
    err_kdn_25_quantile.append(np.quantile(err_kdn, [0.25])[0])
    err_kdn_75_quantile.append(np.quantile(err_kdn, [0.75])[0])
# median error curves with shaded inter-quartile ranges
sns.set_context("talk")
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
ax.plot(sample_size, err_nn_med, c="k", label="NN")
ax.fill_between(
    sample_size, err_nn_25_quantile, err_nn_75_quantile, facecolor="k", alpha=0.3
)
ax.plot(sample_size, err_kdn_med, c="r", label="KDN")
ax.fill_between(
    sample_size, err_kdn_25_quantile, err_kdn_75_quantile, facecolor="r", alpha=0.3
)
# hide the top/right spines and use a log-scaled sample-size axis
right_side = ax.spines["right"]
right_side.set_visible(False)
top_side = ax.spines["top"]
top_side.set_visible(False)
ax.set_xscale("log")
ax.set_xlabel("Sample size")
ax.set_ylabel("error")
ax.legend(frameon=False)
# Specify the figure save path (CHANGE HERE)
plt.savefig("plots/gsp.pdf")
| [
"pandas.DataFrame",
"numpy.quantile",
"tensorflow.keras.utils.to_categorical",
"pandas.read_csv",
"tensorflow.keras.Sequential",
"numpy.median",
"tensorflow.keras.callbacks.EarlyStopping",
"keras.layers.Dense",
"tensorflow.keras.optimizers.Adam",
"kdg.utils.gaussian_sparse_parity",
"matplotlib.p... | [((680, 694), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (692, 694), True, 'import pandas as pd\n'), ((815, 843), 'kdg.utils.gaussian_sparse_parity', 'gaussian_sparse_parity', (['(1000)'], {}), '(1000)\n', (837, 843), False, 'from kdg.utils import gaussian_sparse_parity\n'), ((970, 1047), 'tensorflow.keras.callbacks.EarlyStopping', 'keras.callbacks.EarlyStopping', ([], {'monitor': '"""val_loss"""', 'patience': '(10)', 'verbose': '(False)'}), "(monitor='val_loss', patience=10, verbose=False)\n", (999, 1047), False, 'from tensorflow import keras\n'), ((2747, 2768), 'pandas.read_csv', 'pd.read_csv', (['filename'], {}), '(filename)\n', (2758, 2768), True, 'import pandas as pd\n'), ((3431, 3454), 'seaborn.set_context', 'sns.set_context', (['"""talk"""'], {}), "('talk')\n", (3446, 3454), True, 'import seaborn as sns\n'), ((3465, 3499), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(8, 8)'}), '(1, 1, figsize=(8, 8))\n', (3477, 3499), True, 'import matplotlib.pyplot as plt\n'), ((4076, 4104), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""plots/gsp.pdf"""'], {}), "('plots/gsp.pdf')\n", (4087, 4104), True, 'import matplotlib.pyplot as plt\n'), ((928, 957), 'tensorflow.keras.optimizers.Adam', 'keras.optimizers.Adam', (['(0.0003)'], {}), '(0.0003)\n', (949, 957), False, 'from tensorflow import keras\n'), ((1280, 1298), 'tensorflow.keras.Sequential', 'keras.Sequential', ([], {}), '()\n', (1296, 1298), False, 'from tensorflow import keras\n'), ((1157, 1190), 'tensorflow.keras.utils.to_categorical', 'keras.utils.to_categorical', (['y_val'], {}), '(y_val)\n', (1183, 1190), False, 'from tensorflow import keras\n'), ((1320, 1373), 'keras.layers.Dense', 'layers.Dense', (['(5)'], {'activation': '"""relu"""', 'input_shape': '(20,)'}), "(5, activation='relu', input_shape=(20,))\n", (1332, 1373), False, 'from keras import layers\n'), ((1396, 1430), 'keras.layers.Dense', 'layers.Dense', (['(5)'], {'activation': 
'"""relu"""'}), "(5, activation='relu')\n", (1408, 1430), False, 'from keras import layers\n'), ((1453, 1496), 'keras.layers.Dense', 'layers.Dense', ([], {'units': '(2)', 'activation': '"""softmax"""'}), "(units=2, activation='softmax')\n", (1465, 1496), False, 'from keras import layers\n'), ((1691, 1741), 'kdg.utils.gaussian_sparse_parity', 'gaussian_sparse_parity', (['sample'], {'p_star': 'p_star', 'p': 'p'}), '(sample, p_star=p_star, p=p)\n', (1713, 1741), False, 'from kdg.utils import gaussian_sparse_parity\n'), ((1767, 1817), 'kdg.utils.gaussian_sparse_parity', 'gaussian_sparse_parity', (['n_test'], {'p_star': 'p_star', 'p': 'p'}), '(n_test, p_star=p_star, p=p)\n', (1789, 1817), False, 'from kdg.utils import gaussian_sparse_parity\n'), ((3115, 3132), 'numpy.median', 'np.median', (['err_nn'], {}), '(err_nn)\n', (3124, 3132), True, 'import numpy as np\n'), ((3282, 3300), 'numpy.median', 'np.median', (['err_kdn'], {}), '(err_kdn)\n', (3291, 3300), True, 'import numpy as np\n'), ((1901, 1930), 'tensorflow.keras.utils.to_categorical', 'keras.utils.to_categorical', (['y'], {}), '(y)\n', (1927, 1930), False, 'from tensorflow import keras\n'), ((3164, 3191), 'numpy.quantile', 'np.quantile', (['err_nn', '[0.25]'], {}), '(err_nn, [0.25])\n', (3175, 3191), True, 'import numpy as np\n'), ((3226, 3253), 'numpy.quantile', 'np.quantile', (['err_nn', '[0.75]'], {}), '(err_nn, [0.75])\n', (3237, 3253), True, 'import numpy as np\n'), ((3333, 3361), 'numpy.quantile', 'np.quantile', (['err_kdn', '[0.25]'], {}), '(err_kdn, [0.25])\n', (3344, 3361), True, 'import numpy as np\n'), ((3397, 3425), 'numpy.quantile', 'np.quantile', (['err_kdn', '[0.75]'], {}), '(err_kdn, [0.75])\n', (3408, 3425), True, 'import numpy as np\n')] |
# Copyright (c) 2020 The Plankton Authors.
# All rights reserved.
#
# This source code is derived from UUV Simulator
# (https://github.com/uuvsimulator/uuv_simulator)
# Copyright (c) 2016-2019 The UUV Simulator Authors
# licensed under the Apache license, Version 2.0
# cf. 3rd-party-licenses.txt file in the root directory of this source tree.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
class HelicalSegment(object):
    """Generator of helical segments.

    > *Input arguments*

    * `center` (*type:* `list`): Center of the helix in meters
    * `radius` (*type:* `float`): Radius of the helix in meters
    * `n_turns` (*type:* `int`): Number of turns
    * `delta_z` (*type:* `float`): Length of the step in the Z direction between each turn of the helix in meters
    * `angle_offset` (*type:* `float`): Angle offset to start the helix
    * `is_clockwise` (*type:* `bool`, *default:* `True`): If `True`, the helix is generated clockwise.

    > *Example*

    ```python
    radius = 3
    center = [2, 2, 2]
    n_turns = 2
    delta_z = 1
    angle_offset = 0.0
    is_clockwise = True

    helix = HelicalSegment(center, radius, n_turns, delta_z, angle_offset, is_clockwise)

    u = numpy.linspace(0, 1, 100)
    pnts = numpy.array([helix.interpolate(i) for i in u])
    ```
    """
    def __init__(self, center, radius, n_turns, delta_z, angle_offset, is_clockwise=True):
        self._center = np.array(center)
        assert self._center.size == 3, 'Size of center point vector must be 3'
        assert radius > 0, 'Helix radius must be greater than zero'
        assert n_turns > 0, 'Number of turns must be greater than zero'
        assert isinstance(is_clockwise, bool), 'is_clockwise flag must be a boolean'

        self._radius = radius
        self._n_turns = n_turns
        self._angle_offset = angle_offset
        self._is_clockwise = is_clockwise
        self._delta_z = delta_z
        # vertical rise of the helix per single turn
        self._step_z = float(self._delta_z) / self._n_turns

    # =========================================================================
    def get_length(self):
        """Return the length of the helix in meters"""
        # unrolling one turn onto a plane gives a right triangle whose legs
        # are the circumference (2*pi*r) and the per-turn rise step_z
        return self._n_turns * np.sqrt(self._step_z**2 + (2 * np.pi * self._radius)**2)

    # =========================================================================
    def get_pitch(self):
        """Return the pitch angle of the helical path in radians"""
        # sin(pitch) = rise / turn length, so the angle is the arcsine of
        # that ratio.  BUGFIX: the original applied np.sin to the ratio,
        # which only approximates the angle for small pitches.
        return np.arcsin(self._step_z / np.sqrt(self._step_z**2 + (2 * np.pi * self._radius)**2))

    # =========================================================================
    def interpolate(self, u):
        """Compute the 3D point on the helical path.

        > *Input arguments*

        * `u` (*type:* `float`): Curve parameter; values outside [0, 1]
          are clamped to that interval

        > *Returns*

        `numpy.ndarray` of shape (3,): the point on the helix at `u`
        """
        u = max(u, 0)
        u = min(u, 1)
        # direction of rotation: +1 for clockwise, -1 for counter-clockwise
        delta = 1 if self._is_clockwise else -1
        x = self._radius * np.cos(self._n_turns * 2 * np.pi * u * delta + self._angle_offset)
        y = self._radius * np.sin(self._n_turns * 2 * np.pi * u * delta + self._angle_offset)
        z = self._n_turns * u * self._step_z
        return self._center + np.array([x, y, z])
| [
"numpy.sin",
"numpy.array",
"numpy.cos",
"numpy.sqrt"
] | [((1940, 1956), 'numpy.array', 'np.array', (['center'], {}), '(center)\n', (1948, 1956), True, 'import numpy as np\n'), ((2702, 2762), 'numpy.sqrt', 'np.sqrt', (['(self._step_z ** 2 + (2 * np.pi * self._radius) ** 2)'], {}), '(self._step_z ** 2 + (2 * np.pi * self._radius) ** 2)\n', (2709, 2762), True, 'import numpy as np\n'), ((3534, 3600), 'numpy.cos', 'np.cos', (['(self._n_turns * 2 * np.pi * u * delta + self._angle_offset)'], {}), '(self._n_turns * 2 * np.pi * u * delta + self._angle_offset)\n', (3540, 3600), True, 'import numpy as np\n'), ((3628, 3694), 'numpy.sin', 'np.sin', (['(self._n_turns * 2 * np.pi * u * delta + self._angle_offset)'], {}), '(self._n_turns * 2 * np.pi * u * delta + self._angle_offset)\n', (3634, 3694), True, 'import numpy as np\n'), ((3771, 3790), 'numpy.array', 'np.array', (['[x, y, z]'], {}), '([x, y, z])\n', (3779, 3790), True, 'import numpy as np\n'), ((2970, 3030), 'numpy.sqrt', 'np.sqrt', (['(self._step_z ** 2 + (2 * np.pi * self._radius) ** 2)'], {}), '(self._step_z ** 2 + (2 * np.pi * self._radius) ** 2)\n', (2977, 3030), True, 'import numpy as np\n')] |
#!/usr/bin/env python
from nose.tools import raises
def Pan_linear_left_test():
  import numpy as np
  from ATK.Core import DoubleInPointerFilter, DoubleOutPointerFilter
  from ATK.Tools import DoublePanFilter
  from numpy.testing import assert_almost_equal

  # A 1 kHz sine sampled at 48 kHz, shaped (1, 1000).
  samples = np.arange(1000, dtype=np.float64).reshape(1, -1)
  source = np.sin(samples * 1000 * 2 * np.pi / 48000)
  sink = np.zeros((2, 1000), dtype=np.float64)

  source_filter = DoubleInPointerFilter(source, False)
  pan_filter = DoublePanFilter()
  sink_filter = DoubleOutPointerFilter(sink, False)

  source_filter.set_output_sampling_rate(48000)
  pan_filter.set_input_sampling_rate(48000)
  pan_filter.set_pan_law(DoublePanFilter.LINEAR_TAPER)
  pan_filter.set_pan(-1)
  sink_filter.set_input_sampling_rate(48000)

  pan_filter.set_input_port(0, source_filter, 0)
  sink_filter.set_input_port(0, pan_filter, 0)
  sink_filter.set_input_port(1, pan_filter, 1)
  sink_filter.process(1000)

  # Hard-left pan with the linear taper: the full signal lands on channel 0
  # and channel 1 stays silent.
  assert_almost_equal(source[0], sink[0])
  assert_almost_equal(0, sink[1])
def Pan_linear_right_test():
  import numpy as np
  from ATK.Core import DoubleInPointerFilter, DoubleOutPointerFilter
  from ATK.Tools import DoublePanFilter
  from numpy.testing import assert_almost_equal

  # A 1 kHz sine sampled at 48 kHz, shaped (1, 1000).
  samples = np.arange(1000, dtype=np.float64).reshape(1, -1)
  source = np.sin(samples * 1000 * 2 * np.pi / 48000)
  sink = np.zeros((2, 1000), dtype=np.float64)

  source_filter = DoubleInPointerFilter(source, False)
  pan_filter = DoublePanFilter()
  sink_filter = DoubleOutPointerFilter(sink, False)

  source_filter.set_output_sampling_rate(48000)
  pan_filter.set_input_sampling_rate(48000)
  pan_filter.set_pan_law(DoublePanFilter.LINEAR_TAPER)
  pan_filter.set_pan(1)
  sink_filter.set_input_sampling_rate(48000)

  pan_filter.set_input_port(0, source_filter, 0)
  sink_filter.set_input_port(0, pan_filter, 0)
  sink_filter.set_input_port(1, pan_filter, 1)
  sink_filter.process(1000)

  # Hard-right pan with the linear taper: the full signal lands on channel 1
  # and channel 0 stays silent.
  assert_almost_equal(source[0], sink[1])
  assert_almost_equal(0, sink[0])
| [
"ATK.Core.DoubleInPointerFilter",
"ATK.Core.DoubleOutPointerFilter",
"numpy.testing.assert_almost_equal",
"numpy.zeros",
"numpy.sin",
"numpy.arange",
"ATK.Tools.DoublePanFilter"
] | [((323, 359), 'numpy.sin', 'np.sin', (['(t * 1000 * 2 * np.pi / 48000)'], {}), '(t * 1000 * 2 * np.pi / 48000)\n', (329, 359), True, 'import numpy as np\n'), ((458, 493), 'ATK.Core.DoubleInPointerFilter', 'DoubleInPointerFilter', (['input', '(False)'], {}), '(input, False)\n', (479, 493), False, 'from ATK.Core import DoubleInPointerFilter, DoubleOutPointerFilter\n'), ((508, 525), 'ATK.Tools.DoublePanFilter', 'DoublePanFilter', ([], {}), '()\n', (523, 525), False, 'from ATK.Tools import DoublePanFilter\n'), ((543, 580), 'ATK.Core.DoubleOutPointerFilter', 'DoubleOutPointerFilter', (['output', '(False)'], {}), '(output, False)\n', (565, 580), False, 'from ATK.Core import DoubleInPointerFilter, DoubleOutPointerFilter\n'), ((969, 1009), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['input[0]', 'output[0]'], {}), '(input[0], output[0])\n', (988, 1009), False, 'from numpy.testing import assert_almost_equal\n'), ((1012, 1045), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['(0)', 'output[1]'], {}), '(0, output[1])\n', (1031, 1045), False, 'from numpy.testing import assert_almost_equal\n'), ((1319, 1355), 'numpy.sin', 'np.sin', (['(t * 1000 * 2 * np.pi / 48000)'], {}), '(t * 1000 * 2 * np.pi / 48000)\n', (1325, 1355), True, 'import numpy as np\n'), ((1456, 1491), 'ATK.Core.DoubleInPointerFilter', 'DoubleInPointerFilter', (['input', '(False)'], {}), '(input, False)\n', (1477, 1491), False, 'from ATK.Core import DoubleInPointerFilter, DoubleOutPointerFilter\n'), ((1506, 1523), 'ATK.Tools.DoublePanFilter', 'DoublePanFilter', ([], {}), '()\n', (1521, 1523), False, 'from ATK.Tools import DoublePanFilter\n'), ((1541, 1578), 'ATK.Core.DoubleOutPointerFilter', 'DoubleOutPointerFilter', (['output', '(False)'], {}), '(output, False)\n', (1563, 1578), False, 'from ATK.Core import DoubleInPointerFilter, DoubleOutPointerFilter\n'), ((1974, 2014), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['input[0]', 'output[1]'], {}), '(input[0], 
output[1])\n', (1993, 2014), False, 'from numpy.testing import assert_almost_equal\n'), ((2017, 2050), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['(0)', 'output[0]'], {}), '(0, output[0])\n', (2036, 2050), False, 'from numpy.testing import assert_almost_equal\n'), ((270, 303), 'numpy.arange', 'np.arange', (['(1000)'], {'dtype': 'np.float64'}), '(1000, dtype=np.float64)\n', (279, 303), True, 'import numpy as np\n'), ((1266, 1299), 'numpy.arange', 'np.arange', (['(1000)'], {'dtype': 'np.float64'}), '(1000, dtype=np.float64)\n', (1275, 1299), True, 'import numpy as np\n'), ((392, 424), 'numpy.zeros', 'np.zeros', (['(2000)'], {'dtype': 'np.float64'}), '(2000, dtype=np.float64)\n', (400, 424), True, 'import numpy as np\n'), ((1388, 1420), 'numpy.zeros', 'np.zeros', (['(2000)'], {'dtype': 'np.float64'}), '(2000, dtype=np.float64)\n', (1396, 1420), True, 'import numpy as np\n')] |
# coding=utf-8
# Copyright 2022 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Training script of VanillaLinearVRNN on dataset SimDial.
This script trains model VanillaLinearVRNN on SimDial data and report the
evaluation results.
"""
import collections
import json
import os
import time
from typing import Any, Dict, Optional, Sequence, Tuple
from absl import app
from absl import flags
from absl import logging
from ml_collections import config_dict
import ml_collections.config_flags
import numpy as np
import tensorflow as tf
import bert_utils # local file import from baselines.clinc_intent
from uncertainty_baselines.datasets import datasets
import data_preprocessor as preprocessor # local file import from experimental.language_structure.vrnn
import data_utils # local file import from experimental.language_structure.vrnn
import linear_vrnn # local file import from experimental.language_structure.vrnn
import psl_utils # local file import from experimental.language_structure.vrnn
import train_lib # local file import from experimental.language_structure.vrnn
import utils # local file import from experimental.language_structure.vrnn
# Feature keys re-exported from the preprocessor module.
_STATE_LABEL_NAME = preprocessor.STATE_LABEL_NAME
_DIAL_TURN_ID_NAME = preprocessor.DIAL_TURN_ID_NAME
# Feature keys for the tokenized inputs (BERT-style naming).
_INPUT_ID_NAME = 'input_word_ids'
_INPUT_MASK_NAME = 'input_mask'
# Keys/values of the JSON label-sampling spec: the spec either lists
# per-class sampling ratios or per-class shot counts.
_LABEL_SAMPLE_MODE_KEY = 'mode'
_LABEL_RATIO_MODE = 'ratios'
_LABEL_SHOT_MODE = 'shots'
# Dataset split names; metrics are namespaced by these.
_TRAIN = 'train'
_TEST = 'test'
_SPLITS = [_TRAIN, _TEST]
# The metric used for early stopping.
_PRIMARY_METRIC_KEY = f'{_TEST}/hidden_state_class_balanced_mixed_accuracy'
_PRIMARY_METRIC_SHOULD_DECREASE = False
FLAGS = flags.FLAGS
_CONFIG = ml_collections.config_flags.DEFINE_config_file(
    'config', None, 'Training configuration.', lock_config=True)
_OUTPUT_DIR = flags.DEFINE_string('output_dir', '/tmp/vrnn',
                    'Output directory.')
_SEED = flags.DEFINE_integer('seed', 8, 'Random seed.')
# Accelerator flags.
_USE_GPU = flags.DEFINE_bool('use_gpu', False,
                  'Whether to run on GPU or otherwise TPU.')
_NUM_CORES = flags.DEFINE_integer('num_cores', 8,
                     'Number of TPU cores or number of GPUs.')
_TPU = flags.DEFINE_string('tpu', None,
                'Name of the TPU. Only used if use_gpu is False.')
# Alias for the dict mapping '<split>/<metric_name>' -> Keras metric.
_MetricMap = Dict[str, tf.keras.metrics.Metric]
def _label_count_map(labels) -> Dict[int, int]:
unique_labels, counts = np.unique(labels, return_counts=True)
return dict(zip(unique_labels, counts))
def _primary_metric_improved(metrics: _MetricMap, current_best: tf.Tensor,
                             min_delta: float) -> bool:
  """Checks whether the primary metric beats `current_best` by `min_delta`."""
  margin = abs(min_delta)
  value = metrics[_PRIMARY_METRIC_KEY]
  # For decreasing metrics an improvement means a strictly smaller value;
  # otherwise a strictly larger one (by at least `margin` either way).
  if _PRIMARY_METRIC_SHOULD_DECREASE:
    return value + margin < current_best
  return value - margin > current_best
def _get_unmasked_dialog_turn_ids(labels: tf.Tensor, dialog_turn_ids: tf.Tensor,
                                  label_sample_map: Dict[int, float],
                                  label_sample_mode: str,
                                  seed: int) -> tf.Tensor:
  """Samples unmasked dialog turn ids from label_sample_map.

  Args:
    labels: tensor of per-turn labels; flattened to 1-D before use.
    dialog_turn_ids: tensor of dialog turn ids, aligned element-wise with
      `labels`.
    label_sample_map: mapping from label to a sampling ratio (mode 'ratios')
      or an absolute number of shots (mode 'shots').
    label_sample_mode: one of 'ratios' or 'shots'.
    seed: seed for the sampling RNG, making the selection reproducible.

  Returns:
    An int32 tensor of the sampled (labeled) dialog turn ids.

  Raises:
    NotImplementedError: if `label_sample_mode` is not a supported mode.
  """
  if label_sample_mode not in (_LABEL_RATIO_MODE, _LABEL_SHOT_MODE):
    raise NotImplementedError(
        'Only support label sample mode: %s, %s. Found %s.' %
        (_LABEL_RATIO_MODE, _LABEL_SHOT_MODE, label_sample_mode))
  labels = labels.numpy().flatten()
  if label_sample_mode == _LABEL_RATIO_MODE:
    # Compute number of labeled examples to be sampled in each class.
    # Classes absent from `labels` contribute zero samples.
    label_counts = _label_count_map(labels)
    label_sample_map = {
        label: round(label_sample_map[label] * label_counts.get(label, 0))
        for label in label_sample_map
    }
  else:
    label_sample_map = {
        label: int(num_samples)
        for label, num_samples in label_sample_map.items()
    }
  # Summarize dialog turn ids for each class.
  label_dialog_turn_id_map = collections.defaultdict(list)
  for label, dialog_turn_id in zip(labels, dialog_turn_ids.numpy().flatten()):
    label_dialog_turn_id_map[label].append(dialog_turn_id)
  # Sample given number of labeled dialog turns.
  # NOTE: iterating labels in sorted order keeps the sequence of rng.choice
  # calls stable, so the sampled ids are deterministic for a fixed seed.
  # Sampling is capped at the number of available turns per class.
  dialog_turn_ids = []
  rng = np.random.default_rng(seed=seed)
  for label in sorted(label_sample_map):
    if label_dialog_turn_id_map[label]:
      num_samples = min(
          len(label_dialog_turn_id_map[label]), label_sample_map[label])
      dialog_turn_ids.append(
          rng.choice(
              label_dialog_turn_id_map[label], num_samples, replace=False))
  if dialog_turn_ids:
    dialog_turn_ids = np.concatenate(dialog_turn_ids)
  return tf.constant(dialog_turn_ids, dtype=tf.int32)
def _should_generate_labeled_dialog_turn_ids(with_label: bool,
num_model_latent_states: int,
num_latent_states: int,
label_sampling_path: str) -> bool:
"""Determines whether to generate labeled dialog turn ids."""
if not with_label:
return False
if num_model_latent_states != num_latent_states:
raise ValueError(
'Expected model num_states equal to the latent states of the'
'dataset in semi-supervised mode, found {} vs {}'.format(
num_model_latent_states, num_latent_states))
return label_sampling_path is not None
def _generate_labeled_dialog_turn_ids(label_sampling_path: str,
                                      labels: tf.Tensor,
                                      dialog_turn_ids: tf.Tensor,
                                      seed: int) -> tf.Tensor:
  """Loads the sampling spec and samples the labeled dialog turn ids."""
  with tf.io.gfile.GFile(label_sampling_path, 'r') as spec_file:
    spec = json.loads(spec_file.read())
  sample_mode = spec[_LABEL_SAMPLE_MODE_KEY]
  # Every other entry maps a label to a positive sampling ratio/shot count;
  # non-positive entries are dropped.
  sample_map = {}
  for key, value in spec.items():
    if key != _LABEL_SAMPLE_MODE_KEY and value > 0:
      sample_map[int(key)] = float(value)
  return _get_unmasked_dialog_turn_ids(labels, dialog_turn_ids, sample_map,
                                       sample_mode, seed)
# TODO(yquan): Create a class to manage metrics and re-organize namespaces.
def _create_metrics(
    splits: Sequence[str], few_shots: Sequence[int],
    few_shots_l2_weights: Sequence[float],
    psl_constraint_rule_names: Optional[Sequence[str]]) -> _MetricMap:
  """Creates metrics to be tracked in the training.

  Args:
    splits: dataset split names used as metric namespaces (e.g. 'train').
    few_shots: unused here; kept for call-site compatibility.
    few_shots_l2_weights: unused here; kept for call-site compatibility.
    psl_constraint_rule_names: optional PSL rule names; one extra per-rule
      constraint-loss metric is created for each.

  Returns:
    A dict mapping '<split>/<metric_name>' to a fresh Keras metric.
  """

  def _create_metrics_of_type(
      metric_names: Sequence[str],
      metric_type: Any,
      namespaces: Optional[Sequence[str]] = splits) -> _MetricMap:
    # One `metric_type` instance per (namespace, metric_name) pair.
    metrics = {}
    for namespace in namespaces:
      for metric_name in metric_names:
        metrics['{}/{}'.format(namespace, metric_name)] = metric_type()
    return metrics

  mean_type_metrics = [
      'total_loss',
      'rc_loss',
      'kl_loss',
      'bow_loss',
      'cls_loss',
      'elbo',
      'constraint_loss',
      'hidden_state_loss',
      'hidden_state_accuracy',
      'hidden_state_class_balanced_accuracy',
      'hidden_state_domain_loss',
      'hidden_state_domain_accuracy',
      'hidden_state_domain_class_balanced_accuracy',
      'hidden_state_class_balanced_mixed_accuracy',
      'adjusted_mutual_info',
      'cluster_purity',
      'unique_prediction_class_count',
  ]
  # `psl_constraint_rule_names` is declared Optional: guard against None
  # (the original iterated it unconditionally, raising TypeError for None).
  for rule_name in psl_constraint_rule_names or ():
    mean_type_metrics.append('constraint_loss_%s' % rule_name)
  accuracy_type_metrics = ['accuracy', 'class_balanced_accuracy']
  return {
      **_create_metrics_of_type(mean_type_metrics, tf.keras.metrics.Mean),
      **_create_metrics_of_type(accuracy_type_metrics,
                                tf.keras.metrics.Accuracy),
  }
def _update_loss_metrics(metrics: _MetricMap, split: str, losses: Sequence[Any],
                         psl_constraint_rule_names: Optional[Sequence[str]]):
  """Accumulates the per-batch loss values into the `split` loss metrics."""
  (total_loss, rc_loss, kl_loss, bow_loss, classification_loss, constraint_loss,
   elbo, constraint_loss_per_rule) = losses
  named_losses = {
      'total_loss': total_loss,
      'elbo': elbo,
      'rc_loss': rc_loss,
      'kl_loss': kl_loss,
      'bow_loss': bow_loss,
      'cls_loss': classification_loss,
      'constraint_loss': constraint_loss,
  }
  for metric_name, loss_value in named_losses.items():
    metrics['{}/{}'.format(split, metric_name)].update_state(loss_value)
  # Per-rule PSL losses are only reported when PSL constraints are active.
  if constraint_loss_per_rule is not None:
    for rule_name, rule_loss in zip(psl_constraint_rule_names,
                                    constraint_loss_per_rule):
      metrics['{}/constraint_loss_{}'.format(split,
                                             rule_name)].update_state(rule_loss)
def _log_metric_results(metrics: _MetricMap, split: str):
  """Logs the aggregated metric values for the given split."""

  def _result(metric_name):
    # Reads the aggregated value of a namespaced metric.
    return metrics['{}/{}'.format(split, metric_name)].result()

  logging.info(
      '%s Accuracy: %.4f, Adjusted_Mutual_Information:'
      ' %.4f, Cluster_Purity: %.4f, Total Loss: %.4f, '
      'RC_Loss: %.4f, KL_Loss: %.4f, BOW_Loss: %.4f, CLS_loss: %.4f, '
      'PSL_Loss: %.4f, ELBO: %.4f, Hidden_State_Loss: %.4f, '
      'Hidden_State_Accuracy: %.4f, Hidden_State_Accuracy (balanced): %.4f, '
      'Hidden_State_Domain_Loss: %.4f, Hidden_State_Domain_Accuracy: %.4f, '
      'Hidden_State_Domain_Accuracy (balanced): %.4f', split,
      _result('accuracy'), _result('adjusted_mutual_info'),
      _result('cluster_purity'), _result('total_loss'), _result('rc_loss'),
      _result('kl_loss'), _result('bow_loss'), _result('cls_loss'),
      _result('constraint_loss'), _result('elbo'),
      _result('hidden_state_loss'), _result('hidden_state_accuracy'),
      _result('hidden_state_class_balanced_accuracy'),
      _result('hidden_state_domain_loss'),
      _result('hidden_state_domain_accuracy'),
      _result('hidden_state_domain_class_balanced_accuracy'))
def _load_data_from_files(config: config_dict.ConfigDict):
  """Fills in config fields whose values are stored in external files."""
  # Vocabulary size is the number of newline-separated entries; the [:-1]
  # drops the trailing newline before splitting.
  with tf.io.gfile.GFile(config.vocab_file_path, 'r') as vocab_file:
    num_tokens = len(vocab_file.read()[:-1].split('\n'))
  config.model.vocab_size = num_tokens
  config.model.vae_cell.vocab_size = num_tokens
  if config.model.vae_cell.shared_bert_embedding:
    bert_config_path = os.path.join(config.bert_dir, 'bert_config.json')
    with tf.io.gfile.GFile(bert_config_path) as bert_config_file:
      config.model.vae_cell.shared_bert_embedding_config = json.load(
          bert_config_file)
  if config.psl_config_file:
    with tf.io.gfile.GFile(config.psl_config_file, 'r') as psl_file:
      config.psl = json.loads(psl_file.read())
def _save_model_results(outputs: Sequence[tf.Tensor], output_dir: str,
                        split: str):
  """Saves the model predictions, labels and latent state representations."""
  latent_state, label, prediction, domain_label = outputs
  named_arrays = [
      ('label.npy', label),
      ('prediction.npy', prediction),
      ('latent_state.npy', latent_state),
      ('domain_label.npy', domain_label),
  ]
  for file_name, tensor in named_arrays:
    # Files are named '<split>-<array name>.npy' under `output_dir`.
    target_path = os.path.join(output_dir, '{}-{}'.format(split, file_name))
    with tf.io.gfile.GFile(target_path, 'wb') as f:
      np.save(f, tensor.numpy())
def _update_hidden_state_model_metrics(
    metrics: _MetricMap, splits: Sequence[str],
    evaluation_results: Sequence[Sequence[float]]):
  """Records hidden-state probe evaluation results into per-split metrics."""
  metric_names = (
      'hidden_state_loss',
      'hidden_state_accuracy',
      'hidden_state_class_balanced_accuracy',
      'hidden_state_domain_loss',
      'hidden_state_domain_accuracy',
      'hidden_state_domain_class_balanced_accuracy',
  )
  for split, results in zip(splits, evaluation_results):
    for metric_name, value in zip(metric_names, results):
      metrics['{}/{}'.format(split, metric_name)].update_state(value)
    # The mixed accuracy is the mean of the state-level and domain-level
    # class-balanced accuracies (indices 2 and 5 of the results).
    mixed_accuracy = (results[2] + results[5]) / 2
    metrics['{}/hidden_state_class_balanced_mixed_accuracy'.format(
        split)].update_state(mixed_accuracy)
def _update_model_prediction_metrics(metrics: _MetricMap, split: str,
                                     label_id: tf.Tensor,
                                     prediction: tf.Tensor):
  """Updates metrics related to model prediction quality."""

  def _metric(metric_name):
    return metrics['{}/{}'.format(split, metric_name)]

  # Clustering agreement between predictions and gold labels.
  _metric('adjusted_mutual_info').update_state(
      utils.adjusted_mutual_info(label_id, prediction))
  _metric('cluster_purity').update_state(
      utils.cluster_purity(label_id, prediction))
  # Number of distinct latent states the model actually predicts.
  flat_prediction = tf.reshape(prediction, shape=[-1])
  distinct_predictions, _ = tf.unique(flat_prediction)
  _metric('unique_prediction_class_count').update_state(
      tf.size(distinct_predictions))
  # Accuracy weighted by tf.sign(label_id), which zeroes out positions with
  # label 0, plus a class-rebalanced variant.
  _metric('accuracy').update_state(label_id, prediction, tf.sign(label_id))
  rebalanced_weights = utils.create_rebalanced_sample_weights(label_id)
  _metric('class_balanced_accuracy').update_state(label_id, prediction,
                                                  rebalanced_weights)
def _transform_hidden_representation(
    inputs: tf.Tensor, labels: tf.Tensor) -> Tuple[tf.Tensor, tf.Tensor]:
  """Flattens inputs/labels and drops positions whose label is <= 0 (padding)."""
  feature_dim = inputs.shape[-1]
  flat_inputs = tf.reshape(inputs, [-1, feature_dim])
  flat_labels = tf.reshape(labels, [-1])
  keep_mask = flat_labels > 0
  return (tf.boolean_mask(flat_inputs, keep_mask),
          tf.boolean_mask(flat_labels, keep_mask))
def _evaluate_hidden_state_model(input_size: int, num_classes: int,
                                 train_x: tf.Tensor, train_y: tf.Tensor,
                                 test_x: tf.Tensor, test_y: tf.Tensor,
                                 train_epochs: int, learning_rate: float):
  """Trains a probe on the hidden states and evaluates it on both splits."""
  train_x, train_y = _transform_hidden_representation(train_x, train_y)
  test_x, test_y = _transform_hidden_representation(test_x, test_y)
  probe = train_lib.build_hidden_state_model(input_size, num_classes,
                                             learning_rate)
  probe.fit(train_x, train_y, epochs=train_epochs, verbose=0)

  def _evaluate(features, targets):
    # Class-rebalanced weights so rare classes count equally in evaluation.
    return probe.evaluate(
        features,
        targets,
        sample_weight=utils.create_rebalanced_sample_weights(targets),
        verbose=0)

  return _evaluate(train_x, train_y), _evaluate(test_x, test_y)
def _load_class_map(file_path: str) -> Dict[int, str]:
  """Loads class {id, name} mapping from the given file."""
  with tf.io.gfile.GFile(file_path) as class_map_file:
    raw_map = json.load(class_map_file)
  # JSON keys are strings; convert them back to integer class ids.
  return {int(class_id): class_name for class_id, class_name in raw_map.items()}
def _create_fewshot_dataset_and_sample_weights(
    feautres: tf.Tensor, labels: tf.Tensor,
    repr_fn: Any) -> Tuple[tf.data.Dataset, tf.Tensor]:
  """Creates dataset for few-shot evaluation and the rebalanced sample weights."""
  # NOTE: the misspelled parameter name 'feautres' is kept for interface
  # stability with existing callers.
  _, flat_labels = repr_fn(feautres, labels)
  sample_weights = utils.create_rebalanced_sample_weights(flat_labels)
  # One batch containing the full set, repeated indefinitely.
  full_batch_size = labels.shape[0]
  dataset = tf.data.Dataset.from_tensor_slices(
      (feautres, labels)).batch(full_batch_size).repeat()
  return dataset, sample_weights
def _json_dump(config: config_dict.ConfigDict, filename: str):
  """Serializes `config` as JSON into `filename`."""
  with tf.io.gfile.GFile(filename, 'w') as out_file:
    json.dump(config.to_dict(), out_file)
def run_experiment(config: config_dict.ConfigDict, output_dir: str):
"""Runs training/evaluation experiment."""
seed = config.get('seed', 0)
logging.info('Config: %s', config)
_load_data_from_files(config)
tf.io.gfile.makedirs(output_dir)
logging.info('Model checkpoint will be saved at %s', output_dir)
tf.random.set_seed(seed)
if config.model_base_dir:
dir_name = os.path.basename(output_dir)
model_dir = os.path.join(config.model_base_dir, dir_name)
logging.info('Model outputs will be saved at %s', model_dir)
tf.io.gfile.makedirs(model_dir)
_json_dump(config, os.path.join(model_dir, 'config.json'))
_json_dump(config.model, os.path.join(model_dir, 'model_config.json'))
else:
model_dir = None
if _USE_GPU.value:
logging.info('Use GPU')
strategy = tf.distribute.MirroredStrategy()
drop_remainder = False
else:
logging.info('Use TPU at %s',
_TPU.value if _TPU.value is not None else 'local')
resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu=_TPU.value)
tf.config.experimental_connect_to_cluster(resolver)
tf.tpu.experimental.initialize_tpu_system(resolver)
strategy = tf.distribute.TPUStrategy(resolver)
drop_remainder = True
with_label = config.classification_loss_weight > 0
# Create dataset builders
train_dataset_builder = datasets.get(
config.dataset,
split=_TRAIN,
data_dir=config.dataset_dir,
shuffle_buffer_size=config.train_batch_size * 10,
seed=seed,
add_dialog_turn_id=with_label,
drop_remainder=drop_remainder)
test_dataset_builder = datasets.get(
config.dataset,
split=_TEST,
data_dir=config.dataset_dir,
shuffle_buffer_size=config.eval_batch_size * 10,
drop_remainder=drop_remainder)
test_datasets_builders = {_TEST: test_dataset_builder}
inference_train_dataset_builder = datasets.get(
config.dataset,
split=_TRAIN,
data_dir=config.dataset_dir,
shuffle_buffer_size=config.inference_batch_size * 10,
seed=config.inference_seed,
is_training=False)
inference_test_dataset_builder = datasets.get(
config.dataset,
split=_TEST,
data_dir=config.dataset_dir,
shuffle_buffer_size=config.inference_batch_size * 10,
seed=config.inference_seed)
# Choose labeled dialog turns.
num_latent_states = data_utils.get_dataset_num_latent_states(config.dataset)
if _should_generate_labeled_dialog_turn_ids(with_label,
config.model.num_states,
num_latent_states,
config.label_sampling_path):
inputs = preprocessor.get_full_dataset_outputs(train_dataset_builder)
labeled_dialog_turn_ids = _generate_labeled_dialog_turn_ids(
config.label_sampling_path, inputs[_STATE_LABEL_NAME],
inputs[_DIAL_TURN_ID_NAME], seed)
if model_dir:
with tf.io.gfile.GFile(
os.path.join(model_dir, 'labeled_dialog_turn_ids.txt'), 'w') as f:
f.write('\n'.join(
str(id) for id in labeled_dialog_turn_ids.numpy().tolist()))
else:
labeled_dialog_turn_ids = None
# Initialize bert embedding preprocessor.
if config.shared_bert_embedding:
bert_preprocess_model = utils.BertPreprocessor(
config.bert_embedding_preprocess_tfhub_url,
config.model.vae_cell.max_seq_length)
if bert_preprocess_model.vocab_size != config.model.vocab_size:
raise ValueError(
'Expect BERT preprocess model vocab size align with the model '
'config, found {} and {}.'.format(bert_preprocess_model.vocab_size,
config.model.vocab_size))
preprocess_fn = preprocessor.BertDataPreprocessor(
bert_preprocess_model, config.model.num_states,
labeled_dialog_turn_ids).create_feature_and_label
else:
preprocess_fn = preprocessor.DataPreprocessor(
config.model.num_states,
labeled_dialog_turn_ids).create_feature_and_label
# Load PSL configs
psl_learning = config.psl_constraint_learning_weight > 0
psl_inference = config.psl_constraint_inference_weight > 0
if psl_learning or psl_inference:
with tf.io.gfile.GFile(config.vocab_file_path, 'r') as f:
vocab = f.read()[:-1].split('\n')
preprocess_fn = psl_utils.psl_feature_mixin(preprocess_fn, config.dataset,
config.psl, vocab)
# Load datasets
# TODO(yquan): invesigate why distributed training fails when using BERT
# Failure example: https://xm2a.corp.google.com/experiments/33275459
distributed_training = (not psl_learning and not psl_inference and
not config.shared_bert_embedding)
train_dataset = preprocessor.create_dataset(train_dataset_builder,
config.train_batch_size,
preprocess_fn, strategy,
distributed_training)
steps_per_epoch = train_dataset_builder.num_examples // config.train_batch_size
test_datasets = {}
steps_per_eval = {}
for dataset_name, dataset_builder in test_datasets_builders.items():
steps_per_eval[dataset_name] = (
dataset_builder.num_examples // config.eval_batch_size)
test_datasets[dataset_name] = preprocessor.create_dataset(
dataset_builder, config.eval_batch_size, preprocess_fn, strategy,
distributed_training)
distributed_inference = not config.shared_bert_embedding
inference_train_dataset = preprocessor.create_dataset(
inference_train_dataset_builder, config.inference_batch_size,
preprocess_fn, strategy, distributed_inference)
num_inference_train_steps = (
inference_train_dataset_builder.num_examples //
config.inference_batch_size)
inference_test_dataset = preprocessor.create_dataset(
inference_test_dataset_builder, config.inference_batch_size,
preprocess_fn, strategy, distributed_inference)
num_inference_test_steps = (
inference_test_dataset_builder.num_examples //
config.inference_batch_size)
# Initialize word weights.
word_weights = np.ones((config.model.vocab_size), dtype=np.float32)
if config.word_weights_path:
w = config.word_weights_file_weight
if w > 1 or w < 0:
raise ValueError(
'Expected word_weights_file_weight between 0 and 1, found {}'.format(
w))
with tf.io.gfile.GFile(config.word_weights_path, 'rb') as word_weights_file:
word_weights_from_file = np.load(word_weights_file)
word_weights = w * word_weights_from_file + (1 - w) * word_weights
_json_dump(config.model, os.path.join(output_dir, 'model_config.json'))
with strategy.scope():
model = linear_vrnn.VanillaLinearVRNN(config.model)
optimizer = tf.keras.optimizers.Adam(
config.base_learning_rate, beta_1=1.0 - config.one_minus_momentum)
metrics = _create_metrics(_SPLITS, config.few_shots,
config.few_shots_l2_weights,
config.psl_constraint_rule_names)
if psl_learning or psl_inference:
psl_model = psl_utils.get_psl_model(
config.dataset,
config.psl_constraint_rule_names,
config.psl_constraint_rule_weights,
config=config.psl)
else:
psl_model = None
if psl_inference:
psl_optimizer = tf.keras.optimizers.Adam(
config.base_learning_rate, beta_1=1.0 - config.one_minus_momentum)
else:
psl_optimizer = None
checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)
checkpoint_manager = tf.train.CheckpointManager(
checkpoint, directory=output_dir, max_to_keep=None)
if model_dir:
best_model_checkpoint_manager = tf.train.CheckpointManager(
checkpoint, directory=model_dir, max_to_keep=1)
else:
best_model_checkpoint_manager = None
# checkpoint.restore must be within a strategy.scope() so that optimizer
# slot variables are mirrored.
latest_checkpoint = checkpoint_manager.restore_or_initialize()
initial_epoch = 0
if latest_checkpoint:
initial_epoch = optimizer.iterations.numpy() // steps_per_epoch
logging.info('Loaded checkpoint %s. Initialize from epoch %s',
latest_checkpoint, initial_epoch)
elif config.shared_bert_embedding:
# load BERT from initial checkpoint
bert_ckpt_dir = config.model.vae_cell.shared_bert_embedding_ckpt_dir
(model.vae_cell.shared_embedding_layer, _,
_) = bert_utils.load_bert_weight_from_ckpt(
bert_model=model.vae_cell.shared_embedding_layer,
bert_ckpt_dir=bert_ckpt_dir)
logging.info('Loaded BERT checkpoint %s', bert_ckpt_dir)
def train_step(batch_size: int, config: config_dict.ConfigDict):
@tf.function
def _train_step(inputs: Sequence[tf.Tensor]):
"""Training step function."""
(input_1, input_2, label_id, label_mask, initial_state, initial_sample,
_) = inputs[:7]
if psl_learning:
psl_inputs = inputs[-1]
# Explicitly specify the batch size as PSL model now requires known
# batch size.
psl_inputs = tf.ensure_shape(
psl_inputs, (batch_size, psl_inputs.shape[1], psl_inputs.shape[2]))
else:
psl_inputs = None
model_inputs = [input_1, input_2, initial_state, initial_sample]
if with_label:
model_inputs.extend([label_id, label_mask])
with tf.GradientTape() as tape:
# Set learning phase to enable dropout etc during training.
model_outputs = model(model_inputs, training=True)
losses = linear_vrnn.compute_loss(
input_1[_INPUT_ID_NAME],
input_2[_INPUT_ID_NAME],
input_1[_INPUT_MASK_NAME],
input_2[_INPUT_MASK_NAME],
model_outputs,
latent_label_id=label_id,
latent_label_mask=label_mask,
word_weights=word_weights,
with_bpr=config.with_bpr,
kl_loss_weight=config.kl_loss_weight,
with_bow=config.with_bow,
bow_loss_weight=config.bow_loss_weight,
num_latent_states=num_latent_states,
classification_loss_weight=config.classification_loss_weight,
psl_constraint_model=psl_model,
psl_inputs=psl_inputs,
psl_constraint_loss_weight=config.psl_constraint_learning_weight)
total_loss = losses[0]
grads = tape.gradient(total_loss, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
_update_loss_metrics(metrics, _TRAIN, losses,
config.psl_constraint_rule_names)
return _train_step
def test_step(split: str, batch_size: int, config: config_dict.ConfigDict):
  """Builds the tf.function that runs one evaluation step for `split`.

  Mirrors `train_step` but runs the model without gradients, without label
  guidance, and uses the PSL *inference* weight. Closes over `model`,
  `metrics`, `psl_model`, `word_weights`, `num_latent_states` and
  `psl_inference` from the enclosing scope.

  Args:
    split: Dataset split name used as the metric-name prefix.
    batch_size: Static batch size for shaping PSL constraint inputs.
    config: Experiment configuration with loss weights and PSL settings.

  Returns:
    A tf.function taking the per-batch sequence of input tensors.
  """
  @tf.function
  def _test_step(inputs: Sequence[tf.Tensor]):
    """Evaluation step function."""
    (input_1, input_2, label_id, label_mask, initial_state, initial_sample,
     _) = inputs[:7]
    if psl_inference:
      psl_inputs = inputs[-1]
      # Explicitly specify the batch size as PSL model now requires known
      # batch size.
      psl_inputs = tf.ensure_shape(
          psl_inputs, (batch_size, psl_inputs.shape[1], psl_inputs.shape[2]))
    else:
      psl_inputs = None
    # In evaluation, don't provide label as a guidance.
    model_inputs = [input_1, input_2, initial_state, initial_sample]
    model_outputs = model(model_inputs, training=False)
    losses = linear_vrnn.compute_loss(
        input_1[_INPUT_ID_NAME],
        input_2[_INPUT_ID_NAME],
        input_1[_INPUT_MASK_NAME],
        input_2[_INPUT_MASK_NAME],
        model_outputs,
        latent_label_id=label_id,
        latent_label_mask=label_mask,
        word_weights=word_weights,
        with_bpr=config.with_bpr,
        kl_loss_weight=config.kl_loss_weight,
        with_bow=config.with_bow,
        bow_loss_weight=config.bow_loss_weight,
        num_latent_states=num_latent_states,
        classification_loss_weight=config.classification_loss_weight,
        psl_constraint_model=psl_model,
        psl_inputs=psl_inputs,
        psl_constraint_loss_weight=config.psl_constraint_inference_weight)
    # Evaluation only records metrics; no gradient update is applied.
    _update_loss_metrics(metrics, split, losses,
                         config.psl_constraint_rule_names)
  return _test_step
def inference_step(psl_inference: bool, batch_size: int,
                   config: config_dict.ConfigDict):
  """Builds the tf.function used to collect model predictions.

  Closes over `model`, `psl_model` and `psl_optimizer` from the enclosing
  scope.

  Args:
    psl_inference: Whether to refine logits with PSL constraint inference.
    batch_size: Static batch size for shaping the PSL constraint inputs.
    config: Experiment configuration with PSL inference settings.

  Returns:
    A tf.function mapping a batch to
    (latent_state, label, prediction, domain_label).
  """
  @tf.function
  def _inference_step(inputs: Sequence[tf.Tensor]) -> Sequence[tf.Tensor]:
    """Runs one forward pass and returns per-example outputs."""
    (input_1, input_2, label, _, initial_state, initial_sample,
     domain_label) = inputs[:7]
    model_inputs = [input_1, input_2, initial_state, initial_sample]
    outputs = model(model_inputs, training=False)
    # First model output is the hidden/latent representation.
    latent_state = outputs[0]
    if not psl_inference:
      logits = linear_vrnn.get_logits(outputs)
    else:
      # Explicitly specify the batch size as PSL model now requires known
      # batch size.
      constraint_inputs = tf.ensure_shape(
          inputs[-1],
          (batch_size, inputs[-1].shape[1], inputs[-1].shape[2]))
      logits = psl_utils.update_logits(model, psl_optimizer, model_inputs,
                                       linear_vrnn.get_logits, psl_model,
                                       constraint_inputs,
                                       config.psl_constraint_inference_steps,
                                       config.psl_constraint_inference_weight)
    prediction = linear_vrnn.get_prediction(logits)
    return latent_state, label, prediction, domain_label
  return _inference_step
# TensorBoard writer for per-epoch metrics of the current run.
summary_writer = tf.summary.create_file_writer(
    os.path.join(output_dir, 'summaries'))
if model_dir:
  # Separate writer recording only the best-so-far evaluation results.
  best_summary_writer = tf.summary.create_file_writer(
      os.path.join(model_dir, 'summaries'))
else:
  best_summary_writer = None
# Wrap the per-batch step functions into (possibly distributed) multi-step
# runners.
run_train_steps = train_lib.create_run_steps_fn(
    train_step(config.train_batch_size, config),
    strategy,
    distributed=distributed_training)
run_test_steps_map = {}
for split in test_datasets:
  run_test_steps_map[split] = train_lib.create_run_steps_fn(
      test_step(split, config.eval_batch_size, config),
      strategy,
      distributed=distributed_training)
run_inference_steps = train_lib.create_run_steps_fn(
    inference_step(psl_inference, config.inference_batch_size, config),
    strategy,
    distributed=distributed_inference,
    output_dtypes=[tf.float32, tf.int32, tf.int32, tf.int32],
)
# Negative patience disables early stopping (train for all epochs).
fixed_train_epoch = config.patience < 0
primary_metric = tf.constant(0.)
out_of_patience = 0
train_model_outputs = None
test_model_outputs = None
train_iterator = iter(train_dataset)
start_time = time.time()
# Main training loop: train for one epoch, periodically evaluate, track the
# best primary metric for early stopping, and checkpoint.
for epoch in range(initial_epoch, config.train_epochs):
  # Early stopping: bail out once the primary metric has failed to improve
  # for more than `config.patience` evaluation cycles.
  if not fixed_train_epoch and out_of_patience > config.patience:
    logging.info(
        'Found primary metric %s keeping being worse than the '
        'current best %.4f for %s evaluation cycles, early stop '
        'at epoch %s', _PRIMARY_METRIC_KEY, primary_metric, out_of_patience,
        epoch)
    break
  logging.info('Starting to run epoch: %s', epoch)
  run_train_steps(train_iterator, tf.cast(steps_per_epoch, tf.int32))
  # Progress / ETA bookkeeping for logging only.
  current_step = (epoch + 1) * steps_per_epoch
  max_steps = steps_per_epoch * config.train_epochs
  time_elapsed = time.time() - start_time
  steps_per_sec = float(current_step) / time_elapsed
  eta_seconds = (max_steps - current_step) / steps_per_sec
  message = ('{:.1%} completion: epoch {:d}/{:d}. {:.1f} steps/s. '
             'ETA: {:.0f} min. Time elapsed: {:.0f} min'.format(
                 current_step / max_steps, epoch + 1, config.train_epochs,
                 steps_per_sec, eta_seconds / 60, time_elapsed / 60))
  logging.info(message)
  if (epoch + 1) % config.evaluation_interval == 0:
    # Loss-style evaluation on every test split.
    for dataset_name, test_dataset in test_datasets.items():
      test_iterator = iter(test_dataset)
      logging.info('Testing on dataset %s', dataset_name)
      logging.info('Starting to run eval at epoch: %s', epoch)
      run_test_steps_map[dataset_name](test_iterator,
                                       tf.cast(steps_per_eval[dataset_name],
                                               tf.int32))
      logging.info('Done with testing on %s', dataset_name)
    # Collect hidden states / predictions for representation probing.
    (train_hidden_state, train_label, train_prediction,
     train_domain_label) = run_inference_steps(
         iter(inference_train_dataset), num_inference_train_steps)
    (test_hidden_state, test_label, test_prediction,
     test_domain_label) = run_inference_steps(
         iter(inference_test_dataset), num_inference_test_steps)
    logging.info('Evaluating hidden representation learning.')
    input_size = config.model.vae_cell.encoder_projection_sizes[-1]
    # Probe 1: predict latent-state labels from the hidden representation.
    train_results, test_results = _evaluate_hidden_state_model(
        input_size, config.model.num_states, train_hidden_state, train_label,
        test_hidden_state, test_label, config.hidden_state_model_train_epochs,
        config.hidden_state_model_learning_rate)
    # Probe 2: predict domain labels from the same representation.
    domain_train_results, domain_test_results = _evaluate_hidden_state_model(
        input_size, data_utils.get_dataset_num_domains(config.dataset),
        train_hidden_state, train_domain_label, test_hidden_state,
        test_domain_label, config.hidden_state_model_train_epochs,
        config.hidden_state_model_learning_rate)
    _update_hidden_state_model_metrics(metrics, _SPLITS, [
        train_results + domain_train_results,
        test_results + domain_test_results
    ])
    _update_model_prediction_metrics(metrics, _TRAIN, train_label,
                                     train_prediction)
    _update_model_prediction_metrics(metrics, _TEST, test_label,
                                     test_prediction)
    for split in _SPLITS:
      _log_metric_results(metrics, split)
    total_results = {
        name: metric.result() for name, metric in metrics.items()
    }
    with summary_writer.as_default():
      for name, result in total_results.items():
        tf.summary.scalar(name, result, step=epoch)
    # Keep the latest inference outputs so they can be saved below.
    train_model_outputs = [
        train_hidden_state, train_label, train_prediction, train_domain_label
    ]
    test_model_outputs = [
        test_hidden_state, test_label, test_prediction, test_domain_label
    ]
    if _primary_metric_improved(total_results, primary_metric,
                                config.min_delta):
      # New best: reset patience and persist the best model/results.
      primary_metric = total_results[_PRIMARY_METRIC_KEY]
      out_of_patience = 0
      if best_model_checkpoint_manager:
        best_model_checkpoint_manager.save(checkpoint_number=epoch)
      if best_summary_writer:
        with best_summary_writer.as_default():
          for name, result in total_results.items():
            tf.summary.scalar(name, result, step=epoch)
      if model_dir:
        _save_model_results(train_model_outputs, model_dir, _TRAIN)
        _save_model_results(test_model_outputs, model_dir, _TEST)
    else:
      out_of_patience += 1
    # Reset all metric accumulators between evaluation cycles.
    for metric in metrics.values():
      metric.reset_states()
  if (config.checkpoint_interval > 0 and
      (epoch + 1) % config.checkpoint_interval == 0):
    logging.info('Saving checkpoint.')
    checkpoint_manager.save(checkpoint_number=epoch)
if fixed_train_epoch:
  # Without early stopping, save the final (not best) outputs.
  _save_model_results(train_model_outputs, output_dir, _TRAIN)
  _save_model_results(test_model_outputs, output_dir, _TEST)
# False tells the caller that training completed and no restart is needed.
return False
if __name__ == '__main__':

  def _main(argv):
    """Main entry function."""
    del argv  # unused
    num_restarts = 0  # NOTE(review): counted but never bounds the retries.
    config = _CONFIG.value
    output_dir = _OUTPUT_DIR.value
    keep_running = True
    # Retry the experiment whenever the worker/TPU becomes temporarily
    # unavailable; run_experiment returns False once training has finished.
    while keep_running:
      try:
        keep_running = run_experiment(config, output_dir)
      except tf.errors.UnavailableError as err:
        num_restarts += 1
        # `logging.warn` is a deprecated alias; use `logging.warning`.
        logging.warning(
            'Error encountered during experiment: %s. Will now try to recover.',
            err,
            exc_info=True)

  app.run(_main)
| [
"tensorflow.random.set_seed",
"numpy.load",
"psl_utils.update_logits",
"data_utils.get_dataset_num_latent_states",
"tensorflow.reshape",
"numpy.ones",
"collections.defaultdict",
"numpy.random.default_rng",
"absl.logging.info",
"tensorflow.io.gfile.makedirs",
"data_preprocessor.BertDataPreprocess... | [((2328, 2395), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""output_dir"""', '"""/tmp/vrnn"""', '"""Output directory."""'], {}), "('output_dir', '/tmp/vrnn', 'Output directory.')\n", (2347, 2395), False, 'from absl import flags\n'), ((2439, 2486), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""seed"""', '(8)', '"""Random seed."""'], {}), "('seed', 8, 'Random seed.')\n", (2459, 2486), False, 'from absl import flags\n'), ((2520, 2598), 'absl.flags.DEFINE_bool', 'flags.DEFINE_bool', (['"""use_gpu"""', '(False)', '"""Whether to run on GPU or otherwise TPU."""'], {}), "('use_gpu', False, 'Whether to run on GPU or otherwise TPU.')\n", (2537, 2598), False, 'from absl import flags\n'), ((2641, 2719), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""num_cores"""', '(8)', '"""Number of TPU cores or number of GPUs."""'], {}), "('num_cores', 8, 'Number of TPU cores or number of GPUs.')\n", (2661, 2719), False, 'from absl import flags\n'), ((2761, 2848), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""tpu"""', 'None', '"""Name of the TPU. Only used if use_gpu is False."""'], {}), "('tpu', None,\n 'Name of the TPU. 
Only used if use_gpu is False.')\n", (2780, 2848), False, 'from absl import flags\n'), ((2997, 3034), 'numpy.unique', 'np.unique', (['labels'], {'return_counts': '(True)'}), '(labels, return_counts=True)\n', (3006, 3034), True, 'import numpy as np\n'), ((4564, 4593), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (4587, 4593), False, 'import collections\n'), ((4813, 4845), 'numpy.random.default_rng', 'np.random.default_rng', ([], {'seed': 'seed'}), '(seed=seed)\n', (4834, 4845), True, 'import numpy as np\n'), ((5239, 5283), 'tensorflow.constant', 'tf.constant', (['dialog_turn_ids'], {'dtype': 'tf.int32'}), '(dialog_turn_ids, dtype=tf.int32)\n', (5250, 5283), True, 'import tensorflow as tf\n'), ((13968, 14016), 'utils.create_rebalanced_sample_weights', 'utils.create_rebalanced_sample_weights', (['label_id'], {}), '(label_id)\n', (14006, 14016), False, 'import utils\n'), ((14344, 14386), 'tensorflow.reshape', 'tf.reshape', (['inputs', '[-1, inputs.shape[-1]]'], {}), '(inputs, [-1, inputs.shape[-1]])\n', (14354, 14386), True, 'import tensorflow as tf\n'), ((14398, 14422), 'tensorflow.reshape', 'tf.reshape', (['labels', '[-1]'], {}), '(labels, [-1])\n', (14408, 14422), True, 'import tensorflow as tf\n'), ((15054, 15128), 'train_lib.build_hidden_state_model', 'train_lib.build_hidden_state_model', (['input_size', 'num_classes', 'learning_rate'], {}), '(input_size, num_classes, learning_rate)\n', (15088, 15128), False, 'import train_lib\n'), ((16164, 16209), 'utils.create_rebalanced_sample_weights', 'utils.create_rebalanced_sample_weights', (['label'], {}), '(label)\n', (16202, 16209), False, 'import utils\n'), ((16222, 16276), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(feautres, labels)'], {}), '((feautres, labels))\n', (16256, 16276), True, 'import tensorflow as tf\n'), ((16702, 16736), 'absl.logging.info', 'logging.info', (['"""Config: %s"""', 'config'], {}), "('Config: %s', config)\n", 
(16714, 16736), False, 'from absl import logging\n'), ((16773, 16805), 'tensorflow.io.gfile.makedirs', 'tf.io.gfile.makedirs', (['output_dir'], {}), '(output_dir)\n', (16793, 16805), True, 'import tensorflow as tf\n'), ((16808, 16872), 'absl.logging.info', 'logging.info', (['"""Model checkpoint will be saved at %s"""', 'output_dir'], {}), "('Model checkpoint will be saved at %s', output_dir)\n", (16820, 16872), False, 'from absl import logging\n'), ((16875, 16899), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['seed'], {}), '(seed)\n', (16893, 16899), True, 'import tensorflow as tf\n'), ((17917, 18119), 'uncertainty_baselines.datasets.datasets.get', 'datasets.get', (['config.dataset'], {'split': '_TRAIN', 'data_dir': 'config.dataset_dir', 'shuffle_buffer_size': '(config.train_batch_size * 10)', 'seed': 'seed', 'add_dialog_turn_id': 'with_label', 'drop_remainder': 'drop_remainder'}), '(config.dataset, split=_TRAIN, data_dir=config.dataset_dir,\n shuffle_buffer_size=config.train_batch_size * 10, seed=seed,\n add_dialog_turn_id=with_label, drop_remainder=drop_remainder)\n', (17929, 18119), False, 'from uncertainty_baselines.datasets import datasets\n'), ((18180, 18339), 'uncertainty_baselines.datasets.datasets.get', 'datasets.get', (['config.dataset'], {'split': '_TEST', 'data_dir': 'config.dataset_dir', 'shuffle_buffer_size': '(config.eval_batch_size * 10)', 'drop_remainder': 'drop_remainder'}), '(config.dataset, split=_TEST, data_dir=config.dataset_dir,\n shuffle_buffer_size=config.eval_batch_size * 10, drop_remainder=\n drop_remainder)\n', (18192, 18339), False, 'from uncertainty_baselines.datasets import datasets\n'), ((18457, 18638), 'uncertainty_baselines.datasets.datasets.get', 'datasets.get', (['config.dataset'], {'split': '_TRAIN', 'data_dir': 'config.dataset_dir', 'shuffle_buffer_size': '(config.inference_batch_size * 10)', 'seed': 'config.inference_seed', 'is_training': '(False)'}), '(config.dataset, split=_TRAIN, data_dir=config.dataset_dir,\n 
shuffle_buffer_size=config.inference_batch_size * 10, seed=config.\n inference_seed, is_training=False)\n', (18469, 18638), False, 'from uncertainty_baselines.datasets import datasets\n'), ((18702, 18863), 'uncertainty_baselines.datasets.datasets.get', 'datasets.get', (['config.dataset'], {'split': '_TEST', 'data_dir': 'config.dataset_dir', 'shuffle_buffer_size': '(config.inference_batch_size * 10)', 'seed': 'config.inference_seed'}), '(config.dataset, split=_TEST, data_dir=config.dataset_dir,\n shuffle_buffer_size=config.inference_batch_size * 10, seed=config.\n inference_seed)\n', (18714, 18863), False, 'from uncertainty_baselines.datasets import datasets\n'), ((18942, 18998), 'data_utils.get_dataset_num_latent_states', 'data_utils.get_dataset_num_latent_states', (['config.dataset'], {}), '(config.dataset)\n', (18982, 18998), False, 'import data_utils\n'), ((21382, 21508), 'data_preprocessor.create_dataset', 'preprocessor.create_dataset', (['train_dataset_builder', 'config.train_batch_size', 'preprocess_fn', 'strategy', 'distributed_training'], {}), '(train_dataset_builder, config.train_batch_size,\n preprocess_fn, strategy, distributed_training)\n', (21409, 21508), True, 'import data_preprocessor as preprocessor\n'), ((22196, 22338), 'data_preprocessor.create_dataset', 'preprocessor.create_dataset', (['inference_train_dataset_builder', 'config.inference_batch_size', 'preprocess_fn', 'strategy', 'distributed_inference'], {}), '(inference_train_dataset_builder, config.\n inference_batch_size, preprocess_fn, strategy, distributed_inference)\n', (22223, 22338), True, 'import data_preprocessor as preprocessor\n'), ((22495, 22636), 'data_preprocessor.create_dataset', 'preprocessor.create_dataset', (['inference_test_dataset_builder', 'config.inference_batch_size', 'preprocess_fn', 'strategy', 'distributed_inference'], {}), '(inference_test_dataset_builder, config.\n inference_batch_size, preprocess_fn, strategy, distributed_inference)\n', (22522, 22636), True, 'import 
data_preprocessor as preprocessor\n'), ((22811, 22861), 'numpy.ones', 'np.ones', (['config.model.vocab_size'], {'dtype': 'np.float32'}), '(config.model.vocab_size, dtype=np.float32)\n', (22818, 22861), True, 'import numpy as np\n'), ((31390, 31406), 'tensorflow.constant', 'tf.constant', (['(0.0)'], {}), '(0.0)\n', (31401, 31406), True, 'import tensorflow as tf\n'), ((31539, 31550), 'time.time', 'time.time', ([], {}), '()\n', (31548, 31550), False, 'import time\n'), ((36937, 36951), 'absl.app.run', 'app.run', (['_main'], {}), '(_main)\n', (36944, 36951), False, 'from absl import app\n'), ((5198, 5229), 'numpy.concatenate', 'np.concatenate', (['dialog_turn_ids'], {}), '(dialog_turn_ids)\n', (5212, 5229), True, 'import numpy as np\n'), ((6315, 6358), 'tensorflow.io.gfile.GFile', 'tf.io.gfile.GFile', (['label_sampling_path', '"""r"""'], {}), "(label_sampling_path, 'r')\n", (6332, 6358), True, 'import tensorflow as tf\n'), ((11075, 11121), 'tensorflow.io.gfile.GFile', 'tf.io.gfile.GFile', (['config.vocab_file_path', '"""r"""'], {}), "(config.vocab_file_path, 'r')\n", (11092, 11121), True, 'import tensorflow as tf\n'), ((13433, 13481), 'utils.adjusted_mutual_info', 'utils.adjusted_mutual_info', (['label_id', 'prediction'], {}), '(label_id, prediction)\n', (13459, 13481), False, 'import utils\n'), ((13548, 13590), 'utils.cluster_purity', 'utils.cluster_purity', (['label_id', 'prediction'], {}), '(label_id, prediction)\n', (13568, 13590), False, 'import utils\n'), ((13628, 13662), 'tensorflow.reshape', 'tf.reshape', (['prediction'], {'shape': '[-1]'}), '(prediction, shape=[-1])\n', (13638, 13662), True, 'import tensorflow as tf\n'), ((13744, 13771), 'tensorflow.size', 'tf.size', (['prediction_classes'], {}), '(prediction_classes)\n', (13751, 13771), True, 'import tensorflow as tf\n'), ((13923, 13940), 'tensorflow.sign', 'tf.sign', (['label_id'], {}), '(label_id)\n', (13930, 13940), True, 'import tensorflow as tf\n'), ((14461, 14498), 'tensorflow.boolean_mask', 
'tf.boolean_mask', (['inputs', 'padding_mask'], {}), '(inputs, padding_mask)\n', (14476, 14498), True, 'import tensorflow as tf\n'), ((14525, 14562), 'tensorflow.boolean_mask', 'tf.boolean_mask', (['labels', 'padding_mask'], {}), '(labels, padding_mask)\n', (14540, 14562), True, 'import tensorflow as tf\n'), ((15694, 15722), 'tensorflow.io.gfile.GFile', 'tf.io.gfile.GFile', (['file_path'], {}), '(file_path)\n', (15711, 15722), True, 'import tensorflow as tf\n'), ((15758, 15783), 'json.load', 'json.load', (['class_map_file'], {}), '(class_map_file)\n', (15767, 15783), False, 'import json\n'), ((16477, 16509), 'tensorflow.io.gfile.GFile', 'tf.io.gfile.GFile', (['filename', '"""w"""'], {}), "(filename, 'w')\n", (16494, 16509), True, 'import tensorflow as tf\n'), ((16944, 16972), 'os.path.basename', 'os.path.basename', (['output_dir'], {}), '(output_dir)\n', (16960, 16972), False, 'import os\n'), ((16989, 17034), 'os.path.join', 'os.path.join', (['config.model_base_dir', 'dir_name'], {}), '(config.model_base_dir, dir_name)\n', (17001, 17034), False, 'import os\n'), ((17039, 17099), 'absl.logging.info', 'logging.info', (['"""Model outputs will be saved at %s"""', 'model_dir'], {}), "('Model outputs will be saved at %s', model_dir)\n", (17051, 17099), False, 'from absl import logging\n'), ((17104, 17135), 'tensorflow.io.gfile.makedirs', 'tf.io.gfile.makedirs', (['model_dir'], {}), '(model_dir)\n', (17124, 17135), True, 'import tensorflow as tf\n'), ((17329, 17352), 'absl.logging.info', 'logging.info', (['"""Use GPU"""'], {}), "('Use GPU')\n", (17341, 17352), False, 'from absl import logging\n'), ((17368, 17400), 'tensorflow.distribute.MirroredStrategy', 'tf.distribute.MirroredStrategy', ([], {}), '()\n', (17398, 17400), True, 'import tensorflow as tf\n'), ((17440, 17525), 'absl.logging.info', 'logging.info', (['"""Use TPU at %s"""', "(_TPU.value if _TPU.value is not None else 'local')"], {}), "('Use TPU at %s', _TPU.value if _TPU.value is not None else 'local'\n )\n", 
(17452, 17525), False, 'from absl import logging\n'), ((17553, 17618), 'tensorflow.distribute.cluster_resolver.TPUClusterResolver', 'tf.distribute.cluster_resolver.TPUClusterResolver', ([], {'tpu': '_TPU.value'}), '(tpu=_TPU.value)\n', (17602, 17618), True, 'import tensorflow as tf\n'), ((17623, 17674), 'tensorflow.config.experimental_connect_to_cluster', 'tf.config.experimental_connect_to_cluster', (['resolver'], {}), '(resolver)\n', (17664, 17674), True, 'import tensorflow as tf\n'), ((17679, 17730), 'tensorflow.tpu.experimental.initialize_tpu_system', 'tf.tpu.experimental.initialize_tpu_system', (['resolver'], {}), '(resolver)\n', (17720, 17730), True, 'import tensorflow as tf\n'), ((17746, 17781), 'tensorflow.distribute.TPUStrategy', 'tf.distribute.TPUStrategy', (['resolver'], {}), '(resolver)\n', (17771, 17781), True, 'import tensorflow as tf\n'), ((19281, 19341), 'data_preprocessor.get_full_dataset_outputs', 'preprocessor.get_full_dataset_outputs', (['train_dataset_builder'], {}), '(train_dataset_builder)\n', (19318, 19341), True, 'import data_preprocessor as preprocessor\n'), ((19888, 19997), 'utils.BertPreprocessor', 'utils.BertPreprocessor', (['config.bert_embedding_preprocess_tfhub_url', 'config.model.vae_cell.max_seq_length'], {}), '(config.bert_embedding_preprocess_tfhub_url, config.\n model.vae_cell.max_seq_length)\n', (19910, 19997), False, 'import utils\n'), ((20944, 21021), 'psl_utils.psl_feature_mixin', 'psl_utils.psl_feature_mixin', (['preprocess_fn', 'config.dataset', 'config.psl', 'vocab'], {}), '(preprocess_fn, config.dataset, config.psl, vocab)\n', (20971, 21021), False, 'import psl_utils\n'), ((21975, 22094), 'data_preprocessor.create_dataset', 'preprocessor.create_dataset', (['dataset_builder', 'config.eval_batch_size', 'preprocess_fn', 'strategy', 'distributed_training'], {}), '(dataset_builder, config.eval_batch_size,\n preprocess_fn, strategy, distributed_training)\n', (22002, 22094), True, 'import data_preprocessor as preprocessor\n'), 
((23318, 23363), 'os.path.join', 'os.path.join', (['output_dir', '"""model_config.json"""'], {}), "(output_dir, 'model_config.json')\n", (23330, 23363), False, 'import os\n'), ((23403, 23446), 'linear_vrnn.VanillaLinearVRNN', 'linear_vrnn.VanillaLinearVRNN', (['config.model'], {}), '(config.model)\n', (23432, 23446), False, 'import linear_vrnn\n'), ((23464, 23560), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', (['config.base_learning_rate'], {'beta_1': '(1.0 - config.one_minus_momentum)'}), '(config.base_learning_rate, beta_1=1.0 - config.\n one_minus_momentum)\n', (23488, 23560), True, 'import tensorflow as tf\n'), ((24209, 24262), 'tensorflow.train.Checkpoint', 'tf.train.Checkpoint', ([], {'model': 'model', 'optimizer': 'optimizer'}), '(model=model, optimizer=optimizer)\n', (24228, 24262), True, 'import tensorflow as tf\n'), ((24288, 24366), 'tensorflow.train.CheckpointManager', 'tf.train.CheckpointManager', (['checkpoint'], {'directory': 'output_dir', 'max_to_keep': 'None'}), '(checkpoint, directory=output_dir, max_to_keep=None)\n', (24314, 24366), True, 'import tensorflow as tf\n'), ((30479, 30516), 'os.path.join', 'os.path.join', (['output_dir', '"""summaries"""'], {}), "(output_dir, 'summaries')\n", (30491, 30516), False, 'import os\n'), ((31943, 31991), 'absl.logging.info', 'logging.info', (['"""Starting to run epoch: %s"""', 'epoch'], {}), "('Starting to run epoch: %s', epoch)\n", (31955, 31991), False, 'from absl import logging\n'), ((32617, 32638), 'absl.logging.info', 'logging.info', (['message'], {}), '(message)\n', (32629, 32638), False, 'from absl import logging\n'), ((11494, 11516), 'json.load', 'json.load', (['config_file'], {}), '(config_file)\n', (11503, 11516), False, 'import json\n'), ((11567, 11613), 'tensorflow.io.gfile.GFile', 'tf.io.gfile.GFile', (['config.psl_config_file', '"""r"""'], {}), "(config.psl_config_file, 'r')\n", (11584, 11613), True, 'import tensorflow as tf\n'), ((15320, 15367), 
'utils.create_rebalanced_sample_weights', 'utils.create_rebalanced_sample_weights', (['train_y'], {}), '(train_y)\n', (15358, 15367), False, 'import utils\n'), ((15468, 15514), 'utils.create_rebalanced_sample_weights', 'utils.create_rebalanced_sample_weights', (['test_y'], {}), '(test_y)\n', (15506, 15514), False, 'import utils\n'), ((17159, 17197), 'os.path.join', 'os.path.join', (['model_dir', '"""config.json"""'], {}), "(model_dir, 'config.json')\n", (17171, 17197), False, 'import os\n'), ((17228, 17272), 'os.path.join', 'os.path.join', (['model_dir', '"""model_config.json"""'], {}), "(model_dir, 'model_config.json')\n", (17240, 17272), False, 'import os\n'), ((20344, 20455), 'data_preprocessor.BertDataPreprocessor', 'preprocessor.BertDataPreprocessor', (['bert_preprocess_model', 'config.model.num_states', 'labeled_dialog_turn_ids'], {}), '(bert_preprocess_model, config.model.\n num_states, labeled_dialog_turn_ids)\n', (20377, 20455), True, 'import data_preprocessor as preprocessor\n'), ((20522, 20601), 'data_preprocessor.DataPreprocessor', 'preprocessor.DataPreprocessor', (['config.model.num_states', 'labeled_dialog_turn_ids'], {}), '(config.model.num_states, labeled_dialog_turn_ids)\n', (20551, 20601), True, 'import data_preprocessor as preprocessor\n'), ((20831, 20877), 'tensorflow.io.gfile.GFile', 'tf.io.gfile.GFile', (['config.vocab_file_path', '"""r"""'], {}), "(config.vocab_file_path, 'r')\n", (20848, 20877), True, 'import tensorflow as tf\n'), ((23089, 23138), 'tensorflow.io.gfile.GFile', 'tf.io.gfile.GFile', (['config.word_weights_path', '"""rb"""'], {}), "(config.word_weights_path, 'rb')\n", (23106, 23138), True, 'import tensorflow as tf\n'), ((23192, 23218), 'numpy.load', 'np.load', (['word_weights_file'], {}), '(word_weights_file)\n', (23199, 23218), True, 'import numpy as np\n'), ((23803, 23935), 'psl_utils.get_psl_model', 'psl_utils.get_psl_model', (['config.dataset', 'config.psl_constraint_rule_names', 'config.psl_constraint_rule_weights'], 
{'config': 'config.psl'}), '(config.dataset, config.psl_constraint_rule_names,\n config.psl_constraint_rule_weights, config=config.psl)\n', (23826, 23935), False, 'import psl_utils\n'), ((24051, 24147), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', (['config.base_learning_rate'], {'beta_1': '(1.0 - config.one_minus_momentum)'}), '(config.base_learning_rate, beta_1=1.0 - config.\n one_minus_momentum)\n', (24075, 24147), True, 'import tensorflow as tf\n'), ((24432, 24506), 'tensorflow.train.CheckpointManager', 'tf.train.CheckpointManager', (['checkpoint'], {'directory': 'model_dir', 'max_to_keep': '(1)'}), '(checkpoint, directory=model_dir, max_to_keep=1)\n', (24458, 24506), True, 'import tensorflow as tf\n'), ((24874, 24974), 'absl.logging.info', 'logging.info', (['"""Loaded checkpoint %s. Initialize from epoch %s"""', 'latest_checkpoint', 'initial_epoch'], {}), "('Loaded checkpoint %s. Initialize from epoch %s',\n latest_checkpoint, initial_epoch)\n", (24886, 24974), False, 'from absl import logging\n'), ((28210, 28822), 'linear_vrnn.compute_loss', 'linear_vrnn.compute_loss', (['input_1[_INPUT_ID_NAME]', 'input_2[_INPUT_ID_NAME]', 'input_1[_INPUT_MASK_NAME]', 'input_2[_INPUT_MASK_NAME]', 'model_outputs'], {'latent_label_id': 'label_id', 'latent_label_mask': 'label_mask', 'word_weights': 'word_weights', 'with_bpr': 'config.with_bpr', 'kl_loss_weight': 'config.kl_loss_weight', 'with_bow': 'config.with_bow', 'bow_loss_weight': 'config.bow_loss_weight', 'num_latent_states': 'num_latent_states', 'classification_loss_weight': 'config.classification_loss_weight', 'psl_constraint_model': 'psl_model', 'psl_inputs': 'psl_inputs', 'psl_constraint_loss_weight': 'config.psl_constraint_inference_weight'}), '(input_1[_INPUT_ID_NAME], input_2[_INPUT_ID_NAME],\n input_1[_INPUT_MASK_NAME], input_2[_INPUT_MASK_NAME], model_outputs,\n latent_label_id=label_id, latent_label_mask=label_mask, word_weights=\n word_weights, with_bpr=config.with_bpr, 
kl_loss_weight=config.\n kl_loss_weight, with_bow=config.with_bow, bow_loss_weight=config.\n bow_loss_weight, num_latent_states=num_latent_states,\n classification_loss_weight=config.classification_loss_weight,\n psl_constraint_model=psl_model, psl_inputs=psl_inputs,\n psl_constraint_loss_weight=config.psl_constraint_inference_weight)\n', (28234, 28822), False, 'import linear_vrnn\n'), ((30260, 30294), 'linear_vrnn.get_prediction', 'linear_vrnn.get_prediction', (['logits'], {}), '(logits)\n', (30286, 30294), False, 'import linear_vrnn\n'), ((30599, 30635), 'os.path.join', 'os.path.join', (['model_dir', '"""summaries"""'], {}), "(model_dir, 'summaries')\n", (30611, 30635), False, 'import os\n'), ((31683, 31889), 'absl.logging.info', 'logging.info', (['"""Found primary metric %s keeping being worse than the current best %.4f for %s evaluation cycles, early stop at epoch %s"""', '_PRIMARY_METRIC_KEY', 'primary_metric', 'out_of_patience', 'epoch'], {}), "(\n 'Found primary metric %s keeping being worse than the current best %.4f for %s evaluation cycles, early stop at epoch %s'\n , _PRIMARY_METRIC_KEY, primary_metric, out_of_patience, epoch)\n", (31695, 31889), False, 'from absl import logging\n'), ((32028, 32062), 'tensorflow.cast', 'tf.cast', (['steps_per_epoch', 'tf.int32'], {}), '(steps_per_epoch, tf.int32)\n', (32035, 32062), True, 'import tensorflow as tf\n'), ((32186, 32197), 'time.time', 'time.time', ([], {}), '()\n', (32195, 32197), False, 'import time\n'), ((33539, 33597), 'absl.logging.info', 'logging.info', (['"""Evaluating hidden representation learning."""'], {}), "('Evaluating hidden representation learning.')\n", (33551, 33597), False, 'from absl import logging\n'), ((36139, 36173), 'absl.logging.info', 'logging.info', (['"""Saving checkpoint."""'], {}), "('Saving checkpoint.')\n", (36151, 36173), False, 'from absl import logging\n'), ((11328, 11377), 'os.path.join', 'os.path.join', (['config.bert_dir', '"""bert_config.json"""'], {}), "(config.bert_dir, 
'bert_config.json')\n", (11340, 11377), False, 'import os\n'), ((25207, 25328), 'bert_utils.load_bert_weight_from_ckpt', 'bert_utils.load_bert_weight_from_ckpt', ([], {'bert_model': 'model.vae_cell.shared_embedding_layer', 'bert_ckpt_dir': 'bert_ckpt_dir'}), '(bert_model=model.vae_cell.\n shared_embedding_layer, bert_ckpt_dir=bert_ckpt_dir)\n', (25244, 25328), False, 'import bert_utils\n'), ((25353, 25409), 'absl.logging.info', 'logging.info', (['"""Loaded BERT checkpoint %s"""', 'bert_ckpt_dir'], {}), "('Loaded BERT checkpoint %s', bert_ckpt_dir)\n", (25365, 25409), False, 'from absl import logging\n'), ((25858, 25946), 'tensorflow.ensure_shape', 'tf.ensure_shape', (['psl_inputs', '(batch_size, psl_inputs.shape[1], psl_inputs.shape[2])'], {}), '(psl_inputs, (batch_size, psl_inputs.shape[1], psl_inputs.\n shape[2]))\n', (25873, 25946), True, 'import tensorflow as tf\n'), ((26150, 26167), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (26165, 26167), True, 'import tensorflow as tf\n'), ((26322, 26933), 'linear_vrnn.compute_loss', 'linear_vrnn.compute_loss', (['input_1[_INPUT_ID_NAME]', 'input_2[_INPUT_ID_NAME]', 'input_1[_INPUT_MASK_NAME]', 'input_2[_INPUT_MASK_NAME]', 'model_outputs'], {'latent_label_id': 'label_id', 'latent_label_mask': 'label_mask', 'word_weights': 'word_weights', 'with_bpr': 'config.with_bpr', 'kl_loss_weight': 'config.kl_loss_weight', 'with_bow': 'config.with_bow', 'bow_loss_weight': 'config.bow_loss_weight', 'num_latent_states': 'num_latent_states', 'classification_loss_weight': 'config.classification_loss_weight', 'psl_constraint_model': 'psl_model', 'psl_inputs': 'psl_inputs', 'psl_constraint_loss_weight': 'config.psl_constraint_learning_weight'}), '(input_1[_INPUT_ID_NAME], input_2[_INPUT_ID_NAME],\n input_1[_INPUT_MASK_NAME], input_2[_INPUT_MASK_NAME], model_outputs,\n latent_label_id=label_id, latent_label_mask=label_mask, word_weights=\n word_weights, with_bpr=config.with_bpr, kl_loss_weight=config.\n kl_loss_weight, 
with_bow=config.with_bow, bow_loss_weight=config.\n bow_loss_weight, num_latent_states=num_latent_states,\n classification_loss_weight=config.classification_loss_weight,\n psl_constraint_model=psl_model, psl_inputs=psl_inputs,\n psl_constraint_loss_weight=config.psl_constraint_learning_weight)\n', (26346, 26933), False, 'import linear_vrnn\n'), ((27871, 27959), 'tensorflow.ensure_shape', 'tf.ensure_shape', (['psl_inputs', '(batch_size, psl_inputs.shape[1], psl_inputs.shape[2])'], {}), '(psl_inputs, (batch_size, psl_inputs.shape[1], psl_inputs.\n shape[2]))\n', (27886, 27959), True, 'import tensorflow as tf\n'), ((29709, 29797), 'tensorflow.ensure_shape', 'tf.ensure_shape', (['psl_inputs', '(batch_size, psl_inputs.shape[1], psl_inputs.shape[2])'], {}), '(psl_inputs, (batch_size, psl_inputs.shape[1], psl_inputs.\n shape[2]))\n', (29724, 29797), True, 'import tensorflow as tf\n'), ((29823, 30018), 'psl_utils.update_logits', 'psl_utils.update_logits', (['model', 'psl_optimizer', 'model_inputs', 'linear_vrnn.get_logits', 'psl_model', 'psl_inputs', 'config.psl_constraint_inference_steps', 'config.psl_constraint_inference_weight'], {}), '(model, psl_optimizer, model_inputs, linear_vrnn.\n get_logits, psl_model, psl_inputs, config.\n psl_constraint_inference_steps, config.psl_constraint_inference_weight)\n', (29846, 30018), False, 'import psl_utils\n'), ((30202, 30239), 'linear_vrnn.get_logits', 'linear_vrnn.get_logits', (['model_outputs'], {}), '(model_outputs)\n', (30224, 30239), False, 'import linear_vrnn\n'), ((32808, 32859), 'absl.logging.info', 'logging.info', (['"""Testing on dataset %s"""', 'dataset_name'], {}), "('Testing on dataset %s', dataset_name)\n", (32820, 32859), False, 'from absl import logging\n'), ((32868, 32924), 'absl.logging.info', 'logging.info', (['"""Starting to run eval at epoch: %s"""', 'epoch'], {}), "('Starting to run eval at epoch: %s', epoch)\n", (32880, 32924), False, 'from absl import logging\n'), ((33128, 33181), 'absl.logging.info', 
'logging.info', (['"""Done with testing on %s"""', 'dataset_name'], {}), "('Done with testing on %s', dataset_name)\n", (33140, 33181), False, 'from absl import logging\n'), ((34049, 34099), 'data_utils.get_dataset_num_domains', 'data_utils.get_dataset_num_domains', (['config.dataset'], {}), '(config.dataset)\n', (34083, 34099), False, 'import data_utils\n'), ((19570, 19624), 'os.path.join', 'os.path.join', (['model_dir', '"""labeled_dialog_turn_ids.txt"""'], {}), "(model_dir, 'labeled_dialog_turn_ids.txt')\n", (19582, 19624), False, 'import os\n'), ((33022, 33069), 'tensorflow.cast', 'tf.cast', (['steps_per_eval[dataset_name]', 'tf.int32'], {}), '(steps_per_eval[dataset_name], tf.int32)\n', (33029, 33069), True, 'import tensorflow as tf\n'), ((34978, 35021), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['name', 'result'], {'step': 'epoch'}), '(name, result, step=epoch)\n', (34995, 35021), True, 'import tensorflow as tf\n'), ((36795, 36905), 'absl.logging.warn', 'logging.warn', (['"""Error encountered during experiment: %s. Will now try to recover."""', 'err'], {'exc_info': '(True)'}), "(\n 'Error encountered during experiment: %s. Will now try to recover.',\n err, exc_info=True)\n", (36807, 36905), False, 'from absl import logging\n'), ((35722, 35765), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['name', 'result'], {'step': 'epoch'}), '(name, result, step=epoch)\n', (35739, 35765), True, 'import tensorflow as tf\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
import sys
import glob
import random
import numpy as np
import cv2
import uuid
import tensorflow as tf
import argparse
# Command-line interface: source folder, destination folder and the square
# output resolution for the warped images.
parser = argparse.ArgumentParser()
parser.add_argument("--input_dir", help="path to folder containing images")
parser.add_argument("--output_dir", help="where to p")
parser.add_argument("--img_size", help="")
# export options
a = parser.parse_args()
input_dir=a.input_dir
img_size=int(a.img_size)  # side length of the square output image, in pixels
mirror_file=None  # optional CSV of mirror indices; disabled in this script
output_dir=a.output_dir
repeat=1  # number of passes over the dataset made by _input_fn
BATCH_SIZE = 128  # prefetch buffer size used by _input_fn
def getAffine(From, To):
    """Estimate the 2-D similarity transform mapping ``From`` onto ``To``.

    Solves, in the least-squares sense, for a rotation+uniform-scale matrix
    ``R = [[a, b], [-b, a]]`` and a translation ``T`` such that
    ``To ~= From @ R + T``.

    Args:
        From: (N, 2) array-like of source points.
        To: (N, 2) array-like of destination points.

    Returns:
        Tuple ``(R, T)`` with ``R`` a (2, 2) ndarray and ``T`` a (2,) ndarray.
    """
    From = np.asarray(From, dtype=float)
    To = np.asarray(To, dtype=float)
    FromMean = np.mean(From, axis=0)
    ToMean = np.mean(To, axis=0)
    FromCentralized = From - FromMean
    ToCentralized = To - ToMean
    DotResult = np.dot(FromCentralized.flatten(), ToCentralized.flatten())
    NormPow2 = np.linalg.norm(FromCentralized) ** 2
    a = DotResult / NormPow2
    # z-component of the per-point 2-D cross products, written explicitly:
    # np.cross on pairs of 2-D vectors is deprecated as of NumPy 2.0.
    cross_z = (FromCentralized[:, 0] * ToCentralized[:, 1]
               - FromCentralized[:, 1] * ToCentralized[:, 0])
    b = np.sum(cross_z) / NormPow2
    R = np.array([[a, b], [-b, a]])
    T = ToMean - np.dot(FromMean, R)
    return R, T
def _load_data(imagepath, ptspath):
    """Load one image and its landmark file, rescale both to ``img_size``,
    and write the warped image / rescaled points into ``output_dir``.

    Args:
        imagepath: bytes path to the image (tf.py_func hands paths as bytes).
        ptspath: bytes path to the whitespace-separated landmark file.

    Returns:
        Tuple ``(img, pts)``: the warped uint8 image of size
        (img_size, img_size[, C]) and the float32 landmarks scaled to it.

    Raises:
        IOError: if the image file cannot be read.
    """
    srcpts = np.genfromtxt(ptspath.decode())
    img = cv2.imread(imagepath.decode(), cv2.IMREAD_UNCHANGED)
    if img is None:
        # cv2.imread signals failure by returning None instead of raising.
        raise IOError("cannot read image: %s" % imagepath.decode())
    # shape is (h, w, c) for color images and (h, w) for grayscale.
    height, width = img.shape[:2]
    # NOTE(review): both coordinates are normalized by height only — matches
    # the original behavior; confirm it is intended for non-square images.
    pts = srcpts / height
    pts = pts * img_size
    # Similarity transform that maps the original landmarks onto the scaled
    # ones; applied to the image so pixels and points stay aligned.
    R, T = getAffine(srcpts, pts)
    M = np.zeros((2, 3), dtype=np.float32)
    M[0:2, 0:2] = R.T
    M[:, 2] = T
    img = cv2.warpAffine(img, M, (img_size, img_size))
    _, filename = os.path.split(imagepath.decode())
    filename, _ = os.path.splitext(filename)
    cv2.imwrite(os.path.join(output_dir, filename + '_pred.png'), img)
    np.savetxt(os.path.join(output_dir, filename + '_pred.pts'), pts,
               delimiter=', ', fmt='%.1f')
    return img, pts.astype(np.float32)
def _input_fn(img, pts):
    """Build a tf.data pipeline yielding (image, landmarks) pairs.

    Args:
        img: list of image file paths.
        pts: list of matching landmark (.pts) file paths.

    Returns:
        A tf.data.Dataset of (uint8 image, float32 points) tuples.
    """
    path_pairs = tf.data.Dataset.zip((
        tf.data.Dataset.from_tensor_slices(img),
        tf.data.Dataset.from_tensor_slices(pts),
    ))
    path_pairs = path_pairs.prefetch(BATCH_SIZE)
    path_pairs = path_pairs.repeat(repeat)

    def _decode(imagepath, ptspath):
        # tf.py_func drops back into plain Python/OpenCV for the real I/O.
        return tuple(tf.py_func(_load_data, [imagepath, ptspath],
                                [tf.uint8, tf.float32]))

    decoded = path_pairs.map(_decode, num_parallel_calls=8)
    return decoded.prefetch(1)
def _get_filenames(data_dir, listext):
imagelist = []
for ext in listext:
p = os.path.join(data_dir, ext)
imagelist.extend(glob.glob(p))
ptslist = []
for image in imagelist:
ptslist.append(os.path.splitext(image)[0] + ".pts")
return imagelist, ptslist
def main(argv):
    """Script entry point: build the dataset pipeline over ``input_dir`` and
    drain it once, which triggers ``_load_data``'s side effects (writing the
    warped images and rescaled .pts files into ``output_dir``)."""
    imagenames, ptsnames = _get_filenames(input_dir, ["*.jpg", "*.png"])
    print('***********************************')
    print(input_dir)
    print('***********************************')
    #print(imagenames)
    # NOTE(review): mirror_array is computed but never used below (and
    # mirror_file is hard-coded to None above, so the else branch runs).
    mirror_array = np.genfromtxt(mirror_file, dtype=int, delimiter=',') if mirror_file else np.zeros(1)
    dataset = _input_fn(imagenames,ptsnames)
    print('***********************************')
    print(dataset)
    # TF1-style one-shot iterator; each sess.run() pulls the next element.
    next_element = dataset.make_one_shot_iterator().get_next()
    print('***********************************')
    print(dataset)
    img_list = []
    pts_list = []
    with tf.Session() as sess:
        count = 0  # NOTE(review): never incremented or read afterwards
        while True:
            try:
                img,pts = sess.run(next_element)
                img_list.append(img)
                pts_list.append(pts)
            except tf.errors.OutOfRangeError:
                # Raised once the finite (repeat=1) dataset is exhausted.
                print("end")
                break
if __name__ == "__main__":
    # Route execution through tf.app.run so TF/absl flags are handled
    # before main(argv) is invoked.
    tf.logging.set_verbosity(tf.logging.INFO)
    print(sys.argv)
    tf.app.run(argv=sys.argv)
"argparse.ArgumentParser",
"tensorflow.logging.set_verbosity",
"cv2.warpAffine",
"numpy.mean",
"numpy.linalg.norm",
"numpy.sin",
"glob.glob",
"os.path.join",
"numpy.genfromtxt",
"tensorflow.app.run",
"numpy.cross",
"tensorflow.Session",
"uuid.uuid1",
"tensorflow.data.Dataset.zip",
"numpy... | [((263, 288), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (286, 288), False, 'import argparse\n'), ((665, 686), 'numpy.mean', 'np.mean', (['From'], {'axis': '(0)'}), '(From, axis=0)\n', (672, 686), True, 'import numpy as np\n'), ((700, 719), 'numpy.mean', 'np.mean', (['To'], {'axis': '(0)'}), '(To, axis=0)\n', (707, 719), True, 'import numpy as np\n'), ((895, 923), 'numpy.dot', 'np.dot', (['FromVector', 'ToVector'], {}), '(FromVector, ToVector)\n', (901, 923), True, 'import numpy as np\n'), ((1083, 1110), 'numpy.array', 'np.array', (['[[a, b], [-b, a]]'], {}), '([[a, b], [-b, a]])\n', (1091, 1110), True, 'import numpy as np\n'), ((2028, 2062), 'numpy.zeros', 'np.zeros', (['(2, 3)'], {'dtype': 'np.float32'}), '((2, 3), dtype=np.float32)\n', (2036, 2062), True, 'import numpy as np\n'), ((2111, 2155), 'cv2.warpAffine', 'cv2.warpAffine', (['img', 'M', '(img_size, img_size)'], {}), '(img, M, (img_size, img_size))\n', (2125, 2155), False, 'import cv2\n'), ((2226, 2252), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (2242, 2252), False, 'import os\n'), ((2534, 2573), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['img'], {}), '(img)\n', (2568, 2573), True, 'import tensorflow as tf\n'), ((2592, 2631), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['pts'], {}), '(pts)\n', (2626, 2631), True, 'import tensorflow as tf\n'), ((2646, 2695), 'tensorflow.data.Dataset.zip', 'tf.data.Dataset.zip', (['(dataset_image, dataset_pts)'], {}), '((dataset_image, dataset_pts))\n', (2665, 2695), True, 'import tensorflow as tf\n'), ((4313, 4354), 'tensorflow.logging.set_verbosity', 'tf.logging.set_verbosity', (['tf.logging.INFO'], {}), '(tf.logging.INFO)\n', (4337, 4354), True, 'import tensorflow as tf\n'), ((4379, 4404), 'tensorflow.app.run', 'tf.app.run', ([], {'argv': 'sys.argv'}), '(argv=sys.argv)\n', (4389, 4404), True, 'import tensorflow as 
tf\n'), ((939, 970), 'numpy.linalg.norm', 'np.linalg.norm', (['FromCentralized'], {}), '(FromCentralized)\n', (953, 970), True, 'import numpy as np\n'), ((1128, 1147), 'numpy.dot', 'np.dot', (['FromMean', 'R'], {}), '(FromMean, R)\n', (1134, 1147), True, 'import numpy as np\n'), ((2268, 2280), 'uuid.uuid1', 'uuid.uuid1', ([], {}), '()\n', (2278, 2280), False, 'import uuid\n'), ((2300, 2348), 'os.path.join', 'os.path.join', (['output_dir', "(filename + '_pred.png')"], {}), "(output_dir, filename + '_pred.png')\n", (2312, 2348), False, 'import os\n'), ((2368, 2416), 'os.path.join', 'os.path.join', (['output_dir', "(filename + '_pred.pts')"], {}), "(output_dir, filename + '_pred.pts')\n", (2380, 2416), False, 'import os\n'), ((3125, 3152), 'os.path.join', 'os.path.join', (['data_dir', 'ext'], {}), '(data_dir, ext)\n', (3137, 3152), False, 'import os\n'), ((3580, 3632), 'numpy.genfromtxt', 'np.genfromtxt', (['mirror_file'], {'dtype': 'int', 'delimiter': '""","""'}), "(mirror_file, dtype=int, delimiter=',')\n", (3593, 3632), True, 'import numpy as np\n'), ((3653, 3664), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (3661, 3664), True, 'import numpy as np\n'), ((3965, 3977), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (3975, 3977), True, 'import tensorflow as tf\n'), ((1021, 1061), 'numpy.cross', 'np.cross', (['FromCentralized', 'ToCentralized'], {}), '(FromCentralized, ToCentralized)\n', (1029, 1061), True, 'import numpy as np\n'), ((3178, 3190), 'glob.glob', 'glob.glob', (['p'], {}), '(p)\n', (3187, 3190), False, 'import glob\n'), ((2836, 2904), 'tensorflow.py_func', 'tf.py_func', (['_load_data', '[imagepath, ptspath]', '[tf.uint8, tf.float32]'], {}), '(_load_data, [imagepath, ptspath], [tf.uint8, tf.float32])\n', (2846, 2904), True, 'import tensorflow as tf\n'), ((1292, 1303), 'numpy.cos', 'np.cos', (['rad'], {}), '(rad)\n', (1298, 1303), True, 'import numpy as np\n'), ((1305, 1316), 'numpy.sin', 'np.sin', (['rad'], {}), '(rad)\n', (1311, 1316), True, 
'import numpy as np\n'), ((1334, 1345), 'numpy.cos', 'np.cos', (['rad'], {}), '(rad)\n', (1340, 1345), True, 'import numpy as np\n'), ((3261, 3284), 'os.path.splitext', 'os.path.splitext', (['image'], {}), '(image)\n', (3277, 3284), False, 'import os\n'), ((1321, 1332), 'numpy.sin', 'np.sin', (['rad'], {}), '(rad)\n', (1327, 1332), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
@authors: PIE n°7 group - ISAE Supaero - 2020/2021
Description : This script intends to provide the current aerodynamic coefficients
of the plane, which depends on the current state vector of the plane, the pilot
commands, and some intrinsic parameters of the plane.
Source : JSBSim
"""
import numpy as np
def plane_data_fct(plane_position, plane_orientation,
    plane_speed, plane_angular_speed,atmospheric_parameters,plane_intrinsic_data,
    wind, pilot_data, plane_TAS_before_update, plane_TAS_vector_before_update,
    alpha, beta,plane_mach):
    """
    Compute the current aerodynamic coefficients and engine thrust of the
    plane from its state, the pilot commands and its intrinsic data.

    Inputs: -plane_position: vector 3*1 [x, y, z]'
            -plane_orientation: vector 3*1
            -plane_speed: vector 3*1 [vx, vy, vz]'
            -plane_angular_speed: vector 3*1, stored as [yaw, roll, pitch] rates
            -atmospheric_parameters: index 4 must hold the air density [kg/m^3]
            -plane_intrinsic_data: coefficient dict based on aircraft file
            -wind: vector 3*1 [vx, vy, vz]'
            -pilot_data: vector 4*1 [Throttle, rudder, aileron, elevator]',
             each component expected in [0, 10]
            -true air speed: module (plane_TAS_before_update) and vector
            -aerodynamic angles: alpha and beta [rad]
            -plane_mach: Mach number (must be subsonic, see Prandtl factor)
    Outputs (in return order):
            -Lift coefficient: cL
            -Drag coefficient: cd
            -Side force coefficient: cy
            -Rolling moment coefficient: cl
            -Pitching moment coefficient: cm
            -Yawing moment coefficient: cn
            -thrust [N]
    """
    d2r = np.pi / 180
    # Speed of the airplane in the air (true airspeed module)
    v_air_mod = plane_TAS_before_update
    # Normalization factors for the contribution of the rotation rates
    b2v = plane_intrinsic_data['span'] / (2 * v_air_mod)
    c_bar2v = plane_intrinsic_data['chord'] / (2 * v_air_mod)
    # Angular rates (note the storage order of plane_angular_speed)
    q = plane_angular_speed[2]  # pitch rate angular speed
    p = plane_angular_speed[1]  # roll rate angular speed
    r = plane_angular_speed[0]  # yaw rate angular speed
    # Absolute control-surface deflections [rad]; pilot inputs are on a
    # 0..10 scale, hence the division by 10.
    de = pilot_data[3] / 10 * plane_intrinsic_data['de_max'] * d2r  # elevator
    da = pilot_data[2] / 10 * plane_intrinsic_data['da_max'] * d2r  # aileron
    dr = pilot_data[1] / 10 * plane_intrinsic_data['dr_max'] * d2r  # rudder
    dthrust=pilot_data[0]/10  # throttle fraction in [0, 1]
    # Prandtl-Glauert compressibility correction.
    # NOTE(review): diverges at Mach 1 and is NaN beyond — only valid
    # subsonic; callers must guarantee plane_mach < 1.
    prandtl=1/np.sqrt(1-plane_mach**2)
    ### Force coefficients
    # Lift coefficient
    cL = (plane_intrinsic_data['CL_0'] + plane_intrinsic_data['CL_a'] * alpha + plane_intrinsic_data['CL_q'] * q * c_bar2v + plane_intrinsic_data['CL_de'] * de)*prandtl
    # Drag coefficient (parabolic polar: CD0 + k*CL^2, plus elevator drag)
    cd = (plane_intrinsic_data['CD_0'] + plane_intrinsic_data['induced_drag_factor'] * cL ** 2 + plane_intrinsic_data['CD_de'] * de)*prandtl
    # Side force coefficient
    cy = (plane_intrinsic_data['CY_0'] + plane_intrinsic_data['CY_beta'] * beta + (plane_intrinsic_data['CY_p'] * p + plane_intrinsic_data['CY_r'] * r) * b2v + plane_intrinsic_data['CY_dr'] * dr)*prandtl
    # Moment coefficients
    # Pitching moment
    cm = (plane_intrinsic_data['Cm_0'] + plane_intrinsic_data['Cm_a'] * alpha + plane_intrinsic_data['Cm_q'] * c_bar2v * q + plane_intrinsic_data['Cm_de'] * de)*prandtl
    # Rolling moment
    cl = (plane_intrinsic_data['Cl_0'] + plane_intrinsic_data['Cl_da'] * da + plane_intrinsic_data['Cl_beta'] * beta + (
        plane_intrinsic_data['Cl_r'] * r + plane_intrinsic_data['Cl_p'] * p) * b2v * plane_intrinsic_data['Cl_dr'] * dr)*prandtl
    # Yawing moment
    cn = (plane_intrinsic_data['Cn_0'] + plane_intrinsic_data['Cn_beta'] * beta + (plane_intrinsic_data['Cn_p'] * p + plane_intrinsic_data['Cn_r'] * r) * b2v +
          plane_intrinsic_data['Cn_da'] * da + plane_intrinsic_data['Cn_dr'] * dr)*prandtl
    ### Thrust: static thrust scaled by throttle, density ratio and an
    ### altitude decay term (z in plane_position[2]).
    air_density=atmospheric_parameters[4]
    thrust=dthrust*plane_intrinsic_data['static_thrust']*(air_density/1.225)*(1-np.exp((plane_position[2]-18000)/2000))
    return [cL, cd, cy, cl, cm, cn, thrust]
| [
"numpy.exp",
"numpy.sqrt"
] | [((2363, 2391), 'numpy.sqrt', 'np.sqrt', (['(1 - plane_mach ** 2)'], {}), '(1 - plane_mach ** 2)\n', (2370, 2391), True, 'import numpy as np\n'), ((3922, 3964), 'numpy.exp', 'np.exp', (['((plane_position[2] - 18000) / 2000)'], {}), '((plane_position[2] - 18000) / 2000)\n', (3928, 3964), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.