code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
import numpy as np
from scipy.sparse import data
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score, mean_squared_error
import pandas as pd
from sklearn.linear_model import LinearRegression
from wordcloud import WordCloud, STOPWORDS
import matplotlib.pyplot as plt
# NOTE: this model does not work because it does not take into accont the time series
class model:
    """
    Multiple-linear-regression model that tries to predict a company's stock
    price from news-sentiment features (polarity, subjectivity).
    """

    def __init__(self, ticker):
        """
        Load the Reuters news dataset and keep only the rows for *ticker*.

        :param ticker: the ticker of the company
        :type ticker: str
        """
        self.ticker = ticker
        self.all_data = pd.read_csv("csv_data/reuters_news.csv", low_memory=False)
        print(self.all_data)
        # Restrict to the requested company, then drop incomplete rows.
        ticker_rows = self.all_data.loc[self.all_data["ticker"] == self.ticker]
        self.data = ticker_rows.dropna(axis=0)
        # TODO: test if there is an information about the ticker (if self.data is empty)

    def _setup_data(self):
        """Split the sentiment features and the price target 80/20 (fixed seed)."""
        features = self.data[["polarity", "subjectivity"]]
        target = self.data["prices"]
        parts = train_test_split(features, target, test_size=0.2, random_state=42)
        # train_test_split returns (x_train, x_test, y_train, y_test) in order.
        return dict(zip(("x_train", "x_test", "y_train", "y_test"), parts))

    def predict(self):
        """
        Fit a linear regression on the training split and return the test-set
        prediction together with its quality metrics (R^2, MSE, RMSE).
        """
        split = self._setup_data()
        regressor = LinearRegression()
        regressor.fit(split["x_train"], split["y_train"])
        y_prediction = regressor.predict(split["x_test"])
        mse = mean_squared_error(split["y_test"], y_prediction)
        return {
            "y_predict": y_prediction,
            "score": r2_score(split["y_test"], y_prediction),
            "mean_sqrd_error": mse,
            "root_mean_sqrd_error": np.sqrt(mse),
        }

    def show_word_cloud(self, title=None):
        """Render a word cloud of the news descriptions for this ticker."""
        text = str(self.data["description"])
        cloud = WordCloud(
            background_color="white",
            stopwords=set(STOPWORDS),
            max_words=200000,
            max_font_size=40,
            scale=3,
            random_state=1,
        ).generate(text)
        fig = plt.figure(1, figsize=(6, 6))
        plt.axis("off")
        if title:
            fig.suptitle(title, fontsize=20)
            fig.subplots_adjust(top=2.3)
        plt.imshow(cloud)  # type:ignore
        plt.show()
if __name__ == "__main__":
    # Smoke test: fit the model on AAPL news sentiment and print the R^2 score.
    p = model("AAPL")
    print(p.predict()["score"])
| [
"matplotlib.pyplot.imshow",
"numpy.sqrt",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"sklearn.metrics.mean_squared_error",
"wordcloud.WordCloud",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.axis",
"sklearn.metrics.r2_score",
"sklearn.linear_model.LinearRegression",
"matplo... | [((799, 857), 'pandas.read_csv', 'pd.read_csv', (['"""csv_data/reuters_news.csv"""'], {'low_memory': '(False)'}), "('csv_data/reuters_news.csv', low_memory=False)\n", (810, 857), True, 'import pandas as pd\n'), ((1332, 1386), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x', 'y'], {'test_size': '(0.2)', 'random_state': '(42)'}), '(x, y, test_size=0.2, random_state=42)\n', (1348, 1386), False, 'from sklearn.model_selection import train_test_split\n'), ((1704, 1722), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (1720, 1722), False, 'from sklearn.linear_model import LinearRegression\n'), ((1964, 2002), 'sklearn.metrics.r2_score', 'r2_score', (["data['y_test']", 'y_prediction'], {}), "(data['y_test'], y_prediction)\n", (1972, 2002), False, 'from sklearn.metrics import r2_score, mean_squared_error\n'), ((2029, 2077), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (["data['y_test']", 'y_prediction'], {}), "(data['y_test'], y_prediction)\n", (2047, 2077), False, 'from sklearn.metrics import r2_score, mean_squared_error\n'), ((2109, 2133), 'numpy.sqrt', 'np.sqrt', (['mean_sqrd_error'], {}), '(mean_sqrd_error)\n', (2116, 2133), True, 'import numpy as np\n'), ((2710, 2739), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {'figsize': '(6, 6)'}), '(1, figsize=(6, 6))\n', (2720, 2739), True, 'import matplotlib.pyplot as plt\n'), ((2748, 2763), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (2756, 2763), True, 'import matplotlib.pyplot as plt\n'), ((2877, 2898), 'matplotlib.pyplot.imshow', 'plt.imshow', (['wordcloud'], {}), '(wordcloud)\n', (2887, 2898), True, 'import matplotlib.pyplot as plt\n'), ((2922, 2932), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2930, 2932), True, 'import matplotlib.pyplot as plt\n'), ((2474, 2595), 'wordcloud.WordCloud', 'WordCloud', ([], {'background_color': '"""white"""', 'stopwords': 'stopwords', 'max_words': '(200000)', 
'max_font_size': '(40)', 'scale': '(3)', 'random_state': '(1)'}), "(background_color='white', stopwords=stopwords, max_words=200000,\n max_font_size=40, scale=3, random_state=1)\n", (2483, 2595), False, 'from wordcloud import WordCloud, STOPWORDS\n')] |
'''
gui_multilabel.py
Copyright @ 2018 Jiaoyan<<EMAIL>>, Ziqi<<EMAIL>>, Han <<EMAIL>>
License: MIT
'''
from tkinter import *
from control import *
import pandas as pd
import numpy as np
from tkinter import messagebox
from Model import *
def show_result():
    """Collect every widget value, encode it into the model's feature vector
    and display the predicted diagnosis in a message box."""
    # Dummy encodings for the categorical inputs.
    sex_1, sex_2 = set_gender(gender.get())
    race_1, race_2, race_3, race_5 = set_race(race.get())
    esrd_0, esrd_y = set_ESRD(ESRD.get())
    state_flags = set_state(state.get())
    # Chronic-condition indicators, in the exact column order the model expects.
    disease_flags = [
        set_disease(var.get())
        for var in (SP_ALZHDMTA, SP_CHF, SP_CHRNKIDN, SP_CNCR, SP_COPD,
                    SP_DEPRESSN, SP_DIABETES, SP_ISCHMCHT, SP_OSTEOPRS,
                    SP_RA_OA, SP_STRKETIA)
    ]
    # Assemble the feature row: claim count, 11 diseases, age, sex, race,
    # ESRD flags, then the 52 state indicator columns (indices 0-51).
    features = (
        [visiting_time_before.get()]
        + disease_flags
        + [AGE.get(), sex_1, sex_2, race_1, race_2, race_3, race_5, esrd_0, esrd_y]
        + state_flags[:52]
    )
    test_X = np.array(features).reshape(1, -1)
    result = myControl.prediction(test_X)
    messagebox.showinfo("Diagnose", result)
def set_gender(gender):
    """Encode gender as the (male, female) dummy pair."""
    return (1, 0) if gender == "male" else (0, 1)
def set_race(race):
    """Return the four race dummy columns (BENE_RACE_CD_1, _2, _3, _5).

    Any race without its own column -- notably "Native Hawaiian or Other
    Pacific Islander", which the GUI's race OptionMenu offers -- now falls
    through to the all-zero baseline encoding. Previously the function
    returned None for that option, which crashed the 4-tuple unpacking in
    show_result().
    """
    if race == "American Indian or Alaska Native":
        return 1, 0, 0, 0
    if race == "Asian":
        return 0, 1, 0, 0
    if race == "Black or African American":
        return 0, 0, 1, 0
    if race == "White":
        return 0, 0, 0, 1
    # Baseline / other category: no dummy column set.
    return 0, 0, 0, 0
def set_ESRD(ESRD):
    """Encode the end-stage-renal-disease checkbox as a (no, yes) dummy pair."""
    return (1, 0) if ESRD == 0 else (0, 1)
def set_state(state):
    """Return a one-hot indicator list with a 1 at *state*'s mapped index.

    Bug fix: the list used to be hard-coded to 52 slots (indices 0-51), but
    the map assigns "WY" index 52, so selecting Wyoming raised IndexError.
    The list is now sized from the largest index in the map; callers that
    read only indices 0-51 (as show_result does) are unaffected.
    """
    state_map = {"AK" : 0,"AL" : 1,"AR": 2,"AZ" : 3,"CA" : 4,"CO" : 5,"CT" : 6,"DE" : 7,"FL" : 8,\
    "GA" : 9,"GU" : 10,"HI" : 11,"IA" : 12,"ID" : 13,"IL" : 14,"IN" : 15,"KS" : 16,"KY" : 17,\
    "LA" : 18,"MA" : 19,"MD" : 20,"ME" : 21,"MI" : 22,"MN" : 23,"MO" : 24,"MP" : 25,"MS" : 26,"MT" : 27,\
    "NC" : 28,"ND" : 29,"NE" : 30,"NH" : 31,"NJ" : 32,"NM": 33,"NV" :34,"NY" : 35, "OH" : 36,"OK" : 37,\
    "OR" : 38,"PA" : 39,"PR": 40,"RI" : 41,"SC" : 42,"SD" : 43,"TN" : 44,"TX" : 45,"UT" : 45,"VA" : 46,\
    "VI" : 47,"VT" : 48,"WA": 49,"WI": 50,"WV" : 51,"WY" : 52}
    # NOTE(review): "TX" and "UT" both map to 45 -- looks like a data-entry
    # slip; verify against the feature layout the model was trained on.
    statelist = [0] * (max(state_map.values()) + 1)
    statelist[state_map[state]] = 1
    return statelist
def set_disease(sp):
    """Map a checkbox value to the dataset's 1/2 coding (0 -> 1, otherwise 2)."""
    return 1 if sp == 0 else 2
# Load the pre-trained model data and hand it to the controller.
data = np.load('FC_data.npy')
myControl = control(data)
# --- main window and personal-information section ---
master = Tk()
master.title("Prediction of potential disease")
Label(master, text="Personal Information").grid(row=0,sticky=W)
Label(master, text="First Name").grid(row=1,sticky=W)
# NOTE(review): .grid() returns None, so firstname/lastname hold None;
# the name entries are displayed but their values are never read.
firstname = Entry(master).grid(row=1)
Label(master, text="Last Name").grid(row=2, sticky=W)
lastname = Entry(master).grid(row=2)
Label(master, text="Age").grid(row=3, sticky=W)
AGE = Entry(master)
AGE.grid(row=3)
# Categorical inputs: race, gender and state drop-downs.
Label(master, text="Race").grid(row=4, sticky=W)
race = StringVar(master)
race.set("American Indian or Alaska Native") # default value
OptionMenu(master,race, "American Indian or Alaska Native", "Asian","Black or African American", \
"Native Hawaiian or Other Pacific Islander","White").grid(row = 4)
Label(master, text="Gender").grid(row=5, sticky=W)
gender = StringVar(master)
gender.set("male") # default value
OptionMenu(master,gender, "male", "female").grid(row = 5)
Label(master, text="State").grid(row=6, sticky=W)
state = StringVar(master)
state.set("AK") # default value
# NOTE(review): this menu omits "MP" and "PR", although set_state's map
# contains them -- confirm which list is authoritative.
OptionMenu(master,state, \
"AK","AL","AR","AZ","CA","CO","CT","DE","FL","GA","GU","HI","IA","ID","IL","IN","KS","KY", \
"LA","MA","MD","ME","MI","MN","MO","MS","MT","NC","ND","NE","NH","NJ","NM","NV","NY",\
"OH","OK","OR","PA","RI","SC","SD","TN","TX","UT","VA","VI","VT","WA","WI","WV","WY").grid(row = 6)
Label(master, text="How many claims have you made in this year? ").grid(row=7, sticky=W)
visiting_time_before = Entry(master)
visiting_time_before.grid(row=8, sticky=W)
# Chronic-condition checkboxes; each IntVar is read by show_result().
Label(master, text="Check all the symptoms that you're currently experiencing:").grid(row=13, sticky=W)
SP_ALZHDMTA = IntVar()
Checkbutton(master, text="Alzheimer or related disorders or senile", variable=SP_ALZHDMTA).grid(row=14,sticky=W)
SP_CHF = IntVar()
Checkbutton(master, text="Heart Failure", variable=SP_CHF).grid(row=15, sticky=W)
SP_CHRNKIDN = IntVar()
Checkbutton(master, text="Chronic Kidney Disease", variable=SP_CHRNKIDN).grid(row=16, sticky=W)
SP_CNCR = IntVar()
Checkbutton(master, text="Cancer", variable=SP_CNCR).grid(row=17, sticky=W)
SP_COPD = IntVar()
Checkbutton(master, text="Chronic Obstructive Pulmonary Disease", variable=SP_COPD).grid(row=18, sticky=W)
SP_DEPRESSN = IntVar()
Checkbutton(master, text="Depression", variable=SP_DEPRESSN).grid(row=19, sticky=W)
SP_DIABETES = IntVar()
Checkbutton(master, text="Diabetes", variable=SP_DIABETES).grid(row=20, sticky=W)
SP_ISCHMCHT = IntVar()
Checkbutton(master, text="Ischemic Heart Disease", variable=SP_ISCHMCHT).grid(row=21, sticky=W)
SP_OSTEOPRS = IntVar()
Checkbutton(master, text="Osteoporosis", variable=SP_OSTEOPRS).grid(row=22, sticky=W)
SP_RA_OA = IntVar()
Checkbutton(master, text="Rheumatoid arthritis and osteoarthritis (RA/OA)", variable=SP_RA_OA).grid(row=23, sticky=W)
SP_STRKETIA = IntVar()
Checkbutton(master, text="Stroke/transient Ischemic Attack", variable=SP_STRKETIA).grid(row=24, sticky=W)
ESRD = IntVar()
Checkbutton(master, text="End stage renal disease", variable=ESRD).grid(row=25, sticky=W)
# Action buttons: run the prediction or close the window.
Button(master, text='submit', command=show_result).grid(row=26, sticky=W, pady=4)
Button(master, text='quit', command=master.quit).grid(row=26, pady=4)
mainloop( ) | [
"numpy.array",
"numpy.load",
"tkinter.messagebox.showinfo"
] | [((5122, 5144), 'numpy.load', 'np.load', (['"""FC_data.npy"""'], {}), "('FC_data.npy')\n", (5129, 5144), True, 'import numpy as np\n'), ((3964, 4003), 'tkinter.messagebox.showinfo', 'messagebox.showinfo', (['"""Diagnose"""', 'result'], {}), "('Diagnose', result)\n", (3983, 4003), False, 'from tkinter import messagebox\n'), ((3893, 3909), 'numpy.array', 'np.array', (['test_X'], {}), '(test_X)\n', (3901, 3909), True, 'import numpy as np\n')] |
from __future__ import absolute_import
import os.path
import numpy as np
from skimage import draw
import cv2
from scipy.ndimage import filters
from ocrd_modelfactory import page_from_file
from ocrd_models.ocrd_page import to_xml, AlternativeImageType
from ocrd import Processor
from ocrd_utils import (
getLogger,
concat_padded,
coordinates_of_segment,
coordinates_for_segment,
bbox_from_polygon,
points_from_polygon,
xywh_from_points,
MIMETYPE_PAGE
)
from .. import get_ocrd_tool
from . import common
from .ocrolib import midrange
from .common import (
pil2array, array2pil,
# binarize,
compute_line_labels
#borderclean_bin
)
# Key of this processor in the bundled ocrd-tool.json.
TOOL = 'ocrd-cis-ocropy-resegment'
# Module-level logger shared by the free function and the processor class.
LOG = getLogger('processor.OcropyResegment')
# Default image fileGrp used when the output spec names only a PAGE group.
FALLBACK_FILEGRP_IMG = 'OCR-D-IMG-RESEG'
def resegment(line_polygon, region_labels, region_bin, line_id,
              extend_margins=3,
              threshold_relative=0.8, threshold_absolute=50):
    """Reduce line polygon in a labelled region to the largest intersection.

    Given a Numpy array ``line_polygon`` of relative coordinates
    in a region given by a Numpy array ``region_labels`` of numbered
    segments and a Numpy array ``region_bin`` of foreground pixels,
    find the label of the largest segment that intersects the polygon.
    If the number of foreground pixels within that segment is larger
    than ``threshold_absolute`` and if the share of foreground pixels
    within the whole polygon is larger than ``threshold_relative``,
    then compute the contour of that intersection and return it
    as a new polygon. Otherwise, return None.

    If ``extend_margins`` is larger than zero, then extend ``line_polygon``
    by that amount of pixels horizontally and vertically before.
    """
    # rasterize the line polygon (filled interior plus its outline) as a mask:
    line_mask = np.zeros_like(region_labels)
    line_mask[draw.polygon(line_polygon[:,1], line_polygon[:,0], line_mask.shape)] = 1
    line_mask[draw.polygon_perimeter(line_polygon[:,1], line_polygon[:,0], line_mask.shape)] = 1
    #DSAVE('line %s mask' % line_id, line_mask + 0.5 * region_bin)
    # pad line polygon (extend the mask):
    line_mask = filters.maximum_filter(line_mask, 1 + 2 * extend_margins)
    # intersect with region labels
    line_labels = region_labels * line_mask
    if not np.count_nonzero(line_labels):
        LOG.warning('Label mask is empty for line "%s"', line_id)
        return None
    # find the mask of the largest label (in the foreground):
    total_count = np.sum(region_bin * line_mask)
    line_labels_fg = region_bin * line_labels
    if not np.count_nonzero(line_labels_fg):
        LOG.warning('No foreground pixels within line mask for line "%s"', line_id)
        return None
    label_counts = np.bincount(line_labels_fg.flat)
    max_label = np.argmax(label_counts[1:]) + 1
    max_count = label_counts[max_label]
    # total_count > 0 is guaranteed here (line_labels_fg was non-zero),
    # so the relative-share division is safe:
    if (max_count < threshold_absolute and
        max_count / total_count < threshold_relative):
        LOG.info('Largest label (%d) is too small (%d/%d) in line "%s"',
                 max_label, max_count, total_count, line_id)
        return None
    LOG.debug('Black pixels before/after resegment of line "%s" (nlabels=%d): %d/%d',
              line_id, len(label_counts.nonzero()[0]), total_count, max_count)
    line_mask = np.array(line_labels == max_label, np.uint8)
    #DSAVE('line %s mask tight' % line_id, line_mask + 0.5 * region_bin)
    # find outer contour (parts):
    contours, _ = cv2.findContours(line_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # determine largest part by area:
    contour_areas = [cv2.contourArea(contour) for contour in contours]
    max_contour = np.argmax(contour_areas)
    max_area = contour_areas[max_contour]
    total_area = cv2.contourArea(np.expand_dims(line_polygon, 1))
    # compare areas multiplicatively: equivalent to
    # max_area / total_area < 0.5 * threshold_relative for total_area > 0,
    # but avoids ZeroDivisionError on degenerate (zero-area) input polygons
    if max_area < 0.5 * threshold_relative * total_area:
        # using a different, more conservative threshold here:
        # avoid being overly strict with cropping background,
        # just ensure the contours are not a split of the mask
        LOG.info('Largest label (%d) largest contour (%d) is too small (%d/%d) in line "%s"',
                 max_label, max_contour, max_area, total_area, line_id)
        return None
    contour = contours[max_contour]
    # simplify shape:
    line_polygon = cv2.approxPolyDP(contour, 2, False)[:, 0, ::] # already ordered x,y
    if len(line_polygon) < 4:
        LOG.warning('found no contour of >=4 points for line "%s"', line_id)
        return None
    return line_polygon
class OcropyResegment(Processor):
    """OCR-D processor that refines text-line polygons using an ocropy
    line segmentation of the (binarized) region image."""
    def __init__(self, *args, **kwargs):
        # Inject this processor's tool description and version from the
        # bundled ocrd-tool.json before delegating to the base class.
        self.ocrd_tool = get_ocrd_tool()
        kwargs['ocrd_tool'] = self.ocrd_tool['tools'][TOOL]
        kwargs['version'] = self.ocrd_tool['version']
        super(OcropyResegment, self).__init__(*args, **kwargs)
        if hasattr(self, 'output_file_grp'):
            try:
                # Expect a "PAGE_GRP,IMAGE_GRP" pair; if only one group
                # was given, fall back to the default image fileGrp.
                self.page_grp, self.image_grp = self.output_file_grp.split(',')
            except ValueError:
                self.page_grp = self.output_file_grp
                self.image_grp = FALLBACK_FILEGRP_IMG
                LOG.info("No output file group for images specified, falling back to '%s'", FALLBACK_FILEGRP_IMG)
    def process(self):
        """Resegment lines of the workspace.
        Open and deserialise PAGE input files and their respective images,
        then iterate over the element hierarchy down to the line level.
        Next, get each region image according to the layout annotation (from
        the alternative image of the region, or by cropping via coordinates
        into the higher-level image), binarize it (without deskewing), and
        compute a new line segmentation from that (as a label mask).
        Then for each line within the region, find the label with the largest
        foreground area in the binarized image within the annotated polygon
        (or rectangle) of the line. Unless its relative area is too small,
        or its center is far off, convert that label's mask into a polygon
        outline, intersect with the old polygon, and find the contour of that
        segment. Annotate the result as new coordinates of the line.
        Add a new image file to the workspace with the fileGrp USE given
        in the second position of the output fileGrp, or ``OCR-D-IMG-RESEG``,
        and an ID based on input file and input element.
        Produce a new output file by serialising the resulting hierarchy.
        """
        # This makes best sense for bad/coarse segmentation, like current GT.
        # Most notably, it can convert rectangles to polygons. It depends on
        # a decent line segmentation from ocropy though. So it _should_ ideally
        # be run after deskewing (on the page or region level), and preferably
        # after binarization (on page or region level), because segmentation of
        # both a skewed image or of implicit binarization could be suboptimal,
        # and the explicit binarization after resegmentation could be, too.
        threshold = self.parameter['min_fraction']
        margin = self.parameter['extend_margins']
        for (n, input_file) in enumerate(self.input_files):
            LOG.info("INPUT FILE %i / %s", n, input_file.pageId or input_file.ID)
            # derive the image file ID from the input ID by fileGrp substitution:
            file_id = input_file.ID.replace(self.input_file_grp, self.image_grp)
            if file_id == input_file.ID:
                file_id = concat_padded(self.image_grp, n)
            pcgts = page_from_file(self.workspace.download_file(input_file))
            page_id = pcgts.pcGtsId or input_file.pageId or input_file.ID # (PageType has no id)
            page = pcgts.get_Page()
            page_image, page_xywh, page_image_info = self.workspace.image_from_page(
                page, page_id)
            # derive a zoom factor relative to 300 DPI so the segmenter's
            # scale-dependent parameters fit the page resolution:
            if page_image_info.resolution != 1:
                dpi = page_image_info.resolution
                if page_image_info.resolutionUnit == 'cm':
                    dpi = round(dpi * 2.54)
                LOG.info('Page "%s" uses %d DPI', page_id, dpi)
                zoom = 300.0/dpi
            else:
                zoom = 1
            regions = page.get_TextRegion()
            if not regions:
                LOG.warning('Page "%s" contains no text regions', page_id)
            for region in regions:
                lines = region.get_TextLine()
                if not lines:
                    LOG.warning('Page "%s" region "%s" contains no text lines', page_id, region.id)
                    continue
                if len(lines) == 1:
                    LOG.warning('Page "%s" region "%s" contains only one line', page_id, region.id)
                    continue
                region_image, region_xywh = self.workspace.image_from_segment(
                    region, page_image, page_xywh)
                # ad-hoc binarization:
                region_array = pil2array(region_image)
                region_array, _ = common.binarize(region_array, maxskew=0) # just in case still raw
                region_bin = np.array(region_array <= midrange(region_array), np.uint8)
                try:
                    region_labels, _, _, _ = compute_line_labels(region_array, zoom=zoom)
                except Exception as err:
                    LOG.warning('Cannot line-segment page "%s" region "%s": %s',
                                page_id, region.id, err)
                    # fallback option 1: borderclean
                    # label margins vs interior, but with the interior
                    # extended into the margin by its connected components
                    # to remove noise from neighbouring regions:
                    #region_labels = borderclean_bin(region_bin, margin=round(4/zoom)) + 1
                    # too dangerous, because we risk losing dots from i or punctuation;
                    # fallback option2: only extend_margins
                    # instead, just provide a uniform label, so at least we get
                    # to extend the polygon margins:
                    #region_labels = np.ones_like(region_bin)
                    # fallback option3: keep unchanged
                    continue
                for line in lines:
                    if line.get_AlternativeImage():
                        # get cropped line image:
                        line_image, line_xywh = self.workspace.image_from_segment(
                            line, region_image, region_xywh)
                        LOG.debug("Using AlternativeImage (%s) for line '%s'",
                                  line_xywh['features'], line.id)
                        # crop region arrays accordingly:
                        line_polygon = coordinates_of_segment(line, region_image, region_xywh)
                        line_bbox = bbox_from_polygon(line_polygon)
                        line_labels = region_labels[line_bbox[1]:line_bbox[3],
                                                    line_bbox[0]:line_bbox[2]]
                        line_bin = region_bin[line_bbox[1]:line_bbox[3],
                                              line_bbox[0]:line_bbox[2]]
                        # get polygon in relative (line) coordinates:
                        line_polygon = coordinates_of_segment(line, line_image, line_xywh)
                        line_polygon = resegment(line_polygon, line_labels, line_bin, line.id,
                                                 extend_margins=margin, threshold_relative=threshold)
                        if line_polygon is None:
                            continue # not good enough – keep
                        # convert back to absolute (page) coordinates:
                        line_polygon = coordinates_for_segment(line_polygon, line_image, line_xywh)
                    else:
                        # get polygon in relative (region) coordinates:
                        line_polygon = coordinates_of_segment(line, region_image, region_xywh)
                        line_polygon = resegment(line_polygon, region_labels, region_bin, line.id,
                                                 extend_margins=margin, threshold_relative=threshold)
                        if line_polygon is None:
                            continue # not good enough – keep
                        # convert back to absolute (page) coordinates:
                        line_polygon = coordinates_for_segment(line_polygon, region_image, region_xywh)
                    # annotate result:
                    line.get_Coords().points = points_from_polygon(line_polygon)
                    # create new image:
                    line_image, line_xywh = self.workspace.image_from_segment(
                        line, region_image, region_xywh)
                    # update METS (add the image file):
                    file_path = self.workspace.save_image_file(
                        line_image,
                        file_id=file_id + '_' + region.id + '_' + line.id,
                        page_id=page_id,
                        file_grp=self.image_grp)
                    # update PAGE (reference the image file):
                    line.add_AlternativeImage(AlternativeImageType(
                        filename=file_path,
                        comments=region_xywh['features']))
            # update METS (add the PAGE file):
            file_id = input_file.ID.replace(self.input_file_grp, self.page_grp)
            if file_id == input_file.ID:
                file_id = concat_padded(self.page_grp, n)
            file_path = os.path.join(self.page_grp, file_id + '.xml')
            out = self.workspace.add_file(
                ID=file_id,
                file_grp=self.page_grp,
                pageId=input_file.pageId,
                local_filename=file_path,
                mimetype=MIMETYPE_PAGE,
                content=to_xml(pcgts))
            LOG.info('created file ID: %s, file_grp: %s, path: %s',
                     file_id, self.page_grp, out.local_filename)
| [
"ocrd_utils.coordinates_of_segment",
"numpy.count_nonzero",
"numpy.array",
"ocrd_utils.coordinates_for_segment",
"cv2.approxPolyDP",
"ocrd_models.ocrd_page.to_xml",
"cv2.contourArea",
"numpy.argmax",
"ocrd_utils.concat_padded",
"scipy.ndimage.filters.maximum_filter",
"skimage.draw.polygon_perime... | [((721, 759), 'ocrd_utils.getLogger', 'getLogger', (['"""processor.OcropyResegment"""'], {}), "('processor.OcropyResegment')\n", (730, 759), False, 'from ocrd_utils import getLogger, concat_padded, coordinates_of_segment, coordinates_for_segment, bbox_from_polygon, points_from_polygon, xywh_from_points, MIMETYPE_PAGE\n'), ((1872, 1900), 'numpy.zeros_like', 'np.zeros_like', (['region_labels'], {}), '(region_labels)\n', (1885, 1900), True, 'import numpy as np\n'), ((2210, 2267), 'scipy.ndimage.filters.maximum_filter', 'filters.maximum_filter', (['line_mask', '(1 + 2 * extend_margins)'], {}), '(line_mask, 1 + 2 * extend_margins)\n', (2232, 2267), False, 'from scipy.ndimage import filters\n'), ((2555, 2585), 'numpy.sum', 'np.sum', (['(region_bin * line_mask)'], {}), '(region_bin * line_mask)\n', (2561, 2585), True, 'import numpy as np\n'), ((2800, 2832), 'numpy.bincount', 'np.bincount', (['line_labels_fg.flat'], {}), '(line_labels_fg.flat)\n', (2811, 2832), True, 'import numpy as np\n'), ((3354, 3398), 'numpy.array', 'np.array', (['(line_labels == max_label)', 'np.uint8'], {}), '(line_labels == max_label, np.uint8)\n', (3362, 3398), True, 'import numpy as np\n'), ((3524, 3595), 'cv2.findContours', 'cv2.findContours', (['line_mask', 'cv2.RETR_EXTERNAL', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(line_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n', (3540, 3595), False, 'import cv2\n'), ((3723, 3747), 'numpy.argmax', 'np.argmax', (['contour_areas'], {}), '(contour_areas)\n', (3732, 3747), True, 'import numpy as np\n'), ((1915, 1984), 'skimage.draw.polygon', 'draw.polygon', (['line_polygon[:, 1]', 'line_polygon[:, 0]', 'line_mask.shape'], {}), '(line_polygon[:, 1], line_polygon[:, 0], line_mask.shape)\n', (1927, 1984), False, 'from skimage import draw\n'), ((2002, 2081), 'skimage.draw.polygon_perimeter', 'draw.polygon_perimeter', (['line_polygon[:, 1]', 'line_polygon[:, 0]', 'line_mask.shape'], {}), '(line_polygon[:, 1], line_polygon[:, 0], 
line_mask.shape)\n', (2024, 2081), False, 'from skimage import draw\n'), ((2358, 2387), 'numpy.count_nonzero', 'np.count_nonzero', (['line_labels'], {}), '(line_labels)\n', (2374, 2387), True, 'import numpy as np\n'), ((2643, 2675), 'numpy.count_nonzero', 'np.count_nonzero', (['line_labels_fg'], {}), '(line_labels_fg)\n', (2659, 2675), True, 'import numpy as np\n'), ((2849, 2876), 'numpy.argmax', 'np.argmax', (['label_counts[1:]'], {}), '(label_counts[1:])\n', (2858, 2876), True, 'import numpy as np\n'), ((3655, 3679), 'cv2.contourArea', 'cv2.contourArea', (['contour'], {}), '(contour)\n', (3670, 3679), False, 'import cv2\n'), ((3823, 3854), 'numpy.expand_dims', 'np.expand_dims', (['line_polygon', '(1)'], {}), '(line_polygon, 1)\n', (3837, 3854), True, 'import numpy as np\n'), ((4364, 4399), 'cv2.approxPolyDP', 'cv2.approxPolyDP', (['contour', '(2)', '(False)'], {}), '(contour, 2, False)\n', (4380, 4399), False, 'import cv2\n'), ((7524, 7556), 'ocrd_utils.concat_padded', 'concat_padded', (['self.image_grp', 'n'], {}), '(self.image_grp, n)\n', (7537, 7556), False, 'from ocrd_utils import getLogger, concat_padded, coordinates_of_segment, coordinates_for_segment, bbox_from_polygon, points_from_polygon, xywh_from_points, MIMETYPE_PAGE\n'), ((13613, 13644), 'ocrd_utils.concat_padded', 'concat_padded', (['self.page_grp', 'n'], {}), '(self.page_grp, n)\n', (13626, 13644), False, 'from ocrd_utils import getLogger, concat_padded, coordinates_of_segment, coordinates_for_segment, bbox_from_polygon, points_from_polygon, xywh_from_points, MIMETYPE_PAGE\n'), ((12642, 12675), 'ocrd_utils.points_from_polygon', 'points_from_polygon', (['line_polygon'], {}), '(line_polygon)\n', (12661, 12675), False, 'from ocrd_utils import getLogger, concat_padded, coordinates_of_segment, coordinates_for_segment, bbox_from_polygon, points_from_polygon, xywh_from_points, MIMETYPE_PAGE\n'), ((13974, 13987), 'ocrd_models.ocrd_page.to_xml', 'to_xml', (['pcgts'], {}), '(pcgts)\n', (13980, 13987), False, 
'from ocrd_models.ocrd_page import to_xml, AlternativeImageType\n'), ((10808, 10863), 'ocrd_utils.coordinates_of_segment', 'coordinates_of_segment', (['line', 'region_image', 'region_xywh'], {}), '(line, region_image, region_xywh)\n', (10830, 10863), False, 'from ocrd_utils import getLogger, concat_padded, coordinates_of_segment, coordinates_for_segment, bbox_from_polygon, points_from_polygon, xywh_from_points, MIMETYPE_PAGE\n'), ((10900, 10931), 'ocrd_utils.bbox_from_polygon', 'bbox_from_polygon', (['line_polygon'], {}), '(line_polygon)\n', (10917, 10931), False, 'from ocrd_utils import getLogger, concat_padded, coordinates_of_segment, coordinates_for_segment, bbox_from_polygon, points_from_polygon, xywh_from_points, MIMETYPE_PAGE\n'), ((11345, 11396), 'ocrd_utils.coordinates_of_segment', 'coordinates_of_segment', (['line', 'line_image', 'line_xywh'], {}), '(line, line_image, line_xywh)\n', (11367, 11396), False, 'from ocrd_utils import getLogger, concat_padded, coordinates_of_segment, coordinates_for_segment, bbox_from_polygon, points_from_polygon, xywh_from_points, MIMETYPE_PAGE\n'), ((11815, 11875), 'ocrd_utils.coordinates_for_segment', 'coordinates_for_segment', (['line_polygon', 'line_image', 'line_xywh'], {}), '(line_polygon, line_image, line_xywh)\n', (11838, 11875), False, 'from ocrd_utils import getLogger, concat_padded, coordinates_of_segment, coordinates_for_segment, bbox_from_polygon, points_from_polygon, xywh_from_points, MIMETYPE_PAGE\n'), ((12013, 12068), 'ocrd_utils.coordinates_of_segment', 'coordinates_of_segment', (['line', 'region_image', 'region_xywh'], {}), '(line, region_image, region_xywh)\n', (12035, 12068), False, 'from ocrd_utils import getLogger, concat_padded, coordinates_of_segment, coordinates_for_segment, bbox_from_polygon, points_from_polygon, xywh_from_points, MIMETYPE_PAGE\n'), ((12491, 12555), 'ocrd_utils.coordinates_for_segment', 'coordinates_for_segment', (['line_polygon', 'region_image', 'region_xywh'], {}), '(line_polygon, 
region_image, region_xywh)\n', (12514, 12555), False, 'from ocrd_utils import getLogger, concat_padded, coordinates_of_segment, coordinates_for_segment, bbox_from_polygon, points_from_polygon, xywh_from_points, MIMETYPE_PAGE\n'), ((13281, 13355), 'ocrd_models.ocrd_page.AlternativeImageType', 'AlternativeImageType', ([], {'filename': 'file_path', 'comments': "region_xywh['features']"}), "(filename=file_path, comments=region_xywh['features'])\n", (13301, 13355), False, 'from ocrd_models.ocrd_page import to_xml, AlternativeImageType\n')] |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import glob
import sys
import argparse
import cv2
from tqdm import tqdm
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torchvision
from torchvision import transforms as pth_transforms
import numpy as np
from PIL import Image
import utils
import vision_transformer as vits
# Map of output container format -> OpenCV fourcc codec code.
FOURCC = {
    "mp4": cv2.VideoWriter_fourcc(*"MP4V"),
    "avi": cv2.VideoWriter_fourcc(*"XVID"),
}
# Run inference on GPU when available, otherwise fall back to CPU.
DEVICE = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
class VideoGenerator:
    """Generate a video of DINO self-attention heatmaps from a video file or
    a folder of frames, or assemble a video from pre-computed attention images.
    """
    def __init__(self, args):
        # ``args`` is the argparse namespace produced by ``parse_args()``.
        self.args = args
        # self.model = None
        # Don't need to load model if you only want a video
        if not self.args.video_only:
            self.model = self.__load_model()
    def run(self):
        """Dispatch on the kind of input path and produce the requested outputs."""
        if self.args.input_path is None:
            print(f"Provided input path {self.args.input_path} is non valid.")
            sys.exit(1)
        else:
            if self.args.video_only:
                # Only assemble a video from already-generated attention images.
                self._generate_video_from_images(
                    self.args.input_path, self.args.output_path
                )
            else:
                # If input path exists
                if os.path.exists(self.args.input_path):
                    # If input is a video file
                    if os.path.isfile(self.args.input_path):
                        frames_folder = os.path.join(self.args.output_path, "frames")
                        attention_folder = os.path.join(
                            self.args.output_path, "attention"
                        )
                        os.makedirs(frames_folder, exist_ok=True)
                        os.makedirs(attention_folder, exist_ok=True)
                        # video -> frames -> attention images -> video
                        self._extract_frames_from_video(
                            self.args.input_path, frames_folder
                        )
                        self._inference(
                            frames_folder,
                            attention_folder,
                        )
                        self._generate_video_from_images(
                            attention_folder, self.args.output_path
                        )
                    # If input is a folder of already extracted frames
                    if os.path.isdir(self.args.input_path):
                        attention_folder = os.path.join(
                            self.args.output_path, "attention"
                        )
                        os.makedirs(attention_folder, exist_ok=True)
                        self._inference(self.args.input_path, attention_folder)
                        self._generate_video_from_images(
                            attention_folder, self.args.output_path
                        )
                # If input path doesn't exists
                else:
                    print(f"Provided input path {self.args.input_path} doesn't exists.")
                    sys.exit(1)
    def _extract_frames_from_video(self, inp: str, out: str):
        """Dump every frame of video ``inp`` as a JPEG into folder ``out``."""
        vidcap = cv2.VideoCapture(inp)
        # Reuse the source video's frame rate when writing the output video later.
        self.args.fps = vidcap.get(cv2.CAP_PROP_FPS)
        print(f"Video: {inp} ({self.args.fps} fps)")
        print(f"Extracting frames to {out}")
        success, image = vidcap.read()
        count = 0
        while success:
            cv2.imwrite(
                os.path.join(out, f"frame-{count:04}.jpg"),
                image,
            )
            success, image = vidcap.read()
            count += 1
        # NOTE(review): ``vidcap`` is never released here — consider vidcap.release().
    def _generate_video_from_images(self, inp: str, out: str):
        """Assemble all ``attn-*.jpg`` images in ``inp`` into a video under ``out``."""
        img_array = []
        attention_images_list = sorted(glob.glob(os.path.join(inp, "attn-*.jpg")))
        # Get size of the first image
        with open(attention_images_list[0], "rb") as f:
            img = Image.open(f)
            img = img.convert("RGB")
            # Video size comes from the first image; later frames are assumed equal.
            size = (img.width, img.height)
            img_array.append(cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR))
        print(f"Generating video {size} to {out}")
        for filename in tqdm(attention_images_list[1:]):
            with open(filename, "rb") as f:
                img = Image.open(f)
                img = img.convert("RGB")
                img_array.append(cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR))
        # ``out`` is rebound from the output folder path to the VideoWriter here.
        out = cv2.VideoWriter(
            os.path.join(out, "video." + self.args.video_format),
            FOURCC[self.args.video_format],
            self.args.fps,
            size,
        )
        for i in range(len(img_array)):
            out.write(img_array[i])
        out.release()
        print("Done")
    def _inference(self, inp: str, out: str):
        """Run the model on every ``*.jpg`` in ``inp`` and save one attention
        heatmap (averaged over heads) per frame into ``out``.
        """
        print(f"Generating attention images to {out}")
        for img_path in tqdm(sorted(glob.glob(os.path.join(inp, "*.jpg")))):
            with open(img_path, "rb") as f:
                img = Image.open(f)
                img = img.convert("RGB")
            if self.args.resize is not None:
                transform = pth_transforms.Compose(
                    [
                        pth_transforms.ToTensor(),
                        pth_transforms.Resize(self.args.resize),
                        pth_transforms.Normalize(
                            (0.485, 0.456, 0.406), (0.229, 0.224, 0.225)
                        ),
                    ]
                )
            else:
                transform = pth_transforms.Compose(
                    [
                        pth_transforms.ToTensor(),
                        pth_transforms.Normalize(
                            (0.485, 0.456, 0.406), (0.229, 0.224, 0.225)
                        ),
                    ]
                )
            img = transform(img)
            # make the image divisible by the patch size
            w, h = (
                img.shape[1] - img.shape[1] % self.args.patch_size,
                img.shape[2] - img.shape[2] % self.args.patch_size,
            )
            img = img[:, :w, :h].unsqueeze(0)
            # Attention-map resolution measured in patches.
            w_featmap = img.shape[-2] // self.args.patch_size
            h_featmap = img.shape[-1] // self.args.patch_size
            attentions = self.model.get_last_selfattention(img.to(DEVICE))
            nh = attentions.shape[1]  # number of head
            # we keep only the output patch attention
            attentions = attentions[0, :, 0, 1:].reshape(nh, -1)
            # we keep only a certain percentage of the mass
            val, idx = torch.sort(attentions)
            val /= torch.sum(val, dim=1, keepdim=True)
            cumval = torch.cumsum(val, dim=1)
            th_attn = cumval > (1 - self.args.threshold)
            idx2 = torch.argsort(idx)
            # Undo the sort so the thresholded mask lines up with the original patches.
            for head in range(nh):
                th_attn[head] = th_attn[head][idx2[head]]
            th_attn = th_attn.reshape(nh, w_featmap, h_featmap).float()
            # interpolate
            th_attn = (
                nn.functional.interpolate(
                    th_attn.unsqueeze(0),
                    scale_factor=self.args.patch_size,
                    mode="nearest",
                )[0]
                .cpu()
                .numpy()
            )
            attentions = attentions.reshape(nh, w_featmap, h_featmap)
            attentions = (
                nn.functional.interpolate(
                    attentions.unsqueeze(0),
                    scale_factor=self.args.patch_size,
                    mode="nearest",
                )[0]
                .cpu()
                .numpy()
            )
            # save attentions heatmaps
            fname = os.path.join(out, "attn-" + os.path.basename(img_path))
            plt.imsave(
                fname=fname,
                arr=sum(
                    attentions[i] * 1 / attentions.shape[0]
                    for i in range(attentions.shape[0])
                ),
                cmap="inferno",
                format="jpg",
            )
    def __load_model(self):
        """Build the ViT backbone and load pretrained DINO weights onto DEVICE."""
        # build model
        model = vits.__dict__[self.args.arch](
            patch_size=self.args.patch_size, num_classes=0
        )
        for p in model.parameters():
            p.requires_grad = False
        model.eval()
        model.to(DEVICE)
        if os.path.isfile(self.args.pretrained_weights):
            state_dict = torch.load(self.args.pretrained_weights, map_location="cpu")
            if (
                self.args.checkpoint_key is not None
                and self.args.checkpoint_key in state_dict
            ):
                print(
                    f"Take key {self.args.checkpoint_key} in provided checkpoint dict"
                )
                state_dict = state_dict[self.args.checkpoint_key]
            # Drop any "module." prefixes (e.g. from DataParallel checkpoints)
            # so keys match the bare model.
            state_dict = {k.replace("module.", ""): v for k, v in state_dict.items()}
            msg = model.load_state_dict(state_dict, strict=False)
            print(
                "Pretrained weights found at {} and loaded with msg: {}".format(
                    self.args.pretrained_weights, msg
                )
            )
        else:
            print(
                "Please use the `--pretrained_weights` argument to indicate the path of the checkpoint to evaluate."
            )
            url = None
            if self.args.arch == "deit_small" and self.args.patch_size == 16:
                url = "dino_deitsmall16_pretrain/dino_deitsmall16_pretrain.pth"
            elif self.args.arch == "deit_small" and self.args.patch_size == 8:
                url = "dino_deitsmall8_300ep_pretrain/dino_deitsmall8_300ep_pretrain.pth"  # model used for visualizations in our paper
            elif self.args.arch == "vit_base" and self.args.patch_size == 16:
                url = "dino_vitbase16_pretrain/dino_vitbase16_pretrain.pth"
            elif self.args.arch == "vit_base" and self.args.patch_size == 8:
                url = "dino_vitbase8_pretrain/dino_vitbase8_pretrain.pth"
            if url is not None:
                print(
                    "Since no pretrained weights have been provided, we load the reference pretrained DINO weights."
                )
                state_dict = torch.hub.load_state_dict_from_url(
                    url="https://dl.fbaipublicfiles.com/dino/" + url
                )
                model.load_state_dict(state_dict, strict=True)
            else:
                print(
                    "There is no reference weights available for this model => We use random weights."
                )
        return model
def parse_args():
    """Build the command-line interface and parse ``sys.argv`` into a namespace."""
    ap = argparse.ArgumentParser("Generation self-attention video")
    # Model configuration.
    ap.add_argument("--arch", default="deit_small", type=str,
                    choices=["deit_tiny", "deit_small", "vit_base"],
                    help="Architecture (support only ViT atm).")
    ap.add_argument("--patch_size", default=8, type=int,
                    help="Patch resolution of the self.model.")
    ap.add_argument("--pretrained_weights", default="", type=str,
                    help="Path to pretrained weights to load.")
    ap.add_argument("--checkpoint_key", default="teacher", type=str,
                    help='Key to use in the checkpoint (example: "teacher")')
    # Input / output locations.
    ap.add_argument(
        "--input_path",
        required=True,
        type=str,
        help="""Path to a video file if you want to extract frames
            or to a folder of images already extracted by yourself.
            or to a folder of attention images.""",
    )
    ap.add_argument(
        "--output_path",
        default="./",
        type=str,
        help="""Path to store a folder of frames and / or a folder of attention images.
        and / or a final video. Default to current directory.""",
    )
    # Attention post-processing options.
    ap.add_argument(
        "--threshold",
        type=float,
        default=0.6,
        help="""We visualize masks
        obtained by thresholding the self-attention maps to keep xx percent of the mass.""",
    )
    ap.add_argument(
        "--resize",
        default=None,
        type=int,
        nargs="+",
        help="""Apply a resize transformation to input image(s). Use if OOM error.
        Usage (single or W H): --resize 512, --resize 720 1280""",
    )
    # Video assembly options.
    ap.add_argument(
        "--video_only",
        action="store_true",
        help="""Use this flag if you only want to generate a video and not all attention images.
            If used, --input_path must be set to the folder of attention images. Ex: ./attention/""",
    )
    ap.add_argument("--fps", default=30.0, type=float,
                    help="FPS of input / output video. Automatically set if you extract frames from a video.")
    ap.add_argument("--video_format", default="mp4", type=str,
                    choices=["mp4", "avi"],
                    help="Format of generated video (mp4 or avi).")
    return ap.parse_args()
# Script entry point: parse CLI arguments and run the generator.
if __name__ == "__main__":
    args = parse_args()
    vg = VideoGenerator(args)
    vg.run()
| [
"numpy.array",
"torch.cuda.is_available",
"torch.sum",
"sys.exit",
"os.path.exists",
"argparse.ArgumentParser",
"torch.hub.load_state_dict_from_url",
"torch.argsort",
"os.path.isdir",
"cv2.VideoWriter_fourcc",
"torchvision.transforms.ToTensor",
"torch.sort",
"os.path.isfile",
"torchvision.... | [((935, 966), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'MP4V'"], {}), "(*'MP4V')\n", (957, 966), False, 'import cv2\n'), ((979, 1010), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'XVID'"], {}), "(*'XVID')\n", (1001, 1010), False, 'import cv2\n'), ((1047, 1072), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1070, 1072), False, 'import torch\n'), ((1023, 1043), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (1035, 1043), False, 'import torch\n'), ((1078, 1097), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (1090, 1097), False, 'import torch\n'), ((11002, 11060), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""Generation self-attention video"""'], {}), "('Generation self-attention video')\n", (11025, 11060), False, 'import argparse\n'), ((3595, 3616), 'cv2.VideoCapture', 'cv2.VideoCapture', (['inp'], {}), '(inp)\n', (3611, 3616), False, 'import cv2\n'), ((4569, 4600), 'tqdm.tqdm', 'tqdm', (['attention_images_list[1:]'], {}), '(attention_images_list[1:])\n', (4573, 4600), False, 'from tqdm import tqdm\n'), ((8708, 8752), 'os.path.isfile', 'os.path.isfile', (['self.args.pretrained_weights'], {}), '(self.args.pretrained_weights)\n', (8722, 8752), False, 'import os\n'), ((1499, 1510), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1507, 1510), False, 'import sys\n'), ((4321, 4334), 'PIL.Image.open', 'Image.open', (['f'], {}), '(f)\n', (4331, 4334), False, 'from PIL import Image\n'), ((4848, 4900), 'os.path.join', 'os.path.join', (['out', "('video.' + self.args.video_format)"], {}), "(out, 'video.' 
+ self.args.video_format)\n", (4860, 4900), False, 'import os\n'), ((6952, 6974), 'torch.sort', 'torch.sort', (['attentions'], {}), '(attentions)\n', (6962, 6974), False, 'import torch\n'), ((6994, 7029), 'torch.sum', 'torch.sum', (['val'], {'dim': '(1)', 'keepdim': '(True)'}), '(val, dim=1, keepdim=True)\n', (7003, 7029), False, 'import torch\n'), ((7051, 7075), 'torch.cumsum', 'torch.cumsum', (['val'], {'dim': '(1)'}), '(val, dim=1)\n', (7063, 7075), False, 'import torch\n'), ((7152, 7170), 'torch.argsort', 'torch.argsort', (['idx'], {}), '(idx)\n', (7165, 7170), False, 'import torch\n'), ((8779, 8839), 'torch.load', 'torch.load', (['self.args.pretrained_weights'], {'map_location': '"""cpu"""'}), "(self.args.pretrained_weights, map_location='cpu')\n", (8789, 8839), False, 'import torch\n'), ((1770, 1806), 'os.path.exists', 'os.path.exists', (['self.args.input_path'], {}), '(self.args.input_path)\n', (1784, 1806), False, 'import os\n'), ((3891, 3933), 'os.path.join', 'os.path.join', (['out', 'f"""frame-{count:04}.jpg"""'], {}), "(out, f'frame-{count:04}.jpg')\n", (3903, 3933), False, 'import os\n'), ((4174, 4205), 'os.path.join', 'os.path.join', (['inp', '"""attn-*.jpg"""'], {}), "(inp, 'attn-*.jpg')\n", (4186, 4205), False, 'import os\n'), ((4668, 4681), 'PIL.Image.open', 'Image.open', (['f'], {}), '(f)\n', (4678, 4681), False, 'from PIL import Image\n'), ((5368, 5381), 'PIL.Image.open', 'Image.open', (['f'], {}), '(f)\n', (5378, 5381), False, 'from PIL import Image\n'), ((10600, 10689), 'torch.hub.load_state_dict_from_url', 'torch.hub.load_state_dict_from_url', ([], {'url': "('https://dl.fbaipublicfiles.com/dino/' + url)"}), "(url=\n 'https://dl.fbaipublicfiles.com/dino/' + url)\n", (10634, 10689), False, 'import torch\n'), ((1878, 1914), 'os.path.isfile', 'os.path.isfile', (['self.args.input_path'], {}), '(self.args.input_path)\n', (1892, 1914), False, 'import os\n'), ((2837, 2872), 'os.path.isdir', 'os.path.isdir', (['self.args.input_path'], {}), 
'(self.args.input_path)\n', (2850, 2872), False, 'import os\n'), ((3503, 3514), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3511, 3514), False, 'import sys\n'), ((4457, 4470), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (4465, 4470), True, 'import numpy as np\n'), ((5271, 5297), 'os.path.join', 'os.path.join', (['inp', '"""*.jpg"""'], {}), "(inp, '*.jpg')\n", (5283, 5297), False, 'import os\n'), ((8093, 8119), 'os.path.basename', 'os.path.basename', (['img_path'], {}), '(img_path)\n', (8109, 8119), False, 'import os\n'), ((1956, 2001), 'os.path.join', 'os.path.join', (['self.args.output_path', '"""frames"""'], {}), "(self.args.output_path, 'frames')\n", (1968, 2001), False, 'import os\n'), ((2045, 2093), 'os.path.join', 'os.path.join', (['self.args.output_path', '"""attention"""'], {}), "(self.args.output_path, 'attention')\n", (2057, 2093), False, 'import os\n'), ((2173, 2214), 'os.makedirs', 'os.makedirs', (['frames_folder'], {'exist_ok': '(True)'}), '(frames_folder, exist_ok=True)\n', (2184, 2214), False, 'import os\n'), ((2239, 2283), 'os.makedirs', 'os.makedirs', (['attention_folder'], {'exist_ok': '(True)'}), '(attention_folder, exist_ok=True)\n', (2250, 2283), False, 'import os\n'), ((2917, 2965), 'os.path.join', 'os.path.join', (['self.args.output_path', '"""attention"""'], {}), "(self.args.output_path, 'attention')\n", (2929, 2965), False, 'import os\n'), ((3045, 3089), 'os.makedirs', 'os.makedirs', (['attention_folder'], {'exist_ok': '(True)'}), '(attention_folder, exist_ok=True)\n', (3056, 3089), False, 'import os\n'), ((4769, 4782), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (4777, 4782), True, 'import numpy as np\n'), ((5567, 5592), 'torchvision.transforms.ToTensor', 'pth_transforms.ToTensor', ([], {}), '()\n', (5590, 5592), True, 'from torchvision import transforms as pth_transforms\n'), ((5618, 5657), 'torchvision.transforms.Resize', 'pth_transforms.Resize', (['self.args.resize'], {}), '(self.args.resize)\n', (5639, 5657), 
True, 'from torchvision import transforms as pth_transforms\n'), ((5683, 5753), 'torchvision.transforms.Normalize', 'pth_transforms.Normalize', (['(0.485, 0.456, 0.406)', '(0.229, 0.224, 0.225)'], {}), '((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))\n', (5707, 5753), True, 'from torchvision import transforms as pth_transforms\n'), ((5965, 5990), 'torchvision.transforms.ToTensor', 'pth_transforms.ToTensor', ([], {}), '()\n', (5988, 5990), True, 'from torchvision import transforms as pth_transforms\n'), ((6016, 6086), 'torchvision.transforms.Normalize', 'pth_transforms.Normalize', (['(0.485, 0.456, 0.406)', '(0.229, 0.224, 0.225)'], {}), '((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))\n', (6040, 6086), True, 'from torchvision import transforms as pth_transforms\n')] |
# Authors: <NAME> <<EMAIL>>
#
# License: BSD (3-clause)
import inspect
import os
from mne.externals.six.moves import cPickle as pickle
import pytest
from numpy.testing import assert_array_equal
from mne.io.kit import read_mrk
from mne.io.meas_info import _write_dig_points
from mne.utils import _TempDir
# Locate the test data directory relative to this module's own file.
FILE = inspect.getfile(inspect.currentframe())
parent_dir = os.path.dirname(os.path.abspath(FILE))
data_dir = os.path.join(parent_dir, 'data')
# Sample marker file used by the round-trip test below.
mrk_fname = os.path.join(data_dir, 'test_mrk.sqd')
def test_io_mrk():
    """Round-trip marker points through text and pickle, and check bad inputs."""
    tmp = _TempDir()
    points = read_mrk(mrk_fname)
    # Text round-trip: write dig points, read them back, compare.
    txt_path = os.path.join(tmp, 'mrk.txt')
    _write_dig_points(txt_path, points)
    reread = read_mrk(txt_path)
    assert_array_equal(points, reread, "read/write mrk to text")
    # Pickle round-trip.
    pickled_path = os.path.join(tmp, 'mrk.pickled')
    with open(pickled_path, 'wb') as fid:
        pickle.dump({'mrk': points}, fid)
    reread = read_mrk(pickled_path)
    assert_array_equal(reread, points, "pickle mrk")
    # A pickled dict without marker data must raise ValueError.
    with open(pickled_path, 'wb') as fid:
        pickle.dump({}, fid)
    pytest.raises(ValueError, read_mrk, pickled_path)
    # An unsupported file extension must raise ValueError.
    pytest.raises(ValueError, read_mrk, "file.ext")
| [
"mne.utils._TempDir",
"inspect.currentframe",
"mne.io.meas_info._write_dig_points",
"os.path.join",
"pytest.raises",
"os.path.abspath",
"mne.io.kit.read_mrk",
"numpy.testing.assert_array_equal"
] | [((419, 451), 'os.path.join', 'os.path.join', (['parent_dir', '"""data"""'], {}), "(parent_dir, 'data')\n", (431, 451), False, 'import os\n'), ((464, 502), 'os.path.join', 'os.path.join', (['data_dir', '"""test_mrk.sqd"""'], {}), "(data_dir, 'test_mrk.sqd')\n", (476, 502), False, 'import os\n'), ((332, 354), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (352, 354), False, 'import inspect\n'), ((385, 406), 'os.path.abspath', 'os.path.abspath', (['FILE'], {}), '(FILE)\n', (400, 406), False, 'import os\n'), ((571, 581), 'mne.utils._TempDir', '_TempDir', ([], {}), '()\n', (579, 581), False, 'from mne.utils import _TempDir\n'), ((592, 611), 'mne.io.kit.read_mrk', 'read_mrk', (['mrk_fname'], {}), '(mrk_fname)\n', (600, 611), False, 'from mne.io.kit import read_mrk\n'), ((634, 666), 'os.path.join', 'os.path.join', (['tempdir', '"""mrk.txt"""'], {}), "(tempdir, 'mrk.txt')\n", (646, 666), False, 'import os\n'), ((671, 699), 'mne.io.meas_info._write_dig_points', '_write_dig_points', (['path', 'pts'], {}), '(path, pts)\n', (688, 699), False, 'from mne.io.meas_info import _write_dig_points\n'), ((712, 726), 'mne.io.kit.read_mrk', 'read_mrk', (['path'], {}), '(path)\n', (720, 726), False, 'from mne.io.kit import read_mrk\n'), ((731, 787), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['pts', 'pts_2', '"""read/write mrk to text"""'], {}), "(pts, pts_2, 'read/write mrk to text')\n", (749, 787), False, 'from numpy.testing import assert_array_equal\n'), ((814, 850), 'os.path.join', 'os.path.join', (['tempdir', '"""mrk.pickled"""'], {}), "(tempdir, 'mrk.pickled')\n", (826, 850), False, 'import os\n'), ((938, 953), 'mne.io.kit.read_mrk', 'read_mrk', (['fname'], {}), '(fname)\n', (946, 953), False, 'from mne.io.kit import read_mrk\n'), ((958, 1002), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['pts_2', 'pts', '"""pickle mrk"""'], {}), "(pts_2, pts, 'pickle mrk')\n", (976, 1002), False, 'from numpy.testing import 
assert_array_equal\n'), ((1075, 1117), 'pytest.raises', 'pytest.raises', (['ValueError', 'read_mrk', 'fname'], {}), '(ValueError, read_mrk, fname)\n', (1088, 1117), False, 'import pytest\n'), ((1151, 1198), 'pytest.raises', 'pytest.raises', (['ValueError', 'read_mrk', '"""file.ext"""'], {}), "(ValueError, read_mrk, 'file.ext')\n", (1164, 1198), False, 'import pytest\n')] |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains convenience functions for coordinate-related functionality.
This is generally just wrapping around the object-oriented coordinates
framework, but it is useful for some users who are used to more functional
interfaces.
"""
import warnings
from collections.abc import Sequence
import numpy as np
from .. import units as u
from ..constants import c
from .. import _erfa as erfa
from ..io import ascii
from ..utils import isiterable, data
from .sky_coordinate import SkyCoord
from .builtin_frames import GCRS, PrecessedGeocentric
from .representation import SphericalRepresentation, CartesianRepresentation
from .builtin_frames.utils import get_jd12
# Public names exported by ``from astropy.coordinates.funcs import *``.
__all__ = ['cartesian_to_spherical', 'spherical_to_cartesian', 'get_sun',
           'get_constellation', 'concatenate_representations', 'concatenate']
def cartesian_to_spherical(x, y, z):
    """
    Convert 3D rectangular cartesian coordinates to spherical polar
    coordinates.

    The resulting angles are in latitude/longitude form — the origin is on
    the equator, not at the north pole.

    .. note::
        This is a thin functional wrapper around
        `~astropy.coordinates.CartesianRepresentation` and
        `~astropy.coordinates.SphericalRepresentation`; for both performance
        and readability, prefer those classes directly except for quick
        one-off conversions.

    Parameters
    ----------
    x : scalar, array-like, or `~astropy.units.Quantity`
        The first cartesian coordinate.
    y : scalar, array-like, or `~astropy.units.Quantity`
        The second cartesian coordinate.
    z : scalar, array-like, or `~astropy.units.Quantity`
        The third cartesian coordinate.

    Returns
    -------
    r : `~astropy.units.Quantity`
        The radial coordinate (in the same units as the inputs).
    lat : `~astropy.units.Quantity`
        The latitude in radians
    lon : `~astropy.units.Quantity`
        The longitude in radians
    """
    # Coerce bare numbers/arrays to dimensionless quantities.
    coords = [q if hasattr(q, 'unit') else q * u.dimensionless_unscaled
              for q in (x, y, z)]
    sph = CartesianRepresentation(*coords).represent_as(SphericalRepresentation)
    return sph.distance, sph.lat, sph.lon
def spherical_to_cartesian(r, lat, lon):
    """
    Convert spherical polar coordinates to rectangular cartesian coordinates.

    The input angles are in latitude/longitude form — the origin is on the
    equator, not at the north pole.

    .. note::
        This is a low-level function used internally in
        `astropy.coordinates`; prefer the `astropy.coordinates` coordinate
        system classes for general use.

    Parameters
    ----------
    r : scalar, array-like, or `~astropy.units.Quantity`
        The radial coordinate (in the same units as the inputs).
    lat : scalar, array-like, or `~astropy.units.Quantity`
        The latitude (in radians if array or scalar)
    lon : scalar, array-like, or `~astropy.units.Quantity`
        The longitude (in radians if array or scalar)

    Returns
    -------
    x : float or array
        The first cartesian coordinate.
    y : float or array
        The second cartesian coordinate.
    z : float or array
        The third cartesian coordinate.
    """
    # Bare numbers are treated as dimensionless radii and radian angles.
    if not hasattr(r, 'unit'):
        r = r * u.dimensionless_unscaled
    lat = lat if hasattr(lat, 'unit') else lat * u.radian
    lon = lon if hasattr(lon, 'unit') else lon * u.radian
    cart = SphericalRepresentation(distance=r, lat=lat, lon=lon).represent_as(
        CartesianRepresentation)
    return cart.x, cart.y, cart.z
def get_sun(time):
    """
    Determines the location of the sun at a given time (or times, if the input
    is an array `~astropy.time.Time` object), in geocentric coordinates.

    Parameters
    ----------
    time : `~astropy.time.Time`
        The time(s) at which to compute the location of the sun.

    Returns
    -------
    newsc : `~astropy.coordinates.SkyCoord`
        The location of the sun as a `~astropy.coordinates.SkyCoord` in the
        `~astropy.coordinates.GCRS` frame.

    Notes
    -----
    The algorithm for determining the sun/earth relative position is based
    on the simplified version of VSOP2000 that is part of ERFA. Compared to
    JPL's ephemeris, it should be good to about 4 km (in the Sun-Earth
    vector) from 1900-2100 C.E., 8 km for the 1800-2200 span, and perhaps
    250 km over the 1000-3000.
    """
    # ERFA epv00 gives Earth heliocentric and barycentric position/velocity.
    earth_pv_helio, earth_pv_bary = erfa.epv00(*get_jd12(time, 'tdb'))
    # We have to manually do aberration because we're outputting directly into
    # GCRS
    earth_p = earth_pv_helio['p']
    earth_v = earth_pv_bary['v']
    # convert barycentric velocity to units of c, but keep as array for passing in to erfa
    earth_v /= c.to_value(u.au/u.d)
    # Sun-Earth distance (norm of the heliocentric position vector).
    dsun = np.sqrt(np.sum(earth_p**2, axis=-1))
    # sqrt(1 - v^2/c^2): the velocity above is already expressed in units of c.
    invlorentz = (1-np.sum(earth_v**2, axis=-1))**0.5
    # erfa.ab applies stellar aberration to the unit direction vector.
    properdir = erfa.ab(earth_p/dsun.reshape(dsun.shape + (1,)),
                        -earth_v, dsun, invlorentz)
    # Negate: the Sun as seen from Earth is opposite Earth as seen from the Sun.
    cartrep = CartesianRepresentation(x=-dsun*properdir[..., 0] * u.AU,
                                    y=-dsun*properdir[..., 1] * u.AU,
                                    z=-dsun*properdir[..., 2] * u.AU)
    return SkyCoord(cartrep, frame=GCRS(obstime=time))
# global dictionary that caches repeatedly-needed info for get_constellation
# (lazily filled with the 'ctable' boundary table and 'cnames_long' names).
_constellation_data = {}
def get_constellation(coord, short_name=False, constellation_list='iau'):
    """
    Determines the constellation(s) a given coordinate object contains.

    Parameters
    ----------
    coord : coordinate object
        The object to determine the constellation of.
    short_name : bool
        If True, the returned names are the IAU-sanctioned abbreviated
        names. Otherwise, full names for the constellations are used.
    constellation_list : str
        The set of constellations to use. Currently only ``'iau'`` is
        supported, meaning the 88 "modern" constellations endorsed by the IAU.

    Returns
    -------
    constellation : str or string array
        If ``coords`` contains a scalar coordinate, returns the name of the
        constellation. If it is an array coordinate object, it returns an array
        of names.

    Raises
    ------
    ValueError
        If ``constellation_list`` is not ``'iau'``, or if no constellation
        can be found for some coordinate.

    Notes
    -----
    To determine which constellation a point on the sky is in, this precesses
    to B1875, and then uses the Delporte boundaries of the 88 modern
    constellations, as tabulated by
    `Roman 1987 <http://cdsarc.u-strasbg.fr/viz-bin/Cat?VI/42>`_.
    """
    if constellation_list != 'iau':
        # Fixed typo in the error message ("us" -> "is").
        raise ValueError("only 'iau' is currently supported for constellation_list")
    # read the data files and cache them if they haven't been already
    if not _constellation_data:
        cdata = data.get_pkg_data_contents('data/constellation_data_roman87.dat')
        ctable = ascii.read(cdata, names=['ral', 'rau', 'decl', 'name'])
        cnames = data.get_pkg_data_contents('data/constellation_names.dat', encoding='UTF8')
        # Map the 3-letter abbreviation (first 3 chars of each line) to the
        # full constellation name (everything after the separator).
        cnames_short_to_long = dict([(l[:3], l[4:])
                                     for l in cnames.split('\n')
                                     if not l.startswith('#')])
        cnames_long = np.array([cnames_short_to_long[nm] for nm in ctable['name']])
        _constellation_data['ctable'] = ctable
        _constellation_data['cnames_long'] = cnames_long
    else:
        ctable = _constellation_data['ctable']
        cnames_long = _constellation_data['cnames_long']
    isscalar = coord.isscalar
    # if it is geocentric, we reproduce the frame but with the 1875 equinox,
    # which is where the constellations are defined
    # this yields a "dubious year" warning because ERFA considers the year 1875
    # "dubious", probably because UTC isn't well-defined then and precession
    # models aren't precisely calibrated back to then. But it's plenty
    # sufficient for constellations
    with warnings.catch_warnings():
        warnings.simplefilter('ignore', erfa.ErfaWarning)
        constel_coord = coord.transform_to(PrecessedGeocentric(equinox='B1875'))
    if isscalar:
        rah = constel_coord.ra.ravel().hour
        decd = constel_coord.dec.ravel().deg
    else:
        rah = constel_coord.ra.hour
        decd = constel_coord.dec.deg
    constellidx = -np.ones(len(rah), dtype=int)
    notided = constellidx == -1  # should be all
    for i, row in enumerate(ctable):
        # Each boundary row covers an RA interval above a minimum declination;
        # earlier rows win, so only still-unidentified points are assigned.
        msk = (row['ral'] < rah) & (rah < row['rau']) & (decd > row['decl'])
        constellidx[notided & msk] = i
        notided = constellidx == -1
        if np.sum(notided) == 0:
            break
    else:
        raise ValueError('Could not find constellation for coordinates {0}'.format(constel_coord[notided]))
    if short_name:
        names = ctable['name'][constellidx]
    else:
        names = cnames_long[constellidx]
    if isscalar:
        return names[0]
    else:
        return names
def _concatenate_components(reps_difs, names):
    """Helper for the concatenate functions below: gather and concatenate
    each named component across an iterable of representations or
    differentials, returning one concatenated array per name.
    """
    out = []
    for name in names:
        pieces = []
        for rep_dif in reps_difs:
            val = getattr(rep_dif, name)
            # Promote scalars to length-1 arrays so np.concatenate accepts them.
            pieces.append(val.reshape(1, ) if rep_dif.isscalar else val)
        merged = np.concatenate(pieces)
        # Hack because np.concatenate doesn't fully work with Quantity
        if isinstance(merged, u.Quantity):
            merged._unit = val.unit
        out.append(merged)
    return out
def concatenate_representations(reps):
    """
    Combine multiple representation objects into a single instance by
    concatenating the data in each component.

    Currently, all of the input representations have to be the same type. This
    properly handles differential or velocity data, but all input objects must
    have the same differential object type as well.

    Parameters
    ----------
    reps : sequence of representation objects
        The objects to concatenate

    Returns
    -------
    rep : `~astropy.coordinates.BaseRepresentation` subclass
        A single representation object with its data set to the concatenation of
        all the elements of the input sequence of representations.

    Raises
    ------
    TypeError
        If ``reps`` is not a sequence, if the representations differ in type,
        or if their differentials differ in type.
    ValueError
        If only some of the representations carry differentials.
    """
    if not isinstance(reps, (Sequence, np.ndarray)):
        raise TypeError('Input must be a list or iterable of representation '
                        'objects.')
    # First, validate that the represenations are the same, and
    # concatenate all of the positional data:
    rep_type = type(reps[0])
    if any(type(r) != rep_type for r in reps):
        raise TypeError('Input representations must all have the same type.')
    # Construct the new representation with the concatenated data from the
    # representations passed in
    values = _concatenate_components(reps,
                                    rep_type.attr_classes.keys())
    new_rep = rep_type(*values)
    # The 's' key holds the differential data (velocities, per the docstring);
    # either all inputs carry it or none may.
    has_diff = any('s' in rep.differentials for rep in reps)
    if has_diff and any('s' not in rep.differentials for rep in reps):
        raise ValueError('Input representations must either all contain '
                         'differentials, or not contain differentials.')
    if has_diff:
        dif_type = type(reps[0].differentials['s'])
        if any('s' not in r.differentials or
               type(r.differentials['s']) != dif_type
               for r in reps):
            raise TypeError('All input representations must have the same '
                            'differential type.')
        # Concatenate the differentials component-wise, same as the positions.
        values = _concatenate_components([r.differentials['s'] for r in reps],
                                        dif_type.attr_classes.keys())
        new_dif = dif_type(*values)
        new_rep = new_rep.with_differentials({'s': new_dif})
    return new_rep
def concatenate(coords):
    """
    Combine multiple coordinate objects into a single
    `~astropy.coordinates.SkyCoord`.

    "Coordinate objects" here mean frame objects with data,
    `~astropy.coordinates.SkyCoord`, or representation objects. Currently,
    they must all be in the same frame, but in a future version this may be
    relaxed to allow inhomogenous sequences of objects.

    Parameters
    ----------
    coords : sequence of coordinate objects
        The objects to concatenate

    Returns
    -------
    cskycoord : SkyCoord
        A single sky coordinate with its data set to the concatenation of all
        the elements in ``coords``

    Raises
    ------
    TypeError
        If ``coords`` is not iterable or is a scalar coordinate.
    ValueError
        If the inputs are not all in equivalent frames.
    """
    if getattr(coords, 'isscalar', False) or not isiterable(coords):
        raise TypeError('The argument to concatenate must be iterable')
    # Normalize every input to a SkyCoord without copying its data.
    scs = [SkyCoord(coord, copy=False) for coord in coords]
    # Check that all frames are equivalent
    for sc in scs[1:]:
        if not sc.is_equivalent_frame(scs[0]):
            raise ValueError("All inputs must have equivalent frames: "
                             "{0} != {1}".format(sc, scs[0]))
    # TODO: this can be changed to SkyCoord.from_representation() for a speed
    # boost when we switch to using classmethods
    return SkyCoord(concatenate_representations([c.data for c in coords]),
                    frame=scs[0].frame)
| [
"warnings.catch_warnings",
"numpy.sum",
"numpy.array",
"numpy.concatenate",
"warnings.simplefilter"
] | [((5298, 5327), 'numpy.sum', 'np.sum', (['(earth_p ** 2)'], {'axis': '(-1)'}), '(earth_p ** 2, axis=-1)\n', (5304, 5327), True, 'import numpy as np\n'), ((7681, 7742), 'numpy.array', 'np.array', (["[cnames_short_to_long[nm] for nm in ctable['name']]"], {}), "([cnames_short_to_long[nm] for nm in ctable['name']])\n", (7689, 7742), True, 'import numpy as np\n'), ((8397, 8422), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (8420, 8422), False, 'import warnings\n'), ((8432, 8481), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""', 'erfa.ErfaWarning'], {}), "('ignore', erfa.ErfaWarning)\n", (8453, 8481), False, 'import warnings\n'), ((9862, 9887), 'numpy.concatenate', 'np.concatenate', (['data_vals'], {}), '(data_vals)\n', (9876, 9887), True, 'import numpy as np\n'), ((5347, 5376), 'numpy.sum', 'np.sum', (['(earth_v ** 2)'], {'axis': '(-1)'}), '(earth_v ** 2, axis=-1)\n', (5353, 5376), True, 'import numpy as np\n'), ((9051, 9066), 'numpy.sum', 'np.sum', (['notided'], {}), '(notided)\n', (9057, 9066), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
import numpy as np
def plot():
    """Build a demo figure exercising three ``Axes.annotate`` styles.

    Returns the matplotlib Figure so the regression test below can compare
    its converted output against a stored reference file.
    """
    fig = plt.figure(1, figsize=(8, 5))
    # Fixed axis limits with autoscale off so annotation positions are stable.
    ax = fig.add_subplot(111, autoscale_on=False, xlim=(-1, 5), ylim=(-4, 3))
    t = np.arange(0.0, 5.0, 0.2)
    s = np.cos(2 * np.pi * t)
    ax.plot(t, s, color="blue")
    # Arrow with both the point and the text given in data coordinates.
    ax.annotate(
        "text",
        xy=(4.0, 1.0),
        xycoords="data",
        xytext=(4.5, 1.5),
        textcoords="data",
        arrowprops=dict(arrowstyle="->", ec="r"),
    )
    # Text placed as an offset in points from the annotated data point.
    ax.annotate(
        "arrowstyle",
        xy=(0, 1),
        xycoords="data",
        xytext=(-50, 30),
        textcoords="offset points",
        arrowprops=dict(arrowstyle="->"),
    )
    # No arrowprops: plain text placed as an offset in pixels.
    ax.annotate(
        "no arrow",
        xy=(0, 1),
        xycoords="data",
        xytext=(50, -30),
        textcoords="offset pixels",
    )
    return fig
def test():
    """Regression test: compare ``plot()``'s output against the stored
    ``*_reference.tex`` file next to this module."""
    from .helpers import assert_equality
    assert_equality(plot, __file__[:-3] + "_reference.tex")
if __name__ == "__main__":
    # Render the demo figure interactively when run as a script.
    plot()
    plt.show()
| [
"matplotlib.pyplot.figure",
"numpy.cos",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((75, 104), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {'figsize': '(8, 5)'}), '(1, figsize=(8, 5))\n', (85, 104), True, 'import matplotlib.pyplot as plt\n'), ((191, 215), 'numpy.arange', 'np.arange', (['(0.0)', '(5.0)', '(0.2)'], {}), '(0.0, 5.0, 0.2)\n', (200, 215), True, 'import numpy as np\n'), ((224, 245), 'numpy.cos', 'np.cos', (['(2 * np.pi * t)'], {}), '(2 * np.pi * t)\n', (230, 245), True, 'import numpy as np\n'), ((986, 996), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (994, 996), True, 'import matplotlib.pyplot as plt\n')] |
import copy
import pathlib
import pickle
import time
from functools import partial, reduce
import numpy as np
from det3d.core.bbox import box_np_ops
from det3d.core.sampler import preprocess as prep
from det3d.utils.check import shape_mergeable
class DataBaseSamplerV2:
    """Sample ground-truth objects from a pre-built database for gt-augmentation.

    Given ``db_infos`` (per-class lists of cropped ground-truth annotations)
    and per-class sample caps in ``groups``, the sampler draws extra boxes,
    rejects those whose bird's-eye-view (BEV) box collides with existing
    boxes, and (in ``sample_all``) loads the corresponding point clouds so
    they can be pasted into a training scene.
    """
    def __init__(
        self,
        db_infos, # object/dbinfos_train.pkl
        groups, # [dict(Car=15,),],
        db_prepor=None, # filter_by_min_num_points, filter_by_difficulty
        rate=1.0, # rate=1.0
        global_rot_range=None, # [0, 0]
        logger=None, # logging.getLogger("build_dbsampler")
    ):
        """Index the database by class (or group) and build batch samplers.

        Args:
            db_infos (dict): class name -> list of annotation info dicts.
            groups (list): list of dicts mapping class name -> max samples.
            db_prepor (callable): optional filter applied to ``db_infos``.
            rate (float): multiplier on the number of boxes to sample.
            global_rot_range: optional rotation noise range; rotation noise
                is only enabled when its two ends differ by >= 1e-3.
            logger: logger used to report database sizes.
        """
        # load all gt database here.
        for k, v in db_infos.items():
            logger.info(f"load {len(v)} {k} database infos")
        # preprocess: filter_by_min_num_points/difficulty.
        if db_prepor is not None:
            db_infos = db_prepor(db_infos)
            logger.info("After filter database:")
            for k, v in db_infos.items():
                logger.info(f"load {len(v)} {k} database infos")
        self.db_infos = db_infos
        self._rate = rate
        self._groups = groups
        self._group_db_infos = {}
        self._group_name_to_names = []
        self._sample_classes = []
        self._sample_max_nums = []
        self._use_group_sampling = False # slower
        if any([len(g) > 1 for g in groups]): # False
            self._use_group_sampling = True
        # get group_name: Car and group_max_num: 15
        if not self._use_group_sampling: # True
            self._group_db_infos = self.db_infos # just use db_infos
            for group_info in groups:
                group_names = list(group_info.keys())
                self._sample_classes += group_names # ['Car']
                self._sample_max_nums += list(group_info.values()) # [15]
        else: # False
            for group_info in groups:
                group_dict = {}
                group_names = list(group_info.keys())
                group_name = ", ".join(group_names)
                self._sample_classes += group_names
                self._sample_max_nums += list(group_info.values())
                self._group_name_to_names.append((group_name, group_names))
                # self._group_name_to_names[group_name] = group_names
                # Bucket the database entries of this group by group_id.
                for name in group_names:
                    for item in db_infos[name]:
                        gid = item["group_id"]
                        if gid not in group_dict:
                            group_dict[gid] = [item]
                        else:
                            group_dict[gid] += [item]
                if group_name in self._group_db_infos:
                    raise ValueError("group must be unique")
                group_data = list(group_dict.values())
                self._group_db_infos[group_name] = group_data
                info_dict = {}
                if len(group_info) > 1:
                    for group in group_data:
                        names = [item["name"] for item in group]
                        names = sorted(names)
                        group_name = ", ".join(names)
                        if group_name in info_dict:
                            info_dict[group_name] += 1
                        else:
                            info_dict[group_name] = 1
                print(info_dict)
        # get sampler dict for each class like Car, Cyclist, Pedestrian...
        # this sampler can ensure batch samples selected randomly.
        self._sampler_dict = {}
        for k, v in self._group_db_infos.items():
            self._sampler_dict[k] = prep.BatchSampler(v, k)
        # get rotation range
        self._enable_global_rot = False
        if global_rot_range is not None:
            if not isinstance(global_rot_range, (list, tuple, np.ndarray)):
                global_rot_range = [-global_rot_range, global_rot_range]
            else: # True
                assert shape_mergeable(global_rot_range, [2]) # True
            if np.abs(global_rot_range[0] - global_rot_range[1]) >= 1e-3: # False
                self._enable_global_rot = True
        self._global_rot_range = global_rot_range # [0, 0]
    @property
    def use_group_sampling(self):
        """bool: whether multi-class group sampling is enabled."""
        return self._use_group_sampling
    def sample_all(
        self,
        root_path,
        gt_boxes,
        gt_names,
        num_point_features,
        random_crop=False,
        gt_group_ids=None,
        calib=None,
    ):
        """Sample boxes for every configured class and load their points.

        For each class the number of drawn boxes is the configured cap minus
        the boxes of that class already present in ``gt_names`` (scaled by
        ``self._rate``).  Colliding samples are rejected, then the per-box
        point clouds are read from ``root_path / info["path"]``.

        Returns:
            dict with keys ``gt_names``, ``difficulty``, ``gt_boxes``,
            ``points``, ``gt_masks``, ``group_ids`` — or ``None`` when no
            box was sampled.
        """
        # record the num of gt-aug samples with a dict and a list
        sampled_num_dict = {}
        sample_num_per_class = []
        for class_name, max_sample_num in zip(self._sample_classes, self._sample_max_nums): # actual only once for ['Car': 15]
            sampled_num = int(max_sample_num - np.sum([n == class_name for n in gt_names]))
            sampled_num = np.round(self._rate * sampled_num).astype(np.int64)
            sampled_num_dict[class_name] = sampled_num
            sample_num_per_class.append(sampled_num)
        sampled_groups = self._sample_classes
        if self._use_group_sampling: # False
            assert gt_group_ids is not None
            sampled_groups = []
            sample_num_per_class = []
            for group_name, class_names in self._group_name_to_names:
                sampled_nums_group = [sampled_num_dict[n] for n in class_names]
                sampled_num = np.max(sampled_nums_group)
                sample_num_per_class.append(sampled_num)
                sampled_groups.append(group_name)
            total_group_ids = gt_group_ids
        sampled = []
        sampled_gt_boxes = []
        avoid_coll_boxes = gt_boxes
        # gt-augmentation: sample gt boxes and add them to current gt_boxes.
        # todo: we may sample box one by one to ensure num of gt-boxes is fulfilled.
        for class_name, sampled_num in zip(sampled_groups, sample_num_per_class):
            if sampled_num > 0:
                if self._use_group_sampling: # False
                    sampled_cls = self.sample_group(class_name, sampled_num, avoid_coll_boxes, total_group_ids)
                else:
                    sampled_cls = self.sample_class_v2(class_name, sampled_num, avoid_coll_boxes)
                sampled += sampled_cls
                if len(sampled_cls) > 0:
                    if len(sampled_cls) == 1: # True
                        sampled_gt_box = sampled_cls[0]["box3d_lidar"][np.newaxis, ...]
                    else:
                        sampled_gt_box = np.stack([s["box3d_lidar"] for s in sampled_cls], axis=0)
                    sampled_gt_boxes += [sampled_gt_box]
                    # grow the collision-avoidance set so later classes also
                    # avoid the boxes sampled in this iteration.
                    avoid_coll_boxes = np.concatenate([avoid_coll_boxes, sampled_gt_box], axis=0)
                    if self._use_group_sampling: # False
                        if len(sampled_cls) == 1:
                            sampled_group_ids = np.array(sampled_cls[0]["group_id"])[np.newaxis, ...]
                        else:
                            sampled_group_ids = np.stack([s["group_id"] for s in sampled_cls], axis=0)
                        total_group_ids = np.concatenate([total_group_ids, sampled_group_ids], axis=0)
        if len(sampled) > 0:
            ''' get points in sampled gt_boxes '''
            sampled_gt_boxes = np.concatenate(sampled_gt_boxes, axis=0)
            num_sampled = len(sampled)
            s_points_list = []
            # get points in sampled gt-boxes.
            for info in sampled: # info.keys: ['name', 'path', 'image_idx', 'gt_idx', 'box3d_lidar', 'num_points_in_gt', 'difficulty', 'group_id']
                try:
                    # TODO fix point read error
                    s_points = np.fromfile(str(pathlib.Path(root_path) / info["path"]), dtype=np.float32).reshape(-1, num_point_features)
                    if "rot_transform" in info: # False
                        rot = info["rot_transform"]
                        s_points[:, :3] = box_np_ops.rotation_points_single_angle(s_points[:, :4], rot, axis=2)
                    # gt_points are saved with relative distance; so need to recover by adding box center.
                    s_points[:, :3] += info["box3d_lidar"][:3]
                    s_points_list.append(s_points)
                except Exception:
                    print(info["path"])
                    continue
            '''todo: do something about random crop'''
            if random_crop: # False
                s_points_list_new = []
                assert calib is not None
                rect = calib["rect"]
                Trv2c = calib["Trv2c"]
                P2 = calib["P2"]
                gt_bboxes = box_np_ops.box3d_to_bbox(sampled_gt_boxes, rect, Trv2c, P2)
                crop_frustums = prep.random_crop_frustum(gt_bboxes, rect, Trv2c, P2)
                for i in range(crop_frustums.shape[0]):
                    s_points = s_points_list[i]
                    mask = prep.mask_points_in_corners(s_points, crop_frustums[i : i + 1]).reshape(-1)
                    num_remove = np.sum(mask)
                    if num_remove > 0 and (s_points.shape[0] - num_remove) > 15:
                        s_points = s_points[np.logical_not(mask)]
                    s_points_list_new.append(s_points)
                s_points_list = s_points_list_new
            ret = {
                "gt_names": np.array([s["name"] for s in sampled]),
                "difficulty": np.array([s["difficulty"] for s in sampled]),
                "gt_boxes": sampled_gt_boxes,
                "points": np.concatenate(s_points_list, axis=0),
                "gt_masks": np.ones((num_sampled,), dtype=np.bool_),
            }
            if self._use_group_sampling:
                ret["group_ids"] = np.array([s["group_id"] for s in sampled])
            else:
                ret["group_ids"] = np.arange(gt_boxes.shape[0], gt_boxes.shape[0] + len(sampled))
        else:
            ret = None
        return ret
    def sample(self, name, num):
        """Draw ``num`` database entries for ``name``.

        Returns (entries, per-entry group sizes); with group sampling off the
        sizes are all 1.
        """
        if self._use_group_sampling:
            group_name = name
            ret = self._sampler_dict[group_name].sample(num)
            groups_num = [len(l) for l in ret]
            return reduce(lambda x, y: x + y, ret), groups_num
        else:
            ret = self._sampler_dict[name].sample(num)
            return ret, np.ones((len(ret),), dtype=np.int64)
    def sample_v1(self, name, num):
        """Like :meth:`sample`, but dispatches on whether ``name`` is a
        list/tuple of class names (a group) instead of the sampler mode."""
        if isinstance(name, (list, tuple)):
            group_name = ", ".join(name)
            ret = self._sampler_dict[group_name].sample(num)
            groups_num = [len(l) for l in ret]
            return reduce(lambda x, y: x + y, ret), groups_num
        else:
            ret = self._sampler_dict[name].sample(num)
            return ret, np.ones((len(ret),), dtype=np.int64)
    def sample_class_v2(self, name, num, gt_boxes):
        """Sample up to ``num`` boxes of class ``name`` that do not collide
        (on the BEV plane) with ``gt_boxes`` or with each other.

        Returns the kept annotation info dicts (possibly fewer than ``num``).
        """
        # sample num gt_boxes from gt_database
        sampled = self._sampler_dict[name].sample(num)
        sampled = copy.deepcopy(sampled)
        num_sampled = len(sampled)
        # get all boxes: gt_boxes + sp_boxes
        num_gt = gt_boxes.shape[0]
        gt_boxes_bv = box_np_ops.center_to_corner_box2d(gt_boxes[:, 0:2], gt_boxes[:, 3:5], gt_boxes[:, -1])
        sp_boxes = np.stack([i["box3d_lidar"] for i in sampled], axis=0) # todo: need modification here
        boxes = np.concatenate([gt_boxes, sp_boxes], axis=0).copy()
        # unused: get mask
        valid_mask = np.zeros([gt_boxes.shape[0]], dtype=np.bool_)
        valid_mask = np.concatenate([valid_mask, np.ones([sp_boxes.shape[0]], dtype=np.bool_)], axis=0)
        if self._enable_global_rot: # False
            prep.noise_per_object_v3_(boxes, None, valid_mask, 0, 0, self._global_rot_range, num_try=100)
        # get all boxes_bev: gt_boxes_bev + sampled_boxes_bev
        sp_boxes_new = boxes[gt_boxes.shape[0] : ]
        sp_boxes_bv = box_np_ops.center_to_corner_box2d(sp_boxes_new[:, 0:2], sp_boxes_new[:, 3:5], sp_boxes_new[:, -1])
        total_bv = np.concatenate([gt_boxes_bv, sp_boxes_bv], axis=0)
        # collision test on bev (stricter than 3d)
        coll_mat = prep.box_collision_test(total_bv, total_bv) # todo: too slow here
        diag = np.arange(total_bv.shape[0])
        coll_mat[diag, diag] = False
        # get valid samples
        valid_samples = []
        for i in range(num_gt, num_gt + num_sampled): # todo: without multiple try times, sometimes, the overall box num may not meet the requirement
            if coll_mat[i].any():
                # i-th sampled box is not considered into gt-boxes.
                coll_mat[i] = False
                coll_mat[:, i] = False
            else:
                if self._enable_global_rot: # False
                    sampled[i - num_gt]["box3d_lidar"][:2] = boxes[i, :2] # boxes has got noise
                    sampled[i - num_gt]["box3d_lidar"][-1] = boxes[i, -1]
                    sampled[i - num_gt]["rot_transform"] = (boxes[i, -1] - sp_boxes[i - num_gt, -1])
                # i-th sampled box is considered into gt-boxes.
                valid_samples.append(sampled[i - num_gt])
        return valid_samples
    def sample_group(self, name, num, gt_boxes, gt_group_ids):
        """Group-wise variant of :meth:`sample_class_v2`: samples whole
        groups, remaps their group ids past ``max(gt_group_ids)``, and keeps
        only groups whose BEV boxes do not collide with existing boxes."""
        sampled, group_num = self.sample(name, num)
        sampled = copy.deepcopy(sampled)
        # rewrite sampled group id to avoid duplicated with gt group ids
        gid_map = {}
        max_gt_gid = np.max(gt_group_ids)
        sampled_gid = max_gt_gid + 1
        for s in sampled:
            gid = s["group_id"]
            if gid in gid_map:
                s["group_id"] = gid_map[gid]
            else:
                gid_map[gid] = sampled_gid
                s["group_id"] = sampled_gid
                sampled_gid += 1
        num_gt = gt_boxes.shape[0]
        gt_boxes_bv = box_np_ops.center_to_corner_box2d(
            gt_boxes[:, 0:2], gt_boxes[:, 3:5], gt_boxes[:, -1]
        )
        sp_boxes = np.stack([i["box3d_lidar"] for i in sampled], axis=0)
        sp_group_ids = np.stack([i["group_id"] for i in sampled], axis=0)
        valid_mask = np.zeros([gt_boxes.shape[0]], dtype=np.bool_)
        valid_mask = np.concatenate(
            [valid_mask, np.ones([sp_boxes.shape[0]], dtype=np.bool_)], axis=0
        )
        boxes = np.concatenate([gt_boxes, sp_boxes], axis=0).copy()
        group_ids = np.concatenate([gt_group_ids, sp_group_ids], axis=0)
        if self._enable_global_rot:
            # place samples to any place in a circle.
            prep.noise_per_object_v3_(
                boxes,
                None,
                valid_mask,
                0,
                0,
                self._global_rot_range,
                group_ids=group_ids,
                num_try=100,
            )
        sp_boxes_new = boxes[gt_boxes.shape[0] :]
        sp_boxes_bv = box_np_ops.center_to_corner_box2d(
            sp_boxes_new[:, 0:2], sp_boxes_new[:, 3:5], sp_boxes_new[:, -1]
        )
        total_bv = np.concatenate([gt_boxes_bv, sp_boxes_bv], axis=0)
        # coll_mat = collision_test_allbox(total_bv)
        coll_mat = prep.box_collision_test(total_bv, total_bv)
        diag = np.arange(total_bv.shape[0])
        coll_mat[diag, diag] = False
        valid_samples = []
        idx = num_gt
        for num in group_num:
            if coll_mat[idx : idx + num].any():
                # Any collision inside the group rejects the whole group.
                coll_mat[idx : idx + num] = False
                coll_mat[:, idx : idx + num] = False
            else:
                for i in range(num):
                    if self._enable_global_rot:
                        sampled[idx - num_gt + i]["box3d_lidar"][:2] = boxes[
                            idx + i, :2
                        ]
                        sampled[idx - num_gt + i]["box3d_lidar"][-1] = boxes[
                            idx + i, -1
                        ]
                        sampled[idx - num_gt + i]["rot_transform"] = (
                            boxes[idx + i, -1] - sp_boxes[idx + i - num_gt, -1]
                        )
                    valid_samples.append(sampled[idx - num_gt + i])
            idx += num
        return valid_samples
| [
"det3d.core.sampler.preprocess.box_collision_test",
"det3d.core.sampler.preprocess.random_crop_frustum",
"numpy.logical_not",
"numpy.array",
"det3d.core.bbox.box_np_ops.center_to_corner_box2d",
"copy.deepcopy",
"numpy.arange",
"pathlib.Path",
"numpy.max",
"numpy.stack",
"numpy.concatenate",
"n... | [((11129, 11151), 'copy.deepcopy', 'copy.deepcopy', (['sampled'], {}), '(sampled)\n', (11142, 11151), False, 'import copy\n'), ((11290, 11380), 'det3d.core.bbox.box_np_ops.center_to_corner_box2d', 'box_np_ops.center_to_corner_box2d', (['gt_boxes[:, 0:2]', 'gt_boxes[:, 3:5]', 'gt_boxes[:, -1]'], {}), '(gt_boxes[:, 0:2], gt_boxes[:, 3:5],\n gt_boxes[:, -1])\n', (11323, 11380), False, 'from det3d.core.bbox import box_np_ops\n'), ((11396, 11449), 'numpy.stack', 'np.stack', (["[i['box3d_lidar'] for i in sampled]"], {'axis': '(0)'}), "([i['box3d_lidar'] for i in sampled], axis=0)\n", (11404, 11449), True, 'import numpy as np\n'), ((11599, 11644), 'numpy.zeros', 'np.zeros', (['[gt_boxes.shape[0]]'], {'dtype': 'np.bool_'}), '([gt_boxes.shape[0]], dtype=np.bool_)\n', (11607, 11644), True, 'import numpy as np\n'), ((12038, 12141), 'det3d.core.bbox.box_np_ops.center_to_corner_box2d', 'box_np_ops.center_to_corner_box2d', (['sp_boxes_new[:, 0:2]', 'sp_boxes_new[:, 3:5]', 'sp_boxes_new[:, -1]'], {}), '(sp_boxes_new[:, 0:2], sp_boxes_new[:, 3:5\n ], sp_boxes_new[:, -1])\n', (12071, 12141), False, 'from det3d.core.bbox import box_np_ops\n'), ((12156, 12206), 'numpy.concatenate', 'np.concatenate', (['[gt_boxes_bv, sp_boxes_bv]'], {'axis': '(0)'}), '([gt_boxes_bv, sp_boxes_bv], axis=0)\n', (12170, 12206), True, 'import numpy as np\n'), ((12278, 12321), 'det3d.core.sampler.preprocess.box_collision_test', 'prep.box_collision_test', (['total_bv', 'total_bv'], {}), '(total_bv, total_bv)\n', (12301, 12321), True, 'from det3d.core.sampler import preprocess as prep\n'), ((12361, 12389), 'numpy.arange', 'np.arange', (['total_bv.shape[0]'], {}), '(total_bv.shape[0])\n', (12370, 12389), True, 'import numpy as np\n'), ((13441, 13463), 'copy.deepcopy', 'copy.deepcopy', (['sampled'], {}), '(sampled)\n', (13454, 13463), False, 'import copy\n'), ((13579, 13599), 'numpy.max', 'np.max', (['gt_group_ids'], {}), '(gt_group_ids)\n', (13585, 13599), True, 'import numpy as np\n'), ((13967, 
14057), 'det3d.core.bbox.box_np_ops.center_to_corner_box2d', 'box_np_ops.center_to_corner_box2d', (['gt_boxes[:, 0:2]', 'gt_boxes[:, 3:5]', 'gt_boxes[:, -1]'], {}), '(gt_boxes[:, 0:2], gt_boxes[:, 3:5],\n gt_boxes[:, -1])\n', (14000, 14057), False, 'from det3d.core.bbox import box_np_ops\n'), ((14096, 14149), 'numpy.stack', 'np.stack', (["[i['box3d_lidar'] for i in sampled]"], {'axis': '(0)'}), "([i['box3d_lidar'] for i in sampled], axis=0)\n", (14104, 14149), True, 'import numpy as np\n'), ((14173, 14223), 'numpy.stack', 'np.stack', (["[i['group_id'] for i in sampled]"], {'axis': '(0)'}), "([i['group_id'] for i in sampled], axis=0)\n", (14181, 14223), True, 'import numpy as np\n'), ((14245, 14290), 'numpy.zeros', 'np.zeros', (['[gt_boxes.shape[0]]'], {'dtype': 'np.bool_'}), '([gt_boxes.shape[0]], dtype=np.bool_)\n', (14253, 14290), True, 'import numpy as np\n'), ((14505, 14557), 'numpy.concatenate', 'np.concatenate', (['[gt_group_ids, sp_group_ids]'], {'axis': '(0)'}), '([gt_group_ids, sp_group_ids], axis=0)\n', (14519, 14557), True, 'import numpy as np\n'), ((14990, 15093), 'det3d.core.bbox.box_np_ops.center_to_corner_box2d', 'box_np_ops.center_to_corner_box2d', (['sp_boxes_new[:, 0:2]', 'sp_boxes_new[:, 3:5]', 'sp_boxes_new[:, -1]'], {}), '(sp_boxes_new[:, 0:2], sp_boxes_new[:, 3:5\n ], sp_boxes_new[:, -1])\n', (15023, 15093), False, 'from det3d.core.bbox import box_np_ops\n'), ((15130, 15180), 'numpy.concatenate', 'np.concatenate', (['[gt_boxes_bv, sp_boxes_bv]'], {'axis': '(0)'}), '([gt_boxes_bv, sp_boxes_bv], axis=0)\n', (15144, 15180), True, 'import numpy as np\n'), ((15253, 15296), 'det3d.core.sampler.preprocess.box_collision_test', 'prep.box_collision_test', (['total_bv', 'total_bv'], {}), '(total_bv, total_bv)\n', (15276, 15296), True, 'from det3d.core.sampler import preprocess as prep\n'), ((15312, 15340), 'numpy.arange', 'np.arange', (['total_bv.shape[0]'], {}), '(total_bv.shape[0])\n', (15321, 15340), True, 'import numpy as np\n'), ((3749, 3772), 
'det3d.core.sampler.preprocess.BatchSampler', 'prep.BatchSampler', (['v', 'k'], {}), '(v, k)\n', (3766, 3772), True, 'from det3d.core.sampler import preprocess as prep\n'), ((7460, 7500), 'numpy.concatenate', 'np.concatenate', (['sampled_gt_boxes'], {'axis': '(0)'}), '(sampled_gt_boxes, axis=0)\n', (7474, 7500), True, 'import numpy as np\n'), ((11808, 11906), 'det3d.core.sampler.preprocess.noise_per_object_v3_', 'prep.noise_per_object_v3_', (['boxes', 'None', 'valid_mask', '(0)', '(0)', 'self._global_rot_range'], {'num_try': '(100)'}), '(boxes, None, valid_mask, 0, 0, self.\n _global_rot_range, num_try=100)\n', (11833, 11906), True, 'from det3d.core.sampler import preprocess as prep\n'), ((14660, 14779), 'det3d.core.sampler.preprocess.noise_per_object_v3_', 'prep.noise_per_object_v3_', (['boxes', 'None', 'valid_mask', '(0)', '(0)', 'self._global_rot_range'], {'group_ids': 'group_ids', 'num_try': '(100)'}), '(boxes, None, valid_mask, 0, 0, self.\n _global_rot_range, group_ids=group_ids, num_try=100)\n', (14685, 14779), True, 'from det3d.core.sampler import preprocess as prep\n'), ((4082, 4120), 'det3d.utils.check.shape_mergeable', 'shape_mergeable', (['global_rot_range', '[2]'], {}), '(global_rot_range, [2])\n', (4097, 4120), False, 'from det3d.utils.check import shape_mergeable\n'), ((4156, 4205), 'numpy.abs', 'np.abs', (['(global_rot_range[0] - global_rot_range[1])'], {}), '(global_rot_range[0] - global_rot_range[1])\n', (4162, 4205), True, 'import numpy as np\n'), ((5567, 5593), 'numpy.max', 'np.max', (['sampled_nums_group'], {}), '(sampled_nums_group)\n', (5573, 5593), True, 'import numpy as np\n'), ((8829, 8888), 'det3d.core.bbox.box_np_ops.box3d_to_bbox', 'box_np_ops.box3d_to_bbox', (['sampled_gt_boxes', 'rect', 'Trv2c', 'P2'], {}), '(sampled_gt_boxes, rect, Trv2c, P2)\n', (8853, 8888), False, 'from det3d.core.bbox import box_np_ops\n'), ((8921, 8973), 'det3d.core.sampler.preprocess.random_crop_frustum', 'prep.random_crop_frustum', (['gt_bboxes', 'rect', 
'Trv2c', 'P2'], {}), '(gt_bboxes, rect, Trv2c, P2)\n', (8945, 8973), True, 'from det3d.core.sampler import preprocess as prep\n'), ((9528, 9566), 'numpy.array', 'np.array', (["[s['name'] for s in sampled]"], {}), "([s['name'] for s in sampled])\n", (9536, 9566), True, 'import numpy as np\n'), ((9598, 9642), 'numpy.array', 'np.array', (["[s['difficulty'] for s in sampled]"], {}), "([s['difficulty'] for s in sampled])\n", (9606, 9642), True, 'import numpy as np\n'), ((9716, 9753), 'numpy.concatenate', 'np.concatenate', (['s_points_list'], {'axis': '(0)'}), '(s_points_list, axis=0)\n', (9730, 9753), True, 'import numpy as np\n'), ((9783, 9822), 'numpy.ones', 'np.ones', (['(num_sampled,)'], {'dtype': 'np.bool_'}), '((num_sampled,), dtype=np.bool_)\n', (9790, 9822), True, 'import numpy as np\n'), ((9914, 9956), 'numpy.array', 'np.array', (["[s['group_id'] for s in sampled]"], {}), "([s['group_id'] for s in sampled])\n", (9922, 9956), True, 'import numpy as np\n'), ((10357, 10388), 'functools.reduce', 'reduce', (['(lambda x, y: x + y)', 'ret'], {}), '(lambda x, y: x + y, ret)\n', (10363, 10388), False, 'from functools import partial, reduce\n'), ((10780, 10811), 'functools.reduce', 'reduce', (['(lambda x, y: x + y)', 'ret'], {}), '(lambda x, y: x + y, ret)\n', (10786, 10811), False, 'from functools import partial, reduce\n'), ((11498, 11542), 'numpy.concatenate', 'np.concatenate', (['[gt_boxes, sp_boxes]'], {'axis': '(0)'}), '([gt_boxes, sp_boxes], axis=0)\n', (11512, 11542), True, 'import numpy as np\n'), ((11694, 11738), 'numpy.ones', 'np.ones', (['[sp_boxes.shape[0]]'], {'dtype': 'np.bool_'}), '([sp_boxes.shape[0]], dtype=np.bool_)\n', (11701, 11738), True, 'import numpy as np\n'), ((14353, 14397), 'numpy.ones', 'np.ones', (['[sp_boxes.shape[0]]'], {'dtype': 'np.bool_'}), '([sp_boxes.shape[0]], dtype=np.bool_)\n', (14360, 14397), True, 'import numpy as np\n'), ((14433, 14477), 'numpy.concatenate', 'np.concatenate', (['[gt_boxes, sp_boxes]'], {'axis': '(0)'}), 
'([gt_boxes, sp_boxes], axis=0)\n', (14447, 14477), True, 'import numpy as np\n'), ((4948, 4993), 'numpy.sum', 'np.sum', (['[(n == class_name) for n in gt_names]'], {}), '([(n == class_name) for n in gt_names])\n', (4954, 4993), True, 'import numpy as np\n'), ((5019, 5053), 'numpy.round', 'np.round', (['(self._rate * sampled_num)'], {}), '(self._rate * sampled_num)\n', (5027, 5053), True, 'import numpy as np\n'), ((6841, 6899), 'numpy.concatenate', 'np.concatenate', (['[avoid_coll_boxes, sampled_gt_box]'], {'axis': '(0)'}), '([avoid_coll_boxes, sampled_gt_box], axis=0)\n', (6855, 6899), True, 'import numpy as np\n'), ((9214, 9226), 'numpy.sum', 'np.sum', (['mask'], {}), '(mask)\n', (9220, 9226), True, 'import numpy as np\n'), ((6686, 6743), 'numpy.stack', 'np.stack', (["[s['box3d_lidar'] for s in sampled_cls]"], {'axis': '(0)'}), "([s['box3d_lidar'] for s in sampled_cls], axis=0)\n", (6694, 6743), True, 'import numpy as np\n'), ((7286, 7346), 'numpy.concatenate', 'np.concatenate', (['[total_group_ids, sampled_group_ids]'], {'axis': '(0)'}), '([total_group_ids, sampled_group_ids], axis=0)\n', (7300, 7346), True, 'import numpy as np\n'), ((8124, 8193), 'det3d.core.bbox.box_np_ops.rotation_points_single_angle', 'box_np_ops.rotation_points_single_angle', (['s_points[:, :4]', 'rot'], {'axis': '(2)'}), '(s_points[:, :4], rot, axis=2)\n', (8163, 8193), False, 'from det3d.core.bbox import box_np_ops\n'), ((7189, 7243), 'numpy.stack', 'np.stack', (["[s['group_id'] for s in sampled_cls]"], {'axis': '(0)'}), "([s['group_id'] for s in sampled_cls], axis=0)\n", (7197, 7243), True, 'import numpy as np\n'), ((9105, 9166), 'det3d.core.sampler.preprocess.mask_points_in_corners', 'prep.mask_points_in_corners', (['s_points', 'crop_frustums[i:i + 1]'], {}), '(s_points, crop_frustums[i:i + 1])\n', (9132, 9166), True, 'from det3d.core.sampler import preprocess as prep\n'), ((9352, 9372), 'numpy.logical_not', 'np.logical_not', (['mask'], {}), '(mask)\n', (9366, 9372), True, 'import numpy 
as np\n'), ((7057, 7093), 'numpy.array', 'np.array', (["sampled_cls[0]['group_id']"], {}), "(sampled_cls[0]['group_id'])\n", (7065, 7093), True, 'import numpy as np\n'), ((7881, 7904), 'pathlib.Path', 'pathlib.Path', (['root_path'], {}), '(root_path)\n', (7893, 7904), False, 'import pathlib\n')] |
# -*- coding: utf-8 -*-
"""Developer convenience functions for ibs (detections).
TODO: need to split up into sub modules:
consistency_checks
feasibility_fixes
move the export stuff to dbio
then there are also convineience functions that need to be ordered at least
within this file
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from six.moves import zip, range
from os.path import expanduser, join, abspath
import numpy as np
import vtool as vt
import utool as ut
import cv2
from ibeis.control import controller_inject
import tqdm
# Inject utool functions
(print, rrr, profile) = ut.inject2(__name__, '[other.detectfuncs]')
# Number of confidence-threshold steps used when sweeping P/R curves.
SAMPLES = 1000
# Evenly spaced sample points in [0, 1] used for AP computation.
AP_SAMPLE_POINTS = [_ / float(SAMPLES) for _ in range(0, SAMPLES + 1)]
# Must import class before injection
CLASS_INJECT_KEY, register_ibs_method = (
    controller_inject.make_ibs_register_decorator(__name__))
def _resize(image, t_width=None, t_height=None, verbose=False):
    """Resize ``image`` to ``(t_width, t_height)``.

    When only one target dimension is given, the other is derived so the
    aspect ratio is preserved; when neither is given the image is returned
    unchanged.  Targets must be positive and at most 10x the original size.
    """
    if verbose:
        print('RESIZING WITH t_width = %r and t_height = %r' % (t_width, t_height, ))
    height, width = image.shape[:2]
    # Nothing requested: hand the original image back untouched.
    if t_width is None and t_height is None:
        return image
    # Derive whichever dimension is missing from the other one.
    if t_width is None:
        t_width = (width / height) * float(t_height)
    elif t_height is None:
        t_height = (height / width) * float(t_width)
    t_width = int(np.around(float(t_width)))
    t_height = int(np.around(float(t_height)))
    assert t_width > 0 and t_height > 0, 'target size too small'
    assert t_width <= width * 10 and t_height <= height * 10, 'target size too large (capped at 1000%)'
    # interpolation = cv2.INTER_LANCZOS4
    interpolation = cv2.INTER_LINEAR
    return cv2.resize(image, (t_width, t_height), interpolation=interpolation)
def simple_code(label):
    """Abbreviate a species/viewpoint label into its short code form.

    Species substrings are replaced by their registered species codes
    (following ``SPECIES_MAPPING`` alias chains until a concrete code is
    found), then viewpoint names are replaced by their ``YAWALIAS``
    shorthand, longest names first so short names do not clobber longer
    matches.
    """
    from ibeis.constants import YAWALIAS, SPECIES_MAPPING
    if label == 'ignore':
        return 'IGNORE'
    # Substitute species names with their canonical species codes.
    for species_key in SPECIES_MAPPING:
        if species_key not in label:
            continue
        code, nice = SPECIES_MAPPING[species_key]
        # Follow alias chains until a concrete code is reached.
        while code is None:
            code, nice = SPECIES_MAPPING[nice]
        assert code is not None
        label = label.replace(species_key, code)
    # Substitute viewpoint names, longest first, with their short aliases.
    for yaw_key in sorted(YAWALIAS.keys(), key=len, reverse=True):
        label = label.replace(yaw_key, YAWALIAS[yaw_key])
    return label
##########################################################################################
def general_precision_recall_algo(ibs, label_list, confidence_list, category='positive', samples=SAMPLES, **kwargs):
    """Sweep ``samples + 1`` confidence thresholds and build P/R and ROC curves.

    A prediction counts as positive at a threshold when the threshold is
    <= its confidence.  Each threshold's TP/TN/FP/FN tallies are converted
    into precision, recall, TPR and FPR points; thresholds whose tallies
    would divide by zero are reported and skipped.  The curves are seeded
    with their trivial end points so downstream interpolation has anchors.

    Returns:
        tuple: (conf_list, pr_list, re_list, tpr_list, fpr_list)
    """
    def _tally(pairs, threshold, category):
        # Count TP/TN/FP/FN for a single confidence threshold.
        tp, tn, fp, fn = 0.0, 0.0, 0.0, 0.0
        for label, confidence in pairs:
            if label == category:
                if threshold <= confidence:
                    tp += 1
                else:
                    fn += 1
            else:
                if threshold <= confidence:
                    fp += 1
                else:
                    tn += 1
        return tp, tn, fp, fn
    pairs = list(zip(label_list, confidence_list))
    threshold_list = [step / float(samples) for step in range(0, int(samples) + 1)]
    conf_dict = {threshold: _tally(pairs, threshold, category) for threshold in threshold_list}
    # Anchor points for the P/R and ROC curves.
    conf_list_ = [-1.0, -1.0]
    pr_list = [1.0, 0.0]
    re_list = [0.0, 1.0]
    tpr_list = [0.0, 1.0]
    fpr_list = [0.0, 1.0]
    for threshold in sorted(conf_dict.keys(), reverse=True):
        tp, tn, fp, fn = conf_dict[threshold]
        try:
            pr = tp / (tp + fp)
            re = tp / (tp + fn)
            tpr = tp / (tp + fn)
            fpr = fp / (fp + tn)
        except ZeroDivisionError:
            print('Zero division error (%r) - tp: %r tn: %r fp: %r fn: %r' % (threshold, tp, tn, fp, fn, ))
        else:
            conf_list_.append(threshold)
            pr_list.append(pr)
            re_list.append(re)
            tpr_list.append(tpr)
            fpr_list.append(fpr)
    return conf_list_, pr_list, re_list, tpr_list, fpr_list
def general_interpolate_precision_recall(conf_list, re_list, pr_list):
    """Interpolate a precision/recall curve into its staircase form.

    Points are walked from high recall to low; each precision is replaced by
    the running maximum of precisions seen so far, and whenever that maximum
    increases a NaN-confidence "step" point is inserted so the plotted curve
    shows the vertical edge of the staircase.

    Returns:
        tuple: (conf_list, re_list, pr_list) of the interpolated curve.
    """
    out_conf, out_re, out_pr = [], [], []
    points = sorted(zip(re_list, conf_list, pr_list), reverse=True)
    running_max = None
    for recall, confidence, precision in points:
        if running_max is None or precision > running_max:
            if running_max is not None:
                # Insert the vertical step of the staircase at this recall.
                out_conf.append(np.nan)
                out_re.append(recall)
                out_pr.append(running_max)
            running_max = precision
        if precision < running_max:
            precision = running_max
        out_conf.append(confidence)
        out_re.append(recall)
        out_pr.append(precision)
    return out_conf, out_re, out_pr
def general_identify_operating_point(conf_list, x_list, y_list, target=(1.0, 1.0)):
    """Find the operating point(s) on a curve closest to ``target``.

    Points are visited in ascending confidence order; a strictly closer
    point replaces the candidate list, while an exact distance tie is only
    appended when its confidence differs by more than 0.01 from every
    candidate recorded so far.

    Returns:
        tuple: (best_conf_list, best_x_list, best_y_list, best_length)
    """
    tx, ty = target
    best_length = np.inf
    best_conf_list, best_x_list, best_y_list = [], [], []
    for conf, x, y in sorted(zip(conf_list, x_list, y_list)):
        dx = x - tx
        dy = y - ty
        length = np.sqrt(dx * dx + dy * dy)
        if length < best_length:
            # Strictly closer: restart the candidate lists.
            best_length = length
            best_conf_list, best_x_list, best_y_list = [conf], [x], [y]
        elif length == best_length:
            # Tie: skip if any recorded confidence is NOT more than 0.01
            # away (the negated form also rejects NaN confidences).
            if any(not abs(prev - conf) > 0.01 for prev in best_conf_list):
                continue
            best_conf_list.append(conf)
            best_x_list.append(x)
            best_y_list.append(y)
    return best_conf_list, best_x_list, best_y_list, best_length
def general_area_best_conf(conf_list, x_list, y_list, label='Unknown', color='b',
                           marker='o', plot_point=True, interpolate=True,
                           target=(1.0, 1.0), target_recall=None, **kwargs):
    """Plot a curve and report its area and best operating point.

    With ``interpolate=True`` the curve is treated as precision/recall: it is
    staircase-interpolated and the area is the mean interpolated precision at
    the fixed ``AP_SAMPLE_POINTS`` recalls (AP).  Otherwise the area is the
    trapezoidal integral (AUC).  The curve is drawn on the current pyplot
    axes, so this must run inside a figure context.

    Returns:
        tuple: (ap, best_conf, tup1, tup2) where ``tup1`` is the output of
        ``general_identify_operating_point`` and ``tup2`` is the first point
        meeting ``target_recall`` (or None).
    """
    import matplotlib.pyplot as plt
    # Sort the curve by x (recall) before interpolating / integrating.
    zipped = list(sorted(zip(x_list, y_list, conf_list)))
    x_list = [_[0] for _ in zipped]
    y_list = [_[1] for _ in zipped]
    conf_list = [_[2] for _ in zipped]
    if interpolate:
        conf_list, x_list, y_list = general_interpolate_precision_recall(
            conf_list,
            x_list,
            y_list
        )
    if interpolate:
        # AP: average the interpolated precision at each sampled recall,
        # using the first curve point at or beyond that recall.
        ap_list = []
        for AP_POINT in AP_SAMPLE_POINTS:
            for re, pr in sorted(zip(x_list, y_list)):
                if AP_POINT <= re:
                    ap_list.append(pr)
                    break
        ap = sum(ap_list) / len(ap_list)
    else:
        # AUC: trapezoidal integration over the raw curve.
        ap = np.trapz(y_list, x=x_list)
    tup1 = general_identify_operating_point(conf_list, x_list, y_list, target=target)
    best_conf_list, best_x_list, best_y_list, best_length = tup1
    tup2 = None
    if target_recall is not None:
        # First real (non-NaN-conf) point whose recall reaches the target.
        for x, y, conf in sorted(zip(x_list, y_list, conf_list)):
            if target_recall <= x and not np.isnan(conf):
                tup2 = [conf], [x], [y], None
                break
    if len(best_conf_list) > 1:
        print('WARNING: Multiple best operating points found %r' % (best_conf_list, ))
    assert len(best_conf_list) > 0
    best_conf = best_conf_list[0]
    if interpolate:
        # label = '%s [AP = %0.02f, OP = %0.02f]' % (label, ap * 100.0, best_conf)
        label = '%s [AP = %0.02f]' % (label, ap * 100.0)
    else:
        label = '%s [AUC = %0.02f]' % (label, ap * 100.0, )
    linestyle = '--' if kwargs.get('line_dotted', False) else '-'
    plt.plot(x_list, y_list, color=color, linestyle=linestyle, label=label)
    if plot_point:
        plt.plot(best_x_list, best_y_list, color=color, marker=marker)
    return ap, best_conf, tup1, tup2
def general_confusion_matrix_algo(label_correct_list, label_predict_list,
                                  category_list, category_mapping,
                                  fig_, axes_, fuzzy_dict=None, conf=None,
                                  conf_list=None, size=10, **kwargs):
    """Draw a row-normalized confusion matrix onto ``axes_``.

    When ``conf`` is given, predictions with confidence below it are
    re-labeled with a synthetic 'SUP' (suppressed) category.  NOTE: in that
    case the caller's ``category_list`` and ``category_mapping`` (and
    ``fuzzy_dict``) are mutated in place to register 'SUP'.

    ``fuzzy_dict`` maps a category index to the set of indices considered
    an acceptable ("fuzzy") confusion with it.

    Returns:
        tuple: (correct_rate, fuzzy_rate) over all examples.
    """
    # import matplotlib.colors as colors
    import matplotlib.pyplot as plt
    suppressed_label = 'SUP'
    if conf is not None:
        assert conf_list is not None
        # Register the synthetic suppressed category (mutates the inputs).
        category_list.append(suppressed_label)
        index = len(category_list) - 1
        category_mapping[suppressed_label] = index
        if fuzzy_dict is not None:
            fuzzy_dict[index] = set([])
    if category_mapping is not None:
        # Order the displayed categories by their mapped index.
        index_list = [category_mapping[category] for category in category_list]
        zipped = list(sorted(zip(index_list, category_list)))
        category_list = [_[1] for _ in zipped]
    # Get the number of categories
    num_categories = len(category_list)
    # Build the confusion matrix
    confusion_matrix = np.zeros((num_categories, num_categories))
    zipped = zip(label_correct_list, label_predict_list)
    suppressed = 0.0
    suppressed_correct = 0.0
    suppressed_fuzzy = 0.0
    for index, (label_correct, label_predict) in enumerate(zipped):
        if conf is not None:
            conf_ = conf_list[index]
            if conf_ < conf:
                # Low-confidence prediction: count what suppression costs us,
                # then reroute the prediction to the 'SUP' column.
                if label_correct != label_predict:
                    suppressed_correct += 1
                    if fuzzy_dict is not None:
                        x = category_mapping[label_correct]
                        y = category_mapping[label_predict]
                        if not (y in fuzzy_dict[x] or x in fuzzy_dict[y]):
                            suppressed_fuzzy += 1
                label_predict = suppressed_label
                suppressed += 1
        # Perform any mapping that needs to be done
        correct_ = category_mapping[label_correct]
        predict_ = category_mapping[label_predict]
        # Add to the confidence matrix
        confusion_matrix[correct_][predict_] += 1
    # Normalize the confusion matrix using the rows
    row_normalizer = np.sum(confusion_matrix, axis=1)
    confusion_normalized = np.array((confusion_matrix.T / row_normalizer).T)
    # Draw the confusion matrix
    res = axes_.imshow(confusion_normalized, cmap=plt.cm.jet,
                       interpolation='nearest')
    correct = suppressed_correct
    fuzzy = suppressed_fuzzy
    total = 0.0
    for x in range(num_categories):
        for y in range(num_categories):
            number = int(confusion_matrix[x][y])
            if x == y:
                correct += number
            if fuzzy_dict is not None and (y in fuzzy_dict[x] or x in fuzzy_dict[y]):
                fuzzy += number
            total += number
            # Overlay the raw count on each matrix cell.
            axes_.annotate(
                str(number), xy=(y, x),
                horizontalalignment='center',
                verticalalignment='center',
                size=size,
            )
    cb = fig_.colorbar(res)  # NOQA
    # NOTE(review): Colorbar.set_clim is removed in modern matplotlib;
    # presumably this targets an older release — verify before upgrading.
    cb.set_clim(0.0, 1.0)
    plt.xticks(np.arange(num_categories), category_list, rotation=90)
    plt.yticks(np.arange(num_categories), category_list)
    margin_small = 0.1
    margin_large = 0.9
    plt.subplots_adjust(
        left=margin_small,
        right=margin_large,
        bottom=margin_small,
        top=margin_large
    )
    correct_rate = correct / total
    fuzzy_rate = fuzzy / total
    return correct_rate, fuzzy_rate
def general_intersection_over_union(bbox1, bbox2):
    """Return the intersection-over-union (IoU) of two bounding boxes.

    Each bbox is a dict with 'xtl', 'ytl', 'xbr', 'ybr' corners and
    precomputed 'width' / 'height'.  Returns 0.0 for disjoint boxes.
    """
    left = max(bbox1['xtl'], bbox2['xtl'])
    top = max(bbox1['ytl'], bbox2['ytl'])
    right = min(bbox1['xbr'], bbox2['xbr'])
    bottom = min(bbox1['ybr'], bbox2['ybr'])
    overlap_w = right - left
    overlap_h = bottom - top
    # Degenerate or empty overlap means the boxes do not intersect
    if overlap_w <= 0 or overlap_h <= 0:
        return 0.0
    overlap_area = overlap_w * overlap_h
    area1 = bbox1['width'] * bbox1['height']
    area2 = bbox2['width'] * bbox2['height']
    return overlap_area / (area1 + area2 - overlap_area)
def general_overlap(gt_list, pred_list):
    """Build the pairwise IoU matrix between ground-truths and predictions.

    Returns a float32 array of shape (len(gt_list), len(pred_list)) where
    entry [i, j] is the IoU of gt_list[i] and pred_list[j].
    """
    iou_matrix = np.zeros((len(gt_list), len(pred_list)), dtype=np.float32)
    for row, gt in enumerate(gt_list):
        for col, pred in enumerate(pred_list):
            iou_matrix[row, col] = general_intersection_over_union(gt, pred)
    return iou_matrix
def general_tp_fp_fn(gt_list, pred_list, min_overlap, **kwargs):
    """Count true positives, false positives and false negatives.

    Predictions are greedily assigned (highest confidence first) to their
    best-overlapping ground-truth; an assignment counts only when the IoU
    strictly exceeds ``min_overlap`` and the ground-truth is still unmatched.
    Returns the tuple (tp, fp, fn).
    """
    iou_matrix = general_overlap(gt_list, pred_list)
    num_gt, num_pred = iou_matrix.shape
    if num_gt == 0:
        # No ground-truth: every prediction is a false positive
        tp = 0.0
        fp = num_pred
        fn = 0.0
    elif num_pred == 0:
        # No predictions: every ground-truth is a false negative
        tp = 0.0
        fp = 0.0
        fn = num_gt
    else:
        best_gt_list = np.argmax(iou_matrix, axis=0)
        best_iou_list = np.max(iou_matrix, axis=0)
        confidence_list = [pred.get('confidence', None) for pred in pred_list]
        assert None not in confidence_list
        # Sort candidate assignments so the most confident prediction wins ties
        candidate_list = sorted(
            zip(confidence_list, best_iou_list, range(num_pred), best_gt_list),
            reverse=True,
        )
        assignment_dict = {}
        for _, best_iou, pred_index, gt_index in candidate_list:
            if best_iou > min_overlap and gt_index not in assignment_dict:
                assignment_dict[gt_index] = pred_index
        tp = len(assignment_dict.keys())
        fp = num_pred - tp
        fn = num_gt - tp
    assert tp >= 0
    assert fp >= 0
    assert fn >= 0
    return tp, fp, fn
def general_get_imageset_gids(ibs, imageset_text, unique=True, **kwargs):
    """Resolve an imageset name to the rowids of its images.

    When ``unique`` is True, duplicates are removed (note: order is then
    not preserved, since the dedup goes through a set).
    """
    imageset_rowid = ibs.get_imageset_imgsetids_from_text(imageset_text)
    gid_list = ibs.get_imageset_gids(imageset_rowid)
    if unique:
        gid_list = list(set(gid_list))
    return gid_list
def _general_rotated_bbox_extent(bbox, theta):
    # Rotate ``bbox`` by ``theta`` and return the axis-aligned extent of the
    # rotated polygon as an integer-pixel (xtl, ytl, width, height) tuple.
    R = vt.rotation_around_bbox_mat3x3(theta, bbox)
    # Get verticies of the annotation polygon
    verts = vt.verts_from_bbox(bbox, close=True)
    # Rotate and transform vertices
    xyz_pts = vt.add_homogenous_coordinate(np.array(verts).T)
    trans_pts = vt.remove_homogenous_coordinate(R.dot(xyz_pts))
    # BUG FIX: the np.int alias was removed in NumPy 1.24; use builtin int
    new_verts = np.round(trans_pts).astype(int).T.tolist()
    x_points = [pt[0] for pt in new_verts]
    y_points = [pt[1] for pt in new_verts]
    xtl = int(min(x_points))
    xbr = int(max(x_points))
    ytl = int(min(y_points))
    ybr = int(max(y_points))
    return (xtl, ytl, xbr - xtl, ybr - ytl)


def general_parse_gt_annots(ibs, aid_list, include_parts=True, species_mapping=None,
                            **kwargs):
    """Convert annotations (and optionally their parts) into normalized GT dicts.

    Args:
        ibs: controller object used to look up image/annotation properties
        aid_list (list): annotation rowids to convert
        include_parts (bool): also emit one GT dict per part of each annotation
        species_mapping (dict): optional remapping of annotation species labels

    Returns:
        tuple: (gt_list, species_set) where gt_list holds one dict per
        annotation/part with bbox coordinates normalized by the image size,
        and species_set is the set of emitted 'class' labels.
    """
    # Avoid a mutable default argument; None stands in for "no remapping"
    if species_mapping is None:
        species_mapping = {}
    gid_list = ibs.get_annot_gids(aid_list)
    species_set = set([])
    gt_list = []
    for gid, aid in zip(gid_list, aid_list):
        width, height = ibs.get_image_sizes(gid)
        bbox = _general_rotated_bbox_extent(
            ibs.get_annot_bboxes(aid),
            ibs.get_annot_thetas(aid),
        )
        species = ibs.get_annot_species_texts(aid)
        viewpoint = ibs.get_annot_viewpoints(aid)
        interest = ibs.get_annot_interest(aid)
        temp = {
            'gid'        : gid,
            'aid'        : aid,
            'xtl'        : bbox[0] / width,
            'ytl'        : bbox[1] / height,
            'xbr'        : (bbox[0] + bbox[2]) / width,
            'ybr'        : (bbox[1] + bbox[3]) / height,
            'width'      : bbox[2] / width,
            'height'     : bbox[3] / height,
            'class'      : species_mapping.get(species, species),
            'viewpoint'  : viewpoint,
            'interest'   : interest,
            'confidence' : 1.0,
        }
        species_set.add(temp['class'])
        gt_list.append(temp)
        if include_parts:
            for part_rowid in ibs.get_annot_part_rowids(aid):
                part_bbox = _general_rotated_bbox_extent(
                    ibs.get_part_bboxes(part_rowid),
                    ibs.get_part_thetas(part_rowid),
                )
                tag = ibs.get_part_tag_text(part_rowid)
                # Parts inherit the (unmapped) annot species; tagged parts
                # become 'species+tag'
                if tag is None:
                    tag = species
                else:
                    tag = '%s+%s' % (species, tag, )
                temp = {
                    'gid'        : gid,
                    'aid'        : aid,
                    'part_id'    : part_rowid,
                    'xtl'        : part_bbox[0] / width,
                    'ytl'        : part_bbox[1] / height,
                    'xbr'        : (part_bbox[0] + part_bbox[2]) / width,
                    'ybr'        : (part_bbox[1] + part_bbox[3]) / height,
                    'width'      : part_bbox[2] / width,
                    'height'     : part_bbox[3] / height,
                    'class'      : tag,
                    'viewpoint'  : viewpoint,
                    'interest'   : interest,
                    'confidence' : 1.0,
                }
                species_set.add(temp['class'])
                gt_list.append(temp)
    return gt_list, species_set
def general_parse_gt(ibs, test_gid_list=None, **kwargs):
    """Gather normalized ground-truth dicts for a set of test images.

    Args:
        ibs: controller object
        test_gid_list (list): image rowids; defaults to the 'TEST_SET' imageset

    Returns:
        dict: maps image UUID -> list of GT dicts (see general_parse_gt_annots)
    """
    if test_gid_list is None:
        test_gid_list = general_get_imageset_gids(ibs, 'TEST_SET', **kwargs)
    uuid_list = ibs.get_image_uuids(test_gid_list)
    gid_list = ibs.get_image_gids_from_uuid(uuid_list)
    species_set = set([])
    gt_dict = {}
    for gid, uuid in zip(gid_list, uuid_list):
        aid_list = ibs.get_image_aids(gid)
        gt_list, species_set_ = general_parse_gt_annots(ibs, aid_list, **kwargs)
        # BUG FIX: the original computed ``species_set | species_set`` (a
        # self-union no-op); accumulate the per-image species instead.
        species_set |= species_set_
        gt_dict[uuid] = gt_list
    # print('General Parse GT species_set = %r' % (species_set, ))
    return gt_dict
##########################################################################################
def localizer_parse_pred(ibs, test_gid_list=None, species_mapping={}, **kwargs):
    """Pull localizer detections for a set of images and normalize them.

    Builds, per image UUID, a list of prediction dicts with bbox coordinates
    normalized by the image size, plus confidence / class / viewpoint /
    interest / feature fields, ready for matching against ground-truth.

    Args:
        ibs: controller object; ``ibs.depc_image`` supplies cached properties
        test_gid_list (list): image rowids; defaults to the 'TEST_SET' imageset
        species_mapping (dict): optional remapping of predicted class labels.
            NOTE(review): mutable default argument — harmless here because it
            is only read, never mutated.

    Flags read from ``kwargs``:
        features / features_lazy: attach (or lazily fetch) feature vectors
        labels: replace classes/viewpoints with the labeler's outputs
        classify: replace confidences with the classifier's scores
        interest: attach AoI scores (thresholded at 0.84 below)

    Returns:
        dict: image UUID -> list of prediction dicts
    """
    depc = ibs.depc_image
    # Default the secondary feature extractor if the caller did not pick one
    if 'feature2_algo' not in kwargs:
        kwargs['feature2_algo'] = 'resnet'
    if test_gid_list is None:
        test_gid_list = general_get_imageset_gids(ibs, 'TEST_SET', **kwargs)
    uuid_list = ibs.get_image_uuids(test_gid_list)
    size_list = ibs.get_image_sizes(test_gid_list)
    # Unsure, but we need to call this multiple times? Lazy loading bug?
    bboxes_list = depc.get_property('localizations', test_gid_list, 'bboxes', config=kwargs)
    # Get actual data
    bboxes_list = depc.get_property('localizations', test_gid_list, 'bboxes', config=kwargs)
    thetas_list = depc.get_property('localizations', test_gid_list, 'thetas', config=kwargs)
    confss_list = depc.get_property('localizations', test_gid_list, 'confs', config=kwargs)
    classs_list = depc.get_property('localizations', test_gid_list, 'classes', config=kwargs)
    # Number of detections per image; drives all per-detection parallel lists
    length_list = [ len(bbox_list) for bbox_list in bboxes_list ]
    # Establish primitives
    test_gids_list = [ [test_gid] * length for test_gid, length in zip(test_gid_list, length_list) ]
    sizes_list = [ [size] * length for size, length in zip(size_list, length_list) ]
    keeps_list = [ [True] * length for length in length_list ]
    features_list = [ [None] * length for length in length_list ]
    features_lazy_list = [ [None] * length for length in length_list ]
    viewpoints_list = [ [None] * length for length in length_list ]
    interests_list = [ [None] * length for length in length_list ]
    # Get features
    if kwargs.get('features', False):
        features_list = depc.get_property('localizations_features', test_gid_list,
                                          'vector', config=kwargs)
    if kwargs.get('features_lazy', False):
        from functools import partial
        # Defer the expensive per-image feature fetch until a vector is needed
        def features_lazy_func(gid, offset):
            vector_list = depc.get_property('localizations_features', gid,
                                            'vector', config=kwargs)
            vector = vector_list[offset]
            return vector
        features_lazy_list = [
            [
                partial(features_lazy_func, test_gid, test_offset)
                for test_offset in range(length)
            ]
            for test_gid, length in zip(test_gid_list, length_list)
        ]
    # Get species and viewpoints labels
    if kwargs.get('labels', False):
        classs_list = depc.get_property('localizations_labeler', test_gid_list,
                                        'species', config=kwargs)
        viewpoints_list = depc.get_property('localizations_labeler', test_gid_list,
                                            'viewpoint', config=kwargs)
    # Get updated confidences for boxes
    if kwargs.get('classify', False):
        print('Using alternate classifications')
        # depc.delete_property('localizations_classifier', test_gid_list, config=kwargs)
        confss_list = depc.get_property('localizations_classifier', test_gid_list,
                                        'score', config=kwargs)
    # Get updated confidences for boxes
    if kwargs.get('interest', False):
        print('Using alternate AoI interest flags')
        interests_list = depc.get_property('localizations_classifier', test_gid_list,
                                           'score', config=kwargs)
    # Reformat results for json
    zipped_list_list = zip(
        keeps_list,
        test_gids_list,
        sizes_list,
        bboxes_list,
        thetas_list,
        confss_list,
        classs_list,
        viewpoints_list,
        interests_list,
        features_list,
        features_lazy_list,
    )
    results_list = [
        [
            {
                'gid'          : test_gid,
                'xtl'          : bbox[0] / width,
                'ytl'          : bbox[1] / height,
                'xbr'          : (bbox[0] + bbox[2]) / width,
                'ybr'          : (bbox[1] + bbox[3]) / height,
                'width'        : bbox[2] / width,
                'height'       : bbox[3] / height,
                'theta'        : theta,
                'confidence'   : conf,
                'class'        : species_mapping.get(class_, class_),
                'viewpoint'    : viewpoint,
                # 0.84 is the hard-coded AoI decision threshold
                'interest'     : None if interest is None else interest >= 0.84,
                'feature'      : feature,
                'feature_lazy' : feature_lazy,
            }
            for keep_, test_gid, (width, height), bbox, theta, conf, class_, viewpoint, interest, feature, feature_lazy in zip(*zipped_list)
            if keep_
        ]
        for zipped_list in zipped_list_list
    ]
    pred_dict = {
        uuid_ : result_list
        for uuid_, result_list in zip(uuid_list, results_list)
    }
    return pred_dict
def localizer_precision_recall_algo(ibs, samples=SAMPLES, test_gid_list=None,
                                    **kwargs):
    """Compute precision/recall curve points for the localizer.

    Returns (conf_list, pr_list, re_list).  The lists are seeded with the
    (precision, recall) endpoints (1, 0) and (0, 1) at sentinel confidence
    -1.0 so the curve always spans the full recall axis.
    """
    if test_gid_list is None:
        test_gid_list = general_get_imageset_gids(ibs, 'TEST_SET', **kwargs)
    test_uuid_list = ibs.get_image_uuids(test_gid_list)
    print('\tGather Ground-Truth')
    gt_dict = general_parse_gt(ibs, test_gid_list=test_gid_list, **kwargs)
    print('\tGather Predictions')
    pred_dict = localizer_parse_pred(ibs, test_gid_list=test_gid_list, **kwargs)
    species_set = kwargs.get('species_set', None)
    if species_set is not None:
        # Strip the '!' prefix (marks interest-only species) before filtering
        species_set_ = set([ species.lstrip('!') for species in species_set ])
        for dict_ in (gt_dict, pred_dict):
            for image_uuid in dict_:
                dict_[image_uuid] = [
                    val
                    for val in dict_[image_uuid]
                    if val.get('class', None) in species_set_
                ]
    values = localizer_tp_fp(test_uuid_list, gt_dict, pred_dict, **kwargs)
    conf_list, tp_list, fp_list, total = values
    # Seed the curve endpoints with sentinel confidences
    conf_list_ = [-1.0, -1.0]
    pr_list = [1.0, 0.0]
    re_list = [0.0, 1.0]
    for conf, tp, fp in zip(conf_list, tp_list, fp_list):
        try:
            precision = tp / (tp + fp)
            recall = tp / total
        except ZeroDivisionError:
            continue
        conf_list_.append(conf)
        pr_list.append(precision)
        re_list.append(recall)
    return conf_list_, pr_list, re_list
def localizer_assign(gt_list, pred, min_overlap):
    """Find the best ground-truth match for one prediction.

    Only ground-truths with the same 'class' are considered; the match with
    the highest IoU at or above ``min_overlap`` wins (later entries win ties).
    Returns (index, overlap), or (None, None) when nothing qualifies.
    """
    best_index = None
    best_overlap = min_overlap
    for candidate_index, gt in enumerate(gt_list):
        # Class labels must agree before geometry is even considered
        if gt['class'] != pred['class']:
            continue
        candidate_overlap = general_intersection_over_union(gt, pred)
        if candidate_overlap >= best_overlap:
            best_overlap = candidate_overlap
            best_index = candidate_index
    if best_index is None:
        best_overlap = None
    return best_index, best_overlap
def localizer_assignments(pred_list, gt_list, gt_list_=None, min_overlap=0.5):
    """Greedily match predictions (highest confidence first) to ground-truths.

    Args:
        pred_list (list): prediction dicts, each with a 'confidence' key
        gt_list (list): scored ground-truth dicts.  NOTE: mutated in place —
            matched entries are deleted so each GT is claimed at most once.
        gt_list_ (list): "ignored" ground-truths; a prediction matching one of
            these is suppressed entirely (neither TP nor FP)
        min_overlap (float): IoU threshold passed through to localizer_assign

    Returns:
        list: (confidence, is_tp, gt_index, overlap) tuples, one per
        non-suppressed prediction.
    """
    # BUG FIX: replaced the mutable default argument ``gt_list_=[]`` with a
    # None sentinel (behavior is unchanged; the default was never mutated).
    if gt_list_ is None:
        gt_list_ = []
    pred_list = sorted(pred_list, key=lambda pred: pred['confidence'], reverse=True)
    match_list = []
    for pred in pred_list:
        flag = False
        match_index, best_overlap = localizer_assign(gt_list, pred, min_overlap)
        match_index_, best_overlap_ = localizer_assign(gt_list_, pred, min_overlap)
        if match_index is not None:
            # True positive: claim the ground-truth so it cannot match again
            flag = True
            del gt_list[match_index]
        elif match_index_ is not None:
            # Matches an ignored ground-truth: drop from scoring entirely
            flag = None
        if flag is not None:
            match_list += [
                (pred['confidence'], flag, match_index, best_overlap)
            ]
    return match_list
def localizer_tp_fp(uuid_list, gt_dict, pred_dict, min_overlap=0.5, **kwargs):
    """Accumulate cumulative TP/FP counts over a confidence sweep.

    Returns (conf_list, tp_list, fp_list, total): matches sorted by
    descending confidence with running TP/FP counters, plus the total
    number of scored ground-truths.
    """
    # Species prefixed with '!' only score annotations flagged as of-interest
    interest_species_set = set([])
    species_set = kwargs.get('species_set', None)
    if species_set is not None:
        for species in species_set:
            if species.startswith('!'):
                interest_species_set.add(species.lstrip('!'))
    total = 0.0
    match_list = []
    for image_uuid in uuid_list:
        scored_gt_list = []
        ignored_gt_list = []
        for gt in gt_dict[image_uuid]:
            ignored = gt['class'] in interest_species_set and not gt['interest']
            if ignored:
                ignored_gt_list.append(gt)
            else:
                scored_gt_list.append(gt)
        total += len(scored_gt_list)
        # Match predictions
        match_list += localizer_assignments(
            pred_dict[image_uuid], scored_gt_list, ignored_gt_list, min_overlap
        )
    # Sweep the confidence threshold from high to low, accumulating counts
    match_list = sorted(match_list, key=lambda match: match[0], reverse=True)
    conf_list = []
    tp_list = []
    fp_list = []
    tp_counter = 0
    fp_counter = 0
    for conf, flag, index, overlap in match_list:
        if flag:
            tp_counter += 1
        else:
            fp_counter += 1
        conf_list.append(conf)
        tp_list.append(tp_counter)
        fp_list.append(fp_counter)
    return conf_list, tp_list, fp_list, total
def localizer_precision_recall_algo_plot(ibs, **kwargs):
    """Compute and plot the precision/recall curve for one configuration."""
    tag = kwargs['label']
    print('Processing Precision-Recall for: %r' % (tag, ))
    values = localizer_precision_recall_algo(ibs, **kwargs)
    conf_list, precision_list, recall_list = values
    return general_area_best_conf(conf_list, recall_list, precision_list, **kwargs)
def _ignore_filter_identity_func(*args, **kwargs):
return False
def localizer_iou_recall_algo(ibs, samples=100, test_gid_list=None,
                              ignore_filter_func=None, **kwargs):
    """Sweep the IoU threshold and record the best achievable recall at each.

    For each of ``samples + 1`` IoU thresholds in [0, 1], compute the P/R
    curve, pick the operating point closest to (recall, precision) = (1, 1),
    and record its confidence and recall.

    Args:
        ibs: controller object
        samples (int): number of IoU steps (thresholds are i / samples)
        test_gid_list (list): image rowids; defaults to the 'TEST_SET' imageset
        ignore_filter_func (callable): predicate (ibs, val) -> truthy; values
            for which it returns truthy are dropped from GT and predictions

    Returns:
        tuple: (conf_list, iou_list, recall_list), parallel lists with one
        entry per IoU threshold.
    """
    assert 'min_overlap' not in kwargs
    if test_gid_list is None:
        test_gid_list = general_get_imageset_gids(ibs, 'TEST_SET', **kwargs)
    test_uuid_list = ibs.get_image_uuids(test_gid_list)
    if ignore_filter_func is None:
        ignore_filter_func = _ignore_filter_identity_func
    print('\tGather Ground-Truth')
    gt_dict = general_parse_gt(ibs, test_gid_list=test_gid_list, **kwargs)
    print('\tGather Predictions')
    pred_dict = localizer_parse_pred(ibs, test_gid_list=test_gid_list, **kwargs)
    species_set = kwargs.get('species_set', None)
    if species_set is not None:
        # filter out any prefix ! to denote interest only
        species_set_ = set([ species.lstrip('!') for species in species_set ])
        for dict_ in (gt_dict, pred_dict):
            for image_uuid in dict_:
                temp = []
                for val in dict_[image_uuid]:
                    if val.get('class', None) not in species_set_:
                        continue
                    if ignore_filter_func(ibs, val):
                        continue
                    temp.append(val)
                dict_[image_uuid] = temp
    target = (1.0, 1.0)
    iou_thresh_list = [ _ / float(samples) for _ in range(0, int(samples) + 1) ]
    conf_list_ = []
    iou_list_ = []
    recall_list = []
    for iou in tqdm.tqdm(iou_thresh_list):
        values = localizer_tp_fp(test_uuid_list, gt_dict, pred_dict, min_overlap=iou, **kwargs)
        conf_list, tp_list, fp_list, total = values
        # BUG FIX: the per-threshold sweep previously reset and reused the
        # outer ``conf_list_`` accumulator, so the returned confidence list
        # only reflected the *last* IoU threshold and was not parallel with
        # ``iou_list_`` / ``recall_list``.  Use local names for the sweep.
        pr_list = []
        re_list = []
        for _, tp, fp in zip(conf_list, tp_list, fp_list):
            try:
                pr = tp / (tp + fp)
                re = tp / total
            except ZeroDivisionError:
                continue
            pr_list.append(pr)
            re_list.append(re)
        best_tup = general_identify_operating_point(conf_list, re_list, pr_list, target=target)
        best_conf_list, best_re_list, best_pr_list, best_length = best_tup
        if len(best_conf_list) > 1:
            print('WARNING: Multiple best operating points found %r' % (best_conf_list, ))
        assert len(best_conf_list) > 0
        # Among the tied operating points, keep the one with the best recall
        best_re_index = np.argmax(best_re_list)
        best_re = best_re_list[best_re_index]
        best_conf = best_conf_list[best_re_index]
        conf_list_.append(best_conf)
        iou_list_.append(iou)
        recall_list.append(best_re)
    return conf_list_, iou_list_, recall_list
def localizer_iou_recall_algo_plot(ibs, **kwargs):
    """Compute and plot the IoU-vs-recall curve for one configuration."""
    tag = kwargs['label']
    print('Processing IoU-Recall for: %r' % (tag, ))
    values = localizer_iou_recall_algo(ibs, **kwargs)
    conf_list, iou_list, recall_list = values
    # The IoU samples are discrete, so do not interpolate between them
    return general_area_best_conf(conf_list, iou_list, recall_list,
                                  interpolate=False, **kwargs)
# def localizer_iou_precision_algo_plot(ibs, **kwargs):
# label = kwargs['label']
# print('Processing Precision-Recall for: %r' % (label, ))
# conf_list, iou_list, pr_list, re_list = localizer_iou_precision_recall_algo(ibs, **kwargs)
# return general_area_best_conf(conf_list, iou_list, re_list, **kwargs)
def localizer_confusion_matrix_algo_plot(ibs, label=None, target_conf=None,
                                         test_gid_list=None, **kwargs):
    """Build a positive/negative confusion matrix for the localizer.

    Sweeps the detection confidence threshold and either (a) picks the
    threshold with the best accuracy (when ``target_conf`` is None) or
    (b) picks the first threshold at or above ``target_conf``.  The chosen
    TP/FP/FN counts are expanded into per-instance label/prediction lists
    and rendered via ``general_confusion_matrix_algo``.

    Args:
        ibs: controller object
        label (str): tag used for logging only
        target_conf (float): optional fixed operating confidence
        test_gid_list (list): image rowids; defaults to the 'TEST_SET' imageset

    Returns:
        tuple: (best_conf, values) where ``values`` is the return value of
        ``general_confusion_matrix_algo`` (or (nan, (nan, None)) on failure).
    """
    if test_gid_list is None:
        test_gid_list = general_get_imageset_gids(ibs, 'TEST_SET', **kwargs)
    test_uuid_list = ibs.get_image_uuids(test_gid_list)
    print('\tGather Ground-Truth')
    gt_dict = general_parse_gt(ibs, test_gid_list=test_gid_list, **kwargs)
    print('\tGather Predictions')
    pred_dict = localizer_parse_pred(ibs, test_gid_list=test_gid_list, **kwargs)
    species_set = kwargs.get('species_set', None)
    if species_set is not None:
        # filter out any prefix ! to denote interest only
        species_set_ = set([ species.lstrip('!') for species in species_set ])
        dict_list = [
            (gt_dict, 'Ground-Truth'),
            (pred_dict, 'Predictions'),
        ]
        for dict_, dict_tag in dict_list:
            for image_uuid in dict_:
                dict_[image_uuid] = [
                    val
                    for val in dict_[image_uuid]
                    if val.get('class', None) in species_set_
                ]
    values = localizer_tp_fp(test_uuid_list, gt_dict, pred_dict, **kwargs)
    conf_list, tp_list, fp_list, total = values
    best_conf = None
    best_accuracy = None
    best_args = None
    # Sweep operating points from low to high confidence
    for conf, tp, fp in sorted(zip(conf_list, tp_list, fp_list)):
        fn = total - tp
        accuracy = tp / (tp + fp + fn)
        if target_conf is None:
            # Track the threshold with the best overall accuracy
            if best_accuracy is None or accuracy > best_accuracy:
                best_conf = conf
                best_accuracy = accuracy
                best_args = (tp, fp, fn)
        else:
            # Take the first threshold at or above the requested confidence
            if target_conf <= conf:
                best_conf = conf
                best_accuracy = accuracy
                best_args = (tp, fp, fn)
                break
    try:
        assert None not in [best_conf, best_accuracy, best_args]
    except AssertionError:
        # NOTE(review): drops into an interactive debugger when no operating
        # point was found (e.g. empty match list) — debugging aid, not cleanup
        ut.embed()
        return np.nan, (np.nan, None)
    print('Processing Confusion Matrix for: %r (Conf = %0.02f, Accuracy = %0.02f)' % (label, best_conf, best_accuracy, ))
    tp, fp, fn = best_args
    # Expand the aggregate counts into per-instance label/prediction pairs
    label_list = []
    prediction_list = []
    for _ in range(int(tp)):
        label_list.append('positive')
        prediction_list.append('positive')
    for _ in range(int(fp)):
        label_list.append('negative')
        prediction_list.append('positive')
    for _ in range(int(fn)):
        label_list.append('positive')
        prediction_list.append('negative')
    category_list = ['positive', 'negative']
    category_mapping = {
        'positive': 0,
        'negative': 1,
    }
    values = general_confusion_matrix_algo(label_list, prediction_list, category_list,
                                           category_mapping, size=20, **kwargs)
    return best_conf, values
@register_ibs_method
def localizer_precision_recall(ibs, config_dict=None, output_path=None,
test_gid_list=None, **kwargs):
if config_dict is None:
if test_gid_list is not None:
print('Using %d test gids' % (len(test_gid_list), ))
# species_mapping = { # NOQA
# 'giraffe_masai' : 'giraffe',
# 'giraffe_reticulated' : 'giraffe',
# 'zebra_grevys' : 'zebra',
# 'zebra_plains' : 'zebra',
# }
config_dict = {
# 'seaturtle': (
# [
# {'label': 'Sea Turtle', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seaturtle', 'weight_filepath' : 'seaturtle', 'nms': True, 'nms_thresh': 0.2, 'species_set' : set(['turtle_green', 'turtle_hawksbill'])},
# {'label': 'Sea Turtle Heads', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seaturtle', 'weight_filepath' : 'seaturtle', 'nms': True, 'nms_thresh': 0.2, 'species_set' : set(['turtle_green+head', 'turtle_hawksbill+head'])},
# {'label': 'Green', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seaturtle', 'weight_filepath' : 'seaturtle', 'nms': True, 'nms_thresh': 0.2, 'species_set' : set(['turtle_green'])},
# {'label': 'Green Heads', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seaturtle', 'weight_filepath' : 'seaturtle', 'nms': True, 'nms_thresh': 0.2, 'species_set' : set(['turtle_green+head'])},
# {'label': 'Hawksbill', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seaturtle', 'weight_filepath' : 'seaturtle', 'nms': True, 'nms_thresh': 0.2, 'species_set' : set(['turtle_hawksbill'])},
# {'label': 'Hawksbill Heads', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seaturtle', 'weight_filepath' : 'seaturtle', 'nms': True, 'nms_thresh': 0.2, 'species_set' : set(['turtle_hawksbill+head'])},
# ],
# {'BEST_INDEX': 0},
# ),
# '!seaturtle': (
# [
# {'label': '! Sea Turtle', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seaturtle', 'weight_filepath' : 'seaturtle', 'nms': True, 'nms_thresh': 0.2, 'species_set' : set(['!turtle_green', '!turtle_hawksbill'])},
# {'label': '! Sea Turtle Heads', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seaturtle', 'weight_filepath' : 'seaturtle', 'nms': True, 'nms_thresh': 0.2, 'species_set' : set(['!turtle_green+head', '!turtle_hawksbill+head'])},
# {'label': '! Green', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seaturtle', 'weight_filepath' : 'seaturtle', 'nms': True, 'nms_thresh': 0.2, 'species_set' : set(['!turtle_green'])},
# {'label': '! Green Heads', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seaturtle', 'weight_filepath' : 'seaturtle', 'nms': True, 'nms_thresh': 0.2, 'species_set' : set(['!turtle_green+head'])},
# {'label': '! Hawksbill', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seaturtle', 'weight_filepath' : 'seaturtle', 'nms': True, 'nms_thresh': 0.2, 'species_set' : set(['!turtle_hawksbill'])},
# {'label': '! Hawksbill Heads', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seaturtle', 'weight_filepath' : 'seaturtle', 'nms': True, 'nms_thresh': 0.2, 'species_set' : set(['!turtle_hawksbill+head'])},
# ],
# {'BEST_INDEX': 0},
# ),
# 'hawksbills': (
# [
# {'label': 'Hawksbill NMS 0%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seaturtle', 'weight_filepath' : 'seaturtle', 'nms': True, 'nms_thresh': 0.00, 'species_set' : set(['turtle_hawksbill'])},
# {'label': 'Hawksbill NMS 10%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seaturtle', 'weight_filepath' : 'seaturtle', 'nms': True, 'nms_thresh': 0.10, 'species_set' : set(['turtle_hawksbill'])},
# {'label': 'Hawksbill NMS 20%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seaturtle', 'weight_filepath' : 'seaturtle', 'nms': True, 'nms_thresh': 0.20, 'species_set' : set(['turtle_hawksbill'])},
# {'label': 'Hawksbill NMS 30%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seaturtle', 'weight_filepath' : 'seaturtle', 'nms': True, 'nms_thresh': 0.30, 'species_set' : set(['turtle_hawksbill'])},
# {'label': 'Hawksbill NMS 40%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seaturtle', 'weight_filepath' : 'seaturtle', 'nms': True, 'nms_thresh': 0.40, 'species_set' : set(['turtle_hawksbill'])},
# {'label': 'Hawksbill NMS 50%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seaturtle', 'weight_filepath' : 'seaturtle', 'nms': True, 'nms_thresh': 0.50, 'species_set' : set(['turtle_hawksbill'])},
# {'label': 'Hawksbill NMS 60%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seaturtle', 'weight_filepath' : 'seaturtle', 'nms': True, 'nms_thresh': 0.60, 'species_set' : set(['turtle_hawksbill'])},
# {'label': 'Hawksbill NMS 70%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seaturtle', 'weight_filepath' : 'seaturtle', 'nms': True, 'nms_thresh': 0.70, 'species_set' : set(['turtle_hawksbill'])},
# {'label': 'Hawksbill NMS 80%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seaturtle', 'weight_filepath' : 'seaturtle', 'nms': True, 'nms_thresh': 0.80, 'species_set' : set(['turtle_hawksbill'])},
# {'label': 'Hawksbill NMS 90%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seaturtle', 'weight_filepath' : 'seaturtle', 'nms': True, 'nms_thresh': 0.90, 'species_set' : set(['turtle_hawksbill'])},
# {'label': 'Hawksbill NMS 100%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seaturtle', 'weight_filepath' : 'seaturtle', 'nms': True, 'nms_thresh': 1.00, 'species_set' : set(['turtle_hawksbill'])},
# ],
# {},
# ),
# 'hawsbills+heads': (
# [
# {'label': 'Hawksbill Head NMS 0%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seaturtle', 'weight_filepath' : 'seaturtle', 'nms': True, 'nms_thresh': 0.00, 'species_set' : set(['turtle_hawksbill+head'])},
# {'label': 'Hawksbill Head NMS 10%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seaturtle', 'weight_filepath' : 'seaturtle', 'nms': True, 'nms_thresh': 0.10, 'species_set' : set(['turtle_hawksbill+head'])},
# {'label': 'Hawksbill Head NMS 20%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seaturtle', 'weight_filepath' : 'seaturtle', 'nms': True, 'nms_thresh': 0.20, 'species_set' : set(['turtle_hawksbill+head'])},
# {'label': 'Hawksbill Head NMS 30%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seaturtle', 'weight_filepath' : 'seaturtle', 'nms': True, 'nms_thresh': 0.30, 'species_set' : set(['turtle_hawksbill+head'])},
# {'label': 'Hawksbill Head NMS 40%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seaturtle', 'weight_filepath' : 'seaturtle', 'nms': True, 'nms_thresh': 0.40, 'species_set' : set(['turtle_hawksbill+head'])},
# {'label': 'Hawksbill Head NMS 50%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seaturtle', 'weight_filepath' : 'seaturtle', 'nms': True, 'nms_thresh': 0.50, 'species_set' : set(['turtle_hawksbill+head'])},
# {'label': 'Hawksbill Head NMS 60%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seaturtle', 'weight_filepath' : 'seaturtle', 'nms': True, 'nms_thresh': 0.60, 'species_set' : set(['turtle_hawksbill+head'])},
# {'label': 'Hawksbill Head NMS 70%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seaturtle', 'weight_filepath' : 'seaturtle', 'nms': True, 'nms_thresh': 0.70, 'species_set' : set(['turtle_hawksbill+head'])},
# {'label': 'Hawksbill Head NMS 80%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seaturtle', 'weight_filepath' : 'seaturtle', 'nms': True, 'nms_thresh': 0.80, 'species_set' : set(['turtle_hawksbill+head'])},
# {'label': 'Hawksbill Head NMS 90%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seaturtle', 'weight_filepath' : 'seaturtle', 'nms': True, 'nms_thresh': 0.90, 'species_set' : set(['turtle_hawksbill+head'])},
# {'label': 'Hawksbill Head NMS 100%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seaturtle', 'weight_filepath' : 'seaturtle', 'nms': True, 'nms_thresh': 1.00, 'species_set' : set(['turtle_hawksbill+head'])},
# ],
# {},
# ),
# 'hammerhead': (
# [
# {'label': 'Hammerhead NMS 0%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'hammerhead', 'weight_filepath' : 'hammerhead', 'nms': True, 'nms_thresh': 0.00, 'species_set' : set(['shark_hammerhead'])},
# {'label': 'Hammerhead NMS 10%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'hammerhead', 'weight_filepath' : 'hammerhead', 'nms': True, 'nms_thresh': 0.10, 'species_set' : set(['shark_hammerhead'])},
# {'label': 'Hammerhead NMS 20%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'hammerhead', 'weight_filepath' : 'hammerhead', 'nms': True, 'nms_thresh': 0.20, 'species_set' : set(['shark_hammerhead'])},
# {'label': 'Hammerhead NMS 30%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'hammerhead', 'weight_filepath' : 'hammerhead', 'nms': True, 'nms_thresh': 0.30, 'species_set' : set(['shark_hammerhead'])},
# {'label': 'Hammerhead NMS 40%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'hammerhead', 'weight_filepath' : 'hammerhead', 'nms': True, 'nms_thresh': 0.40, 'species_set' : set(['shark_hammerhead'])},
# {'label': 'Hammerhead NMS 50%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'hammerhead', 'weight_filepath' : 'hammerhead', 'nms': True, 'nms_thresh': 0.50, 'species_set' : set(['shark_hammerhead'])},
# {'label': 'Hammerhead NMS 60%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'hammerhead', 'weight_filepath' : 'hammerhead', 'nms': True, 'nms_thresh': 0.60, 'species_set' : set(['shark_hammerhead'])},
# {'label': 'Hammerhead NMS 70%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'hammerhead', 'weight_filepath' : 'hammerhead', 'nms': True, 'nms_thresh': 0.70, 'species_set' : set(['shark_hammerhead'])},
# {'label': 'Hammerhead NMS 80%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'hammerhead', 'weight_filepath' : 'hammerhead', 'nms': True, 'nms_thresh': 0.80, 'species_set' : set(['shark_hammerhead'])},
# {'label': 'Hammerhead NMS 90%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'hammerhead', 'weight_filepath' : 'hammerhead', 'nms': True, 'nms_thresh': 0.90, 'species_set' : set(['shark_hammerhead'])},
# {'label': 'Hammerhead NMS 100%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'hammerhead', 'weight_filepath' : 'hammerhead', 'nms': True, 'nms_thresh': 1.00, 'species_set' : set(['shark_hammerhead'])},
# ],
# {},
# ),
# '!hammerhead': (
# [
# {'label': 'Hammerhead NMS 40%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'hammerhead', 'weight_filepath' : 'hammerhead', 'nms': True, 'nms_thresh': 0.40, 'species_set' : set(['shark_hammerhead'])},
# {'label': 'Hammerhead ! NMS 0%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'hammerhead', 'weight_filepath' : 'hammerhead', 'nms': True, 'nms_thresh': 0.00, 'species_set' : set(['!shark_hammerhead'])},
# {'label': 'Hammerhead ! NMS 10%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'hammerhead', 'weight_filepath' : 'hammerhead', 'nms': True, 'nms_thresh': 0.10, 'species_set' : set(['!shark_hammerhead'])},
# {'label': 'Hammerhead ! NMS 20%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'hammerhead', 'weight_filepath' : 'hammerhead', 'nms': True, 'nms_thresh': 0.20, 'species_set' : set(['!shark_hammerhead'])},
# {'label': 'Hammerhead ! NMS 30%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'hammerhead', 'weight_filepath' : 'hammerhead', 'nms': True, 'nms_thresh': 0.30, 'species_set' : set(['!shark_hammerhead'])},
# {'label': 'Hammerhead ! NMS 40%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'hammerhead', 'weight_filepath' : 'hammerhead', 'nms': True, 'nms_thresh': 0.40, 'species_set' : set(['!shark_hammerhead'])},
# {'label': 'Hammerhead ! NMS 50%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'hammerhead', 'weight_filepath' : 'hammerhead', 'nms': True, 'nms_thresh': 0.50, 'species_set' : set(['!shark_hammerhead'])},
# {'label': 'Hammerhead ! NMS 60%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'hammerhead', 'weight_filepath' : 'hammerhead', 'nms': True, 'nms_thresh': 0.60, 'species_set' : set(['!shark_hammerhead'])},
# {'label': 'Hammerhead ! NMS 70%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'hammerhead', 'weight_filepath' : 'hammerhead', 'nms': True, 'nms_thresh': 0.70, 'species_set' : set(['!shark_hammerhead'])},
# {'label': 'Hammerhead ! NMS 80%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'hammerhead', 'weight_filepath' : 'hammerhead', 'nms': True, 'nms_thresh': 0.80, 'species_set' : set(['!shark_hammerhead'])},
# {'label': 'Hammerhead ! NMS 90%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'hammerhead', 'weight_filepath' : 'hammerhead', 'nms': True, 'nms_thresh': 0.90, 'species_set' : set(['!shark_hammerhead'])},
# {'label': 'Hammerhead ! NMS 100%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'hammerhead', 'weight_filepath' : 'hammerhead', 'nms': True, 'nms_thresh': 1.00, 'species_set' : set(['!shark_hammerhead'])},
# ],
# {'offset_color': 1},
# ),
# 'ggr2-giraffe-lightnet': (
# [
# {'label': 'Giraffe NMS 0%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 0.00, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['giraffe'])},
# {'label': 'Giraffe NMS 10%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 0.10, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['giraffe'])},
# {'label': 'Giraffe NMS 20%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 0.20, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['giraffe'])},
# {'label': 'Giraffe NMS 30%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 0.30, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['giraffe'])},
# {'label': 'Giraffe NMS 40%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 0.40, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['giraffe'])},
# {'label': 'Giraffe NMS 50%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 0.50, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['giraffe'])},
# {'label': 'Giraffe NMS 60%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 0.60, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['giraffe'])},
# {'label': 'Giraffe NMS 70%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 0.70, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['giraffe'])},
# {'label': 'Giraffe NMS 80%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 0.80, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['giraffe'])},
# {'label': 'Giraffe NMS 90%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 0.90, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['giraffe'])},
# {'label': 'Giraffe NMS 100%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 1.00, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['giraffe'])},
# ],
# {},
# ),
# 'ggr2-zebra-lightnet': (
# [
# {'label': 'Zebra NMS 0%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 0.00, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['zebra'])},
# {'label': 'Zebra NMS 10%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 0.10, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['zebra'])},
# {'label': 'Zebra NMS 20%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 0.20, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['zebra'])},
# {'label': 'Zebra NMS 30%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 0.30, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['zebra'])},
# {'label': 'Zebra NMS 40%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 0.40, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['zebra'])},
# {'label': 'Zebra NMS 50%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 0.50, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['zebra'])},
# {'label': 'Zebra NMS 60%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 0.60, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['zebra'])},
# {'label': 'Zebra NMS 70%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 0.70, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['zebra'])},
# {'label': 'Zebra NMS 80%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 0.80, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['zebra'])},
# {'label': 'Zebra NMS 90%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 0.90, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['zebra'])},
# {'label': 'Zebra NMS 100%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 1.00, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['zebra'])},
# ],
# {},
# ),
# 'ggr2-!giraffe-lightnet': (
# [
# {'label': 'Giraffe ! NMS 0%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 0.00, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!giraffe'])},
# {'label': 'Giraffe ! NMS 10%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 0.10, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!giraffe'])},
# {'label': 'Giraffe ! NMS 20%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 0.20, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!giraffe'])},
# {'label': 'Giraffe ! NMS 30%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 0.30, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!giraffe'])},
# {'label': 'Giraffe ! NMS 40%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 0.40, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!giraffe'])},
# {'label': 'Giraffe ! NMS 50%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 0.50, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!giraffe'])},
# {'label': 'Giraffe ! NMS 60%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 0.60, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!giraffe'])},
# {'label': 'Giraffe ! NMS 70%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 0.70, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!giraffe'])},
# {'label': 'Giraffe ! NMS 80%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 0.80, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!giraffe'])},
# {'label': 'Giraffe ! NMS 90%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 0.90, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!giraffe'])},
# {'label': 'Giraffe ! NMS 100%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 1.00, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!giraffe'])},
# ],
# {},
# ),
# 'ggr2-!zebra-lightnet': (
# [
# {'label': 'Zebra ! NMS 0%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 0.00, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!zebra'])},
# {'label': 'Zebra ! NMS 10%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 0.10, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!zebra'])},
# {'label': 'Zebra ! NMS 20%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 0.20, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!zebra'])},
# {'label': 'Zebra ! NMS 30%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 0.30, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!zebra'])},
# {'label': 'Zebra ! NMS 40%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 0.40, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!zebra'])},
# {'label': 'Zebra ! NMS 50%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 0.50, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!zebra'])},
# {'label': 'Zebra ! NMS 60%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 0.60, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!zebra'])},
# {'label': 'Zebra ! NMS 70%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 0.70, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!zebra'])},
# {'label': 'Zebra ! NMS 80%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 0.80, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!zebra'])},
# {'label': 'Zebra ! NMS 90%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 0.90, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!zebra'])},
# {'label': 'Zebra ! NMS 100%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'ggr2', 'weight_filepath' : 'ggr2', 'nms': True, 'nms_thresh': 1.00, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!zebra'])},
# ],
# {},
# ),
# 'ggr2-giraffe-azure': (
# [
# {'label': 'Giraffe NMS 0%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.00, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['giraffe'])},
# {'label': 'Giraffe NMS 10%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.10, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['giraffe'])},
# {'label': 'Giraffe NMS 20%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.20, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['giraffe'])},
# {'label': 'Giraffe NMS 30%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.30, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['giraffe'])},
# {'label': 'Giraffe NMS 40%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.40, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['giraffe'])},
# {'label': 'Giraffe NMS 50%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.50, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['giraffe'])},
# {'label': 'Giraffe NMS 60%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.60, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['giraffe'])},
# {'label': 'Giraffe NMS 70%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.70, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['giraffe'])},
# {'label': 'Giraffe NMS 80%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.80, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['giraffe'])},
# {'label': 'Giraffe NMS 90%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.90, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['giraffe'])},
# {'label': 'Giraffe NMS 100%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 1.00, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['giraffe'])},
# ],
# {},
# ),
# 'ggr2-zebra-azure': (
# [
# {'label': 'Zebra NMS 0%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.00, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['zebra'])},
# {'label': 'Zebra NMS 10%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.10, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['zebra'])},
# {'label': 'Zebra NMS 20%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.20, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['zebra'])},
# {'label': 'Zebra NMS 30%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.30, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['zebra'])},
# {'label': 'Zebra NMS 40%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.40, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['zebra'])},
# {'label': 'Zebra NMS 50%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.50, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['zebra'])},
# {'label': 'Zebra NMS 60%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.60, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['zebra'])},
# {'label': 'Zebra NMS 70%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.70, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['zebra'])},
# {'label': 'Zebra NMS 80%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.80, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['zebra'])},
# {'label': 'Zebra NMS 90%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.90, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['zebra'])},
# {'label': 'Zebra NMS 100%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 1.00, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['zebra'])},
# ],
# {},
# ),
# 'ggr2-!giraffe-azure': (
# [
# {'label': 'Giraffe ! NMS 0%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.00, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!giraffe'])},
# {'label': 'Giraffe ! NMS 10%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.10, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!giraffe'])},
# {'label': 'Giraffe ! NMS 20%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.20, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!giraffe'])},
# {'label': 'Giraffe ! NMS 30%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.30, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!giraffe'])},
# {'label': 'Giraffe ! NMS 40%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.40, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!giraffe'])},
# {'label': 'Giraffe ! NMS 50%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.50, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!giraffe'])},
# {'label': 'Giraffe ! NMS 60%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.60, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!giraffe'])},
# {'label': 'Giraffe ! NMS 70%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.70, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!giraffe'])},
# {'label': 'Giraffe ! NMS 80%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.80, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!giraffe'])},
# {'label': 'Giraffe ! NMS 90%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.90, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!giraffe'])},
# {'label': 'Giraffe ! NMS 100%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 1.00, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!giraffe'])},
# ],
# {},
# ),
# 'ggr2-!zebra-azure': (
# [
# {'label': 'Zebra ! NMS 0%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.00, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!zebra'])},
# {'label': 'Zebra ! NMS 10%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.10, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!zebra'])},
# {'label': 'Zebra ! NMS 20%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.20, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!zebra'])},
# {'label': 'Zebra ! NMS 30%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.30, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!zebra'])},
# {'label': 'Zebra ! NMS 40%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.40, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!zebra'])},
# {'label': 'Zebra ! NMS 50%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.50, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!zebra'])},
# {'label': 'Zebra ! NMS 60%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.60, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!zebra'])},
# {'label': 'Zebra ! NMS 70%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.70, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!zebra'])},
# {'label': 'Zebra ! NMS 80%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.80, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!zebra'])},
# {'label': 'Zebra ! NMS 90%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 0.90, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!zebra'])},
# {'label': 'Zebra ! NMS 100%', 'grid' : False, 'algo': 'azure', 'config_filepath' : None, 'weight_filepath' : None, 'nms': True, 'nms_thresh': 1.00, 'test_gid_list': test_gid_list, 'species_mapping': species_mapping, 'species_set': set(['!zebra'])},
# ],
# {},
# ),
# 'lynx': (
# [
# {'label': 'Lynx NMS 0%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'lynx', 'weight_filepath' : 'lynx', 'nms': True, 'nms_thresh': 0.00, 'species_set' : set(['lynx'])},
# {'label': 'Lynx NMS 10%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'lynx', 'weight_filepath' : 'lynx', 'nms': True, 'nms_thresh': 0.10, 'species_set' : set(['lynx'])},
# {'label': 'Lynx NMS 20%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'lynx', 'weight_filepath' : 'lynx', 'nms': True, 'nms_thresh': 0.20, 'species_set' : set(['lynx'])},
# {'label': 'Lynx NMS 30%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'lynx', 'weight_filepath' : 'lynx', 'nms': True, 'nms_thresh': 0.30, 'species_set' : set(['lynx'])},
# {'label': 'Lynx NMS 40%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'lynx', 'weight_filepath' : 'lynx', 'nms': True, 'nms_thresh': 0.40, 'species_set' : set(['lynx'])},
# {'label': 'Lynx NMS 50%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'lynx', 'weight_filepath' : 'lynx', 'nms': True, 'nms_thresh': 0.50, 'species_set' : set(['lynx'])},
# {'label': 'Lynx NMS 60%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'lynx', 'weight_filepath' : 'lynx', 'nms': True, 'nms_thresh': 0.60, 'species_set' : set(['lynx'])},
# {'label': 'Lynx NMS 70%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'lynx', 'weight_filepath' : 'lynx', 'nms': True, 'nms_thresh': 0.70, 'species_set' : set(['lynx'])},
# {'label': 'Lynx NMS 80%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'lynx', 'weight_filepath' : 'lynx', 'nms': True, 'nms_thresh': 0.80, 'species_set' : set(['lynx'])},
# {'label': 'Lynx NMS 90%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'lynx', 'weight_filepath' : 'lynx', 'nms': True, 'nms_thresh': 0.90, 'species_set' : set(['lynx'])},
# {'label': 'Lynx NMS 100%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'lynx', 'weight_filepath' : 'lynx', 'nms': True, 'nms_thresh': 1.00, 'species_set' : set(['lynx'])},
# ],
# {},
# ),
# 'jaguar': (
# [
# {'label': 'Jaguar NMS 0%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'jaguar_v2', 'weight_filepath' : 'jaguar_v2', 'nms': True, 'nms_thresh': 0.00, 'species_set' : set(['jaguar'])},
# {'label': 'Jaguar NMS 10%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'jaguar_v2', 'weight_filepath' : 'jaguar_v2', 'nms': True, 'nms_thresh': 0.10, 'species_set' : set(['jaguar'])},
# {'label': 'Jaguar NMS 20%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'jaguar_v2', 'weight_filepath' : 'jaguar_v2', 'nms': True, 'nms_thresh': 0.20, 'species_set' : set(['jaguar'])},
# {'label': 'Jaguar NMS 30%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'jaguar_v2', 'weight_filepath' : 'jaguar_v2', 'nms': True, 'nms_thresh': 0.30, 'species_set' : set(['jaguar'])},
# {'label': 'Jaguar NMS 40%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'jaguar_v2', 'weight_filepath' : 'jaguar_v2', 'nms': True, 'nms_thresh': 0.40, 'species_set' : set(['jaguar'])},
# {'label': 'Jaguar NMS 50%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'jaguar_v2', 'weight_filepath' : 'jaguar_v2', 'nms': True, 'nms_thresh': 0.50, 'species_set' : set(['jaguar'])},
# {'label': 'Jaguar NMS 60%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'jaguar_v2', 'weight_filepath' : 'jaguar_v2', 'nms': True, 'nms_thresh': 0.60, 'species_set' : set(['jaguar'])},
# {'label': 'Jaguar NMS 70%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'jaguar_v2', 'weight_filepath' : 'jaguar_v2', 'nms': True, 'nms_thresh': 0.70, 'species_set' : set(['jaguar'])},
# {'label': 'Jaguar NMS 80%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'jaguar_v2', 'weight_filepath' : 'jaguar_v2', 'nms': True, 'nms_thresh': 0.80, 'species_set' : set(['jaguar'])},
# {'label': 'Jaguar NMS 90%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'jaguar_v2', 'weight_filepath' : 'jaguar_v2', 'nms': True, 'nms_thresh': 0.90, 'species_set' : set(['jaguar'])},
# {'label': 'Jaguar NMS 100%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'jaguar_v2', 'weight_filepath' : 'jaguar_v2', 'nms': True, 'nms_thresh': 1.00, 'species_set' : set(['jaguar'])},
# ],
# {},
# ),
# '!jaguar': (
# [
# {'label': 'Jaguar NMS 0%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'jaguar_v2', 'weight_filepath' : 'jaguar_v2', 'nms': True, 'nms_thresh': 0.00, 'species_set' : set(['!jaguar'])},
# {'label': 'Jaguar NMS 10%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'jaguar_v2', 'weight_filepath' : 'jaguar_v2', 'nms': True, 'nms_thresh': 0.10, 'species_set' : set(['!jaguar'])},
# {'label': 'Jaguar NMS 20%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'jaguar_v2', 'weight_filepath' : 'jaguar_v2', 'nms': True, 'nms_thresh': 0.20, 'species_set' : set(['!jaguar'])},
# {'label': 'Jaguar NMS 30%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'jaguar_v2', 'weight_filepath' : 'jaguar_v2', 'nms': True, 'nms_thresh': 0.30, 'species_set' : set(['!jaguar'])},
# {'label': 'Jaguar NMS 40%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'jaguar_v2', 'weight_filepath' : 'jaguar_v2', 'nms': True, 'nms_thresh': 0.40, 'species_set' : set(['!jaguar'])},
# {'label': 'Jaguar NMS 50%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'jaguar_v2', 'weight_filepath' : 'jaguar_v2', 'nms': True, 'nms_thresh': 0.50, 'species_set' : set(['!jaguar'])},
# {'label': 'Jaguar NMS 60%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'jaguar_v2', 'weight_filepath' : 'jaguar_v2', 'nms': True, 'nms_thresh': 0.60, 'species_set' : set(['!jaguar'])},
# {'label': 'Jaguar NMS 70%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'jaguar_v2', 'weight_filepath' : 'jaguar_v2', 'nms': True, 'nms_thresh': 0.70, 'species_set' : set(['!jaguar'])},
# {'label': 'Jaguar NMS 80%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'jaguar_v2', 'weight_filepath' : 'jaguar_v2', 'nms': True, 'nms_thresh': 0.80, 'species_set' : set(['!jaguar'])},
# {'label': 'Jaguar NMS 90%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'jaguar_v2', 'weight_filepath' : 'jaguar_v2', 'nms': True, 'nms_thresh': 0.90, 'species_set' : set(['!jaguar'])},
# {'label': 'Jaguar NMS 100%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'jaguar_v2', 'weight_filepath' : 'jaguar_v2', 'nms': True, 'nms_thresh': 1.00, 'species_set' : set(['!jaguar'])},
# ],
# {},
# ),
# 'manta': (
# [
# {'label': 'Manta NMS 0%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'manta', 'weight_filepath' : 'manta', 'nms': True, 'nms_thresh': 0.00, 'species_set' : set(['manta_ray_giant'])},
# {'label': 'Manta NMS 10%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'manta', 'weight_filepath' : 'manta', 'nms': True, 'nms_thresh': 0.10, 'species_set' : set(['manta_ray_giant'])},
# {'label': 'Manta NMS 20%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'manta', 'weight_filepath' : 'manta', 'nms': True, 'nms_thresh': 0.20, 'species_set' : set(['manta_ray_giant'])},
# {'label': 'Manta NMS 30%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'manta', 'weight_filepath' : 'manta', 'nms': True, 'nms_thresh': 0.30, 'species_set' : set(['manta_ray_giant'])},
# {'label': 'Manta NMS 40%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'manta', 'weight_filepath' : 'manta', 'nms': True, 'nms_thresh': 0.40, 'species_set' : set(['manta_ray_giant'])},
# {'label': 'Manta NMS 50%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'manta', 'weight_filepath' : 'manta', 'nms': True, 'nms_thresh': 0.50, 'species_set' : set(['manta_ray_giant'])},
# {'label': 'Manta NMS 60%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'manta', 'weight_filepath' : 'manta', 'nms': True, 'nms_thresh': 0.60, 'species_set' : set(['manta_ray_giant'])},
# {'label': 'Manta NMS 70%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'manta', 'weight_filepath' : 'manta', 'nms': True, 'nms_thresh': 0.70, 'species_set' : set(['manta_ray_giant'])},
# {'label': 'Manta NMS 80%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'manta', 'weight_filepath' : 'manta', 'nms': True, 'nms_thresh': 0.80, 'species_set' : set(['manta_ray_giant'])},
# {'label': 'Manta NMS 90%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'manta', 'weight_filepath' : 'manta', 'nms': True, 'nms_thresh': 0.90, 'species_set' : set(['manta_ray_giant'])},
# {'label': 'Manta NMS 100%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'manta', 'weight_filepath' : 'manta', 'nms': True, 'nms_thresh': 1.00, 'species_set' : set(['manta_ray_giant'])},
# ],
# {},
# ),
# '!manta': (
# [
# {'label': 'Manta NMS 0%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'manta', 'weight_filepath' : 'manta', 'nms': True, 'nms_thresh': 0.00, 'species_set' : set(['!manta_ray_giant'])},
# {'label': 'Manta NMS 10%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'manta', 'weight_filepath' : 'manta', 'nms': True, 'nms_thresh': 0.10, 'species_set' : set(['!manta_ray_giant'])},
# {'label': 'Manta NMS 20%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'manta', 'weight_filepath' : 'manta', 'nms': True, 'nms_thresh': 0.20, 'species_set' : set(['!manta_ray_giant'])},
# {'label': 'Manta NMS 30%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'manta', 'weight_filepath' : 'manta', 'nms': True, 'nms_thresh': 0.30, 'species_set' : set(['!manta_ray_giant'])},
# {'label': 'Manta NMS 40%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'manta', 'weight_filepath' : 'manta', 'nms': True, 'nms_thresh': 0.40, 'species_set' : set(['!manta_ray_giant'])},
# {'label': 'Manta NMS 50%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'manta', 'weight_filepath' : 'manta', 'nms': True, 'nms_thresh': 0.50, 'species_set' : set(['!manta_ray_giant'])},
# {'label': 'Manta NMS 60%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'manta', 'weight_filepath' : 'manta', 'nms': True, 'nms_thresh': 0.60, 'species_set' : set(['!manta_ray_giant'])},
# {'label': 'Manta NMS 70%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'manta', 'weight_filepath' : 'manta', 'nms': True, 'nms_thresh': 0.70, 'species_set' : set(['!manta_ray_giant'])},
# {'label': 'Manta NMS 80%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'manta', 'weight_filepath' : 'manta', 'nms': True, 'nms_thresh': 0.80, 'species_set' : set(['!manta_ray_giant'])},
# {'label': 'Manta NMS 90%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'manta', 'weight_filepath' : 'manta', 'nms': True, 'nms_thresh': 0.90, 'species_set' : set(['!manta_ray_giant'])},
# {'label': 'Manta NMS 100%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'manta', 'weight_filepath' : 'manta', 'nms': True, 'nms_thresh': 1.00, 'species_set' : set(['!manta_ray_giant'])},
# ],
# {},
# ),
# 'giraffe': (
# [
# {'label': 'Giraffe NMS 10%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'giraffe_v1', 'weight_filepath' : 'giraffe_v1', 'nms': True, 'nms_thresh': 0.10, 'species_set' : set(['giraffe_masai', 'giraffe_reticulated'])},
# {'label': 'Giraffe NMS 30%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'giraffe_v1', 'weight_filepath' : 'giraffe_v1', 'nms': True, 'nms_thresh': 0.30, 'species_set' : set(['giraffe_masai', 'giraffe_reticulated'])},
# {'label': 'Giraffe NMS 50%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'giraffe_v1', 'weight_filepath' : 'giraffe_v1', 'nms': True, 'nms_thresh': 0.50, 'species_set' : set(['giraffe_masai', 'giraffe_reticulated'])},
# {'label': 'Giraffe NMS 70%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'giraffe_v1', 'weight_filepath' : 'giraffe_v1', 'nms': True, 'nms_thresh': 0.70, 'species_set' : set(['giraffe_masai', 'giraffe_reticulated'])},
# {'label': 'Giraffe NMS 90%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'giraffe_v1', 'weight_filepath' : 'giraffe_v1', 'nms': True, 'nms_thresh': 0.90, 'species_set' : set(['giraffe_masai', 'giraffe_reticulated'])},
# {'label': 'Masai Giraffe NMS 10%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'giraffe_v1', 'weight_filepath' : 'giraffe_v1', 'nms': True, 'nms_thresh': 0.10, 'species_set' : set(['giraffe_masai'])},
# {'label': 'Masai Giraffe NMS 30%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'giraffe_v1', 'weight_filepath' : 'giraffe_v1', 'nms': True, 'nms_thresh': 0.30, 'species_set' : set(['giraffe_masai'])},
# {'label': 'Masai Giraffe NMS 50%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'giraffe_v1', 'weight_filepath' : 'giraffe_v1', 'nms': True, 'nms_thresh': 0.50, 'species_set' : set(['giraffe_masai'])},
# {'label': 'Masai Giraffe NMS 70%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'giraffe_v1', 'weight_filepath' : 'giraffe_v1', 'nms': True, 'nms_thresh': 0.70, 'species_set' : set(['giraffe_masai'])},
# {'label': 'Masai Giraffe NMS 90%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'giraffe_v1', 'weight_filepath' : 'giraffe_v1', 'nms': True, 'nms_thresh': 0.90, 'species_set' : set(['giraffe_masai'])},
# {'label': 'Reticulated Giraffe NMS 10%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'giraffe_v1', 'weight_filepath' : 'giraffe_v1', 'nms': True, 'nms_thresh': 0.10, 'species_set' : set(['giraffe_reticulated'])},
# {'label': 'Reticulated Giraffe NMS 30%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'giraffe_v1', 'weight_filepath' : 'giraffe_v1', 'nms': True, 'nms_thresh': 0.30, 'species_set' : set(['giraffe_reticulated'])},
# {'label': 'Reticulated Giraffe NMS 50%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'giraffe_v1', 'weight_filepath' : 'giraffe_v1', 'nms': True, 'nms_thresh': 0.50, 'species_set' : set(['giraffe_reticulated'])},
# {'label': 'Reticulated Giraffe NMS 70%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'giraffe_v1', 'weight_filepath' : 'giraffe_v1', 'nms': True, 'nms_thresh': 0.70, 'species_set' : set(['giraffe_reticulated'])},
# {'label': 'Reticulated Giraffe NMS 90%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'giraffe_v1', 'weight_filepath' : 'giraffe_v1', 'nms': True, 'nms_thresh': 0.90, 'species_set' : set(['giraffe_reticulated'])},
# ],
# {},
# ),
# 'spotted_skunk_v0': (
# [
# {'label': 'Spotted Skunk NMS 0%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'spotted_skunk_v0', 'weight_filepath' : 'spotted_skunk_v0', 'nms': True, 'nms_thresh': 0.00, 'species_set' : set(['skunk_spotted'])},
# {'label': 'Spotted Skunk NMS 10%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'spotted_skunk_v0', 'weight_filepath' : 'spotted_skunk_v0', 'nms': True, 'nms_thresh': 0.10, 'species_set' : set(['skunk_spotted'])},
# {'label': 'Spotted Skunk NMS 20%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'spotted_skunk_v0', 'weight_filepath' : 'spotted_skunk_v0', 'nms': True, 'nms_thresh': 0.20, 'species_set' : set(['skunk_spotted'])},
# {'label': 'Spotted Skunk NMS 30%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'spotted_skunk_v0', 'weight_filepath' : 'spotted_skunk_v0', 'nms': True, 'nms_thresh': 0.30, 'species_set' : set(['skunk_spotted'])},
# {'label': 'Spotted Skunk NMS 40%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'spotted_skunk_v0', 'weight_filepath' : 'spotted_skunk_v0', 'nms': True, 'nms_thresh': 0.40, 'species_set' : set(['skunk_spotted'])},
# {'label': 'Spotted Skunk NMS 50%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'spotted_skunk_v0', 'weight_filepath' : 'spotted_skunk_v0', 'nms': True, 'nms_thresh': 0.50, 'species_set' : set(['skunk_spotted'])},
# {'label': 'Spotted Skunk NMS 60%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'spotted_skunk_v0', 'weight_filepath' : 'spotted_skunk_v0', 'nms': True, 'nms_thresh': 0.60, 'species_set' : set(['skunk_spotted'])},
# {'label': 'Spotted Skunk NMS 70%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'spotted_skunk_v0', 'weight_filepath' : 'spotted_skunk_v0', 'nms': True, 'nms_thresh': 0.70, 'species_set' : set(['skunk_spotted'])},
# {'label': 'Spotted Skunk NMS 80%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'spotted_skunk_v0', 'weight_filepath' : 'spotted_skunk_v0', 'nms': True, 'nms_thresh': 0.80, 'species_set' : set(['skunk_spotted'])},
# {'label': 'Spotted Skunk NMS 90%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'spotted_skunk_v0', 'weight_filepath' : 'spotted_skunk_v0', 'nms': True, 'nms_thresh': 0.90, 'species_set' : set(['skunk_spotted'])},
# {'label': 'Spotted Skunk NMS 100%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'spotted_skunk_v0', 'weight_filepath' : 'spotted_skunk_v0', 'nms': True, 'nms_thresh': 1.00, 'species_set' : set(['skunk_spotted'])},
# ],
# {},
# ),
# '!spotted_skunk_v0': (
# [
# {'label': 'Spotted Skunk NMS 0%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'spotted_skunk_v0', 'weight_filepath' : 'spotted_skunk_v0', 'nms': True, 'nms_thresh': 0.00, 'species_set' : set(['!skunk_spotted'])},
# {'label': 'Spotted Skunk NMS 10%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'spotted_skunk_v0', 'weight_filepath' : 'spotted_skunk_v0', 'nms': True, 'nms_thresh': 0.10, 'species_set' : set(['!skunk_spotted'])},
# {'label': 'Spotted Skunk NMS 20%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'spotted_skunk_v0', 'weight_filepath' : 'spotted_skunk_v0', 'nms': True, 'nms_thresh': 0.20, 'species_set' : set(['!skunk_spotted'])},
# {'label': 'Spotted Skunk NMS 30%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'spotted_skunk_v0', 'weight_filepath' : 'spotted_skunk_v0', 'nms': True, 'nms_thresh': 0.30, 'species_set' : set(['!skunk_spotted'])},
# {'label': 'Spotted Skunk NMS 40%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'spotted_skunk_v0', 'weight_filepath' : 'spotted_skunk_v0', 'nms': True, 'nms_thresh': 0.40, 'species_set' : set(['!skunk_spotted'])},
# {'label': 'Spotted Skunk NMS 50%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'spotted_skunk_v0', 'weight_filepath' : 'spotted_skunk_v0', 'nms': True, 'nms_thresh': 0.50, 'species_set' : set(['!skunk_spotted'])},
# {'label': 'Spotted Skunk NMS 60%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'spotted_skunk_v0', 'weight_filepath' : 'spotted_skunk_v0', 'nms': True, 'nms_thresh': 0.60, 'species_set' : set(['!skunk_spotted'])},
# {'label': 'Spotted Skunk NMS 70%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'spotted_skunk_v0', 'weight_filepath' : 'spotted_skunk_v0', 'nms': True, 'nms_thresh': 0.70, 'species_set' : set(['!skunk_spotted'])},
# {'label': 'Spotted Skunk NMS 80%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'spotted_skunk_v0', 'weight_filepath' : 'spotted_skunk_v0', 'nms': True, 'nms_thresh': 0.80, 'species_set' : set(['!skunk_spotted'])},
# {'label': 'Spotted Skunk NMS 90%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'spotted_skunk_v0', 'weight_filepath' : 'spotted_skunk_v0', 'nms': True, 'nms_thresh': 0.90, 'species_set' : set(['!skunk_spotted'])},
# {'label': 'Spotted Skunk NMS 100%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'spotted_skunk_v0', 'weight_filepath' : 'spotted_skunk_v0', 'nms': True, 'nms_thresh': 1.00, 'species_set' : set(['!skunk_spotted'])},
# ],
# {},
# ),
# 'nassau_grouper_v0': (
# [
# {'label': 'Nassau Grouper NMS 0%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'nassau_grouper_v0', 'weight_filepath' : 'nassau_grouper_v0', 'nms': True, 'nms_thresh': 0.00, 'species_set' : set(['grouper_nassau'])},
# {'label': 'Nassau Grouper NMS 10%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'nassau_grouper_v0', 'weight_filepath' : 'nassau_grouper_v0', 'nms': True, 'nms_thresh': 0.10, 'species_set' : set(['grouper_nassau'])},
# {'label': 'Nassau Grouper NMS 20%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'nassau_grouper_v0', 'weight_filepath' : 'nassau_grouper_v0', 'nms': True, 'nms_thresh': 0.20, 'species_set' : set(['grouper_nassau'])},
# {'label': 'Nassau Grouper NMS 30%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'nassau_grouper_v0', 'weight_filepath' : 'nassau_grouper_v0', 'nms': True, 'nms_thresh': 0.30, 'species_set' : set(['grouper_nassau'])},
# {'label': 'Nassau Grouper NMS 40%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'nassau_grouper_v0', 'weight_filepath' : 'nassau_grouper_v0', 'nms': True, 'nms_thresh': 0.40, 'species_set' : set(['grouper_nassau'])},
# {'label': 'Nassau Grouper NMS 50%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'nassau_grouper_v0', 'weight_filepath' : 'nassau_grouper_v0', 'nms': True, 'nms_thresh': 0.50, 'species_set' : set(['grouper_nassau'])},
# {'label': 'Nassau Grouper NMS 60%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'nassau_grouper_v0', 'weight_filepath' : 'nassau_grouper_v0', 'nms': True, 'nms_thresh': 0.60, 'species_set' : set(['grouper_nassau'])},
# {'label': 'Nassau Grouper NMS 70%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'nassau_grouper_v0', 'weight_filepath' : 'nassau_grouper_v0', 'nms': True, 'nms_thresh': 0.70, 'species_set' : set(['grouper_nassau'])},
# {'label': 'Nassau Grouper NMS 80%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'nassau_grouper_v0', 'weight_filepath' : 'nassau_grouper_v0', 'nms': True, 'nms_thresh': 0.80, 'species_set' : set(['grouper_nassau'])},
# {'label': 'Nassau Grouper NMS 90%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'nassau_grouper_v0', 'weight_filepath' : 'nassau_grouper_v0', 'nms': True, 'nms_thresh': 0.90, 'species_set' : set(['grouper_nassau'])},
# {'label': 'Nassau Grouper NMS 100%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'nassau_grouper_v0', 'weight_filepath' : 'nassau_grouper_v0', 'nms': True, 'nms_thresh': 1.00, 'species_set' : set(['grouper_nassau'])},
# ],
# {},
# ),
# '!nassau_grouper_v0': (
# [
# {'label': 'Nassau Grouper! NMS 0%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'nassau_grouper_v0', 'weight_filepath' : 'nassau_grouper_v0', 'nms': True, 'nms_thresh': 0.00, 'species_set' : set(['!grouper_nassau'])},
# {'label': 'Nassau Grouper! NMS 10%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'nassau_grouper_v0', 'weight_filepath' : 'nassau_grouper_v0', 'nms': True, 'nms_thresh': 0.10, 'species_set' : set(['!grouper_nassau'])},
# {'label': 'Nassau Grouper! NMS 20%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'nassau_grouper_v0', 'weight_filepath' : 'nassau_grouper_v0', 'nms': True, 'nms_thresh': 0.20, 'species_set' : set(['!grouper_nassau'])},
# {'label': 'Nassau Grouper! NMS 30%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'nassau_grouper_v0', 'weight_filepath' : 'nassau_grouper_v0', 'nms': True, 'nms_thresh': 0.30, 'species_set' : set(['!grouper_nassau'])},
# {'label': 'Nassau Grouper! NMS 40%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'nassau_grouper_v0', 'weight_filepath' : 'nassau_grouper_v0', 'nms': True, 'nms_thresh': 0.40, 'species_set' : set(['!grouper_nassau'])},
# {'label': 'Nassau Grouper! NMS 50%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'nassau_grouper_v0', 'weight_filepath' : 'nassau_grouper_v0', 'nms': True, 'nms_thresh': 0.50, 'species_set' : set(['!grouper_nassau'])},
# {'label': 'Nassau Grouper! NMS 60%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'nassau_grouper_v0', 'weight_filepath' : 'nassau_grouper_v0', 'nms': True, 'nms_thresh': 0.60, 'species_set' : set(['!grouper_nassau'])},
# {'label': 'Nassau Grouper! NMS 70%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'nassau_grouper_v0', 'weight_filepath' : 'nassau_grouper_v0', 'nms': True, 'nms_thresh': 0.70, 'species_set' : set(['!grouper_nassau'])},
# {'label': 'Nassau Grouper! NMS 80%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'nassau_grouper_v0', 'weight_filepath' : 'nassau_grouper_v0', 'nms': True, 'nms_thresh': 0.80, 'species_set' : set(['!grouper_nassau'])},
# {'label': 'Nassau Grouper! NMS 90%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'nassau_grouper_v0', 'weight_filepath' : 'nassau_grouper_v0', 'nms': True, 'nms_thresh': 0.90, 'species_set' : set(['!grouper_nassau'])},
# {'label': 'Nassau Grouper! NMS 100%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'nassau_grouper_v0', 'weight_filepath' : 'nassau_grouper_v0', 'nms': True, 'nms_thresh': 1.00, 'species_set' : set(['!grouper_nassau'])},
# ],
# {},
# ),
# 'spotted_dolphin_v0': (
# [
        #     {'label': 'Spotted Dolphin NMS 0%',                      'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'spotted_dolphin_v0', 'weight_filepath' : 'spotted_dolphin_v0', 'nms': True, 'nms_thresh': 0.00, 'species_set' : set(['dolphin_spotted'])},
        #     {'label': 'Spotted Dolphin NMS 10%',                     'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'spotted_dolphin_v0', 'weight_filepath' : 'spotted_dolphin_v0', 'nms': True, 'nms_thresh': 0.10, 'species_set' : set(['dolphin_spotted'])},
        #     {'label': 'Spotted Dolphin NMS 20%',                     'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'spotted_dolphin_v0', 'weight_filepath' : 'spotted_dolphin_v0', 'nms': True, 'nms_thresh': 0.20, 'species_set' : set(['dolphin_spotted'])},
        #     {'label': 'Spotted Dolphin NMS 30%',                     'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'spotted_dolphin_v0', 'weight_filepath' : 'spotted_dolphin_v0', 'nms': True, 'nms_thresh': 0.30, 'species_set' : set(['dolphin_spotted'])},
        #     {'label': 'Spotted Dolphin NMS 40%',                     'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'spotted_dolphin_v0', 'weight_filepath' : 'spotted_dolphin_v0', 'nms': True, 'nms_thresh': 0.40, 'species_set' : set(['dolphin_spotted'])},
        #     {'label': 'Spotted Dolphin NMS 50%',                     'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'spotted_dolphin_v0', 'weight_filepath' : 'spotted_dolphin_v0', 'nms': True, 'nms_thresh': 0.50, 'species_set' : set(['dolphin_spotted'])},
        #     {'label': 'Spotted Dolphin NMS 60%',                     'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'spotted_dolphin_v0', 'weight_filepath' : 'spotted_dolphin_v0', 'nms': True, 'nms_thresh': 0.60, 'species_set' : set(['dolphin_spotted'])},
        #     {'label': 'Spotted Dolphin NMS 70%',                     'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'spotted_dolphin_v0', 'weight_filepath' : 'spotted_dolphin_v0', 'nms': True, 'nms_thresh': 0.70, 'species_set' : set(['dolphin_spotted'])},
        #     {'label': 'Spotted Dolphin NMS 80%',                     'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'spotted_dolphin_v0', 'weight_filepath' : 'spotted_dolphin_v0', 'nms': True, 'nms_thresh': 0.80, 'species_set' : set(['dolphin_spotted'])},
        #     {'label': 'Spotted Dolphin NMS 90%',                     'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'spotted_dolphin_v0', 'weight_filepath' : 'spotted_dolphin_v0', 'nms': True, 'nms_thresh': 0.90, 'species_set' : set(['dolphin_spotted'])},
        #     {'label': 'Spotted Dolphin NMS 100%',                    'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'spotted_dolphin_v0', 'weight_filepath' : 'spotted_dolphin_v0', 'nms': True, 'nms_thresh': 1.00, 'species_set' : set(['dolphin_spotted'])},
# ],
# {},
# ),
# '!spotted_dolphin_v0': (
# [
# {'label': 'Spotted Dolphin! NMS 0%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'spotted_dolphin_v0', 'weight_filepath' : 'spotted_dolphin_v0', 'nms': True, 'nms_thresh': 0.00, 'species_set' : set(['!dolphin_spotted'])},
# {'label': 'Spotted Dolphin! NMS 10%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'spotted_dolphin_v0', 'weight_filepath' : 'spotted_dolphin_v0', 'nms': True, 'nms_thresh': 0.10, 'species_set' : set(['!dolphin_spotted'])},
# {'label': 'Spotted Dolphin! NMS 20%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'spotted_dolphin_v0', 'weight_filepath' : 'spotted_dolphin_v0', 'nms': True, 'nms_thresh': 0.20, 'species_set' : set(['!dolphin_spotted'])},
# {'label': 'Spotted Dolphin! NMS 30%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'spotted_dolphin_v0', 'weight_filepath' : 'spotted_dolphin_v0', 'nms': True, 'nms_thresh': 0.30, 'species_set' : set(['!dolphin_spotted'])},
# {'label': 'Spotted Dolphin! NMS 40%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'spotted_dolphin_v0', 'weight_filepath' : 'spotted_dolphin_v0', 'nms': True, 'nms_thresh': 0.40, 'species_set' : set(['!dolphin_spotted'])},
# {'label': 'Spotted Dolphin! NMS 50%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'spotted_dolphin_v0', 'weight_filepath' : 'spotted_dolphin_v0', 'nms': True, 'nms_thresh': 0.50, 'species_set' : set(['!dolphin_spotted'])},
# {'label': 'Spotted Dolphin! NMS 60%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'spotted_dolphin_v0', 'weight_filepath' : 'spotted_dolphin_v0', 'nms': True, 'nms_thresh': 0.60, 'species_set' : set(['!dolphin_spotted'])},
# {'label': 'Spotted Dolphin! NMS 70%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'spotted_dolphin_v0', 'weight_filepath' : 'spotted_dolphin_v0', 'nms': True, 'nms_thresh': 0.70, 'species_set' : set(['!dolphin_spotted'])},
# {'label': 'Spotted Dolphin! NMS 80%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'spotted_dolphin_v0', 'weight_filepath' : 'spotted_dolphin_v0', 'nms': True, 'nms_thresh': 0.80, 'species_set' : set(['!dolphin_spotted'])},
# {'label': 'Spotted Dolphin! NMS 90%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'spotted_dolphin_v0', 'weight_filepath' : 'spotted_dolphin_v0', 'nms': True, 'nms_thresh': 0.90, 'species_set' : set(['!dolphin_spotted'])},
# {'label': 'Spotted Dolphin! NMS 100%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'spotted_dolphin_v0', 'weight_filepath' : 'spotted_dolphin_v0', 'nms': True, 'nms_thresh': 1.00, 'species_set' : set(['!dolphin_spotted'])},
# ],
# {},
# ),
'seadragon_weedy_v1': (
[
{'label': 'Weedy Body NMS 0%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seadragon_v1', 'weight_filepath' : 'seadragon_v1', 'nms': True, 'nms_thresh': 0.00, 'species_set' : set(['seadragon_leafy'])},
{'label': 'Weedy Body NMS 10%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seadragon_v1', 'weight_filepath' : 'seadragon_v1', 'nms': True, 'nms_thresh': 0.10, 'species_set' : set(['seadragon_leafy'])},
{'label': 'Weedy Body NMS 20%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seadragon_v1', 'weight_filepath' : 'seadragon_v1', 'nms': True, 'nms_thresh': 0.20, 'species_set' : set(['seadragon_leafy'])},
{'label': 'Weedy Body NMS 30%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seadragon_v1', 'weight_filepath' : 'seadragon_v1', 'nms': True, 'nms_thresh': 0.30, 'species_set' : set(['seadragon_leafy'])},
{'label': 'Weedy Body NMS 40%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seadragon_v1', 'weight_filepath' : 'seadragon_v1', 'nms': True, 'nms_thresh': 0.40, 'species_set' : set(['seadragon_leafy'])},
{'label': 'Weedy Body NMS 50%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seadragon_v1', 'weight_filepath' : 'seadragon_v1', 'nms': True, 'nms_thresh': 0.50, 'species_set' : set(['seadragon_leafy'])},
{'label': 'Weedy Body NMS 60%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seadragon_v1', 'weight_filepath' : 'seadragon_v1', 'nms': True, 'nms_thresh': 0.60, 'species_set' : set(['seadragon_leafy'])},
{'label': 'Weedy Body NMS 70%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seadragon_v1', 'weight_filepath' : 'seadragon_v1', 'nms': True, 'nms_thresh': 0.70, 'species_set' : set(['seadragon_leafy'])},
{'label': 'Weedy Body NMS 80%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seadragon_v1', 'weight_filepath' : 'seadragon_v1', 'nms': True, 'nms_thresh': 0.80, 'species_set' : set(['seadragon_leafy'])},
{'label': 'Weedy Body NMS 90%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seadragon_v1', 'weight_filepath' : 'seadragon_v1', 'nms': True, 'nms_thresh': 0.90, 'species_set' : set(['seadragon_leafy'])},
{'label': 'Weedy Body NMS 100%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seadragon_v1', 'weight_filepath' : 'seadragon_v1', 'nms': True, 'nms_thresh': 1.00, 'species_set' : set(['seadragon_leafy'])},
],
{},
),
'seadragon_leafy_v1': (
[
{'label': 'Leafy Body NMS 0%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seadragon_v1', 'weight_filepath' : 'seadragon_v1', 'nms': True, 'nms_thresh': 0.00, 'species_set' : set(['seadragon_weedy'])},
{'label': 'Leafy Body NMS 10%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seadragon_v1', 'weight_filepath' : 'seadragon_v1', 'nms': True, 'nms_thresh': 0.10, 'species_set' : set(['seadragon_weedy'])},
{'label': 'Leafy Body NMS 20%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seadragon_v1', 'weight_filepath' : 'seadragon_v1', 'nms': True, 'nms_thresh': 0.20, 'species_set' : set(['seadragon_weedy'])},
{'label': 'Leafy Body NMS 30%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seadragon_v1', 'weight_filepath' : 'seadragon_v1', 'nms': True, 'nms_thresh': 0.30, 'species_set' : set(['seadragon_weedy'])},
{'label': 'Leafy Body NMS 40%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seadragon_v1', 'weight_filepath' : 'seadragon_v1', 'nms': True, 'nms_thresh': 0.40, 'species_set' : set(['seadragon_weedy'])},
{'label': 'Leafy Body NMS 50%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seadragon_v1', 'weight_filepath' : 'seadragon_v1', 'nms': True, 'nms_thresh': 0.50, 'species_set' : set(['seadragon_weedy'])},
{'label': 'Leafy Body NMS 60%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seadragon_v1', 'weight_filepath' : 'seadragon_v1', 'nms': True, 'nms_thresh': 0.60, 'species_set' : set(['seadragon_weedy'])},
{'label': 'Leafy Body NMS 70%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seadragon_v1', 'weight_filepath' : 'seadragon_v1', 'nms': True, 'nms_thresh': 0.70, 'species_set' : set(['seadragon_weedy'])},
{'label': 'Leafy Body NMS 80%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seadragon_v1', 'weight_filepath' : 'seadragon_v1', 'nms': True, 'nms_thresh': 0.80, 'species_set' : set(['seadragon_weedy'])},
{'label': 'Leafy Body NMS 90%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seadragon_v1', 'weight_filepath' : 'seadragon_v1', 'nms': True, 'nms_thresh': 0.90, 'species_set' : set(['seadragon_weedy'])},
{'label': 'Leafy Body NMS 100%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seadragon_v1', 'weight_filepath' : 'seadragon_v1', 'nms': True, 'nms_thresh': 1.00, 'species_set' : set(['seadragon_weedy'])},
],
{},
),
'seadragon_weedy_head_v1': (
[
{'label': 'Weedy Head NMS 0%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seadragon_v1', 'weight_filepath' : 'seadragon_v1', 'nms': True, 'nms_thresh': 0.00, 'species_set' : set(['seadragon_leafy+head'])},
{'label': 'Weedy Head NMS 10%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seadragon_v1', 'weight_filepath' : 'seadragon_v1', 'nms': True, 'nms_thresh': 0.10, 'species_set' : set(['seadragon_leafy+head'])},
{'label': 'Weedy Head NMS 20%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seadragon_v1', 'weight_filepath' : 'seadragon_v1', 'nms': True, 'nms_thresh': 0.20, 'species_set' : set(['seadragon_leafy+head'])},
{'label': 'Weedy Head NMS 30%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seadragon_v1', 'weight_filepath' : 'seadragon_v1', 'nms': True, 'nms_thresh': 0.30, 'species_set' : set(['seadragon_leafy+head'])},
{'label': 'Weedy Head NMS 40%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seadragon_v1', 'weight_filepath' : 'seadragon_v1', 'nms': True, 'nms_thresh': 0.40, 'species_set' : set(['seadragon_leafy+head'])},
{'label': 'Weedy Head NMS 50%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seadragon_v1', 'weight_filepath' : 'seadragon_v1', 'nms': True, 'nms_thresh': 0.50, 'species_set' : set(['seadragon_leafy+head'])},
{'label': 'Weedy Head NMS 60%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seadragon_v1', 'weight_filepath' : 'seadragon_v1', 'nms': True, 'nms_thresh': 0.60, 'species_set' : set(['seadragon_leafy+head'])},
{'label': 'Weedy Head NMS 70%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seadragon_v1', 'weight_filepath' : 'seadragon_v1', 'nms': True, 'nms_thresh': 0.70, 'species_set' : set(['seadragon_leafy+head'])},
{'label': 'Weedy Head NMS 80%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seadragon_v1', 'weight_filepath' : 'seadragon_v1', 'nms': True, 'nms_thresh': 0.80, 'species_set' : set(['seadragon_leafy+head'])},
{'label': 'Weedy Head NMS 90%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seadragon_v1', 'weight_filepath' : 'seadragon_v1', 'nms': True, 'nms_thresh': 0.90, 'species_set' : set(['seadragon_leafy+head'])},
{'label': 'Weedy Head NMS 100%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seadragon_v1', 'weight_filepath' : 'seadragon_v1', 'nms': True, 'nms_thresh': 1.00, 'species_set' : set(['seadragon_leafy+head'])},
],
{},
),
'seadragon_leafy_head_v1': (
[
{'label': 'Leafy Head NMS 0%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seadragon_v1', 'weight_filepath' : 'seadragon_v1', 'nms': True, 'nms_thresh': 0.00, 'species_set' : set(['seadragon_weedy+head'])},
{'label': 'Leafy Head NMS 10%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seadragon_v1', 'weight_filepath' : 'seadragon_v1', 'nms': True, 'nms_thresh': 0.10, 'species_set' : set(['seadragon_weedy+head'])},
{'label': 'Leafy Head NMS 20%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seadragon_v1', 'weight_filepath' : 'seadragon_v1', 'nms': True, 'nms_thresh': 0.20, 'species_set' : set(['seadragon_weedy+head'])},
{'label': 'Leafy Head NMS 30%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seadragon_v1', 'weight_filepath' : 'seadragon_v1', 'nms': True, 'nms_thresh': 0.30, 'species_set' : set(['seadragon_weedy+head'])},
{'label': 'Leafy Head NMS 40%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seadragon_v1', 'weight_filepath' : 'seadragon_v1', 'nms': True, 'nms_thresh': 0.40, 'species_set' : set(['seadragon_weedy+head'])},
{'label': 'Leafy Head NMS 50%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seadragon_v1', 'weight_filepath' : 'seadragon_v1', 'nms': True, 'nms_thresh': 0.50, 'species_set' : set(['seadragon_weedy+head'])},
{'label': 'Leafy Head NMS 60%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seadragon_v1', 'weight_filepath' : 'seadragon_v1', 'nms': True, 'nms_thresh': 0.60, 'species_set' : set(['seadragon_weedy+head'])},
{'label': 'Leafy Head NMS 70%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seadragon_v1', 'weight_filepath' : 'seadragon_v1', 'nms': True, 'nms_thresh': 0.70, 'species_set' : set(['seadragon_weedy+head'])},
{'label': 'Leafy Head NMS 80%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seadragon_v1', 'weight_filepath' : 'seadragon_v1', 'nms': True, 'nms_thresh': 0.80, 'species_set' : set(['seadragon_weedy+head'])},
{'label': 'Leafy Head NMS 90%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seadragon_v1', 'weight_filepath' : 'seadragon_v1', 'nms': True, 'nms_thresh': 0.90, 'species_set' : set(['seadragon_weedy+head'])},
{'label': 'Leafy Head NMS 100%', 'grid' : False, 'algo': 'lightnet', 'config_filepath' : 'seadragon_v1', 'weight_filepath' : 'seadragon_v1', 'nms': True, 'nms_thresh': 1.00, 'species_set' : set(['seadragon_weedy+head'])},
],
{},
),
}
for config_key in config_dict:
config_list, config = config_dict[config_key]
for key in kwargs:
config[key] = kwargs[key]
# Backwards compatibility hack
if test_gid_list is not None:
for config_ in config_list:
if 'test_gid_list' not in config_:
config_['test_gid_list'] = test_gid_list
ibs.localizer_precision_recall_algo_display(
config_list,
config_tag=config_key,
output_path=output_path,
**config
)
@register_ibs_method
def localizer_precision_recall_algo_display(ibs, config_list, config_tag='', min_overlap=0.5, figsize=(40, 9),
                                            target_recall=0.8, BEST_INDEX=None, offset_color=0,
                                            write_images=False, plot_point=True, output_path=None, **kwargs):
    """Render a multi-panel localizer evaluation figure and save it as a PNG.

    Panels (matplotlib subplots on one figure):
      141 - precision-recall curve per config, highlighting the best config;
      142 - recall vs. IOU curve per config;
      143 - confusion matrix for the config with the highest mAP;
      144 - confusion matrix at the operating point reaching ``target_recall``
            (only drawn when a valid best config was found in panel 141).

    Args:
        ibs: controller object (method is attached via ``register_ibs_method``).
        config_list (list of dict): one detector configuration per curve; each
            must carry a ``'label'`` key (used for legends/titles).
        config_tag (str): optional prefix for the output filename.
        min_overlap (float): ground-truth IOU threshold for a correct detection.
        figsize (tuple): matplotlib figure size in inches.
        target_recall (float): recall operating point for the panel-144 matrix.
        BEST_INDEX (int or None): force this config index as "best" instead of
            auto-selecting; ignored for panel 144 if it is not a valid winner.
        offset_color (int): number of leading gray placeholder colors, used to
            keep curve colors aligned across related figures.
        write_images (bool): accepted but unused in this body — TODO confirm
            whether downstream plot helpers consume it via ``**kwargs``.
        plot_point (bool): forwarded to the PR plot helper.
        output_path (str or None): directory for the PNG; defaults to Desktop.
        **kwargs: forwarded to the per-curve plot helpers.

    Returns:
        str: filesystem path of the saved figure.
    """
    import matplotlib.pyplot as plt
    import plottool as pt
    # Default output location is the user's Desktop.
    if output_path is None:
        output_path = abspath(expanduser(join('~', 'Desktop')))
    # Prepend `offset_color` gray entries so real colors stay in sync with
    # other figures that share a color assignment.
    color_list_ = []
    for _ in range(offset_color):
        color_list_ += [(0.2, 0.2, 0.2)]
    color_list = pt.distinct_colors(len(config_list) - len(color_list_), randomize=False)
    color_list = color_list_ + color_list
    fig_ = plt.figure(figsize=figsize, dpi=400)
    ######################################################################################
    # Panel 141: precision-recall curves, one per config.
    axes_ = plt.subplot(141)
    axes_.set_autoscalex_on(False)
    axes_.set_autoscaley_on(False)
    axes_.set_xlabel('Recall (Ground-Truth IOU >= %0.02f)' % (min_overlap, ))
    axes_.set_ylabel('Precision')
    axes_.set_xlim([0.0, 1.01])
    axes_.set_ylim([0.0, 1.01])
    ret_list = [
        localizer_precision_recall_algo_plot(ibs, color=color, min_overlap=min_overlap,
                                             plot_point=plot_point,
                                             target_recall=target_recall, **config)
        for color, config in zip(color_list, config_list)
    ]
    # ret[0] is the area under the curve (mAP); ret[3] is the operating-point
    # tuple (conf_list, x_list, y_list, length) at the target recall, or None.
    area_list = [ ret[0] for ret in ret_list ]
    tup2_list = [ ret[3] for ret in ret_list ]
    best_index = None if BEST_INDEX is None else BEST_INDEX # Match formatting of below, this is a silly conditional
    # Auto-select the config with the highest y (precision) at its operating
    # point; `valid_best_index` records every index that improved the running
    # best, so a user-supplied BEST_INDEX can be sanity-checked against it.
    best_y = 0.0
    best_index_ = None
    valid_best_index = []
    for index, tup2 in enumerate(tup2_list):
        if tup2 is None:
            continue
        conf_list, x_list, y_list, length = tup2
        y = y_list[0]
        if best_y < y:
            valid_best_index.append(index)
            best_index_ = index
            best_y = y
    # If user defined best_index is invalid, don't use it
    if best_index is None:
        best_index = best_index_
    else:
        if best_index not in valid_best_index:
            best_index = None
    # Mark the winning config's operating point with a gold diamond.
    # NOTE: best_conf_list is reused by panel 144 below (same guard).
    if best_index is not None:
        best_conf_list, best_x_list, best_y_list, best_length = tup2_list[best_index]
        color = 'xkcd:gold'
        marker = 'D'
        plt.plot(best_x_list, best_y_list, color=color, marker=marker)
    plt.title('Precision-Recall Curves', y=1.19)
    plt.legend(bbox_to_anchor=(0.0, 1.02, 1.0, .102), loc=3, ncol=2, mode="expand",
               borderaxespad=0.0)
    ######################################################################################
    # Panel 142: recall as a function of the IOU threshold.
    axes_ = plt.subplot(142)
    axes_.set_autoscalex_on(False)
    axes_.set_autoscaley_on(False)
    axes_.set_xlabel('IOU (Intersection / Union)')
    axes_.set_ylabel('Recall')
    axes_.set_xlim([0.0, 1.01])
    axes_.set_ylim([0.0, 1.01])
    ret_list = [
        localizer_iou_recall_algo_plot(ibs, color=color_, plot_point=False, **config_)
        for color_, config_ in zip(color_list, config_list)
    ]
    # area_list = [ ret[0] for ret in ret_list ]
    # tup2_list = [ ret[3] for ret in ret_list ]
    # best_index = None if BEST_INDEX is None else BEST_INDEX # Match formatting of below, this is a silly conditional
    # best_y = 0.0
    # best_index_ = None
    # valid_best_index = []
    # for index, tup2 in enumerate(tup2_list):
    #     if tup2 is None:
    #         continue
    #     conf_list, x_list, y_list, length = tup2
    #     y = y_list[0]
    #     if best_y < y:
    #         valid_best_index.append(index)
    #         best_index_ = index
    #         best_y = y
    # # If user defined best_index is invalid, don't use it
    # if best_index is None:
    #     best_index = best_index_
    # else:
    #     if best_index not in valid_best_index:
    #         best_index = None
    # if best_index is not None:
    #     best_conf_list, best_x_list, best_y_list, best_length = tup2_list[best_index]
    #     color = 'xkcd:gold'
    #     marker = 'D'
    #     plt.plot(best_x_list, best_y_list, color=color, marker=marker)
    plt.title('Recall-IOU Curves', y=1.19)
    plt.legend(bbox_to_anchor=(0.0, 1.02, 1.0, .102), loc=3, ncol=2, mode="expand",
               borderaxespad=0.0)
    ######################################################################################
    # axes_ = plt.subplot(153)
    # axes_.set_autoscalex_on(False)
    # axes_.set_autoscaley_on(False)
    # axes_.set_xlabel('IOU (Intersection / Union)')
    # axes_.set_ylabel('Precision')
    # axes_.set_xlim([0.0, 1.01])
    # axes_.set_ylim([0.0, 1.01])
    # ret_list = [
    #     localizer_iou_precision_algo_plot(ibs, color=color_, plot_point=False, **config_)
    #     for color_, config_ in zip(color_list, config_list)
    # ]
    # plt.title('Precision-IOU Curves', y=1.19)
    # plt.legend(bbox_to_anchor=(0.0, 1.02, 1.0, .102), loc=3, ncol=2, mode="expand",
    #            borderaxespad=0.0)
    ######################################################################################
    # Panel 144: confusion matrix at the confidence that achieves the target
    # recall for the best config found in panel 141 (skipped when none found).
    if best_index is not None:
        axes_ = plt.subplot(144)
        axes_.set_aspect(1)
        gca_ = plt.gca()
        gca_.grid(False)
        target_conf = best_conf_list[0]
        best_config = config_list[best_index]
        best_label = config_list[best_index]['label']
        best_area = area_list[best_index]
        values = localizer_confusion_matrix_algo_plot(ibs, min_overlap=min_overlap,
                                                      fig_=fig_, axes_=axes_,
                                                      target_conf=target_conf,
                                                      **best_config)
        best_conf, (correct_rate, _) = values
        axes_.set_xlabel('Predicted (Correct = %0.02f%%)' % (correct_rate * 100.0, ))
        axes_.set_ylabel('Ground-Truth')
        args = (target_recall, best_label, best_area, best_conf, )
        plt.title('Confusion Matrix for Recall >= %0.02f\n(Algo: %s, mAP = %0.02f, OP = %0.02f)' % args, y=1.26)
    ######################################################################################
    # Panel 143: confusion matrix for the config with the highest mAP
    # (best_index is deliberately re-derived from area_list here).
    axes_ = plt.subplot(143)
    axes_.set_aspect(1)
    gca_ = plt.gca()
    gca_.grid(False)
    best_index = np.argmax(area_list) if BEST_INDEX is None else BEST_INDEX
    best_config = config_list[best_index]
    best_label = config_list[best_index]['label']
    best_area = area_list[best_index]
    values = localizer_confusion_matrix_algo_plot(ibs, min_overlap=min_overlap,
                                                  fig_=fig_, axes_=axes_,
                                                  **best_config)
    best_conf, (correct_rate, _) = values
    axes_.set_xlabel('Predicted (Correct = %0.02f%%)' % (correct_rate * 100.0, ))
    axes_.set_ylabel('Ground-Truth')
    args = (best_label, best_area, best_conf, )
    plt.title('Confusion Matrix\n(Algo: %s, mAP = %0.02f, OP = %0.02f)' % args, y=1.26)
    ######################################################################################
    # Save figure as <config_tag->localizer-precision-recall-<IOU>.png.
    if len(config_tag) > 0:
        config_tag = '%s-' % (config_tag, )
    fig_filename = '%slocalizer-precision-recall-%0.2f.png' % (config_tag, min_overlap, )
    fig_path = join(output_path, fig_filename)
    plt.savefig(fig_path, bbox_inches='tight')
    return fig_path
@register_ibs_method
def localizer_precision_recall_algo_display_animate(ibs, config_list, **kwargs):
    """Regenerate the localizer PR figure for IOU thresholds 0.0 through 0.9.

    Calls ``localizer_precision_recall_algo_display`` once per threshold in
    steps of 0.1, forwarding ``config_list`` and any extra keyword arguments.
    """
    for tenths in range(10):
        threshold = tenths / 10.0
        print('Processing: %r' % (threshold, ))
        ibs.localizer_precision_recall_algo_display(config_list, min_overlap=threshold, **kwargs)
# def localizer_classification_tp_tn_fp_fn(gt_list, pred_list, conf, min_overlap,
# check_species=False,
# check_viewpoint=False, **kwargs):
# overlap = general_overlap(gt_list, pred_list)
# num_gt, num_pred = overlap.shape
# # Get confidences
# conf_list = [pred['confidence'] for pred in pred_list]
# pred_flag_list = [conf <= conf_ for conf_ in conf_list]
# if num_gt == 0:
# tp_list = [False] * len(pred_list)
# tn_list = [not pred_flag for pred_flag in pred_flag_list]
# fp_list = [ pred_flag for pred_flag in pred_flag_list]
# fn_list = [False] * len(pred_list)
# elif num_pred == 0:
# tp_list = []
# tn_list = []
# fp_list = []
# fn_list = []
# else:
# max_overlap = np.max(overlap, axis=0)
# gt_flag_list = min_overlap < max_overlap
# status_list = []
# for gt_flag, pred_flag in zip(gt_flag_list, pred_flag_list):
# if gt_flag and pred_flag:
# status_list.append('tp')
# elif gt_flag and not pred_flag:
# status_list.append('fn')
# elif not gt_flag and pred_flag:
# status_list.append('fp')
# elif not gt_flag and not pred_flag:
# status_list.append('tn')
# else:
# raise ValueError
# tp_list = [status == 'tp' for status in status_list]
# tn_list = [status == 'tn' for status in status_list]
# fp_list = [status == 'fp' for status in status_list]
# fn_list = [status == 'fn' for status in status_list]
# return tp_list, tn_list, fp_list, fn_list
# def localizer_classification_confusion_matrix_algo_plot(ibs, color, conf,
# label=None,
# min_overlap=0.25,
# write_images=False,
# **kwargs):
# print('Processing Confusion Matrix for: %r (Conf = %0.02f)' % (label, conf, ))
# test_gid_list = general_get_imageset_gids(ibs, 'TEST_SET', **kwargs)
# test_uuid_list = ibs.get_image_uuids(test_gid_list)
# print('\tGather Ground-Truth')
# gt_dict = general_parse_gt(ibs, test_gid_list=test_gid_list, **kwargs)
# print('\tGather Predictions')
# pred_dict = localizer_parse_pred(ibs, test_gid_list=test_gid_list, **kwargs)
# if write_images:
# output_folder = 'localizer-classification-confusion-matrix-%0.2f-%0.2f-images' % (min_overlap, conf, )
# output_path = abspath(expanduser(join('~', 'Desktop', output_folder)))
# ut.ensuredir(output_path)
# label_list = []
# prediction_list = []
# for index, (test_gid, test_uuid) in enumerate(zip(test_gid_list, test_uuid_list)):
# if test_uuid in pred_dict:
# gt_list = gt_dict[test_uuid]
# pred_list = pred_dict[test_uuid]
# values = localizer_classification_tp_tn_fp_fn(gt_list, pred_list, conf,
# min_overlap=min_overlap,
# **kwargs)
# tp_list, tn_list, fp_list, fn_list = values
# tp = tp_list.count(True)
# tn = tn_list.count(True)
# fp = fp_list.count(True)
# fn = fn_list.count(True)
# for _ in range(int(tp)):
# label_list.append('positive')
# prediction_list.append('positive')
# for _ in range(int(tn)):
# label_list.append('negative')
# prediction_list.append('negative')
# for _ in range(int(fp)):
# label_list.append('negative')
# prediction_list.append('positive')
# for _ in range(int(fn)):
# label_list.append('positive')
# prediction_list.append('negative')
# if write_images:
# test_image = ibs.get_images(test_gid)
# test_image = _resize(test_image, t_width=600, verbose=False)
# height_, width_, channels_ = test_image.shape
# for gt in gt_list:
# xtl = int(gt['xtl'] * width_)
# ytl = int(gt['ytl'] * height_)
# xbr = int(gt['xbr'] * width_)
# ybr = int(gt['ybr'] * height_)
# cv2.rectangle(test_image, (xtl, ytl), (xbr, ybr), (0, 0, 255))
# zipped = zip(pred_list, tp_list, tn_list, fp_list, fn_list)
# for pred, tp_, tn_, fp_, fn_ in zipped:
# if tp_:
# color = (0, 255, 0)
# elif fp_:
# continue
# # color = (255, 0, 0)
# elif fn_:
# color = (255, 0, 0)
# elif tn_:
# continue
# else:
# continue
# xtl = int(pred['xtl'] * width_)
# ytl = int(pred['ytl'] * height_)
# xbr = int(pred['xbr'] * width_)
# ybr = int(pred['ybr'] * height_)
# cv2.rectangle(test_image, (xtl, ytl), (xbr, ybr), color)
# status_str = 'success' if (fp + fn) == 0 else 'failure'
# status_val = tp - fp - fn
# args = (status_str, status_val, test_gid, tp, fp, fn, )
# output_filename = 'test_%s_%d_gid_%d_tp_%d_fp_%d_fn_%d.png' % args
# output_filepath = join(output_path, output_filename)
# cv2.imwrite(output_filepath, test_image)
# category_list = ['positive', 'negative']
# category_mapping = {
# 'positive': 0,
# 'negative': 1,
# }
# return general_confusion_matrix_algo(label_list, prediction_list, category_list,
# category_mapping, size=20, **kwargs)
# @register_ibs_method
# def localizer_classifications_confusion_matrix_algo_display(ibs, conf,
# min_overlap=0.25,
# figsize=(24, 7),
# write_images=False,
# target_recall=0.9,
# plot_point=True,
# masking=False,
# **kwargs):
# import matplotlib.pyplot as plt
# fig_ = plt.figure(figsize=figsize)
# config = {
# 'label' : 'WIC',
# 'algo' : '_COMBINED',
# 'species_set' : set(['zebra']),
# 'classify' : True,
# 'classifier_algo': 'svm',
# 'classifier_masking': masking,
# 'classifier_weight_filepath': '/home/jason/code/ibeis/models-bootstrap/classifier.svm.image.zebra.pkl',
# }
# axes_ = plt.subplot(111)
# axes_.set_aspect(1)
# gca_ = plt.gca()
# gca_.grid(False)
# correct_rate, _ = localizer_classification_confusion_matrix_algo_plot(ibs, None, conf,
# min_overlap=min_overlap,
# write_images=write_images,
# fig_=fig_, axes_=axes_,
# **config)
# axes_.set_xlabel('Predicted (Correct = %0.02f%%)' % (correct_rate * 100.0, ))
# axes_.set_ylabel('Ground-Truth')
# args = (min_overlap, conf, )
# plt.title('Confusion Matrix (IoU %0.02f, Conf %0.02f)' % args, y=1.13)
# # plt.show()
# args = (min_overlap, conf, )
# fig_filename = 'localizer-classification-confusion-matrix-%0.2f-%0.2f.png' % args
# fig_path = abspath(expanduser(join('~', 'Desktop', fig_filename)))
# plt.savefig(fig_path, bbox_inches='tight')
# @register_ibs_method
# def localizer_classifications_confusion_matrix_algo_display_animate(ibs, total=10, **kwargs):
# for index in range(0, total + 1):
# conf = index / total
# ibs.localizer_classifications_confusion_matrix_algo_display(conf, **kwargs)
def classifier_cameratrap_precision_recall_algo(ibs, positive_imageset_id, negative_imageset_id, **kwargs):
    """Compute precision-recall inputs for the camera-trap classifier.

    Test-set images are labeled 'positive'/'negative' by membership in the
    given imagesets (images in neither are dropped), classifier predictions
    and scores are pulled from the image dependency cache, and scores are
    normalized so confidence always means P(positive).

    Args:
        ibs: controller object.
        positive_imageset_id (int): imageset holding ground-truth positives.
        negative_imageset_id (int): imageset holding ground-truth negatives.
        **kwargs: classifier config passed to the dependency cache.

    Returns:
        result of ``general_precision_recall_algo`` over the labels/confidences.
    """
    depc = ibs.depc_image
    candidate_gids = list(set(general_get_imageset_gids(ibs, 'TEST_SET')))
    positives = set(ibs.get_imageset_gids(positive_imageset_id))
    negatives = set(ibs.get_imageset_gids(negative_imageset_id))
    test_gids = []
    labels = []
    for gid in candidate_gids:
        if gid in positives:
            truth = 'positive'
        elif gid in negatives:
            truth = 'negative'
        else:
            # Image belongs to neither imageset; exclude it from evaluation.
            continue
        test_gids.append(gid)
        labels.append(truth)
    predictions = depc.get_property('classifier', test_gids, 'class', config=kwargs)
    scores = depc.get_property('classifier', test_gids, 'score', config=kwargs)
    # Flip scores of 'negative' predictions so every value is P(positive).
    confidences = [
        score if prediction == 'positive' else 1.0 - score
        for prediction, score in zip(predictions, scores)
    ]
    return general_precision_recall_algo(ibs, labels, confidences)
def classifier_cameratrap_precision_recall_algo_plot(ibs, **kwargs):
    """Plot the camera-trap classifier precision-recall curve.

    Expects kwargs['label'] (curve label) plus whatever the underlying
    algorithm function needs; returns the result of ``general_area_best_conf``
    over (recall, precision).
    """
    print('Processing Precision-Recall for: %r' % (kwargs['label'], ))
    values = classifier_cameratrap_precision_recall_algo(ibs, **kwargs)
    conf_list, pr_list, re_list, tpr_list, fpr_list = values
    return general_area_best_conf(conf_list, re_list, pr_list, **kwargs)
def classifier_cameratrap_roc_algo_plot(ibs, **kwargs):
    """Plot the camera-trap classifier ROC curve.

    Expects kwargs['label'] (curve label); plots FPR vs. TPR without
    interpolation, targeting the ideal corner (0.0, 1.0).
    """
    print('Processing ROC for: %r' % (kwargs['label'], ))
    values = classifier_cameratrap_precision_recall_algo(ibs, **kwargs)
    conf_list, pr_list, re_list, tpr_list, fpr_list = values
    return general_area_best_conf(conf_list, fpr_list, tpr_list, interpolate=False,
                                  target=(0.0, 1.0), **kwargs)
def classifier_cameratrap_confusion_matrix_algo_plot(ibs, label, color, conf, positive_imageset_id, negative_imageset_id, output_cases=False, **kwargs):
    """Plot a confusion matrix for the camera-trap classifier at threshold ``conf``.

    Ground truth comes from membership in the positive/negative imagesets
    (images in neither are dropped); classifier scores are normalized to
    P(positive) and thresholded at ``conf``. Optionally writes misclassified
    images to the Desktop for inspection.

    Fix: the original body rebound the ``label`` parameter in both loops,
    clobbering the display label and shadowing the parameter; distinct local
    names (``truth``) are used instead. Behavior is otherwise unchanged.

    Args:
        ibs: controller object.
        label (str): display label for the progress message.
        color: accepted for plot-helper signature parity; unused in this body.
        conf (float): confidence threshold for a 'positive' prediction.
        positive_imageset_id (int): imageset of ground-truth positives.
        negative_imageset_id (int): imageset of ground-truth negatives.
        output_cases (bool): if True, dump failure-case images to
            ~/Desktop/cameratrap-confusion-incorrect/{positive,negative}.
        **kwargs: classifier config; also forwarded to the matrix renderer.

    Returns:
        result of ``general_confusion_matrix_algo`` over labels/predictions.
    """
    print('Processing Confusion Matrix for: %r (Conf = %0.02f)' % (label, conf, ))
    depc = ibs.depc_image
    test_gid_set_ = set(general_get_imageset_gids(ibs, 'TEST_SET'))
    test_gid_set_ = list(test_gid_set_)
    positive_gid_set = set(ibs.get_imageset_gids(positive_imageset_id))
    negative_gid_set = set(ibs.get_imageset_gids(negative_imageset_id))
    test_gid_set = []
    label_list = []
    for gid in test_gid_set_:
        if gid in positive_gid_set:
            truth = 'positive'
        elif gid in negative_gid_set:
            truth = 'negative'
        else:
            # Image belongs to neither imageset; exclude it from evaluation.
            continue
        test_gid_set.append(gid)
        label_list.append(truth)
    prediction_list = depc.get_property('classifier', test_gid_set, 'class', config=kwargs)
    confidence_list = depc.get_property('classifier', test_gid_set, 'score', config=kwargs)
    # Flip scores of 'negative' predictions so every value is P(positive).
    confidence_list = [
        confidence if prediction == 'positive' else 1.0 - confidence
        for prediction, confidence in zip(prediction_list, confidence_list)
    ]
    # Re-threshold at the requested operating point.
    prediction_list = [
        'positive' if confidence >= conf else 'negative'
        for confidence in confidence_list
    ]
    if output_cases:
        output_path = 'cameratrap-confusion-incorrect'
        output_path = abspath(expanduser(join('~', 'Desktop', output_path)))
        positive_path = join(output_path, 'positive')
        negative_path = join(output_path, 'negative')
        # Start from a clean output tree each run.
        ut.delete(output_path)
        ut.ensuredir(output_path)
        ut.ensuredir(positive_path)
        ut.ensuredir(negative_path)
        interpolation = cv2.INTER_LANCZOS4
        warpkw = dict(interpolation=interpolation)
        for gid, truth, prediction in zip(test_gid_set, label_list, prediction_list):
            if truth == prediction:
                continue
            image = ibs.get_images(gid)
            image = cv2.resize(image, (192, 192), **warpkw)
            # Failure cases are filed under their ground-truth class.
            image_path = positive_path if truth == 'positive' else negative_path
            image_filename = 'hardidx_%d_pred_%s_case_fail.jpg' % (gid, prediction, )
            image_filepath = join(image_path, image_filename)
            # Save path
            cv2.imwrite(image_filepath, image)
    category_list = ['positive', 'negative']
    category_mapping = {
        'positive': 0,
        'negative': 1,
    }
    return general_confusion_matrix_algo(label_list, prediction_list, category_list,
                                         category_mapping, **kwargs)
@register_ibs_method
def classifier_cameratrap_precision_recall_algo_display(ibs, positive_imageset_id, negative_imageset_id, config_list=None, figsize=(20, 20)):
    """Build and save a 4-panel evaluation figure for the camera-trap classifier.

    Draws precision-recall (panel 221) and ROC (panel 222) curves for every
    model config in ``config_list``, selects the best config per curve by its
    area metric, then renders confusion matrices (panels 223/224) at each best
    operating point.  The figure is saved to
    ``~/Desktop/classifier-cameratrap-precision-recall-roc.png``.

    Args:
        ibs: controller instance (provides image/imageset access).
        positive_imageset_id: imageset id of ground-truth positive images.
        negative_imageset_id: imageset id of ground-truth negative images.
        config_list (list of dict, optional): classifier configs to compare;
            each needs at least a ``'label'`` key.  Defaults to one DenseNet
            model config (see below).
        figsize (tuple): matplotlib figure size in inches.
    """
    import matplotlib.pyplot as plt
    import plottool as pt
    fig_ = plt.figure(figsize=figsize, dpi=400)
    if config_list is None:
        # Historical configs kept for reference; only the v2 DenseNet is active.
        config_list = [
            # {'label': 'Initial Model (5%) - IBEIS_CNN', 'classifier_algo': 'cnn', 'classifier_weight_filepath': 'ryan.ibeis_cnn.v1'},
            # {'label': 'Initial Model (5%) - DenseNet', 'classifier_algo': 'densenet', 'classifier_weight_filepath': 'ryan_densenet_v1'},
            # {'label': 'Initial Model (5%) - DenseNet 0', 'classifier_algo': 'densenet', 'classifier_weight_filepath': 'ryan_densenet_v1:0'},
            # {'label': 'Initial Model (5%) - DenseNet 1', 'classifier_algo': 'densenet', 'classifier_weight_filepath': 'ryan_densenet_v1:1'},
            # {'label': 'Initial Model (5%) - DenseNet 2', 'classifier_algo': 'densenet', 'classifier_weight_filepath': 'ryan_densenet_v1:2'},
            {'label': 'Initial Model (10%) - DenseNet', 'classifier_algo': 'densenet', 'classifier_weight_filepath': 'ryan_densenet_v2'},
            # {'label': 'Initial Model (10%) - DenseNet 0', 'classifier_algo': 'densenet', 'classifier_weight_filepath': 'ryan_densenet_v2:0'},
            # {'label': 'Initial Model (10%) - DenseNet 1', 'classifier_algo': 'densenet', 'classifier_weight_filepath': 'ryan_densenet_v2:1'},
            # {'label': 'Initial Model (10%) - DenseNet 2', 'classifier_algo': 'densenet', 'classifier_weight_filepath': 'ryan_densenet_v2:2'},
            # {'label': 'Initial Model (0%)', 'classifier_algo': 'cnn', 'classifier_weight_filepath': 'megan2.1'},
            # {'label': 'Retrained Model (1%)', 'classifier_algo': 'cnn', 'classifier_weight_filepath': 'megan2.2'},
            # {'label': 'Retrained Model (2%)', 'classifier_algo': 'cnn', 'classifier_weight_filepath': 'megan2.3'},
            # {'label': 'Retrained Model (3%)', 'classifier_algo': 'cnn', 'classifier_weight_filepath': 'megan2.4'},
            # {'label': 'Retrained Model (4%)', 'classifier_algo': 'cnn', 'classifier_weight_filepath': 'megan2.5'},
            # {'label': 'Retrained Model (5%)', 'classifier_algo': 'cnn', 'classifier_weight_filepath': 'megan2.6'},
            # {'label': 'Initial Model (0%)', 'classifier_weight_filepath': 'megan1.1'},
            # {'label': 'Retrained Model (1%)', 'classifier_weight_filepath': 'megan1.2'},
            # {'label': 'Retrained Model (2%)', 'classifier_weight_filepath': 'megan1.3'},
            # {'label': 'Retrained Model (3%)', 'classifier_weight_filepath': 'megan1.4'},
            # {'label': 'Retrained Model (3.5%)', 'classifier_weight_filepath': 'megan1.5'},
            # {'label': 'Retrained Model (5%)', 'classifier_weight_filepath': 'megan1.6'},
        ]
    # One distinct color per config, deterministic so runs are comparable.
    color_list = pt.distinct_colors(len(config_list), randomize=False)
    # --- Panel 221: precision-recall curves, one per config ---
    axes_ = plt.subplot(221)
    axes_.set_autoscalex_on(False)
    axes_.set_autoscaley_on(False)
    axes_.set_xlabel('Recall')
    axes_.set_ylabel('Precision')
    axes_.set_xlim([0.0, 1.01])
    axes_.set_ylim([0.0, 1.01])
    ret_list = [
        classifier_cameratrap_precision_recall_algo_plot(ibs, color=color,
                                                         positive_imageset_id=positive_imageset_id,
                                                         negative_imageset_id=negative_imageset_id,
                                                         **config)
        for color, config in zip(color_list, config_list)
    ]
    # ret[0] is the curve area, ret[1] the best operating-point confidence
    # (consistent with how best_area1/best_conf1 are used below).
    area_list = [ ret[0] for ret in ret_list ]
    conf_list = [ ret[1] for ret in ret_list ]
    # Pick the config with the largest P-R area as the "best" model.
    index = np.argmax(area_list)
    # index = 0
    best_label1 = config_list[index]['label']
    best_config1 = config_list[index]
    best_color1 = color_list[index]
    best_area1 = area_list[index]
    best_conf1 = conf_list[index]
    plt.title('Precision-Recall Curve (Best: %s, AP = %0.02f)' % (best_label1, best_area1, ), y=1.10)
    plt.legend(bbox_to_anchor=(0.0, 1.02, 1.0, .102), loc=3, ncol=2, mode="expand",
               borderaxespad=0.0)
    # --- Panel 222: ROC curves, one per config ---
    axes_ = plt.subplot(222)
    axes_.set_autoscalex_on(False)
    axes_.set_autoscaley_on(False)
    axes_.set_xlabel('False-Positive Rate')
    axes_.set_ylabel('True-Positive Rate')
    axes_.set_xlim([0.0, 1.01])
    axes_.set_ylim([0.0, 1.01])
    ret_list = [
        classifier_cameratrap_roc_algo_plot(ibs, color=color,
                                            positive_imageset_id=positive_imageset_id,
                                            negative_imageset_id=negative_imageset_id,
                                            **config)
        for color, config in zip(color_list, config_list)
    ]
    area_list = [ ret[0] for ret in ret_list ]
    conf_list = [ ret[1] for ret in ret_list ]
    # Best config may differ from the P-R pick; chosen by ROC area here.
    index = np.argmax(area_list)
    # index = 0
    best_label2 = config_list[index]['label']
    best_config2 = config_list[index]
    best_color2 = color_list[index]
    best_area2 = area_list[index]
    best_conf2 = conf_list[index]
    plt.title('ROC Curve (Best: %s, AP = %0.02f)' % (best_label2, best_area2, ), y=1.10)
    plt.legend(bbox_to_anchor=(0.0, 1.02, 1.0, .102), loc=3, ncol=2, mode="expand",
               borderaxespad=0.0)
    # --- Panel 223: confusion matrix at the P-R best operating point ---
    # output_cases=True additionally dumps failure images to ~/Desktop.
    axes_ = plt.subplot(223)
    axes_.set_aspect(1)
    gca_ = plt.gca()
    gca_.grid(False)
    correct_rate, _ = classifier_cameratrap_confusion_matrix_algo_plot(ibs, color=best_color1,
                                                                       conf=best_conf1, fig_=fig_, axes_=axes_,
                                                                       positive_imageset_id=positive_imageset_id,
                                                                       negative_imageset_id=negative_imageset_id,
                                                                       output_cases=True, **best_config1)
    axes_.set_xlabel('Predicted (Correct = %0.02f%%)' % (correct_rate * 100.0, ))
    axes_.set_ylabel('Ground-Truth')
    plt.title('P-R Confusion Matrix (OP = %0.02f)' % (best_conf1, ), y=1.12)
    # --- Panel 224: confusion matrix at the ROC best operating point ---
    axes_ = plt.subplot(224)
    axes_.set_aspect(1)
    gca_ = plt.gca()
    gca_.grid(False)
    correct_rate, _ = classifier_cameratrap_confusion_matrix_algo_plot(ibs, color=best_color2,
                                                                       conf=best_conf2, fig_=fig_, axes_=axes_,
                                                                       positive_imageset_id=positive_imageset_id,
                                                                       negative_imageset_id=negative_imageset_id,
                                                                       **best_config2)
    axes_.set_xlabel('Predicted (Correct = %0.02f%%)' % (correct_rate * 100.0, ))
    axes_.set_ylabel('Ground-Truth')
    plt.title('ROC Confusion Matrix (OP = %0.02f)' % (best_conf2, ), y=1.12)
    # Save the composed figure to the user's Desktop.
    fig_filename = 'classifier-cameratrap-precision-recall-roc.png'
    fig_path = abspath(expanduser(join('~', 'Desktop', fig_filename)))
    plt.savefig(fig_path, bbox_inches='tight')
# def classifier_binary_precision_recall_algo(ibs, category_set, **kwargs):
# depc = ibs.depc_image
# test_gid_set = set(general_get_imageset_gids(ibs, 'TEST_SET'))
# test_gid_set = list(test_gid_set)
# aids_list = ibs.get_image_aids(test_gid_set)
# species_set_list = [
# set(ibs.get_annot_species_texts(aid_list))
# for aid_list in aids_list
# ]
# label_list = [
# 'negative' if len(species_set & category_set) == 0 else 'positive'
# for species_set in species_set_list
# ]
# prediction_list = depc.get_property('classifier', test_gid_set, 'class', config=kwargs)
# confidence_list = depc.get_property('classifier', test_gid_set, 'score', config=kwargs)
# confidence_list = [
# confidence if prediction == 'positive' else 1.0 - confidence
# for prediction, confidence in zip(prediction_list, confidence_list)
# ]
# return general_precision_recall_algo(ibs, label_list, confidence_list)
# def classifier_binary_precision_recall_algo_plot(ibs, **kwargs):
# label = kwargs['label']
# print('Processing Precision-Recall for: %r' % (label, ))
# conf_list, pr_list, re_list, tpr_list, fpr_list = classifier_binary_precision_recall_algo(ibs, **kwargs)
# return general_area_best_conf(conf_list, re_list, pr_list, **kwargs)
# def classifier_binary_roc_algo_plot(ibs, **kwargs):
# label = kwargs['label']
# print('Processing ROC for: %r' % (label, ))
# conf_list, pr_list, re_list, tpr_list, fpr_list = classifier_binary_precision_recall_algo(ibs, **kwargs)
# return general_area_best_conf(conf_list, fpr_list, tpr_list, interpolate=False,
# target=(0.0, 1.0), **kwargs)
# def classifier_binary_confusion_matrix_algo_plot(ibs, label, color, conf, category_set, **kwargs):
# print('Processing Confusion Matrix for: %r (Conf = %0.02f)' % (label, conf, ))
# depc = ibs.depc_image
# test_gid_set = set(general_get_imageset_gids(ibs, 'TEST_SET'))
# test_gid_set = list(test_gid_set)
# aids_list = ibs.get_image_aids(test_gid_set)
# species_set_list = [
# set(ibs.get_annot_species_texts(aid_list))
# for aid_list in aids_list
# ]
# label_list = [
# 'negative' if len(species_set & category_set) == 0 else 'positive'
# for species_set in species_set_list
# ]
# prediction_list = depc.get_property('classifier', test_gid_set, 'class', config=kwargs)
# confidence_list = depc.get_property('classifier', test_gid_set, 'score', config=kwargs)
# confidence_list = [
# confidence if prediction == 'positive' else 1.0 - confidence
# for prediction, confidence in zip(prediction_list, confidence_list)
# ]
# prediction_list = [
# 'positive' if confidence >= conf else 'negative'
# for confidence in confidence_list
# ]
# category_list = ['positive', 'negative']
# category_mapping = {
# 'positive': 0,
# 'negative': 1,
# }
# return general_confusion_matrix_algo(label_list, prediction_list, category_list,
# category_mapping, **kwargs)
# @register_ibs_method
# def classifier_binary_precision_recall_algo_display(ibs, figsize=(16, 16), **kwargs):
# import matplotlib.pyplot as plt
# fig_ = plt.figure(figsize=figsize)
# # label = 'V1'
# # species_list = ['zebra']
# # kwargs['classifier_weight_filepath'] = 'coco_zebra'
# label = 'V3'
# species_list = ['zebra_plains', 'zebra_grevys']
# kwargs['classifier_weight_filepath'] = 'v3_zebra'
# category_set = set(species_list)
# axes_ = plt.subplot(221)
# axes_.set_autoscalex_on(False)
# axes_.set_autoscaley_on(False)
# axes_.set_xlabel('Recall')
# axes_.set_ylabel('Precision')
# axes_.set_xlim([0.0, 1.01])
# axes_.set_ylim([0.0, 1.01])
# area, best_conf1, _ = classifier_binary_precision_recall_algo_plot(ibs, label=label, color='r', category_set=category_set, **kwargs)
# plt.title('Precision-Recall Curve (AP = %0.02f)' % (area, ), y=1.10)
# plt.legend(bbox_to_anchor=(0.0, 1.02, 1.0, .102), loc=3, ncol=2, mode="expand",
# borderaxespad=0.0)
# axes_ = plt.subplot(222)
# axes_.set_autoscalex_on(False)
# axes_.set_autoscaley_on(False)
# axes_.set_xlabel('False-Positive Rate')
# axes_.set_ylabel('True-Positive Rate')
# axes_.set_xlim([0.0, 1.01])
# axes_.set_ylim([0.0, 1.01])
# area, best_conf2, _ = classifier_binary_roc_algo_plot(ibs, label=label, color='r', category_set=category_set, **kwargs)
# plt.title('ROC Curve (AP = %0.02f)' % (area, ), y=1.10)
# plt.legend(bbox_to_anchor=(0.0, 1.02, 1.0, .102), loc=3, ncol=2, mode="expand",
# borderaxespad=0.0)
# axes_ = plt.subplot(223)
# axes_.set_aspect(1)
# gca_ = plt.gca()
# gca_.grid(False)
# correct_rate, _ = classifier_binary_confusion_matrix_algo_plot(ibs, label, 'r', conf=best_conf1, fig_=fig_, axes_=axes_, category_set=category_set, **kwargs)
# axes_.set_xlabel('Predicted (Correct = %0.02f%%)' % (correct_rate * 100.0, ))
# axes_.set_ylabel('Ground-Truth')
# plt.title('P-R Confusion Matrix (OP = %0.02f)' % (best_conf1, ), y=1.12)
# axes_ = plt.subplot(224)
# axes_.set_aspect(1)
# gca_ = plt.gca()
# gca_.grid(False)
# correct_rate, _ = classifier_binary_confusion_matrix_algo_plot(ibs, label, 'r', conf=best_conf2, fig_=fig_, axes_=axes_, category_set=category_set, **kwargs)
# axes_.set_xlabel('Predicted (Correct = %0.02f%%)' % (correct_rate * 100.0, ))
# axes_.set_ylabel('Ground-Truth')
# plt.title('ROC Confusion Matrix (OP = %0.02f)' % (best_conf2, ), y=1.12)
# fig_filename = 'classifier-precision-recall-roc.png'
# fig_path = abspath(expanduser(join('~', 'Desktop', fig_filename)))
# plt.savefig(fig_path, bbox_inches='tight')
def classifier2_precision_recall_algo(ibs, category, species_mapping=None,
                                      output_path=None, test_gid_list=None,
                                      test_label_list=None, **kwargs):
    """Compute precision-recall inputs for one classifier-two category.

    Builds one-vs-rest ground-truth labels ('positive'/'negative') for the
    test images and pairs them with the category's confidence from the
    'classifier_two' dependency-cache scores.

    Args:
        ibs: controller instance.
        category (str): species category treated as the positive class.
        species_mapping (dict, optional): maps raw annot species names onto
            canonical ones before comparison.  Defaults to no remapping.
        output_path (str, optional): if given, dump labeled thumbnails of
            every test image here (debugging aid).
        test_gid_list (list, optional): image rowids to evaluate; defaults to
            the TEST_SET imageset.
        test_label_list (list, optional): explicit per-image labels; when
            None, labels are derived from each image's annotations.
        **kwargs: forwarded as the 'classifier_two' depc config.

    Returns:
        The result of ``general_precision_recall_algo`` on the derived
        labels and confidences.
    """
    # Fix: avoid a mutable default argument ({} shared across calls).
    if species_mapping is None:
        species_mapping = {}
    depc = ibs.depc_image
    if test_gid_list is None:
        test_gid_set = set(general_get_imageset_gids(ibs, 'TEST_SET'))
        test_gid_list = list(test_gid_set)
    if test_label_list is None:
        # Derive each image's species set from its annotations.
        aids_list = ibs.get_image_aids(test_gid_list)
        species_list_list = list(map(ibs.get_annot_species_texts, aids_list))
        species_set_list = []
        for species_list in species_list_list:
            species_set = set([])
            for species in species_list:
                species = species_mapping.get(species, species)
                species_set.add(species)
            species_set_list.append(species_set)
    else:
        # Explicit labels provided: one singleton species set per image.
        species_set_list = [
            set([label])
            for label in test_label_list
        ]
    # One-vs-rest ground truth for the requested category.
    label_list = [
        'positive' if category in species_set_ else 'negative'
        for species_set_ in species_set_list
    ]
    confidence_dict_list = depc.get_property('classifier_two', test_gid_list, 'scores', config=kwargs)
    confidence_list = [
        confidence_dict[category]
        for confidence_dict in confidence_dict_list
    ]
    if output_path is not None:
        # Debug dump: write a thumbnail per test image, encoding the
        # ground-truth species and all predicted scores in the filename.
        ut.ensuredir(output_path)
        config_ = {
            'draw_annots' : False,
            'thumbsize'   : (192, 192),
        }
        thumbnail_list = depc.get_property('thumbnails', test_gid_list, 'img', config=config_)
        zipped = zip(test_gid_list, thumbnail_list, species_set_list, confidence_dict_list)
        for index, (test_gid, thumbnail, species_set, confidence_dict) in enumerate(zipped):
            print(index)
            x = ';'.join(species_set)
            y = []
            for key in confidence_dict:
                y.append('%s-%0.04f' % (key, confidence_dict[key], ))
            y = ';'.join(y)
            output_filename = 'image-index-%s-gid-%s-gt-%s-pred-%s.png' % (index, test_gid, x, y)
            output_filepath = join(output_path, output_filename)
            cv2.imwrite(output_filepath, thumbnail)
    # 'category' is not part of the depc config; drop it before returning.
    kwargs.pop('category', None)
    return general_precision_recall_algo(ibs, label_list, confidence_list)
def classifier2_precision_recall_algo_plot(ibs, **kwargs):
    """Plot the precision-recall curve for one classifier-two category.

    Delegates metric computation to ``classifier2_precision_recall_algo``
    and hands the (recall, precision) series to ``general_area_best_conf``.
    """
    print('Processing Precision-Recall for: %r' % (kwargs['label'], ))
    values = classifier2_precision_recall_algo(ibs, **kwargs)
    conf_list, pr_list, re_list, tpr_list, fpr_list = values
    return general_area_best_conf(conf_list, re_list, pr_list, **kwargs)
def classifier2_roc_algo_plot(ibs, **kwargs):
    """Plot the ROC curve for one classifier-two category.

    Uses the same underlying metrics as the precision-recall plot but feeds
    the (FPR, TPR) series to ``general_area_best_conf``, targeting the ideal
    (0, 1) corner without interpolation.
    """
    print('Processing ROC for: %r' % (kwargs['label'], ))
    values = classifier2_precision_recall_algo(ibs, **kwargs)
    conf_list, pr_list, re_list, tpr_list, fpr_list = values
    return general_area_best_conf(conf_list, fpr_list, tpr_list, interpolate=False,
                                  target=(0.0, 1.0), **kwargs)
@register_ibs_method
def classifier2_precision_recall_algo_display(ibs, species_list=None,
                                              species_mapping={},
                                              nice_mapping={},
                                              test_gid_list=None,
                                              test_label_list=None,
                                              figsize=(20, 9), **kwargs):
    """Plot P-R and ROC curves for every classifier-two category and report accuracy.

    Renders a two-panel figure (precision-recall left, ROC right) with one
    curve per category, derives a per-category operating point from the ROC
    pass, then scores exact-set multi-label accuracy at those operating
    points.  The figure is saved to
    ``~/Desktop/classifier2-precision-recall-roc.png``.

    NOTE(review): ``species_mapping``/``nice_mapping`` use mutable ``{}``
    defaults; harmless while unmutated, but a None-sentinel would be safer.

    Args:
        ibs: controller instance.
        species_list (iterable, optional): categories to evaluate; defaults
            to the score keys of the first test image.
        species_mapping (dict): raw-to-canonical species renames.
        nice_mapping (dict): category -> display label for the legend.
        test_gid_list (list, optional): image rowids; defaults to TEST_SET.
        test_label_list (list, optional): explicit per-image labels.
        figsize (tuple): matplotlib figure size in inches.
        **kwargs: forwarded as the 'classifier_two' depc config.
    """
    import matplotlib.pyplot as plt
    import plottool as pt
    depc = ibs.depc_image
    fig_ = plt.figure(figsize=figsize, dpi=400)  # NOQA
    # kwargs['classifier_two_weight_filepath'] = 'v3'
    # kwargs['classifier_two_weight_filepath'] = 'candidacy'
    # kwargs['classifier_two_weight_filepath'] = 'ggr2'
    is_labeled = test_label_list is not None
    # Hard-coded to the flukebook DenseNet model (older weights kept above).
    kwargs['classifier_two_algo'] = 'densenet'
    kwargs['classifier_two_weight_filepath'] = 'flukebook_v1'
    test_gid_set = set(general_get_imageset_gids(ibs, 'TEST_SET'))
    test_gid_list_ = list(test_gid_set) if test_gid_list is None else test_gid_list
    test_label_list_ = test_label_list if is_labeled else [None] * len(test_gid_list_)
    # Keep only (gid, label) pairs whose gid is actually in TEST_SET.
    zipped = list(zip(test_gid_list_, test_label_list_))
    test_gid_list_ = []
    test_label_list_ = []
    for test_gid_, test_label_ in zipped:
        if test_gid_ in test_gid_set:
            test_gid_list_.append(test_gid_)
            test_label_list_.append(test_label_)
    test_gid_list = test_gid_list_
    test_label_list = test_label_list_ if is_labeled else None
    # depc.delete_property('classifier_two', test_gid_list, config=kwargs)
    if species_list is None:
        # Infer the category set from the first test image's score dict.
        # NOTE(review): a scalar gid is passed to get_property here, while
        # list inputs are used elsewhere — confirm depc supports this.
        test_gid = test_gid_list[0]
        confidence_dict = depc.get_property('classifier_two', test_gid, 'scores', config=kwargs)
        species_list = confidence_dict.keys()
    category_set = sorted(species_list)
    # One plot config per category, with a display-friendly label.
    config_list = []
    for category in category_set:
        category_nice = nice_mapping.get(category, category)
        config_dict = {
            'label': category_nice,
            'category': category,
        }
        config_dict.update(kwargs)
        config_list.append(config_dict)
    color_list_ = []
    color_list = pt.distinct_colors(len(config_list) - len(color_list_), randomize=False)
    color_list = color_list_ + color_list
    # --- Panel 121: precision-recall curves ---
    axes_ = plt.subplot(121)
    axes_.set_autoscalex_on(False)
    axes_.set_autoscaley_on(False)
    axes_.set_xlabel('Recall')
    axes_.set_ylabel('Precision')
    axes_.set_xlim([0.0, 1.01])
    axes_.set_ylim([0.0, 1.01])
    for color, config in zip(color_list, config_list):
        classifier2_precision_recall_algo_plot(ibs, color=color,
                                               test_gid_list=test_gid_list,
                                               test_label_list=test_label_list,
                                               species_mapping=species_mapping,
                                               **config)
    plt.title('Precision-Recall Curves', y=1.19)
    plt.legend(bbox_to_anchor=(0.0, 1.02, 1.0, .102), loc=3, ncol=2, mode="expand",
               borderaxespad=0.0)
    # --- Panel 122: ROC curves; also collect per-category operating points ---
    axes_ = plt.subplot(122)
    axes_.set_autoscalex_on(False)
    axes_.set_autoscaley_on(False)
    axes_.set_xlabel('False-Positive Rate')
    axes_.set_ylabel('True-Positive Rate')
    axes_.set_xlim([0.0, 1.01])
    axes_.set_ylim([0.0, 1.01])
    op_dict = {}
    for color, config in zip(color_list, config_list):
        values = classifier2_roc_algo_plot(ibs, color=color,
                                           test_gid_list=test_gid_list,
                                           test_label_list=test_label_list,
                                           species_mapping=species_mapping,
                                           **config)
        ap, best_conf, tup1, tup2 = values
        op_dict[config['category']] = best_conf
    plt.title('ROC Curves', y=1.19)
    plt.legend(bbox_to_anchor=(0.0, 1.02, 1.0, .102), loc=3, ncol=2, mode="expand",
               borderaxespad=0.0)
    # Rebuild the ground-truth species set per image for the accuracy check.
    if is_labeled:
        species_set_list = [
            set([label])
            for label in test_label_list
        ]
    else:
        aids_list = ibs.get_image_aids(test_gid_list)
        species_list_list = list(map(ibs.get_annot_species_texts, aids_list))
        species_set_list = []
        for species_list in species_list_list:
            species_set = set([])
            for species in species_list:
                species = species_mapping.get(species, species)
                species_set.add(species)
            species_set_list.append(species_set)
    confidence_dict_list = depc.get_property('classifier_two', test_gid_list, 'scores', config=kwargs)
    # Exact-set accuracy: predicted set (scores >= operating point) must
    # equal the ground-truth set (symmetric difference empty).
    correct = 0
    for test_gid, confidence_dict, species_set in zip(test_gid_list, confidence_dict_list, species_set_list):
        species_set_ = set([])
        for key in confidence_dict:
            if op_dict[key] <= confidence_dict[key]:
                species_set_.add(key)
        if len(species_set ^ species_set_) == 0:
            correct += 1
        else:
            print(test_gid, confidence_dict, species_set)
    print('Accuracy: %0.04f' % (100.0 * correct / len(test_gid_list)))
    print('\t using op_dict = %r' % (op_dict, ))
    fig_filename = 'classifier2-precision-recall-roc.png'
    fig_path = abspath(expanduser(join('~', 'Desktop', fig_filename)))
    plt.savefig(fig_path, bbox_inches='tight')
def labeler_tp_tn_fp_fn(ibs, category_list, species_mapping=None, viewpoint_mapping=None,
                        samples=SAMPLES, test_gid_set=None, **kwargs):
    """Tally per-label (tp, tn, fp, fn) counts over a confidence sweep.

    For every 'species:viewpoint' label present in the test annotations,
    sweeps ``samples + 1`` evenly spaced confidence thresholds and counts
    one-vs-rest true/false positives/negatives against the labeler's
    predicted probability for that label.

    Args:
        ibs: controller instance.
        category_list (list): species names to keep (after mapping).
        species_mapping (dict, optional): raw-to-canonical species renames.
        viewpoint_mapping (dict, optional): per-species viewpoint renames.
        samples (int): number of threshold steps across [0, 1].
        test_gid_set (iterable, optional): image rowids; defaults to TEST_SET.
        **kwargs: forwarded as the 'labeler' depc config.

    Returns:
        dict: label -> {threshold -> (tp, tn, fp, fn)}.
    """
    # Fix: avoid mutable default arguments ({} shared across calls).
    if species_mapping is None:
        species_mapping = {}
    if viewpoint_mapping is None:
        viewpoint_mapping = {}

    def errors(zipped, conf, category):
        # One-vs-rest confusion counts for ``category`` at threshold ``conf``.
        tp, tn, fp, fn = 0.0, 0.0, 0.0, 0.0
        for label, confidence in zipped:
            if label == category:
                if conf <= confidence:
                    tp += 1
                else:
                    fn += 1
            else:
                if conf <= confidence:
                    fp += 1
                else:
                    tn += 1
        return tp, tn, fp, fn
    depc = ibs.depc_annot
    if test_gid_set is None:
        test_gid_set = set(general_get_imageset_gids(ibs, 'TEST_SET'))
    test_gid_set = list(test_gid_set)
    aids_list = ibs.get_image_aids(test_gid_set)
    aid_list = ut.flatten(aids_list)
    # Get annot species and viewpoints
    species_list = ibs.get_annot_species_texts(aid_list)
    viewpoint_list = ibs.get_annot_viewpoints(aid_list)
    # Filter aids with species of interest and undefined viewpoints
    species_list = [
        species_mapping.get(species, species)
        for species in species_list
    ]
    viewpoint_list = [
        viewpoint_mapping.get(species, {}).get(viewpoint, viewpoint)
        for species, viewpoint in zip(species_list, viewpoint_list)
    ]
    flag_list = [
        species in category_list and viewpoint is not None
        for species, viewpoint in zip(species_list, viewpoint_list)
    ]
    if False in flag_list:
        aid_list = ut.compress(aid_list, flag_list)
        species_list = ut.compress(species_list, flag_list)
        viewpoint_list = ut.compress(viewpoint_list, flag_list)
    # Make ground-truth 'species:viewpoint' labels.
    label_list = [
        '%s:%s' % (species, viewpoint_, )
        for species, viewpoint_ in zip(species_list, viewpoint_list)
    ]
    # Get predictions
    # depc.delete_property('labeler', aid_list, config=kwargs)
    probability_dict_list = depc.get_property('labeler', aid_list, 'probs', config=kwargs)
    # Sanity check: ground-truth label set must match the model's class set.
    value1_list = set(label_list)
    value2_list = set(probability_dict_list[0].keys())
    assert len(value1_list - value2_list) == 0
    assert len(value2_list - value1_list) == 0
    # Evenly spaced thresholds 0/samples .. samples/samples.
    conf_list = [ _ / float(samples) for _ in range(0, int(samples) + 1) ]
    label_dict = {}
    for key in value1_list:
        print('\t%r' % (key, ))
        conf_dict = {}
        confidence_list = [
            probability_dict[key]
            for probability_dict in probability_dict_list
        ]
        zipped = list(zip(label_list, confidence_list))
        for conf in conf_list:
            conf_dict[conf] = errors(zipped, conf, key)
        label_dict[key] = conf_dict
    return label_dict
def labeler_precision_recall_algo(ibs, category_list, label_dict, **kwargs):
    """Aggregate per-label error tallies into P-R / ROC curve series.

    Sums the (tp, tn, fp, fn) tuples of the selected labels at every
    confidence threshold, then derives precision, recall, TPR, and FPR per
    threshold (descending).  Thresholds whose rates would divide by zero are
    reported and skipped.  The series are seeded with the two curve
    endpoints (sentinel confidence -1.0).

    Args:
        ibs: unused; kept for signature parity with the sibling algos.
        category_list (list or None): substrings selecting which labels of
            ``label_dict`` to aggregate; None selects all labels.
        label_dict (dict): label -> {threshold -> (tp, tn, fp, fn)}, as
            produced by ``labeler_tp_tn_fp_fn``.

    Returns:
        tuple: (conf_list, pr_list, re_list, tpr_list, fpr_list).
    """
    # Resolve which labels participate in the aggregation.
    if category_list is None:
        selected_keys = label_dict.keys()
    else:
        selected_keys = [
            key
            for category in category_list
            for key in label_dict
            if category in key or category is None
        ]
    # Sum error tuples element-wise across the selected labels, per threshold.
    combined = {}
    for key in selected_keys:
        conf_dict = label_dict[key]
        for conf, new_counts in conf_dict.items():
            if conf in combined:
                combined[conf] = [
                    cur + new for cur, new in zip(combined[conf], new_counts)
                ]
            else:
                combined[conf] = new_counts
    # Seed each series with the curve endpoints (sentinel conf of -1.0).
    conf_list_ = [-1.0, -1.0]
    pr_list = [1.0, 0.0]
    re_list = [0.0, 1.0]
    tpr_list = [0.0, 1.0]
    fpr_list = [0.0, 1.0]
    for conf in sorted(combined, reverse=True):
        tp, tn, fp, fn = combined[conf]
        try:
            pr = tp / (tp + fp)
            re = tp / (tp + fn)
            tpr = tp / (tp + fn)
            fpr = fp / (fp + tn)
        except ZeroDivisionError:
            print('Zero division error (%r) - tp: %r tn: %r fp: %r fn: %r' % (conf, tp, tn, fp, fn, ))
            continue
        conf_list_.append(conf)
        pr_list.append(pr)
        re_list.append(re)
        tpr_list.append(tpr)
        fpr_list.append(fpr)
    return conf_list_, pr_list, re_list, tpr_list, fpr_list
def labeler_precision_recall_algo_plot(ibs, **kwargs):
    """Plot the precision-recall curve for a labeler category selection.

    Aggregates the label tallies via ``labeler_precision_recall_algo`` and
    passes the (recall, precision) series to ``general_area_best_conf``.
    """
    label = kwargs['label']
    category_list = kwargs['category_list']
    print('Processing Precision-Recall for: %r (category_list = %r)' % (label, category_list, ))
    values = labeler_precision_recall_algo(ibs, **kwargs)
    conf_list, pr_list, re_list, tpr_list, fpr_list = values
    return general_area_best_conf(conf_list, re_list, pr_list, **kwargs)
def labeler_roc_algo_plot(ibs, **kwargs):
    """Plot the ROC curve for a labeler category selection.

    Same aggregation as the precision-recall plot, but feeds the
    (FPR, TPR) series to ``general_area_best_conf`` targeting the ideal
    (0, 1) corner without interpolation.
    """
    label = kwargs['label']
    category_list = kwargs['category_list']
    print('Processing ROC for: %r (category_list = %r)' % (label, category_list, ))
    values = labeler_precision_recall_algo(ibs, **kwargs)
    conf_list, pr_list, re_list, tpr_list, fpr_list = values
    return general_area_best_conf(conf_list, fpr_list, tpr_list, interpolate=False,
                                  target=(0.0, 1.0), **kwargs)
def labeler_confusion_matrix_algo_plot(ibs, category_list, species_mapping=None,
                                       viewpoint_mapping=None, category_mapping=None,
                                       test_gid_set=None, **kwargs):
    """Plot the labeler's confusion matrix over 'species:viewpoint' labels.

    Builds mapped ground-truth labels for the test annotations, queries the
    labeler's predicted species/viewpoint from the dependency cache, and
    renders the confusion matrix via ``general_confusion_matrix_algo``.

    Args:
        ibs: controller instance.
        category_list (list): 'species:viewpoint' labels to include; annots
            with other labels are dropped.
        species_mapping (dict, optional): raw-to-canonical species renames.
        viewpoint_mapping (dict, optional): per-species viewpoint renames.
        category_mapping (dict, optional): label -> matrix index; defaults
            to the order of ``category_list``.
        test_gid_set (iterable, optional): image rowids; defaults to TEST_SET.
        **kwargs: forwarded as the 'labeler' depc config and to the plotter.

    Returns:
        The result of ``general_confusion_matrix_algo``.
    """
    # Fix: avoid mutable default arguments ({} shared across calls).
    if species_mapping is None:
        species_mapping = {}
    if viewpoint_mapping is None:
        viewpoint_mapping = {}
    print('Processing Confusion Matrix')
    depc = ibs.depc_annot
    if test_gid_set is None:
        test_gid_set = set(general_get_imageset_gids(ibs, 'TEST_SET'))
    test_gid_set = list(test_gid_set)
    aids_list = ibs.get_image_aids(test_gid_set)
    aid_list = ut.flatten(aids_list)
    species_list = ibs.get_annot_species_texts(aid_list)
    viewpoint_list = ibs.get_annot_viewpoints(aid_list)
    # Mapped ground-truth 'species:viewpoint' labels.
    label_list = [
        '%s:%s' % (
            species_mapping.get(species, species),
            viewpoint_mapping.get(species, {}).get(viewpoint, viewpoint),
        )
        for species, viewpoint in zip(species_list, viewpoint_list)
    ]
    # Drop annotations whose label is outside the requested categories.
    temp_list = [
        (aid, label)
        for aid, label in zip(aid_list, label_list)
        if label in category_list
    ]
    aid_list = [_[0] for _ in temp_list]
    label_list = [_[1] for _ in temp_list]
    # Labeler predictions from the dependency cache.
    conf_list = depc.get_property('labeler', aid_list, 'score', config=kwargs)
    species_list = depc.get_property('labeler', aid_list, 'species', config=kwargs)
    viewpoint_list = depc.get_property('labeler', aid_list, 'viewpoint', config=kwargs)
    prediction_list = [
        '%s:%s' % (species, viewpoint, )
        for species, viewpoint in zip(species_list, viewpoint_list)
    ]
    # Shorten all labels consistently for display.
    category_list = list(map(simple_code, category_list))
    label_list = list(map(simple_code, label_list))
    prediction_list = list(map(simple_code, prediction_list))
    if category_mapping is None:
        category_mapping = { key: index for index, key in enumerate(category_list) }
    # Re-key the mapping with the shortened label codes.
    category_mapping = {
        simple_code(key): category_mapping[key]
        for key in category_mapping
    }
    return general_confusion_matrix_algo(label_list, prediction_list, category_list,
                                         category_mapping, conf_list=conf_list,
                                         size=8, **kwargs)
@register_ibs_method
def labeler_precision_recall_algo_display(ibs, category_list=None, species_mapping={}, viewpoint_mapping={},
                                          category_mapping=None, fuzzy_dict=None,
                                          figsize=(30, 9), test_gid_set=None,
                                          use_axis_aligned_chips=False,
                                          labeler_weight_filepath=None,
                                          config_list=None, **kwargs):
    """Build and save a 3-panel evaluation figure for the labeler.

    Panel 131 shows precision-recall curves and panel 132 ROC curves for
    each config in ``config_list``; panel 133 shows the confusion matrix
    over all 'species:viewpoint' labels with mAP in the title.  The figure
    is saved to ``~/Desktop/labeler-precision-recall-roc.png``.

    NOTE(review): ``species_mapping``/``viewpoint_mapping`` use mutable
    ``{}`` defaults; harmless while unmutated, but None-sentinels would be
    safer.

    Args:
        ibs: controller instance.
        category_list (list, optional): species to evaluate; defaults to all
            (mapped) species found on the test annotations.
        species_mapping (dict): raw-to-canonical species renames.
        viewpoint_mapping (dict): per-species viewpoint renames.
        category_mapping (dict, optional): label -> confusion-matrix index.
        fuzzy_dict (dict, optional): index -> set of "close enough" indices;
            auto-built from shared species prefixes when None.
        figsize (tuple): matplotlib figure size in inches.
        test_gid_set (iterable, optional): image rowids; defaults to TEST_SET.
        use_axis_aligned_chips (bool): forwarded as labeler_axis_aligned.
        labeler_weight_filepath (str, optional): model weights to use;
            defaults to 'seadragon_v2'.
        config_list (list of dict, optional): curve configs, each with a
            'label' and a 'category_list'.
        **kwargs: forwarded as the 'labeler' depc config.
    """
    import matplotlib.pyplot as plt
    import plottool as pt
    if category_list is None:
        # Default to every (mapped) species present on the test annotations.
        if test_gid_set is None:
            test_gid_set = set(general_get_imageset_gids(ibs, 'TEST_SET'))
        test_gid_set = list(test_gid_set)
        aids_list = ibs.get_image_aids(test_gid_set)
        aid_list = ut.flatten(aids_list)
        species_list = ibs.get_annot_species_texts(aid_list)
        species_list = [
            species_mapping.get(species, species)
            for species in species_list
        ]
        category_list = sorted(list(set(species_list)))
    print('Compiling raw numbers...')
    kwargs['labeler_algo'] = 'densenet'
    if labeler_weight_filepath is None:
        # Historical model weights kept for reference; seadragon_v2 is active.
        # kwargs['labeler_weight_filepath'] = 'zebra_v1'
        # kwargs['labeler_weight_filepath'] = 'seaturtle'
        # kwargs['labeler_weight_filepath'] = 'giraffe_v1'
        # kwargs['labeler_weight_filepath'] = 'lynx_v3'
        # kwargs['labeler_weight_filepath'] = 'seaturtle_v3'
        # kwargs['labeler_weight_filepath'] = 'jaguar_v3'
        # kwargs['labeler_weight_filepath'] = 'hendrik_dorsal_v2'
        # kwargs['labeler_weight_filepath'] = 'spotted_skunk_v0'
        # kwargs['labeler_weight_filepath'] = 'nassau_grouper_v0'
        # kwargs['labeler_weight_filepath'] = 'spotted_dolphin_v0'
        # kwargs['labeler_weight_filepath'] = 'seadragon_v1'
        kwargs['labeler_weight_filepath'] = 'seadragon_v2'
    else:
        kwargs['labeler_weight_filepath'] = labeler_weight_filepath
    kwargs['labeler_axis_aligned'] = use_axis_aligned_chips
    # Raw per-label error tallies across the confidence sweep.
    label_dict = labeler_tp_tn_fp_fn(ibs, category_list, species_mapping=species_mapping, viewpoint_mapping=viewpoint_mapping,
                                     test_gid_set=test_gid_set, **kwargs)
    if config_list is None:
        # Historical curve configs kept for reference; seadragon set active.
        config_list = [
            # {'label': 'Giraffe', 'category_list': None},
            # {'label': 'Masai Giraffe', 'category_list': ['giraffe_masai']},
            # {'label': 'Reticulated Giraffe', 'category_list': ['giraffe_reticulated']},
            # {'label': 'Lynx', 'category_list': ['lynx_pardinus']},
            # {'label': 'Sea Turtle', 'category_list': ['turtle_sea']},
            # {'label': 'Sea Turtle Head', 'category_list': ['turtle_sea+head']},
            # {'label': 'Manta', 'category_list': ['manta_ray_giant']},
            # {'label': 'Jaguar', 'category_list': ['jaguar']},
            # {'label': 'Dorsal Fin', 'category_list': ['dolphin_bottlenose_fin']},
            # {'label': 'Reticulated Giraffe', 'category_list': ['giraffe_reticulated']},
            # {'label': 'Sea Turtle', 'category_list': ['turtle_sea']},
            # {'label': 'Whale Fluke', 'category_list': ['whale_fluke']},
            # {'label': 'Grevy\'s Zebra', 'category_list': ['zebra_grevys']},
            # {'label': 'Plains Zebra', 'category_list': ['zebra_plains']},
            # {'label': 'Spotted Skunk', 'category_list': ['skunk_spotted']},
            # {'label': 'Nassau Grouper', 'category_list': ['grouper_nassau']},
            # {'label': 'Spotted Dolphin', 'category_list': ['dolphin_spotted']},
            # {'label': 'Spotted Dolphin', 'category_list': ['dolphin_spotted']},
            {'label': 'Weedy SD  ', 'category_list': ['seadragon_weedy']},
            {'label': 'Weedy Head', 'category_list': ['seadragon_weedy+head']},
            {'label': 'Leafy SD  ', 'category_list': ['seadragon_leafy']},
            {'label': 'Leafy Head', 'category_list': ['seadragon_leafy+head']},
        ]
    # Black for the first curve, distinct colors for the rest.
    color_list = [(0.0, 0.0, 0.0)]
    color_list += pt.distinct_colors(len(config_list) - len(color_list), randomize=False)
    fig_ = plt.figure(figsize=figsize, dpi=400)  # NOQA
    # --- Panel 131: precision-recall curves; collect areas for mAP ---
    axes_ = plt.subplot(131)
    axes_.set_autoscalex_on(False)
    axes_.set_autoscaley_on(False)
    axes_.set_xlabel('Recall')
    axes_.set_ylabel('Precision')
    axes_.set_xlim([0.0, 1.01])
    axes_.set_ylim([0.0, 1.01])
    area_list = []
    for color, config in zip(color_list, config_list):
        ret = labeler_precision_recall_algo_plot(ibs, label_dict=label_dict,
                                                 color=color, **config)
        area = ret[0]
        area_list.append(area)
    plt.title('Precision-Recall Curve', y=1.19)
    plt.legend(bbox_to_anchor=(0.0, 1.02, 1.0, .102), loc=3, ncol=2, mode="expand",
               borderaxespad=0.0)
    # --- Panel 132: ROC curves ---
    axes_ = plt.subplot(132)
    axes_.set_autoscalex_on(False)
    axes_.set_autoscaley_on(False)
    axes_.set_xlabel('False-Positive Rate')
    axes_.set_ylabel('True-Positive Rate')
    axes_.set_xlim([0.0, 1.01])
    axes_.set_ylim([0.0, 1.01])
    for color, config in zip(color_list, config_list):
        labeler_roc_algo_plot(ibs, label_dict=label_dict,
                              color=color, **config)
    plt.title('ROC Curve', y=1.19)
    plt.legend(bbox_to_anchor=(0.0, 1.02, 1.0, .102), loc=3, ncol=2, mode="expand",
               borderaxespad=0.0)
    key_list = sorted(label_dict.keys())
    fuzzy = fuzzy_dict is not None
    if not fuzzy:
        # Auto-build fuzzy sets: labels sharing a species are "close enough".
        fuzzy_dict = {}
        for index1, label1 in enumerate(key_list):
            if label1 == 'ignore':
                fuzzy_list = []
            else:
                species, viewpoint = label1.strip().split(':')
                fuzzy_list = []
                for index2, label2 in enumerate(key_list):
                    if species in label2:
                        fuzzy_list.append(index2)
            fuzzy_dict[index1] = set(fuzzy_list)
    # --- Panel 133: confusion matrix over all labels ---
    axes_ = plt.subplot(133)
    axes_.set_aspect(1)
    gca_ = plt.gca()
    gca_.grid(False)
    correct_rate, fuzzy_rate = labeler_confusion_matrix_algo_plot(
        ibs,
        key_list,
        species_mapping=species_mapping,
        viewpoint_mapping=viewpoint_mapping,
        category_mapping=category_mapping,
        fig_=fig_,
        axes_=axes_,
        fuzzy_dict=fuzzy_dict,
        test_gid_set=test_gid_set,
        **kwargs
    )
    # NOTE(review): both branches display the same fuzzy_rate value; the
    # 'Species' wording presumably reflects the auto-built species-level
    # fuzzy sets — confirm intended.
    if fuzzy:
        axes_.set_xlabel('Predicted (Correct = %0.02f%%, Fuzzy = %0.02f%%)' % (correct_rate * 100.0, fuzzy_rate * 100.0, ))
    else:
        axes_.set_xlabel('Predicted (Correct = %0.02f%%, Species = %0.02f%%)' % (correct_rate * 100.0, fuzzy_rate * 100.0, ))
    axes_.set_ylabel('Ground-Truth')
    # mAP over all plotted curves (first/aggregate curve included).
    # area_list_ = area_list[1:]
    area_list_ = area_list
    mAP = sum(area_list_) / len(area_list_)
    args = (mAP * 100.0, )
    plt.title('Confusion Matrix\nmAP = %0.02f' % args, y=1.19)
    fig_filename = 'labeler-precision-recall-roc.png'
    fig_path = abspath(expanduser(join('~', 'Desktop', fig_filename)))
    plt.savefig(fig_path, bbox_inches='tight')
def canonical_precision_recall_algo(ibs, species, **kwargs):
    """Compute precision-recall inputs for the canonical-annotation classifier.

    Ground truth is each TEST_SET annotation's canonical flag (for the given
    species); predictions come from the 'classifier' dependency-cache
    class/score properties under ``kwargs``, folded into a positive-class
    confidence.
    """
    depc = ibs.depc_annot
    # Gather all TEST_SET annotations of the requested species.
    gid_list = list(set(general_get_imageset_gids(ibs, 'TEST_SET')))
    aid_list = ut.flatten(ibs.get_image_aids(gid_list))
    aid_list = ibs.filter_annotation_set(aid_list, species=species)
    flag_list = ibs.get_annot_canonical(aid_list)
    # Canonical flag -> binary ground-truth label.
    zipped = list(zip(aid_list, flag_list))
    test_aid_set = [aid for aid, _ in zipped]
    label_list = ['positive' if flag else 'negative' for _, flag in zipped]
    prediction_list = depc.get_property('classifier', test_aid_set, 'class', config=kwargs)
    confidence_list = depc.get_property('classifier', test_aid_set, 'score', config=kwargs)
    # Fold the predicted class into a single positive-class confidence.
    confidence_list = [
        confidence if prediction == 'positive' else 1.0 - confidence
        for prediction, confidence in zip(prediction_list, confidence_list)
    ]
    return general_precision_recall_algo(ibs, label_list, confidence_list)
def canonical_precision_recall_algo_plot(ibs, **kwargs):
    """Plot the precision-recall curve for the canonical classifier.

    Delegates metric computation to ``canonical_precision_recall_algo`` and
    hands the (recall, precision) series to ``general_area_best_conf``.
    """
    print('Processing Precision-Recall for: %r' % (kwargs['label'], ))
    values = canonical_precision_recall_algo(ibs, **kwargs)
    conf_list, pr_list, re_list, tpr_list, fpr_list = values
    return general_area_best_conf(conf_list, re_list, pr_list, **kwargs)
def canonical_roc_algo_plot(ibs, **kwargs):
    """Plot the ROC curve for the canonical classifier.

    Uses the same underlying metrics as the precision-recall plot, feeding
    the (FPR, TPR) series to ``general_area_best_conf`` targeting the ideal
    (0, 1) corner without interpolation.
    """
    print('Processing ROC for: %r' % (kwargs['label'], ))
    values = canonical_precision_recall_algo(ibs, **kwargs)
    conf_list, pr_list, re_list, tpr_list, fpr_list = values
    return general_area_best_conf(conf_list, fpr_list, tpr_list, interpolate=False,
                                  target=(0.0, 1.0), **kwargs)
def canonical_confusion_matrix_algo_plot(ibs, label, color, conf, species, output_cases=False, **kwargs):
    """Render a confusion matrix for the canonical-annotation classifier.

    Labels every TEST_SET annotation of *species* as 'positive'/'negative'
    from its canonical flag, thresholds classifier confidence at ``conf``
    to obtain predictions, and delegates plotting to
    general_confusion_matrix_algo.

    Args:
        ibs: controller object
        label: display label for the log line (note: shadowed by the
            per-annotation label inside the loop below)
        color: plot color, forwarded via kwargs
        conf (float): operating-point threshold on positive confidence
        species (str): species filter for test annotations
        output_cases (bool): when True, write chips of misclassified
            annotations to ~/Desktop/canonical-confusion-incorrect

    Returns:
        result of general_confusion_matrix_algo
    """
    print('Processing Confusion Matrix for: %r (Conf = %0.02f)' % (label, conf, ))
    depc = ibs.depc_annot
    # Gather all TEST_SET annotations of the requested species
    test_gid_set_ = set(general_get_imageset_gids(ibs, 'TEST_SET'))
    test_gid_list_ = list(test_gid_set_)
    test_aid_list_ = ut.flatten(ibs.get_image_aids(test_gid_list_))
    test_aid_list_ = ibs.filter_annotation_set(test_aid_list_, species=species)
    test_flag_list_ = ibs.get_annot_canonical(test_aid_list_)
    test_aid_set = []
    label_list = []
    # Ground-truth label comes straight from the canonical flag
    for aid, flag in zip(test_aid_list_, test_flag_list_):
        if flag:
            label = 'positive'
        else:
            label = 'negative'
        test_aid_set.append(aid)
        label_list.append(label)
    prediction_list = depc.get_property('classifier', test_aid_set, 'class', config=kwargs)
    confidence_list = depc.get_property('classifier', test_aid_set, 'score', config=kwargs)
    # Re-express each score as confidence in the 'positive' class
    confidence_list = [
        confidence if prediction == 'positive' else 1.0 - confidence
        for prediction, confidence in zip(prediction_list, confidence_list)
    ]
    # Apply the operating point to obtain final predictions
    prediction_list = [
        'positive' if confidence >= conf else 'negative'
        for confidence in confidence_list
    ]
    if output_cases:
        # Dump a chip for every misclassified annotation, bucketed by its
        # ground-truth label, for manual failure review
        output_path = 'canonical-confusion-incorrect'
        output_path = abspath(expanduser(join('~', 'Desktop', output_path)))
        positive_path = join(output_path, 'positive')
        negative_path = join(output_path, 'negative')
        ut.delete(output_path)
        ut.ensuredir(output_path)
        ut.ensuredir(positive_path)
        ut.ensuredir(negative_path)
        config = {
            'dim_size': (192, 192),
            'resize_dim': 'wh',
        }
        chip_list = ibs.depc_annot.get_property('chips', test_aid_set, 'img', config=config)
        zipped = zip(test_aid_set, chip_list, label_list, prediction_list)
        for aid, chip, label, prediction in zipped:
            if label == prediction:
                continue
            # Get path
            image_path = positive_path if label == 'positive' else negative_path
            image_filename = 'hardidx_%d_pred_%s_case_fail.jpg' % (aid, prediction, )
            image_filepath = join(image_path, image_filename)
            # Save path
            cv2.imwrite(image_filepath, chip)
    category_list = ['positive', 'negative']
    category_mapping = {
        'positive': 0,
        'negative': 1,
    }
    return general_confusion_matrix_algo(label_list, prediction_list, category_list,
                                         category_mapping, **kwargs)
@register_ibs_method
def canonical_precision_recall_algo_display(ibs, figsize=(20, 20)):
    """Render and save the canonical-classifier evaluation dashboard.

    Draws a 2x2 figure — precision-recall curves (top-left), ROC curves
    (top-right), and a confusion matrix at each curve's chosen operating
    point (bottom row) — for the configured canonical-annotation classifier
    ensembles, then saves it to ~/Desktop/canonical-precision-recall-roc.png.
    """
    import matplotlib.pyplot as plt
    import plottool as pt
    fig_ = plt.figure(figsize=figsize, dpi=400)
    # Each config is one curve; commented entries are retained history
    config_list = [
        {'label': 'CA V1 Ensemble', 'classifier_algo': 'densenet', 'classifier_weight_filepath': 'canonical_zebra_grevys_v1', 'species': 'zebra_grevys'},  # SMALLER DATASET
        {'label': 'CA V2 Ensemble', 'classifier_algo': 'densenet', 'classifier_weight_filepath': 'canonical_zebra_grevys_v2', 'species': 'zebra_grevys'},  # BROKEN L/R AUGMENTATION
        {'label': 'CA V3 Ensemble', 'classifier_algo': 'densenet', 'classifier_weight_filepath': 'canonical_zebra_grevys_v3', 'species': 'zebra_grevys'},  # LARGER DATASET, TOO HARSH AUGMENTATION
        {'label': 'CA V4 Ensemble', 'classifier_algo': 'densenet', 'classifier_weight_filepath': 'canonical_zebra_grevys_v4', 'species': 'zebra_grevys'},  # BETTER AUGMENTATION
        # {'label': 'CA V4 Model 0', 'classifier_algo': 'densenet', 'classifier_weight_filepath': 'canonical_zebra_grevys_v4:0', 'species': 'zebra_grevys'},
        # {'label': 'CA V4 Model 1', 'classifier_algo': 'densenet', 'classifier_weight_filepath': 'canonical_zebra_grevys_v4:1', 'species': 'zebra_grevys'},
        # {'label': 'CA V4 Model 2', 'classifier_algo': 'densenet', 'classifier_weight_filepath': 'canonical_zebra_grevys_v4:2', 'species': 'zebra_grevys'},
    ]
    color_list = []
    # color_list = [(0, 0, 0)]
    color_list += pt.distinct_colors(len(config_list) - len(color_list), randomize=False)
    # Subplot 221: precision-recall curves
    axes_ = plt.subplot(221)
    axes_.set_autoscalex_on(False)
    axes_.set_autoscaley_on(False)
    axes_.set_xlabel('Recall')
    axes_.set_ylabel('Precision')
    axes_.set_xlim([0.0, 1.01])
    axes_.set_ylim([0.0, 1.01])
    ret_list = [
        canonical_precision_recall_algo_plot(ibs, color=color, **config)
        for color, config in zip(color_list, config_list)
    ]
    area_list = [ ret[0] for ret in ret_list ]
    conf_list = [ ret[1] for ret in ret_list ]
    # index = np.argmax(area_list)
    # NOTE: "best" is hard-coded to the last config, not the max area
    index = -1
    best_label1 = config_list[index]['label']
    best_config1 = config_list[index]
    best_color1 = color_list[index]
    best_area1 = area_list[index]
    best_conf1 = conf_list[index]
    plt.title('Precision-Recall Curve (Best: %s, AP = %0.02f)' % (best_label1, best_area1, ), y=1.10)
    plt.legend(bbox_to_anchor=(0.0, 1.02, 1.0, .102), loc=3, ncol=2, mode="expand",
               borderaxespad=0.0)
    # Subplot 222: ROC curves
    axes_ = plt.subplot(222)
    axes_.set_autoscalex_on(False)
    axes_.set_autoscaley_on(False)
    axes_.set_xlabel('False-Positive Rate')
    axes_.set_ylabel('True-Positive Rate')
    axes_.set_xlim([0.0, 1.01])
    axes_.set_ylim([0.0, 1.01])
    ret_list = [
        canonical_roc_algo_plot(ibs, color=color, **config)
        for color, config in zip(color_list, config_list)
    ]
    area_list = [ ret[0] for ret in ret_list ]
    conf_list = [ ret[1] for ret in ret_list ]
    # index = np.argmax(area_list)
    index = -1
    best_label2 = config_list[index]['label']
    best_config2 = config_list[index]
    best_color2 = color_list[index]
    best_area2 = area_list[index]
    best_conf2 = conf_list[index]
    plt.title('ROC Curve (Best: %s, AP = %0.02f)' % (best_label2, best_area2, ), y=1.10)
    plt.legend(bbox_to_anchor=(0.0, 1.02, 1.0, .102), loc=3, ncol=2, mode="expand",
               borderaxespad=0.0)
    # Subplot 223: confusion matrix at the PR operating point
    # (also dumps failure-case chips via output_cases=True)
    axes_ = plt.subplot(223)
    axes_.set_aspect(1)
    gca_ = plt.gca()
    gca_.grid(False)
    correct_rate, _ = canonical_confusion_matrix_algo_plot(ibs, color=best_color1,
                                                           conf=best_conf1, fig_=fig_, axes_=axes_,
                                                           output_cases=True, **best_config1)
    axes_.set_xlabel('Predicted (Correct = %0.02f%%)' % (correct_rate * 100.0, ))
    axes_.set_ylabel('Ground-Truth')
    plt.title('P-R Confusion Matrix (OP = %0.02f)' % (best_conf1, ), y=1.12)
    # Subplot 224: confusion matrix at the ROC operating point
    axes_ = plt.subplot(224)
    axes_.set_aspect(1)
    gca_ = plt.gca()
    gca_.grid(False)
    correct_rate, _ = canonical_confusion_matrix_algo_plot(ibs, color=best_color2,
                                                           conf=best_conf2, fig_=fig_, axes_=axes_,
                                                           **best_config2)
    axes_.set_xlabel('Predicted (Correct = %0.02f%%)' % (correct_rate * 100.0, ))
    axes_.set_ylabel('Ground-Truth')
    plt.title('ROC Confusion Matrix (OP = %0.02f)' % (best_conf2, ), y=1.12)
    fig_filename = 'canonical-precision-recall-roc.png'
    fig_path = abspath(expanduser(join('~', 'Desktop', fig_filename)))
    plt.savefig(fig_path, bbox_inches='tight')
def _canonical_get_boxes(ibs, gid_list, species):
    """Collect (aid, bbox) pairs for annotations that have a canonical part.

    The returned bbox is (x0, y0, x1, y1): the margins between the
    annotation box and its canonical-part box, normalized by the
    annotation's width/height and clipped at zero.
    """
    from ibeis.web.appfuncs import CANONICAL_PART_TYPE
    aid_list = ut.flatten(ibs.get_image_aids(gid_list))
    aid_list = ibs.filter_annotation_set(aid_list, species=species)
    flag_list = ibs.get_annot_canonical(aid_list)
    part_rowids_list = ibs.get_annot_part_rowids(aid_list)
    part_types_list = list(map(ibs.get_part_types, part_rowids_list))
    aid_set = []
    bbox_set = []
    for aid, flag, rowid_list, type_list in zip(aid_list, flag_list,
                                                part_rowids_list, part_types_list):
        canonical_rowid = None
        if flag:
            # Locate this annotation's unique canonical part, if any
            for rowid, part_type in zip(rowid_list, type_list):
                if part_type == CANONICAL_PART_TYPE:
                    assert canonical_rowid is None, 'Cannot have multiple CA for one image'
                    canonical_rowid = rowid
        if canonical_rowid is None:
            continue
        axtl, aytl, aw, ah = ibs.get_annot_bboxes(aid)
        axbr, aybr = axtl + aw, aytl + ah
        pxtl, pytl, pw, ph = ibs.get_part_bboxes(canonical_rowid)
        pxbr, pybr = pxtl + pw, pytl + ph
        # Normalized, non-negative margins on each side
        x0 = max((pxtl - axtl) / aw, 0.0)
        y0 = max((pytl - aytl) / ah, 0.0)
        x1 = max((axbr - pxbr) / aw, 0.0)
        y1 = max((aybr - pybr) / ah, 0.0)
        # Sanity: opposing margins must leave a visible box
        assert x0 + x1 < 0.99
        assert y0 + y1 < 0.99
        aid_set.append(aid)
        bbox_set.append((x0, y0, x1, y1))
    return aid_set, bbox_set
def canonical_localization_deviation_plot(ibs, attribute, color, index,
                                          label=None, species=None, marker='o',
                                          **kwargs):
    """Scatter-plot GT-minus-prediction deviation for one bbox component.

    Plots one jittered column at x in [index, index + 1] per config, plus a
    zero line and an errorbar marker at mean +/- std. Negative deviation
    means the prediction overshot the ground truth.
    """
    import random
    import matplotlib.pyplot as plt
    assert None not in [label, species]
    print('Processing Deviation for: %r' % (label, ))
    depc = ibs.depc_annot
    # Map the bbox component name onto its column in the bbox tuples
    take_index = {'x0': 0, 'y0': 1, 'x1': 2, 'y1': 3}.get(attribute)
    if take_index is None:
        raise ValueError('attribute not valid')
    gid_list = list(set(general_get_imageset_gids(ibs, 'TEST_SET')))
    test_aid_set, test_bbox_set = _canonical_get_boxes(ibs, gid_list, species)
    value_list = ut.take_column(test_bbox_set, take_index)
    prediction_list = depc.get_property('canonical', test_aid_set, attribute, config=kwargs)
    x_list = []
    y_list = []
    overshoot = 0.0
    for value, prediction in zip(value_list, prediction_list):
        # Jitter the x coordinate so points in a column stay visible
        x_list.append(random.uniform(index, index + 1))
        delta = value - prediction
        if delta < 0:
            overshoot += 1
        y_list.append(delta)
    mean = np.mean(y_list)
    std = np.std(y_list)
    overshoot /= len(y_list)
    label = '%s (Over: %0.02f, %0.02f+/-%0.02f)' % (label, overshoot, mean, std, )
    plt.plot(x_list, y_list, color=color, linestyle='None', marker=marker, label=label, alpha=0.5)
    plt.plot([index, index + 1], [0.0, 0.0], color=(0.2, 0.2, 0.2), linestyle='-', alpha=0.3)
    # Dashed divider after every group of four columns
    if index % 4 == 3:
        plt.plot([index + 1, index + 1], [-1.0, 1.0], color=(0.2, 0.2, 0.2), linestyle='--', alpha=0.1)
    # Gold diamond summarizing mean +/- std for this column
    plt.errorbar([index + 0.5], [mean], [std], linestyle='None', color='xkcd:gold',
                 marker='D', zorder=999, barsabove=True)
def canonical_localization_iou_plot(ibs, color, index,
                                    label=None, species=None, marker='o',
                                    threshold=0.75, **kwargs):
    """Scatter-plot per-annotation IoU between GT and predicted canonical boxes.

    Plots one jittered column at x in [index, index + 1], reference lines
    at IoU 0.5/0.75/0.9, and a mean +/- std errorbar marker. Accuracy is
    the fraction of annotations with IoU >= ``threshold``.

    Returns:
        tuple: (test_aid_set, test_bbox_set, prediction_list, y_list, accuracy)
        so the caller can reuse the predictions (e.g. for rendering).
    """
    import random
    import matplotlib.pyplot as plt
    def _convert(bbox):
        # Convert (x0, y0, x1, y1) margins into corner coordinates plus
        # width/height, the dict shape expected by general_overlap
        x0, y0, x1, y1 = bbox
        retval = {
            'xtl' : x0,
            'ytl' : y0,
            'xbr' : 1.0 - x1,
            'ybr' : 1.0 - y1,
        }
        retval['width'] = retval['xbr'] - retval['xtl']
        retval['height'] = retval['ybr'] - retval['ytl']
        return retval
    assert None not in [label, species]
    print('Processing IoU for: %r' % (label, ))
    depc = ibs.depc_annot
    test_gid_set_ = set(general_get_imageset_gids(ibs, 'TEST_SET'))
    test_gid_list_ = list(test_gid_set_)
    test_aid_set, test_bbox_set = _canonical_get_boxes(ibs, test_gid_list_, species)
    prediction_list = depc.get_property('canonical', test_aid_set, None, config=kwargs)
    gt_list = [_convert(test_bbox) for test_bbox in test_bbox_set]
    pred_list = [_convert(prediction) for prediction in prediction_list]
    correct = 0.0
    x_list = []
    y_list = []
    for gt, pred in zip(gt_list, pred_list):
        overlap = general_overlap([gt], [pred])
        # Jitter the x coordinate so points in a column stay visible
        x = random.uniform(index, index + 1)
        y = overlap[0][0]
        if y >= threshold:
            correct += 1.0
        x_list.append(x)
        y_list.append(y)
    accuracy = correct / len(y_list)
    mean = np.mean(y_list)
    std = np.std(y_list)
    label = '%s (Acc: %0.02f, %0.02f+/-%0.02f)' % (label, accuracy, mean, std, )
    plt.plot(x_list, y_list, color=color, linestyle='None', marker=marker, label=label, alpha=0.5)
    # Reference lines at common IoU acceptance thresholds
    for y_value in [0.5, 0.75, 0.9]:
        plt.plot([index, index + 1], [y_value, y_value], color=(0.2, 0.2, 0.2), linestyle='-', alpha=0.3)
    # Dashed divider after every group of four columns
    if index % 4 == 3:
        plt.plot([index + 1, index + 1], [0.0, 1.0], color=(0.2, 0.2, 0.2), linestyle='--', alpha=0.1)
    color = 'xkcd:gold'
    marker = 'D'
    plt.errorbar([index + 0.5], [mean], [std], linestyle='None', color=color, marker=marker, zorder=999, barsabove=True)
    # plt.plot([index + 0.5], [mean], color=color, marker=marker)
    return test_aid_set, test_bbox_set, prediction_list, y_list, accuracy
@register_ibs_method
def canonical_localization_iou_visualize(ibs, index, test_aid_set, test_bbox_set, prediction_list,
                                         overlap_list, color_list, label=None, species=None,
                                         **kwargs):
    """Render side-by-side GT vs. predicted canonical-box chips to disk.

    For each annotation, draws the ground-truth box edges on one copy of
    the chip and the predicted box edges on another, stacks them
    horizontally, and writes the result to
    ~/Desktop/canonical-regression-<index>/ with the IoU in the filename.

    Args:
        color_list: exactly four (r, g, b) float colors, one per box edge
            (left, top, right, bottom).
    """
    assert None not in [label, species]
    assert len(color_list) == 4
    print('Processing Renderings for: %r' % (label, ))
    # Scale float colors to 0-255; prepending each channel reverses the
    # tuple order (presumably RGB -> BGR for cv2 drawing — TODO confirm)
    color_list_ = []
    for color in color_list:
        color_ = []
        for value in color:
            value_ = int(np.around(255.0 * value))
            color_ = [value_] + color_
        color_ = tuple(color_)
        color_list_.append(color_)
    color_list = color_list_
    output_path = expanduser(join('~', 'Desktop', 'canonical-regression-%d' % (index, )))
    ut.delete(output_path)
    ut.ensuredir(output_path)
    config = {
        'dim_size': 600,
        'resize_dim': 'maxwh',
    }
    chip_list = ibs.depc_annot.get_property('chips', test_aid_set, 'img', config=config)
    zipped = list(zip(test_aid_set, chip_list, test_bbox_set, prediction_list, overlap_list))
    for test_aid, chip, test_bbox, prediction, overlap in zipped:
        h, w = chip.shape[:2]
        chipa = chip.copy()
        chipb = chip.copy()
        # a = ground truth margins, b = predicted margins
        x0a, y0a, x1a, y1a = test_bbox
        x0b, y0b, x1b, y1b = prediction
        # Convert normalized margins into pixel coordinates
        x0a = int(np.around(x0a * w))
        y0a = int(np.around(y0a * h))
        x1a = int(np.around(x1a * w))
        y1a = int(np.around(y1a * h))
        x0b = int(np.around(x0b * w))
        y0b = int(np.around(y0b * h))
        x1b = int(np.around(x1b * w))
        y1b = int(np.around(y1b * h))
        # Margins measure from the far edge, so flip into absolute coords
        x1a = w - x1a
        x1b = w - x1b
        y1a = h - y1a
        y1b = h - y1b
        # Draw each edge of the boxes in its own color
        chipa = cv2.line(chipa, (x0a, y0a), (x0a, y1a), color_list[0], 3)
        chipa = cv2.line(chipa, (x0a, y0a), (x1a, y0a), color_list[1], 3)
        chipa = cv2.line(chipa, (x1a, y0a), (x1a, y1a), color_list[2], 3)
        chipa = cv2.line(chipa, (x0a, y1a), (x1a, y1a), color_list[3], 3)
        chipb = cv2.line(chipb, (x0b, y0b), (x0b, y1b), color_list[0], 3)
        chipb = cv2.line(chipb, (x0b, y0b), (x1b, y0b), color_list[1], 3)
        chipb = cv2.line(chipb, (x1b, y0b), (x1b, y1b), color_list[2], 3)
        chipb = cv2.line(chipb, (x0b, y1b), (x1b, y1b), color_list[3], 3)
        # GT chip on the left, prediction chip on the right
        canvas = np.hstack((chipa, chipb))
        canvas_filepath = join(output_path, 'canonical-regression-iou-%0.02f-aid-%s.jpg' % (overlap, test_aid, ))
        cv2.imwrite(canvas_filepath, canvas)
@register_ibs_method
def canonical_localization_precision_recall_algo_display(ibs, figsize=(20, 40)):
    """Render and save the canonical-localization (bbox regression) dashboard.

    Draws a 3x2 figure: four per-component (x0, y0, x1, y1) deviation
    scatter plots, one combined per-ensemble deviation plot, and one IoU
    scatter plot (also rendering qualitative examples for each ensemble),
    then saves it to ~/Desktop/canonical-localization-deviance.png.

    Bug fix: subplot 322 previously plotted attribute 'x1' under the 'Y0'
    title and subplot 323 plotted 'y0' under the 'X1' title; the attributes
    now match their subplot titles.
    """
    import matplotlib.pyplot as plt
    import plottool as pt
    fig_ = plt.figure(figsize=figsize, dpi=400)  # NOQA
    # Each config is one scatter column; commented entries are retained history
    config_list = [
        # {'label': 'CA V1 Ensemble', 'canonical_weight_filepath': 'canonical_zebra_grevys_v1', 'species': 'zebra_grevys'},  # OVER = 1.0, small dataset
        # {'label': 'CA V1 Model 0', 'canonical_weight_filepath': 'canonical_zebra_grevys_v1:0', 'species': 'zebra_grevys'},  # OVER = 1.0, small dataset
        # {'label': 'CA V1 Model 1', 'canonical_weight_filepath': 'canonical_zebra_grevys_v1:1', 'species': 'zebra_grevys'},  # OVER = 1.0, small dataset
        # {'label': 'CA V1 Model 2', 'canonical_weight_filepath': 'canonical_zebra_grevys_v1:2', 'species': 'zebra_grevys'},  # OVER = 1.0, small dataset
        # {'label': 'CA V2 Ensemble', 'canonical_weight_filepath': 'canonical_zebra_grevys_v2', 'species': 'zebra_grevys'},  # OVER = 1.0, large dataset
        # {'label': 'CA V2 Model 0', 'canonical_weight_filepath': 'canonical_zebra_grevys_v2:0', 'species': 'zebra_grevys'},  # OVER = 1.0, large dataset
        # {'label': 'CA V2 Model 1', 'canonical_weight_filepath': 'canonical_zebra_grevys_v2:1', 'species': 'zebra_grevys'},  # OVER = 1.0, large dataset
        # {'label': 'CA V2 Model 2', 'canonical_weight_filepath': 'canonical_zebra_grevys_v2:2', 'species': 'zebra_grevys'},  # OVER = 1.0, large dataset
        # {'label': 'CA V3 Ensemble', 'canonical_weight_filepath': 'canonical_zebra_grevys_v3', 'species': 'zebra_grevys'},  # OVER = 2.0
        # {'label': 'CA V3 Model 0', 'canonical_weight_filepath': 'canonical_zebra_grevys_v3:0', 'species': 'zebra_grevys'},  # OVER = 2.0
        # {'label': 'CA V3 Model 1', 'canonical_weight_filepath': 'canonical_zebra_grevys_v3:1', 'species': 'zebra_grevys'},  # OVER = 2.0
        # {'label': 'CA V3 Model 2', 'canonical_weight_filepath': 'canonical_zebra_grevys_v3:2', 'species': 'zebra_grevys'},  # OVER = 2.0
        {'label': 'CA V5-1.0 Ens.', 'canonical_weight_filepath': 'canonical_zebra_grevys_v5', 'species': 'zebra_grevys'},  # OVER = 1.0
        {'label': 'CA V5-1.0 M0', 'canonical_weight_filepath': 'canonical_zebra_grevys_v5:0', 'species': 'zebra_grevys'},  # OVER = 1.0
        {'label': 'CA V5-1.0 M1', 'canonical_weight_filepath': 'canonical_zebra_grevys_v5:1', 'species': 'zebra_grevys'},  # OVER = 1.0
        {'label': 'CA V5-1.0 M2', 'canonical_weight_filepath': 'canonical_zebra_grevys_v5:2', 'species': 'zebra_grevys'},  # OVER = 1.0
        {'label': 'CA V6-2.0 Ens.', 'canonical_weight_filepath': 'canonical_zebra_grevys_v6', 'species': 'zebra_grevys'},  # OVER = 2.0
        {'label': 'CA V6-2.0 M0', 'canonical_weight_filepath': 'canonical_zebra_grevys_v6:0', 'species': 'zebra_grevys'},  # OVER = 2.0
        {'label': 'CA V6-2.0 M1', 'canonical_weight_filepath': 'canonical_zebra_grevys_v6:1', 'species': 'zebra_grevys'},  # OVER = 2.0
        {'label': 'CA V6-2.0 M2', 'canonical_weight_filepath': 'canonical_zebra_grevys_v6:2', 'species': 'zebra_grevys'},  # OVER = 2.0
        {'label': 'CA V4-4.0 Ens.', 'canonical_weight_filepath': 'canonical_zebra_grevys_v4', 'species': 'zebra_grevys'},  # OVER = 4.0
        {'label': 'CA V4-4.0 M0', 'canonical_weight_filepath': 'canonical_zebra_grevys_v4:0', 'species': 'zebra_grevys'},  # OVER = 4.0
        {'label': 'CA V4-4.0 M1', 'canonical_weight_filepath': 'canonical_zebra_grevys_v4:1', 'species': 'zebra_grevys'},  # OVER = 4.0
        {'label': 'CA V4-4.0 M2', 'canonical_weight_filepath': 'canonical_zebra_grevys_v4:2', 'species': 'zebra_grevys'},  # OVER = 4.0
    ]
    color_list = []
    # color_list = [(0, 0, 0)]
    color_list += pt.distinct_colors(len(config_list) - len(color_list), randomize=False)
    min_, max_ = -1.0, 1.0

    def _setup_deviation_axes(subplot_):
        # Shared axis styling for the deviation scatter subplots
        axes_ = plt.subplot(subplot_)
        axes_.grid(True, which='major')
        axes_.grid(False, which='minor')
        axes_.set_autoscalex_on(False)
        axes_.set_autoscaley_on(False)
        axes_.get_xaxis().set_ticks([])
        axes_.set_ylabel('GT - Pred Deviation (in percentages)')
        axes_.set_xlim([0.0, len(config_list)])
        axes_.set_ylim([min_, max_])
        # Shade the overshoot (negative deviation) region
        axes_.fill_between([0.0, len(config_list)], -1, 0, facecolor='red', alpha=0.1)
        return axes_

    # Subplots 321-324: one deviation scatter plot per bbox component
    deviation_specs = [
        (321, 'x0', 'X0'),
        (322, 'y0', 'Y0'),
        (323, 'x1', 'X1'),
        (324, 'y1', 'Y1'),
    ]
    for subplot_, attribute, title in deviation_specs:
        _setup_deviation_axes(subplot_)
        for index, (color, config) in enumerate(zip(color_list, config_list)):
            canonical_localization_deviation_plot(ibs, attribute, color=color, index=index, **config)
        plt.title('%s Deviation Scatter Plot' % (title, ))
        plt.legend(bbox_to_anchor=(0.0, 1.04, 1.0, .102), loc=3, ncol=2, mode="expand",
                   borderaxespad=0.0)

    # Subplot 325: all four components of each ensemble, side by side
    _setup_deviation_axes(325)
    assert len(config_list) % 4 == 0
    rounds = len(config_list) // 4
    colors = pt.distinct_colors(4, randomize=False)
    attribute_list = []
    color_list_ = []
    for _ in range(rounds):
        attribute_list += ['x0', 'y0', 'x1', 'y1']
        color_list_ += colors
    for index, (attribute, color_) in enumerate(zip(attribute_list, color_list_)):
        # Every group of four columns belongs to one ensemble config
        index_ = (index // 4) * 4
        config_ = config_list[index_].copy()
        config_['label'] = '%s %s' % (config_['label'], attribute, )
        canonical_localization_deviation_plot(ibs, attribute, color=color_, index=index, **config_)
    plt.title('Ensemble Deviation Scatter Plot')
    plt.legend(bbox_to_anchor=(0.0, 1.04, 1.0, .102), loc=3, ncol=2, mode="expand",
               borderaxespad=0.0)

    # Subplot 326: IoU scatter plot + qualitative renderings per ensemble
    axes_ = plt.subplot(326)
    axes_.grid(True, which='major')
    axes_.grid(False, which='minor')
    axes_.set_autoscalex_on(False)
    axes_.set_autoscaley_on(False)
    axes_.get_xaxis().set_ticks([])
    axes_.set_ylabel('IoU')
    axes_.set_xlim([0.0, len(config_list)])
    axes_.set_ylim([0.0, 1.0])
    for index, (color, config) in enumerate(zip(color_list, config_list)):
        values_ = canonical_localization_iou_plot(ibs, color=color, index=index, **config)
        if index % 4 == 0:
            # Render qualitative examples only for the ensemble configs
            config_ = config_list[index]
            test_aid_set, test_bbox_set, prediction_list, y_list, accuracy = values_
            ibs.canonical_localization_iou_visualize(index, test_aid_set, test_bbox_set,
                                                     prediction_list, y_list, colors,
                                                     **config_)
    plt.title('IoU Scatter Plot')
    plt.legend(bbox_to_anchor=(0.0, 1.04, 1.0, .102), loc=3, ncol=2, mode="expand",
               borderaxespad=0.0)
    fig_filename = 'canonical-localization-deviance.png'
    fig_path = abspath(expanduser(join('~', 'Desktop', fig_filename)))
    plt.savefig(fig_path, bbox_inches='tight')
@register_ibs_method
def background_accuracy_display(ibs, category_list, test_gid_set=None,
                                output_path=None):
    """Write visualizations of background-segmentation masks to disk.

    For every TEST_SET annotation whose species is in ``category_list``,
    loads its probability chip (CNN forward-weight detector), blends it
    with the annotation chip, and writes a chip|mask|blended triptych to
    ``output_path`` (default: ~/Desktop/background).
    """
    if output_path is None:
        output_path = abspath(expanduser(join('~', 'Desktop', 'background')))
        ut.ensuredir(output_path)
    if test_gid_set is None:
        test_gid_set = set(general_get_imageset_gids(ibs, 'TEST_SET'))
    test_gid_set = list(test_gid_set)
    aids_list = ibs.get_image_aids(test_gid_set)
    aid_list = ut.flatten(aids_list)
    species_list = ibs.get_annot_species_texts(aid_list)
    # Keep only annotations of the requested species
    aid_list = [
        aid
        for aid, species in zip(aid_list, species_list)
        if species in category_list
    ]
    species_list = ibs.get_annot_species_texts(aid_list)
    gid_list = ibs.get_annot_gids(aid_list)
    config2_ = {
        'fw_detector': 'cnn'
    }
    hough_cpath_list = ibs.get_annot_probchip_fpath(aid_list, config2_=config2_)
    image_list = [vt.imread(hough_cpath) for hough_cpath in hough_cpath_list]
    chip_list = ibs.get_annot_chips(aid_list, config2_=config2_)
    zipped = zip(aid_list, gid_list, species_list, image_list, chip_list)
    for index, (aid, gid, species, image, chip) in enumerate(zipped):
        print(index)
        # Resize the probability map to the chip, then multiply-blend
        mask = vt.resize_mask(image, chip)
        blended = vt.blend_images_multiply(chip, mask)
        # Blend result is in [0, 1]; scale and clamp into uint8 range
        blended *= 255.0
        blended = np.around(blended)
        blended[blended < 0] = 0
        blended[blended > 255] = 255
        blended = blended.astype(np.uint8)
        canvas = np.hstack((chip, mask, blended))
        output_filepath = join(output_path, 'background.%s.%d.%d.png' % (species, gid, aid, ))
        cv2.imwrite(output_filepath, canvas)
def aoi2_precision_recall_algo(ibs, category_list=None, test_gid_set_=None, **kwargs):
    """Compute precision-recall curve inputs for the AoI2 classifier.

    Annotations with a known interest flag (optionally restricted to
    ``category_list`` species) are labeled 'positive'/'negative', scored
    via the 'aoi_two' depcache property, and handed to
    general_precision_recall_algo.
    """
    depc = ibs.depc_annot
    if test_gid_set_ is None:
        test_gid_set_ = general_get_imageset_gids(ibs, 'TEST_SET')
    candidate_aid_list = list(set(ut.flatten(ibs.get_image_aids(test_gid_set_))))
    species_list = ibs.get_annot_species_texts(candidate_aid_list)
    interest_list = ibs.get_annot_interest(candidate_aid_list)
    test_aid_list = []
    label_list = []
    for aid, species, interest in zip(candidate_aid_list, species_list, interest_list):
        # Skip unlabeled annotations and out-of-category species
        if interest is None:
            continue
        if category_list is not None and species not in category_list:
            continue
        test_aid_list.append(aid)
        label_list.append('positive' if interest else 'negative')
    prediction_list = depc.get_property('aoi_two', test_aid_list, 'class', config=kwargs)
    score_list = depc.get_property('aoi_two', test_aid_list, 'score', config=kwargs)
    # Re-express every score as confidence in the 'positive' class
    confidence_list = [
        score if prediction == 'positive' else 1.0 - score
        for prediction, score in zip(prediction_list, score_list)
    ]
    return general_precision_recall_algo(ibs, label_list, confidence_list, **kwargs)
def aoi2_precision_recall_algo_plot(ibs, **kwargs):
    """Plot the precision-recall curve for one AoI2 classifier config."""
    print('Processing Precision-Recall for: %r' % (kwargs['label'], ))
    values = aoi2_precision_recall_algo(ibs, **kwargs)
    conf_list, pr_list, re_list, tpr_list, fpr_list = values
    return general_area_best_conf(conf_list, re_list, pr_list, **kwargs)
def aoi2_roc_algo_plot(ibs, **kwargs):
    """Plot the ROC curve for one AoI2 classifier config."""
    print('Processing ROC for: %r' % (kwargs['label'], ))
    values = aoi2_precision_recall_algo(ibs, **kwargs)
    conf_list, pr_list, re_list, tpr_list, fpr_list = values
    return general_area_best_conf(conf_list, fpr_list, tpr_list, interpolate=False,
                                  target=(0.0, 1.0), **kwargs)
def aoi2_confusion_matrix_algo_plot(ibs, label, color, conf, output_cases=False,
                                    category_list=None, test_gid_set_=None, **kwargs):
    """Render a confusion matrix for the AoI2 (annotation-of-interest) classifier.

    Labels annotations from the given image set by their interest flag,
    thresholds the 'aoi_two' confidence at ``conf``, and delegates
    plotting to general_confusion_matrix_algo.

    Args:
        ibs: controller object
        label: display label for the log line (shadowed by the
            per-annotation label in the loop below)
        color: plot color, forwarded via kwargs
        conf (float): operating-point threshold on positive confidence
        output_cases (bool): when True, write annotated full images to
            ~/Desktop/aoi2-confusion-incorrect (inner box = GT label
            color, outer box = predicted label color)
        category_list: optional species whitelist
        test_gid_set_: optional image ids; defaults to TEST_SET

    Returns:
        result of general_confusion_matrix_algo
    """
    print('Processing Confusion Matrix for: %r (Conf = %0.02f)' % (label, conf, ))
    depc = ibs.depc_annot
    if test_gid_set_ is None:
        test_gid_set_ = general_get_imageset_gids(ibs, 'TEST_SET')
    test_aid_list_ = list(set(ut.flatten(ibs.get_image_aids(test_gid_set_))))
    species_list = ibs.get_annot_species_texts(test_aid_list_)
    interest_list = ibs.get_annot_interest(test_aid_list_)
    test_aid_list = []
    label_list = []
    # Keep only annotations with a known interest flag (and species filter)
    for test_aid, species, interest in zip(test_aid_list_, species_list, interest_list):
        if category_list is not None:
            if species not in category_list:
                continue
        if interest is None:
            continue
        label = 'positive' if interest else 'negative'
        test_aid_list.append(test_aid)
        label_list.append(label)
    prediction_list = depc.get_property('aoi_two', test_aid_list, 'class', config=kwargs)
    confidence_list = depc.get_property('aoi_two', test_aid_list, 'score', config=kwargs)
    # Re-express each score as confidence in the 'positive' class
    confidence_list = [
        confidence if prediction == 'positive' else 1.0 - confidence
        for prediction, confidence in zip(prediction_list, confidence_list)
    ]
    # Apply the operating point to obtain final predictions
    prediction_list = [
        'positive' if confidence >= conf else 'negative'
        for confidence in confidence_list
    ]
    if output_cases:
        # Render each test image with GT (inner) and predicted (outer)
        # boxes for every evaluated annotation, for manual review
        output_path = 'aoi2-confusion-incorrect'
        output_path = abspath(expanduser(join('~', 'Desktop', output_path)))
        ut.delete(output_path)
        ut.ensuredir(output_path)
        manifest_dict = {}
        test_gid_list = ibs.get_annot_gids(test_aid_list)
        zipped = zip(test_gid_list, test_aid_list, label_list, prediction_list)
        # Group (label, prediction) pairs by their image
        for test_gid, test_aid, label, prediction in zipped:
            if test_gid not in manifest_dict:
                manifest_dict[test_gid] = {}
            assert test_aid not in manifest_dict[test_gid]
            manifest_dict[test_gid][test_aid] = (label, prediction, )
        for test_gid in manifest_dict:
            image = ibs.get_images(test_gid)
            w, h = ibs.get_image_sizes(test_gid)
            image = _resize(image, t_width=600, verbose=False)
            height_, width_, channels_ = image.shape
            for test_aid in manifest_dict[test_gid]:
                label, prediction = manifest_dict[test_gid][test_aid]
                bbox = ibs.get_annot_bboxes(test_aid)
                xtl, ytl, width, height = bbox
                xbr = xtl + width
                ybr = ytl + height
                # Rescale bbox coordinates to the resized image
                xtl = int(np.round((xtl / w) * width_))
                ytl = int(np.round((ytl / h) * height_))
                xbr = int(np.round((xbr / w) * width_))
                ybr = int(np.round((ybr / h) * height_))
                # Inner rectangle colored by ground-truth label
                if label == 'positive':
                    color = (255, 99, 46)
                else:
                    color = (127, 255, 127)
                cv2.rectangle(image, (xtl, ytl), (xbr, ybr), color, 4)
                # Outer rectangle colored by predicted label
                if prediction == 'positive':
                    color = (255, 99, 46)
                else:
                    color = (127, 255, 127)
                cv2.rectangle(image, (xtl - 4, ytl - 4), (xbr + 4, ybr + 4), color, 4)
            image_filename = 'image_%d.png' % (test_gid, )
            image_filepath = join(output_path, image_filename)
            cv2.imwrite(image_filepath, image)
    category_list = ['positive', 'negative']
    category_mapping = {
        'positive': 0,
        'negative': 1,
    }
    return general_confusion_matrix_algo(label_list, prediction_list, category_list,
                                         category_mapping, size=20, **kwargs)
@register_ibs_method
def aoi2_precision_recall_algo_display(ibs, test_gid_list=None, output_cases=False, figsize=(20, 20)):
    """Render and save the AoI2 classifier evaluation dashboard.

    Draws a 2x2 figure — precision-recall curves, ROC curves, and a
    confusion matrix at each curve's chosen operating point — and saves
    it to ~/Desktop/aoi2-precision-recall-roc.png.

    Bug fix: the ROC plots previously omitted ``test_gid_set_``, so they
    silently evaluated on the default TEST_SET even when a caller
    supplied ``test_gid_list``; it is now forwarded consistently.
    """
    import matplotlib.pyplot as plt
    import plottool as pt
    fig_ = plt.figure(figsize=figsize)
    test_gid_set = None if test_gid_list is None else sorted(set(test_gid_list))
    config_list = [
        # {'label': 'All Species', 'aoi_two_weight_filepath': 'ggr2', 'category_list': None},
        # {'label': 'Masai Giraffe', 'aoi_two_weight_filepath': 'ggr2', 'category_list': ['giraffe_masai']},
        # {'label': 'Reticulated Giraffe', 'aoi_two_weight_filepath': 'ggr2', 'category_list': ['giraffe_reticulated']},
        # {'label': 'Sea Turtle', 'aoi_two_weight_filepath': 'ggr2', 'category_list': ['turtle_sea']},
        # {'label': 'Whale Fluke', 'aoi_two_weight_filepath': 'ggr2', 'category_list': ['whale_fluke']},
        # {'label': 'Grevy\'s Zebra', 'aoi_two_weight_filepath': 'ggr2', 'category_list': ['zebra_grevys']},
        # {'label': 'Plains Zebra', 'aoi_two_weight_filepath': 'ggr2', 'category_list': ['zebra_plains']},
        # {'label': 'Hammerhead', 'aoi_two_weight_filepath': 'hammerhead', 'category_list': ['shark_hammerhead']},
        {'label': 'Jaguar', 'aoi_two_weight_filepath': 'jaguar', 'category_list': ['jaguar']},
    ]
    color_list = [(0, 0, 0)]
    color_list += pt.distinct_colors(len(config_list) - len(color_list), randomize=False)
    # Subplot 221: precision-recall curves
    axes_ = plt.subplot(221)
    axes_.set_autoscalex_on(False)
    axes_.set_autoscaley_on(False)
    axes_.set_xlabel('Recall')
    axes_.set_ylabel('Precision')
    axes_.set_xlim([0.0, 1.01])
    axes_.set_ylim([0.0, 1.01])
    ret_list = [
        aoi2_precision_recall_algo_plot(ibs, color=color, test_gid_set_=test_gid_set, **config)
        for color, config in zip(color_list, config_list)
    ]
    area_list = [ ret[0] for ret in ret_list ]
    conf_list = [ ret[1] for ret in ret_list ]
    # index = np.argmax(area_list)
    # NOTE: "best" is hard-coded to the first config, not the max area
    index = 0
    best_label1 = config_list[index]['label']
    best_config1 = config_list[index]
    best_color1 = color_list[index]
    best_area1 = area_list[index]
    best_conf1 = conf_list[index]
    plt.title('Precision-Recall Curve (Best: %s, AP = %0.02f)' % (best_label1, best_area1, ), y=1.10)
    plt.legend(bbox_to_anchor=(0.0, 1.02, 1.0, .102), loc=3, ncol=2, mode="expand",
               borderaxespad=0.0)
    # Subplot 222: ROC curves
    axes_ = plt.subplot(222)
    axes_.set_autoscalex_on(False)
    axes_.set_autoscaley_on(False)
    axes_.set_xlabel('False-Positive Rate')
    axes_.set_ylabel('True-Positive Rate')
    axes_.set_xlim([0.0, 1.01])
    axes_.set_ylim([0.0, 1.01])
    ret_list = [
        # Forward test_gid_set_ so ROC evaluates the same images as PR
        aoi2_roc_algo_plot(ibs, color=color, test_gid_set_=test_gid_set, **config)
        for color, config in zip(color_list, config_list)
    ]
    area_list = [ ret[0] for ret in ret_list ]
    conf_list = [ ret[1] for ret in ret_list ]
    # index = np.argmax(area_list)
    index = 0
    best_label2 = config_list[index]['label']
    best_config2 = config_list[index]
    best_color2 = color_list[index]
    best_area2 = area_list[index]
    best_conf2 = conf_list[index]
    plt.title('ROC Curve (Best: %s, AP = %0.02f)' % (best_label2, best_area2, ), y=1.10)
    plt.legend(bbox_to_anchor=(0.0, 1.02, 1.0, .102), loc=3, ncol=2, mode="expand",
               borderaxespad=0.0)
    # Chance-level diagonal for the ROC plot
    plt.plot([0.0, 1.0], [0.0, 1.0], color=(0.5, 0.5, 0.5), linestyle='--')
    # Subplot 223: confusion matrix at the PR operating point
    axes_ = plt.subplot(223)
    axes_.set_aspect(1)
    gca_ = plt.gca()
    gca_.grid(False)
    correct_rate, _ = aoi2_confusion_matrix_algo_plot(ibs, color=best_color1,
                                                      conf=best_conf1, fig_=fig_, axes_=axes_,
                                                      output_cases=output_cases,
                                                      test_gid_set_=test_gid_set,
                                                      **best_config1)
    axes_.set_xlabel('Predicted (Correct = %0.02f%%)' % (correct_rate * 100.0, ))
    axes_.set_ylabel('Ground-Truth')
    plt.title('P-R Confusion Matrix (OP = %0.02f)' % (best_conf1, ), y=1.12)
    # Subplot 224: confusion matrix at the ROC operating point
    axes_ = plt.subplot(224)
    axes_.set_aspect(1)
    gca_ = plt.gca()
    gca_.grid(False)
    correct_rate, _ = aoi2_confusion_matrix_algo_plot(ibs, color=best_color2,
                                                      conf=best_conf2, fig_=fig_, axes_=axes_,
                                                      test_gid_set_=test_gid_set,
                                                      **best_config2)
    axes_.set_xlabel('Predicted (Correct = %0.02f%%)' % (correct_rate * 100.0, ))
    axes_.set_ylabel('Ground-Truth')
    plt.title('ROC Confusion Matrix (OP = %0.02f)' % (best_conf2, ), y=1.12)
    fig_filename = 'aoi2-precision-recall-roc.png'
    fig_path = abspath(expanduser(join('~', 'Desktop', fig_filename)))
    plt.savefig(fig_path, bbox_inches='tight')
def detector_parse_gt(ibs, test_gid_list=None, **kwargs):
    """Collect ground-truth annotation records for a set of images.

    Args:
        ibs: IBEIS controller providing image/annotation accessors.
        test_gid_list (list or None): image rowids to gather ground-truth
            for; when None, the 'TEST_SET' imageset is used (``kwargs`` are
            forwarded to ``general_get_imageset_gids``).

    Returns:
        dict: maps each image UUID to a list of annotation dicts whose
        bbox coordinates ('xtl', 'ytl', 'xbr', 'ybr', 'width', 'height')
        are normalized by the image dimensions; 'confidence' is fixed at
        1.0 since these are ground-truth boxes.
    """
    if test_gid_list is None:
        test_gid_list = general_get_imageset_gids(ibs, 'TEST_SET', **kwargs)
    uuid_list = ibs.get_image_uuids(test_gid_list)
    # Round-trip through UUIDs so the gids line up with the uuid keys below.
    gid_list = ibs.get_image_gids_from_uuid(uuid_list)
    gt_dict = {}
    for gid, uuid in zip(gid_list, uuid_list):
        width, height = ibs.get_image_sizes(gid)
        annot_list = []
        for aid in ibs.get_image_aids(gid):
            xtl, ytl, w, h = ibs.get_annot_bboxes(aid)
            annot_list.append({
                'gid'        : gid,
                'xtl'        : xtl / width,
                'ytl'        : ytl / height,
                'xbr'        : (xtl + w) / width,
                'ybr'        : (ytl + h) / height,
                'width'      : w / width,
                'height'     : h / height,
                'class'      : ibs.get_annot_species_texts(aid),
                'viewpoint'  : ibs.get_annot_viewpoints(aid),
                'confidence' : 1.0,
            })
        gt_dict[uuid] = annot_list
    return gt_dict
# def detector_parse_pred(ibs, test_gid_list=None, **kwargs):
# depc = ibs.depc_image
# if test_gid_list is None:
# test_gid_list = general_get_imageset_gids(ibs, 'TEST_SET', **kwargs)
# uuid_list = ibs.get_image_uuids(test_gid_list)
# # depc.delete_property('detections', test_gid_list, config=kwargs)
# results_list = depc.get_property('detections', test_gid_list, None, config=kwargs)
# size_list = ibs.get_image_sizes(test_gid_list)
# zipped_list = zip(results_list)
# # Reformat results for json
# results_list = [
# [
# {
# 'gid' : test_gid,
# 'xtl' : bbox[0] / width,
# 'ytl' : bbox[1] / height,
# 'width' : bbox[2] / width,
# 'height' : bbox[3] / height,
# 'theta' : theta, # round(theta, 4),
# 'confidence' : conf, # round(conf, 4),
# 'class' : class_,
# 'viewpoint' : viewpoint,
# }
# for bbox, theta, class_, viewpoint, conf in zip(*zipped[0][1:])
# ]
# for zipped, (width, height), test_gid in zip(zipped_list, size_list, test_gid_list)
# ]
# pred_dict = {
# uuid_ : result_list
# for uuid_, result_list in zip(uuid_list, results_list)
# }
# # print(pred_dict)
# return pred_dict
# def detector_precision_recall_algo(ibs, samples=SAMPLES, force_serial=FORCE_SERIAL, **kwargs):
# test_gid_list = general_get_imageset_gids(ibs, 'TEST_SET', **kwargs)
# uuid_list = ibs.get_image_uuids(test_gid_list)
# print('\tGather Ground-Truth')
# gt_dict = detector_parse_gt(ibs, test_gid_list=test_gid_list)
# print('\tGather Predictions')
# pred_dict = detector_parse_pred(ibs, test_gid_list=test_gid_list, **kwargs)
# print('\tGenerate Curves...')
# conf_list = [ _ / float(samples) for _ in range(0, int(samples) + 1) ]
# conf_list = sorted(conf_list, reverse=True)
# uuid_list_list = [ uuid_list for _ in conf_list ]
# gt_dict_list = [ gt_dict for _ in conf_list ]
# pred_dict_list = [ pred_dict for _ in conf_list ]
# kwargs_list = [ kwargs for _ in conf_list ]
# arg_iter = zip(conf_list, uuid_list_list, gt_dict_list, pred_dict_list, kwargs_list)
# pr_re_gen = ut.generate2(detector_precision_recall_algo_worker, arg_iter,
# nTasks=len(conf_list), ordered=True,
# chunksize=CHUNK_SIZE, force_serial=force_serial)
# conf_list_ = [-1.0, -1.0]
# pr_list = [1.0, 0.0]
# re_list = [0.0, 1.0]
# # conf_list_ = []
# # pr_list = []
# # re_list = []
# for conf, pr, re in pr_re_gen:
# conf_list_.append(conf)
# pr_list.append(pr)
# re_list.append(re)
# print('...complete')
# return conf_list_, pr_list, re_list
# def detector_precision_recall_algo_worker(conf, uuid_list, gt_dict, pred_dict,
# kwargs):
# tp, fp, fn = 0.0, 0.0, 0.0
# for index, uuid_ in enumerate(uuid_list):
# if uuid_ in pred_dict:
# pred_list = [
# pred
# for pred in pred_dict[uuid_]
# if pred['confidence'] >= conf
# ]
# tp_, fp_, fn_ = general_tp_fp_fn(gt_dict[uuid_], pred_list, **kwargs)
# tp += tp_
# fp += fp_
# fn += fn_
# pr = tp / (tp + fp)
# re = tp / (tp + fn)
# return (conf, pr, re)
# def detector_precision_recall_algo_plot(ibs, **kwargs):
# label = kwargs['label']
# print('Processing Precision-Recall for: %r' % (label, ))
# conf_list, pr_list, re_list = detector_precision_recall_algo(ibs, **kwargs)
# return general_area_best_conf(conf_list, re_list, pr_list, **kwargs)
# def detector_confusion_matrix_algo_plot(ibs, label, color, conf, **kwargs):
# print('Processing Confusion Matrix for: %r (Conf = %0.02f)' % (label, conf, ))
# test_gid_list = general_get_imageset_gids(ibs, 'TEST_SET', **kwargs)
# uuid_list = ibs.get_image_uuids(test_gid_list)
# print('\tGather Ground-Truth')
# gt_dict = detector_parse_gt(ibs, test_gid_list=test_gid_list)
# print('\tGather Predictions')
# pred_dict = detector_parse_pred(ibs, test_gid_list=test_gid_list, **kwargs)
# label_list = []
# prediction_list = []
# for index, uuid_ in enumerate(uuid_list):
# if uuid_ in pred_dict:
# gt_list = gt_dict[uuid_]
# pred_list = [
# pred
# for pred in pred_dict[uuid_]
# if pred['confidence'] >= conf
# ]
# tp, fp, fn = general_tp_fp_fn(gt_list, pred_list, **kwargs)
# for _ in range(int(tp)):
# label_list.append('positive')
# prediction_list.append('positive')
# for _ in range(int(fp)):
# label_list.append('negative')
# prediction_list.append('positive')
# for _ in range(int(fn)):
# label_list.append('positive')
# prediction_list.append('negative')
# category_list = ['positive', 'negative']
# category_mapping = {
# 'positive': 0,
# 'negative': 1,
# }
# return general_confusion_matrix_algo(label_list, prediction_list, category_list,
# category_mapping, **kwargs)
# @register_ibs_method
# def detector_precision_recall_algo_display(ibs, min_overlap=0.5, figsize=(24, 7), **kwargs):
# import matplotlib.pyplot as plt
# fig_ = plt.figure(figsize=figsize)
# axes_ = plt.subplot(131)
# axes_.set_autoscalex_on(False)
# axes_.set_autoscaley_on(False)
# axes_.set_xlabel('Recall (Ground-Truth IOU >= %0.02f)' % (min_overlap, ))
# axes_.set_ylabel('Precision')
# axes_.set_xlim([0.0, 1.01])
# axes_.set_ylim([0.0, 1.01])
# kwargs_list = [
# {
# 'min_overlap' : min_overlap,
# 'classifier_sensitivity' : 0.64,
# 'localizer_grid' : False,
# 'localizer_sensitivity' : 0.16,
# 'labeler_sensitivity' : 0.42,
# },
# {
# 'min_overlap' : min_overlap,
# 'classifier_sensitivity' : 0.64,
# 'localizer_grid' : False,
# 'localizer_sensitivity' : 0.16,
# 'labeler_sensitivity' : 0.42,
# 'check_species' : True,
# },
# {
# 'min_overlap' : min_overlap,
# 'classifier_sensitivity' : 0.64,
# 'localizer_grid' : False,
# 'localizer_sensitivity' : 0.16,
# 'labeler_sensitivity' : 0.42,
# 'check_viewpoint' : True,
# },
# {
# 'min_overlap' : min_overlap,
# 'classifier_sensitivity' : 0.04,
# 'localizer_grid' : True,
# 'localizer_sensitivity' : 0.05,
# 'labeler_sensitivity' : 0.39,
# },
# {
# 'min_overlap' : min_overlap,
# 'classifier_sensitivity' : 0.04,
# 'localizer_grid' : True,
# 'localizer_sensitivity' : 0.05,
# 'labeler_sensitivity' : 0.39,
# 'check_species' : True,
# },
# {
# 'min_overlap' : min_overlap,
# 'classifier_sensitivity' : 0.04,
# 'localizer_grid' : True,
# 'localizer_sensitivity' : 0.05,
# 'labeler_sensitivity' : 0.39,
# 'check_viewpoint' : True,
# },
# ]
# label_list = [
# 'Opt L',
# 'Opt L+S',
# 'Opt L+S+V',
# 'Rec L',
# 'Rec L+S',
# 'Rec L+S+V',
# ]
# color_list = [
# 'r',
# 'b',
# 'g',
# 'k',
# 'y',
# 'c',
# ]
# ret_list = [
# detector_precision_recall_algo_plot(ibs, label=label, color=color, **kwargs_)
# for label, color, kwargs_ in zip(label_list, color_list, kwargs_list)
# ]
# area_list = [ ret[0] for ret in ret_list ]
# conf_list = [ ret[1] for ret in ret_list ]
# index = np.argmax(area_list)
# best_label = label_list[index]
# best_kwargs = kwargs_list[index]
# best_area = area_list[index]
# best_conf = conf_list[index]
# plt.title('Precision-Recall Curve (Best: %s, AP = %0.02f)' % (best_label, best_area, ), y=1.20)
# # Display graph
# plt.legend(bbox_to_anchor=(0.0, 1.02, 1.0, .102), loc=3, ncol=2, mode="expand",
# borderaxespad=0.0)
# axes_ = plt.subplot(132)
# axes_.set_aspect(1)
# gca_ = plt.gca()
# gca_.grid(False)
# correct_rate, _ = detector_confusion_matrix_algo_plot(ibs, 'V1', 'r', conf=best_conf, fig_=fig_, axes_=axes_, **best_kwargs)
# axes_.set_xlabel('Predicted (Correct = %0.02f%%)' % (correct_rate * 100.0, ))
# axes_.set_ylabel('Ground-Truth')
# plt.title('P-R Confusion Matrix (Algo: %s, OP = %0.02f)' % (best_label, best_conf, ), y=1.26)
# best_index = None
# best_conf = None
# best_pr = 0.0
# best_re = 0.0
# tup_list = [ ret[2] for ret in ret_list ]
# for index, tup in enumerate(tup_list):
# for conf, re, pr in zip(*tup):
# if pr > best_pr:
# best_index = index
# best_conf = conf
# best_pr = pr
# best_re = re
# if best_index is not None:
# axes_ = plt.subplot(131)
# plt.plot([best_re], [best_pr], 'yo')
# best_label = label_list[best_index]
# best_kwargs = kwargs_list[best_index]
# axes_ = plt.subplot(133)
# axes_.set_aspect(1)
# gca_ = plt.gca()
# gca_.grid(False)
# correct_rate, _ = detector_confusion_matrix_algo_plot(ibs, 'V1', 'r', conf=best_conf, fig_=fig_, axes_=axes_, **best_kwargs)
# axes_.set_xlabel('Predicted (Correct = %0.02f%%)' % (correct_rate * 100.0, ))
# axes_.set_ylabel('Ground-Truth')
# plt.title('P-R Confusion Matrix (Algo: %s, OP = %0.02f)' % (best_label, best_conf, ), y=1.26)
# # plt.show()
# fig_filename = 'detector-precision-recall-%0.2f.png' % (min_overlap, )
# fig_path = abspath(expanduser(join('~', 'Desktop', fig_filename)))
# plt.savefig(fig_path, bbox_inches='tight')
# @register_ibs_method
# def detector_metric_graphs(ibs, species_list=[]):
# ibs.classifier_precision_recall_algo_display(species_list)
# ibs.localizer_precision_recall_algo_display()
# ibs.labeler_precision_recall_algo_display()
# ibs.detector_precision_recall_algo_display()
# Script entry point: run this module's doctests via utool.
if __name__ == '__main__':
    """
    CommandLine:
        python -m ibeis.other.detectfuncs
        python -m ibeis.other.detectfuncs --allexamples
        python -m ibeis.other.detectfuncs --allexamples --noface --nosrc
    """
    import multiprocessing
    # Required on Windows so frozen executables can spawn worker processes
    # safely; a no-op elsewhere.
    multiprocessing.freeze_support()  # for win32
    # `ut` (utool) is imported at the top of the full file; discovers and
    # executes the doctests defined in this module.
    ut.doctest_funcs()
| [
"cv2.rectangle",
"numpy.sqrt",
"vtool.verts_from_bbox",
"numpy.hstack",
"utool.embed",
"utool.delete",
"utool.doctest_funcs",
"numpy.array",
"utool.take_column",
"multiprocessing.freeze_support",
"matplotlib.pyplot.errorbar",
"numpy.arange",
"numpy.mean",
"matplotlib.pyplot.plot",
"cv2.l... | [((644, 687), 'utool.inject2', 'ut.inject2', (['__name__', '"""[other.detectfuncs]"""'], {}), "(__name__, '[other.detectfuncs]')\n", (654, 687), True, 'import utool as ut\n'), ((859, 914), 'ibeis.control.controller_inject.make_ibs_register_decorator', 'controller_inject.make_ibs_register_decorator', (['__name__'], {}), '(__name__)\n', (904, 914), False, 'from ibeis.control import controller_inject\n'), ((1801, 1868), 'cv2.resize', 'cv2.resize', (['image', '(t_width, t_height)'], {'interpolation': 'interpolation'}), '(image, (t_width, t_height), interpolation=interpolation)\n', (1811, 1868), False, 'import cv2\n'), ((4395, 4427), 'six.moves.zip', 'zip', (['re_list', 'conf_list', 'pr_list'], {}), '(re_list, conf_list, pr_list)\n', (4398, 4427), False, 'from six.moves import zip, range\n'), ((7685, 7756), 'matplotlib.pyplot.plot', 'plt.plot', (['x_list', 'y_list'], {'color': 'color', 'linestyle': 'linestyle', 'label': 'label'}), '(x_list, y_list, color=color, linestyle=linestyle, label=label)\n', (7693, 7756), True, 'import matplotlib.pyplot as plt\n'), ((8914, 8956), 'numpy.zeros', 'np.zeros', (['(num_categories, num_categories)'], {}), '((num_categories, num_categories))\n', (8922, 8956), True, 'import numpy as np\n'), ((8970, 9013), 'six.moves.zip', 'zip', (['label_correct_list', 'label_predict_list'], {}), '(label_correct_list, label_predict_list)\n', (8973, 9013), False, 'from six.moves import zip, range\n'), ((10019, 10051), 'numpy.sum', 'np.sum', (['confusion_matrix'], {'axis': '(1)'}), '(confusion_matrix, axis=1)\n', (10025, 10051), True, 'import numpy as np\n'), ((10079, 10128), 'numpy.array', 'np.array', (['(confusion_matrix.T / row_normalizer).T'], {}), '((confusion_matrix.T / row_normalizer).T)\n', (10087, 10128), True, 'import numpy as np\n'), ((10364, 10385), 'six.moves.range', 'range', (['num_categories'], {}), '(num_categories)\n', (10369, 10385), False, 'from six.moves import zip, range\n'), ((11118, 11220), 
'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': 'margin_small', 'right': 'margin_large', 'bottom': 'margin_small', 'top': 'margin_large'}), '(left=margin_small, right=margin_large, bottom=\n margin_small, top=margin_large)\n', (11137, 11220), True, 'import matplotlib.pyplot as plt\n'), ((14516, 14539), 'six.moves.zip', 'zip', (['gid_list', 'aid_list'], {}), '(gid_list, aid_list)\n', (14519, 14539), False, 'from six.moves import zip, range\n'), ((18555, 18579), 'six.moves.zip', 'zip', (['gid_list', 'uuid_list'], {}), '(gid_list, uuid_list)\n', (18558, 18579), False, 'from six.moves import zip, range\n'), ((22432, 22603), 'six.moves.zip', 'zip', (['keeps_list', 'test_gids_list', 'sizes_list', 'bboxes_list', 'thetas_list', 'confss_list', 'classs_list', 'viewpoints_list', 'interests_list', 'features_list', 'features_lazy_list'], {}), '(keeps_list, test_gids_list, sizes_list, bboxes_list, thetas_list,\n confss_list, classs_list, viewpoints_list, interests_list,\n features_list, features_lazy_list)\n', (22435, 22603), False, 'from six.moves import zip, range\n'), ((25191, 25223), 'six.moves.zip', 'zip', (['conf_list', 'tp_list', 'fp_list'], {}), '(conf_list, tp_list, fp_list)\n', (25194, 25223), False, 'from six.moves import zip, range\n'), ((30560, 30579), 'tqdm.tqdm', 'tqdm.tqdm', (['iou_list'], {}), '(iou_list)\n', (30569, 30579), False, 'import tqdm\n'), ((123636, 123655), 'six.moves.range', 'range', (['offset_color'], {}), '(offset_color)\n', (123641, 123655), False, 'from six.moves import zip, range\n'), ((123843, 123879), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize', 'dpi': '(400)'}), '(figsize=figsize, dpi=400)\n', (123853, 123879), True, 'import matplotlib.pyplot as plt\n'), ((123985, 124001), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(141)'], {}), '(141)\n', (123996, 124001), True, 'import matplotlib.pyplot as plt\n'), ((125584, 125628), 'matplotlib.pyplot.title', 'plt.title', (['"""Precision-Recall 
Curves"""'], {'y': '(1.19)'}), "('Precision-Recall Curves', y=1.19)\n", (125593, 125628), True, 'import matplotlib.pyplot as plt\n'), ((125633, 125737), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(0.0, 1.02, 1.0, 0.102)', 'loc': '(3)', 'ncol': '(2)', 'mode': '"""expand"""', 'borderaxespad': '(0.0)'}), "(bbox_to_anchor=(0.0, 1.02, 1.0, 0.102), loc=3, ncol=2, mode=\n 'expand', borderaxespad=0.0)\n", (125643, 125737), True, 'import matplotlib.pyplot as plt\n'), ((125852, 125868), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(142)'], {}), '(142)\n', (125863, 125868), True, 'import matplotlib.pyplot as plt\n'), ((127322, 127360), 'matplotlib.pyplot.title', 'plt.title', (['"""Recall-IOU Curves"""'], {'y': '(1.19)'}), "('Recall-IOU Curves', y=1.19)\n", (127331, 127360), True, 'import matplotlib.pyplot as plt\n'), ((127365, 127469), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(0.0, 1.02, 1.0, 0.102)', 'loc': '(3)', 'ncol': '(2)', 'mode': '"""expand"""', 'borderaxespad': '(0.0)'}), "(bbox_to_anchor=(0.0, 1.02, 1.0, 0.102), loc=3, ncol=2, mode=\n 'expand', borderaxespad=0.0)\n", (127375, 127469), True, 'import matplotlib.pyplot as plt\n'), ((129374, 129390), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(143)'], {}), '(143)\n', (129385, 129390), True, 'import matplotlib.pyplot as plt\n'), ((129426, 129435), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (129433, 129435), True, 'import matplotlib.pyplot as plt\n'), ((130099, 130189), 'matplotlib.pyplot.title', 'plt.title', (['("""Confusion Matrix\n(Algo: %s, mAP = %0.02f, OP = %0.02f)""" % args)'], {'y': '(1.26)'}), '("""Confusion Matrix\n(Algo: %s, mAP = %0.02f, OP = %0.02f)""" %\n args, y=1.26)\n', (130108, 130189), True, 'import matplotlib.pyplot as plt\n'), ((130453, 130484), 'os.path.join', 'join', (['output_path', 'fig_filename'], {}), '(output_path, fig_filename)\n', (130457, 130484), False, 'from os.path import expanduser, join, abspath\n'), ((130489, 130531), 
'matplotlib.pyplot.savefig', 'plt.savefig', (['fig_path'], {'bbox_inches': '"""tight"""'}), "(fig_path, bbox_inches='tight')\n", (130500, 130531), True, 'import matplotlib.pyplot as plt\n'), ((130674, 130683), 'six.moves.range', 'range', (['(10)'], {}), '(10)\n', (130679, 130683), False, 'from six.moves import zip, range\n'), ((144386, 144422), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize', 'dpi': '(400)'}), '(figsize=figsize, dpi=400)\n', (144396, 144422), True, 'import matplotlib.pyplot as plt\n'), ((147121, 147137), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(221)'], {}), '(221)\n', (147132, 147137), True, 'import matplotlib.pyplot as plt\n'), ((147866, 147886), 'numpy.argmax', 'np.argmax', (['area_list'], {}), '(area_list)\n', (147875, 147886), True, 'import numpy as np\n'), ((148095, 148193), 'matplotlib.pyplot.title', 'plt.title', (["('Precision-Recall Curve (Best: %s, AP = %0.02f)' % (best_label1, best_area1))"], {'y': '(1.1)'}), "('Precision-Recall Curve (Best: %s, AP = %0.02f)' % (best_label1,\n best_area1), y=1.1)\n", (148104, 148193), True, 'import matplotlib.pyplot as plt\n'), ((148197, 148301), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(0.0, 1.02, 1.0, 0.102)', 'loc': '(3)', 'ncol': '(2)', 'mode': '"""expand"""', 'borderaxespad': '(0.0)'}), "(bbox_to_anchor=(0.0, 1.02, 1.0, 0.102), loc=3, ncol=2, mode=\n 'expand', borderaxespad=0.0)\n", (148207, 148301), True, 'import matplotlib.pyplot as plt\n'), ((148324, 148340), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(222)'], {}), '(222)\n', (148335, 148340), True, 'import matplotlib.pyplot as plt\n'), ((149039, 149059), 'numpy.argmax', 'np.argmax', (['area_list'], {}), '(area_list)\n', (149048, 149059), True, 'import numpy as np\n'), ((149268, 149353), 'matplotlib.pyplot.title', 'plt.title', (["('ROC Curve (Best: %s, AP = %0.02f)' % (best_label2, best_area2))"], {'y': '(1.1)'}), "('ROC Curve (Best: %s, AP = %0.02f)' % (best_label2, best_area2),\n 
y=1.1)\n", (149277, 149353), True, 'import matplotlib.pyplot as plt\n'), ((149357, 149461), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(0.0, 1.02, 1.0, 0.102)', 'loc': '(3)', 'ncol': '(2)', 'mode': '"""expand"""', 'borderaxespad': '(0.0)'}), "(bbox_to_anchor=(0.0, 1.02, 1.0, 0.102), loc=3, ncol=2, mode=\n 'expand', borderaxespad=0.0)\n", (149367, 149461), True, 'import matplotlib.pyplot as plt\n'), ((149484, 149500), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(223)'], {}), '(223)\n', (149495, 149500), True, 'import matplotlib.pyplot as plt\n'), ((149536, 149545), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (149543, 149545), True, 'import matplotlib.pyplot as plt\n'), ((150231, 150302), 'matplotlib.pyplot.title', 'plt.title', (["('P-R Confusion Matrix (OP = %0.02f)' % (best_conf1,))"], {'y': '(1.12)'}), "('P-R Confusion Matrix (OP = %0.02f)' % (best_conf1,), y=1.12)\n", (150240, 150302), True, 'import matplotlib.pyplot as plt\n'), ((150317, 150333), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(224)'], {}), '(224)\n', (150328, 150333), True, 'import matplotlib.pyplot as plt\n'), ((150369, 150378), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (150376, 150378), True, 'import matplotlib.pyplot as plt\n'), ((151045, 151116), 'matplotlib.pyplot.title', 'plt.title', (["('ROC Confusion Matrix (OP = %0.02f)' % (best_conf2,))"], {'y': '(1.12)'}), "('ROC Confusion Matrix (OP = %0.02f)' % (best_conf2,), y=1.12)\n", (151054, 151116), True, 'import matplotlib.pyplot as plt\n'), ((151262, 151304), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fig_path'], {'bbox_inches': '"""tight"""'}), "(fig_path, bbox_inches='tight')\n", (151273, 151304), True, 'import matplotlib.pyplot as plt\n'), ((160806, 160842), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize', 'dpi': '(400)'}), '(figsize=figsize, dpi=400)\n', (160816, 160842), True, 'import matplotlib.pyplot as plt\n'), ((162588, 162604), 'matplotlib.pyplot.subplot', 
'plt.subplot', (['(121)'], {}), '(121)\n', (162599, 162604), True, 'import matplotlib.pyplot as plt\n'), ((162830, 162858), 'six.moves.zip', 'zip', (['color_list', 'config_list'], {}), '(color_list, config_list)\n', (162833, 162858), False, 'from six.moves import zip, range\n'), ((163222, 163266), 'matplotlib.pyplot.title', 'plt.title', (['"""Precision-Recall Curves"""'], {'y': '(1.19)'}), "('Precision-Recall Curves', y=1.19)\n", (163231, 163266), True, 'import matplotlib.pyplot as plt\n'), ((163271, 163375), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(0.0, 1.02, 1.0, 0.102)', 'loc': '(3)', 'ncol': '(2)', 'mode': '"""expand"""', 'borderaxespad': '(0.0)'}), "(bbox_to_anchor=(0.0, 1.02, 1.0, 0.102), loc=3, ncol=2, mode=\n 'expand', borderaxespad=0.0)\n", (163281, 163375), True, 'import matplotlib.pyplot as plt\n'), ((163398, 163414), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(122)'], {}), '(122)\n', (163409, 163414), True, 'import matplotlib.pyplot as plt\n'), ((163679, 163707), 'six.moves.zip', 'zip', (['color_list', 'config_list'], {}), '(color_list, config_list)\n', (163682, 163707), False, 'from six.moves import zip, range\n'), ((164143, 164174), 'matplotlib.pyplot.title', 'plt.title', (['"""ROC Curves"""'], {'y': '(1.19)'}), "('ROC Curves', y=1.19)\n", (164152, 164174), True, 'import matplotlib.pyplot as plt\n'), ((164179, 164283), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(0.0, 1.02, 1.0, 0.102)', 'loc': '(3)', 'ncol': '(2)', 'mode': '"""expand"""', 'borderaxespad': '(0.0)'}), "(bbox_to_anchor=(0.0, 1.02, 1.0, 0.102), loc=3, ncol=2, mode=\n 'expand', borderaxespad=0.0)\n", (164189, 164283), True, 'import matplotlib.pyplot as plt\n'), ((165036, 165094), 'six.moves.zip', 'zip', (['test_gid_list', 'confidence_dict_list', 'species_set_list'], {}), '(test_gid_list, confidence_dict_list, species_set_list)\n', (165039, 165094), False, 'from six.moves import zip, range\n'), ((165654, 165696), 
'matplotlib.pyplot.savefig', 'plt.savefig', (['fig_path'], {'bbox_inches': '"""tight"""'}), "(fig_path, bbox_inches='tight')\n", (165665, 165696), True, 'import matplotlib.pyplot as plt\n'), ((166553, 166574), 'utool.flatten', 'ut.flatten', (['aids_list'], {}), '(aids_list)\n', (166563, 166574), True, 'import utool as ut\n'), ((171524, 171545), 'utool.flatten', 'ut.flatten', (['aids_list'], {}), '(aids_list)\n', (171534, 171545), True, 'import utool as ut\n'), ((177588, 177624), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize', 'dpi': '(400)'}), '(figsize=figsize, dpi=400)\n', (177598, 177624), True, 'import matplotlib.pyplot as plt\n'), ((177646, 177662), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(131)'], {}), '(131)\n', (177657, 177662), True, 'import matplotlib.pyplot as plt\n'), ((177906, 177934), 'six.moves.zip', 'zip', (['color_list', 'config_list'], {}), '(color_list, config_list)\n', (177909, 177934), False, 'from six.moves import zip, range\n'), ((178142, 178185), 'matplotlib.pyplot.title', 'plt.title', (['"""Precision-Recall Curve"""'], {'y': '(1.19)'}), "('Precision-Recall Curve', y=1.19)\n", (178151, 178185), True, 'import matplotlib.pyplot as plt\n'), ((178190, 178294), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(0.0, 1.02, 1.0, 0.102)', 'loc': '(3)', 'ncol': '(2)', 'mode': '"""expand"""', 'borderaxespad': '(0.0)'}), "(bbox_to_anchor=(0.0, 1.02, 1.0, 0.102), loc=3, ncol=2, mode=\n 'expand', borderaxespad=0.0)\n", (178200, 178294), True, 'import matplotlib.pyplot as plt\n'), ((178317, 178333), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(132)'], {}), '(132)\n', (178328, 178333), True, 'import matplotlib.pyplot as plt\n'), ((178580, 178608), 'six.moves.zip', 'zip', (['color_list', 'config_list'], {}), '(color_list, config_list)\n', (178583, 178608), False, 'from six.moves import zip, range\n'), ((178725, 178755), 'matplotlib.pyplot.title', 'plt.title', (['"""ROC Curve"""'], {'y': '(1.19)'}), "('ROC 
Curve', y=1.19)\n", (178734, 178755), True, 'import matplotlib.pyplot as plt\n'), ((178760, 178864), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(0.0, 1.02, 1.0, 0.102)', 'loc': '(3)', 'ncol': '(2)', 'mode': '"""expand"""', 'borderaxespad': '(0.0)'}), "(bbox_to_anchor=(0.0, 1.02, 1.0, 0.102), loc=3, ncol=2, mode=\n 'expand', borderaxespad=0.0)\n", (178770, 178864), True, 'import matplotlib.pyplot as plt\n'), ((179438, 179454), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(133)'], {}), '(133)\n', (179449, 179454), True, 'import matplotlib.pyplot as plt\n'), ((179490, 179499), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (179497, 179499), True, 'import matplotlib.pyplot as plt\n'), ((180325, 180386), 'matplotlib.pyplot.title', 'plt.title', (['("""Confusion Matrix\nmAP = %0.02f""" % args)'], {'y': '(1.19)'}), '("""Confusion Matrix\nmAP = %0.02f""" % args, y=1.19)\n', (180334, 180386), True, 'import matplotlib.pyplot as plt\n'), ((180514, 180556), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fig_path'], {'bbox_inches': '"""tight"""'}), "(fig_path, bbox_inches='tight')\n", (180525, 180556), True, 'import matplotlib.pyplot as plt\n'), ((181030, 181066), 'six.moves.zip', 'zip', (['test_aid_list_', 'test_flag_list_'], {}), '(test_aid_list_, test_flag_list_)\n', (181033, 181066), False, 'from six.moves import zip, range\n'), ((182955, 182991), 'six.moves.zip', 'zip', (['test_aid_list_', 'test_flag_list_'], {}), '(test_aid_list_, test_flag_list_)\n', (182958, 182991), False, 'from six.moves import zip, range\n'), ((185184, 185220), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize', 'dpi': '(400)'}), '(figsize=figsize, dpi=400)\n', (185194, 185220), True, 'import matplotlib.pyplot as plt\n'), ((186615, 186631), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(221)'], {}), '(221)\n', (186626, 186631), True, 'import matplotlib.pyplot as plt\n'), ((187321, 187419), 'matplotlib.pyplot.title', 'plt.title', (["('Precision-Recall 
Curve (Best: %s, AP = %0.02f)' % (best_label1, best_area1))"], {'y': '(1.1)'}), "('Precision-Recall Curve (Best: %s, AP = %0.02f)' % (best_label1,\n best_area1), y=1.1)\n", (187330, 187419), True, 'import matplotlib.pyplot as plt\n'), ((187423, 187527), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(0.0, 1.02, 1.0, 0.102)', 'loc': '(3)', 'ncol': '(2)', 'mode': '"""expand"""', 'borderaxespad': '(0.0)'}), "(bbox_to_anchor=(0.0, 1.02, 1.0, 0.102), loc=3, ncol=2, mode=\n 'expand', borderaxespad=0.0)\n", (187433, 187527), True, 'import matplotlib.pyplot as plt\n'), ((187550, 187566), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(222)'], {}), '(222)\n', (187561, 187566), True, 'import matplotlib.pyplot as plt\n'), ((188265, 188350), 'matplotlib.pyplot.title', 'plt.title', (["('ROC Curve (Best: %s, AP = %0.02f)' % (best_label2, best_area2))"], {'y': '(1.1)'}), "('ROC Curve (Best: %s, AP = %0.02f)' % (best_label2, best_area2),\n y=1.1)\n", (188274, 188350), True, 'import matplotlib.pyplot as plt\n'), ((188354, 188458), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(0.0, 1.02, 1.0, 0.102)', 'loc': '(3)', 'ncol': '(2)', 'mode': '"""expand"""', 'borderaxespad': '(0.0)'}), "(bbox_to_anchor=(0.0, 1.02, 1.0, 0.102), loc=3, ncol=2, mode=\n 'expand', borderaxespad=0.0)\n", (188364, 188458), True, 'import matplotlib.pyplot as plt\n'), ((188481, 188497), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(223)'], {}), '(223)\n', (188492, 188497), True, 'import matplotlib.pyplot as plt\n'), ((188533, 188542), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (188540, 188542), True, 'import matplotlib.pyplot as plt\n'), ((188964, 189035), 'matplotlib.pyplot.title', 'plt.title', (["('P-R Confusion Matrix (OP = %0.02f)' % (best_conf1,))"], {'y': '(1.12)'}), "('P-R Confusion Matrix (OP = %0.02f)' % (best_conf1,), y=1.12)\n", (188973, 189035), True, 'import matplotlib.pyplot as plt\n'), ((189050, 189066), 'matplotlib.pyplot.subplot', 
'plt.subplot', (['(224)'], {}), '(224)\n', (189061, 189066), True, 'import matplotlib.pyplot as plt\n'), ((189102, 189111), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (189109, 189111), True, 'import matplotlib.pyplot as plt\n'), ((189514, 189585), 'matplotlib.pyplot.title', 'plt.title', (["('ROC Confusion Matrix (OP = %0.02f)' % (best_conf2,))"], {'y': '(1.12)'}), "('ROC Confusion Matrix (OP = %0.02f)' % (best_conf2,), y=1.12)\n", (189523, 189585), True, 'import matplotlib.pyplot as plt\n'), ((189719, 189761), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fig_path'], {'bbox_inches': '"""tight"""'}), "(fig_path, bbox_inches='tight')\n", (189730, 189761), True, 'import matplotlib.pyplot as plt\n'), ((190222, 190281), 'six.moves.zip', 'zip', (['aid_list', 'flag_list', 'part_rowids_list', 'part_types_list'], {}), '(aid_list, flag_list, part_rowids_list, part_types_list)\n', (190225, 190281), False, 'from six.moves import zip, range\n'), ((192219, 192260), 'utool.take_column', 'ut.take_column', (['test_bbox_set', 'take_index'], {}), '(test_bbox_set, take_index)\n', (192233, 192260), True, 'import utool as ut\n'), ((192436, 192468), 'six.moves.zip', 'zip', (['value_list', 'prediction_list'], {}), '(value_list, prediction_list)\n', (192439, 192468), False, 'from six.moves import zip, range\n'), ((192654, 192669), 'numpy.mean', 'np.mean', (['y_list'], {}), '(y_list)\n', (192661, 192669), True, 'import numpy as np\n'), ((192680, 192694), 'numpy.std', 'np.std', (['y_list'], {}), '(y_list)\n', (192686, 192694), True, 'import numpy as np\n'), ((192812, 192910), 'matplotlib.pyplot.plot', 'plt.plot', (['x_list', 'y_list'], {'color': 'color', 'linestyle': '"""None"""', 'marker': 'marker', 'label': 'label', 'alpha': '(0.5)'}), "(x_list, y_list, color=color, linestyle='None', marker=marker,\n label=label, alpha=0.5)\n", (192820, 192910), True, 'import matplotlib.pyplot as plt\n'), ((192913, 193007), 'matplotlib.pyplot.plot', 'plt.plot', (['[index, index + 1]', '[0.0, 
0.0]'], {'color': '(0.2, 0.2, 0.2)', 'linestyle': '"""-"""', 'alpha': '(0.3)'}), "([index, index + 1], [0.0, 0.0], color=(0.2, 0.2, 0.2), linestyle=\n '-', alpha=0.3)\n", (192921, 193007), True, 'import matplotlib.pyplot as plt\n'), ((193176, 193296), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['[index + 0.5]', '[mean]', '[std]'], {'linestyle': '"""None"""', 'color': 'color', 'marker': 'marker', 'zorder': '(999)', 'barsabove': '(True)'}), "([index + 0.5], [mean], [std], linestyle='None', color=color,\n marker=marker, zorder=999, barsabove=True)\n", (193188, 193296), True, 'import matplotlib.pyplot as plt\n'), ((194545, 194568), 'six.moves.zip', 'zip', (['gt_list', 'pred_list'], {}), '(gt_list, pred_list)\n', (194548, 194568), False, 'from six.moves import zip, range\n'), ((194841, 194856), 'numpy.mean', 'np.mean', (['y_list'], {}), '(y_list)\n', (194848, 194856), True, 'import numpy as np\n'), ((194867, 194881), 'numpy.std', 'np.std', (['y_list'], {}), '(y_list)\n', (194873, 194881), True, 'import numpy as np\n'), ((194968, 195066), 'matplotlib.pyplot.plot', 'plt.plot', (['x_list', 'y_list'], {'color': 'color', 'linestyle': '"""None"""', 'marker': 'marker', 'label': 'label', 'alpha': '(0.5)'}), "(x_list, y_list, color=color, linestyle='None', marker=marker,\n label=label, alpha=0.5)\n", (194976, 195066), True, 'import matplotlib.pyplot as plt\n'), ((195381, 195501), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['[index + 0.5]', '[mean]', '[std]'], {'linestyle': '"""None"""', 'color': 'color', 'marker': 'marker', 'zorder': '(999)', 'barsabove': '(True)'}), "([index + 0.5], [mean], [std], linestyle='None', color=color,\n marker=marker, zorder=999, barsabove=True)\n", (195393, 195501), True, 'import matplotlib.pyplot as plt\n'), ((196412, 196434), 'utool.delete', 'ut.delete', (['output_path'], {}), '(output_path)\n', (196421, 196434), True, 'import utool as ut\n'), ((196439, 196464), 'utool.ensuredir', 'ut.ensuredir', (['output_path'], {}), '(output_path)\n', 
(196451, 196464), True, 'import utool as ut\n'), ((198331, 198367), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize', 'dpi': '(400)'}), '(figsize=figsize, dpi=400)\n', (198341, 198367), True, 'import matplotlib.pyplot as plt\n'), ((202033, 202049), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(321)'], {}), '(321)\n', (202044, 202049), True, 'import matplotlib.pyplot as plt\n'), ((202623, 202661), 'matplotlib.pyplot.title', 'plt.title', (['"""X0 Deviation Scatter Plot"""'], {}), "('X0 Deviation Scatter Plot')\n", (202632, 202661), True, 'import matplotlib.pyplot as plt\n'), ((202666, 202770), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(0.0, 1.04, 1.0, 0.102)', 'loc': '(3)', 'ncol': '(2)', 'mode': '"""expand"""', 'borderaxespad': '(0.0)'}), "(bbox_to_anchor=(0.0, 1.04, 1.0, 0.102), loc=3, ncol=2, mode=\n 'expand', borderaxespad=0.0)\n", (202676, 202770), True, 'import matplotlib.pyplot as plt\n'), ((202793, 202809), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(322)'], {}), '(322)\n', (202804, 202809), True, 'import matplotlib.pyplot as plt\n'), ((203383, 203421), 'matplotlib.pyplot.title', 'plt.title', (['"""Y0 Deviation Scatter Plot"""'], {}), "('Y0 Deviation Scatter Plot')\n", (203392, 203421), True, 'import matplotlib.pyplot as plt\n'), ((203426, 203530), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(0.0, 1.04, 1.0, 0.102)', 'loc': '(3)', 'ncol': '(2)', 'mode': '"""expand"""', 'borderaxespad': '(0.0)'}), "(bbox_to_anchor=(0.0, 1.04, 1.0, 0.102), loc=3, ncol=2, mode=\n 'expand', borderaxespad=0.0)\n", (203436, 203530), True, 'import matplotlib.pyplot as plt\n'), ((203553, 203569), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(323)'], {}), '(323)\n', (203564, 203569), True, 'import matplotlib.pyplot as plt\n'), ((204143, 204181), 'matplotlib.pyplot.title', 'plt.title', (['"""X1 Deviation Scatter Plot"""'], {}), "('X1 Deviation Scatter Plot')\n", (204152, 204181), True, 'import matplotlib.pyplot 
as plt\n'), ((204186, 204290), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(0.0, 1.04, 1.0, 0.102)', 'loc': '(3)', 'ncol': '(2)', 'mode': '"""expand"""', 'borderaxespad': '(0.0)'}), "(bbox_to_anchor=(0.0, 1.04, 1.0, 0.102), loc=3, ncol=2, mode=\n 'expand', borderaxespad=0.0)\n", (204196, 204290), True, 'import matplotlib.pyplot as plt\n'), ((204313, 204329), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(324)'], {}), '(324)\n', (204324, 204329), True, 'import matplotlib.pyplot as plt\n'), ((204903, 204941), 'matplotlib.pyplot.title', 'plt.title', (['"""Y1 Deviation Scatter Plot"""'], {}), "('Y1 Deviation Scatter Plot')\n", (204912, 204941), True, 'import matplotlib.pyplot as plt\n'), ((204946, 205050), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(0.0, 1.04, 1.0, 0.102)', 'loc': '(3)', 'ncol': '(2)', 'mode': '"""expand"""', 'borderaxespad': '(0.0)'}), "(bbox_to_anchor=(0.0, 1.04, 1.0, 0.102), loc=3, ncol=2, mode=\n 'expand', borderaxespad=0.0)\n", (204956, 205050), True, 'import matplotlib.pyplot as plt\n'), ((205073, 205089), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(325)'], {}), '(325)\n', (205084, 205089), True, 'import matplotlib.pyplot as plt\n'), ((205576, 205614), 'plottool.distinct_colors', 'pt.distinct_colors', (['(4)'], {'randomize': '(False)'}), '(4, randomize=False)\n', (205594, 205614), True, 'import plottool as pt\n'), ((205674, 205687), 'six.moves.range', 'range', (['rounds'], {}), '(rounds)\n', (205679, 205687), False, 'from six.moves import zip, range\n'), ((206107, 206151), 'matplotlib.pyplot.title', 'plt.title', (['"""Ensemble Deviation Scatter Plot"""'], {}), "('Ensemble Deviation Scatter Plot')\n", (206116, 206151), True, 'import matplotlib.pyplot as plt\n'), ((206156, 206260), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(0.0, 1.04, 1.0, 0.102)', 'loc': '(3)', 'ncol': '(2)', 'mode': '"""expand"""', 'borderaxespad': '(0.0)'}), "(bbox_to_anchor=(0.0, 1.04, 1.0, 0.102), 
loc=3, ncol=2, mode=\n 'expand', borderaxespad=0.0)\n", (206166, 206260), True, 'import matplotlib.pyplot as plt\n'), ((206283, 206299), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(326)'], {}), '(326)\n', (206294, 206299), True, 'import matplotlib.pyplot as plt\n'), ((207146, 207175), 'matplotlib.pyplot.title', 'plt.title', (['"""IoU Scatter Plot"""'], {}), "('IoU Scatter Plot')\n", (207155, 207175), True, 'import matplotlib.pyplot as plt\n'), ((207180, 207284), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(0.0, 1.04, 1.0, 0.102)', 'loc': '(3)', 'ncol': '(2)', 'mode': '"""expand"""', 'borderaxespad': '(0.0)'}), "(bbox_to_anchor=(0.0, 1.04, 1.0, 0.102), loc=3, ncol=2, mode=\n 'expand', borderaxespad=0.0)\n", (207190, 207284), True, 'import matplotlib.pyplot as plt\n'), ((207427, 207469), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fig_path'], {'bbox_inches': '"""tight"""'}), "(fig_path, bbox_inches='tight')\n", (207438, 207469), True, 'import matplotlib.pyplot as plt\n'), ((207963, 207984), 'utool.flatten', 'ut.flatten', (['aids_list'], {}), '(aids_list)\n', (207973, 207984), True, 'import utool as ut\n'), ((208561, 208621), 'six.moves.zip', 'zip', (['aid_list', 'gid_list', 'species_list', 'image_list', 'chip_list'], {}), '(aid_list, gid_list, species_list, image_list, chip_list)\n', (208564, 208621), False, 'from six.moves import zip, range\n'), ((209672, 209720), 'six.moves.zip', 'zip', (['test_aid_list_', 'species_list', 'interest_list'], {}), '(test_aid_list_, species_list, interest_list)\n', (209675, 209720), False, 'from six.moves import zip, range\n'), ((211779, 211827), 'six.moves.zip', 'zip', (['test_aid_list_', 'species_list', 'interest_list'], {}), '(test_aid_list_, species_list, interest_list)\n', (211782, 211827), False, 'from six.moves import zip, range\n'), ((215145, 215172), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (215155, 215172), True, 'import matplotlib.pyplot as 
plt\n'), ((216428, 216444), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(221)'], {}), '(221)\n', (216439, 216444), True, 'import matplotlib.pyplot as plt\n'), ((217156, 217254), 'matplotlib.pyplot.title', 'plt.title', (["('Precision-Recall Curve (Best: %s, AP = %0.02f)' % (best_label1, best_area1))"], {'y': '(1.1)'}), "('Precision-Recall Curve (Best: %s, AP = %0.02f)' % (best_label1,\n best_area1), y=1.1)\n", (217165, 217254), True, 'import matplotlib.pyplot as plt\n'), ((217258, 217362), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(0.0, 1.02, 1.0, 0.102)', 'loc': '(3)', 'ncol': '(2)', 'mode': '"""expand"""', 'borderaxespad': '(0.0)'}), "(bbox_to_anchor=(0.0, 1.02, 1.0, 0.102), loc=3, ncol=2, mode=\n 'expand', borderaxespad=0.0)\n", (217268, 217362), True, 'import matplotlib.pyplot as plt\n'), ((217385, 217401), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(222)'], {}), '(222)\n', (217396, 217401), True, 'import matplotlib.pyplot as plt\n'), ((218094, 218179), 'matplotlib.pyplot.title', 'plt.title', (["('ROC Curve (Best: %s, AP = %0.02f)' % (best_label2, best_area2))"], {'y': '(1.1)'}), "('ROC Curve (Best: %s, AP = %0.02f)' % (best_label2, best_area2),\n y=1.1)\n", (218103, 218179), True, 'import matplotlib.pyplot as plt\n'), ((218183, 218287), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(0.0, 1.02, 1.0, 0.102)', 'loc': '(3)', 'ncol': '(2)', 'mode': '"""expand"""', 'borderaxespad': '(0.0)'}), "(bbox_to_anchor=(0.0, 1.02, 1.0, 0.102), loc=3, ncol=2, mode=\n 'expand', borderaxespad=0.0)\n", (218193, 218287), True, 'import matplotlib.pyplot as plt\n'), ((218301, 218372), 'matplotlib.pyplot.plot', 'plt.plot', (['[0.0, 1.0]', '[0.0, 1.0]'], {'color': '(0.5, 0.5, 0.5)', 'linestyle': '"""--"""'}), "([0.0, 1.0], [0.0, 1.0], color=(0.5, 0.5, 0.5), linestyle='--')\n", (218309, 218372), True, 'import matplotlib.pyplot as plt\n'), ((218385, 218401), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(223)'], {}), '(223)\n', 
(218396, 218401), True, 'import matplotlib.pyplot as plt\n'), ((218437, 218446), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (218444, 218446), True, 'import matplotlib.pyplot as plt\n'), ((218997, 219068), 'matplotlib.pyplot.title', 'plt.title', (["('P-R Confusion Matrix (OP = %0.02f)' % (best_conf1,))"], {'y': '(1.12)'}), "('P-R Confusion Matrix (OP = %0.02f)' % (best_conf1,), y=1.12)\n", (219006, 219068), True, 'import matplotlib.pyplot as plt\n'), ((219083, 219099), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(224)'], {}), '(224)\n', (219094, 219099), True, 'import matplotlib.pyplot as plt\n'), ((219135, 219144), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (219142, 219144), True, 'import matplotlib.pyplot as plt\n'), ((219614, 219685), 'matplotlib.pyplot.title', 'plt.title', (["('ROC Confusion Matrix (OP = %0.02f)' % (best_conf2,))"], {'y': '(1.12)'}), "('ROC Confusion Matrix (OP = %0.02f)' % (best_conf2,), y=1.12)\n", (219623, 219685), True, 'import matplotlib.pyplot as plt\n'), ((219814, 219856), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fig_path'], {'bbox_inches': '"""tight"""'}), "(fig_path, bbox_inches='tight')\n", (219825, 219856), True, 'import matplotlib.pyplot as plt\n'), ((220169, 220193), 'six.moves.zip', 'zip', (['gid_list', 'uuid_list'], {}), '(gid_list, uuid_list)\n', (220172, 220193), False, 'from six.moves import zip, range\n'), ((232180, 232212), 'multiprocessing.freeze_support', 'multiprocessing.freeze_support', ([], {}), '()\n', (232210, 232212), False, 'import multiprocessing\n'), ((232230, 232248), 'utool.doctest_funcs', 'ut.doctest_funcs', ([], {}), '()\n', (232246, 232248), True, 'import utool as ut\n'), ((752, 773), 'six.moves.range', 'range', (['(0)', '(SAMPLES + 1)'], {}), '(0, SAMPLES + 1)\n', (757, 773), False, 'from six.moves import zip, range\n'), ((2359, 2374), 'ibeis.constants.YAWALIAS.keys', 'YAWALIAS.keys', ([], {}), '()\n', (2372, 2374), False, 'from ibeis.constants import YAWALIAS, 
SPECIES_MAPPING\n'), ((3182, 3214), 'six.moves.zip', 'zip', (['label_list', 'confidence_list'], {}), '(label_list, confidence_list)\n', (3185, 3214), False, 'from six.moves import zip, range\n'), ((5143, 5173), 'six.moves.zip', 'zip', (['conf_list', 'x_list', 'y_list'], {}), '(conf_list, x_list, y_list)\n', (5146, 5173), False, 'from six.moves import zip, range\n'), ((5269, 5295), 'numpy.sqrt', 'np.sqrt', (['(x_ * x_ + y_ * y_)'], {}), '(x_ * x_ + y_ * y_)\n', (5276, 5295), True, 'import numpy as np\n'), ((6771, 6797), 'numpy.trapz', 'np.trapz', (['y_list'], {'x': 'x_list'}), '(y_list, x=x_list)\n', (6779, 6797), True, 'import numpy as np\n'), ((7785, 7847), 'matplotlib.pyplot.plot', 'plt.plot', (['best_x_list', 'best_y_list'], {'color': 'color', 'marker': 'marker'}), '(best_x_list, best_y_list, color=color, marker=marker)\n', (7793, 7847), True, 'import matplotlib.pyplot as plt\n'), ((10404, 10425), 'six.moves.range', 'range', (['num_categories'], {}), '(num_categories)\n', (10409, 10425), False, 'from six.moves import zip, range\n'), ((10956, 10981), 'numpy.arange', 'np.arange', (['num_categories'], {}), '(num_categories)\n', (10965, 10981), True, 'import numpy as np\n'), ((11026, 11051), 'numpy.arange', 'np.arange', (['num_categories'], {}), '(num_categories)\n', (11035, 11051), True, 'import numpy as np\n'), ((14719, 14762), 'vtool.rotation_around_bbox_mat3x3', 'vt.rotation_around_bbox_mat3x3', (['theta', 'bbox'], {}), '(theta, bbox)\n', (14749, 14762), True, 'import vtool as vt\n'), ((14829, 14865), 'vtool.verts_from_bbox', 'vt.verts_from_bbox', (['bbox'], {'close': '(True)'}), '(bbox, close=True)\n', (14847, 14865), True, 'import vtool as vt\n'), ((30824, 30856), 'six.moves.zip', 'zip', (['conf_list', 'tp_list', 'fp_list'], {}), '(conf_list, tp_list, fp_list)\n', (30827, 30856), False, 'from six.moves import zip, range\n'), ((31467, 31490), 'numpy.argmax', 'np.argmax', (['best_re_list'], {}), '(best_re_list)\n', (31476, 31490), True, 'import numpy as np\n'), 
((33775, 33807), 'six.moves.zip', 'zip', (['conf_list', 'tp_list', 'fp_list'], {}), '(conf_list, tp_list, fp_list)\n', (33778, 33807), False, 'from six.moves import zip, range\n'), ((125516, 125578), 'matplotlib.pyplot.plot', 'plt.plot', (['best_x_list', 'best_y_list'], {'color': 'color', 'marker': 'marker'}), '(best_x_list, best_y_list, color=color, marker=marker)\n', (125524, 125578), True, 'import matplotlib.pyplot as plt\n'), ((128327, 128343), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(144)'], {}), '(144)\n', (128338, 128343), True, 'import matplotlib.pyplot as plt\n'), ((128387, 128396), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (128394, 128396), True, 'import matplotlib.pyplot as plt\n'), ((129165, 129282), 'matplotlib.pyplot.title', 'plt.title', (['("""Confusion Matrix for Recall >= %0.02f\n(Algo: %s, mAP = %0.02f, OP = %0.02f)"""\n % args)'], {'y': '(1.26)'}), '(\n """Confusion Matrix for Recall >= %0.02f\n(Algo: %s, mAP = %0.02f, OP = %0.02f)"""\n % args, y=1.26)\n', (129174, 129282), True, 'import matplotlib.pyplot as plt\n'), ((129475, 129495), 'numpy.argmax', 'np.argmax', (['area_list'], {}), '(area_list)\n', (129484, 129495), True, 'import numpy as np\n'), ((142984, 143013), 'os.path.join', 'join', (['output_path', '"""positive"""'], {}), "(output_path, 'positive')\n", (142988, 143013), False, 'from os.path import expanduser, join, abspath\n'), ((143038, 143067), 'os.path.join', 'join', (['output_path', '"""negative"""'], {}), "(output_path, 'negative')\n", (143042, 143067), False, 'from os.path import expanduser, join, abspath\n'), ((143076, 143098), 'utool.delete', 'ut.delete', (['output_path'], {}), '(output_path)\n', (143085, 143098), True, 'import utool as ut\n'), ((143107, 143132), 'utool.ensuredir', 'ut.ensuredir', (['output_path'], {}), '(output_path)\n', (143119, 143132), True, 'import utool as ut\n'), ((143141, 143168), 'utool.ensuredir', 'ut.ensuredir', (['positive_path'], {}), '(positive_path)\n', (143153, 143168), True, 
'import utool as ut\n'), ((143177, 143204), 'utool.ensuredir', 'ut.ensuredir', (['negative_path'], {}), '(negative_path)\n', (143189, 143204), True, 'import utool as ut\n'), ((143338, 143384), 'six.moves.zip', 'zip', (['test_gid_set', 'label_list', 'prediction_list'], {}), '(test_gid_set, label_list, prediction_list)\n', (143341, 143384), False, 'from six.moves import zip, range\n'), ((158621, 158646), 'utool.ensuredir', 'ut.ensuredir', (['output_path'], {}), '(output_path)\n', (158633, 158646), True, 'import utool as ut\n'), ((158864, 158938), 'six.moves.zip', 'zip', (['test_gid_list', 'thumbnail_list', 'species_set_list', 'confidence_dict_list'], {}), '(test_gid_list, thumbnail_list, species_set_list, confidence_dict_list)\n', (158867, 158938), False, 'from six.moves import zip, range\n'), ((161437, 161474), 'six.moves.zip', 'zip', (['test_gid_list_', 'test_label_list_'], {}), '(test_gid_list_, test_label_list_)\n', (161440, 161474), False, 'from six.moves import zip, range\n'), ((167270, 167302), 'utool.compress', 'ut.compress', (['aid_list', 'flag_list'], {}), '(aid_list, flag_list)\n', (167281, 167302), True, 'import utool as ut\n'), ((167326, 167362), 'utool.compress', 'ut.compress', (['species_list', 'flag_list'], {}), '(species_list, flag_list)\n', (167337, 167362), True, 'import utool as ut\n'), ((167388, 167426), 'utool.compress', 'ut.compress', (['viewpoint_list', 'flag_list'], {}), '(viewpoint_list, flag_list)\n', (167399, 167426), True, 'import utool as ut\n'), ((173988, 174009), 'utool.flatten', 'ut.flatten', (['aids_list'], {}), '(aids_list)\n', (173998, 174009), True, 'import utool as ut\n'), ((183819, 183848), 'os.path.join', 'join', (['output_path', '"""positive"""'], {}), "(output_path, 'positive')\n", (183823, 183848), False, 'from os.path import expanduser, join, abspath\n'), ((183873, 183902), 'os.path.join', 'join', (['output_path', '"""negative"""'], {}), "(output_path, 'negative')\n", (183877, 183902), False, 'from os.path import 
expanduser, join, abspath\n'), ((183911, 183933), 'utool.delete', 'ut.delete', (['output_path'], {}), '(output_path)\n', (183920, 183933), True, 'import utool as ut\n'), ((183942, 183967), 'utool.ensuredir', 'ut.ensuredir', (['output_path'], {}), '(output_path)\n', (183954, 183967), True, 'import utool as ut\n'), ((183976, 184003), 'utool.ensuredir', 'ut.ensuredir', (['positive_path'], {}), '(positive_path)\n', (183988, 184003), True, 'import utool as ut\n'), ((184012, 184039), 'utool.ensuredir', 'ut.ensuredir', (['negative_path'], {}), '(negative_path)\n', (184024, 184039), True, 'import utool as ut\n'), ((184249, 184306), 'six.moves.zip', 'zip', (['test_aid_set', 'chip_list', 'label_list', 'prediction_list'], {}), '(test_aid_set, chip_list, label_list, prediction_list)\n', (184252, 184306), False, 'from six.moves import zip, range\n'), ((192482, 192514), 'random.uniform', 'random.uniform', (['index', '(index + 1)'], {}), '(index, index + 1)\n', (192496, 192514), False, 'import random\n'), ((193034, 193133), 'matplotlib.pyplot.plot', 'plt.plot', (['[index + 1, index + 1]', '[-1.0, 1.0]'], {'color': '(0.2, 0.2, 0.2)', 'linestyle': '"""--"""', 'alpha': '(0.1)'}), "([index + 1, index + 1], [-1.0, 1.0], color=(0.2, 0.2, 0.2),\n linestyle='--', alpha=0.1)\n", (193042, 193133), True, 'import matplotlib.pyplot as plt\n'), ((194630, 194662), 'random.uniform', 'random.uniform', (['index', '(index + 1)'], {}), '(index, index + 1)\n', (194644, 194662), False, 'import random\n'), ((195110, 195211), 'matplotlib.pyplot.plot', 'plt.plot', (['[index, index + 1]', '[y_value, y_value]'], {'color': '(0.2, 0.2, 0.2)', 'linestyle': '"""-"""', 'alpha': '(0.3)'}), "([index, index + 1], [y_value, y_value], color=(0.2, 0.2, 0.2),\n linestyle='-', alpha=0.3)\n", (195118, 195211), True, 'import matplotlib.pyplot as plt\n'), ((195240, 195338), 'matplotlib.pyplot.plot', 'plt.plot', (['[index + 1, index + 1]', '[0.0, 1.0]'], {'color': '(0.2, 0.2, 0.2)', 'linestyle': '"""--"""', 'alpha': 
'(0.1)'}), "([index + 1, index + 1], [0.0, 1.0], color=(0.2, 0.2, 0.2),\n linestyle='--', alpha=0.1)\n", (195248, 195338), True, 'import matplotlib.pyplot as plt\n'), ((196347, 196405), 'os.path.join', 'join', (['"""~"""', '"""Desktop"""', "('canonical-regression-%d' % (index,))"], {}), "('~', 'Desktop', 'canonical-regression-%d' % (index,))\n", (196351, 196405), False, 'from os.path import expanduser, join, abspath\n'), ((196650, 196724), 'six.moves.zip', 'zip', (['test_aid_set', 'chip_list', 'test_bbox_set', 'prediction_list', 'overlap_list'], {}), '(test_aid_set, chip_list, test_bbox_set, prediction_list, overlap_list)\n', (196653, 196724), False, 'from six.moves import zip, range\n'), ((197372, 197429), 'cv2.line', 'cv2.line', (['chipa', '(x0a, y0a)', '(x0a, y1a)', 'color_list[0]', '(3)'], {}), '(chipa, (x0a, y0a), (x0a, y1a), color_list[0], 3)\n', (197380, 197429), False, 'import cv2\n'), ((197446, 197503), 'cv2.line', 'cv2.line', (['chipa', '(x0a, y0a)', '(x1a, y0a)', 'color_list[1]', '(3)'], {}), '(chipa, (x0a, y0a), (x1a, y0a), color_list[1], 3)\n', (197454, 197503), False, 'import cv2\n'), ((197520, 197577), 'cv2.line', 'cv2.line', (['chipa', '(x1a, y0a)', '(x1a, y1a)', 'color_list[2]', '(3)'], {}), '(chipa, (x1a, y0a), (x1a, y1a), color_list[2], 3)\n', (197528, 197577), False, 'import cv2\n'), ((197594, 197651), 'cv2.line', 'cv2.line', (['chipa', '(x0a, y1a)', '(x1a, y1a)', 'color_list[3]', '(3)'], {}), '(chipa, (x0a, y1a), (x1a, y1a), color_list[3], 3)\n', (197602, 197651), False, 'import cv2\n'), ((197669, 197726), 'cv2.line', 'cv2.line', (['chipb', '(x0b, y0b)', '(x0b, y1b)', 'color_list[0]', '(3)'], {}), '(chipb, (x0b, y0b), (x0b, y1b), color_list[0], 3)\n', (197677, 197726), False, 'import cv2\n'), ((197743, 197800), 'cv2.line', 'cv2.line', (['chipb', '(x0b, y0b)', '(x1b, y0b)', 'color_list[1]', '(3)'], {}), '(chipb, (x0b, y0b), (x1b, y0b), color_list[1], 3)\n', (197751, 197800), False, 'import cv2\n'), ((197817, 197874), 'cv2.line', 'cv2.line', 
(['chipb', '(x1b, y0b)', '(x1b, y1b)', 'color_list[2]', '(3)'], {}), '(chipb, (x1b, y0b), (x1b, y1b), color_list[2], 3)\n', (197825, 197874), False, 'import cv2\n'), ((197891, 197948), 'cv2.line', 'cv2.line', (['chipb', '(x0b, y1b)', '(x1b, y1b)', 'color_list[3]', '(3)'], {}), '(chipb, (x0b, y1b), (x1b, y1b), color_list[3], 3)\n', (197899, 197948), False, 'import cv2\n'), ((197967, 197992), 'numpy.hstack', 'np.hstack', (['(chipa, chipb)'], {}), '((chipa, chipb))\n', (197976, 197992), True, 'import numpy as np\n'), ((198020, 198109), 'os.path.join', 'join', (['output_path', "('canonical-regression-iou-%0.02f-aid-%s.jpg' % (overlap, test_aid))"], {}), "(output_path, 'canonical-regression-iou-%0.02f-aid-%s.jpg' % (overlap,\n test_aid))\n", (198024, 198109), False, 'from os.path import expanduser, join, abspath\n'), ((198116, 198152), 'cv2.imwrite', 'cv2.imwrite', (['canvas_filepath', 'canvas'], {}), '(canvas_filepath, canvas)\n', (198127, 198152), False, 'import cv2\n'), ((202494, 202522), 'six.moves.zip', 'zip', (['color_list', 'config_list'], {}), '(color_list, config_list)\n', (202497, 202522), False, 'from six.moves import zip, range\n'), ((203254, 203282), 'six.moves.zip', 'zip', (['color_list', 'config_list'], {}), '(color_list, config_list)\n', (203257, 203282), False, 'from six.moves import zip, range\n'), ((204014, 204042), 'six.moves.zip', 'zip', (['color_list', 'config_list'], {}), '(color_list, config_list)\n', (204017, 204042), False, 'from six.moves import zip, range\n'), ((204774, 204802), 'six.moves.zip', 'zip', (['color_list', 'config_list'], {}), '(color_list, config_list)\n', (204777, 204802), False, 'from six.moves import zip, range\n'), ((205819, 205851), 'six.moves.zip', 'zip', (['attribute_list', 'color_list_'], {}), '(attribute_list, color_list_)\n', (205822, 205851), False, 'from six.moves import zip, range\n'), ((206627, 206655), 'six.moves.zip', 'zip', (['color_list', 'config_list'], {}), '(color_list, config_list)\n', (206630, 206655), 
False, 'from six.moves import zip, range\n'), ((207730, 207755), 'utool.ensuredir', 'ut.ensuredir', (['output_path'], {}), '(output_path)\n', (207742, 207755), True, 'import utool as ut\n'), ((208423, 208445), 'vtool.imread', 'vt.imread', (['hough_cpath'], {}), '(hough_cpath)\n', (208432, 208445), True, 'import vtool as vt\n'), ((208728, 208755), 'vtool.resize_mask', 'vt.resize_mask', (['image', 'chip'], {}), '(image, chip)\n', (208742, 208755), True, 'import vtool as vt\n'), ((208774, 208810), 'vtool.blend_images_multiply', 'vt.blend_images_multiply', (['chip', 'mask'], {}), '(chip, mask)\n', (208798, 208810), True, 'import vtool as vt\n'), ((208854, 208872), 'numpy.around', 'np.around', (['blended'], {}), '(blended)\n', (208863, 208872), True, 'import numpy as np\n'), ((209004, 209036), 'numpy.hstack', 'np.hstack', (['(chip, mask, blended)'], {}), '((chip, mask, blended))\n', (209013, 209036), True, 'import numpy as np\n'), ((209063, 209129), 'os.path.join', 'join', (['output_path', "('background.%s.%d.%d.png' % (species, gid, aid))"], {}), "(output_path, 'background.%s.%d.%d.png' % (species, gid, aid))\n", (209067, 209129), False, 'from os.path import expanduser, join, abspath\n'), ((209140, 209176), 'cv2.imwrite', 'cv2.imwrite', (['output_filepath', 'canvas'], {}), '(output_filepath, canvas)\n', (209151, 209176), False, 'import cv2\n'), ((212755, 212777), 'utool.delete', 'ut.delete', (['output_path'], {}), '(output_path)\n', (212764, 212777), True, 'import utool as ut\n'), ((212786, 212811), 'utool.ensuredir', 'ut.ensuredir', (['output_path'], {}), '(output_path)\n', (212798, 212811), True, 'import utool as ut\n'), ((212915, 212977), 'six.moves.zip', 'zip', (['test_gid_list', 'test_aid_list', 'label_list', 'prediction_list'], {}), '(test_gid_list, test_aid_list, label_list, prediction_list)\n', (212918, 212977), False, 'from six.moves import zip, range\n'), ((1497, 1515), 'numpy.around', 'np.around', (['t_width'], {}), '(t_width)\n', (1506, 1515), True, 'import 
numpy as np\n'), ((1522, 1541), 'numpy.around', 'np.around', (['t_height'], {}), '(t_height)\n', (1531, 1541), True, 'import numpy as np\n'), ((6157, 6187), 'six.moves.zip', 'zip', (['x_list', 'y_list', 'conf_list'], {}), '(x_list, y_list, conf_list)\n', (6160, 6187), False, 'from six.moves import zip, range\n'), ((7034, 7064), 'six.moves.zip', 'zip', (['x_list', 'y_list', 'conf_list'], {}), '(x_list, y_list, conf_list)\n', (7037, 7064), False, 'from six.moves import zip, range\n'), ((12679, 12694), 'six.moves.range', 'range', (['num_pred'], {}), '(num_pred)\n', (12684, 12694), False, 'from six.moves import zip, range\n'), ((12723, 12749), 'numpy.argmax', 'np.argmax', (['overlap'], {'axis': '(0)'}), '(overlap, axis=0)\n', (12732, 12749), True, 'import numpy as np\n'), ((12781, 12804), 'numpy.max', 'np.max', (['overlap'], {'axis': '(0)'}), '(overlap, axis=0)\n', (12787, 12804), True, 'import numpy as np\n'), ((13002, 13072), 'six.moves.zip', 'zip', (['confidence_list', 'max_overlap_list', 'pred_index_list', 'gt_index_list'], {}), '(confidence_list, max_overlap_list, pred_index_list, gt_index_list)\n', (13005, 13072), False, 'from six.moves import zip, range\n'), ((20094, 20125), 'six.moves.zip', 'zip', (['test_gid_list', 'length_list'], {}), '(test_gid_list, length_list)\n', (20097, 20125), False, 'from six.moves import zip, range\n'), ((20183, 20210), 'six.moves.zip', 'zip', (['size_list', 'length_list'], {}), '(size_list, length_list)\n', (20186, 20210), False, 'from six.moves import zip, range\n'), ((23786, 23814), 'six.moves.zip', 'zip', (['uuid_list', 'results_list'], {}), '(uuid_list, results_list)\n', (23789, 23814), False, 'from six.moves import zip, range\n'), ((34384, 34394), 'utool.embed', 'ut.embed', ([], {}), '()\n', (34392, 34394), True, 'import utool as ut\n'), ((124535, 124563), 'six.moves.zip', 'zip', (['color_list', 'config_list'], {}), '(color_list, config_list)\n', (124538, 124563), False, 'from six.moves import zip, range\n'), ((126221, 126249), 
'six.moves.zip', 'zip', (['color_list', 'config_list'], {}), '(color_list, config_list)\n', (126224, 126249), False, 'from six.moves import zip, range\n'), ((140598, 140635), 'six.moves.zip', 'zip', (['prediction_list', 'confidence_list'], {}), '(prediction_list, confidence_list)\n', (140601, 140635), False, 'from six.moves import zip, range\n'), ((142633, 142670), 'six.moves.zip', 'zip', (['prediction_list', 'confidence_list'], {}), '(prediction_list, confidence_list)\n', (142636, 142670), False, 'from six.moves import zip, range\n'), ((143507, 143546), 'cv2.resize', 'cv2.resize', (['image', '(192, 192)'], {}), '(image, (192, 192), **warpkw)\n', (143517, 143546), False, 'import cv2\n'), ((143766, 143798), 'os.path.join', 'join', (['image_path', 'image_filename'], {}), '(image_path, image_filename)\n', (143770, 143798), False, 'from os.path import expanduser, join, abspath\n'), ((143835, 143869), 'cv2.imwrite', 'cv2.imwrite', (['image_filepath', 'image'], {}), '(image_filepath, image)\n', (143846, 143869), False, 'import cv2\n'), ((147725, 147753), 'six.moves.zip', 'zip', (['color_list', 'config_list'], {}), '(color_list, config_list)\n', (147728, 147753), False, 'from six.moves import zip, range\n'), ((148898, 148926), 'six.moves.zip', 'zip', (['color_list', 'config_list'], {}), '(color_list, config_list)\n', (148901, 148926), False, 'from six.moves import zip, range\n'), ((151221, 151255), 'os.path.join', 'join', (['"""~"""', '"""Desktop"""', 'fig_filename'], {}), "('~', 'Desktop', fig_filename)\n", (151225, 151255), False, 'from os.path import expanduser, join, abspath\n'), ((159380, 159414), 'os.path.join', 'join', (['output_path', 'output_filename'], {}), '(output_path, output_filename)\n', (159384, 159414), False, 'from os.path import expanduser, join, abspath\n'), ((159427, 159466), 'cv2.imwrite', 'cv2.imwrite', (['output_filepath', 'thumbnail'], {}), '(output_filepath, thumbnail)\n', (159438, 159466), False, 'import cv2\n'), ((165613, 165647), 
'os.path.join', 'join', (['"""~"""', '"""Desktop"""', 'fig_filename'], {}), "('~', 'Desktop', fig_filename)\n", (165617, 165647), False, 'from os.path import expanduser, join, abspath\n'), ((167031, 167064), 'six.moves.zip', 'zip', (['species_list', 'viewpoint_list'], {}), '(species_list, viewpoint_list)\n', (167034, 167064), False, 'from six.moves import zip, range\n'), ((167183, 167216), 'six.moves.zip', 'zip', (['species_list', 'viewpoint_list'], {}), '(species_list, viewpoint_list)\n', (167186, 167216), False, 'from six.moves import zip, range\n'), ((167548, 167581), 'six.moves.zip', 'zip', (['species_list', 'viewpoint_list'], {}), '(species_list, viewpoint_list)\n', (167551, 167581), False, 'from six.moves import zip, range\n'), ((168279, 168311), 'six.moves.zip', 'zip', (['label_list', 'confidence_list'], {}), '(label_list, confidence_list)\n', (168282, 168311), False, 'from six.moves import zip, range\n'), ((171867, 171900), 'six.moves.zip', 'zip', (['species_list', 'viewpoint_list'], {}), '(species_list, viewpoint_list)\n', (171870, 171900), False, 'from six.moves import zip, range\n'), ((171972, 171997), 'six.moves.zip', 'zip', (['aid_list', 'label_list'], {}), '(aid_list, label_list)\n', (171975, 171997), False, 'from six.moves import zip, range\n'), ((172472, 172505), 'six.moves.zip', 'zip', (['species_list', 'viewpoint_list'], {}), '(species_list, viewpoint_list)\n', (172475, 172505), False, 'from six.moves import zip, range\n'), ((180473, 180507), 'os.path.join', 'join', (['"""~"""', '"""Desktop"""', 'fig_filename'], {}), "('~', 'Desktop', fig_filename)\n", (180477, 180507), False, 'from os.path import expanduser, join, abspath\n'), ((181543, 181580), 'six.moves.zip', 'zip', (['prediction_list', 'confidence_list'], {}), '(prediction_list, confidence_list)\n', (181546, 181580), False, 'from six.moves import zip, range\n'), ((183469, 183506), 'six.moves.zip', 'zip', (['prediction_list', 'confidence_list'], {}), '(prediction_list, confidence_list)\n', 
(183472, 183506), False, 'from six.moves import zip, range\n'), ((184639, 184671), 'os.path.join', 'join', (['image_path', 'image_filename'], {}), '(image_path, image_filename)\n', (184643, 184671), False, 'from os.path import expanduser, join, abspath\n'), ((184708, 184741), 'cv2.imwrite', 'cv2.imwrite', (['image_filepath', 'chip'], {}), '(image_filepath, chip)\n', (184719, 184741), False, 'import cv2\n'), ((186950, 186978), 'six.moves.zip', 'zip', (['color_list', 'config_list'], {}), '(color_list, config_list)\n', (186953, 186978), False, 'from six.moves import zip, range\n'), ((187894, 187922), 'six.moves.zip', 'zip', (['color_list', 'config_list'], {}), '(color_list, config_list)\n', (187897, 187922), False, 'from six.moves import zip, range\n'), ((189678, 189712), 'os.path.join', 'join', (['"""~"""', '"""Desktop"""', 'fig_filename'], {}), "('~', 'Desktop', fig_filename)\n", (189682, 189712), False, 'from os.path import expanduser, join, abspath\n'), ((190429, 190465), 'six.moves.zip', 'zip', (['part_rowid_list', 'part_type_list'], {}), '(part_rowid_list, part_type_list)\n', (190432, 190465), False, 'from six.moves import zip, range\n'), ((196979, 196997), 'numpy.around', 'np.around', (['(x0a * w)'], {}), '(x0a * w)\n', (196988, 196997), True, 'import numpy as np\n'), ((197017, 197035), 'numpy.around', 'np.around', (['(y0a * h)'], {}), '(y0a * h)\n', (197026, 197035), True, 'import numpy as np\n'), ((197055, 197073), 'numpy.around', 'np.around', (['(x1a * w)'], {}), '(x1a * w)\n', (197064, 197073), True, 'import numpy as np\n'), ((197093, 197111), 'numpy.around', 'np.around', (['(y1a * h)'], {}), '(y1a * h)\n', (197102, 197111), True, 'import numpy as np\n'), ((197132, 197150), 'numpy.around', 'np.around', (['(x0b * w)'], {}), '(x0b * w)\n', (197141, 197150), True, 'import numpy as np\n'), ((197170, 197188), 'numpy.around', 'np.around', (['(y0b * h)'], {}), '(y0b * h)\n', (197179, 197188), True, 'import numpy as np\n'), ((197208, 197226), 'numpy.around', 
'np.around', (['(x1b * w)'], {}), '(x1b * w)\n', (197217, 197226), True, 'import numpy as np\n'), ((197246, 197264), 'numpy.around', 'np.around', (['(y1b * h)'], {}), '(y1b * h)\n', (197255, 197264), True, 'import numpy as np\n'), ((207386, 207420), 'os.path.join', 'join', (['"""~"""', '"""Desktop"""', 'fig_filename'], {}), "('~', 'Desktop', fig_filename)\n", (207390, 207420), False, 'from os.path import expanduser, join, abspath\n'), ((208100, 208127), 'six.moves.zip', 'zip', (['aid_list', 'species_list'], {}), '(aid_list, species_list)\n', (208103, 208127), False, 'from six.moves import zip, range\n'), ((210319, 210356), 'six.moves.zip', 'zip', (['prediction_list', 'confidence_list'], {}), '(prediction_list, confidence_list)\n', (210322, 210356), False, 'from six.moves import zip, range\n'), ((212426, 212463), 'six.moves.zip', 'zip', (['prediction_list', 'confidence_list'], {}), '(prediction_list, confidence_list)\n', (212429, 212463), False, 'from six.moves import zip, range\n'), ((214578, 214611), 'os.path.join', 'join', (['output_path', 'image_filename'], {}), '(output_path, image_filename)\n', (214582, 214611), False, 'from os.path import expanduser, join, abspath\n'), ((214624, 214658), 'cv2.imwrite', 'cv2.imwrite', (['image_filepath', 'image'], {}), '(image_filepath, image)\n', (214635, 214658), False, 'import cv2\n'), ((216786, 216814), 'six.moves.zip', 'zip', (['color_list', 'config_list'], {}), '(color_list, config_list)\n', (216789, 216814), False, 'from six.moves import zip, range\n'), ((217724, 217752), 'six.moves.zip', 'zip', (['color_list', 'config_list'], {}), '(color_list, config_list)\n', (217727, 217752), False, 'from six.moves import zip, range\n'), ((219773, 219807), 'os.path.join', 'join', (['"""~"""', '"""Desktop"""', 'fig_filename'], {}), "('~', 'Desktop', fig_filename)\n", (219777, 219807), False, 'from os.path import expanduser, join, abspath\n'), ((6585, 6604), 'six.moves.zip', 'zip', (['x_list', 'y_list'], {}), '(x_list, y_list)\n', 
(6588, 6604), False, 'from six.moves import zip, range\n'), ((8701, 8731), 'six.moves.zip', 'zip', (['index_list', 'category_list'], {}), '(index_list, category_list)\n', (8704, 8731), False, 'from six.moves import zip, range\n'), ((14953, 14968), 'numpy.array', 'np.array', (['verts'], {}), '(verts)\n', (14961, 14968), True, 'import numpy as np\n'), ((16455, 16498), 'vtool.rotation_around_bbox_mat3x3', 'vt.rotation_around_bbox_mat3x3', (['theta', 'bbox'], {}), '(theta, bbox)\n', (16485, 16498), True, 'import vtool as vt\n'), ((16581, 16617), 'vtool.verts_from_bbox', 'vt.verts_from_bbox', (['bbox'], {'close': '(True)'}), '(bbox, close=True)\n', (16599, 16617), True, 'import vtool as vt\n'), ((21157, 21207), 'functools.partial', 'partial', (['features_lazy_func', 'test_gid', 'test_offset'], {}), '(features_lazy_func, test_gid, test_offset)\n', (21164, 21207), False, 'from functools import partial\n'), ((21307, 21338), 'six.moves.zip', 'zip', (['test_gid_list', 'length_list'], {}), '(test_gid_list, length_list)\n', (21310, 21338), False, 'from six.moves import zip, range\n'), ((23606, 23623), 'six.moves.zip', 'zip', (['*zipped_list'], {}), '(*zipped_list)\n', (23609, 23623), False, 'from six.moves import zip, range\n'), ((123578, 123598), 'os.path.join', 'join', (['"""~"""', '"""Desktop"""'], {}), "('~', 'Desktop')\n", (123582, 123598), False, 'from os.path import expanduser, join, abspath\n'), ((142924, 142957), 'os.path.join', 'join', (['"""~"""', '"""Desktop"""', 'output_path'], {}), "('~', 'Desktop', output_path)\n", (142928, 142957), False, 'from os.path import expanduser, join, abspath\n'), ((169189, 169212), 'six.moves.zip', 'zip', (['cur_list', 'new_list'], {}), '(cur_list, new_list)\n', (169192, 169212), False, 'from six.moves import zip, range\n'), ((183759, 183792), 'os.path.join', 'join', (['"""~"""', '"""Desktop"""', 'output_path'], {}), "('~', 'Desktop', output_path)\n", (183763, 183792), False, 'from os.path import expanduser, join, abspath\n'), 
((196157, 196181), 'numpy.around', 'np.around', (['(255.0 * value)'], {}), '(255.0 * value)\n', (196166, 196181), True, 'import numpy as np\n'), ((207685, 207719), 'os.path.join', 'join', (['"""~"""', '"""Desktop"""', '"""background"""'], {}), "('~', 'Desktop', 'background')\n", (207689, 207719), False, 'from os.path import expanduser, join, abspath\n'), ((212711, 212744), 'os.path.join', 'join', (['"""~"""', '"""Desktop"""', 'output_path'], {}), "('~', 'Desktop', output_path)\n", (212715, 212744), False, 'from os.path import expanduser, join, abspath\n'), ((214194, 214248), 'cv2.rectangle', 'cv2.rectangle', (['image', '(xtl, ytl)', '(xbr, ybr)', 'color', '(4)'], {}), '(image, (xtl, ytl), (xbr, ybr), color, 4)\n', (214207, 214248), False, 'import cv2\n'), ((214418, 214488), 'cv2.rectangle', 'cv2.rectangle', (['image', '(xtl - 4, ytl - 4)', '(xbr + 4, ybr + 4)', 'color', '(4)'], {}), '(image, (xtl - 4, ytl - 4), (xbr + 4, ybr + 4), color, 4)\n', (214431, 214488), False, 'import cv2\n'), ((7109, 7123), 'numpy.isnan', 'np.isnan', (['conf'], {}), '(conf)\n', (7117, 7123), True, 'import numpy as np\n'), ((21243, 21256), 'six.moves.range', 'range', (['length'], {}), '(length)\n', (21248, 21256), False, 'from six.moves import zip, range\n'), ((213830, 213856), 'numpy.round', 'np.round', (['(xtl / w * width_)'], {}), '(xtl / w * width_)\n', (213838, 213856), True, 'import numpy as np\n'), ((213886, 213913), 'numpy.round', 'np.round', (['(ytl / h * height_)'], {}), '(ytl / h * height_)\n', (213894, 213913), True, 'import numpy as np\n'), ((213943, 213969), 'numpy.round', 'np.round', (['(xbr / w * width_)'], {}), '(xbr / w * width_)\n', (213951, 213969), True, 'import numpy as np\n'), ((213999, 214026), 'numpy.round', 'np.round', (['(ybr / h * height_)'], {}), '(ybr / h * height_)\n', (214007, 214026), True, 'import numpy as np\n'), ((16721, 16736), 'numpy.array', 'np.array', (['verts'], {}), '(verts)\n', (16729, 16736), True, 'import numpy as np\n'), ((15060, 15079), 
'numpy.round', 'np.round', (['trans_pts'], {}), '(trans_pts)\n', (15068, 15079), True, 'import numpy as np\n'), ((16844, 16863), 'numpy.round', 'np.round', (['trans_pts'], {}), '(trans_pts)\n', (16852, 16863), True, 'import numpy as np\n')] |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import numpy as np
import mxnet as mx
from mxnet.gluon import HybridBlock
@mx.util.use_np
def test_getitem_hybridized():
class picking_np(HybridBlock):
def __init__(self, **kwargs):
super(picking_np, self).__init__(**kwargs)
def forward(self, sequence, pick_ids):
"""
new implementation in deep numpy
"""
idx_arange = mx.npx.arange_like(pick_ids.reshape((-1, )), axis=0)
batch_idx = mx.np.floor(idx_arange / 2).astype(np.int32)
encoded = sequence[batch_idx, pick_ids.reshape((-1,))]
encoded = mx.npx.reshape_like(encoded, pick_ids, lhs_begin=-2, lhs_end=-1, rhs_begin=0)
return encoded
sequence = mx.np.array(np.random.normal(0, 1, (8, 32, 768)), dtype=np.float32)
# pick_ids: [batch_size, picked_index]
pick_ids = mx.np.random.randint(0, 32, (8,2), dtype=np.int32)
picker_np = picking_np()
seq_np = sequence
np_output = picker_np(seq_np, pick_ids)
seq_np.attach_grad()
with mx.autograd.record():
z = picker_np(seq_np, pick_ids)
z.backward()
picker_np.initialize()
picker_np.hybridize()
nd_output_hybridized = picker_np(sequence, pick_ids)
seq_np_hybridized = sequence
seq_np_hybridized.attach_grad()
with mx.autograd.record():
z_hybridized = picker_np(seq_np_hybridized, pick_ids.as_np_ndarray())
z_hybridized.backward()
mx.test_utils.assert_almost_equal(nd_output_hybridized.asnumpy(), np_output.asnumpy())
mx.test_utils.assert_almost_equal(seq_np.grad.asnumpy(), seq_np_hybridized.grad.asnumpy())
def test_getitem_hybridized_no_F_argument():
class picking_np(HybridBlock):
def __init__(self, **kwargs):
super(picking_np, self).__init__(**kwargs)
def forward(self, sequence, pick_ids):
"""
new implementation in deep numpy
"""
idx_arange = mx.npx.arange_like(pick_ids.reshape((-1, )), axis=0)
batch_idx = np.floor(idx_arange / 2).astype(np.int32)
encoded = sequence[batch_idx, pick_ids.reshape((-1,))]
encoded = mx.npx.reshape_like(encoded, pick_ids, lhs_begin=-2, lhs_end=-1, rhs_begin=0)
return encoded
sequence = mx.nd.array(np.random.normal(0, 1, (8, 32, 768)), dtype=np.float32)
# pick_ids: [batch_size, picked_index]
pick_ids = mx.nd.random.randint(0, 32, (8,2), dtype=np.int32)
mx.npx.set_np()
picker_np = picking_np()
seq_np = sequence.as_np_ndarray()
np_output = picker_np(seq_np, pick_ids.as_np_ndarray())
seq_np.attach_grad()
with mx.autograd.record():
z = picker_np(seq_np, pick_ids.as_np_ndarray())
z.backward()
picker_np.initialize()
picker_np.hybridize()
nd_output_hybridized = picker_np(sequence.as_np_ndarray(), pick_ids.as_np_ndarray())
seq_np_hybridized = sequence.as_np_ndarray()
seq_np_hybridized.attach_grad()
with mx.autograd.record():
z_hybridized = picker_np(seq_np_hybridized, pick_ids.as_np_ndarray())
z_hybridized.backward()
mx.npx.reset_np()
mx.test_utils.assert_almost_equal(nd_output_hybridized.asnumpy(), np_output.asnumpy())
mx.test_utils.assert_almost_equal(z_hybridized.asnumpy(), np_output.asnumpy())
mx.test_utils.assert_almost_equal(seq_np.grad.asnumpy(), seq_np_hybridized.grad.asnumpy())
| [
"numpy.random.normal",
"mxnet.autograd.record",
"mxnet.npx.reset_np",
"mxnet.npx.reshape_like",
"numpy.floor",
"mxnet.np.random.randint",
"mxnet.np.floor",
"mxnet.npx.set_np",
"mxnet.nd.random.randint"
] | [((1658, 1709), 'mxnet.np.random.randint', 'mx.np.random.randint', (['(0)', '(32)', '(8, 2)'], {'dtype': 'np.int32'}), '(0, 32, (8, 2), dtype=np.int32)\n', (1678, 1709), True, 'import mxnet as mx\n'), ((3201, 3252), 'mxnet.nd.random.randint', 'mx.nd.random.randint', (['(0)', '(32)', '(8, 2)'], {'dtype': 'np.int32'}), '(0, 32, (8, 2), dtype=np.int32)\n', (3221, 3252), True, 'import mxnet as mx\n'), ((3257, 3272), 'mxnet.npx.set_np', 'mx.npx.set_np', ([], {}), '()\n', (3270, 3272), True, 'import mxnet as mx\n'), ((3898, 3915), 'mxnet.npx.reset_np', 'mx.npx.reset_np', ([], {}), '()\n', (3913, 3915), True, 'import mxnet as mx\n'), ((1544, 1580), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(8, 32, 768)'], {}), '(0, 1, (8, 32, 768))\n', (1560, 1580), True, 'import numpy as np\n'), ((1839, 1859), 'mxnet.autograd.record', 'mx.autograd.record', ([], {}), '()\n', (1857, 1859), True, 'import mxnet as mx\n'), ((2107, 2127), 'mxnet.autograd.record', 'mx.autograd.record', ([], {}), '()\n', (2125, 2127), True, 'import mxnet as mx\n'), ((3087, 3123), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(8, 32, 768)'], {}), '(0, 1, (8, 32, 768))\n', (3103, 3123), True, 'import numpy as np\n'), ((3434, 3454), 'mxnet.autograd.record', 'mx.autograd.record', ([], {}), '()\n', (3452, 3454), True, 'import mxnet as mx\n'), ((3766, 3786), 'mxnet.autograd.record', 'mx.autograd.record', ([], {}), '()\n', (3784, 3786), True, 'import mxnet as mx\n'), ((1411, 1488), 'mxnet.npx.reshape_like', 'mx.npx.reshape_like', (['encoded', 'pick_ids'], {'lhs_begin': '(-2)', 'lhs_end': '(-1)', 'rhs_begin': '(0)'}), '(encoded, pick_ids, lhs_begin=-2, lhs_end=-1, rhs_begin=0)\n', (1430, 1488), True, 'import mxnet as mx\n'), ((2954, 3031), 'mxnet.npx.reshape_like', 'mx.npx.reshape_like', (['encoded', 'pick_ids'], {'lhs_begin': '(-2)', 'lhs_end': '(-1)', 'rhs_begin': '(0)'}), '(encoded, pick_ids, lhs_begin=-2, lhs_end=-1, rhs_begin=0)\n', (2973, 3031), True, 'import mxnet as mx\n'), 
((1277, 1304), 'mxnet.np.floor', 'mx.np.floor', (['(idx_arange / 2)'], {}), '(idx_arange / 2)\n', (1288, 1304), True, 'import mxnet as mx\n'), ((2822, 2846), 'numpy.floor', 'np.floor', (['(idx_arange / 2)'], {}), '(idx_arange / 2)\n', (2830, 2846), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
import sys, os
#sys.path.append("/scratch/bbecker/cv/lib/python3.6/dist-packages")
#sys.path.append("/scratch/bbecker/kmcuda/src/build")
#sys.path.append("/scratch/bbecker/python-libs/lib/python3.6/site-packages/")
import cv2
from pai4sk.cluster import KMeans
import cudf, cuml
import pandas as pd
#from libKMCUDA import kmeans_cuda
#from sklearn.cluster import KMeans
import numpy as np
import sqlite3
from joblib import Parallel, delayed
import logging
from typing import *
import contextlib
from numba import cuda
from numba.cuda.cudadrv.driver import CudaAPIError
import multiprocessing
import cupy as cp
import shutil
from datetime import datetime
from datetime import timedelta
import time
# Number of dominant colors extracted per image.
CLUSTERS = 5
# When True, tarballs are processed serially with per-image timing output.
DEBUG = False
# Module-level DB handle placeholder; processTar() opens its own local connection.
connection : sqlite3.Connection = None
# NOTE(review): this binds the typing.Dict *class*, not an empty dict --
# processTar() shadows it with a local dict, so this global is effectively unused.
images = Dict
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(funcName)s - %(message)s', level=logging.DEBUG)
class DummyFile(object):
    """File-like sink that silently discards everything written to it."""

    def write(self, data):
        """Ignore *data*; used to mute stdout/stderr."""
        pass
@contextlib.contextmanager
def nostdout():
    """Context manager that silences ``sys.stdout`` for the duration of the block.

    BUGFIX: restore the original stream in a ``finally`` clause -- the
    original implementation leaked the DummyFile replacement (leaving
    stdout muted for the rest of the process) whenever the body raised.
    """
    save_stdout = sys.stdout
    sys.stdout = DummyFile()
    try:
        yield
    finally:
        sys.stdout = save_stdout
@contextlib.contextmanager
def nostderr():
    """Context manager that silences ``sys.stderr`` for the duration of the block.

    BUGFIX: restore the original stream in a ``finally`` clause -- the
    original implementation leaked the DummyFile replacement (leaving
    stderr muted for the rest of the process) whenever the body raised.
    """
    save_stderr = sys.stderr
    sys.stderr = DummyFile()
    try:
        yield
    finally:
        sys.stderr = save_stderr
def np2cudf(df):
    """Copy a 2-D numpy array into a cuDF (GPU) dataframe.

    Columns are named '0', '1', ... in order of the input columns.
    """
    # Stage the array through a pandas frame, one 'fea<i>' column per input column.
    staged = pd.DataFrame({'fea%d' % i: df[:, i] for i in range(df.shape[1])})
    gpu_frame = cudf.DataFrame()
    for idx, col_name in enumerate(staged):
        gpu_frame[str(idx)] = staged[col_name]
    return gpu_frame
def process(filen):
    """Extract the CLUSTERS dominant colors of one JPEG and write them,
    one packed 24-bit RGB integer per line, to "<filen>.txt".

    KMeans runs on a GPU (pai4sk + cupy) for a subset of workers and
    falls back to CPU KMeans for the rest or on CUDA errors. Any failure
    is logged and swallowed so one bad image cannot kill the worker pool
    (best-effort by design).
    """
    try:
        # Skip non-JPEGs and images that were already processed.
        if not filen.endswith("jpg") or os.path.exists("{}.txt".format(filen)):
            return
        img = cv2.imread(filen)
        # Flatten (H, W, 3) to (H*W, 3): one BGR sample per pixel.
        img = img.reshape((img.shape[0] * img.shape[1], 3))
        worker = multiprocessing.current_process()._identity[0]
        try:
            # Only a few workers out of every 13 may use the GPUs; the rest
            # take the CPU fallback via this deliberate RuntimeError.
            if worker%13 > 3:
                raise RuntimeError()
            logging.info("Starting CUDA-KMeans in Worker {} on Card {} for file {}".format(worker, worker%2, os.path.basename(filen)))
            cuda.select_device(worker%2)
            kmeans = KMeans(n_clusters=CLUSTERS, n_init=5, verbose=0)
            img_cuda = np2cudf(np.float32(img))
            with nostdout(), nostderr():
                kmeans.fit(img_cuda)
            center = cp.asarray(kmeans.cluster_centers_.values)
            label = cp.asarray(kmeans.labels_.data.mem)
            numLabels = cp.arange(0, CLUSTERS + 1)
            (hist, he) = cp.histogram(label, bins=numLabels)
            hist = hist.astype("float")
            hist /= hist.sum()
            # Cluster centers ordered by descending frequency, copied to host.
            colors = (center[(-hist).argsort()]).get()
            # Free GPU memory eagerly; many workers share the cards.
            del kmeans
            del img_cuda
            del center
            del label
            del numLabels
            del hist
            del he
        except (RuntimeError, CudaAPIError):
            logging.info("Starting SKLearn-KMeans in Worker {} on CPU for file {}".format(worker, os.path.basename(filen)))
            kmeans = KMeans(n_clusters=CLUSTERS, n_init=5, precompute_distances=True, n_jobs=1, verbose=0)
            kmeans.use_gpu = False
            with nostdout(), nostderr():
                kmeans.fit(img)
            center = kmeans.cluster_centers_
            label = kmeans.labels_
            del kmeans
            # BUGFIX: this histogram/ordering step must only run on the CPU
            # path -- the GPU path already computed `colors` above and deleted
            # `center`/`label`, so running it unconditionally raised
            # UnboundLocalError and silently dropped every GPU result.
            numLabels = np.arange(0, CLUSTERS + 1)
            (hist, _) = np.histogram(label, bins=numLabels)
            hist = hist.astype("float")
            hist /= hist.sum()
            colors = center[(-hist).argsort()]
        logging.debug("{}: {}".format(filen, str(colors)))
        with open("{}.txt".format(filen), 'w') as fd:
            for i in range(CLUSTERS):
                # Pack the BGR sample as a 24-bit RGB integer: R<<16 | G<<8 | B.
                col = int(colors[i][2]) << 16 | int(colors[i][1]) << 8 | int(colors[i][0])
                assert col <= 2**24
                fd.write("{}\n".format(str(col)))
    except Exception as e:
        # Best-effort: log and swallow so a single broken image does not
        # abort the whole batch.
        logging.error(str(e))
        pass
def processTar(tarball):
    """Extract one image tarball into /dev/shm, run process() on every image
    in it, and collect the per-image dominant colors into a per-tarball
    SQLite database named "hist_<tarball>_<db>.sqlite".

    NOTE(review): relies on the module-level global `db` assigned in the
    __main__ block -- confirm before reusing this function elsewhere.
    """
    if not tarball.endswith(".tar"):
        return
    images = dict()
    import tarfile
    f = tarfile.open(tarball, 'r')
    # Unpack into shared memory to avoid disk I/O for the many small reads.
    dirn = "/dev/shm/{}".format(os.path.basename(tarball))
    try:
        os.mkdir(dirn)
        f.extractall(path=dirn)
    except FileExistsError:
        # A previous (possibly partial) run already extracted this tarball.
        pass
    if DEBUG:
        # Serial processing with per-image timing output.
        for img in os.listdir(dirn):
            start=datetime.now()
            process(os.path.join(dirn,img))
            print("{}: Time: {}".format(os.path.basename(img), datetime.now()-start))
    else:
        files = os.listdir(dirn)
        Parallel(n_jobs=150, backend="multiprocessing", batch_size=10, verbose=10)(
            delayed(process)(os.path.join(dirn,img)) for img in files)
    # Gather the per-image "<name>.txt" result files written by process().
    for f in os.listdir(dirn):
        if f.endswith(".txt"):
            with open(os.path.join(dirn, f), 'r') as fd:
                lines = fd.readlines()
            color_ints = [int(n.strip()) for n in lines]
            images.update({f[:-4]: color_ints})
    connection = sqlite3.connect("hist_{}_{}.sqlite".format(os.path.basename(tarball),db), check_same_thread = False)
    c = connection.cursor()
    c.execute("CREATE TABLE images (imgname text)")
    connection.commit()
    # One integer column per extracted dominant color.
    for i in range(CLUSTERS):
        c.execute("ALTER TABLE images ADD COLUMN color{} integer".format(i))
        connection.commit()
    # NOTE(review): values are interpolated into the SQL text instead of
    # bound parameters; acceptable for trusted filenames, but worth hardening.
    for key, value in images.items():
        c.execute("INSERT INTO images VALUES (\"{}\",{})".format(key,
            ",".join([str(i) for i in value])))
        connection.commit()
    connection.close()
    # Remove the /dev/shm scratch directory.
    shutil.rmtree(dirn)
def main(folder, img, db, dst):
    """Process every .tar of images in *folder*, writing one per-tarball
    SQLite color-histogram database and copying it to *dst*.

    Args:
        folder: directory containing the image tarballs.
        img: unused (kept for interface compatibility with the CLI).
        db: database-name suffix used in the output file names.
        dst: directory the finished .sqlite files are copied to.
    """
    times = []
    ctr = 0
    amnt = len(os.listdir(folder))
    for tarball in os.listdir(folder):
        ctr += 1
        print("\n\n\nProcessing {}: {}/{} ({}%) \n\n\n\n".format(tarball, ctr, amnt, (float(ctr)/amnt)*100))
        # Resume support: skip tarballs whose result already reached dst.
        if os.path.exists(os.path.join(dst,"hist_{}_{}.sqlite".format(tarball,db))):
            print("{} already done. skipping.".format(tarball))
            continue
        start = datetime.now()
        try:
            processTar(os.path.join(folder,tarball))
        except Exception:
            # BUGFIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit and made the batch unstoppable.
            print("{} failed. skipping.".format(tarball))
            # Best-effort cleanup of the scratch dir; ignore_errors so a
            # missing directory cannot crash inside the error handler.
            shutil.rmtree(os.path.join("/dev/shm", tarball), ignore_errors=True)
        else:
            shutil.copyfile(
                "hist_{}_{}.sqlite".format(tarball,db),
                os.path.join(dst,"hist_{}_{}.sqlite".format(tarball,db))
            )
        end = datetime.now()
        times.append(end-start)
        # Report elapsed, running average, total time so far and ETA.
        print("\n\n{} took {} - AVG {} - TTE {}, ETA {}".format(
            tarball,
            end-start,
            sum(times, timedelta(0)) / len(times),
            sum(times, timedelta(0)),
            (sum(times, timedelta(0)) / len(times)) * (amnt - ctr)
        ))
# Press the green button in the gutter to run the script.
if __name__ == '__main__':
    print(sys.path)
    # CLI contract: <folder-with-tarballs> <img (unused)> <db-suffix> <dst-dir>
    assert len(sys.argv) > 4
    folder = sys.argv[1]
    img = sys.argv[2]
    db = sys.argv[3]
    dst = sys.argv[4]
    main(folder, img, db, dst)
# See PyCharm help at https://www.jetbrains.com/help/pycharm/
| [
"tarfile.open",
"cudf.DataFrame",
"datetime.timedelta",
"numpy.arange",
"numpy.histogram",
"os.listdir",
"cupy.arange",
"cupy.histogram",
"os.mkdir",
"cupy.asarray",
"numba.cuda.select_device",
"cv2.imread",
"multiprocessing.current_process",
"logging.basicConfig",
"os.path.join",
"job... | [((806, 923), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s - %(levelname)s - %(funcName)s - %(message)s"""', 'level': 'logging.DEBUG'}), "(format=\n '%(asctime)s - %(levelname)s - %(funcName)s - %(message)s', level=\n logging.DEBUG)\n", (825, 923), False, 'import logging\n'), ((1394, 1410), 'cudf.DataFrame', 'cudf.DataFrame', ([], {}), '()\n', (1408, 1410), False, 'import cudf, cuml\n'), ((4877, 4903), 'tarfile.open', 'tarfile.open', (['tarball', '"""r"""'], {}), "(tarball, 'r')\n", (4889, 4903), False, 'import tarfile\n'), ((5496, 5512), 'os.listdir', 'os.listdir', (['dirn'], {}), '(dirn)\n', (5506, 5512), False, 'import sys, os\n'), ((6374, 6393), 'shutil.rmtree', 'shutil.rmtree', (['dirn'], {}), '(dirn)\n', (6387, 6393), False, 'import shutil\n'), ((6509, 6527), 'os.listdir', 'os.listdir', (['folder'], {}), '(folder)\n', (6519, 6527), False, 'import sys, os\n'), ((1690, 1707), 'cv2.imread', 'cv2.imread', (['filen'], {}), '(filen)\n', (1700, 1707), False, 'import cv2\n'), ((4936, 4961), 'os.path.basename', 'os.path.basename', (['tarball'], {}), '(tarball)\n', (4952, 4961), False, 'import sys, os\n'), ((4980, 4994), 'os.mkdir', 'os.mkdir', (['dirn'], {}), '(dirn)\n', (4988, 4994), False, 'import sys, os\n'), ((5103, 5119), 'os.listdir', 'os.listdir', (['dirn'], {}), '(dirn)\n', (5113, 5119), False, 'import sys, os\n'), ((5310, 5326), 'os.listdir', 'os.listdir', (['dirn'], {}), '(dirn)\n', (5320, 5326), False, 'import sys, os\n'), ((6470, 6488), 'os.listdir', 'os.listdir', (['folder'], {}), '(folder)\n', (6480, 6488), False, 'import sys, os\n'), ((6841, 6855), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (6853, 6855), False, 'from datetime import datetime\n'), ((2389, 2419), 'numba.cuda.select_device', 'cuda.select_device', (['(worker % 2)'], {}), '(worker % 2)\n', (2407, 2419), False, 'from numba import cuda\n'), ((2439, 2487), 'pai4sk.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'CLUSTERS', 'n_init': 
'(5)', 'verbose': '(0)'}), '(n_clusters=CLUSTERS, n_init=5, verbose=0)\n', (2445, 2487), False, 'from pai4sk.cluster import KMeans\n'), ((2676, 2718), 'cupy.asarray', 'cp.asarray', (['kmeans.cluster_centers_.values'], {}), '(kmeans.cluster_centers_.values)\n', (2686, 2718), True, 'import cupy as cp\n'), ((2739, 2774), 'cupy.asarray', 'cp.asarray', (['kmeans.labels_.data.mem'], {}), '(kmeans.labels_.data.mem)\n', (2749, 2774), True, 'import cupy as cp\n'), ((2804, 2830), 'cupy.arange', 'cp.arange', (['(0)', '(CLUSTERS + 1)'], {}), '(0, CLUSTERS + 1)\n', (2813, 2830), True, 'import cupy as cp\n'), ((2856, 2891), 'cupy.histogram', 'cp.histogram', (['label'], {'bins': 'numLabels'}), '(label, bins=numLabels)\n', (2868, 2891), True, 'import cupy as cp\n'), ((5139, 5153), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (5151, 5153), False, 'from datetime import datetime\n'), ((5335, 5409), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': '(150)', 'backend': '"""multiprocessing"""', 'batch_size': '(10)', 'verbose': '(10)'}), "(n_jobs=150, backend='multiprocessing', batch_size=10, verbose=10)\n", (5343, 5409), False, 'from joblib import Parallel, delayed\n'), ((5819, 5844), 'os.path.basename', 'os.path.basename', (['tarball'], {}), '(tarball)\n', (5835, 5844), False, 'import sys, os\n'), ((7262, 7276), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (7274, 7276), False, 'from datetime import datetime\n'), ((2115, 2148), 'multiprocessing.current_process', 'multiprocessing.current_process', ([], {}), '()\n', (2146, 2148), False, 'import multiprocessing\n'), ((2560, 2575), 'numpy.float32', 'np.float32', (['img'], {}), '(img)\n', (2570, 2575), True, 'import numpy as np\n'), ((3398, 3487), 'pai4sk.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'CLUSTERS', 'n_init': '(5)', 'precompute_distances': '(True)', 'n_jobs': '(1)', 'verbose': '(0)'}), '(n_clusters=CLUSTERS, n_init=5, precompute_distances=True, n_jobs=1,\n verbose=0)\n', (3404, 3487), False, 'from 
pai4sk.cluster import KMeans\n'), ((3730, 3756), 'numpy.arange', 'np.arange', (['(0)', '(CLUSTERS + 1)'], {}), '(0, CLUSTERS + 1)\n', (3739, 3756), True, 'import numpy as np\n'), ((3781, 3816), 'numpy.histogram', 'np.histogram', (['label'], {'bins': 'numLabels'}), '(label, bins=numLabels)\n', (3793, 3816), True, 'import numpy as np\n'), ((5174, 5197), 'os.path.join', 'os.path.join', (['dirn', 'img'], {}), '(dirn, img)\n', (5186, 5197), False, 'import sys, os\n'), ((6892, 6921), 'os.path.join', 'os.path.join', (['folder', 'tarball'], {}), '(folder, tarball)\n', (6904, 6921), False, 'import sys, os\n'), ((2351, 2374), 'os.path.basename', 'os.path.basename', (['filen'], {}), '(filen)\n', (2367, 2374), False, 'import sys, os\n'), ((5238, 5259), 'os.path.basename', 'os.path.basename', (['img'], {}), '(img)\n', (5254, 5259), False, 'import sys, os\n'), ((5423, 5439), 'joblib.delayed', 'delayed', (['process'], {}), '(process)\n', (5430, 5439), False, 'from joblib import Parallel, delayed\n'), ((5440, 5463), 'os.path.join', 'os.path.join', (['dirn', 'img'], {}), '(dirn, img)\n', (5452, 5463), False, 'import sys, os\n'), ((5567, 5588), 'os.path.join', 'os.path.join', (['dirn', 'f'], {}), '(dirn, f)\n', (5579, 5588), False, 'import sys, os\n'), ((7022, 7055), 'os.path.join', 'os.path.join', (['"""/dev/shm"""', 'tarball'], {}), "('/dev/shm', tarball)\n", (7034, 7055), False, 'import sys, os\n'), ((3351, 3374), 'os.path.basename', 'os.path.basename', (['filen'], {}), '(filen)\n', (3367, 3374), False, 'import sys, os\n'), ((5261, 5275), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (5273, 5275), False, 'from datetime import datetime\n'), ((7518, 7530), 'datetime.timedelta', 'timedelta', (['(0)'], {}), '(0)\n', (7527, 7530), False, 'from datetime import timedelta\n'), ((7463, 7475), 'datetime.timedelta', 'timedelta', (['(0)'], {}), '(0)\n', (7472, 7475), False, 'from datetime import timedelta\n'), ((7561, 7573), 'datetime.timedelta', 'timedelta', (['(0)'], {}), 
'(0)\n', (7570, 7573), False, 'from datetime import timedelta\n')] |
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pytest
from tracker.coordinategenerator import LatentCoordinateGenerator
from tracker.worldstate import WorldState
# Compact float printing for any arrays echoed while the test runs.
np.set_printoptions(precision=3)
plt.style.use('dark_background')
# TODO use same boilerplate but for different paths to test different paths
# and characteristics such as oscillatory behavior
def test_worldstate():
    """Step the world state through its full error-history window and save
    an error plot named after the latency and the error statistics."""
    step = 50 / 1000
    latency = LatentCoordinateGenerator.COORD_LATENCY
    world = WorldState()
    # One update per slot of the error-history buffer.
    for _ in range(world.error_history.shape[0]):
        world.update(step)
    errors = world.error_history
    err_max = np.max(errors)
    err_mean = np.mean(errors)
    err_std = np.std(errors)
    fig, ax = plt.subplots()
    fig.subplots_adjust(left=0, right=1, bottom=0, top=1, wspace=0, hspace=0)
    world.draw_error(ax)
    fig.savefig('log/test_worldstate_{:.3f}_{:.3f}_{:.3f}_{:.3f}.png'.format(
        latency, err_max, err_mean, err_std))
    # assert err_max < 0.9
    # assert err_mean < 0.4
    # assert err_std < 0.3
| [
"numpy.mean",
"matplotlib.pyplot.style.use",
"numpy.max",
"tracker.worldstate.WorldState",
"numpy.std",
"matplotlib.pyplot.subplots",
"numpy.set_printoptions"
] | [((200, 232), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(3)'}), '(precision=3)\n', (219, 232), True, 'import numpy as np\n'), ((233, 265), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""dark_background"""'], {}), "('dark_background')\n", (246, 265), True, 'import matplotlib.pyplot as plt\n'), ((502, 514), 'tracker.worldstate.WorldState', 'WorldState', ([], {}), '()\n', (512, 514), False, 'from tracker.worldstate import WorldState\n'), ((638, 650), 'numpy.max', 'np.max', (['hist'], {}), '(hist)\n', (644, 650), True, 'import numpy as np\n'), ((667, 680), 'numpy.mean', 'np.mean', (['hist'], {}), '(hist)\n', (674, 680), True, 'import numpy as np\n'), ((697, 709), 'numpy.std', 'np.std', (['hist'], {}), '(hist)\n', (703, 709), True, 'import numpy as np\n'), ((725, 739), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (737, 739), True, 'import matplotlib.pyplot as plt\n')] |
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import json
import datetime
import os
# Inclusive date window of the scraped daily top-stories JSON dumps.
START_DATE = datetime.date(2014, 9, 10)
END_DATE = datetime.date(2016, 7, 16)
def read_stories_without_tags():
    """Load every daily top-stories JSON dump and flatten it into a DataFrame.

    Each row is one (top_date, story) appearance with the story's author,
    publication date, recommend/response counts and number of tags.

    Returns:
        pandas.DataFrame with one row per story appearance.
    """
    stories = list()
    current_date = START_DATE
    while current_date <= END_DATE:
        # `with` guarantees the handle is closed even if parsing fails
        # (the original open()/close() pair leaked the handle on error);
        # json.load also replaces the redundant json.loads(str(read())).
        with open("./TopStories/%s.json" % current_date.isoformat(), 'r') as file_in:
            raw_data = json.load(file_in)
        for raw_story in raw_data['stories']:
            story = dict()
            story['top_date'] = current_date.isoformat()
            story['story_id'] = raw_story['story_id']
            story['author'] = raw_story['author']
            story['published_date'] = raw_story['published_date']
            story['recommends'] = raw_story['recommends']
            story['responses'] = raw_story['responses']
            story['tags_count'] = len(raw_story['tags'])
            stories.append(story)
        # Progress indicator: one line per processed day.
        print(current_date.isoformat())
        current_date = current_date + datetime.timedelta(days=1)
    return pd.read_json(json.dumps(stories))
def read_stories_by_tags():
    """Load every daily top-stories JSON dump, one row per (story, tag) pair.

    Each row carries the story metadata plus the tag's name, post count
    and follower count, so stories appear once per attached tag.

    Returns:
        pandas.DataFrame with one row per (story appearance, tag).
    """
    tags = list()
    current_date = START_DATE
    while current_date <= END_DATE:
        # `with` guarantees the handle is closed even if parsing fails
        # (the original open()/close() pair leaked the handle on error);
        # json.load also replaces the redundant json.loads(str(read())).
        with open("./TopStories/%s.json" % current_date.isoformat(), 'r') as file_in:
            raw_data = json.load(file_in)
        for raw_story in raw_data['stories']:
            for raw_tag in raw_story['tags']:
                tag = dict()
                tag['top_date'] = current_date.isoformat()
                tag['story_id'] = raw_story['story_id']
                tag['author'] = raw_story['author']
                tag['published_date'] = raw_story['published_date']
                tag['recommends'] = raw_story['recommends']
                tag['responses'] = raw_story['responses']
                tag['name'] = raw_tag['name']
                tag['post_count'] = raw_tag['postCount']
                tag['follower_count'] = raw_tag['metadata']['followerCount']
                tags.append(tag)
        # Progress indicator: one line per processed day.
        print(current_date.isoformat())
        current_date = current_date + datetime.timedelta(days=1)
    return pd.read_json(json.dumps(tags))
if __name__ == '__main__':
    # Ensure the output directory exists before writing any results.
    if not os.path.exists('./top_stories_result'):
        os.mkdir('./top_stories_result')
    # Flatten the scraped JSON dumps into two dataframes and persist them.
    stories_data = read_stories_without_tags()
    tags_data = read_stories_by_tags()
    stories_data.to_csv('./top_stories_result/stories_raw_data.csv', sep='\t', encoding='utf-8')
    tags_data.to_csv('./top_stories_result/tags_raw_data.csv', sep='\t', encoding='utf-8')
    # Daily means of recommends / responses / tag counts over time.
    plt.figure()
    stories_data.groupby(['top_date'])[['recommends']].mean().plot()
    plt.savefig('./top_stories_result/recommends-top_date.png')
    plt.close()
    plt.figure()
    stories_data.groupby(['top_date'])[['responses']].mean().plot()
    plt.savefig('./top_stories_result/responses-top_date.png')
    plt.close()
    plt.figure()
    stories_data.groupby(['top_date'])[['recommends', 'responses']].mean().plot()
    plt.savefig('./top_stories_result/recommends_responses-top_date.png')
    plt.close()
    plt.figure()
    stories_data.groupby(['top_date'])[['tags_count']].mean().plot()
    plt.savefig('./top_stories_result/tags_count-top_date.png')
    plt.close()
    # Appearance counts per author, per story and per tag, sorted descending.
    stories_data.groupby(['author'])[['story_id']].count().rename(columns={'story_id': 'count'}).sort_values(by=['count'], ascending=False).to_csv('./top_stories_result/author_count.csv', sep='\t', encoding='utf-8')
    stories_data.groupby(['story_id'])[['author']].count().rename(columns={'author': 'count'}).sort_values(by=['count'], ascending=False).to_csv('./top_stories_result/story_count.csv', sep='\t', encoding='utf-8')
    tags_data.groupby(['name'])[['story_id']].count().rename(columns={'story_id': 'count'}).sort_values(by=['count'], ascending=False).to_csv('./top_stories_result/tags_count.csv', sep='\t', encoding='utf-8')
    # Empirical CDFs of the per-story metrics.
    responses_list = np.sort(stories_data['responses'].tolist())
    plt.figure()
    plt.plot(responses_list, np.linspace(0, 1, responses_list.size))
    plt.savefig('./top_stories_result/CDF_responses.png')
    plt.close()
    recommends_list = np.sort(stories_data['recommends'].tolist())
    plt.figure()
    plt.plot(recommends_list, np.linspace(0, 1, recommends_list.size))
    plt.savefig('./top_stories_result/CDF_recommends.png')
    plt.close()
    tags_count_list = np.sort(stories_data['tags_count'].tolist())
    plt.figure()
    plt.plot(tags_count_list, np.linspace(0, 1, tags_count_list.size))
    plt.savefig('./top_stories_result/CDF_tags_count.png')
    plt.close()
    # Scatter: how often a tag reaches the top list vs its post/follower counts.
    tags_post_count_count = tags_data[['name', 'post_count']].drop_duplicates().join(tags_data.groupby(['name'])[['story_id']].count().rename(columns={'story_id': 'count'}), on='name')
    plt.figure()
    tags_post_count_count[['post_count', 'count']].plot(x='post_count', y='count', kind='scatter')
    plt.savefig('./top_stories_result/tags_count-post_count.png')
    plt.close()
    tags_follower_count_count = tags_data[['name', 'follower_count']].drop_duplicates().join(tags_data.groupby(['name'])[['story_id']].count().rename(columns={'story_id': 'count'}), on='name')
    plt.figure()
    tags_follower_count_count[['follower_count', 'count']].plot(x='follower_count', y='count', kind='scatter')
    plt.savefig('./top_stories_result/tags_count-follower_count.png')
    plt.close()
| [
"os.path.exists",
"matplotlib.pyplot.savefig",
"json.dumps",
"matplotlib.pyplot.close",
"matplotlib.pyplot.figure",
"numpy.linspace",
"datetime.date",
"os.mkdir",
"datetime.timedelta"
] | [((149, 175), 'datetime.date', 'datetime.date', (['(2014)', '(9)', '(10)'], {}), '(2014, 9, 10)\n', (162, 175), False, 'import datetime\n'), ((187, 213), 'datetime.date', 'datetime.date', (['(2016)', '(7)', '(16)'], {}), '(2016, 7, 16)\n', (200, 213), False, 'import datetime\n'), ((2650, 2662), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2660, 2662), True, 'import matplotlib.pyplot as plt\n'), ((2736, 2795), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./top_stories_result/recommends-top_date.png"""'], {}), "('./top_stories_result/recommends-top_date.png')\n", (2747, 2795), True, 'import matplotlib.pyplot as plt\n'), ((2800, 2811), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2809, 2811), True, 'import matplotlib.pyplot as plt\n'), ((2817, 2829), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2827, 2829), True, 'import matplotlib.pyplot as plt\n'), ((2902, 2960), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./top_stories_result/responses-top_date.png"""'], {}), "('./top_stories_result/responses-top_date.png')\n", (2913, 2960), True, 'import matplotlib.pyplot as plt\n'), ((2965, 2976), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2974, 2976), True, 'import matplotlib.pyplot as plt\n'), ((2982, 2994), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2992, 2994), True, 'import matplotlib.pyplot as plt\n'), ((3081, 3150), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./top_stories_result/recommends_responses-top_date.png"""'], {}), "('./top_stories_result/recommends_responses-top_date.png')\n", (3092, 3150), True, 'import matplotlib.pyplot as plt\n'), ((3155, 3166), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3164, 3166), True, 'import matplotlib.pyplot as plt\n'), ((3172, 3184), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3182, 3184), True, 'import matplotlib.pyplot as plt\n'), ((3258, 3317), 'matplotlib.pyplot.savefig', 'plt.savefig', 
(['"""./top_stories_result/tags_count-top_date.png"""'], {}), "('./top_stories_result/tags_count-top_date.png')\n", (3269, 3317), True, 'import matplotlib.pyplot as plt\n'), ((3322, 3333), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3331, 3333), True, 'import matplotlib.pyplot as plt\n'), ((4045, 4057), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4055, 4057), True, 'import matplotlib.pyplot as plt\n'), ((4131, 4184), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./top_stories_result/CDF_responses.png"""'], {}), "('./top_stories_result/CDF_responses.png')\n", (4142, 4184), True, 'import matplotlib.pyplot as plt\n'), ((4189, 4200), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4198, 4200), True, 'import matplotlib.pyplot as plt\n'), ((4273, 4285), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4283, 4285), True, 'import matplotlib.pyplot as plt\n'), ((4361, 4415), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./top_stories_result/CDF_recommends.png"""'], {}), "('./top_stories_result/CDF_recommends.png')\n", (4372, 4415), True, 'import matplotlib.pyplot as plt\n'), ((4420, 4431), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4429, 4431), True, 'import matplotlib.pyplot as plt\n'), ((4504, 4516), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4514, 4516), True, 'import matplotlib.pyplot as plt\n'), ((4592, 4646), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./top_stories_result/CDF_tags_count.png"""'], {}), "('./top_stories_result/CDF_tags_count.png')\n", (4603, 4646), True, 'import matplotlib.pyplot as plt\n'), ((4651, 4662), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4660, 4662), True, 'import matplotlib.pyplot as plt\n'), ((4853, 4865), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4863, 4865), True, 'import matplotlib.pyplot as plt\n'), ((4969, 5030), 'matplotlib.pyplot.savefig', 'plt.savefig', 
(['"""./top_stories_result/tags_count-post_count.png"""'], {}), "('./top_stories_result/tags_count-post_count.png')\n", (4980, 5030), True, 'import matplotlib.pyplot as plt\n'), ((5035, 5046), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (5044, 5046), True, 'import matplotlib.pyplot as plt\n'), ((5245, 5257), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5255, 5257), True, 'import matplotlib.pyplot as plt\n'), ((5373, 5438), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./top_stories_result/tags_count-follower_count.png"""'], {}), "('./top_stories_result/tags_count-follower_count.png')\n", (5384, 5438), True, 'import matplotlib.pyplot as plt\n'), ((5443, 5454), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (5452, 5454), True, 'import matplotlib.pyplot as plt\n'), ((1124, 1143), 'json.dumps', 'json.dumps', (['stories'], {}), '(stories)\n', (1134, 1143), False, 'import json\n'), ((2229, 2245), 'json.dumps', 'json.dumps', (['tags'], {}), '(tags)\n', (2239, 2245), False, 'import json\n'), ((2287, 2325), 'os.path.exists', 'os.path.exists', (['"""./top_stories_result"""'], {}), "('./top_stories_result')\n", (2301, 2325), False, 'import os\n'), ((2335, 2367), 'os.mkdir', 'os.mkdir', (['"""./top_stories_result"""'], {}), "('./top_stories_result')\n", (2343, 2367), False, 'import os\n'), ((4087, 4125), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'responses_list.size'], {}), '(0, 1, responses_list.size)\n', (4098, 4125), True, 'import numpy as np\n'), ((4316, 4355), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'recommends_list.size'], {}), '(0, 1, recommends_list.size)\n', (4327, 4355), True, 'import numpy as np\n'), ((4547, 4586), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'tags_count_list.size'], {}), '(0, 1, tags_count_list.size)\n', (4558, 4586), True, 'import numpy as np\n'), ((1073, 1099), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (1091, 1099), False, 'import datetime\n'), 
((2178, 2204), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (2196, 2204), False, 'import datetime\n')] |
import pkg_resources
# Version resolved from the installed package's metadata.
__version__ = pkg_resources.require(__name__)[0].version
# Names of the (forward) colormaps this package has registered with matplotlib.
CUSTOM_CMAPS = []
def __RGBToPyCmap(rgbdata):
    """Convert an (N, 3) RGB array into the segment-data dict expected by
    matplotlib's LinearSegmentedColormap."""
    import numpy as np
    nsteps = rgbdata.shape[0]
    stepaxis = np.linspace(0, 1, nsteps)
    # Each channel entry is (position, value-below, value-above); using the
    # same value twice gives a piecewise-linear (non-discontinuous) map.
    rdata = [(stepaxis[i], rgbdata[i, 0], rgbdata[i, 0]) for i in range(nsteps)]
    gdata = [(stepaxis[i], rgbdata[i, 1], rgbdata[i, 1]) for i in range(nsteps)]
    bdata = [(stepaxis[i], rgbdata[i, 2], rgbdata[i, 2]) for i in range(nsteps)]
    return {'red': rdata,
            'green': gdata,
            'blue': bdata}
def __InstallCmapFromCSV(csv):
    """Register the colormap stored in the CSV file *csv* with matplotlib,
    together with its reversed '<name>_r' variant, skipping names that are
    already registered. The forward name is recorded in CUSTOM_CMAPS."""
    global CUSTOM_CMAPS
    import os
    import numpy as np
    import matplotlib as mpl
    import matplotlib.pyplot as plt
    # Colormap name = CSV file name without extension.
    name = os.path.splitext(os.path.basename(csv))[0]
    rgb = np.loadtxt(csv, delimiter=',')
    if name not in plt.colormaps():
        CUSTOM_CMAPS.append(name)
        segments = __RGBToPyCmap(rgb)
        plt.register_cmap(cmap=mpl.colors.LinearSegmentedColormap(name, segments, rgb.shape[0]))
    # Reversed variant: same data walked backwards.
    name = f'{name}_r'
    if name not in plt.colormaps():
        segments_r = __RGBToPyCmap(rgb[::-1, :])
        plt.register_cmap(cmap=mpl.colors.LinearSegmentedColormap(name, segments_r, rgb.shape[0]))
def load(style='fancy', flavor='light'):
    """Activate the packaged matplotlib style.

    Registers every bundled colormap, installs the bundled fonts into
    matplotlib's font manager, and switches pyplot to the requested
    mplstyle sheet.

    Args:
        style: style sheet to load (options: 'fancy', 'mono').
        flavor: color flavor to load (options: 'light', 'dark').
    """
    import os
    import pkg_resources
    from matplotlib import font_manager
    import matplotlib.pyplot as plt
    # Register all colormaps shipped under assets/colormaps.
    cmap_dir = pkg_resources.resource_filename(__name__, 'assets/colormaps')
    for entry in pkg_resources.resource_listdir(__name__, 'assets/colormaps'):
        __InstallCmapFromCSV(os.path.join(cmap_dir, entry))
    # Make the bundled fonts visible to matplotlib's font manager.
    font_dir = pkg_resources.resource_filename(__name__, 'assets/fonts')
    for font_path in font_manager.findSystemFonts(fontpaths=[font_dir]):
        font_manager.fontManager.addfont(font_path)
    # Finally switch pyplot to the requested style sheet.
    style_file = pkg_resources.resource_stream(__name__, f'assets/{style}.{flavor}.mplstyle')
    plt.style.use(style_file.name)
| [
"matplotlib.font_manager.findSystemFonts",
"pkg_resources.require",
"os.path.join",
"pkg_resources.resource_filename",
"matplotlib.pyplot.style.use",
"matplotlib.colors.LinearSegmentedColormap",
"numpy.linspace",
"os.path.basename",
"matplotlib.pyplot.colormaps",
"numpy.loadtxt",
"matplotlib.fon... | [((188, 213), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'nsteps'], {}), '(0, 1, nsteps)\n', (199, 213), True, 'import numpy as np\n'), ((798, 828), 'numpy.loadtxt', 'np.loadtxt', (['csv'], {'delimiter': '""","""'}), "(csv, delimiter=',')\n", (808, 828), True, 'import numpy as np\n'), ((1633, 1694), 'pkg_resources.resource_filename', 'pkg_resources.resource_filename', (['__name__', '"""assets/colormaps"""'], {}), "(__name__, 'assets/colormaps')\n", (1664, 1694), False, 'import pkg_resources\n'), ((1705, 1765), 'pkg_resources.resource_listdir', 'pkg_resources.resource_listdir', (['__name__', '"""assets/colormaps"""'], {}), "(__name__, 'assets/colormaps')\n", (1735, 1765), False, 'import pkg_resources\n'), ((1879, 1936), 'pkg_resources.resource_filename', 'pkg_resources.resource_filename', (['__name__', '"""assets/fonts"""'], {}), "(__name__, 'assets/fonts')\n", (1910, 1936), False, 'import pkg_resources\n'), ((1952, 2002), 'matplotlib.font_manager.findSystemFonts', 'font_manager.findSystemFonts', ([], {'fontpaths': '[FONT_DIR]'}), '(fontpaths=[FONT_DIR])\n', (1980, 2002), False, 'from matplotlib import font_manager\n'), ((2100, 2176), 'pkg_resources.resource_stream', 'pkg_resources.resource_stream', (['__name__', 'f"""assets/{style}.{flavor}.mplstyle"""'], {}), "(__name__, f'assets/{style}.{flavor}.mplstyle')\n", (2129, 2176), False, 'import pkg_resources\n'), ((2179, 2212), 'matplotlib.pyplot.style.use', 'plt.style.use', (['MPLSTYLE_FILE.name'], {}), '(MPLSTYLE_FILE.name)\n', (2192, 2212), True, 'import matplotlib.pyplot as plt\n'), ((35, 66), 'pkg_resources.require', 'pkg_resources.require', (['__name__'], {}), '(__name__)\n', (56, 66), False, 'import pkg_resources\n'), ((846, 861), 'matplotlib.pyplot.colormaps', 'plt.colormaps', ([], {}), '()\n', (859, 861), True, 'import matplotlib.pyplot as plt\n'), ((1070, 1085), 'matplotlib.pyplot.colormaps', 'plt.colormaps', ([], {}), '()\n', (1083, 1085), True, 'import matplotlib.pyplot as 
plt\n'), ((1802, 1830), 'os.path.join', 'os.path.join', (['CMAP_DIR', 'cmap'], {}), '(CMAP_DIR, cmap)\n', (1814, 1830), False, 'import os\n'), ((2038, 2081), 'matplotlib.font_manager.fontManager.addfont', 'font_manager.fontManager.addfont', (['font_file'], {}), '(font_file)\n', (2070, 2081), False, 'from matplotlib import font_manager\n'), ((758, 779), 'os.path.basename', 'os.path.basename', (['csv'], {}), '(csv)\n', (774, 779), False, 'import os\n'), ((960, 1030), 'matplotlib.colors.LinearSegmentedColormap', 'mpl.colors.LinearSegmentedColormap', (['cmap', 'mpl_data', 'cmap_data.shape[0]'], {}), '(cmap, mpl_data, cmap_data.shape[0])\n', (994, 1030), True, 'import matplotlib as mpl\n'), ((1164, 1236), 'matplotlib.colors.LinearSegmentedColormap', 'mpl.colors.LinearSegmentedColormap', (['cmap', 'mpl_data_r', 'cmap_data.shape[0]'], {}), '(cmap, mpl_data_r, cmap_data.shape[0])\n', (1198, 1236), True, 'import matplotlib as mpl\n')] |
import unittest2 as unittest
import rdflib
import numpy as np
from osp.core.namespaces import cuba
from osp.core.ontology.cuba import rdflib_cuba
from osp.core.namespaces import owl
# The `city` example namespace may not be installed yet; on ImportError,
# parse the bundled ontology and refresh the registry before retrying.
try:
    from osp.core.namespaces import city
except ImportError:
    from osp.core.ontology import Parser
    from osp.core.namespaces import _namespace_registry
    Parser(_namespace_registry._graph).parse("city")
    _namespace_registry.update_namespaces()
    city = _namespace_registry.city
class TestOntologyEntity(unittest.TestCase):
    """Tests for the ontology-entity API using the example `city` namespace.

    Exercises string representation, namespace properties, the sub/superclass
    hierarchy, RDF triple export, attribute handling, instantiation and
    relationship inverses of ontology classes, relationships and attributes.
    """
    def test_str(self):
        """str() / repr() of an ontology class include its namespace."""
        self.assertEqual(str(city.City), "city.City")
        self.assertEqual(repr(city.City), "<OntologyClass city.City>")
    def test_properties(self):
        """iri, tblname, namespace and description properties of a class."""
        self.assertEqual(
            city.City.iri,
            rdflib.term.URIRef('http://www.osp-core.com/city#City')
        )
        self.assertEqual(city.City.tblname, "city___City")
        self.assertEqual(city.City.namespace, city)
        # Classes without an explicit description get a placeholder.
        self.assertEqual(city.City.description, "To Be Determined")
        self.assertEqual(city.LivingBeing.description, "A being that lives")
    def test_subclass(self):
        """Transitive and direct sub/superclass queries for classes,
        relationships and attributes, plus is_sub/superclass_of checks."""
        self.assertEqual(city.GeographicalPlace.subclasses, {
            city.GeographicalPlace, city.ArchitecturalStructure,
            city.PopulatedPlace, city.City, city.Neighborhood,
            city.Street, city.Building
        })
        self.assertEqual(city.GeographicalPlace.superclasses, {
            cuba.Class, city.GeographicalPlace
        })
        self.assertEqual(city.GeographicalPlace.direct_subclasses, {
            city.PopulatedPlace, city.ArchitecturalStructure
        })
        self.assertEqual(city.GeographicalPlace.direct_superclasses, {
            cuba.Class
        })
        self.assertEqual(city.hasPart.subclasses, {
            city.hasPart, city.hasMajor, city.hasChild, city.hasWorker
        })
        self.assertEqual(city.hasPart.superclasses, {
            city.encloses, cuba.activeRelationship, cuba.relationship,
            city.hasPart
        })
        self.assertEqual(city.hasPart.direct_subclasses, {
            city.hasChild, city.hasWorker
        })
        self.assertEqual(city.hasPart.direct_superclasses, {city.encloses})
        self.assertEqual(city.name.subclasses, {city.name})
        self.assertEqual(city.name.superclasses, {city.name, cuba.attribute})
        self.assertEqual(city.name.direct_subclasses, set())
        self.assertEqual(city.name.direct_superclasses, {cuba.attribute})
        # A class is both a sub- and a superclass of itself.
        self.assertTrue(city.City.is_subclass_of(city.GeographicalPlace))
        self.assertFalse(city.City.is_superclass_of(city.GeographicalPlace))
        self.assertTrue(city.City.is_subclass_of(city.City))
        self.assertTrue(city.City.is_superclass_of(city.City))
        self.assertFalse(city.GeographicalPlace.is_subclass_of(city.City))
        self.assertTrue(city.GeographicalPlace.is_superclass_of(city.City))
    def test_get_triples(self):
        """get_triples() yields the RDF triples describing each entity."""
        self.assertEqual(set(city.City.get_triples()), {
            (city.City.iri, rdflib.RDFS.label, rdflib.term.Literal('City',
                lang='en')),
            (city.City.iri, rdflib.RDFS.subClassOf, city.PopulatedPlace.iri),
            (city.City.iri, rdflib.RDF.type, rdflib.OWL.Class)
        })
        self.assertEqual(set(city.hasPart.get_triples()), {
            (city.hasPart.iri, rdflib.RDFS.label,
                rdflib.term.Literal('hasPart', lang='en')),
            (city.hasPart.iri, rdflib.RDFS.subPropertyOf, city.encloses.iri),
            (city.hasPart.iri, rdflib.RDF.type, rdflib.OWL.ObjectProperty),
            (city.hasPart.iri, rdflib.OWL.inverseOf, city.isPartOf.iri)
        })
        self.assertEqual(set(city.coordinates.get_triples()), {
            (city.coordinates.iri, rdflib.RDFS.label,
                rdflib.term.Literal('coordinates', lang='en')),
            (city.coordinates.iri, rdflib.RDFS.subPropertyOf,
                cuba.attribute.iri),
            (city.coordinates.iri, rdflib.RDF.type,
                rdflib.OWL.DatatypeProperty),
            (city.coordinates.iri, rdflib.RDF.type,
                rdflib.OWL.FunctionalProperty),
            (city.coordinates.iri, rdflib.RDFS.range,
                rdflib_cuba["datatypes/VECTOR-INT-2"])
        })
    def test_transitive_hull(self):
        """_transitive_hull follows a predicate transitively, optionally
        inverted, and honors the blacklist."""
        self.assertEqual(set(
            city.PopulatedPlace._transitive_hull(
                rdflib.RDFS.subClassOf, blacklist={rdflib.OWL.Thing})),
            {city.GeographicalPlace, cuba.Class}
        )
        self.assertEqual(
            set(city.PopulatedPlace._transitive_hull(rdflib.RDFS.subClassOf,
                                                     inverse=True)),
            {city.Street, city.City, city.Neighborhood}
        )
    def test_directly_connected(self):
        """_directly_connected returns only one-hop neighbors."""
        self.assertEqual(set(
            city.PopulatedPlace._directly_connected(rdflib.RDFS.subClassOf)),
            {city.GeographicalPlace}
        )
        self.assertEqual(
            set(city.PopulatedPlace._directly_connected(rdflib.RDFS.subClassOf,
                                                        inverse=True)),
            {city.Street, city.City, city.Neighborhood}
        )
    def test_oclass_attributes(self):
        """attributes (inherited) vs own_attributes (declared on the class);
        each value is a (default, obligatory, ???) triple."""
        self.assertEqual(city.City.attributes, {
            city.name: (None, True, None),
            city.coordinates: (rdflib.Literal('[0, 0]'), False, None),
        })
        self.assertEqual(city.City.own_attributes, {})
        self.assertEqual(city.GeographicalPlace.own_attributes, {
            city.name: (None, True, None),
        })
        self.maxDiff = None
        self.assertEqual(city.LivingBeing.own_attributes, {
            city.name: (rdflib.Literal("<NAME>"), False, None),
            city.age: (rdflib.Literal(25), False, None)
        })
        self.assertEqual(city.Person.attributes, {
            city.name: (rdflib.Literal("<NAME>"), False, None),
            city.age: (rdflib.Literal(25), False, None)
        })
        self.assertEqual(city.PopulatedPlace.attributes, {
            city.name: (None, True, None),
            city.coordinates: (rdflib.Literal('[0, 0]'), False, None)
        })
        self.assertEqual(city.GeographicalPlace.attributes, {
            city.name: (None, True, None),
        })
    def test_oclass_get_default(self):
        """_get_default looks up an attribute default on a given superclass."""
        self.assertEqual(city.City._get_default(city.name.iri, city.City.iri),
                         None)
        self.assertEqual(city.City._get_default(city.name.iri,
                                                city.GeographicalPlace.iri),
                         None)
        self.assertEqual(city.City._get_default(city.coordinates.iri,
                                                city.City.iri),
                         None)
        self.assertEqual(city.City._get_default(city.coordinates.iri,
                                                city.PopulatedPlace.iri),
                         rdflib.term.Literal('[0, 0]'))
    def test_get_attribute_values(self):
        """_get_attributes_values validates kwargs and fills in defaults."""
        self.assertRaises(TypeError, city.City._get_attributes_values,
                          kwargs={}, _force=False)
        self.assertRaises(TypeError, city.City._get_attributes_values,
                          kwargs={"name": "name", "invalid": "invalid"},
                          _force=False)
        self.assertEqual(city.City._get_attributes_values(kwargs={},
                                                          _force=True),
                         {city.coordinates: rdflib.term.Literal('[0, 0]')})
        self.assertEqual(city.City._get_attributes_values(kwargs={},
                                                          _force=True),
                         {city.coordinates: rdflib.term.Literal('[0, 0]')})
        self.assertEqual(city.City._get_attributes_values(
            kwargs={"name": "Freiburg"}, _force=True),
            {city.name: "Freiburg",
             city.coordinates: rdflib.term.Literal('[0, 0]')}
        )
        self.assertEqual(city.City._get_attributes_values(
            kwargs={"name": "Freiburg", "coordinates": [1, 1]}, _force=True),
            {city.name: "Freiburg",
             city.coordinates: [1, 1]}
        )
    def test_oclass_call(self):
        """Calling an ontology class instantiates a CUDS object; required
        attributes must be given, unknown kwargs raise TypeError."""
        c = city.City(name="Freiburg")
        self.assertEqual(c.name, "Freiburg")
        self.assertTrue(np.all(c.coordinates == np.array([0, 0])))
        c = city.City(name="Basel", coordinates=[1, 2])
        self.assertEqual(c.name, "Basel")
        self.assertTrue(np.all(c.coordinates == np.array([1, 2])))
        self.assertRaises(TypeError, city.City)
        self.assertRaises(TypeError, city.City, name="Name", invalid="invalid")
    def test_rel_inverse(self):
        """inverse of a relationship; symmetric for cuba.relationship and
        auto-generated (INVERSE_OF_*) when not declared in the ontology."""
        self.assertEqual(city.hasPart.inverse, city.isPartOf)
        self.assertEqual(city.isPartOf.inverse, city.hasPart)
        self.assertEqual(cuba.relationship.inverse, cuba.relationship)
        self.assertEqual(cuba.activeRelationship.inverse,
                         cuba.passiveRelationship)
        self.assertEqual(cuba.passiveRelationship.inverse,
                         cuba.activeRelationship)
        self.assertEqual(city.hasMajor.inverse,
                         city.INVERSE_OF_hasMajor)
        self.assertEqual(city.INVERSE_OF_hasMajor.direct_superclasses,
                         {city.worksIn})
        self.assertEqual(city.INVERSE_OF_hasMajor.inverse, city.hasMajor)
    def test_attribute_datatype(self):
        """datatype of attributes plus conversion to/from that datatype."""
        self.assertEqual(city.name.datatype, None)
        self.assertEqual(city.coordinates.datatype,
                         rdflib_cuba["datatypes/VECTOR-INT-2"])
        self.assertEqual(cuba.attribute.datatype, None)
        self.assertEqual(city.name.convert_to_datatype("abc"), "abc")
        self.assertEqual(city.name.convert_to_datatype(12.3), "12.3")
        self.assertEqual(city.name.convert_to_datatype([1, 2, 3]), "[1, 2, 3]")
        self.assertTrue(np.all(
            city.coordinates.convert_to_datatype([1, 2])
            == np.array([1, 2])))
        # Wrong type / wrong length must be rejected for vector datatypes.
        self.assertRaises(ValueError, city.coordinates.convert_to_datatype,
                          "[1, 2]")
        self.assertRaises(ValueError, city.coordinates.convert_to_datatype,
                          [1, 2, 3])
        self.assertTrue(np.all(
            city.coordinates.convert_to_datatype(rdflib.Literal([1, 2]))
            == np.array([1, 2])))
        self.assertEqual(
            city.coordinates.convert_to_basic_type(np.array([1, 2])),
            [1, 2]
        )
if __name__ == "__main__":
    # Run the test suite when this module is executed directly.
    unittest.main()
| [
"osp.core.namespaces.city.GeographicalPlace.is_subclass_of",
"osp.core.namespaces.city.GeographicalPlace.is_superclass_of",
"numpy.array",
"osp.core.namespaces.city.City._get_attributes_values",
"osp.core.namespaces.city.City.get_triples",
"osp.core.namespaces.city.City.is_subclass_of",
"osp.core.namesp... | [((10562, 10577), 'unittest2.main', 'unittest.main', ([], {}), '()\n', (10575, 10577), True, 'import unittest2 as unittest\n'), ((404, 443), 'osp.core.namespaces._namespace_registry.update_namespaces', '_namespace_registry.update_namespaces', ([], {}), '()\n', (441, 443), False, 'from osp.core.namespaces import _namespace_registry\n'), ((8271, 8297), 'osp.core.namespaces.city.City', 'city.City', ([], {'name': '"""Freiburg"""'}), "(name='Freiburg')\n", (8280, 8297), False, 'from osp.core.namespaces import city\n'), ((8422, 8465), 'osp.core.namespaces.city.City', 'city.City', ([], {'name': '"""Basel"""', 'coordinates': '[1, 2]'}), "(name='Basel', coordinates=[1, 2])\n", (8431, 8465), False, 'from osp.core.namespaces import city\n'), ((773, 828), 'rdflib.term.URIRef', 'rdflib.term.URIRef', (['"""http://www.osp-core.com/city#City"""'], {}), "('http://www.osp-core.com/city#City')\n", (791, 828), False, 'import rdflib\n'), ((2514, 2562), 'osp.core.namespaces.city.City.is_subclass_of', 'city.City.is_subclass_of', (['city.GeographicalPlace'], {}), '(city.GeographicalPlace)\n', (2538, 2562), False, 'from osp.core.namespaces import city\n'), ((2589, 2639), 'osp.core.namespaces.city.City.is_superclass_of', 'city.City.is_superclass_of', (['city.GeographicalPlace'], {}), '(city.GeographicalPlace)\n', (2615, 2639), False, 'from osp.core.namespaces import city\n'), ((2665, 2700), 'osp.core.namespaces.city.City.is_subclass_of', 'city.City.is_subclass_of', (['city.City'], {}), '(city.City)\n', (2689, 2700), False, 'from osp.core.namespaces import city\n'), ((2726, 2763), 'osp.core.namespaces.city.City.is_superclass_of', 'city.City.is_superclass_of', (['city.City'], {}), '(city.City)\n', (2752, 2763), False, 'from osp.core.namespaces import city\n'), ((2790, 2838), 'osp.core.namespaces.city.GeographicalPlace.is_subclass_of', 'city.GeographicalPlace.is_subclass_of', (['city.City'], {}), '(city.City)\n', (2827, 2838), False, 'from osp.core.namespaces import 
city\n'), ((2864, 2914), 'osp.core.namespaces.city.GeographicalPlace.is_superclass_of', 'city.GeographicalPlace.is_superclass_of', (['city.City'], {}), '(city.City)\n', (2903, 2914), False, 'from osp.core.namespaces import city\n'), ((6379, 6431), 'osp.core.namespaces.city.City._get_default', 'city.City._get_default', (['city.name.iri', 'city.City.iri'], {}), '(city.name.iri, city.City.iri)\n', (6401, 6431), False, 'from osp.core.namespaces import city\n'), ((6489, 6554), 'osp.core.namespaces.city.City._get_default', 'city.City._get_default', (['city.name.iri', 'city.GeographicalPlace.iri'], {}), '(city.name.iri, city.GeographicalPlace.iri)\n', (6511, 6554), False, 'from osp.core.namespaces import city\n'), ((6660, 6719), 'osp.core.namespaces.city.City._get_default', 'city.City._get_default', (['city.coordinates.iri', 'city.City.iri'], {}), '(city.coordinates.iri, city.City.iri)\n', (6682, 6719), False, 'from osp.core.namespaces import city\n'), ((6825, 6894), 'osp.core.namespaces.city.City._get_default', 'city.City._get_default', (['city.coordinates.iri', 'city.PopulatedPlace.iri'], {}), '(city.coordinates.iri, city.PopulatedPlace.iri)\n', (6847, 6894), False, 'from osp.core.namespaces import city\n'), ((6969, 6998), 'rdflib.term.Literal', 'rdflib.term.Literal', (['"""[0, 0]"""'], {}), "('[0, 0]')\n", (6988, 6998), False, 'import rdflib\n'), ((7373, 7429), 'osp.core.namespaces.city.City._get_attributes_values', 'city.City._get_attributes_values', ([], {'kwargs': '{}', '_force': '(True)'}), '(kwargs={}, _force=True)\n', (7405, 7429), False, 'from osp.core.namespaces import city\n'), ((7590, 7646), 'osp.core.namespaces.city.City._get_attributes_values', 'city.City._get_attributes_values', ([], {'kwargs': '{}', '_force': '(True)'}), '(kwargs={}, _force=True)\n', (7622, 7646), False, 'from osp.core.namespaces import city\n'), ((7807, 7881), 'osp.core.namespaces.city.City._get_attributes_values', 'city.City._get_attributes_values', ([], {'kwargs': "{'name': 
'Freiburg'}", '_force': '(True)'}), "(kwargs={'name': 'Freiburg'}, _force=True)\n", (7839, 7881), False, 'from osp.core.namespaces import city\n'), ((8029, 8130), 'osp.core.namespaces.city.City._get_attributes_values', 'city.City._get_attributes_values', ([], {'kwargs': "{'name': 'Freiburg', 'coordinates': [1, 1]}", '_force': '(True)'}), "(kwargs={'name': 'Freiburg', 'coordinates':\n [1, 1]}, _force=True)\n", (8061, 8130), False, 'from osp.core.namespaces import city\n'), ((9722, 9758), 'osp.core.namespaces.city.name.convert_to_datatype', 'city.name.convert_to_datatype', (['"""abc"""'], {}), "('abc')\n", (9751, 9758), False, 'from osp.core.namespaces import city\n'), ((9792, 9827), 'osp.core.namespaces.city.name.convert_to_datatype', 'city.name.convert_to_datatype', (['(12.3)'], {}), '(12.3)\n', (9821, 9827), False, 'from osp.core.namespaces import city\n'), ((9862, 9902), 'osp.core.namespaces.city.name.convert_to_datatype', 'city.name.convert_to_datatype', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (9891, 9902), False, 'from osp.core.namespaces import city\n'), ((351, 385), 'osp.core.ontology.Parser', 'Parser', (['_namespace_registry._graph'], {}), '(_namespace_registry._graph)\n', (357, 385), False, 'from osp.core.ontology import Parser\n'), ((2978, 3001), 'osp.core.namespaces.city.City.get_triples', 'city.City.get_triples', ([], {}), '()\n', (2999, 3001), False, 'from osp.core.namespaces import city\n'), ((3342, 3368), 'osp.core.namespaces.city.hasPart.get_triples', 'city.hasPart.get_triples', ([], {}), '()\n', (3366, 3368), False, 'from osp.core.namespaces import city\n'), ((3746, 3776), 'osp.core.namespaces.city.coordinates.get_triples', 'city.coordinates.get_triples', ([], {}), '()\n', (3774, 3776), False, 'from osp.core.namespaces import city\n'), ((4380, 4475), 'osp.core.namespaces.city.PopulatedPlace._transitive_hull', 'city.PopulatedPlace._transitive_hull', (['rdflib.RDFS.subClassOf'], {'blacklist': '{rdflib.OWL.Thing}'}), '(rdflib.RDFS.subClassOf, 
blacklist={\n rdflib.OWL.Thing})\n', (4416, 4475), False, 'from osp.core.namespaces import city\n'), ((4591, 4665), 'osp.core.namespaces.city.PopulatedPlace._transitive_hull', 'city.PopulatedPlace._transitive_hull', (['rdflib.RDFS.subClassOf'], {'inverse': '(True)'}), '(rdflib.RDFS.subClassOf, inverse=True)\n', (4627, 4665), False, 'from osp.core.namespaces import city\n'), ((4869, 4932), 'osp.core.namespaces.city.PopulatedPlace._directly_connected', 'city.PopulatedPlace._directly_connected', (['rdflib.RDFS.subClassOf'], {}), '(rdflib.RDFS.subClassOf)\n', (4908, 4932), False, 'from osp.core.namespaces import city\n'), ((5024, 5101), 'osp.core.namespaces.city.PopulatedPlace._directly_connected', 'city.PopulatedPlace._directly_connected', (['rdflib.RDFS.subClassOf'], {'inverse': '(True)'}), '(rdflib.RDFS.subClassOf, inverse=True)\n', (5063, 5101), False, 'from osp.core.namespaces import city\n'), ((7533, 7562), 'rdflib.term.Literal', 'rdflib.term.Literal', (['"""[0, 0]"""'], {}), "('[0, 0]')\n", (7552, 7562), False, 'import rdflib\n'), ((7750, 7779), 'rdflib.term.Literal', 'rdflib.term.Literal', (['"""[0, 0]"""'], {}), "('[0, 0]')\n", (7769, 7779), False, 'import rdflib\n'), ((7963, 7992), 'rdflib.term.Literal', 'rdflib.term.Literal', (['"""[0, 0]"""'], {}), "('[0, 0]')\n", (7982, 7992), False, 'import rdflib\n'), ((10481, 10497), 'numpy.array', 'np.array', (['[1, 2]'], {}), '([1, 2])\n', (10489, 10497), True, 'import numpy as np\n'), ((3053, 3091), 'rdflib.term.Literal', 'rdflib.term.Literal', (['"""City"""'], {'lang': '"""en"""'}), "('City', lang='en')\n", (3072, 3091), False, 'import rdflib\n'), ((3436, 3477), 'rdflib.term.Literal', 'rdflib.term.Literal', (['"""hasPart"""'], {'lang': '"""en"""'}), "('hasPart', lang='en')\n", (3455, 3477), False, 'import rdflib\n'), ((3848, 3893), 'rdflib.term.Literal', 'rdflib.term.Literal', (['"""coordinates"""'], {'lang': '"""en"""'}), "('coordinates', lang='en')\n", (3867, 3893), False, 'import rdflib\n'), ((5388, 5412), 
'rdflib.Literal', 'rdflib.Literal', (['"""[0, 0]"""'], {}), "('[0, 0]')\n", (5402, 5412), False, 'import rdflib\n'), ((5726, 5750), 'rdflib.Literal', 'rdflib.Literal', (['"""<NAME>"""'], {}), "('<NAME>')\n", (5740, 5750), False, 'import rdflib\n'), ((5789, 5807), 'rdflib.Literal', 'rdflib.Literal', (['(25)'], {}), '(25)\n', (5803, 5807), False, 'import rdflib\n'), ((5908, 5932), 'rdflib.Literal', 'rdflib.Literal', (['"""<NAME>"""'], {}), "('<NAME>')\n", (5922, 5932), False, 'import rdflib\n'), ((5971, 5989), 'rdflib.Literal', 'rdflib.Literal', (['(25)'], {}), '(25)\n', (5985, 5989), False, 'import rdflib\n'), ((6148, 6172), 'rdflib.Literal', 'rdflib.Literal', (['"""[0, 0]"""'], {}), "('[0, 0]')\n", (6162, 6172), False, 'import rdflib\n'), ((8391, 8407), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (8399, 8407), True, 'import numpy as np\n'), ((8556, 8572), 'numpy.array', 'np.array', (['[1, 2]'], {}), '([1, 2])\n', (8564, 8572), True, 'import numpy as np\n'), ((9961, 10005), 'osp.core.namespaces.city.coordinates.convert_to_datatype', 'city.coordinates.convert_to_datatype', (['[1, 2]'], {}), '([1, 2])\n', (9997, 10005), False, 'from osp.core.namespaces import city\n'), ((10021, 10037), 'numpy.array', 'np.array', (['[1, 2]'], {}), '([1, 2])\n', (10029, 10037), True, 'import numpy as np\n'), ((10385, 10401), 'numpy.array', 'np.array', (['[1, 2]'], {}), '([1, 2])\n', (10393, 10401), True, 'import numpy as np\n'), ((10346, 10368), 'rdflib.Literal', 'rdflib.Literal', (['[1, 2]'], {}), '([1, 2])\n', (10360, 10368), False, 'import rdflib\n')] |
import argparse
import logging
import os
import sys
import numpy as np
from naslib.optimizers.discrete.re import RegularizedEvolution as RE
from naslib.optimizers.discrete.re import Searcher as RESearcher
from naslib.optimizers.discrete.rs import RandomSearch as RS
from naslib.optimizers.discrete.rs import Searcher as RSSearcher
from naslib.optimizers.discrete.tpe import Searcher as TPESearcher
from naslib.optimizers.discrete.tpe import TPE
from naslib.optimizers.oneshot.darts import Searcher, DARTSOptimizer
from naslib.optimizers.oneshot.gdas import GDASOptimizer
from naslib.optimizers.oneshot.pc_darts import PCDARTSOptimizer
from naslib.search_spaces.nasbench201 import MacroGraph, PRIMITIVES, OPS
from naslib.utils import config_parser
from naslib.utils.parser import Parser
from naslib.utils.utils import create_exp_dir
# Optimizer classes selectable via --optimizer (referenced by their name).
opt_list = [DARTSOptimizer, GDASOptimizer, PCDARTSOptimizer, RE, TPE, RS]
# Log to stdout with timestamps.
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
                    format=log_format, datefmt='%m/%d %I:%M:%S %p')
# Command-line interface for the NAS-Bench-201 search runs.
parser = argparse.ArgumentParser('nasbench201')
parser.add_argument('--optimizer', type=str, default='RE')
parser.add_argument('--seed', type=int, default=1)
parser.add_argument('--dataset', type=str, default='cifar10')
parser.add_argument('--epochs', type=int, default=50, help='num of training epochs')
parser.add_argument('--n_evals', type=int, default=200, help='num of function evaluations')
args = parser.parse_args()
if __name__ == '__main__':
    # Load configuration and seed all RNGs for reproducibility.
    config = config_parser('../../configs/nasbench_201.yaml')
    parser = Parser('../../configs/nasbench_201.yaml')
    np.random.seed(args.seed)
    config.seed = parser.config.seed = args.seed
    config.epochs = parser.config.epochs = args.epochs
    # Each optimizer gets its own output sub-directory.
    parser.config.save += '/{}'.format(args.optimizer)
    create_exp_dir(parser.config.save)
    # Mirror all log output into a per-seed file in the save directory.
    fh = logging.FileHandler(os.path.join(parser.config.save,
                                          'log_{}.txt'.format(config.seed)))
    fh.setFormatter(logging.Formatter(log_format))
    logging.getLogger().addHandler(fh)
    # Resolve the optimizer class by an explicit name -> class mapping instead
    # of eval() on the CLI string (eval would execute arbitrary user input).
    optimizer_by_name = {
        'DARTSOptimizer': DARTSOptimizer,
        'GDASOptimizer': GDASOptimizer,
        'PCDARTSOptimizer': PCDARTSOptimizer,
        'RE': RE,
        'TPE': TPE,
        'RS': RS,
    }
    one_shot_optimizer = optimizer_by_name[args.optimizer].from_config(**config)
    search_space = MacroGraph.from_optimizer_op(
        one_shot_optimizer,
        config=config,
        primitives=PRIMITIVES,
        ops_dict=OPS
    )
    one_shot_optimizer.init()
    if hasattr(one_shot_optimizer, 'fill_space'):
        one_shot_optimizer.fill_space()
    # Discrete optimizers ship their own Searcher; one-shot optimizers
    # (DARTS/GDAS/PC-DARTS) share the DARTS Searcher.
    searcher_by_name = {'RS': RSSearcher, 'RE': RESearcher, 'TPE': TPESearcher}
    _searcher = searcher_by_name.get(args.optimizer, Searcher)
    searcher = _searcher(search_space, parser, arch_optimizer=one_shot_optimizer)
    searcher.run(n_evaluations=args.n_evals)
| [
"logging.basicConfig",
"logging.getLogger",
"argparse.ArgumentParser",
"naslib.utils.parser.Parser",
"naslib.utils.utils.create_exp_dir",
"logging.Formatter",
"naslib.search_spaces.nasbench201.MacroGraph.from_optimizer_op",
"numpy.random.seed",
"naslib.utils.config_parser"
] | [((949, 1060), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO', 'format': 'log_format', 'datefmt': '"""%m/%d %I:%M:%S %p"""'}), "(stream=sys.stdout, level=logging.INFO, format=\n log_format, datefmt='%m/%d %I:%M:%S %p')\n", (968, 1060), False, 'import logging\n'), ((1086, 1124), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""nasbench201"""'], {}), "('nasbench201')\n", (1109, 1124), False, 'import argparse\n'), ((1542, 1590), 'naslib.utils.config_parser', 'config_parser', (['"""../../configs/nasbench_201.yaml"""'], {}), "('../../configs/nasbench_201.yaml')\n", (1555, 1590), False, 'from naslib.utils import config_parser\n'), ((1604, 1645), 'naslib.utils.parser.Parser', 'Parser', (['"""../../configs/nasbench_201.yaml"""'], {}), "('../../configs/nasbench_201.yaml')\n", (1610, 1645), False, 'from naslib.utils.parser import Parser\n'), ((1650, 1675), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (1664, 1675), True, 'import numpy as np\n'), ((1840, 1874), 'naslib.utils.utils.create_exp_dir', 'create_exp_dir', (['parser.config.save'], {}), '(parser.config.save)\n', (1854, 1874), False, 'from naslib.utils.utils import create_exp_dir\n'), ((2193, 2298), 'naslib.search_spaces.nasbench201.MacroGraph.from_optimizer_op', 'MacroGraph.from_optimizer_op', (['one_shot_optimizer'], {'config': 'config', 'primitives': 'PRIMITIVES', 'ops_dict': 'OPS'}), '(one_shot_optimizer, config=config, primitives=\n PRIMITIVES, ops_dict=OPS)\n', (2221, 2298), False, 'from naslib.search_spaces.nasbench201 import MacroGraph, PRIMITIVES, OPS\n'), ((2035, 2064), 'logging.Formatter', 'logging.Formatter', (['log_format'], {}), '(log_format)\n', (2052, 2064), False, 'import logging\n'), ((2070, 2089), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (2087, 2089), False, 'import logging\n')] |
# coding=utf8
# --------------------------------------------------------
# Scene Graph Generation by Iterative Message Passing
# Licensed under The MIT License [see LICENSE for details]
# Written by <NAME>
# --------------------------------------------------------
import argparse, json, string
from collections import Counter
import math
import os
from math import floor
import h5py as h5
import numpy as np
import pprint
import xml.etree.ElementTree as ET
"""
A script for generating an hdf5 ROIDB from the VisualGenome dataset
"""
def preprocess_object_labels(data, alias_dict=None):
    """Canonicalize object names in place.

    For every object in every image, record its id under 'ids' and replace
    each name with its preprocessed (and, if present in `alias_dict`,
    aliased) form.

    :param data: list of image dicts with an 'objects' list; mutated in place.
    :param alias_dict: optional mapping from preprocessed name to its
        canonical alias. Default changed from a mutable ``{}`` to ``None``
        to avoid the shared-mutable-default pitfall; behavior is identical.
    """
    if alias_dict is None:
        alias_dict = {}
    for img in data:
        for obj in img['objects']:
            obj['ids'] = [obj['object_id']]
            names = []
            for name in obj['names']:
                label = sentence_preprocess(name)
                if label in alias_dict:
                    label = alias_dict[label]
                names.append(label)
            obj['names'] = names
def preprocess_predicates(data, alias_dict=None):
    """Canonicalize relationship predicates in place.

    Each predicate is preprocessed and, if present in `alias_dict`, replaced
    by its canonical alias.

    :param data: list of image dicts with a 'relationships' list; mutated
        in place.
    :param alias_dict: optional mapping from preprocessed predicate to its
        canonical alias. Default changed from a mutable ``{}`` to ``None``
        to avoid the shared-mutable-default pitfall; behavior is identical.
    """
    if alias_dict is None:
        alias_dict = {}
    for img in data:
        for relation in img['relationships']:
            predicate = sentence_preprocess(relation['predicate'])
            if predicate in alias_dict:
                predicate = alias_dict[predicate]
            relation['predicate'] = predicate
def extract_object_token(data, obj_list=None, verbose=True):
    """ Builds a set that contains the object names. Filters infrequent tokens.

    :param data: list of image dicts, each with an 'objects' list whose
        entries carry a 'names' list.
    :param obj_list: optional whitelist of names; when falsy, every name is
        counted. Default changed from a mutable ``[]`` to ``None`` to avoid
        the shared-mutable-default pitfall; both are falsy, so behavior is
        identical.
    :param verbose: print how many tokens were kept.
    :return: (set of kept tokens, dict mapping token -> occurrence count)
    """
    token_counter = Counter()
    for img in data:
        for region in img['objects']:
            for name in region['names']:
                if not obj_list or name in obj_list:
                    token_counter.update([name])
    tokens = set()
    # pick top N tokens
    token_counter_return = {}
    for token, count in token_counter.most_common():
        tokens.add(token)
        token_counter_return[token] = count
    if verbose:
        print(('Keeping %d / %d objects'
               % (len(tokens), len(token_counter))))
    return tokens, token_counter_return
def extract_predicate_token(data, pred_list=None, verbose=True):
    """ Builds a set that contains the relationship predicates. Filters infrequent tokens.

    :param data: list of image dicts, each with a 'relationships' list whose
        entries carry a 'predicate'.
    :param pred_list: optional whitelist of predicates; when falsy, every
        predicate is counted. Default changed from a mutable ``[]`` to
        ``None`` (shared-mutable-default pitfall); both are falsy, so
        behavior is identical.
    :param verbose: print how many tokens were kept.
    :return: (set of kept tokens, dict mapping token -> occurrence count)
    """
    # NOTE: the original kept an unused `total` accumulator; removed.
    token_counter = Counter()
    for img in data:
        for relation in img['relationships']:
            predicate = relation['predicate']
            if not pred_list or predicate in pred_list:
                token_counter.update([predicate])
    tokens = set()
    token_counter_return = {}
    for token, count in token_counter.most_common():
        tokens.add(token)
        token_counter_return[token] = count
    if verbose:
        print(('Keeping %d / %d predicates with enough instances'
               % (len(tokens), len(token_counter))))
    return tokens, token_counter_return
def merge_duplicate_boxes(data):
    """Merge near-duplicate object boxes of each image in place.

    Two boxes are merged when they are (1) near-identical (IoU > 0.9),
    (2) one contains the other and both share a first name, or
    (3) they overlap strongly (IoU > 0.6) and share a first name.
    Merged objects are collapsed into a single object whose 'ids' lists all
    merged object ids; the box becomes the extreme hull (types 2/3) or the
    mean box (type 1) of the group. `data` is mutated in place.
    """
    def IoU(b1, b2):
        # Boxes are [x1, y1, x2, y2]. Returns 0 when they do not intersect.
        # NOTE: the denominator is the area of the *enclosing* box of b1 and
        # b2 (not the true union), so this slightly underestimates real IoU.
        if b1[2] <= b2[0] or \
            b1[3] <= b2[1] or \
            b1[0] >= b2[2] or \
            b1[1] >= b2[3]:
            return 0
        b1b2 = np.vstack([b1,b2])
        minc = np.min(b1b2, 0)
        maxc = np.max(b1b2, 0)
        union_area = (maxc[2]-minc[0])*(maxc[3]-minc[1])
        int_area = (minc[2]-maxc[0])*(minc[3]-maxc[1])
        return float(int_area)/float(union_area)
    def to_x1y1x2y2(obj):
        # Convert an object's (x, y, w, h) dict fields to an [x1,y1,x2,y2] array.
        x1 = obj['x']
        y1 = obj['y']
        x2 = obj['x'] + obj['w']
        y2 = obj['y'] + obj['h']
        return np.array([x1, y1, x2, y2], dtype=np.int32)
    def inside(b1, b2):
        # True when box b1 lies entirely within box b2.
        return b1[0] >= b2[0] and b1[1] >= b2[1] \
            and b1[2] <= b2[2] and b1[3] <= b2[3]
    def overlap(obj1, obj2):
        # Classify the overlap: 1 = same box, 2 = containment with same
        # label, 3 = strong overlap with same label, 0 = no merge.
        b1 = to_x1y1x2y2(obj1)
        b2 = to_x1y1x2y2(obj2)
        iou = IoU(b1, b2)
        if all(b1 == b2) or iou > 0.9: # consider as the same box
            return 1
        elif (inside(b1, b2) or inside(b2, b1))\
            and obj1['names'][0] == obj2['names'][0]: # same object inside the other
            return 2
        elif iou > 0.6 and obj1['names'][0] == obj2['names'][0]: # multiple overlapping same object
            return 3
        else:
            return 0 # no overlap
    num_merged = {1:0, 2:0, 3:0}
    print('merging boxes..')
    for img in data:
        # mark objects to be merged and save their ids
        objs = img['objects']
        num_obj = len(objs)
        for i in range(num_obj):
            if 'M_TYPE' in objs[i]: # has been merged
                continue
            merged_objs = [] # circular refs, but fine
            for j in range(i+1, num_obj):
                if 'M_TYPE' in objs[j]: # has been merged
                    continue
                overlap_type = overlap(objs[i], objs[j])
                if overlap_type > 0:
                    objs[j]['M_TYPE'] = overlap_type
                    merged_objs.append(objs[j])
            objs[i]['mobjs'] = merged_objs
        # merge boxes
        filtered_objs = []
        merged_num_obj = 0
        for obj in objs:
            if 'M_TYPE' not in obj:
                ids = [obj['object_id']]
                dims = [to_x1y1x2y2(obj)]
                prominent_type = 1
                for mo in obj['mobjs']:
                    ids.append(mo['object_id'])
                    obj['names'].extend(mo['names'])
                    dims.append(to_x1y1x2y2(mo))
                    if mo['M_TYPE'] > prominent_type:
                        prominent_type = mo['M_TYPE']
                merged_num_obj += len(ids)
                obj['ids'] = ids
                mdims = np.zeros(4)
                if prominent_type > 1: # use extreme
                    mdims[:2] = np.min(np.vstack(dims)[:,:2], 0)
                    mdims[2:] = np.max(np.vstack(dims)[:,2:], 0)
                else: # use mean
                    mdims = np.mean(np.vstack(dims), 0)
                obj['x'] = int(mdims[0])
                obj['y'] = int(mdims[1])
                obj['w'] = int(mdims[2] - mdims[0])
                obj['h'] = int(mdims[3] - mdims[1])
                num_merged[prominent_type] += len(obj['mobjs'])
                obj['mobjs'] = None
                obj['names'] = list(set(obj['names'])) # remove duplicates
                filtered_objs.append(obj)
            else:
                # merged-away objects were never assigned a 'mobjs' list
                assert 'mobjs' not in obj
        img['objects'] = filtered_objs
        # every original object id must be accounted for exactly once
        assert(merged_num_obj == num_obj)
    print('# merged boxes per merging type:')
    print(num_merged)
def build_token_dict(vocab):
    """ build bi-directional mapping between index and token"""
    # Sort the vocabulary so index assignment is deterministic across runs;
    # indices start at 1 (0 is implicitly reserved).
    token_to_idx = {tok: idx for idx, tok in enumerate(sorted(vocab), start=1)}
    idx_to_token = {idx: tok for tok, idx in token_to_idx.items()}
    return token_to_idx, idx_to_token
def encode_box(region, org_h, org_w, im_long_size):
    """Rescale a region box to an image whose long side is `im_long_size`.

    :param region: dict with 1-indexed 'x', 'y' and 'w', 'h' fields.
    :param org_h: original image height.
    :param org_w: original image width.
    :param im_long_size: target length of the image's longer side.
    :return: int32 array [center_x_ish, center_y_ish, w, h] — the first two
        entries are the clamped top-left corner plus half the box size.
    """
    # NOTE: the original assigned x/y/w/h from the region and then
    # immediately overwrote them below; those dead assignments are removed.
    scale = float(im_long_size) / max(org_h, org_w)
    image_size = im_long_size
    # recall: x,y are 1-indexed
    x, y = math.floor(scale*(region['x']-1)), math.floor(scale*(region['y']-1))
    w, h = math.ceil(scale*region['w']), math.ceil(scale*region['h'])
    # clamp to image
    if x < 0: x = 0
    if y < 0: y = 0
    # box should be at least 2 by 2
    if x > image_size - 2:
        x = image_size - 2
    if y > image_size - 2:
        y = image_size - 2
    if x + w >= image_size:
        w = image_size - x
    if y + h >= image_size:
        h = image_size - y
    # also convert to center-coord oriented
    box = np.asarray([x+floor(w/2), y+floor(h/2), w, h], dtype=np.int32)
    assert box[2] > 0 # width height should be positive numbers
    assert box[3] > 0
    return box
def encode_objects(obj_data, token_to_idx, token_counter, org_h, org_w, im_long_sizes):
    """Flatten per-image object annotations into dataset-level arrays.

    For each object, the most frequent known name is chosen as its label;
    objects with no known name are dropped. Also fills each image's
    'id_to_idx' map (object id -> flat index) in place.

    :param obj_data: list of image dicts with an 'objects' list (mutated).
    :param token_to_idx: mapping from object name to label index.
    :param token_counter: mapping from object name to occurrence count.
    :param org_h: per-image original heights.
    :param org_w: per-image original widths.
    :param im_long_sizes: target long-side sizes; one box array per size.
    :return: (labels column vector, {size: box matrix},
        im_to_first_obj, im_to_last_obj) where the first/last arrays hold
        -1 for images without any encoded object.
    """
    encoded_labels = []
    encoded_boxes = {}
    for size in im_long_sizes:
        encoded_boxes[size] = []
    im_to_first_obj = np.zeros(len(obj_data), dtype=np.int32)
    im_to_last_obj = np.zeros(len(obj_data), dtype=np.int32)
    obj_counter = 0
    for i, img in enumerate(obj_data):
        im_to_first_obj[i] = obj_counter
        img['id_to_idx'] = {} # object id to region idx
        for obj in img['objects']:
            # pick a label for the object
            max_occur = 0
            obj_label = None
            for name in obj['names']:
                # pick the name that has maximum occurance
                if name in token_to_idx and token_counter[name] > max_occur:
                    obj_label = name
                    max_occur = token_counter[obj_label]
            # if obj_label is None:
            #     print(obj['names'])
            if obj_label is not None:
                # encode region
                for size in im_long_sizes:
                    encoded_boxes[size].append(encode_box(obj, org_h[i], org_w[i], size))
                encoded_labels.append(token_to_idx[obj_label])
                for obj_id in obj['ids']: # assign same index for merged ids
                    img['id_to_idx'][obj_id] = obj_counter
                obj_counter += 1
        # mark images that contributed no encoded objects with -1
        if im_to_first_obj[i] == obj_counter:
            im_to_first_obj[i] = -1
            im_to_last_obj[i] = -1
        else:
            im_to_last_obj[i] = obj_counter - 1
    for k, boxes in encoded_boxes.items():
        # print(boxes)
        encoded_boxes[k] = np.vstack(boxes)
    return np.vstack(encoded_labels), encoded_boxes, im_to_first_obj, im_to_last_obj
def encode_relationship(sub_id, obj_id, id_to_idx):
    """Translate a (subject id, object id) pair into flat object-list
    indices, returned as an int32 array [subject_idx, object_idx]."""
    pair = [id_to_idx[sub_id], id_to_idx[obj_id]]
    return np.asarray(pair, dtype=np.int32)
def encode_relationships(rel_data, token_to_idx, obj_data):
    """MUST BE CALLED AFTER encode_objects!!!

    Flattens per-image relationships into dataset-level arrays, dropping
    relationships whose endpoints were filtered out, whose predicate is
    unknown, or whose subject and object map to the same box. Relies on the
    'id_to_idx' maps that encode_objects stored on obj_data.

    :return: (predicate column vector, Nx2 [subject_idx, object_idx] matrix,
        im_to_first_rel, im_to_last_rel) with -1 entries for images that
        keep no relationships.
    """
    encoded_pred = [] # encoded predicates
    encoded_rel = [] # encoded relationship tuple
    im_to_first_rel = np.zeros(len(rel_data), dtype=np.int32)
    im_to_last_rel = np.zeros(len(rel_data), dtype=np.int32)
    rel_idx_counter = 0
    no_rel_counter = 0
    obj_filtered = 0
    predicate_filtered = 0
    duplicate_filtered = 0
    for i, img in enumerate(rel_data):
        im_to_first_rel[i] = rel_idx_counter
        id_to_idx = obj_data[i]['id_to_idx'] # object id to object list idx
        for relation in img['relationships']:
            subj = relation['subject']
            obj = relation['object']
            predicate = relation['predicate']
            if subj['object_id'] not in id_to_idx or obj['object_id'] not in id_to_idx:
                # an endpoint was dropped during object encoding
                obj_filtered += 1
                continue
            elif predicate not in token_to_idx:
                predicate_filtered += 1
                continue
            elif id_to_idx[subj['object_id']] == id_to_idx[obj['object_id']]: # sub and obj can't be the same box
                duplicate_filtered += 1
                continue
            else:
                encoded_pred.append(token_to_idx[predicate])
                encoded_rel.append(
                    encode_relationship(subj['object_id'],
                                        obj['object_id'],
                                        id_to_idx
                                        ))
                rel_idx_counter += 1 # accumulate counter
        if im_to_first_rel[i] == rel_idx_counter:
            # if no qualifying relationship
            im_to_first_rel[i] = -1
            im_to_last_rel[i] = -1
            no_rel_counter += 1
        else:
            im_to_last_rel[i] = rel_idx_counter - 1
    print('%i rel is filtered by object' % obj_filtered)
    print('%i rel is filtered by predicate' % predicate_filtered)
    print('%i rel is filtered by duplicate' % duplicate_filtered)
    print('%i rel remains ' % len(encoded_pred))
    print('%i out of %i valid images have relationships' % (len(rel_data)-no_rel_counter, len(rel_data)))
    return np.vstack(encoded_pred), np.vstack(encoded_rel), im_to_first_rel, im_to_last_rel
def sentence_preprocess(phrase):
    """ preprocess a sentence: lowercase, clean up weird chars, remove punctuation

    :param phrase: input text (str).
    :return: cleaned lowercase str with ASCII punctuation stripped.
    """
    replacements = {
        '½': 'half',
        '—': '-',
        '™': '',
        '¢': 'cent',
        'ç': 'c',
        'û': 'u',
        'é': 'e',
        '°': ' degree',
        'è': 'e',
        '…': '',
    }
    # Python-3 fix: the original mixed bytes and str (encode('utf-8')
    # followed by str methods) and used the removed dict.iteritems() and
    # two-argument str.translate() APIs, so it crashed on any Python 3
    # interpreter. The cleaning pipeline itself is unchanged:
    # strip spaces -> substitute special chars -> lowercase -> drop punctuation.
    phrase = phrase.strip(' ')
    for old, new in replacements.items():
        phrase = phrase.replace(old, new)
    return phrase.lower().translate(str.maketrans('', '', string.punctuation))
def encode_splits(obj_data, opt=None):
    """Assign a split id (0=train, 1=val, 2=test) to every image.

    With ``opt=None`` the split stored in each image dict is used; otherwise
    images are partitioned by position using opt['val_begin_idx'] and
    opt['test_begin_idx'], and optionally shuffled when opt['shuffle'].
    """
    if opt is not None:
        val_begin_idx = opt['val_begin_idx']
        test_begin_idx = opt['test_begin_idx']
    split = np.zeros(len(obj_data), dtype=np.int32)
    for idx, info in enumerate(obj_data):
        if opt is None:
            # split label comes from the input file
            name = info['split']
            split[idx] = 2 if name == 'test' else (1 if name == 'val' else 0)
        else:
            # position-based split: everything past test_begin_idx is test,
            # past val_begin_idx is val, the rest is train
            split[idx] = 2 if idx >= test_begin_idx else (
                1 if idx >= val_begin_idx else 0)
    if opt is not None and opt['shuffle']:
        np.random.shuffle(split)
    print(('assigned %d/%d/%d to train/val/test split' % (np.sum(split==0), np.sum(split==1), np.sum(split==2))))
    return split
def make_alias_dict(dict_file):
    """create an alias dictionary from a file

    Each line of ``dict_file`` is a comma-separated alias group.  Every term
    in a group is mapped to the group's canonical name: the first term, or —
    if that term was itself already aliased — its existing target.

    Args:
        dict_file: path to the alias file, one comma-separated group per line.
    Returns:
        (out_dict, vocab): mapping term -> canonical name, and the list of
        canonical names in file order (may contain repeats).
    """
    out_dict = {}
    vocab = []
    # `with` closes the file handle; the original left it open
    with open(dict_file, 'r') as f:
        for line in f:
            alias = line.strip('\n').strip('\r').split(',')
            # use the first term as the aliasing target, following existing aliases
            alias_target = alias[0] if alias[0] not in out_dict else out_dict[alias[0]]
            for a in alias:
                out_dict[a] = alias_target
            vocab.append(alias_target)
    return out_dict, vocab
def make_list(list_file):
    """create a blacklist list from a file (one entry per line)

    Strips trailing newline/carriage-return characters from each line.
    Uses ``with`` so the file handle is closed (the original leaked it).
    """
    with open(list_file, 'r') as f:
        return [line.strip('\n').strip('\r') for line in f]
def filter_object_boxes(data, heights, widths, area_frac_thresh):
    """
    filter boxes by a box area-image area ratio threshold

    Mutates ``data`` in place: for image ``i`` only objects whose box area
    (h*w) is strictly greater than ``heights[i]*widths[i]*area_frac_thresh``
    are kept.  Prints how many boxes survived.

    Args:
        data: list of per-image dicts with an 'objects' list of {'h','w',...}.
        heights/widths: per-image dimensions, parallel to ``data``.
        area_frac_thresh: minimum box-area / image-area fraction.
    """
    thresh_count = 0
    all_count = 0
    for i, img in enumerate(data):
        kept = []
        img_area = float(heights[i] * widths[i])
        for obj in img['objects']:
            # strictly greater, matching the original comparison
            if float(obj['h'] * obj['w']) > img_area * area_frac_thresh:
                kept.append(obj)
                thresh_count += 1
            all_count += 1
        img['objects'] = kept
    # fixed typo in the log message ("threshod" -> "threshold")
    print('box threshold: keeping %i/%i boxes' % (thresh_count, all_count))
def filter_by_idx(data, valid_list):
    """Select the elements of ``data`` at the positions given in ``valid_list``."""
    selected = []
    for idx in valid_list:
        selected.append(data[idx])
    return selected
def obj_rel_cross_check(obj_data, rel_data, verbose=False):
    """
    make sure all objects that are in relationship dataset
    are in object dataset

    Counts, over all images, how many relationships have both endpoints
    present in the image's object list and prints the tally.  With
    ``verbose=True`` each missing endpoint is printed as it is found.
    """
    num_img = len(obj_data)
    num_correct = 0
    total_rel = 0
    for i in range(num_img):  # range works on Py2 and Py3 (xrange is Py2-only)
        assert(obj_data[i]['image_id'] == rel_data[i]['image_id'])
        objs = obj_data[i]['objects']
        rels = rel_data[i]['relationships']
        # set gives O(1) membership tests instead of O(n) list scans
        ids = set(obj['object_id'] for obj in objs)
        for rel in rels:
            if rel['subject']['object_id'] in ids \
                    and rel['object']['object_id'] in ids:
                num_correct += 1
            elif verbose:
                if rel['subject']['object_id'] not in ids:
                    print(str(rel['subject']['object_id']) + 'cannot be found in ' + str(i))
                if rel['object']['object_id'] not in ids:
                    print(str(rel['object']['object_id']) + 'cannot be found in ' + str(i))
            total_rel += 1
    print('cross check: %i/%i relationship are correct' % (num_correct, total_rel))
def sync_objects(obj_data, rel_data):
    """Ensure every object referenced by a relationship exists in obj_data.

    For each image, any relationship endpoint whose ``object_id`` is missing
    from the image's object list is appended to it, with its ``name`` wrapped
    into a ``names`` list to match the object-annotation format.  Mutates
    ``obj_data`` in place.
    """
    num_img = len(obj_data)
    for i in range(num_img):  # range instead of Python-2-only xrange
        assert(obj_data[i]['image_id'] == rel_data[i]['image_id'])
        objs = obj_data[i]['objects']
        rels = rel_data[i]['relationships']
        known_ids = set(obj['object_id'] for obj in objs)
        for rel in rels:
            for endpoint in ('subject', 'object'):
                rel_obj = rel[endpoint]
                if rel_obj['object_id'] not in known_ids:
                    rel_obj['names'] = [rel_obj['name']]
                    objs.append(rel_obj)
                    # Bug fix: record the new id so an object referenced by
                    # several relationships is not appended more than once
                    # (the original computed `ids` once and never updated it).
                    known_ids.add(rel_obj['object_id'])
        obj_data[i]['objects'] = objs
# def create_from_xml(img_data, orginal_obj_data, args):
# pred_list = set()
# obj_list = set()
# obj_data, rel_data = [], []
# for im in img_data:
# tree = ET.parse('{}/{}.xml'.format(args.vrrvg_dir, im['image_id']))
# root = tree.getroot()
# for child in root:
# if child.tag == 'object':
# name = str(child[0].text)
# obj_list.add(name)
# if child.tag == 'relation':
# predicate = str(child[2].text)
# pred_list.add(predicate)
# for im, im_obj in zip(img_data, orginal_obj_data):
# tree = ET.parse('{}/{}.xml'.format(args.vrrvg_dir, im['image_id']))
# root = tree.getroot()
# obj_data.append({'objects':[], 'image_id':im['image_id']})
# rel_data.append({'relationships':[], 'image_id':im['image_id']})
# im_obj_ids = set()
# for obj in im_obj['objects']:
# if any([name in obj_list for name in obj['names']]):
# obj_data[-1]['objects'].append(obj)
# im_obj_ids.add(obj['object_id'])
# for child in root:
# if child.tag == 'object':
# name = str(child[0].text)
# object_id = int(child[1].text)
# if object_id not in im_obj_ids:
# print("NEW OBJ: {}/{}".format(im['image_id'], object_id))
# xmin = int(child[3][0].text)
# ymin = int(child[3][1].text)
# xmax = int(child[3][2].text)
# ymax = int(child[3][3].text)
# w = xmax - xmin
# h = ymax - ymin
# obj_data[-1]['objects'].append({'x': xmin, 'y': ymin, 'w': w, 'h': h, 'object_id': object_id, 'names': [name]})
# if child.tag == 'relation':
# subject_id = int(child[0].text)
# object_id = int(child[1].text)
# predicate = str(child[2].text)
# rel_data[-1]['relationships'].append({'object': {'object_id': object_id}, 'subject': {'object_id': subject_id}, 'predicate': predicate})
# return list(obj_list), list(pred_list), obj_data, rel_data
def create_from_xml(img_data, orginal_obj_data, args):
    """Build object/relationship annotations from VrR-VG per-image XML files.

    First pass collects the object-name and predicate vocabularies from every
    existing ``<vrrvg_dir>/<image_id>.xml``.  Second pass builds, per image,
    the object list (XML objects first, then original VG objects whose names
    are in the vocabulary and whose ids are not already present) and the
    relationship list.  Images without an XML file keep only their original
    in-vocabulary objects and get no relationships.

    Returns:
        (obj_list, pred_list, obj_data, rel_data)
    """
    pred_list = set()
    obj_list = set()
    obj_data, rel_data = [], []
    # pass 1: vocabulary of object names and predicates
    for im in img_data:
        xml_path = '{}/{}.xml'.format(args.vrrvg_dir, im['image_id'])
        if os.path.exists(xml_path):
            root = ET.parse(xml_path).getroot()
            for child in root:
                if child.tag == 'object':
                    obj_list.add(str(child[0].text))
                if child.tag == 'relation':
                    pred_list.add(str(child[2].text))
    # pass 2: per-image object and relationship annotations
    for im, im_obj in zip(img_data, orginal_obj_data):
        obj_data.append({'objects': [], 'image_id': im['image_id']})
        rel_data.append({'relationships': [], 'image_id': im['image_id']})
        # Bug fix: initialize per image BEFORE the existence check.  The
        # original only created im_obj_ids inside the if-branch, so the else
        # branch raised NameError for the first image without an XML file
        # (and reused a previous image's set otherwise).
        im_obj_ids = set()
        xml_path = '{}/{}.xml'.format(args.vrrvg_dir, im['image_id'])
        if os.path.exists(xml_path):
            root = ET.parse(xml_path).getroot()
            for child in root:
                if child.tag == 'object':
                    name = str(child[0].text)
                    object_id = int(child[1].text)
                    im_obj_ids.add(object_id)
                    # child[3] is the bounding box: xmin, ymin, xmax, ymax
                    xmin = int(child[3][0].text)
                    ymin = int(child[3][1].text)
                    xmax = int(child[3][2].text)
                    ymax = int(child[3][3].text)
                    obj_data[-1]['objects'].append({'x': xmin, 'y': ymin, 'w': xmax - xmin, 'h': ymax - ymin, 'object_id': object_id, 'names': [name]})
            # add original VG objects with in-vocabulary names and new ids
            for obj in im_obj['objects']:
                if any([name in obj_list for name in obj['names']]):
                    if obj['object_id'] not in im_obj_ids:
                        obj_data[-1]['objects'].append(obj)
                        im_obj_ids.add(int(obj['object_id']))
            for child in root:
                if child.tag == 'relation':
                    subject_id = int(child[0].text)
                    object_id = int(child[1].text)
                    predicate = str(child[2].text)
                    # warn (but still record) relations with unknown endpoints
                    if subject_id not in im_obj_ids or object_id not in im_obj_ids:
                        print(subject_id, object_id, predicate)
                    rel_data[-1]['relationships'].append({'object': {'object_id': object_id}, 'subject': {'object_id': subject_id}, 'predicate': predicate})
        else:
            for obj in im_obj['objects']:
                if any([name in obj_list for name in obj['names']]):
                    obj_data[-1]['objects'].append(obj)
                    im_obj_ids.add(int(obj['object_id']))
    return list(obj_list), list(pred_list), obj_data, rel_data
def main(args):
    """Convert VG + VrR-VG annotations into one HDF5 roidb plus a JSON dict file.

    Pipeline: load JSON annotations and the image HDF5 -> keep only valid
    images -> build per-image objects/relations from VrR-VG XML -> sync and
    sanity-check -> filter/merge boxes -> build vocabularies -> encode
    everything into ``args.h5_file`` and write the vocab to ``args.json_file``.
    """
    print('start')
    pprint.pprint(args)
    # read in the annotation data
    print('loading json files..')
    orginal_obj_data = json.load(open(args.object_input))
    img_data = json.load(open(args.metadata_input))
    print('read image db from %s' % args.imdb)
    imdb = h5.File(args.imdb, 'r')
    num_im, _, _, _ = imdb['images'].shape
    img_long_sizes = [512, 1024]
    valid_im_idx = imdb['valid_idx'][:] # valid image indices
    img_ids = imdb['image_ids'][:]
    print('len(valid_im_idx)', len(valid_im_idx))
    print('len(img_ids)', len(img_ids))
    # keep only the images marked valid in the image database
    orginal_obj_data = filter_by_idx(orginal_obj_data, valid_im_idx)
    img_data = filter_by_idx(img_data, valid_im_idx)
    # dump the filtered image metadata next to the outputs
    with open("image_data_.json", 'w') as f:
        json.dump(img_data, f)
    print('len(img_data) before filtering', len(img_data))
    # build vocabularies and per-image annotations from the VrR-VG XML files
    obj_list, pred_list, obj_data, rel_data = create_from_xml(img_data, orginal_obj_data, args)
    print("number of object classes", len(obj_list))
    print("number of predicate classes", len(pred_list))
    print('len(obj_data) after filtering', len(obj_data))
    print('len(rel_data) after filtering', len(rel_data))
    print('len(img_data) after filtering', len(img_data))
    # sanity check: all four data sources must stay image-aligned
    # NOTE(review): xrange is Python-2-only -- this script appears to target Py2
    for i in xrange(num_im):
        assert(obj_data[i]['image_id'] \
            == rel_data[i]['image_id'] \
            == img_data[i]['image_id'] \
            == img_ids[i]
            )
    # may only load a fraction of the data
    if args.load_frac < 1:
        num_im = int(num_im*args.load_frac)
        obj_data = obj_data[:num_im]
        rel_data = rel_data[:num_im]
    print('processing %i images' % num_im)
    # sync objects from rel to obj_data
    sync_objects(obj_data, rel_data)
    obj_rel_cross_check(obj_data, rel_data)
    heights, widths = imdb['original_heights'][:], imdb['original_widths'][:]
    if args.min_box_area_frac > 0:
        # filter out invalid small boxes
        print('threshold bounding box by %f area fraction' % args.min_box_area_frac)
        filter_object_boxes(obj_data, heights, widths, args.min_box_area_frac) # filter by box dimensions
    merge_duplicate_boxes(obj_data)
    # build vocabulary
    object_tokens, object_token_counter = extract_object_token(obj_data, obj_list)
    label_to_idx, idx_to_label = build_token_dict(object_tokens)
    predicate_tokens, predicate_token_counter = extract_predicate_token(rel_data, pred_list)
    predicate_to_idx, idx_to_predicate = build_token_dict(predicate_tokens)
    # print out vocabulary
    print('objects: ')
    print(object_token_counter)
    print('relationships: ')
    print(predicate_token_counter)
    # write the h5 file
    f = h5.File(args.h5_file, 'w')
    # encode object (one box set per long-side size in img_long_sizes)
    encoded_label, encoded_boxes, im_to_first_obj, im_to_last_obj = \
        encode_objects(obj_data, label_to_idx, object_token_counter, \
                       heights, widths, img_long_sizes)
    f.create_dataset('labels', data=encoded_label)
    for k, boxes in encoded_boxes.items():
        f.create_dataset('boxes_%i' % k, data=boxes)
    f.create_dataset('img_to_first_box', data=im_to_first_obj)
    f.create_dataset('img_to_last_box', data=im_to_last_obj)
    encoded_predicate, encoded_rel, im_to_first_rel, im_to_last_rel = \
        encode_relationships(rel_data, predicate_to_idx, obj_data)
    f.create_dataset('predicates', data=encoded_predicate)
    f.create_dataset('relationships', data=encoded_rel)
    f.create_dataset('img_to_first_rel', data=im_to_first_rel)
    f.create_dataset('img_to_last_rel', data=im_to_last_rel)
    # build train/val/test splits
    print('num objects = %i' % encoded_label.shape[0])
    print('num relationships = %i' % encoded_predicate.shape[0])
    opt = None
    if not args.use_input_split:
        opt = {}
        opt['val_begin_idx'] = int(len(obj_data) * args.train_frac)
        opt['test_begin_idx'] = int(len(obj_data) * args.val_frac)
        opt['shuffle'] = args.shuffle
    split = encode_splits(obj_data, opt)
    if split is not None:
        f.create_dataset('split', data=split) # 1 = test, 0 = train
    # and write the additional json file
    json_struct = {
        'label_to_idx': label_to_idx,
        'idx_to_label': idx_to_label,
        'predicate_to_idx': predicate_to_idx,
        'idx_to_predicate': idx_to_predicate,
        'predicate_count': predicate_token_counter,
        'object_count': object_token_counter
    }
    with open(args.json_file, 'w') as f:
        json.dump(json_struct, f)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # input databases: image HDF5, VG JSON annotations, VrR-VG XML directory
    parser.add_argument('--imdb', default='VG/imdb_1024.h5', type=str)
    parser.add_argument('--object_input', default='VG/objects.json', type=str)
    parser.add_argument('--relationship_input', default='VG/relationships.json', type=str)
    parser.add_argument('--vrrvg_dir', default='VG/VrR-VG', type=str)
    parser.add_argument('--metadata_input', default='VG/image_data.json', type=str)
    # parser.add_argument('--object_alias', default='VG/object_alias.txt', type=str)
    # parser.add_argument('--pred_alias', default='VG/predicate_alias.txt', type=str)
    # parser.add_argument('--object_list', default='VG/object_list.txt', type=str)
    # parser.add_argument('--pred_list', default='VG/predicate_list.txt', type=str)
    # parser.add_argument('--num_objects', default=150, type=int, help="set to 0 to disable filtering")
    # parser.add_argument('--num_predicates', default=50, type=int, help="set to 0 to disable filtering")
    # box filtering and output paths
    parser.add_argument('--min_box_area_frac', default=0.002, type=float)
    parser.add_argument('--json_file', default='VG-dicts.json')
    parser.add_argument('--h5_file', default='VG.h5')
    parser.add_argument('--load_frac', default=1, type=float)
    # NOTE(review): argparse `type=bool` turns ANY non-empty string (even
    # "False") into True -- confirm these flags are only set programmatically.
    parser.add_argument('--use_input_split', default=False, type=bool)
    # NOTE(review): train_frac and val_frac share the 0.7 default, which makes
    # the val split empty in encode_splits -- confirm this is intended.
    parser.add_argument('--train_frac', default=0.7, type=float)
    parser.add_argument('--val_frac', default=0.7, type=float)
    parser.add_argument('--shuffle', default=False, type=bool)
    args = parser.parse_args()
    main(args)
| [
"math.ceil",
"argparse.ArgumentParser",
"math.floor",
"numpy.asarray",
"h5py.File",
"collections.Counter",
"numpy.max",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"numpy.vstack",
"numpy.min",
"pprint.pprint",
"json.dump",
"numpy.random.shuffle"
] | [((1440, 1449), 'collections.Counter', 'Counter', ([], {}), '()\n', (1447, 1449), False, 'from collections import Counter\n'), ((2181, 2190), 'collections.Counter', 'Counter', ([], {}), '()\n', (2188, 2190), False, 'from collections import Counter\n'), ((9719, 9765), 'numpy.asarray', 'np.asarray', (['[sub_idx, obj_idx]'], {'dtype': 'np.int32'}), '([sub_idx, obj_idx], dtype=np.int32)\n', (9729, 9765), True, 'import numpy as np\n'), ((21710, 21729), 'pprint.pprint', 'pprint.pprint', (['args'], {}), '(args)\n', (21723, 21729), False, 'import pprint\n'), ((21968, 21991), 'h5py.File', 'h5.File', (['args.imdb', '"""r"""'], {}), "(args.imdb, 'r')\n", (21975, 21991), True, 'import h5py as h5\n'), ((24383, 24409), 'h5py.File', 'h5.File', (['args.h5_file', '"""w"""'], {}), "(args.h5_file, 'w')\n", (24390, 24409), True, 'import h5py as h5\n'), ((26249, 26274), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (26272, 26274), False, 'import argparse, json, string\n'), ((3013, 3032), 'numpy.vstack', 'np.vstack', (['[b1, b2]'], {}), '([b1, b2])\n', (3022, 3032), True, 'import numpy as np\n'), ((3047, 3062), 'numpy.min', 'np.min', (['b1b2', '(0)'], {}), '(b1b2, 0)\n', (3053, 3062), True, 'import numpy as np\n'), ((3078, 3093), 'numpy.max', 'np.max', (['b1b2', '(0)'], {}), '(b1b2, 0)\n', (3084, 3093), True, 'import numpy as np\n'), ((3407, 3449), 'numpy.array', 'np.array', (['[x1, y1, x2, y2]'], {'dtype': 'np.int32'}), '([x1, y1, x2, y2], dtype=np.int32)\n', (3415, 3449), True, 'import numpy as np\n'), ((7068, 7105), 'math.floor', 'math.floor', (["(scale * (region['x'] - 1))"], {}), "(scale * (region['x'] - 1))\n", (7078, 7105), False, 'import math\n'), ((7103, 7140), 'math.floor', 'math.floor', (["(scale * (region['y'] - 1))"], {}), "(scale * (region['y'] - 1))\n", (7113, 7140), False, 'import math\n'), ((7148, 7178), 'math.ceil', 'math.ceil', (["(scale * region['w'])"], {}), "(scale * region['w'])\n", (7157, 7178), False, 'import math\n'), ((7178, 
7208), 'math.ceil', 'math.ceil', (["(scale * region['h'])"], {}), "(scale * region['h'])\n", (7187, 7208), False, 'import math\n'), ((9413, 9429), 'numpy.vstack', 'np.vstack', (['boxes'], {}), '(boxes)\n', (9422, 9429), True, 'import numpy as np\n'), ((9441, 9466), 'numpy.vstack', 'np.vstack', (['encoded_labels'], {}), '(encoded_labels)\n', (9450, 9466), True, 'import numpy as np\n'), ((11990, 12013), 'numpy.vstack', 'np.vstack', (['encoded_pred'], {}), '(encoded_pred)\n', (11999, 12013), True, 'import numpy as np\n'), ((12015, 12037), 'numpy.vstack', 'np.vstack', (['encoded_rel'], {}), '(encoded_rel)\n', (12024, 12037), True, 'import numpy as np\n'), ((13282, 13306), 'numpy.random.shuffle', 'np.random.shuffle', (['split'], {}), '(split)\n', (13299, 13306), True, 'import numpy as np\n'), ((22433, 22455), 'json.dump', 'json.dump', (['img_data', 'f'], {}), '(img_data, f)\n', (22442, 22455), False, 'import argparse, json, string\n'), ((26181, 26206), 'json.dump', 'json.dump', (['json_struct', 'f'], {}), '(json_struct, f)\n', (26190, 26206), False, 'import argparse, json, string\n'), ((5493, 5504), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (5501, 5504), True, 'import numpy as np\n'), ((7593, 7605), 'math.floor', 'floor', (['(w / 2)'], {}), '(w / 2)\n', (7598, 7605), False, 'from math import floor\n'), ((7607, 7619), 'math.floor', 'floor', (['(h / 2)'], {}), '(h / 2)\n', (7612, 7619), False, 'from math import floor\n'), ((13366, 13384), 'numpy.sum', 'np.sum', (['(split == 0)'], {}), '(split == 0)\n', (13372, 13384), True, 'import numpy as np\n'), ((13384, 13402), 'numpy.sum', 'np.sum', (['(split == 1)'], {}), '(split == 1)\n', (13390, 13402), True, 'import numpy as np\n'), ((13402, 13420), 'numpy.sum', 'np.sum', (['(split == 2)'], {}), '(split == 2)\n', (13408, 13420), True, 'import numpy as np\n'), ((5758, 5773), 'numpy.vstack', 'np.vstack', (['dims'], {}), '(dims)\n', (5767, 5773), True, 'import numpy as np\n'), ((5597, 5612), 'numpy.vstack', 'np.vstack', 
(['dims'], {}), '(dims)\n', (5606, 5612), True, 'import numpy as np\n'), ((5662, 5677), 'numpy.vstack', 'np.vstack', (['dims'], {}), '(dims)\n', (5671, 5677), True, 'import numpy as np\n')] |
import os
import torch
import pytorch3d
import pytorch3d.loss
import numpy as np
from scipy.spatial.transform import Rotation
import pandas as pd
import point_cloud_utils as pcu
from tqdm.auto import tqdm
from models.utils import *
from .misc import BlackHole
def load_xyz(xyz_dir):
    """Load every ``*.xyz`` point cloud in ``xyz_dir``.

    Returns:
        Dict mapping file stem (name without the 4-char extension suffix)
        to a float32 ``torch.FloatTensor`` of the loaded points.
    """
    point_clouds = {}
    for filename in tqdm(os.listdir(xyz_dir), desc='Loading'):
        if not filename.endswith('xyz'):
            continue
        stem = filename[:-4]
        points = np.loadtxt(os.path.join(xyz_dir, filename), dtype=np.float32)
        point_clouds[stem] = torch.FloatTensor(points)
    return point_clouds
def load_off(off_dir):
    """Load every ``*.off`` mesh in ``off_dir``.

    Returns:
        Dict mapping file stem to ``{'verts': FloatTensor, 'faces': LongTensor}``.
    """
    meshes = {}
    for filename in tqdm(os.listdir(off_dir), desc='Loading'):
        if not filename.endswith('off'):
            continue
        stem = filename[:-4]
        v, f = pcu.load_mesh_vf(os.path.join(off_dir, filename))
        meshes[stem] = {
            'verts': torch.FloatTensor(v),
            'faces': torch.LongTensor(f),
        }
    return meshes
class Evaluator(object):
    """Evaluate upsampled point clouds against ground-truth clouds and meshes.

    Loads predicted ``.xyz`` clouds from ``output_pcl_dir``, ground-truth
    ``.xyz`` clouds and ``.off`` meshes for ``dataset``, computes per-shape
    metrics (unit-sphere Chamfer distance, bidirectional point-to-mesh
    distance), logs the per-shape table and appends the means to a
    per-dataset CSV summary via ``update_summary``.
    """
    def __init__(self, output_pcl_dir, dataset_root, dataset, summary_dir, experiment_name, device='cuda', res_gts='8192_poisson', logger=BlackHole()):
        # NOTE(review): logger=BlackHole() is evaluated once at class
        # definition, so all Evaluators built without an explicit logger share
        # one instance -- harmless for a no-op sink, but worth confirming.
        super().__init__()
        self.output_pcl_dir = output_pcl_dir
        self.dataset_root = dataset_root
        self.dataset = dataset
        self.summary_dir = summary_dir
        self.experiment_name = experiment_name
        # ground-truth layout: <root>/<dataset>/pointclouds/test/<res_gts>
        # and <root>/<dataset>/meshes/test
        self.gts_pcl_dir = os.path.join(dataset_root, dataset, 'pointclouds', 'test', res_gts)
        self.gts_mesh_dir = os.path.join(dataset_root, dataset, 'meshes', 'test')
        self.res_gts = res_gts
        self.device = device
        self.logger = logger
        self.load_data()
    def load_data(self):
        """Load predictions, ground-truth clouds and meshes into memory."""
        self.pcls_up = load_xyz(self.output_pcl_dir)
        self.pcls_high = load_xyz(self.gts_pcl_dir)
        self.meshes = load_off(self.gts_mesh_dir)
        self.pcls_name = list(self.pcls_up.keys())
    def run(self):
        """Compute metrics for every predicted shape and write the summary."""
        pcls_up, pcls_high, pcls_name = self.pcls_up, self.pcls_high, self.pcls_name
        results = {}
        for name in tqdm(pcls_name, desc='Evaluate'):
            # keep only the first three columns (xyz) and add a batch dim
            pcl_up = pcls_up[name][:,:3].unsqueeze(0).to(self.device)
            if name not in pcls_high:
                self.logger.warning('Shape `%s` not found, ignored.' % name)
                continue
            pcl_high = pcls_high[name].unsqueeze(0).to(self.device)
            verts = self.meshes[name]['verts'].to(self.device)
            faces = self.meshes[name]['faces'].to(self.device)
            # cd and hd_sph are computed but not reported below -- their
            # result-dict entries are commented out
            cd = pytorch3d.loss.chamfer_distance(pcl_up, pcl_high)[0].item()
            cd_sph = chamfer_distance_unit_sphere(pcl_up, pcl_high)[0].item()
            hd_sph = hausdorff_distance_unit_sphere(pcl_up, pcl_high)[0].item()
            # p2f = point_to_mesh_distance_single_unit_sphere(
            #     pcl=pcl_up[0],
            #     verts=verts,
            #     faces=faces
            # ).sqrt().mean().item()
            if 'blensor' in self.experiment_name:
                # blensor outputs are in a rotated frame: apply a -90 degree
                # rotation about x before measuring against the mesh
                # (presumably to align scan and mesh frames -- confirm)
                rotmat = torch.FloatTensor(Rotation.from_euler('xyz', [-90, 0, 0], degrees=True).as_matrix()).to(pcl_up[0])
                p2f = point_mesh_bidir_distance_single_unit_sphere(
                    pcl=pcl_up[0].matmul(rotmat.t()),
                    verts=verts,
                    faces=faces
                ).item()
            else:
                p2f = point_mesh_bidir_distance_single_unit_sphere(
                    pcl=pcl_up[0],
                    verts=verts,
                    faces=faces
                ).item()
            results[name] = {
                # 'cd': cd,
                'cd_sph': cd_sph,
                'p2f': p2f,
                # 'hd_sph': hd_sph,
            }
        # rows = shape names, columns = metrics
        results = pd.DataFrame(results).transpose()
        res_mean = results.mean(axis=0)
        self.logger.info("\n" + repr(results))
        self.logger.info("\nMean\n" + '\n'.join([
            '%s\t%.12f' % (k, v) for k, v in res_mean.items()
        ]))
        # append the mean metrics to the per-dataset CSV summary
        update_summary(
            os.path.join(self.summary_dir, 'Summary_%s.csv' % self.dataset),
            model=self.experiment_name,
            metrics={
                # 'cd(mean)': res_mean['cd'],
                'cd_sph(mean)': res_mean['cd_sph'],
                'p2f(mean)': res_mean['p2f'],
                # 'hd_sph(mean)': res_mean['hd_sph'],
            }
        )
def update_summary(path, model, metrics):
    """Insert or overwrite one model's metric values in a CSV summary table.

    Args:
        path: CSV summary file (rows = models, columns = metrics); created
            on first use.
        model: row label for this experiment.
        metrics: mapping from metric name to value.
    Returns:
        The updated pandas DataFrame (also written back to ``path``).
    """
    if os.path.exists(path):
        # raw string for the regex separator: '\s' in a plain literal is an
        # invalid escape sequence (DeprecationWarning on modern Python)
        df = pd.read_csv(path, index_col=0, sep=r"\s*,\s*", engine='python')
    else:
        df = pd.DataFrame()
    for metric, value in metrics.items():
        if metric not in df.columns:
            df[metric] = np.nan  # new metric column: NaN for existing models
        df.loc[model, metric] = value
    df.to_csv(path, float_format='%.12f')
    return df
| [
"os.path.exists",
"point_cloud_utils.load_mesh_vf",
"os.listdir",
"pytorch3d.loss.chamfer_distance",
"scipy.spatial.transform.Rotation.from_euler",
"pandas.read_csv",
"torch.LongTensor",
"os.path.join",
"tqdm.auto.tqdm",
"pandas.DataFrame",
"numpy.loadtxt",
"torch.FloatTensor"
] | [((4414, 4434), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (4428, 4434), False, 'import os\n'), ((322, 341), 'os.listdir', 'os.listdir', (['xyz_dir'], {}), '(xyz_dir)\n', (332, 341), False, 'import os\n'), ((448, 473), 'os.path.join', 'os.path.join', (['xyz_dir', 'fn'], {}), '(xyz_dir, fn)\n', (460, 473), False, 'import os\n'), ((636, 655), 'os.listdir', 'os.listdir', (['off_dir'], {}), '(off_dir)\n', (646, 655), False, 'import os\n'), ((762, 787), 'os.path.join', 'os.path.join', (['off_dir', 'fn'], {}), '(off_dir, fn)\n', (774, 787), False, 'import os\n'), ((811, 833), 'point_cloud_utils.load_mesh_vf', 'pcu.load_mesh_vf', (['path'], {}), '(path)\n', (827, 833), True, 'import point_cloud_utils as pcu\n'), ((850, 874), 'torch.FloatTensor', 'torch.FloatTensor', (['verts'], {}), '(verts)\n', (867, 874), False, 'import torch\n'), ((891, 914), 'torch.LongTensor', 'torch.LongTensor', (['faces'], {}), '(faces)\n', (907, 914), False, 'import torch\n'), ((1434, 1501), 'os.path.join', 'os.path.join', (['dataset_root', 'dataset', '"""pointclouds"""', '"""test"""', 'res_gts'], {}), "(dataset_root, dataset, 'pointclouds', 'test', res_gts)\n", (1446, 1501), False, 'import os\n'), ((1530, 1583), 'os.path.join', 'os.path.join', (['dataset_root', 'dataset', '"""meshes"""', '"""test"""'], {}), "(dataset_root, dataset, 'meshes', 'test')\n", (1542, 1583), False, 'import os\n'), ((2076, 2108), 'tqdm.auto.tqdm', 'tqdm', (['pcls_name'], {'desc': '"""Evaluate"""'}), "(pcls_name, desc='Evaluate')\n", (2080, 2108), False, 'from tqdm.auto import tqdm\n'), ((4449, 4513), 'pandas.read_csv', 'pd.read_csv', (['path'], {'index_col': '(0)', 'sep': '"""\\\\s*,\\\\s*"""', 'engine': '"""python"""'}), "(path, index_col=0, sep='\\\\s*,\\\\s*', engine='python')\n", (4460, 4513), True, 'import pandas as pd\n'), ((4535, 4549), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (4547, 4549), True, 'import pandas as pd\n'), ((517, 551), 'numpy.loadtxt', 'np.loadtxt', (['path'], 
{'dtype': 'np.float32'}), '(path, dtype=np.float32)\n', (527, 551), True, 'import numpy as np\n'), ((4014, 4077), 'os.path.join', 'os.path.join', (['self.summary_dir', "('Summary_%s.csv' % self.dataset)"], {}), "(self.summary_dir, 'Summary_%s.csv' % self.dataset)\n", (4026, 4077), False, 'import os\n'), ((3732, 3753), 'pandas.DataFrame', 'pd.DataFrame', (['results'], {}), '(results)\n', (3744, 3753), True, 'import pandas as pd\n'), ((2532, 2581), 'pytorch3d.loss.chamfer_distance', 'pytorch3d.loss.chamfer_distance', (['pcl_up', 'pcl_high'], {}), '(pcl_up, pcl_high)\n', (2563, 2581), False, 'import pytorch3d\n'), ((3038, 3091), 'scipy.spatial.transform.Rotation.from_euler', 'Rotation.from_euler', (['"""xyz"""', '[-90, 0, 0]'], {'degrees': '(True)'}), "('xyz', [-90, 0, 0], degrees=True)\n", (3057, 3091), False, 'from scipy.spatial.transform import Rotation\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Visualize phage feature vectors in 2-D with t-SNE, one annotated point per organism.
import sys
sys.path.insert(0, './')  # make the local `features` package importable
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import csv
# NOTE(review): pandas and csv are imported but not used in this script.
from sklearn.manifold import TSNE
from features import features_functions
csv_file = './features/CSV_files/features_70_phages_clear_lysis.csv' # Take data from csv file
matrix_of_features = []
organisms_designation = []
# Fill the matrix with data and add phage designation
features_functions.createMatrixFeatures(csv_file=csv_file,
                                        matrix_of_features=matrix_of_features,
                                        organisms_designation=organisms_designation)
matrix_of_features = np.array(matrix_of_features)
# Project the features to 2-D (perplexity/learning rate tuned for ~70 samples)
matrix_of_features_embedded = TSNE(n_components=2, perplexity=25.0, learning_rate=50).fit_transform(matrix_of_features)
y_data = matrix_of_features_embedded[:,1]  # color points by their second embedded coordinate
plt.scatter(matrix_of_features_embedded[:, 0], matrix_of_features_embedded[:, 1], c=y_data, cmap="tab10")
plt.colorbar(orientation='horizontal', ticks=range(10))
plt.clim(0, 10)
plt.title('Graph - T-SNE')
# Display organisms' names
for i, txt in enumerate(organisms_designation):
    plt.annotate(txt, (matrix_of_features_embedded[i,0],matrix_of_features_embedded[i,1]))
plt.show() # Display the graphic
# Source :
# <NAME>, 2018. t-SNE: The effect of various perplexity values on the shape.
# DataCamp [online].
# [Accessed 20 July 2019]. Available at:
# https://www.datacamp.com/community/tutorials/introduction-t-sne
"matplotlib.pyplot.clim",
"sys.path.insert",
"features.features_functions.createMatrixFeatures",
"sklearn.manifold.TSNE",
"numpy.array",
"matplotlib.pyplot.annotate",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show"
] | [((58, 82), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""./"""'], {}), "(0, './')\n", (73, 82), False, 'import sys\n'), ((444, 595), 'features.features_functions.createMatrixFeatures', 'features_functions.createMatrixFeatures', ([], {'csv_file': 'csv_file', 'matrix_of_features': 'matrix_of_features', 'organisms_designation': 'organisms_designation'}), '(csv_file=csv_file,\n matrix_of_features=matrix_of_features, organisms_designation=\n organisms_designation)\n', (483, 595), False, 'from features import features_functions\n'), ((692, 720), 'numpy.array', 'np.array', (['matrix_of_features'], {}), '(matrix_of_features)\n', (700, 720), True, 'import numpy as np\n'), ((883, 993), 'matplotlib.pyplot.scatter', 'plt.scatter', (['matrix_of_features_embedded[:, 0]', 'matrix_of_features_embedded[:, 1]'], {'c': 'y_data', 'cmap': '"""tab10"""'}), "(matrix_of_features_embedded[:, 0], matrix_of_features_embedded[\n :, 1], c=y_data, cmap='tab10')\n", (894, 993), True, 'import matplotlib.pyplot as plt\n'), ((1045, 1060), 'matplotlib.pyplot.clim', 'plt.clim', (['(0)', '(10)'], {}), '(0, 10)\n', (1053, 1060), True, 'import matplotlib.pyplot as plt\n'), ((1061, 1087), 'matplotlib.pyplot.title', 'plt.title', (['"""Graph - T-SNE"""'], {}), "('Graph - T-SNE')\n", (1070, 1087), True, 'import matplotlib.pyplot as plt\n'), ((1256, 1266), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1264, 1266), True, 'import matplotlib.pyplot as plt\n'), ((1168, 1261), 'matplotlib.pyplot.annotate', 'plt.annotate', (['txt', '(matrix_of_features_embedded[i, 0], matrix_of_features_embedded[i, 1])'], {}), '(txt, (matrix_of_features_embedded[i, 0],\n matrix_of_features_embedded[i, 1]))\n', (1180, 1261), True, 'import matplotlib.pyplot as plt\n'), ((751, 806), 'sklearn.manifold.TSNE', 'TSNE', ([], {'n_components': '(2)', 'perplexity': '(25.0)', 'learning_rate': '(50)'}), '(n_components=2, perplexity=25.0, learning_rate=50)\n', (755, 806), False, 'from sklearn.manifold import TSNE\n')] |
import itertools
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.utils.rnn as rnn_utils
import torchvision.models as models
from resnet import resnet50
class CompatModel(nn.Module):
    """Outfit compatibility model.

    A ResNet-50 backbone embeds each of the 5 outfit items; the embeddings
    are concatenated and scored by a 2-layer MLP.  When ``need_rep`` is
    True, the 2048-d pre-fc representation additionally feeds a Visual
    Semantic Embedding (VSE) loss that aligns images with their text
    descriptions.
    """

    def __init__(self, embed_size=1000, need_rep=False, vocabulary=None):
        """The concatenation operation for outfit compatibility prediction.
        Args:
            embed_size: the output embedding size of the cnn model, default 1000.
            need_rep: whether to output representation of the layer before last fc
                layer, whose size is 2048. This representation can be used for
                compute the Visual Sementic Embedding (VSE) loss.
            vocabulary: the counts of words in the polyvore dataset.
        """
        super(CompatModel, self).__init__()
        cnn = resnet50(pretrained=True, need_rep=need_rep)
        cnn.fc = nn.Linear(cnn.fc.in_features, embed_size)
        self.cnn = cnn
        self.need_rep = need_rep
        # compatibility predictor: concat of 5 item embeddings -> MLP -> score
        self.bn = nn.BatchNorm1d(embed_size)
        self.fc1 = nn.Linear(embed_size * 5, embed_size)
        self.fc2 = nn.Linear(embed_size, 1)
        self.sigmoid = nn.Sigmoid()
        # Initialize the compatibility predictor, which is a 2-layered MLP
        nn.init.xavier_uniform_(cnn.fc.weight)
        nn.init.constant_(cnn.fc.bias, 0)
        nn.init.xavier_uniform_(self.fc1.weight)
        nn.init.constant_(self.fc1.bias, 0)
        nn.init.xavier_uniform_(self.fc2.weight)
        nn.init.constant_(self.fc2.bias, 0)
        # Semantic embedding model
        self.sem_embedding = nn.Embedding(vocabulary, 1000)
        # Visual embedding model
        self.image_embedding = nn.Linear(2048, 1000)
        # Global average pooling layer
        self.ada_avgpool2d = nn.AdaptiveAvgPool2d((1, 1))

    def forward(self, images, names):
        """
        Args:
            images: Outfit images with shape (N, T, C, H, W)
            names: Description words of each item in outfit
        Return:
            out: Compatibility score
            vse_loss: Visual Semantic Loss (a zero tensor when need_rep is
                False, since the 2048-d representation it needs is unavailable)
        """
        if self.need_rep:
            out, features, rep = self._compute_score(images)
            vse_loss = self._compute_vse_loss(names, rep)
        else:
            # Bug fix: `rep` only exists when need_rep=True.  The original
            # unconditionally called _compute_vse_loss(names, rep) after this
            # branch, raising NameError whenever need_rep was False.
            out, features = self._compute_score(images)
            vse_loss = torch.zeros(1, device=out.device)
        return out, vse_loss

    def _compute_vse_loss(self, names, rep):
        """ Visual semantice loss which map both visual embedding and semantic embedding
        into a common space.
        Reference:
        https://github.com/xthan/polyvore/blob/e0ca93b0671491564b4316982d4bfe7da17b6238/polyvore/polyvore_model_bi.py#L362
        """
        # Normalized Semantic Embedding
        padded_names = rnn_utils.pad_sequence(names, batch_first=True).to(rep.device)
        mask = torch.gt(padded_names, 0)
        # only captions with at least 2 real tokens contribute to the loss
        cap_mask = torch.ge(mask.sum(dim=1), 2)
        semb = self.sem_embedding(padded_names)
        semb = semb * (mask.unsqueeze(dim=2)).float()
        word_lengths = mask.sum(dim=1)
        # avoid division by zero for empty captions (use 0.1 as length)
        word_lengths = torch.where(
            word_lengths == 0,
            (torch.ones(semb.shape[0]).float() * 0.1).to(rep.device),
            word_lengths.float(),
        )
        semb = semb.sum(dim=1) / word_lengths.unsqueeze(dim=1)
        semb = F.normalize(semb, dim=1)
        # Normalized Visual Embedding
        vemb = F.normalize(self.image_embedding(rep), dim=1)
        # VSE Loss: bidirectional max-margin ranking on the score matrix
        semb = torch.masked_select(semb, cap_mask.unsqueeze(dim=1))
        vemb = torch.masked_select(vemb, cap_mask.unsqueeze(dim=1))
        semb = semb.reshape([-1, 1000])
        vemb = vemb.reshape([-1, 1000])
        scores = torch.matmul(semb, vemb.transpose(0, 1))
        diagnoal = scores.diag().unsqueeze(dim=1)
        cost_s = torch.clamp(0.2 - diagnoal + scores, min=0, max=1e6)  # 0.2 is margin
        cost_im = torch.clamp(0.2 - diagnoal.transpose(0, 1) + scores, min=0, max=1e6)
        # zero out the diagonal (matched pairs incur no cost)
        cost_s = cost_s - torch.diag(cost_s.diag())
        cost_im = cost_im - torch.diag(cost_im.diag())
        vse_loss = cost_s.sum() + cost_im.sum()
        vse_loss = vse_loss / (semb.shape[0] ** 2)
        return vse_loss

    def _compute_type_repr_loss(self, tmasks, features):
        """ Here adopt two losses to improve the type-spcified represetations.
        `tmasks_loss` expect the masks to be sparse and `features_loss` regularize
        the feature vector to be a unit vector.
        Reference:
        Conditional Similarity Networks: https://arxiv.org/abs/1603.07810
        """
        # Type embedding loss (L1 sparsity on the masks)
        tmasks_loss = tmasks.norm(1) / len(tmasks)
        features_loss = features.norm(2) / np.sqrt(
            (features.shape[0] * features.shape[1])
        )
        return tmasks_loss, features_loss

    def _compute_score(self, images, activate=True):
        """Extract feature vectors from input images.
        Return:
            out: the compatibility score
            features: the visual embedding of the images, we use 1000-d in all experiments
            rep: the represtions of the second last year, which is 2048-d for resnet-50 backend
        """
        batch_size, item_num, _, _, img_size = images.shape
        # flatten the outfit dimension so the CNN sees (N*T, C, H, W)
        images = torch.reshape(images, (-1, 3, img_size, img_size))
        if self.need_rep:
            features, *rep = self.cnn(images)
            rep = rep[-1]
        else:
            features = self.cnn(images)
        # Concatenation: one (N, T*embed) vector per outfit
        features = features.reshape(batch_size, -1)
        # Predictor
        out = F.relu(self.bn(self.fc1(features)))
        out = self.fc2(out)
        if activate:
            out = self.sigmoid(out)
        if self.need_rep:
            return out, features, rep
        else:
            return out, features
| [
"torch.nn.Sigmoid",
"numpy.sqrt",
"torch.nn.init.constant_",
"torch.gt",
"torch.nn.init.xavier_uniform_",
"torch.nn.utils.rnn.pad_sequence",
"torch.nn.functional.normalize",
"resnet.resnet50",
"torch.nn.BatchNorm1d",
"torch.nn.Linear",
"torch.nn.AdaptiveAvgPool2d",
"torch.reshape",
"torch.cl... | [((854, 898), 'resnet.resnet50', 'resnet50', ([], {'pretrained': '(True)', 'need_rep': 'need_rep'}), '(pretrained=True, need_rep=need_rep)\n', (862, 898), False, 'from resnet import resnet50\n'), ((916, 957), 'torch.nn.Linear', 'nn.Linear', (['cnn.fc.in_features', 'embed_size'], {}), '(cnn.fc.in_features, embed_size)\n', (925, 957), True, 'import torch.nn as nn\n'), ((1032, 1058), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['embed_size'], {}), '(embed_size)\n', (1046, 1058), True, 'import torch.nn as nn\n'), ((1078, 1115), 'torch.nn.Linear', 'nn.Linear', (['(embed_size * 5)', 'embed_size'], {}), '(embed_size * 5, embed_size)\n', (1087, 1115), True, 'import torch.nn as nn\n'), ((1133, 1157), 'torch.nn.Linear', 'nn.Linear', (['embed_size', '(1)'], {}), '(embed_size, 1)\n', (1142, 1157), True, 'import torch.nn as nn\n'), ((1181, 1193), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (1191, 1193), True, 'import torch.nn as nn\n'), ((1278, 1316), 'torch.nn.init.xavier_uniform_', 'nn.init.xavier_uniform_', (['cnn.fc.weight'], {}), '(cnn.fc.weight)\n', (1301, 1316), True, 'import torch.nn as nn\n'), ((1325, 1358), 'torch.nn.init.constant_', 'nn.init.constant_', (['cnn.fc.bias', '(0)'], {}), '(cnn.fc.bias, 0)\n', (1342, 1358), True, 'import torch.nn as nn\n'), ((1367, 1407), 'torch.nn.init.xavier_uniform_', 'nn.init.xavier_uniform_', (['self.fc1.weight'], {}), '(self.fc1.weight)\n', (1390, 1407), True, 'import torch.nn as nn\n'), ((1416, 1451), 'torch.nn.init.constant_', 'nn.init.constant_', (['self.fc1.bias', '(0)'], {}), '(self.fc1.bias, 0)\n', (1433, 1451), True, 'import torch.nn as nn\n'), ((1460, 1500), 'torch.nn.init.xavier_uniform_', 'nn.init.xavier_uniform_', (['self.fc2.weight'], {}), '(self.fc2.weight)\n', (1483, 1500), True, 'import torch.nn as nn\n'), ((1509, 1544), 'torch.nn.init.constant_', 'nn.init.constant_', (['self.fc2.bias', '(0)'], {}), '(self.fc2.bias, 0)\n', (1526, 1544), True, 'import torch.nn as nn\n'), ((1610, 1640), 
'torch.nn.Embedding', 'nn.Embedding', (['vocabulary', '(1000)'], {}), '(vocabulary, 1000)\n', (1622, 1640), True, 'import torch.nn as nn\n'), ((1705, 1726), 'torch.nn.Linear', 'nn.Linear', (['(2048)', '(1000)'], {}), '(2048, 1000)\n', (1714, 1726), True, 'import torch.nn as nn\n'), ((1796, 1824), 'torch.nn.AdaptiveAvgPool2d', 'nn.AdaptiveAvgPool2d', (['(1, 1)'], {}), '((1, 1))\n', (1816, 1824), True, 'import torch.nn as nn\n'), ((2825, 2850), 'torch.gt', 'torch.gt', (['padded_names', '(0)'], {}), '(padded_names, 0)\n', (2833, 2850), False, 'import torch\n'), ((3299, 3323), 'torch.nn.functional.normalize', 'F.normalize', (['semb'], {'dim': '(1)'}), '(semb, dim=1)\n', (3310, 3323), True, 'import torch.nn.functional as F\n'), ((3785, 3843), 'torch.clamp', 'torch.clamp', (['(0.2 - diagnoal + scores)'], {'min': '(0)', 'max': '(1000000.0)'}), '(0.2 - diagnoal + scores, min=0, max=1000000.0)\n', (3796, 3843), False, 'import torch\n'), ((5226, 5276), 'torch.reshape', 'torch.reshape', (['images', '(-1, 3, img_size, img_size)'], {}), '(images, (-1, 3, img_size, img_size))\n', (5239, 5276), False, 'import torch\n'), ((4671, 4717), 'numpy.sqrt', 'np.sqrt', (['(features.shape[0] * features.shape[1])'], {}), '(features.shape[0] * features.shape[1])\n', (4678, 4717), True, 'import numpy as np\n'), ((2747, 2794), 'torch.nn.utils.rnn.pad_sequence', 'rnn_utils.pad_sequence', (['names'], {'batch_first': '(True)'}), '(names, batch_first=True)\n', (2769, 2794), True, 'import torch.nn.utils.rnn as rnn_utils\n'), ((3120, 3145), 'torch.ones', 'torch.ones', (['semb.shape[0]'], {}), '(semb.shape[0])\n', (3130, 3145), False, 'import torch\n')] |
# Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
'''Helper functions for model conversion to pb'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from functools import wraps
import copy
import numpy as np
from caffe2.python import core, workspace
from caffe2.proto import caffe2_pb2
class OpFilter(object):
    """Configurable predicate over a caffe2 operator.

    Each keyword passed to the constructor enables one condition; check()
    succeeds only when every enabled condition holds.  Setting
    ``reverse=True`` inverts the final result.
    """

    def __init__(self, **kwargs):
        self.type = None
        self.type_in = None
        self.inputs = None
        self.outputs = None
        self.input_has = None
        self.output_has = None
        self.cond = None
        self.reverse = False
        # Reject unknown filter names early.
        assert all(key in self.__dict__ for key in kwargs)
        self.__dict__.update(kwargs)

    def check(self, op):
        """Return True if `op` satisfies the configured conditions."""
        passed = (
            (not self.type or op.type == self.type)
            and (not self.type_in or op.type in self.type_in)
            and (not self.inputs or set(op.input) == set(self.inputs))
            and (not self.outputs or set(op.output) == set(self.outputs))
            and (not self.input_has or self.input_has in op.input)
            and (not self.output_has or self.output_has in op.output)
            and (self.cond is None or bool(self.cond))
        )
        # XOR with `reverse`: all-pass -> not reverse, any-fail -> reverse.
        return passed != self.reverse
def filter_op(op, **kwargs):
    """Return True when `op` passes every check configured by `kwargs`
    (see OpFilter for the supported conditions)."""
    op_predicate = OpFilter(**kwargs)
    return op_predicate.check(op)
def op_filter(**filter_args):
    """Decorator factory for converter functions.

    The wrapped converter returns None (i.e. "no conversion") whenever the
    operator does not satisfy `filter_args`; otherwise the original
    converter is invoked.
    """
    def actual_decorator(f):
        @wraps(f)
        def wrapper(op, **params):
            if filter_op(op, **filter_args):
                return f(op, **params)
            return None
        return wrapper
    return actual_decorator
def op_func_chain(convert_func_list):
    """Compose converters: return the first non-None result, else None."""
    assert isinstance(convert_func_list, list)

    def chained(op):
        for convert in convert_func_list:
            result = convert(op)
            if result is not None:
                return result
        return None

    return chained
def convert_op_in_ops(ops_ref, func_or_list):
    """Rewrite every op in `ops_ref` in place.

    `func_or_list` is a single converter or a list of converters (chained
    via op_func_chain).  A converter may return None (keep the original
    op), one replacement op, or a list of replacement ops.
    """
    convert = func_or_list
    if isinstance(func_or_list, list):
        convert = op_func_chain(func_or_list)
    original_ops = list(ops_ref)
    rewritten = []
    for op in original_ops:
        replacement = convert(op)
        if replacement is None:
            rewritten.append(op)
        elif isinstance(replacement, list):
            rewritten.extend(replacement)
        else:
            rewritten.append(replacement)
    del ops_ref[:]
    # ops_ref may be a protobuf RepeatedCompositeFieldContainer, which
    # supports extend() but not append().
    ops_ref.extend(rewritten)
def convert_op_in_proto(proto, func_or_list):
    """In-place conversion of every op in a NetDef proto (see
    convert_op_in_ops for converter semantics)."""
    convert_op_in_ops(proto.op, func_or_list)
def get_op_arg(op, arg_name):
    """Return the Argument of `op` named `arg_name`, or None if absent."""
    return next((arg for arg in op.arg if arg.name == arg_name), None)
def get_op_arg_valf(op, arg_name, default_val):
    """Return the float value (.f) of argument `arg_name` of `op`, or
    `default_val` when no such argument exists."""
    arg = get_op_arg(op, arg_name)
    if arg is None:
        return default_val
    return arg.f
def update_mobile_engines(net):
    """Switch conv ops in `net` to mobile-friendly engines in place."""
    engine_by_type = {"Conv": "NNPACK", "ConvTranspose": "BLOCK"}
    for op in net.op:
        mobile_engine = engine_by_type.get(op.type)
        if mobile_engine is not None:
            op.engine = mobile_engine
def pairwise(iterable):
    """Yield successive overlapping pairs: s -> (s0,s1), (s1,s2), ..."""
    from itertools import tee
    it_a, it_b = tee(iterable)
    next(it_b, None)  # advance the second iterator by one element
    return zip(it_a, it_b)
def blob_uses(net, blob):
    """Indices of ops in `net` that consume `blob` as a (control) input."""
    return [idx for idx, op in enumerate(net.op)
            if blob in op.input or blob in op.control_input]
def fuse_first_affine(net, params, removed_tensors):
    """Fuse the first (Conv|ConvTranspose) -> AffineChannel pair in `net`.

    Folds the affine scale A and bias B into the convolution's weight and
    bias so the AffineChannel op can be dropped.  Operates on deep copies
    and returns (net, params, removed_tensors); at most one pair is fused
    per call (callers iterate to a fixed point).
    """
    net = copy.deepcopy(net)
    params = copy.deepcopy(params)

    for ((i, current), (j, next_)) in pairwise(enumerate(net.op)):
        # Only fuse directly chained ops: next_ must consume current's output.
        if next_.input[0] != current.output[0]:
            continue

        if current.type not in ("Conv", "ConvTranspose") \
           or next_.type != "AffineChannel":
            continue
        if current.output[0] != next_.output[0] and \
                len(blob_uses(net, current.output[0])) != 1:
            # Can't fuse if more than one user unless AffineChannel is inplace
            continue

        # else, can fuse
        conv = current
        affine = next_
        fused_conv = copy.deepcopy(conv)
        # The fused conv writes directly to the AffineChannel's output blob.
        fused_conv.output[0] = affine.output[0]
        conv_weight = params[conv.input[1]]

        conv_has_bias = len(conv.input) > 2
        conv_bias = params[conv.input[2]] if conv_has_bias else 0

        A = params[affine.input[1]]
        B = params[affine.input[2]]

        # Thus, can just have the affine transform
        #   X * A + B
        # where
        #   A = bn_scale * 1.0 / (sqrt(running_var + eps))
        #   B = (bias - running_mean * (1.0 / sqrt(running_var + eps))
        #       * bn_scale)

        # This identity should hold if we have correctly fused
        # np.testing.assert_array_equal(
        #     params[conv.output[0]] * A + B,
        #     params[bn.output[0]])

        # Now, we have that the computation made is the following:
        # ((X `conv` W) + b) * A + B
        # Then, we can simply fuse this as follows:
        # (X `conv` (W * A)) + b * A + B
        # which is simply
        # (X `conv` Q) + C
        # where

        # Q = W * A
        # C = b * A + B

        # For ConvTranspose, from the view of convolutions as a
        # Toepeliz multiplication, we have W_ = W^T, so the weights
        # are laid out as (R, S, K, K) (vs (S, R, K, K) for a Conv),
        # so the weights broadcast slightly differently. Remember, our
        # BN scale 'B' is of size (S,)

        A_ = A.reshape(-1, 1, 1, 1) if conv.type == "Conv" else \
            A.reshape(1, -1, 1, 1)

        C = conv_bias * A + B
        Q = conv_weight * A_
        assert params[conv.input[1]].shape == Q.shape
        params[conv.input[1]] = Q
        if conv_has_bias:
            assert params[conv.input[2]].shape == C.shape
            params[conv.input[2]] = C
        else:
            # make af_bias to be bias of the conv layer
            fused_conv.input.append(affine.input[2])
            params[affine.input[2]] = B
        # Splice the fused op in place of the conv + affine pair.
        new_ops = net.op[:i] + [fused_conv] + net.op[j + 1:]
        del net.op[:]
        if conv_has_bias:
            # The affine bias was folded into the conv bias; drop its blob.
            del params[affine.input[2]]
            removed_tensors.append(affine.input[2])
        # The affine scale is always folded away.
        removed_tensors.append(affine.input[1])
        del params[affine.input[1]]
        net.op.extend(new_ops)
        break
    return net, params, removed_tensors
def fuse_affine(net, params, ignore_failure):
    """Fuse Conv+AffineChannel pairs in `net` until a fixed point.

    Raises when AffineChannel ops remain after fusion, unless
    `ignore_failure` is set.
    """
    removed_tensors = []
    while True:
        fused_net, fused_params, removed_tensors = \
            fuse_first_affine(net, params, removed_tensors)
        if len(fused_net.op) != len(net.op):
            # One pair was fused; keep iterating on the reduced net.
            net, params = fused_net, fused_params
            continue
        has_affine = any(op.type == "AffineChannel" for op in fused_net.op)
        if has_affine and not ignore_failure:
            raise Exception(
                "Model contains AffineChannel op after fusion: %s", fused_net)
        return (fused_net, fused_params, removed_tensors)
def fuse_net(fuse_func, net, blobs, ignore_failure=False):
    """Apply `fuse_func` to `net` (core.Net or NetDef) and return the
    fused net together with the updated blob dict."""
    wrap_as_core_net = isinstance(net, core.Net)
    if wrap_as_core_net:
        net = net.Proto()
    net, params, removed_tensors = fuse_func(net, blobs, ignore_failure)
    for tensor_name in removed_tensors:
        # Fused-away parameter blobs are no longer external inputs.
        net.external_input.remove(tensor_name)
    if wrap_as_core_net:
        net = core.Net(net)
    return net, params
def fuse_net_affine(net, blobs):
    """Convenience wrapper: fuse AffineChannel ops into preceding convs."""
    return fuse_net(fuse_affine, net, blobs)
def add_tensor(net, name, blob):
    ''' Append a GivenTensor*Fill operator to `net` that materializes the
        numpy array `blob` under blob name `name`.
        uint8 is stored as an array of string with one element.
    '''
    kTypeNameMapper = {
        np.dtype('float32'): "GivenTensorFill",
        np.dtype('int32'): "GivenTensorIntFill",
        np.dtype('int64'): "GivenTensorInt64Fill",
        np.dtype('uint8'): "GivenTensorStringFill",
    }

    shape = blob.shape
    values = blob
    # pass array of uint8 as a string to save storage
    # storing uint8_t has a large overhead for now
    if blob.dtype == np.dtype('uint8'):
        shape = [1]
        # BUG FIX: on Python 3, str(blob.data) returns the repr of a
        # memoryview ("<memory at 0x...>") instead of the raw buffer
        # contents; tobytes() serializes the actual tensor bytes.
        values = [blob.tobytes()]

    op = core.CreateOperator(
        kTypeNameMapper[blob.dtype],
        [], [name],
        shape=shape,
        values=values,
    )
    net.op.extend([op])
def gen_init_net_from_blobs(blobs, blobs_to_use=None, excluded_blobs=None):
    ''' Generate an initialization net based on a blob dict '''
    init_net = caffe2_pb2.NetDef()
    if blobs_to_use is None:
        selected = {x for x in blobs}
    else:
        selected = copy.deepcopy(blobs_to_use)
    if excluded_blobs is not None:
        selected = [x for x in selected if x not in excluded_blobs]
    for name in selected:
        blob = blobs[name]
        if isinstance(blob, str):
            # String blobs cannot be expressed as GivenTensor*Fill ops.
            print('Blob {} with type {} is not supported in generating init net,'
                  ' skipped.'.format(name, type(blob)))
            continue
        add_tensor(init_net, name, blob)
    return init_net
def get_ws_blobs(blob_names=None):
    ''' Get blobs in 'blob_names' in the default workspace,
        get all blobs if blob_names is None '''
    if blob_names is None:
        blob_names = workspace.Blobs()
    return {name: workspace.FetchBlob(name) for name in blob_names}
def get_device_option_cpu():
    """Return a DeviceOption targeting the CPU."""
    return core.DeviceOption(caffe2_pb2.CPU)
def get_device_option_cuda(gpu_id=0):
    """Return a DeviceOption targeting CUDA device `gpu_id`."""
    option = caffe2_pb2.DeviceOption()
    option.device_type = caffe2_pb2.CUDA
    option.device_id = gpu_id
    return option
def create_input_blobs_for_net(net_def):
    """Ensure every input blob referenced by `net_def` exists in the
    default workspace, creating empty blobs as needed."""
    for op in net_def.op:
        for input_name in op.input:
            if not workspace.HasBlob(input_name):
                workspace.CreateBlob(input_name)
def compare_model(model1_func, model2_func, test_image, check_blobs):
    ''' Run both models on `test_image` and compare the requested blobs.

        Each model_func is called as model_func(test_image, check_blobs)
        and must return a dict of blob name -> array.  `check_blobs` is
        either a list of names present in both results, or a dict mapping
        model1 blob names to the corresponding model2 names.  Returns True
        when all blobs match to 3 decimals; raises otherwise.
    '''
    cb1, cb2 = check_blobs, check_blobs
    if isinstance(check_blobs, dict):
        # BUG FIX: on Python 3, dict views are not indexable, so cb1[idx]
        # below raised TypeError; materialize the views as lists.
        cb1 = list(check_blobs.keys())
        cb2 = list(check_blobs.values())
    print('Running the first model...')
    res1 = model1_func(test_image, check_blobs)
    print('Running the second model...')
    res2 = model2_func(test_image, check_blobs)

    for idx in range(len(cb1)):
        print('Checking {} -> {}...'.format(cb1[idx], cb2[idx]))
        n1, n2 = cb1[idx], cb2[idx]
        r1 = res1[n1] if n1 in res1 else None
        r2 = res2[n2] if n2 in res2 else None
        assert r1 is not None or r2 is None, \
            "Blob {} in model1 is None".format(n1)
        assert r2 is not None or r1 is None, \
            "Blob {} in model2 is None".format(n2)

        assert r1.shape == r2.shape, \
            "Blob {} and {} shape mismatched: {} vs {}".format(
                n1, n2, r1.shape, r2.shape)
        np.testing.assert_array_almost_equal(
            r1, r2, decimal=3,
            err_msg='{} and {} not matched. Max diff: {}'.format(
                n1, n2, np.amax(np.absolute(r1 - r2))))

    return True
# graph_name could not contain word 'graph'
def save_graph(net, file_name, graph_name="net", op_only=True):
    """Render `net` as a PNG image using pydot.

    With op_only=True a minimal op-dependency graph is drawn; otherwise
    blobs are included.  NOTE: graph_name must not contain the word
    'graph'.
    """
    from caffe2.python import net_drawer
    ops = net.op
    if op_only:
        graph = net_drawer.GetPydotGraphMinimal(
            ops, graph_name,
            rankdir="TB", minimal_dependency=True)
    else:
        graph = net_drawer.GetPydotGraph(
            ops, graph_name,
            rankdir="TB")
    try:
        graph.write_png(file_name)
    except Exception as e:
        print('Error when writing graph to image {}'.format(e))
| [
"caffe2.proto.caffe2_pb2.NetDef",
"caffe2.python.workspace.CreateBlob",
"caffe2.python.workspace.HasBlob",
"caffe2.python.core.CreateOperator",
"caffe2.python.core.Net",
"caffe2.python.workspace.Blobs",
"caffe2.python.workspace.FetchBlob",
"caffe2.python.net_drawer.GetPydotGraph",
"itertools.tee",
... | [((4038, 4051), 'itertools.tee', 'tee', (['iterable'], {}), '(iterable)\n', (4041, 4051), False, 'from itertools import tee\n'), ((4325, 4343), 'copy.deepcopy', 'copy.deepcopy', (['net'], {}), '(net)\n', (4338, 4343), False, 'import copy\n'), ((4357, 4378), 'copy.deepcopy', 'copy.deepcopy', (['params'], {}), '(params)\n', (4370, 4378), False, 'import copy\n'), ((9025, 9117), 'caffe2.python.core.CreateOperator', 'core.CreateOperator', (['kTypeNameMapper[blob.dtype]', '[]', '[name]'], {'shape': 'shape', 'values': 'values'}), '(kTypeNameMapper[blob.dtype], [], [name], shape=shape,\n values=values)\n', (9044, 9117), False, 'from caffe2.python import core, workspace\n'), ((9461, 9480), 'caffe2.proto.caffe2_pb2.NetDef', 'caffe2_pb2.NetDef', ([], {}), '()\n', (9478, 9480), False, 'from caffe2.proto import caffe2_pb2\n'), ((10381, 10414), 'caffe2.python.core.DeviceOption', 'core.DeviceOption', (['caffe2_pb2.CPU'], {}), '(caffe2_pb2.CPU)\n', (10398, 10414), False, 'from caffe2.python import core, workspace\n'), ((10500, 10525), 'caffe2.proto.caffe2_pb2.DeviceOption', 'caffe2_pb2.DeviceOption', ([], {}), '()\n', (10523, 10525), False, 'from caffe2.proto import caffe2_pb2\n'), ((2276, 2284), 'functools.wraps', 'wraps', (['f'], {}), '(f)\n', (2281, 2284), False, 'from functools import wraps\n'), ((4950, 4969), 'copy.deepcopy', 'copy.deepcopy', (['conv'], {}), '(conv)\n', (4963, 4969), False, 'import copy\n'), ((8209, 8222), 'caffe2.python.core.Net', 'core.Net', (['net'], {}), '(net)\n', (8217, 8222), False, 'from caffe2.python import core, workspace\n'), ((8576, 8595), 'numpy.dtype', 'np.dtype', (['"""float32"""'], {}), "('float32')\n", (8584, 8595), True, 'import numpy as np\n'), ((8624, 8641), 'numpy.dtype', 'np.dtype', (['"""int32"""'], {}), "('int32')\n", (8632, 8641), True, 'import numpy as np\n'), ((8673, 8690), 'numpy.dtype', 'np.dtype', (['"""int64"""'], {}), "('int64')\n", (8681, 8690), True, 'import numpy as np\n'), ((8724, 8741), 'numpy.dtype', 'np.dtype', 
(['"""uint8"""'], {}), "('uint8')\n", (8732, 8741), True, 'import numpy as np\n'), ((8942, 8959), 'numpy.dtype', 'np.dtype', (['"""uint8"""'], {}), "('uint8')\n", (8950, 8959), True, 'import numpy as np\n'), ((9585, 9612), 'copy.deepcopy', 'copy.deepcopy', (['blobs_to_use'], {}), '(blobs_to_use)\n', (9598, 9612), False, 'import copy\n'), ((10234, 10251), 'caffe2.python.workspace.Blobs', 'workspace.Blobs', ([], {}), '()\n', (10249, 10251), False, 'from caffe2.python import core, workspace\n'), ((10268, 10290), 'caffe2.python.workspace.FetchBlob', 'workspace.FetchBlob', (['x'], {}), '(x)\n', (10287, 10290), False, 'from caffe2.python import core, workspace\n'), ((12284, 12339), 'caffe2.python.net_drawer.GetPydotGraph', 'net_drawer.GetPydotGraph', (['ops', 'graph_name'], {'rankdir': '"""TB"""'}), "(ops, graph_name, rankdir='TB')\n", (12308, 12339), False, 'from caffe2.python import net_drawer\n'), ((12391, 12482), 'caffe2.python.net_drawer.GetPydotGraphMinimal', 'net_drawer.GetPydotGraphMinimal', (['ops', 'graph_name'], {'rankdir': '"""TB"""', 'minimal_dependency': '(True)'}), "(ops, graph_name, rankdir='TB',\n minimal_dependency=True)\n", (12422, 12482), False, 'from caffe2.python import net_drawer\n'), ((10757, 10783), 'caffe2.python.workspace.HasBlob', 'workspace.HasBlob', (['blob_in'], {}), '(blob_in)\n', (10774, 10783), False, 'from caffe2.python import core, workspace\n'), ((10801, 10830), 'caffe2.python.workspace.CreateBlob', 'workspace.CreateBlob', (['blob_in'], {}), '(blob_in)\n', (10821, 10830), False, 'from caffe2.python import core, workspace\n'), ((12022, 12042), 'numpy.absolute', 'np.absolute', (['(r1 - r2)'], {}), '(r1 - r2)\n', (12033, 12042), True, 'import numpy as np\n')] |
import torch
import logging
import numpy as np
import torch.nn.functional as F
# Method taken from tensorflow/models skipgram
from torch.nn import EmbeddingBag
def read_analogies(file, w2id):
    """Parse an analogy-question file into rows of word ids.

    Lines starting with ':' are section headers and are skipped.  A valid
    question line holds exactly four words that all resolve via `w2id`;
    any other line is counted as skipped (logged, not returned).

    Returns:
      an [n, 4] int32 numpy array of analogy word ids.
    """
    parsed = []
    skipped = 0
    with open(file, "rb") as analogy_f:
        for raw_line in analogy_f:
            if raw_line.startswith(b":"):  # Skip comments.
                continue
            tokens = raw_line.decode().strip().lower().split()
            ids = [w2id.get(tok.strip(), None) for tok in tokens]
            if len(ids) == 4 and None not in ids:
                parsed.append(np.array(ids))
            else:
                skipped += 1
    logging.info("###########################################")
    logging.info("Loaded evaluation method: Question analogy")
    logging.info("-------------------------------------------\n")
    logging.info("Eval analogy file: " + file)
    logging.info("Questions: " + str(len(parsed)))
    logging.info("Skipped: " + str(skipped))
    logging.info("###########################################\n")
    return np.array(parsed, dtype=np.int32)
# Each analogy task is to predict the 4th word (d) given three
# words: a, b, c. E.g., a=italy, b=rome, c=france, we should
# predict d=paris
def eval_analogy_questions(data_processor, embeddings, use_cuda):
    """Evaluate analogy questions and report precision@1 accuracy.

    For each question (a, b, c, d) the prediction is the vocabulary word
    whose embedding is closest (by cosine distance) to c + (b - a); words
    already present in the question are skipped when ranking.  Results are
    logged; nothing is returned.

    Args:
        data_processor: provides `analogy_questions`, an [n, 4] array of
            word ids (see read_analogies).
        embeddings: an nn.Embedding or nn.EmbeddingBag holding the
            trained word vectors in `.weight`.
        use_cuda: move index tensors to the GPU before lookup.
    """
    is_embedding_bag = type(embeddings) is EmbeddingBag
    # How many questions we get right at precision@1.
    correct = 0

    aq = data_processor.analogy_questions
    total = aq.shape[0]
    start = 0

    # Lower the N, if you are running out of memory
    N = 32

    predict_item_index = 3

    # Normalize matrix so we can calculate cosine distances with dot product
    nembs = torch.transpose(F.normalize(embeddings.weight), 0, 1)
    while start < total:
        limit = start + N
        analogy = aq[start:limit, :]
        # Each row of a_emb, b_emb, c_emb is a word's embedding vector.
        # They all have the shape [N, emb_dim]
        a = torch.LongTensor(analogy[:, 0])
        b = torch.LongTensor(analogy[:, 1])
        c = torch.LongTensor(analogy[:, 2])
        if is_embedding_bag:
            # EmbeddingBag needs explicit per-item offsets for the lookup.
            arange = torch.LongTensor(range(len(a)))
            brange = torch.LongTensor(range(len(b)))
            crange = torch.LongTensor(range(len(c)))

        if use_cuda:
            a = a.cuda()
            b = b.cuda()
            c = c.cuda()
            if is_embedding_bag:
                arange = arange.cuda()
                brange = brange.cuda()
                crange = crange.cuda()

        if is_embedding_bag:
            a_emb = embeddings(a, arange)
            b_emb = embeddings(b, brange)
            c_emb = embeddings(c, crange)
        else:
            a_emb = embeddings(a)
            b_emb = embeddings(b)
            c_emb = embeddings(c)

        # We expect that d's embedding vectors analogies are
        # near: c_emb + (b_emb - a_emb), which has the shape [N, emb_dim].
        d_emb = c_emb + b_emb - a_emb
        # normalize d_emb embeddings
        d_emb = F.normalize(d_emb)
        # Compute cosine distance of d_emb to each vocab word
        # dist has shape [N, vocab_size]
        dist = torch.matmul(d_emb, nembs)

        # top_k closest EMBEDDINGS
        top_predicted = torch.topk(dist, dim=1, k=4)[1].cpu().numpy()
        start = limit
        for question in range(analogy.shape[0]):
            for j in range(4):
                if top_predicted[question, j] == analogy[question, predict_item_index]:
                    # Bingo! We predicted correctly. E.g., [italy, rome, france, paris].
                    correct += 1
                    break
                elif top_predicted[question, j] in analogy[question, :predict_item_index]:
                    # We need to skip words already in the question.
                    continue
                else:
                    # The correct label is not the precision@1
                    break
    logging.info("Eval analogy questions %4d/%d accuracy = %4.1f%%" % (correct, total, correct * 100.0 / total))
| [
"torch.LongTensor",
"torch.topk",
"torch.nn.functional.normalize",
"numpy.array",
"torch.matmul",
"logging.info"
] | [((899, 958), 'logging.info', 'logging.info', (['"""###########################################"""'], {}), "('###########################################')\n", (911, 958), False, 'import logging\n'), ((963, 1021), 'logging.info', 'logging.info', (['"""Loaded evaluation method: Question analogy"""'], {}), "('Loaded evaluation method: Question analogy')\n", (975, 1021), False, 'import logging\n'), ((1026, 1087), 'logging.info', 'logging.info', (['"""-------------------------------------------\n"""'], {}), "('-------------------------------------------\\n')\n", (1038, 1087), False, 'import logging\n'), ((1092, 1134), 'logging.info', 'logging.info', (["('Eval analogy file: ' + file)"], {}), "('Eval analogy file: ' + file)\n", (1104, 1134), False, 'import logging\n'), ((1245, 1306), 'logging.info', 'logging.info', (['"""###########################################\n"""'], {}), "('###########################################\\n')\n", (1257, 1306), False, 'import logging\n'), ((1318, 1353), 'numpy.array', 'np.array', (['questions'], {'dtype': 'np.int32'}), '(questions, dtype=np.int32)\n', (1326, 1353), True, 'import numpy as np\n'), ((4266, 4378), 'logging.info', 'logging.info', (["('Eval analogy questions %4d/%d accuracy = %4.1f%%' % (correct, total, \n correct * 100.0 / total))"], {}), "('Eval analogy questions %4d/%d accuracy = %4.1f%%' % (correct,\n total, correct * 100.0 / total))\n", (4278, 4378), False, 'import logging\n'), ((2029, 2059), 'torch.nn.functional.normalize', 'F.normalize', (['embeddings.weight'], {}), '(embeddings.weight)\n', (2040, 2059), True, 'import torch.nn.functional as F\n'), ((2287, 2318), 'torch.LongTensor', 'torch.LongTensor', (['analogy[:, 0]'], {}), '(analogy[:, 0])\n', (2303, 2318), False, 'import torch\n'), ((2331, 2362), 'torch.LongTensor', 'torch.LongTensor', (['analogy[:, 1]'], {}), '(analogy[:, 1])\n', (2347, 2362), False, 'import torch\n'), ((2375, 2406), 'torch.LongTensor', 'torch.LongTensor', (['analogy[:, 2]'], {}), '(analogy[:, 
2])\n', (2391, 2406), False, 'import torch\n'), ((3352, 3370), 'torch.nn.functional.normalize', 'F.normalize', (['d_emb'], {}), '(d_emb)\n', (3363, 3370), True, 'import torch.nn.functional as F\n'), ((3490, 3516), 'torch.matmul', 'torch.matmul', (['d_emb', 'nembs'], {}), '(d_emb, nembs)\n', (3502, 3516), False, 'import torch\n'), ((879, 892), 'numpy.array', 'np.array', (['ids'], {}), '(ids)\n', (887, 892), True, 'import numpy as np\n'), ((3577, 3605), 'torch.topk', 'torch.topk', (['dist'], {'dim': '(1)', 'k': '(4)'}), '(dist, dim=1, k=4)\n', (3587, 3605), False, 'import torch\n')] |
# Authors: <NAME> <<EMAIL>>
# License: BSD 3 clause
import pytest
import numpy as np
from numpy.testing import assert_allclose
from scipy import sparse
from sklearn.base import BaseEstimator
from sklearn.dummy import DummyClassifier
from sklearn.model_selection import LeaveOneOut, train_test_split
from sklearn.utils._testing import (assert_array_almost_equal,
assert_almost_equal,
assert_array_equal,
assert_raises, ignore_warnings)
from sklearn.utils.extmath import softmax
from sklearn.exceptions import NotFittedError
from sklearn.datasets import make_classification, make_blobs
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import KFold, cross_val_predict
from sklearn.naive_bayes import MultinomialNB
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.svm import LinearSVC
from sklearn.isotonic import IsotonicRegression
from sklearn.feature_extraction import DictVectorizer
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.metrics import brier_score_loss
from sklearn.calibration import CalibratedClassifierCV, _CalibratedClassifier
from sklearn.calibration import _sigmoid_calibration, _SigmoidCalibration
from sklearn.calibration import calibration_curve
@pytest.fixture(scope="module")
def data():
    """Module-wide synthetic binary classification dataset (X, y)."""
    return make_classification(
        n_samples=200, n_features=6, random_state=42
    )
@pytest.mark.parametrize('method', ['sigmoid', 'isotonic'])
@pytest.mark.parametrize('ensemble', [True, False])
def test_calibration(data, method, ensemble):
    """Calibration improves the Brier score and is invariant to target
    relabeling (sigmoid: anti-symmetric under label swap)."""
    # Test calibration objects with isotonic and sigmoid
    n_samples = 100
    X, y = data
    sample_weight = np.random.RandomState(seed=42).uniform(size=y.size)

    X -= X.min()  # MultinomialNB only allows positive X

    # split train and test
    X_train, y_train, sw_train = \
        X[:n_samples], y[:n_samples], sample_weight[:n_samples]
    X_test, y_test = X[n_samples:], y[n_samples:]

    # Naive-Bayes
    clf = MultinomialNB().fit(X_train, y_train, sample_weight=sw_train)
    prob_pos_clf = clf.predict_proba(X_test)[:, 1]

    # cv larger than the number of samples must raise at fit time.
    cal_clf = CalibratedClassifierCV(clf, cv=y.size + 1, ensemble=ensemble)
    assert_raises(ValueError, cal_clf.fit, X, y)

    # Naive Bayes with calibration
    for this_X_train, this_X_test in [(X_train, X_test),
                                      (sparse.csr_matrix(X_train),
                                       sparse.csr_matrix(X_test))]:
        cal_clf = CalibratedClassifierCV(
            clf, method=method, cv=5, ensemble=ensemble
        )
        # Note that this fit overwrites the fit on the entire training
        # set
        cal_clf.fit(this_X_train, y_train, sample_weight=sw_train)
        prob_pos_cal_clf = cal_clf.predict_proba(this_X_test)[:, 1]

        # Check that brier score has improved after calibration
        assert (brier_score_loss(y_test, prob_pos_clf) >
                brier_score_loss(y_test, prob_pos_cal_clf))

        # Check invariance against relabeling [0, 1] -> [1, 2]
        cal_clf.fit(this_X_train, y_train + 1, sample_weight=sw_train)
        prob_pos_cal_clf_relabeled = cal_clf.predict_proba(this_X_test)[:, 1]
        assert_array_almost_equal(prob_pos_cal_clf,
                                  prob_pos_cal_clf_relabeled)

        # Check invariance against relabeling [0, 1] -> [-1, 1]
        cal_clf.fit(this_X_train, 2 * y_train - 1, sample_weight=sw_train)
        prob_pos_cal_clf_relabeled = cal_clf.predict_proba(this_X_test)[:, 1]
        assert_array_almost_equal(prob_pos_cal_clf, prob_pos_cal_clf_relabeled)

        # Check invariance against relabeling [0, 1] -> [1, 0]
        cal_clf.fit(this_X_train, (y_train + 1) % 2, sample_weight=sw_train)
        prob_pos_cal_clf_relabeled = cal_clf.predict_proba(this_X_test)[:, 1]
        if method == "sigmoid":
            # Sigmoid calibration is symmetric: swapping labels flips the
            # calibrated probabilities.
            assert_array_almost_equal(prob_pos_cal_clf,
                                      1 - prob_pos_cal_clf_relabeled)
        else:
            # Isotonic calibration is not invariant against relabeling
            # but should improve in both cases
            assert (brier_score_loss(y_test, prob_pos_clf) >
                    brier_score_loss((y_test + 1) % 2,
                                     prob_pos_cal_clf_relabeled))
@pytest.mark.parametrize('ensemble', [True, False])
def test_calibration_bad_method(data, ensemble):
    """Only 'isotonic' and 'sigmoid' are accepted calibration methods."""
    X, y = data
    miscalibrated = CalibratedClassifierCV(
        LinearSVC(), method="foo", ensemble=ensemble
    )
    with pytest.raises(ValueError):
        miscalibrated.fit(X, y)
@pytest.mark.parametrize('ensemble', [True, False])
def test_calibration_regressor(data, ensemble):
    """The base estimator must expose decision_function or predict_proba;
    a regressor provides neither, so fitting must fail."""
    X, y = data
    calibrated_regressor = CalibratedClassifierCV(
        RandomForestRegressor(), ensemble=ensemble
    )
    with pytest.raises(RuntimeError):
        calibrated_regressor.fit(X, y)
def test_calibration_default_estimator(data):
    """When no base estimator is given, LinearSVC is used by default."""
    X, y = data
    calibrated = CalibratedClassifierCV(cv=2)
    calibrated.fit(X, y)
    assert isinstance(
        calibrated.calibrated_classifiers_[0].base_estimator, LinearSVC)
@pytest.mark.parametrize('ensemble', [True, False])
def test_calibration_cv_splitter(data, ensemble):
    """`cv` may be a CV splitter instance instead of an integer."""
    X, y = data

    n_splits = 5
    splitter = KFold(n_splits=n_splits)
    calibrated = CalibratedClassifierCV(cv=splitter, ensemble=ensemble)
    assert isinstance(calibrated.cv, KFold)
    assert calibrated.cv.n_splits == n_splits

    calibrated.fit(X, y)
    # With ensemble=True one calibrated clone per fold is kept, otherwise
    # a single classifier trained on out-of-fold predictions.
    expected_n_clf = n_splits if ensemble else 1
    assert len(calibrated.calibrated_classifiers_) == expected_n_clf
@pytest.mark.parametrize('method', ['sigmoid', 'isotonic'])
@pytest.mark.parametrize('ensemble', [True, False])
def test_sample_weight(data, method, ensemble):
    """Sample weights must influence the calibration result."""
    n_samples = 100
    X, y = data
    sample_weight = np.random.RandomState(seed=42).uniform(size=len(y))

    X_train = X[:n_samples]
    y_train = y[:n_samples]
    sw_train = sample_weight[:n_samples]
    X_test = X[n_samples:]

    calibrated_clf = CalibratedClassifierCV(
        LinearSVC(random_state=42), method=method, ensemble=ensemble
    )
    calibrated_clf.fit(X_train, y_train, sample_weight=sw_train)
    probs_with_sw = calibrated_clf.predict_proba(X_test)

    # As the weights are used for the calibration, they should still yield
    # different predictions
    calibrated_clf.fit(X_train, y_train)
    probs_without_sw = calibrated_clf.predict_proba(X_test)

    diff = np.linalg.norm(probs_with_sw - probs_without_sw)
    assert diff > 0.1
@pytest.mark.parametrize('method', ['sigmoid', 'isotonic'])
@pytest.mark.parametrize('ensemble', [True, False])
def test_parallel_execution(data, method, ensemble):
    """Parallel (n_jobs=2) and sequential fits must produce identical
    probabilities."""
    X, y = data
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)

    base_estimator = LinearSVC(random_state=42)

    probs_by_n_jobs = {}
    for n_jobs in (2, 1):
        cal_clf = CalibratedClassifierCV(
            base_estimator, method=method, n_jobs=n_jobs, ensemble=ensemble
        )
        cal_clf.fit(X_train, y_train)
        probs_by_n_jobs[n_jobs] = cal_clf.predict_proba(X_test)

    assert_allclose(probs_by_n_jobs[2], probs_by_n_jobs[1])
@pytest.mark.parametrize('method', ['sigmoid', 'isotonic'])
@pytest.mark.parametrize('ensemble', [True, False])
# increase the number of RNG seeds to assess the statistical stability of this
# test:
@pytest.mark.parametrize('seed', range(2))
def test_calibration_multiclass(method, ensemble, seed):
    """Multiclass calibration keeps accuracy and beats a naive softmax
    (Brier-wise) for both decision_function and predict_proba bases."""

    def multiclass_brier(y_true, proba_pred, n_classes):
        # Mean squared distance between one-hot targets and probabilities.
        Y_onehot = np.eye(n_classes)[y_true]
        return np.sum((Y_onehot - proba_pred) ** 2) / Y_onehot.shape[0]

    # Test calibration for multiclass with classifier that implements
    # only decision function.
    clf = LinearSVC(random_state=7)
    X, y = make_blobs(n_samples=500, n_features=100, random_state=seed,
                      centers=10, cluster_std=15.0)

    # Use an unbalanced dataset by collapsing 8 clusters into one class
    # to make the naive calibration based on a softmax more unlikely
    # to work.
    y[y > 2] = 2
    n_classes = np.unique(y).shape[0]
    X_train, y_train = X[::2], y[::2]
    X_test, y_test = X[1::2], y[1::2]

    clf.fit(X_train, y_train)

    cal_clf = CalibratedClassifierCV(
        clf, method=method, cv=5, ensemble=ensemble
    )
    cal_clf.fit(X_train, y_train)
    probas = cal_clf.predict_proba(X_test)
    # Check probabilities sum to 1
    assert_allclose(np.sum(probas, axis=1), np.ones(len(X_test)))

    # Check that the dataset is not too trivial, otherwise it's hard
    # to get interesting calibration data during the internal
    # cross-validation loop.
    assert 0.65 < clf.score(X_test, y_test) < 0.95

    # Check that the accuracy of the calibrated model is never degraded
    # too much compared to the original classifier.
    assert cal_clf.score(X_test, y_test) > 0.95 * clf.score(X_test, y_test)

    # Check that Brier loss of calibrated classifier is smaller than
    # loss obtained by naively turning OvR decision function to
    # probabilities via a softmax
    uncalibrated_brier = \
        multiclass_brier(y_test, softmax(clf.decision_function(X_test)),
                         n_classes=n_classes)
    calibrated_brier = multiclass_brier(y_test, probas,
                                        n_classes=n_classes)

    assert calibrated_brier < 1.1 * uncalibrated_brier

    # Test that calibration of a multiclass classifier decreases log-loss
    # for RandomForestClassifier
    clf = RandomForestClassifier(n_estimators=30, random_state=42)
    clf.fit(X_train, y_train)
    clf_probs = clf.predict_proba(X_test)
    uncalibrated_brier = multiclass_brier(y_test, clf_probs,
                                          n_classes=n_classes)

    cal_clf = CalibratedClassifierCV(
        clf, method=method, cv=5, ensemble=ensemble
    )
    cal_clf.fit(X_train, y_train)
    cal_clf_probs = cal_clf.predict_proba(X_test)
    calibrated_brier = multiclass_brier(y_test, cal_clf_probs,
                                        n_classes=n_classes)
    assert calibrated_brier < 1.1 * uncalibrated_brier
def test_calibration_zero_probability():
    """Edge case: when every per-class calibrator outputs exactly zero for
    a sample, _CalibratedClassifier must fall back to uniform probabilities
    instead of hitting a division by zero in the normalization step."""
    class ZeroCalibrator():
        # Called from _CalibratedClassifier.predict_proba.
        def predict(self, X):
            return np.zeros(X.shape[0])

    X, y = make_blobs(n_samples=50, n_features=10, random_state=7,
                      centers=10, cluster_std=15.0)
    base_clf = DummyClassifier().fit(X, y)
    cal_clf = _CalibratedClassifier(
        base_estimator=base_clf, calibrators=[ZeroCalibrator()],
        classes=base_clf.classes_)

    probas = cal_clf.predict_proba(X)

    # Check that all probabilities are uniformly 1. / clf.n_classes_
    assert_allclose(probas, 1. / base_clf.n_classes_)
def test_calibration_prefit():
    """Test calibration with ``cv="prefit"`` (prefitted classifiers).

    Covers three aspects: fitting the calibrator around an unfitted
    estimator raises ``NotFittedError``; both calibration methods accept
    dense and sparse calibration data, with and without sample weights;
    and calibration lowers the Brier score relative to the raw classifier.
    """
    n_samples = 50
    X, y = make_classification(n_samples=3 * n_samples, n_features=6,
                               random_state=42)
    sample_weight = np.random.RandomState(seed=42).uniform(size=y.size)
    X -= X.min()  # MultinomialNB only allows positive X
    # split into three equal parts: train / calibration / test
    X_train, y_train, sw_train = \
        X[:n_samples], y[:n_samples], sample_weight[:n_samples]
    X_calib, y_calib, sw_calib = \
        X[n_samples:2 * n_samples], y[n_samples:2 * n_samples], \
        sample_weight[n_samples:2 * n_samples]
    X_test, y_test = X[2 * n_samples:], y[2 * n_samples:]
    # Naive-Bayes base estimator
    clf = MultinomialNB()
    # Fitting a "prefit" calibrator around an unfitted estimator must fail.
    unfit_clf = CalibratedClassifierCV(clf, cv="prefit")
    with pytest.raises(NotFittedError):
        unfit_clf.fit(X_calib, y_calib)
    clf.fit(X_train, y_train, sw_train)
    prob_pos_clf = clf.predict_proba(X_test)[:, 1]
    # Calibrate on held-out data: dense and sparse input, both methods,
    # with and without sample weights.
    for this_X_calib, this_X_test in [(X_calib, X_test),
                                      (sparse.csr_matrix(X_calib),
                                       sparse.csr_matrix(X_test))]:
        for method in ['isotonic', 'sigmoid']:
            cal_clf = CalibratedClassifierCV(clf, method=method, cv="prefit")
            for sw in [sw_calib, None]:
                cal_clf.fit(this_X_calib, y_calib, sample_weight=sw)
                y_prob = cal_clf.predict_proba(this_X_test)
                y_pred = cal_clf.predict(this_X_test)
                prob_pos_cal_clf = y_prob[:, 1]
                # predict() must agree with the argmax of predict_proba().
                assert_array_equal(y_pred,
                                   np.array([0, 1])[np.argmax(y_prob, axis=1)])
                # Calibration should strictly improve the Brier score.
                assert (brier_score_loss(y_test, prob_pos_clf) >
                        brier_score_loss(y_test, prob_pos_cal_clf))
@pytest.mark.parametrize('method', ['sigmoid', 'isotonic'])
def test_calibration_ensemble_false(data, method):
    """``ensemble=False`` must be equivalent to fitting the calibrator on
    out-of-fold predictions obtained with ``cross_val_predict``."""
    X, y = data
    clf = LinearSVC(random_state=7)
    cal_clf = CalibratedClassifierCV(clf, method=method, cv=3, ensemble=False)
    cal_clf.fit(X, y)
    cal_probas = cal_clf.predict_proba(X)
    # Rebuild the calibrator by hand from out-of-fold decision values.
    oof_decisions = cross_val_predict(
        clf, X, y, cv=3, method='decision_function'
    )
    calibrator = (IsotonicRegression(out_of_bounds='clip')
                  if method == 'isotonic'
                  else _SigmoidCalibration())
    calibrator.fit(oof_decisions, y)
    # The final model uses `clf` refit on the full dataset.
    clf.fit(X, y)
    manual_probas = calibrator.predict(clf.decision_function(X))
    assert_allclose(cal_probas[:, 1], manual_probas)
def test_sigmoid_calibration():
    """Check that Platt sigmoid calibration matches the LibSVM reference."""
    decisions = np.array([5, -4, 1.0])
    targets = np.array([1, -1, -1])
    # Reference (A, B) computed from a python port of the C++ code in LibSVM.
    ab_ref = np.array([-0.20261354391187855, 0.65236314980010512])
    assert_array_almost_equal(ab_ref,
                              _sigmoid_calibration(decisions, targets), 3)
    # Probability implied by the reference coefficients: 1 / (1 + e^(A*f + B)).
    expected = 1. / (1. + np.exp(ab_ref[0] * decisions + ab_ref[1]))
    actual = _SigmoidCalibration().fit(decisions, targets).predict(decisions)
    assert_array_almost_equal(expected, actual, 6)
    # _SigmoidCalibration().fit only accepts 1d arrays or 2d column arrays.
    assert_raises(ValueError, _SigmoidCalibration().fit,
                  np.vstack((decisions, decisions)), targets)
def test_calibration_curve():
    """Exercise ``calibration_curve`` with uniform and quantile binning."""
    labels = np.array([0, 0, 0, 1, 1, 1])
    scores = np.array([0., 0.1, 0.2, 0.8, 0.9, 1.])
    frac_pos, mean_pred = calibration_curve(labels, scores, n_bins=2)
    # Doubling the scores while normalizing must give the identical curve.
    frac_pos_norm, mean_pred_norm = \
        calibration_curve(labels, scores * 2, n_bins=2, normalize=True)
    assert len(frac_pos) == len(mean_pred)
    assert len(frac_pos) == 2
    assert_almost_equal(frac_pos, [0, 1])
    assert_almost_equal(mean_pred, [0.1, 0.9])
    assert_almost_equal(frac_pos, frac_pos_norm)
    assert_almost_equal(mean_pred, mean_pred_norm)
    # Probabilities outside [0, 1] are rejected when normalize=False.
    assert_raises(ValueError, calibration_curve, [1.1], [-0.1],
                  normalize=False)
    # Quantile binning: bins hold equal numbers of samples.
    labels2 = np.array([0, 0, 0, 0, 1, 1])
    scores2 = np.array([0., 0.1, 0.2, 0.5, 0.9, 1.])
    frac_pos_q, mean_pred_q = calibration_curve(
        labels2, scores2, n_bins=2, strategy='quantile')
    assert len(frac_pos_q) == len(mean_pred_q)
    assert len(frac_pos_q) == 2
    assert_almost_equal(frac_pos_q, [0, 2 / 3])
    assert_almost_equal(mean_pred_q, [0.1, 0.8])
    # An unknown binning strategy must raise a ValueError.
    assert_raises(ValueError, calibration_curve, labels2, scores2,
                  strategy='percentile')
@pytest.mark.parametrize('ensemble', [True, False])
def test_calibration_nan_imputer(ensemble):
    """Calibration must accept NaN in `X` when an imputer handles it."""
    X, y = make_classification(n_samples=10, n_features=2,
                               n_informative=2, n_redundant=0,
                               random_state=42)
    X[0, 0] = np.nan
    pipeline = Pipeline([
        ('imputer', SimpleImputer()),
        ('rf', RandomForestClassifier(n_estimators=1)),
    ])
    calibrated = CalibratedClassifierCV(
        pipeline, cv=2, method='isotonic', ensemble=ensemble
    )
    calibrated.fit(X, y)
    # Prediction must run without raising on the NaN-containing input.
    calibrated.predict(X)
@pytest.mark.parametrize('ensemble', [True, False])
def test_calibration_prob_sum(ensemble):
    """Predicted probabilities must sum to 1 for every sample.

    Non-regression test for issue #7796.
    """
    num_classes = 2
    # Pin random_state: without it the dataset is resampled on every run,
    # making the test nondeterministic (potentially flaky).
    X, y = make_classification(n_samples=10, n_features=5,
                               n_classes=num_classes, random_state=42)
    clf = LinearSVC(C=1.0, random_state=7)
    clf_prob = CalibratedClassifierCV(
        clf, method="sigmoid", cv=LeaveOneOut(), ensemble=ensemble
    )
    clf_prob.fit(X, y)
    probs = clf_prob.predict_proba(X)
    # Rows of predict_proba must each sum to exactly 1.
    assert_array_almost_equal(probs.sum(axis=1), np.ones(probs.shape[0]))
@pytest.mark.parametrize('ensemble', [True, False])
def test_calibration_less_classes(ensemble):
    """Calibration must cope with CV train folds missing some classes.

    With LeaveOneOut and one sample per class, each train split is
    missing exactly one class label.
    """
    # Use a seeded local RandomState instead of the unseeded global numpy
    # RNG: the original was nondeterministic and mutated global RNG state.
    rng = np.random.RandomState(42)
    X = rng.randn(10, 5)
    y = np.arange(10)
    clf = LinearSVC(C=1.0, random_state=7)
    cal_clf = CalibratedClassifierCV(
        clf, method="sigmoid", cv=LeaveOneOut(), ensemble=ensemble
    )
    cal_clf.fit(X, y)
    for i, calibrated_classifier in \
            enumerate(cal_clf.calibrated_classifiers_):
        proba = calibrated_classifier.predict_proba(X)
        if ensemble:
            # The class left out of this split must get probability 0 ...
            assert_array_equal(proba[:, i], np.zeros(len(y)))
            # ... while every observed class keeps a positive probability.
            assert np.all(proba[:, :i] > 0)
            assert np.all(proba[:, i + 1:] > 0)
        else:
            # With ensemble=False the pooled probabilities are uniform.
            assert np.allclose(proba, 1 / proba.shape[0])
@ignore_warnings(category=FutureWarning)
@pytest.mark.parametrize('X', [np.random.RandomState(42).randn(15, 5, 2),
                               np.random.RandomState(42).randn(15, 5, 2, 6)])
def test_calibration_accepts_ndarray(X):
    """Calibration must accept n-dimensional arrays as input."""
    y = [1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0]

    class MockTensorClassifier(BaseEstimator):
        """A toy estimator that accepts tensor inputs"""

        def fit(self, X, y):
            self.classes_ = np.unique(y)
            return self

        def decision_function(self, X):
            # Flatten each sample and sum: only the output shape matters.
            return X.reshape(X.shape[0], -1).sum(axis=1)

    calibrated_clf = CalibratedClassifierCV(MockTensorClassifier())
    # Fitting must succeed without error despite the tensor-shaped X.
    calibrated_clf.fit(X, y)
@pytest.fixture
def text_data():
    """Toy dict-encoded dataset together with binary labels."""
    samples = [
        {'state': 'NY', 'age': 'adult'},
        {'state': 'TX', 'age': 'adult'},
        {'state': 'VT', 'age': 'child'},
    ]
    labels = [1, 0, 1]
    return samples, labels
@pytest.fixture
def text_data_pipeline(text_data):
    """A vectorizer + random-forest pipeline prefit on ``text_data``."""
    X, y = text_data
    steps = [
        ('vectorizer', DictVectorizer()),
        ('clf', RandomForestClassifier()),
    ]
    return Pipeline(steps).fit(X, y)
def test_calibration_pipeline(text_data, text_data_pipeline):
    """Calibration works on a prefit pipeline with a transformer, where
    the raw `X` is not array-like, sparse matrix or dataframe.

    See https://github.com/scikit-learn/scikit-learn/issues/8710
    """
    X, y = text_data
    clf = text_data_pipeline
    calib_clf = CalibratedClassifierCV(clf, cv='prefit')
    calib_clf.fit(X, y)
    # classes_ is forwarded from the fitted estimator ...
    assert_array_equal(calib_clf.classes_, clf.classes_)
    # ... but n_features_in_ cannot be determined from dict input.
    msg = "'CalibratedClassifierCV' object has no attribute"
    with pytest.raises(AttributeError, match=msg):
        calib_clf.n_features_in_
@pytest.mark.parametrize('clf, cv', [
    pytest.param(LinearSVC(C=1), 2),
    pytest.param(LinearSVC(C=1), 'prefit'),
])
def test_calibration_attributes(clf, cv):
    """``n_features_in_`` and ``classes_`` must be created on fit."""
    X, y = make_classification(n_samples=10, n_features=5,
                               n_classes=2, random_state=7)
    if cv == 'prefit':
        clf = clf.fit(X, y)
    calib_clf = CalibratedClassifierCV(clf, cv=cv)
    calib_clf.fit(X, y)
    if cv == 'prefit':
        # Attributes are mirrored from the prefit estimator.
        assert_array_equal(calib_clf.classes_, clf.classes_)
        assert calib_clf.n_features_in_ == clf.n_features_in_
    else:
        # Otherwise they are derived from the training data.
        expected_classes = LabelEncoder().fit(y).classes_
        assert_array_equal(calib_clf.classes_, expected_classes)
        assert calib_clf.n_features_in_ == X.shape[1]
# FIXME: remove in 1.1
def test_calibrated_classifier_cv_deprecation(data):
    """Accessing ``calibrators_`` on ``_CalibratedClassifier`` must warn
    (FutureWarning) while still aliasing the new ``calibrators``."""
    X, y = data
    calib_clf = CalibratedClassifierCV(cv=2).fit(X, y)
    with pytest.warns(FutureWarning):
        calibrators = calib_clf.calibrated_classifiers_[0].calibrators_
    pairs = zip(calibrators,
                calib_clf.calibrated_classifiers_[0].calibrators)
    for old_calibrator, new_calibrator in pairs:
        assert old_calibrator is new_calibrator
| [
"sklearn.preprocessing.LabelEncoder",
"sklearn.utils._testing.assert_raises",
"sklearn.utils._testing.assert_array_equal",
"numpy.array",
"numpy.linalg.norm",
"sklearn.calibration.CalibratedClassifierCV",
"pytest.fixture",
"sklearn.model_selection.KFold",
"numpy.arange",
"numpy.random.RandomState"... | [((1389, 1419), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (1403, 1419), False, 'import pytest\n'), ((1542, 1600), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""method"""', "['sigmoid', 'isotonic']"], {}), "('method', ['sigmoid', 'isotonic'])\n", (1565, 1600), False, 'import pytest\n'), ((1602, 1652), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""ensemble"""', '[True, False]'], {}), "('ensemble', [True, False])\n", (1625, 1652), False, 'import pytest\n'), ((4424, 4474), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""ensemble"""', '[True, False]'], {}), "('ensemble', [True, False])\n", (4447, 4474), False, 'import pytest\n'), ((4804, 4854), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""ensemble"""', '[True, False]'], {}), "('ensemble', [True, False])\n", (4827, 4854), False, 'import pytest\n'), ((5522, 5572), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""ensemble"""', '[True, False]'], {}), "('ensemble', [True, False])\n", (5545, 5572), False, 'import pytest\n'), ((6026, 6084), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""method"""', "['sigmoid', 'isotonic']"], {}), "('method', ['sigmoid', 'isotonic'])\n", (6049, 6084), False, 'import pytest\n'), ((6086, 6136), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""ensemble"""', '[True, False]'], {}), "('ensemble', [True, False])\n", (6109, 6136), False, 'import pytest\n'), ((6990, 7048), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""method"""', "['sigmoid', 'isotonic']"], {}), "('method', ['sigmoid', 'isotonic'])\n", (7013, 7048), False, 'import pytest\n'), ((7050, 7100), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""ensemble"""', '[True, False]'], {}), "('ensemble', [True, False])\n", (7073, 7100), False, 'import pytest\n'), ((7848, 7906), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""method"""', 
"['sigmoid', 'isotonic']"], {}), "('method', ['sigmoid', 'isotonic'])\n", (7871, 7906), False, 'import pytest\n'), ((7908, 7958), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""ensemble"""', '[True, False]'], {}), "('ensemble', [True, False])\n", (7931, 7958), False, 'import pytest\n'), ((13616, 13674), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""method"""', "['sigmoid', 'isotonic']"], {}), "('method', ['sigmoid', 'isotonic'])\n", (13639, 13674), False, 'import pytest\n'), ((16825, 16875), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""ensemble"""', '[True, False]'], {}), "('ensemble', [True, False])\n", (16848, 16875), False, 'import pytest\n'), ((17417, 17467), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""ensemble"""', '[True, False]'], {}), "('ensemble', [True, False])\n", (17440, 17467), False, 'import pytest\n'), ((18023, 18073), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""ensemble"""', '[True, False]'], {}), "('ensemble', [True, False])\n", (18046, 18073), False, 'import pytest\n'), ((19116, 19155), 'sklearn.utils._testing.ignore_warnings', 'ignore_warnings', ([], {'category': 'FutureWarning'}), '(category=FutureWarning)\n', (19131, 19155), False, 'from sklearn.utils._testing import assert_array_almost_equal, assert_almost_equal, assert_array_equal, assert_raises, ignore_warnings\n'), ((1443, 1508), 'sklearn.datasets.make_classification', 'make_classification', ([], {'n_samples': '(200)', 'n_features': '(6)', 'random_state': '(42)'}), '(n_samples=200, n_features=6, random_state=42)\n', (1462, 1508), False, 'from sklearn.datasets import make_classification, make_blobs\n'), ((2256, 2317), 'sklearn.calibration.CalibratedClassifierCV', 'CalibratedClassifierCV', (['clf'], {'cv': '(y.size + 1)', 'ensemble': 'ensemble'}), '(clf, cv=y.size + 1, ensemble=ensemble)\n', (2278, 2317), False, 'from sklearn.calibration import CalibratedClassifierCV, _CalibratedClassifier\n'), ((2322, 2366), 
'sklearn.utils._testing.assert_raises', 'assert_raises', (['ValueError', 'cal_clf.fit', 'X', 'y'], {}), '(ValueError, cal_clf.fit, X, y)\n', (2335, 2366), False, 'from sklearn.utils._testing import assert_array_almost_equal, assert_almost_equal, assert_array_equal, assert_raises, ignore_warnings\n'), ((4616, 4627), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {}), '()\n', (4625, 4627), False, 'from sklearn.svm import LinearSVC\n'), ((4653, 4713), 'sklearn.calibration.CalibratedClassifierCV', 'CalibratedClassifierCV', (['clf'], {'method': '"""foo"""', 'ensemble': 'ensemble'}), "(clf, method='foo', ensemble=ensemble)\n", (4675, 4713), False, 'from sklearn.calibration import CalibratedClassifierCV, _CalibratedClassifier\n'), ((5355, 5383), 'sklearn.calibration.CalibratedClassifierCV', 'CalibratedClassifierCV', ([], {'cv': '(2)'}), '(cv=2)\n', (5377, 5383), False, 'from sklearn.calibration import CalibratedClassifierCV, _CalibratedClassifier\n'), ((5706, 5728), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': 'splits'}), '(n_splits=splits)\n', (5711, 5728), False, 'from sklearn.model_selection import KFold, cross_val_predict\n'), ((5745, 5796), 'sklearn.calibration.CalibratedClassifierCV', 'CalibratedClassifierCV', ([], {'cv': 'kfold', 'ensemble': 'ensemble'}), '(cv=kfold, ensemble=ensemble)\n', (5767, 5796), False, 'from sklearn.calibration import CalibratedClassifierCV, _CalibratedClassifier\n'), ((6442, 6468), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {'random_state': '(42)'}), '(random_state=42)\n', (6451, 6468), False, 'from sklearn.svm import LinearSVC\n'), ((6490, 6562), 'sklearn.calibration.CalibratedClassifierCV', 'CalibratedClassifierCV', (['base_estimator'], {'method': 'method', 'ensemble': 'ensemble'}), '(base_estimator, method=method, ensemble=ensemble)\n', (6512, 6562), False, 'from sklearn.calibration import CalibratedClassifierCV, _CalibratedClassifier\n'), ((6916, 6964), 'numpy.linalg.norm', 'np.linalg.norm', (['(probs_with_sw - 
probs_without_sw)'], {}), '(probs_with_sw - probs_without_sw)\n', (6930, 6964), True, 'import numpy as np\n'), ((7245, 7284), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'random_state': '(42)'}), '(X, y, random_state=42)\n', (7261, 7284), False, 'from sklearn.model_selection import LeaveOneOut, train_test_split\n'), ((7307, 7333), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {'random_state': '(42)'}), '(random_state=42)\n', (7316, 7333), False, 'from sklearn.svm import LinearSVC\n'), ((7358, 7445), 'sklearn.calibration.CalibratedClassifierCV', 'CalibratedClassifierCV', (['base_estimator'], {'method': 'method', 'n_jobs': '(2)', 'ensemble': 'ensemble'}), '(base_estimator, method=method, n_jobs=2, ensemble=\n ensemble)\n', (7380, 7445), False, 'from sklearn.calibration import CalibratedClassifierCV, _CalibratedClassifier\n'), ((7584, 7671), 'sklearn.calibration.CalibratedClassifierCV', 'CalibratedClassifierCV', (['base_estimator'], {'method': 'method', 'n_jobs': '(1)', 'ensemble': 'ensemble'}), '(base_estimator, method=method, n_jobs=1, ensemble=\n ensemble)\n', (7606, 7671), False, 'from sklearn.calibration import CalibratedClassifierCV, _CalibratedClassifier\n'), ((7795, 7844), 'numpy.testing.assert_allclose', 'assert_allclose', (['probs_parallel', 'probs_sequential'], {}), '(probs_parallel, probs_sequential)\n', (7810, 7844), False, 'from numpy.testing import assert_allclose\n'), ((8432, 8457), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {'random_state': '(7)'}), '(random_state=7)\n', (8441, 8457), False, 'from sklearn.svm import LinearSVC\n'), ((8469, 8563), 'sklearn.datasets.make_blobs', 'make_blobs', ([], {'n_samples': '(500)', 'n_features': '(100)', 'random_state': 'seed', 'centers': '(10)', 'cluster_std': '(15.0)'}), '(n_samples=500, n_features=100, random_state=seed, centers=10,\n cluster_std=15.0)\n', (8479, 8563), False, 'from sklearn.datasets import make_classification, make_blobs\n'), ((8916, 8983), 
'sklearn.calibration.CalibratedClassifierCV', 'CalibratedClassifierCV', (['clf'], {'method': 'method', 'cv': '(5)', 'ensemble': 'ensemble'}), '(clf, method=method, cv=5, ensemble=ensemble)\n', (8938, 8983), False, 'from sklearn.calibration import CalibratedClassifierCV, _CalibratedClassifier\n'), ((10194, 10250), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(30)', 'random_state': '(42)'}), '(n_estimators=30, random_state=42)\n', (10216, 10250), False, 'from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor\n'), ((10462, 10529), 'sklearn.calibration.CalibratedClassifierCV', 'CalibratedClassifierCV', (['clf'], {'method': 'method', 'cv': '(5)', 'ensemble': 'ensemble'}), '(clf, method=method, cv=5, ensemble=ensemble)\n', (10484, 10529), False, 'from sklearn.calibration import CalibratedClassifierCV, _CalibratedClassifier\n'), ((11284, 11373), 'sklearn.datasets.make_blobs', 'make_blobs', ([], {'n_samples': '(50)', 'n_features': '(10)', 'random_state': '(7)', 'centers': '(10)', 'cluster_std': '(15.0)'}), '(n_samples=50, n_features=10, random_state=7, centers=10,\n cluster_std=15.0)\n', (11294, 11373), False, 'from sklearn.datasets import make_classification, make_blobs\n'), ((11478, 11572), 'sklearn.calibration._CalibratedClassifier', '_CalibratedClassifier', ([], {'base_estimator': 'clf', 'calibrators': '[calibrator]', 'classes': 'clf.classes_'}), '(base_estimator=clf, calibrators=[calibrator], classes\n =clf.classes_)\n', (11499, 11572), False, 'from sklearn.calibration import CalibratedClassifierCV, _CalibratedClassifier\n'), ((11690, 11735), 'numpy.testing.assert_allclose', 'assert_allclose', (['probas', '(1.0 / clf.n_classes_)'], {}), '(probas, 1.0 / clf.n_classes_)\n', (11705, 11735), False, 'from numpy.testing import assert_allclose\n'), ((11851, 11926), 'sklearn.datasets.make_classification', 'make_classification', ([], {'n_samples': '(3 * n_samples)', 'n_features': '(6)', 'random_state': 
'(42)'}), '(n_samples=3 * n_samples, n_features=6, random_state=42)\n', (11870, 11926), False, 'from sklearn.datasets import make_classification, make_blobs\n'), ((12450, 12465), 'sklearn.naive_bayes.MultinomialNB', 'MultinomialNB', ([], {}), '()\n', (12463, 12465), False, 'from sklearn.naive_bayes import MultinomialNB\n'), ((12518, 12558), 'sklearn.calibration.CalibratedClassifierCV', 'CalibratedClassifierCV', (['clf'], {'cv': '"""prefit"""'}), "(clf, cv='prefit')\n", (12540, 12558), False, 'from sklearn.calibration import CalibratedClassifierCV, _CalibratedClassifier\n'), ((13870, 13895), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {'random_state': '(7)'}), '(random_state=7)\n', (13879, 13895), False, 'from sklearn.svm import LinearSVC\n'), ((13911, 13975), 'sklearn.calibration.CalibratedClassifierCV', 'CalibratedClassifierCV', (['clf'], {'method': 'method', 'cv': '(3)', 'ensemble': '(False)'}), '(clf, method=method, cv=3, ensemble=False)\n', (13933, 13975), False, 'from sklearn.calibration import CalibratedClassifierCV, _CalibratedClassifier\n'), ((14088, 14150), 'sklearn.model_selection.cross_val_predict', 'cross_val_predict', (['clf', 'X', 'y'], {'cv': '(3)', 'method': '"""decision_function"""'}), "(clf, X, y, cv=3, method='decision_function')\n", (14105, 14150), False, 'from sklearn.model_selection import KFold, cross_val_predict\n'), ((14486, 14534), 'numpy.testing.assert_allclose', 'assert_allclose', (['cal_probas[:, 1]', 'manual_probas'], {}), '(cal_probas[:, 1], manual_probas)\n', (14501, 14534), False, 'from numpy.testing import assert_allclose\n'), ((14638, 14660), 'numpy.array', 'np.array', (['[5, -4, 1.0]'], {}), '([5, -4, 1.0])\n', (14646, 14660), True, 'import numpy as np\n'), ((14671, 14692), 'numpy.array', 'np.array', (['[1, -1, -1]'], {}), '([1, -1, -1])\n', (14679, 14692), True, 'import numpy as np\n'), ((14774, 14826), 'numpy.array', 'np.array', (['[-0.20261354391187855, 0.6523631498001051]'], {}), '([-0.20261354391187855, 
0.6523631498001051])\n', (14782, 14826), True, 'import numpy as np\n'), ((15082, 15129), 'sklearn.utils._testing.assert_array_almost_equal', 'assert_array_almost_equal', (['lin_prob', 'sk_prob', '(6)'], {}), '(lin_prob, sk_prob, 6)\n', (15107, 15129), False, 'from sklearn.utils._testing import assert_array_almost_equal, assert_almost_equal, assert_array_equal, assert_raises, ignore_warnings\n'), ((15413, 15441), 'numpy.array', 'np.array', (['[0, 0, 0, 1, 1, 1]'], {}), '([0, 0, 0, 1, 1, 1])\n', (15421, 15441), True, 'import numpy as np\n'), ((15455, 15495), 'numpy.array', 'np.array', (['[0.0, 0.1, 0.2, 0.8, 0.9, 1.0]'], {}), '([0.0, 0.1, 0.2, 0.8, 0.9, 1.0])\n', (15463, 15495), True, 'import numpy as np\n'), ((15521, 15564), 'sklearn.calibration.calibration_curve', 'calibration_curve', (['y_true', 'y_pred'], {'n_bins': '(2)'}), '(y_true, y_pred, n_bins=2)\n', (15538, 15564), False, 'from sklearn.calibration import calibration_curve\n'), ((15628, 15691), 'sklearn.calibration.calibration_curve', 'calibration_curve', (['y_true', '(y_pred * 2)'], {'n_bins': '(2)', 'normalize': '(True)'}), '(y_true, y_pred * 2, n_bins=2, normalize=True)\n', (15645, 15691), False, 'from sklearn.calibration import calibration_curve\n'), ((15771, 15809), 'sklearn.utils._testing.assert_almost_equal', 'assert_almost_equal', (['prob_true', '[0, 1]'], {}), '(prob_true, [0, 1])\n', (15790, 15809), False, 'from sklearn.utils._testing import assert_array_almost_equal, assert_almost_equal, assert_array_equal, assert_raises, ignore_warnings\n'), ((15814, 15856), 'sklearn.utils._testing.assert_almost_equal', 'assert_almost_equal', (['prob_pred', '[0.1, 0.9]'], {}), '(prob_pred, [0.1, 0.9])\n', (15833, 15856), False, 'from sklearn.utils._testing import assert_array_almost_equal, assert_almost_equal, assert_array_equal, assert_raises, ignore_warnings\n'), ((15861, 15915), 'sklearn.utils._testing.assert_almost_equal', 'assert_almost_equal', (['prob_true', 'prob_true_unnormalized'], {}), '(prob_true, 
prob_true_unnormalized)\n', (15880, 15915), False, 'from sklearn.utils._testing import assert_array_almost_equal, assert_almost_equal, assert_array_equal, assert_raises, ignore_warnings\n'), ((15920, 15974), 'sklearn.utils._testing.assert_almost_equal', 'assert_almost_equal', (['prob_pred', 'prob_pred_unnormalized'], {}), '(prob_pred, prob_pred_unnormalized)\n', (15939, 15974), False, 'from sklearn.utils._testing import assert_array_almost_equal, assert_almost_equal, assert_array_equal, assert_raises, ignore_warnings\n'), ((16075, 16151), 'sklearn.utils._testing.assert_raises', 'assert_raises', (['ValueError', 'calibration_curve', '[1.1]', '[-0.1]'], {'normalize': '(False)'}), '(ValueError, calibration_curve, [1.1], [-0.1], normalize=False)\n', (16088, 16151), False, 'from sklearn.utils._testing import assert_array_almost_equal, assert_almost_equal, assert_array_equal, assert_raises, ignore_warnings\n'), ((16228, 16256), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 1, 1]'], {}), '([0, 0, 0, 0, 1, 1])\n', (16236, 16256), True, 'import numpy as np\n'), ((16271, 16311), 'numpy.array', 'np.array', (['[0.0, 0.1, 0.2, 0.5, 0.9, 1.0]'], {}), '([0.0, 0.1, 0.2, 0.5, 0.9, 1.0])\n', (16279, 16311), True, 'import numpy as np\n'), ((16355, 16421), 'sklearn.calibration.calibration_curve', 'calibration_curve', (['y_true2', 'y_pred2'], {'n_bins': '(2)', 'strategy': '"""quantile"""'}), "(y_true2, y_pred2, n_bins=2, strategy='quantile')\n", (16372, 16421), False, 'from sklearn.calibration import calibration_curve\n'), ((16538, 16589), 'sklearn.utils._testing.assert_almost_equal', 'assert_almost_equal', (['prob_true_quantile', '[0, 2 / 3]'], {}), '(prob_true_quantile, [0, 2 / 3])\n', (16557, 16589), False, 'from sklearn.utils._testing import assert_array_almost_equal, assert_almost_equal, assert_array_equal, assert_raises, ignore_warnings\n'), ((16594, 16645), 'sklearn.utils._testing.assert_almost_equal', 'assert_almost_equal', (['prob_pred_quantile', '[0.1, 0.8]'], {}), 
'(prob_pred_quantile, [0.1, 0.8])\n', (16613, 16645), False, 'from sklearn.utils._testing import assert_array_almost_equal, assert_almost_equal, assert_array_equal, assert_raises, ignore_warnings\n'), ((16718, 16808), 'sklearn.utils._testing.assert_raises', 'assert_raises', (['ValueError', 'calibration_curve', 'y_true2', 'y_pred2'], {'strategy': '"""percentile"""'}), "(ValueError, calibration_curve, y_true2, y_pred2, strategy=\n 'percentile')\n", (16731, 16808), False, 'from sklearn.utils._testing import assert_array_almost_equal, assert_almost_equal, assert_array_equal, assert_raises, ignore_warnings\n'), ((16978, 17078), 'sklearn.datasets.make_classification', 'make_classification', ([], {'n_samples': '(10)', 'n_features': '(2)', 'n_informative': '(2)', 'n_redundant': '(0)', 'random_state': '(42)'}), '(n_samples=10, n_features=2, n_informative=2,\n n_redundant=0, random_state=42)\n', (16997, 17078), False, 'from sklearn.datasets import make_classification, make_blobs\n'), ((17287, 17358), 'sklearn.calibration.CalibratedClassifierCV', 'CalibratedClassifierCV', (['clf'], {'cv': '(2)', 'method': '"""isotonic"""', 'ensemble': 'ensemble'}), "(clf, cv=2, method='isotonic', ensemble=ensemble)\n", (17309, 17358), False, 'from sklearn.calibration import CalibratedClassifierCV, _CalibratedClassifier\n'), ((17627, 17697), 'sklearn.datasets.make_classification', 'make_classification', ([], {'n_samples': '(10)', 'n_features': '(5)', 'n_classes': 'num_classes'}), '(n_samples=10, n_features=5, n_classes=num_classes)\n', (17646, 17697), False, 'from sklearn.datasets import make_classification, make_blobs\n'), ((17739, 17771), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {'C': '(1.0)', 'random_state': '(7)'}), '(C=1.0, random_state=7)\n', (17748, 17771), False, 'from sklearn.svm import LinearSVC\n'), ((18339, 18361), 'numpy.random.randn', 'np.random.randn', (['(10)', '(5)'], {}), '(10, 5)\n', (18354, 18361), True, 'import numpy as np\n'), ((18370, 18383), 'numpy.arange', 
'np.arange', (['(10)'], {}), '(10)\n', (18379, 18383), True, 'import numpy as np\n'), ((18394, 18426), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {'C': '(1.0)', 'random_state': '(7)'}), '(C=1.0, random_state=7)\n', (18403, 18426), False, 'from sklearn.svm import LinearSVC\n'), ((20830, 20870), 'sklearn.calibration.CalibratedClassifierCV', 'CalibratedClassifierCV', (['clf'], {'cv': '"""prefit"""'}), "(clf, cv='prefit')\n", (20852, 20870), False, 'from sklearn.calibration import CalibratedClassifierCV, _CalibratedClassifier\n'), ((20957, 21009), 'sklearn.utils._testing.assert_array_equal', 'assert_array_equal', (['calib_clf.classes_', 'clf.classes_'], {}), '(calib_clf.classes_, clf.classes_)\n', (20975, 21009), False, 'from sklearn.utils._testing import assert_array_almost_equal, assert_almost_equal, assert_array_equal, assert_raises, ignore_warnings\n'), ((21409, 21485), 'sklearn.datasets.make_classification', 'make_classification', ([], {'n_samples': '(10)', 'n_features': '(5)', 'n_classes': '(2)', 'random_state': '(7)'}), '(n_samples=10, n_features=5, n_classes=2, random_state=7)\n', (21428, 21485), False, 'from sklearn.datasets import make_classification, make_blobs\n'), ((21584, 21618), 'sklearn.calibration.CalibratedClassifierCV', 'CalibratedClassifierCV', (['clf'], {'cv': 'cv'}), '(clf, cv=cv)\n', (21606, 21618), False, 'from sklearn.calibration import CalibratedClassifierCV, _CalibratedClassifier\n'), ((2613, 2680), 'sklearn.calibration.CalibratedClassifierCV', 'CalibratedClassifierCV', (['clf'], {'method': 'method', 'cv': '(5)', 'ensemble': 'ensemble'}), '(clf, method=method, cv=5, ensemble=ensemble)\n', (2635, 2680), False, 'from sklearn.calibration import CalibratedClassifierCV, _CalibratedClassifier\n'), ((3326, 3397), 'sklearn.utils._testing.assert_array_almost_equal', 'assert_array_almost_equal', (['prob_pos_cal_clf', 'prob_pos_cal_clf_relabeled'], {}), '(prob_pos_cal_clf, prob_pos_cal_clf_relabeled)\n', (3351, 3397), False, 'from sklearn.utils._testing 
import assert_array_almost_equal, assert_almost_equal, assert_array_equal, assert_raises, ignore_warnings\n'), ((3658, 3729), 'sklearn.utils._testing.assert_array_almost_equal', 'assert_array_almost_equal', (['prob_pos_cal_clf', 'prob_pos_cal_clf_relabeled'], {}), '(prob_pos_cal_clf, prob_pos_cal_clf_relabeled)\n', (3683, 3729), False, 'from sklearn.utils._testing import assert_array_almost_equal, assert_almost_equal, assert_array_equal, assert_raises, ignore_warnings\n'), ((4737, 4762), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (4750, 4762), False, 'import pytest\n'), ((5108, 5131), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {}), '()\n', (5129, 5131), False, 'from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor\n'), ((5161, 5188), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (5174, 5188), False, 'import pytest\n'), ((9130, 9152), 'numpy.sum', 'np.sum', (['probas'], {'axis': '(1)'}), '(probas, axis=1)\n', (9136, 9152), True, 'import numpy as np\n'), ((12568, 12597), 'pytest.raises', 'pytest.raises', (['NotFittedError'], {}), '(NotFittedError)\n', (12581, 12597), False, 'import pytest\n'), ((14215, 14255), 'sklearn.isotonic.IsotonicRegression', 'IsotonicRegression', ([], {'out_of_bounds': '"""clip"""'}), "(out_of_bounds='clip')\n", (14233, 14255), False, 'from sklearn.isotonic import IsotonicRegression\n'), ((14287, 14308), 'sklearn.calibration._SigmoidCalibration', '_SigmoidCalibration', ([], {}), '()\n', (14306, 14308), False, 'from sklearn.calibration import _sigmoid_calibration, _SigmoidCalibration\n'), ((14903, 14933), 'sklearn.calibration._sigmoid_calibration', '_sigmoid_calibration', (['exF', 'exY'], {}), '(exF, exY)\n', (14923, 14933), False, 'from sklearn.calibration import _sigmoid_calibration, _SigmoidCalibration\n'), ((15297, 15318), 'numpy.vstack', 'np.vstack', (['(exF, exF)'], {}), '((exF, exF))\n', (15306, 15318), True, 'import numpy as 
np\n'), ((17995, 18018), 'numpy.ones', 'np.ones', (['probs.shape[0]'], {}), '(probs.shape[0])\n', (18002, 18018), True, 'import numpy as np\n'), ((21080, 21120), 'pytest.raises', 'pytest.raises', (['AttributeError'], {'match': 'msg'}), '(AttributeError, match=msg)\n', (21093, 21120), False, 'import pytest\n'), ((21675, 21727), 'sklearn.utils._testing.assert_array_equal', 'assert_array_equal', (['calib_clf.classes_', 'clf.classes_'], {}), '(calib_clf.classes_, clf.classes_)\n', (21693, 21727), False, 'from sklearn.utils._testing import assert_array_almost_equal, assert_almost_equal, assert_array_equal, assert_raises, ignore_warnings\n'), ((21857, 21904), 'sklearn.utils._testing.assert_array_equal', 'assert_array_equal', (['calib_clf.classes_', 'classes'], {}), '(calib_clf.classes_, classes)\n', (21875, 21904), False, 'from sklearn.utils._testing import assert_array_almost_equal, assert_almost_equal, assert_array_equal, assert_raises, ignore_warnings\n'), ((22243, 22270), 'pytest.warns', 'pytest.warns', (['FutureWarning'], {}), '(FutureWarning)\n', (22255, 22270), False, 'import pytest\n'), ((1812, 1842), 'numpy.random.RandomState', 'np.random.RandomState', ([], {'seed': '(42)'}), '(seed=42)\n', (1833, 1842), True, 'import numpy as np\n'), ((2128, 2143), 'sklearn.naive_bayes.MultinomialNB', 'MultinomialNB', ([], {}), '()\n', (2141, 2143), False, 'from sklearn.naive_bayes import MultinomialNB\n'), ((2499, 2525), 'scipy.sparse.csr_matrix', 'sparse.csr_matrix', (['X_train'], {}), '(X_train)\n', (2516, 2525), False, 'from scipy import sparse\n'), ((2566, 2591), 'scipy.sparse.csr_matrix', 'sparse.csr_matrix', (['X_test'], {}), '(X_test)\n', (2583, 2591), False, 'from scipy import sparse\n'), ((3004, 3042), 'sklearn.metrics.brier_score_loss', 'brier_score_loss', (['y_test', 'prob_pos_clf'], {}), '(y_test, prob_pos_clf)\n', (3020, 3042), False, 'from sklearn.metrics import brier_score_loss\n'), ((3061, 3103), 'sklearn.metrics.brier_score_loss', 'brier_score_loss', 
(['y_test', 'prob_pos_cal_clf'], {}), '(y_test, prob_pos_cal_clf)\n', (3077, 3103), False, 'from sklearn.metrics import brier_score_loss\n'), ((3993, 4068), 'sklearn.utils._testing.assert_array_almost_equal', 'assert_array_almost_equal', (['prob_pos_cal_clf', '(1 - prob_pos_cal_clf_relabeled)'], {}), '(prob_pos_cal_clf, 1 - prob_pos_cal_clf_relabeled)\n', (4018, 4068), False, 'from sklearn.utils._testing import assert_array_almost_equal, assert_almost_equal, assert_array_equal, assert_raises, ignore_warnings\n'), ((6242, 6272), 'numpy.random.RandomState', 'np.random.RandomState', ([], {'seed': '(42)'}), '(seed=42)\n', (6263, 6272), True, 'import numpy as np\n'), ((8223, 8240), 'numpy.eye', 'np.eye', (['n_classes'], {}), '(n_classes)\n', (8229, 8240), True, 'import numpy as np\n'), ((8264, 8300), 'numpy.sum', 'np.sum', (['((Y_onehot - proba_pred) ** 2)'], {}), '((Y_onehot - proba_pred) ** 2)\n', (8270, 8300), True, 'import numpy as np\n'), ((8772, 8784), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (8781, 8784), True, 'import numpy as np\n'), ((11251, 11271), 'numpy.zeros', 'np.zeros', (['X.shape[0]'], {}), '(X.shape[0])\n', (11259, 11271), True, 'import numpy as np\n'), ((11402, 11419), 'sklearn.dummy.DummyClassifier', 'DummyClassifier', ([], {}), '()\n', (11417, 11419), False, 'from sklearn.dummy import DummyClassifier\n'), ((11978, 12008), 'numpy.random.RandomState', 'np.random.RandomState', ([], {'seed': '(42)'}), '(seed=42)\n', (11999, 12008), True, 'import numpy as np\n'), ((12863, 12889), 'scipy.sparse.csr_matrix', 'sparse.csr_matrix', (['X_calib'], {}), '(X_calib)\n', (12880, 12889), False, 'from scipy import sparse\n'), ((12930, 12955), 'scipy.sparse.csr_matrix', 'sparse.csr_matrix', (['X_test'], {}), '(X_test)\n', (12947, 12955), False, 'from scipy import sparse\n'), ((13028, 13083), 'sklearn.calibration.CalibratedClassifierCV', 'CalibratedClassifierCV', (['clf'], {'method': 'method', 'cv': '"""prefit"""'}), "(clf, method=method, cv='prefit')\n", 
(13050, 13083), False, 'from sklearn.calibration import CalibratedClassifierCV, _CalibratedClassifier\n'), ((14964, 15013), 'numpy.exp', 'np.exp', (['(AB_lin_libsvm[0] * exF + AB_lin_libsvm[1])'], {}), '(AB_lin_libsvm[0] * exF + AB_lin_libsvm[1])\n', (14970, 15013), True, 'import numpy as np\n'), ((15252, 15273), 'sklearn.calibration._SigmoidCalibration', '_SigmoidCalibration', ([], {}), '()\n', (15271, 15273), False, 'from sklearn.calibration import _sigmoid_calibration, _SigmoidCalibration\n'), ((17845, 17858), 'sklearn.model_selection.LeaveOneOut', 'LeaveOneOut', ([], {}), '()\n', (17856, 17858), False, 'from sklearn.model_selection import LeaveOneOut, train_test_split\n'), ((18499, 18512), 'sklearn.model_selection.LeaveOneOut', 'LeaveOneOut', ([], {}), '()\n', (18510, 18512), False, 'from sklearn.model_selection import LeaveOneOut, train_test_split\n'), ((18920, 18944), 'numpy.all', 'np.all', (['(proba[:, :i] > 0)'], {}), '(proba[:, :i] > 0)\n', (18926, 18944), True, 'import numpy as np\n'), ((18964, 18992), 'numpy.all', 'np.all', (['(proba[:, i + 1:] > 0)'], {}), '(proba[:, i + 1:] > 0)\n', (18970, 18992), True, 'import numpy as np\n'), ((19074, 19112), 'numpy.allclose', 'np.allclose', (['proba', '(1 / proba.shape[0])'], {}), '(proba, 1 / proba.shape[0])\n', (19085, 19112), True, 'import numpy as np\n'), ((19636, 19648), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (19645, 19648), True, 'import numpy as np\n'), ((21212, 21226), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {'C': '(1)'}), '(C=1)\n', (21221, 21226), False, 'from sklearn.svm import LinearSVC\n'), ((21249, 21263), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {'C': '(1)'}), '(C=1)\n', (21258, 21263), False, 'from sklearn.svm import LinearSVC\n'), ((22194, 22222), 'sklearn.calibration.CalibratedClassifierCV', 'CalibratedClassifierCV', ([], {'cv': '(2)'}), '(cv=2)\n', (22216, 22222), False, 'from sklearn.calibration import CalibratedClassifierCV, _CalibratedClassifier\n'), ((4259, 4297), 
'sklearn.metrics.brier_score_loss', 'brier_score_loss', (['y_test', 'prob_pos_clf'], {}), '(y_test, prob_pos_clf)\n', (4275, 4297), False, 'from sklearn.metrics import brier_score_loss\n'), ((4320, 4382), 'sklearn.metrics.brier_score_loss', 'brier_score_loss', (['((y_test + 1) % 2)', 'prob_pos_cal_clf_relabeled'], {}), '((y_test + 1) % 2, prob_pos_cal_clf_relabeled)\n', (4336, 4382), False, 'from sklearn.metrics import brier_score_loss\n'), ((17199, 17214), 'sklearn.impute.SimpleImputer', 'SimpleImputer', ([], {}), '()\n', (17212, 17214), False, 'from sklearn.impute import SimpleImputer\n'), ((17233, 17271), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(1)'}), '(n_estimators=1)\n', (17255, 17271), False, 'from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor\n'), ((19187, 19212), 'numpy.random.RandomState', 'np.random.RandomState', (['(42)'], {}), '(42)\n', (19208, 19212), True, 'import numpy as np\n'), ((19261, 19286), 'numpy.random.RandomState', 'np.random.RandomState', (['(42)'], {}), '(42)\n', (19282, 19286), True, 'import numpy as np\n'), ((20381, 20397), 'sklearn.feature_extraction.DictVectorizer', 'DictVectorizer', ([], {}), '()\n', (20395, 20397), False, 'from sklearn.feature_extraction import DictVectorizer\n'), ((20416, 20440), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {}), '()\n', (20438, 20440), False, 'from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor\n'), ((13504, 13542), 'sklearn.metrics.brier_score_loss', 'brier_score_loss', (['y_test', 'prob_pos_clf'], {}), '(y_test, prob_pos_clf)\n', (13520, 13542), False, 'from sklearn.metrics import brier_score_loss\n'), ((13569, 13611), 'sklearn.metrics.brier_score_loss', 'brier_score_loss', (['y_test', 'prob_pos_cal_clf'], {}), '(y_test, prob_pos_cal_clf)\n', (13585, 13611), False, 'from sklearn.metrics import brier_score_loss\n'), ((15029, 15050), 
'sklearn.calibration._SigmoidCalibration', '_SigmoidCalibration', ([], {}), '()\n', (15048, 15050), False, 'from sklearn.calibration import _sigmoid_calibration, _SigmoidCalibration\n'), ((21818, 21832), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (21830, 21832), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((13434, 13450), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (13442, 13450), True, 'import numpy as np\n'), ((13451, 13476), 'numpy.argmax', 'np.argmax', (['y_prob'], {'axis': '(1)'}), '(y_prob, axis=1)\n', (13460, 13476), True, 'import numpy as np\n')] |
import sys
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from cv2 import *
import cv2
import threading
import time
from PyQt5.QtGui import QPixmap
from PyQt5.QtWidgets import QWidget,QSplashScreen, QApplication, QGroupBox, QPushButton, QLabel, QHBoxLayout, QVBoxLayout, QGridLayout
from src.MTCNN import create_Kao_Onet, create_Kao_Rnet, create_Kao_Pnet
import imutils
import src.tools_matrix as tools
import numpy as np
from queue import Queue
import tensorflow as tf
import os
import src.facenet
import pickle
import random
import threading
import src.validate_twopics as vt
# Edge length (pixels) used when scaling face thumbnails for the
# "recognized" / "unrecognized" grid cells and the detail panel.
img_w_dis = 150
img_h_dis = 150
# Serializes detailDisplay() handlers so concurrent clicks on name buttons
# cannot interleave while the detail panel is being updated.
click_lock = threading.Lock()
class MyWindow(QMainWindow):
    """Main application window.

    Plays a video source (offline file or real-time camera), runs MTCNN face
    detection on sampled frames, and shows detected/recognized faces in two
    thumbnail grids plus a detail panel. Recognition uses a facenet embedding
    model and a pickled classifier (see initFacenet/recognizeFace).
    """
    # Video source kinds.
    VIDEO_TYPE_OFFLINE = 0
    VIDEO_TYPE_REAL_TIME = 1
    # Playback states (also used as an index in switch_video).
    STATUS_INIT = 0
    STATUS_PLAYING = 1
    STATUS_PAUSE = 2
    video_url = ""
    progress = 0
    def __init__(self, video_url="", video_type=VIDEO_TYPE_OFFLINE, auto_play=False):
        """Build the UI, wire the frame timer, and optionally start playback.

        :param video_url: path/URL of the video source ("" = none yet)
        :param video_type: VIDEO_TYPE_OFFLINE or VIDEO_TYPE_REAL_TIME
        :param auto_play: start playing immediately when a URL is given
        """
        super(MyWindow, self).__init__()
        self.resize(1024, 768)
        self.setWindowTitle("奥卫科技-人脸识别")
        # Build the four panels before laying them out.
        self.createGridGroupBox_UnRecognize()
        self.createGridGroupBox_Recognized()
        self.createGridGroupBox_Video()
        self.createGridGroupBox_RecognizedDetail()
        main_layout = QGridLayout()
        # self.createUI()
        self.threadId = 0
        main_layout.addWidget(self.gridGroupBox_UnRecognize, 0, 0)
        main_layout.addWidget(self.gridGroupBox_Video, 0, 1)
        main_layout.addWidget(self.gridGroupBox_RecognizeDetail, 0, 2)
        main_layout.addWidget(self.gridGroupBox_Recognize, 1, 0, 1, 3)
        self.gridGroupBox = QGroupBox()
        self.gridGroupBox.setLayout(main_layout)
        self.setCentralWidget(self.gridGroupBox)
        self.cameraConfig = cameraConfigDia()
        self.createStatusbar()
        self.createMenu()
        # Timestamp of the last frame handed to the detection thread; used to
        # throttle detection to roughly one frame every 0.3 s (show_video_images).
        self.pre = 0.0
        self.img_stack = []
        # MTCNN per-stage score thresholds (P-net, R-net, O-net) used by detectFace.
        self.threshold = [0.8, 0.8, 0.9]
        self.video_url = video_url
        self.video_type = video_type  # 0: offline 1: realTime
        self.auto_play = auto_play
        self.status = self.STATUS_INIT  # 0: init 1:playing 2: pause
        # VideoTimer emits a string signal per tick; each tick fetches one frame.
        self.timer = VideoTimer()
        self.timer.timeSignal.signal[str].connect(self.show_video_images)
        # Initial video capture setup.
        self.playCapture = VideoCapture()
        if self.video_url != "":
            self.set_timer_fps()
        if self.auto_play:
            self.switch_video()
        # Background daemon thread driving the progress counter.
        self.thread2 = threading.Thread(target=self.update_timer)
        self.thread2.setDaemon(True)
        self.thread2.start()
        self.q_thread = Queue()
    def closeEvent(self, event):
        """Terminate the application when the window is closed.

        NOTE(review): sys.exit() makes the following line unreachable, and
        `app` is a module-level global defined only under __main__ — confirm
        this is intentional.
        """
        sys.exit(app.exec_())
        self.cameraConfig.exec()
    def createUI(self):
        # Unused alternative entry point; __init__ already sets the central widget.
        self.setCentralWidget(self.gridGroupBox)
    def createGridGroupBox_RecognizedDetail(self):
        """Build the right-hand detail panel: one large sample image + caption."""
        init_orig_image = QPixmap("../data/sample.png").scaled(img_w_dis*2, img_w_dis*2)
        self.imgeLabel_1 = QLabel()
        self.imgeLabel_1.setPixmap(init_orig_image)
        self.imgeLabel_sample = QLabel("样本图像")
        layout = QGridLayout()
        layout.addWidget(self.imgeLabel_1, 0, 0)
        layout.addWidget(self.imgeLabel_sample, 1, 0)
        self.gridGroupBox_RecognizeDetail = QGroupBox("详细信息")
        self.gridGroupBox_RecognizeDetail.setLayout(layout)
    def createGridGroupBox_Recognized(self):
        """Build the 2x6 grid of recognized faces.

        Each cell (image, clickable name button, id, rate) is also pushed into
        self.q_recognize, which temp_recongize rotates to reuse cells FIFO.
        """
        self.q_recognize = Queue()
        layout = QGridLayout()
        init_image = QPixmap("../data/loading.jpg").scaled(img_w_dis, img_w_dis)
        for i in range(0, 2):
            for j in range(0, 6):
                vboxGroupBox = QGroupBox()
                layoutbox = QVBoxLayout()
                layoutbox.setObjectName("boxlayout")
                imgeLabel_0 = QLabel()
                imgeLabel_0.setObjectName("image")
                imgeLabel_0.setPixmap(init_image)
                imgeLabel_name = QPushButton("姓名")
                imgeLabel_name.setObjectName("name")
                imgeLabel_id = QLabel("学号")
                imgeLabel_id.setObjectName("id")
                imgeLabel_rate = QLabel("识别率")
                imgeLabel_rate.setObjectName("rate")
                layoutbox.addWidget(imgeLabel_0)
                layoutbox.addWidget(imgeLabel_name)
                layoutbox.addWidget(imgeLabel_id)
                layoutbox.addWidget(imgeLabel_rate)
                vboxGroupBox.setLayout(layoutbox)
                # Clicking the name button opens the detail panel for that person.
                imgeLabel_name.clicked.connect(self.detailDisplay)
                layout.addWidget(vboxGroupBox, i, j)
                self.q_recognize.put(vboxGroupBox)
        self.gridGroupBox_Recognize = QGroupBox("已识别")
        self.gridGroupBox_Recognize.setLayout(layout)
    def createGridGroupBox_UnRecognize(self):
        """Build the 4x2 grid of detected-but-unrecognized faces (rotated via self.q_unrecognize)."""
        init_image = QPixmap("../data/loading.jpg").scaled(img_w_dis, img_h_dis)
        layout = QGridLayout()
        self.q_unrecognize = Queue()
        for i in range(0, 2):
            for j in range(0, 4):
                vboxGroupBox = QGroupBox()
                layoutbox = QVBoxLayout()
                layoutbox.setObjectName("boxlayout")
                imgeLabel_0 = QLabel()
                imgeLabel_0.setPixmap(init_image)
                imgeLabel_0.setObjectName("image")
                layoutbox.addWidget(imgeLabel_0)
                vboxGroupBox.setLayout(layoutbox)
                layout.addWidget(vboxGroupBox, j, i)
                self.q_unrecognize.put(vboxGroupBox)
        self.gridGroupBox_UnRecognize = QGroupBox("待识别")
        self.gridGroupBox_UnRecognize.setLayout(layout)
    def createGridGroupBox_Video(self):
        """Build the center panel: the video display label and a play/pause button."""
        self.gridGroupBox_Video = QGroupBox("video")
        layout = QGridLayout()
        layout.setSpacing(10)
        self.pictureLabel = QLabel()
        init_image = QPixmap("../data/loading.jpg").scaled(640, 480)
        self.pictureLabel.setPixmap(init_image)
        self.playButton = QPushButton()
        self.playButton.setEnabled(True)
        self.playButton.setIcon(self.style().standardIcon(QStyle.SP_MediaPlay))
        self.playButton.clicked.connect(self.switch_video)
        layout.addWidget(self.pictureLabel, 0, 0, 1, 2)
        layout.addWidget(self.playButton, 1, 0, 1, 1)
        self.gridGroupBox_Video.setLayout(layout)
    # Status bar
    def createStatusbar(self):
        """Show a placeholder message in the status bar."""
        self.statusBar().showMessage('状态栏...')
    # Menu bar
    def createMenu(self):
        """Populate the menu bar: system control, settings (camera dialog), help."""
        # menubar = QMenuBar(self)
        menubar = self.menuBar()
        menu = menubar.addMenu("系统控制(F)")
        # NOTE(review): the "open east/west gate" actions are all wired to
        # qApp.quit — placeholders, presumably; confirm intended behavior.
        menu.addAction(QAction(QIcon("ico_open_16_16.jpg"), "打开东门", self, triggered=qApp.quit))
        menu.addAction(QAction(QIcon("ico_save_16_16.jpg"), "打开西门", self, triggered=qApp.quit))
        menu.addSeparator()
        menu.addAction(
            QAction(QIcon("ico_close_16_16.jpg"), "关闭", self, triggered=qApp.quit))
        menu = menubar.addMenu("设置(E)")
        cameraSetting = QAction('摄像头设置', self)
        cameraSetting.setStatusTip('摄像头设置')
        cameraSetting.triggered.connect(self.cameraConfig.show)
        menu.addAction(cameraSetting)
        menu = menubar.addMenu("帮助(H)")
        menu.addAction('关于', lambda: QMessageBox.about(self, '关于', '奥卫科技'), Qt.CTRL + Qt.Key_Q)  # note the Ctrl+Q shortcut
    def initFacenet(self):
        """Load the facenet model into a TF session and cache graph tensors.

        NOTE(review): the module is imported as `src.facenet`, but this calls
        bare `facenet.load_model` — verify the import actually binds `facenet`,
        otherwise this raises NameError at runtime.
        """
        with tf.Graph().as_default():
            self.sess = tf.Session()
            facenet.load_model('models/20180408-102900',session =self.sess)
            self.images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
            self.embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
            self.phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
            # Path of the pickled (classifier, class_names) pair used by recognizeFace.
            self.classifier_filename_exp = os.path.expanduser('2018zhongzhuanv2.pkl')
    def initNet(self, Pnet, Rnet, Onet, lock):
        """Attach the three pre-built MTCNN stage networks and a shared lock."""
        self.Pnet = Pnet
        self.Rnet = Rnet
        self.Onet = Onet
        self.lock = lock
    def reset(self):
        """Stop the timer, release the capture, and return to the INIT state."""
        self.timer.stop()
        self.playCapture.release()
        self.status = MyWindow.STATUS_INIT
        self.playButton.setIcon(self.style().standardIcon(QStyle.SP_MediaPlay))
    def set_timer_fps(self):
        """Read the source FPS from the capture and apply it to the frame timer."""
        self.playCapture.open(self.video_url)
        fps = self.playCapture.get(CAP_PROP_FPS)
        self.timer.set_fps(fps)
        self.playCapture.release()
    def set_video(self, url, video_type=VIDEO_TYPE_OFFLINE, auto_play=False):
        """Switch to a new video source, resetting playback state first."""
        self.reset()
        self.video_url = url
        self.video_type = video_type
        self.auto_play = auto_play
        self.set_timer_fps()
        if self.auto_play:
            self.switch_video()
    def detailDisplay(self):
        """Slot for name-button clicks: show that person's sample image in the detail panel."""
        # imageLabel_name = box.findChild(QLabel, "name")
        click_lock.acquire()
        box = self.sender()
        text = box.text()
        box.update()
        text = box.text()
        print("detailDisplay text is : %s" % text)
        self.imgeLabel_sample.setText(text)
        dir = '../data/zhongzhuan' #dir + '/' + file + '_0.png'
        files = os.listdir(dir)
        # Match any directory whose name appears in the button text.
        for file in files:
            if file in text:
                init_orig_image = QPixmap(dir + '/' + file + '/'+file+'.png').scaled(img_w_dis * 2, img_w_dis * 2)
                self.imgeLabel_1.setPixmap(init_orig_image)
        click_lock.release()
    def play(self):
        """Start (or resume) playback of the current source."""
        if self.video_url == "" or self.video_url is None:
            return
        if not self.playCapture.isOpened():
            self.playCapture.open(self.video_url)
        self.timer.start()
        self.playButton.setIcon(self.style().standardIcon(QStyle.SP_MediaPause))
        self.status = MyWindow.STATUS_PLAYING
    def stop(self):
        """Pause playback; real-time sources also release the capture device."""
        if self.video_url == "" or self.video_url is None:
            return
        if self.playCapture.isOpened():
            self.timer.stop()
            if self.video_type is MyWindow.VIDEO_TYPE_REAL_TIME:
                self.playCapture.release()
            self.playButton.setIcon(self.style().standardIcon(QStyle.SP_MediaPlay))
        self.status = MyWindow.STATUS_PAUSE
    def re_play(self):
        """Restart playback from the beginning of the source."""
        if self.video_url == "" or self.video_url is None:
            return
        self.playCapture.release()
        self.playCapture.open(self.video_url)
        self.timer.start()
        self.playButton.setIcon(self.style().standardIcon(QStyle.SP_MediaPause))
        self.status = MyWindow.STATUS_PLAYING
    def show_video_images(self):
        """Timer slot: read one frame, display it, and hand it to detection.

        Detection (self.music) runs in a background thread at most once every
        0.3 s to avoid flooding the networks with frames.
        """
        if self.playCapture.isOpened():
            success, frame = self.playCapture.read()
            if success:
                #frame = imutils.resize(frame, width=1000)
                frame = imutils.resize(frame)
                now = time.time()
                if now - self.pre > 0.3:
                    self.thread_it(self.music, frame)
                    self.pre = now
                start = time.time()
                #cv2.imwrite('temp/' + str(time.time()) + '.jpg', frame)
                height, width = frame.shape[:2]
                # Convert to RGB for QImage (OpenCV frames are BGR / grayscale).
                if frame.ndim == 3:
                    rgb = cvtColor(frame, COLOR_BGR2RGB)
                elif frame.ndim == 2:
                    rgb = cvtColor(frame, COLOR_GRAY2BGR)
                temp_image = QImage(rgb.flatten(), width, height, QImage.Format_RGB888)
                temp_pixmap = QPixmap.fromImage(temp_image).scaled(640, 480)
                self.pictureLabel.setPixmap(temp_pixmap)
            else:
                print("read failed, no frame data")
                success, frame = self.playCapture.read()
                if not success and self.video_type is MyWindow.VIDEO_TYPE_OFFLINE:
                    print("play finished")  # local file playback finished
                    self.reset()
                    self.playButton.setIcon(self.style().standardIcon(QStyle.SP_MediaStop))
                    return
        else:
            print("open file or capturing device error, init again")
            self.reset()
    def update_timer(self):
        """Daemon loop: advance the progress counter while playing, wrapping at 15."""
        while (True):
            if self.status is MyWindow.STATUS_PLAYING:
                self.progress = self.progress + 1
                time.sleep(0.4)
            if (self.progress == 15):
                self.progress = 0
    def switch_video(self):
        """Toggle play/pause; the final tuple maps current status to the next one."""
        if self.video_url == "" or self.video_url is None:
            return
        if self.status is MyWindow.STATUS_INIT:
            self.playCapture.open(self.video_url)
            self.timer.start()
            self.playButton.setIcon(self.style().standardIcon(QStyle.SP_MediaPause))
        elif self.status is MyWindow.STATUS_PLAYING:
            self.timer.stop()
            if self.video_type is MyWindow.VIDEO_TYPE_REAL_TIME:
                self.playCapture.release()
            self.playButton.setIcon(self.style().standardIcon(QStyle.SP_MediaPlay))
        elif self.status is MyWindow.STATUS_PAUSE:
            if self.video_type is MyWindow.VIDEO_TYPE_REAL_TIME:
                self.playCapture.open(self.video_url)
            self.timer.start()
            self.playButton.setIcon(self.style().standardIcon(QStyle.SP_MediaPause))
        # INIT -> PLAYING, PLAYING -> PAUSE, PAUSE -> PLAYING.
        self.status = (MyWindow.STATUS_PLAYING,
                       MyWindow.STATUS_PAUSE,
                       MyWindow.STATUS_PLAYING)[self.status]
    def detectFace(self, img,threshold):
        """Run the three-stage MTCNN cascade on a BGR image.

        :param img: frame as a numpy array (H, W, 3)
        :param threshold: per-stage score thresholds [P-net, R-net, O-net]
        :return: list of face rectangles (may be empty)
        """
        # Normalize pixels to [-1, 1] as the networks expect.
        caffe_img = (img.copy() - 127.5) / 127.5
        origin_h, origin_w, ch = caffe_img.shape
        scales = tools.calculateScales(img)
        out = []
        t0 = time.time()
        # del scales[:4]
        # Stage 1: run P-net over an image pyramid.
        for scale in scales:
            hs = int(origin_h * scale)
            ws = int(origin_w * scale)
            scale_img = cv2.resize(caffe_img, (ws, hs))
            input = scale_img.reshape(1, *scale_img.shape)
            ouput = self.Pnet.predict(input)  # .transpose(0,2,1,3) should add, but seems after process is wrong then.
            out.append(ouput)
        image_num = len(scales)
        rectangles = []
        for i in range(image_num):
            cls_prob = out[i][0][0][:, :, 1]  # i = #scale, first 0 select cls score, second 0 = batchnum, alway=0. 1 one hot repr
            roi = out[i][1][0]
            out_h, out_w = cls_prob.shape
            out_side = out_w
            if out_h > out_w:
                out_side = out_h
            # out_side = max(out_h, out_w)
            # print('calculating img scale #:', i)
            cls_prob = np.swapaxes(cls_prob, 0, 1)
            roi = np.swapaxes(roi, 0, 2)
            rectangle = tools.detect_face_12net(cls_prob, roi, out_side, 1 / scales[i], origin_w, origin_h,
                                                threshold[0])
            rectangles.extend(rectangle)
        # Merge overlapping P-net candidates.
        rectangles = tools.NMS(rectangles, 0.85, 'iou')
        t1 = time.time()
        print('time for 12 net is: ', t1 - t0)
        if len(rectangles) == 0:
            return rectangles
        # Stage 2: refine candidates with R-net on 24x24 crops.
        crop_number = 0
        out = []
        predict_24_batch = []
        for rectangle in rectangles:
            crop_img = caffe_img[int(rectangle[1]):int(rectangle[3]), int(rectangle[0]):int(rectangle[2])]
            scale_img = cv2.resize(crop_img, (24, 24))
            predict_24_batch.append(scale_img)
            crop_number += 1
        predict_24_batch = np.array(predict_24_batch)
        out = self.Rnet.predict(predict_24_batch)
        cls_prob = out[0]  # first 0 is to select cls, second batch number, always =0
        cls_prob = np.array(cls_prob)  # convert to numpy
        roi_prob = out[1]  # first 0 is to select roi, second batch number, always =0
        roi_prob = np.array(roi_prob)
        rectangles = tools.filter_face_24net(cls_prob, roi_prob, rectangles, origin_w, origin_h, threshold[1])
        t2 = time.time()
        print('time for 24 net is: ', t2 - t1)
        if len(rectangles) == 0:
            return rectangles
        # Stage 3: final filtering plus landmarks with O-net on 48x48 crops.
        crop_number = 0
        predict_batch = []
        for rectangle in rectangles:
            crop_img = caffe_img[int(rectangle[1]):int(rectangle[3]), int(rectangle[0]):int(rectangle[2])]
            scale_img = cv2.resize(crop_img, (48, 48))
            predict_batch.append(scale_img)
            crop_number += 1
        predict_batch = np.array(predict_batch)
        output = self.Onet.predict(predict_batch)
        cls_prob = output[0]
        roi_prob = output[1]
        pts_prob = output[2]  # index
        rectangles = tools.filter_face_48net(cls_prob, roi_prob, pts_prob, rectangles, origin_w, origin_h, threshold[2])
        t3 = time.time()
        print('time for 48 net is: ', t3 - t2)
        return rectangles
    def prewhiten(self, x):
        """Standardize an image array to zero mean / unit (adjusted) std, facenet-style."""
        mean = np.mean(x)
        std = np.std(x)
        std_adj = np.maximum(std, 1.0 / np.sqrt(x.size))
        y = np.multiply(np.subtract(x, mean), 1 / std_adj)
        return y
    def crop(self, image, random_crop, image_size):
        """Resize to the fixed 160x160 facenet input (random_crop/image_size are ignored)."""
        image = cv2.resize(image, (160, 160))
        return image
    def flip(self, image, random_flip):
        """Optionally mirror the image horizontally with probability 1/2."""
        if random_flip and np.random.choice([True, False]):
            image = np.fliplr(image)
        return image
    # to identify one pic
    def recognizeFace(self, image):
        """Embed one face image with facenet and classify it.

        Loads the pickled classifier from self.classifier_filename_exp on
        every call.

        :return: (predicted class name, best-class probability array)
        """
        embedding_size = self.embeddings.get_shape()[1]
        emb_array = np.zeros((1, embedding_size))
        images = np.zeros((1, 160, 160, 3))
        img = self.prewhiten(image)
        img = self.crop(img, False, 160)
        img = self.flip(img, False)
        images[0, :, :, :] = img
        feed_dict = {self.images_placeholder: images, self.phase_train_placeholder: False}
        emb_array[0:1, :] = self.sess.run(self.embeddings, feed_dict=feed_dict)
        print('Testing classifier')
        with open(self.classifier_filename_exp, 'rb') as infile:
            (model, class_names) = pickle.load(infile)
        print('Loaded classifier model from file "%s"' % self.classifier_filename_exp)
        predictions = model.predict_proba(emb_array)
        print("czg predictions")
        print(predictions)
        best_class_indices = np.argmax(predictions, axis=1)
        # best_class_probabilities = predictions[np.arange(len(best_class_indices)), best_class_indices]
        resultText = class_names[best_class_indices[0]]
        best_class_indices = np.argmax(predictions, axis=1)
        print(best_class_indices)
        best_class_probabilities = predictions[np.arange(len(best_class_indices)), best_class_indices]
        print(best_class_probabilities)
        print("czg resultText is : %s" % resultText)
        return resultText,best_class_probabilities
        # self.textbox.setText(resultText)
    def rectangleDraw(self, rectangles, img):
        """Draw face boxes on a copy of the frame and push crops into the 'unrecognized' grid.

        :return: the annotated frame copy
        """
        draw = img.copy()
        for rectangle in rectangles:
            if rectangle is not None:
                W = -int(rectangle[0]) + int(rectangle[2])
                H = -int(rectangle[1]) + int(rectangle[3])
                # Shrink vertically / expand horizontally by a small margin.
                paddingH = 0.01 * W
                paddingW = 0.02 * H
                crop_img = img[int(rectangle[1] + paddingH):int(rectangle[3] - paddingH),
                           int(rectangle[0] - paddingW):int(rectangle[2] + paddingW)]
                crop_img = cv2.cvtColor(crop_img, cv2.COLOR_BGR2RGB)
                if crop_img is None:
                    continue
                if crop_img.shape[0] < 0 or crop_img.shape[1] < 0:
                    continue
                cv2.rectangle(draw, (int(rectangle[0]), int(rectangle[1])), (int(rectangle[2]), int(rectangle[3])),
                              (255, 0, 0), 1)
                crop_img = imutils.resize(crop_img, width=100)
                height, width = crop_img.shape[:2]
                temp_image = QImage(crop_img.flatten(), width, height, QImage.Format_RGB888)
                temp_pixmap = QPixmap.fromImage(temp_image).scaled(img_w_dis, img_w_dis)
                # Rotate grid cells through the queue so the oldest slot is reused.
                item = self.q_unrecognize.get()
                imageLabel_img = item.findChild(QLabel, "image")
                imageLabel_img.setPixmap(temp_pixmap)
                self.q_unrecognize.put(item)
        return draw
    def temp_recongize(self, crop_img):
        """Recognize a face crop and update the 'recognized' grid.

        Keeps the previous crop on self.img_stack and only re-runs recognition
        when the histogram similarity to the previous crop is low enough
        (i.e. the face likely changed).
        NOTE(review): on the first path the displayed rate is random
        (random.randint(70, 96) / 100), not a real confidence — confirm.
        """
        if not self.img_stack:
            self.img_stack.append(crop_img)
            rec_name, best_class_probabilities = self.recognizeFace(
                imutils.resize(crop_img, width=160))  # czg: call facenet face recognition
            crop_img = imutils.resize(crop_img, width=100)
            height, width = crop_img.shape[:2]
            temp_image = QImage(crop_img.flatten(), width, height, QImage.Format_RGB888)
            temp_pixmap = QPixmap.fromImage(temp_image).scaled(img_w_dis, img_w_dis)
            # Rotate grid cells through the queue so the oldest slot is reused.
            # self.imgeLabel_1.setPixmap(temp_pixmap)
            item = self.q_recognize.get()
            imageLabel_img = item.findChild(QLabel, "image")
            imageLabel_img.setPixmap(temp_pixmap)
            imageLabel_name = item.findChild(QPushButton, "name")
            imageLabel_name.setText(rec_name)
            imageLabel_id = item.findChild(QLabel, "id")
            imageLabel_id.setText(rec_name)
            imageLabel_rate = item.findChild(QLabel, "rate")
            rec_rate = random.randint(70, 96) / 100;
            imageLabel_rate.setText(str(rec_rate))
            self.q_recognize.put(item)
        else:
            pic_temp = self.img_stack.pop()
            vt_result = vt.classify_gray_hist(pic_temp, crop_img)
            print("czg vt_result is %f" % vt_result)
            self.img_stack.append(crop_img)
            # Low similarity to the previous crop -> treat as a new face.
            if vt_result < 0.65:
                rec_name, best_class_probabilities = self.recognizeFace(
                    imutils.resize(crop_img, width=160))  # czg: call facenet face recognition
                if best_class_probabilities[0] < 0.0095:
                    crop_img = imutils.resize(crop_img, width=100)
                    height, width = crop_img.shape[:2]
                    temp_image = QImage(crop_img.flatten(), width, height, QImage.Format_RGB888)
                    temp_pixmap = QPixmap.fromImage(temp_image).scaled(img_w_dis, img_w_dis)
                    # Rotate grid cells through the queue so the oldest slot is reused.
                    # self.imgeLabel_1.setPixmap(temp_pixmap)
                    item = self.q_recognize.get()
                    # layoutbox = item.findChild(QVBoxLayout, "boxlayout")
                    # layoutbox.removeWidget(QLabel)
                    imageLabel_img = item.findChild(QLabel, "image")
                    imageLabel_img.setPixmap(temp_pixmap)
                    imageLabel_name = item.findChild(QPushButton, "name")
                    imageLabel_name.setText(rec_name)
                    imageLabel_id = item.findChild(QLabel, "id")
                    imageLabel_id.setText(rec_name)
                    imageLabel_rate = item.findChild(QLabel, "rate")
                    # rec_rate = random.randint(70, 96)/100;
                    imageLabel_rate.setText(str(round((0.03 - best_class_probabilities[0]) / 0.03, 2)))
                    self.q_recognize.put(item)
    # Detection logic run per sampled frame during playback.
    def music(self, frame):
        """Detect faces on one frame and annotate/display the crops (runs in a worker thread)."""
        #cv2.imwrite('temp/' + str(time.time()) + '.jpg', frame)
        #self.lock.acquire()
        rectangles = self.detectFace(frame, self.threshold)
        frame = self.rectangleDraw(rectangles, frame)
        #self.lock.release()
    # Run a slow operation in a background thread.
    @staticmethod
    def thread_it(func, *args):
        """Run func(*args) in a daemon thread so the UI stays responsive."""
        t = threading.Thread(target=func, args=args)
        t.setDaemon(True)  # daemon: dies with the process
        t.start()  # start
        #t.join()  # joining would block and freeze the UI!
class cameraConfigDia(QDialog):
    """Settings dialog for the east/west gate cameras (IP address + threshold each)."""

    def __init__(self):
        super(cameraConfigDia, self).__init__()
        self.initUI()

    def initUI(self):
        """Build the form: one row per gate (IP + threshold edits) and a confirm button."""
        self.setWindowTitle("摄像头设置")
        self.setGeometry(400, 400, 600, 560)
        # East gate row.
        self.label_E = QLabel("东门IP")
        self.fileLineEdit_E_IP = QLineEdit()
        self.label_E_Threshold = QLabel("阈值")
        self.fileLineEdit_E_Threshold = QLineEdit()
        # West gate row.
        self.label_W = QLabel("西门IP")
        self.fileLineEdit_W_IP = QLineEdit()
        self.label_W_Threshold = QLabel("阈值")
        self.fileLineEdit_W_Threshold = QLineEdit()
        self.confirm = QPushButton("确认")
        self.confirm.setEnabled(True)
        self.confirm.clicked.connect(self.settingInfo)
        self.mainLayout = QGridLayout()
        self.mainLayout.addWidget(self.label_E, 0, 0)
        self.mainLayout.addWidget(self.fileLineEdit_E_IP, 0, 1)
        self.mainLayout.addWidget(self.label_E_Threshold, 0, 2)
        self.mainLayout.addWidget(self.fileLineEdit_E_Threshold, 0, 3)
        self.mainLayout.addWidget(self.label_W, 1, 0)
        self.mainLayout.addWidget(self.fileLineEdit_W_IP, 1, 1)
        self.mainLayout.addWidget(self.label_W_Threshold, 1, 2)
        self.mainLayout.addWidget(self.fileLineEdit_W_Threshold, 1, 3)
        self.mainLayout.addWidget(self.confirm, 2, 0)
        self.setLayout(self.mainLayout)

    def settingInfo(self):
        """Confirm handler: read the four line edits and echo the entered values.

        Bug fix: the west-gate threshold was previously printed with the
        label "E Threshold"; it is now correctly labelled "W Threshold".
        """
        value_E_IP = self.fileLineEdit_E_IP.displayText()
        value_E_Threshold = self.fileLineEdit_E_Threshold.displayText()
        value_W_IP = self.fileLineEdit_W_IP.displayText()
        value_W_Threshold = self.fileLineEdit_W_Threshold.displayText()
        print("E IP : " + value_E_IP)
        print("E Threshold : " + value_E_Threshold)
        print("W IP : " + value_W_IP)
        print("W Threshold : " + value_W_Threshold)
class Communicate(QObject):
    """Signal carrier: a str-payload Qt signal, emitted by VideoTimer once per frame tick."""
    signal = pyqtSignal(str)
class VideoTimer(QThread):
    """Worker thread that emits a tick signal at a configurable frame rate."""
    def __init__(self, frequent=20):
        # frequent: ticks per second (frames per second of the video source).
        QThread.__init__(self)
        self.stopped = False
        self.frequent = frequent
        self.timeSignal = Communicate()
        self.mutex = QMutex()
    def run(self):
        """Emit "1" every 1/frequent seconds until stop() is called.

        NOTE(review): if set_fps() was called with 0 (some captures report
        CAP_PROP_FPS as 0), the sleep below raises ZeroDivisionError — confirm.
        The `self.stopped` read in the loop is not mutex-protected; a bool
        flag read is presumably considered safe here.
        """
        with QMutexLocker(self.mutex):
            self.stopped = False
        while True:
            if self.stopped:
                return
            self.timeSignal.signal.emit("1")
            time.sleep(1 / self.frequent)
    def stop(self):
        """Request the run loop to exit (takes effect at the next iteration)."""
        with QMutexLocker(self.mutex):
            self.stopped = True
    def is_stopped(self):
        """Return whether a stop has been requested."""
        with QMutexLocker(self.mutex):
            return self.stopped
    def set_fps(self, fps):
        """Set the tick rate in frames per second."""
        self.frequent = fps
def loadNet():
    """Create the three MTCNN stage networks and warm each up with a dummy pass.

    The warm-up prediction forces graph construction up front so the first
    real frame is not delayed. Returns (Pnet, Rnet, Onet) and also binds them
    as module globals.
    """
    global Pnet, Rnet, Onet
    Pnet = create_Kao_Pnet(r'12net.h5')
    Rnet = create_Kao_Rnet(r'24net.h5')
    Onet = create_Kao_Onet(r'48net.h5')  # will not work. caffe and TF incompatible
    # One dummy forward pass per network, at that network's expected input size.
    for net, side in ((Pnet, 100), (Rnet, 24), (Onet, 48)):
        warm_img = cv2.imread('../data/loading.jpg')
        scaled = cv2.resize(warm_img, (side, side))
        net.predict(scaled.reshape(1, *scaled.shape))
    return Pnet, Rnet, Onet
if __name__ == "__main__":
    app = QApplication(sys.argv)
    # Splash screen shown while the networks load.
    splash = QSplashScreen(QPixmap("../data/loading.jpg"))
    splash.showMessage("加载... 0%", Qt.AlignHCenter | Qt.AlignBottom, Qt.black)
    splash.show()
    # Build the MTCNN stages, then the main window, then the facenet session.
    Pnet, Rnet, Onet = loadNet()
    mw = MyWindow()
    lock = threading.Lock()
    mw.initNet(Pnet, Rnet, Onet, lock)
    mw.initFacenet()
    # Default source: local file, no autoplay (user presses play).
    mw.set_video("east.mp4", MyWindow.VIDEO_TYPE_OFFLINE, False)
    mw.show()
    splash.finish(mw)
    sys.exit(app.exec_())
| [
"numpy.sqrt",
"src.tools_matrix.calculateScales",
"PyQt5.QtGui.QPixmap.fromImage",
"src.MTCNN.create_Kao_Onet",
"time.sleep",
"numpy.array",
"PyQt5.QtWidgets.QApplication",
"src.tools_matrix.detect_face_12net",
"PyQt5.QtWidgets.QVBoxLayout",
"src.tools_matrix.filter_face_48net",
"numpy.mean",
... | [((660, 676), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (674, 676), False, 'import threading\n'), ((25969, 25996), 'src.MTCNN.create_Kao_Pnet', 'create_Kao_Pnet', (['"""12net.h5"""'], {}), "('12net.h5')\n", (25984, 25996), False, 'from src.MTCNN import create_Kao_Onet, create_Kao_Rnet, create_Kao_Pnet\n'), ((26009, 26036), 'src.MTCNN.create_Kao_Rnet', 'create_Kao_Rnet', (['"""24net.h5"""'], {}), "('24net.h5')\n", (26024, 26036), False, 'from src.MTCNN import create_Kao_Onet, create_Kao_Rnet, create_Kao_Pnet\n'), ((26049, 26076), 'src.MTCNN.create_Kao_Onet', 'create_Kao_Onet', (['"""48net.h5"""'], {}), "('48net.h5')\n", (26064, 26076), False, 'from src.MTCNN import create_Kao_Onet, create_Kao_Rnet, create_Kao_Pnet\n'), ((26132, 26165), 'cv2.imread', 'cv2.imread', (['"""../data/loading.jpg"""'], {}), "('../data/loading.jpg')\n", (26142, 26165), False, 'import cv2\n'), ((26182, 26209), 'cv2.resize', 'cv2.resize', (['img', '(100, 100)'], {}), '(img, (100, 100))\n', (26192, 26209), False, 'import cv2\n'), ((26295, 26328), 'cv2.imread', 'cv2.imread', (['"""../data/loading.jpg"""'], {}), "('../data/loading.jpg')\n", (26305, 26328), False, 'import cv2\n'), ((26345, 26370), 'cv2.resize', 'cv2.resize', (['img', '(24, 24)'], {}), '(img, (24, 24))\n', (26355, 26370), False, 'import cv2\n'), ((26456, 26489), 'cv2.imread', 'cv2.imread', (['"""../data/loading.jpg"""'], {}), "('../data/loading.jpg')\n", (26466, 26489), False, 'import cv2\n'), ((26506, 26531), 'cv2.resize', 'cv2.resize', (['img', '(48, 48)'], {}), '(img, (48, 48))\n', (26516, 26531), False, 'import cv2\n'), ((26673, 26695), 'PyQt5.QtWidgets.QApplication', 'QApplication', (['sys.argv'], {}), '(sys.argv)\n', (26685, 26695), False, 'from PyQt5.QtWidgets import QWidget, QSplashScreen, QApplication, QGroupBox, QPushButton, QLabel, QHBoxLayout, QVBoxLayout, QGridLayout\n'), ((26916, 26932), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (26930, 26932), False, 'import threading\n'), ((1269, 1282), 
'PyQt5.QtWidgets.QGridLayout', 'QGridLayout', ([], {}), '()\n', (1280, 1282), False, 'from PyQt5.QtWidgets import QWidget, QSplashScreen, QApplication, QGroupBox, QPushButton, QLabel, QHBoxLayout, QVBoxLayout, QGridLayout\n'), ((1634, 1645), 'PyQt5.QtWidgets.QGroupBox', 'QGroupBox', ([], {}), '()\n', (1643, 1645), False, 'from PyQt5.QtWidgets import QWidget, QSplashScreen, QApplication, QGroupBox, QPushButton, QLabel, QHBoxLayout, QVBoxLayout, QGridLayout\n'), ((2469, 2511), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.update_timer'}), '(target=self.update_timer)\n', (2485, 2511), False, 'import threading\n'), ((2602, 2609), 'queue.Queue', 'Queue', ([], {}), '()\n', (2607, 2609), False, 'from queue import Queue\n'), ((2949, 2957), 'PyQt5.QtWidgets.QLabel', 'QLabel', ([], {}), '()\n', (2955, 2957), False, 'from PyQt5.QtWidgets import QWidget, QSplashScreen, QApplication, QGroupBox, QPushButton, QLabel, QHBoxLayout, QVBoxLayout, QGridLayout\n'), ((3042, 3056), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""样本图像"""'], {}), "('样本图像')\n", (3048, 3056), False, 'from PyQt5.QtWidgets import QWidget, QSplashScreen, QApplication, QGroupBox, QPushButton, QLabel, QHBoxLayout, QVBoxLayout, QGridLayout\n'), ((3074, 3087), 'PyQt5.QtWidgets.QGridLayout', 'QGridLayout', ([], {}), '()\n', (3085, 3087), False, 'from PyQt5.QtWidgets import QWidget, QSplashScreen, QApplication, QGroupBox, QPushButton, QLabel, QHBoxLayout, QVBoxLayout, QGridLayout\n'), ((3235, 3252), 'PyQt5.QtWidgets.QGroupBox', 'QGroupBox', (['"""详细信息"""'], {}), "('详细信息')\n", (3244, 3252), False, 'from PyQt5.QtWidgets import QWidget, QSplashScreen, QApplication, QGroupBox, QPushButton, QLabel, QHBoxLayout, QVBoxLayout, QGridLayout\n'), ((3388, 3395), 'queue.Queue', 'Queue', ([], {}), '()\n', (3393, 3395), False, 'from queue import Queue\n'), ((3413, 3426), 'PyQt5.QtWidgets.QGridLayout', 'QGridLayout', ([], {}), '()\n', (3424, 3426), False, 'from PyQt5.QtWidgets import QWidget, QSplashScreen, QApplication, 
QGroupBox, QPushButton, QLabel, QHBoxLayout, QVBoxLayout, QGridLayout\n'), ((4610, 4626), 'PyQt5.QtWidgets.QGroupBox', 'QGroupBox', (['"""已识别"""'], {}), "('已识别')\n", (4619, 4626), False, 'from PyQt5.QtWidgets import QWidget, QSplashScreen, QApplication, QGroupBox, QPushButton, QLabel, QHBoxLayout, QVBoxLayout, QGridLayout\n'), ((4826, 4839), 'PyQt5.QtWidgets.QGridLayout', 'QGridLayout', ([], {}), '()\n', (4837, 4839), False, 'from PyQt5.QtWidgets import QWidget, QSplashScreen, QApplication, QGroupBox, QPushButton, QLabel, QHBoxLayout, QVBoxLayout, QGridLayout\n'), ((4869, 4876), 'queue.Queue', 'Queue', ([], {}), '()\n', (4874, 4876), False, 'from queue import Queue\n'), ((5464, 5480), 'PyQt5.QtWidgets.QGroupBox', 'QGroupBox', (['"""待识别"""'], {}), "('待识别')\n", (5473, 5480), False, 'from PyQt5.QtWidgets import QWidget, QSplashScreen, QApplication, QGroupBox, QPushButton, QLabel, QHBoxLayout, QVBoxLayout, QGridLayout\n'), ((5612, 5630), 'PyQt5.QtWidgets.QGroupBox', 'QGroupBox', (['"""video"""'], {}), "('video')\n", (5621, 5630), False, 'from PyQt5.QtWidgets import QWidget, QSplashScreen, QApplication, QGroupBox, QPushButton, QLabel, QHBoxLayout, QVBoxLayout, QGridLayout\n'), ((5649, 5662), 'PyQt5.QtWidgets.QGridLayout', 'QGridLayout', ([], {}), '()\n', (5660, 5662), False, 'from PyQt5.QtWidgets import QWidget, QSplashScreen, QApplication, QGroupBox, QPushButton, QLabel, QHBoxLayout, QVBoxLayout, QGridLayout\n'), ((5721, 5729), 'PyQt5.QtWidgets.QLabel', 'QLabel', ([], {}), '()\n', (5727, 5729), False, 'from PyQt5.QtWidgets import QWidget, QSplashScreen, QApplication, QGroupBox, QPushButton, QLabel, QHBoxLayout, QVBoxLayout, QGridLayout\n'), ((5873, 5886), 'PyQt5.QtWidgets.QPushButton', 'QPushButton', ([], {}), '()\n', (5884, 5886), False, 'from PyQt5.QtWidgets import QWidget, QSplashScreen, QApplication, QGroupBox, QPushButton, QLabel, QHBoxLayout, QVBoxLayout, QGridLayout\n'), ((8945, 8960), 'os.listdir', 'os.listdir', (['dir'], {}), '(dir)\n', (8955, 8960), False, 
'import os\n'), ((13290, 13316), 'src.tools_matrix.calculateScales', 'tools.calculateScales', (['img'], {}), '(img)\n', (13311, 13316), True, 'import src.tools_matrix as tools\n'), ((13347, 13358), 'time.time', 'time.time', ([], {}), '()\n', (13356, 13358), False, 'import time\n'), ((14561, 14595), 'src.tools_matrix.NMS', 'tools.NMS', (['rectangles', '(0.85)', '"""iou"""'], {}), "(rectangles, 0.85, 'iou')\n", (14570, 14595), True, 'import src.tools_matrix as tools\n'), ((14610, 14621), 'time.time', 'time.time', ([], {}), '()\n', (14619, 14621), False, 'import time\n'), ((15108, 15134), 'numpy.array', 'np.array', (['predict_24_batch'], {}), '(predict_24_batch)\n', (15116, 15134), True, 'import numpy as np\n'), ((15292, 15310), 'numpy.array', 'np.array', (['cls_prob'], {}), '(cls_prob)\n', (15300, 15310), True, 'import numpy as np\n'), ((15436, 15454), 'numpy.array', 'np.array', (['roi_prob'], {}), '(roi_prob)\n', (15444, 15454), True, 'import numpy as np\n'), ((15476, 15569), 'src.tools_matrix.filter_face_24net', 'tools.filter_face_24net', (['cls_prob', 'roi_prob', 'rectangles', 'origin_w', 'origin_h', 'threshold[1]'], {}), '(cls_prob, roi_prob, rectangles, origin_w, origin_h,\n threshold[1])\n', (15499, 15569), True, 'import src.tools_matrix as tools\n'), ((15579, 15590), 'time.time', 'time.time', ([], {}), '()\n', (15588, 15590), False, 'import time\n'), ((16050, 16073), 'numpy.array', 'np.array', (['predict_batch'], {}), '(predict_batch)\n', (16058, 16073), True, 'import numpy as np\n'), ((16242, 16345), 'src.tools_matrix.filter_face_48net', 'tools.filter_face_48net', (['cls_prob', 'roi_prob', 'pts_prob', 'rectangles', 'origin_w', 'origin_h', 'threshold[2]'], {}), '(cls_prob, roi_prob, pts_prob, rectangles, origin_w,\n origin_h, threshold[2])\n', (16265, 16345), True, 'import src.tools_matrix as tools\n'), ((16355, 16366), 'time.time', 'time.time', ([], {}), '()\n', (16364, 16366), False, 'import time\n'), ((16484, 16494), 'numpy.mean', 'np.mean', (['x'], {}), 
'(x)\n', (16491, 16494), True, 'import numpy as np\n'), ((16509, 16518), 'numpy.std', 'np.std', (['x'], {}), '(x)\n', (16515, 16518), True, 'import numpy as np\n'), ((16721, 16750), 'cv2.resize', 'cv2.resize', (['image', '(160, 160)'], {}), '(image, (160, 160))\n', (16731, 16750), False, 'import cv2\n'), ((17073, 17102), 'numpy.zeros', 'np.zeros', (['(1, embedding_size)'], {}), '((1, embedding_size))\n', (17081, 17102), True, 'import numpy as np\n'), ((17120, 17146), 'numpy.zeros', 'np.zeros', (['(1, 160, 160, 3)'], {}), '((1, 160, 160, 3))\n', (17128, 17146), True, 'import numpy as np\n'), ((17851, 17881), 'numpy.argmax', 'np.argmax', (['predictions'], {'axis': '(1)'}), '(predictions, axis=1)\n', (17860, 17881), True, 'import numpy as np\n'), ((18072, 18102), 'numpy.argmax', 'np.argmax', (['predictions'], {'axis': '(1)'}), '(predictions, axis=1)\n', (18081, 18102), True, 'import numpy as np\n'), ((23149, 23189), 'threading.Thread', 'threading.Thread', ([], {'target': 'func', 'args': 'args'}), '(target=func, args=args)\n', (23165, 23189), False, 'import threading\n'), ((23542, 23556), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""东门IP"""'], {}), "('东门IP')\n", (23548, 23556), False, 'from PyQt5.QtWidgets import QWidget, QSplashScreen, QApplication, QGroupBox, QPushButton, QLabel, QHBoxLayout, QVBoxLayout, QGridLayout\n'), ((23635, 23647), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""阈值"""'], {}), "('阈值')\n", (23641, 23647), False, 'from PyQt5.QtWidgets import QWidget, QSplashScreen, QApplication, QGroupBox, QPushButton, QLabel, QHBoxLayout, QVBoxLayout, QGridLayout\n'), ((23724, 23738), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""西门IP"""'], {}), "('西门IP')\n", (23730, 23738), False, 'from PyQt5.QtWidgets import QWidget, QSplashScreen, QApplication, QGroupBox, QPushButton, QLabel, QHBoxLayout, QVBoxLayout, QGridLayout\n'), ((23817, 23829), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""阈值"""'], {}), "('阈值')\n", (23823, 23829), False, 'from PyQt5.QtWidgets import QWidget, 
QSplashScreen, QApplication, QGroupBox, QPushButton, QLabel, QHBoxLayout, QVBoxLayout, QGridLayout\n'), ((23906, 23923), 'PyQt5.QtWidgets.QPushButton', 'QPushButton', (['"""确认"""'], {}), "('确认')\n", (23917, 23923), False, 'from PyQt5.QtWidgets import QWidget, QSplashScreen, QApplication, QGroupBox, QPushButton, QLabel, QHBoxLayout, QVBoxLayout, QGridLayout\n'), ((24043, 24056), 'PyQt5.QtWidgets.QGridLayout', 'QGridLayout', ([], {}), '()\n', (24054, 24056), False, 'from PyQt5.QtWidgets import QWidget, QSplashScreen, QApplication, QGroupBox, QPushButton, QLabel, QHBoxLayout, QVBoxLayout, QGridLayout\n'), ((26723, 26753), 'PyQt5.QtGui.QPixmap', 'QPixmap', (['"""../data/loading.jpg"""'], {}), "('../data/loading.jpg')\n", (26730, 26753), False, 'from PyQt5.QtGui import QPixmap\n'), ((7262, 7274), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (7272, 7274), True, 'import tensorflow as tf\n'), ((7675, 7717), 'os.path.expanduser', 'os.path.expanduser', (['"""2018zhongzhuanv2.pkl"""'], {}), "('2018zhongzhuanv2.pkl')\n", (7693, 7717), False, 'import os\n'), ((13516, 13547), 'cv2.resize', 'cv2.resize', (['caffe_img', '(ws, hs)'], {}), '(caffe_img, (ws, hs))\n', (13526, 13547), False, 'import cv2\n'), ((14260, 14287), 'numpy.swapaxes', 'np.swapaxes', (['cls_prob', '(0)', '(1)'], {}), '(cls_prob, 0, 1)\n', (14271, 14287), True, 'import numpy as np\n'), ((14306, 14328), 'numpy.swapaxes', 'np.swapaxes', (['roi', '(0)', '(2)'], {}), '(roi, 0, 2)\n', (14317, 14328), True, 'import numpy as np\n'), ((14353, 14454), 'src.tools_matrix.detect_face_12net', 'tools.detect_face_12net', (['cls_prob', 'roi', 'out_side', '(1 / scales[i])', 'origin_w', 'origin_h', 'threshold[0]'], {}), '(cls_prob, roi, out_side, 1 / scales[i], origin_w,\n origin_h, threshold[0])\n', (14376, 14454), True, 'import src.tools_matrix as tools\n'), ((14973, 15003), 'cv2.resize', 'cv2.resize', (['crop_img', '(24, 24)'], {}), '(crop_img, (24, 24))\n', (14983, 15003), False, 'import cv2\n'), ((15922, 15952), 
'cv2.resize', 'cv2.resize', (['crop_img', '(48, 48)'], {}), '(crop_img, (48, 48))\n', (15932, 15952), False, 'import cv2\n'), ((16600, 16620), 'numpy.subtract', 'np.subtract', (['x', 'mean'], {}), '(x, mean)\n', (16611, 16620), True, 'import numpy as np\n'), ((16840, 16871), 'numpy.random.choice', 'np.random.choice', (['[True, False]'], {}), '([True, False])\n', (16856, 16871), True, 'import numpy as np\n'), ((16893, 16909), 'numpy.fliplr', 'np.fliplr', (['image'], {}), '(image)\n', (16902, 16909), True, 'import numpy as np\n'), ((17601, 17620), 'pickle.load', 'pickle.load', (['infile'], {}), '(infile)\n', (17612, 17620), False, 'import pickle\n'), ((20175, 20210), 'imutils.resize', 'imutils.resize', (['crop_img'], {'width': '(100)'}), '(crop_img, width=100)\n', (20189, 20210), False, 'import imutils\n'), ((21166, 21207), 'src.validate_twopics.classify_gray_hist', 'vt.classify_gray_hist', (['pic_temp', 'crop_img'], {}), '(pic_temp, crop_img)\n', (21187, 21207), True, 'import src.validate_twopics as vt\n'), ((25636, 25665), 'time.sleep', 'time.sleep', (['(1 / self.frequent)'], {}), '(1 / self.frequent)\n', (25646, 25665), False, 'import time\n'), ((2859, 2888), 'PyQt5.QtGui.QPixmap', 'QPixmap', (['"""../data/sample.png"""'], {}), "('../data/sample.png')\n", (2866, 2888), False, 'from PyQt5.QtGui import QPixmap\n'), ((3448, 3478), 'PyQt5.QtGui.QPixmap', 'QPixmap', (['"""../data/loading.jpg"""'], {}), "('../data/loading.jpg')\n", (3455, 3478), False, 'from PyQt5.QtGui import QPixmap\n'), ((3604, 3615), 'PyQt5.QtWidgets.QGroupBox', 'QGroupBox', ([], {}), '()\n', (3613, 3615), False, 'from PyQt5.QtWidgets import QWidget, QSplashScreen, QApplication, QGroupBox, QPushButton, QLabel, QHBoxLayout, QVBoxLayout, QGridLayout\n'), ((3644, 3657), 'PyQt5.QtWidgets.QVBoxLayout', 'QVBoxLayout', ([], {}), '()\n', (3655, 3657), False, 'from PyQt5.QtWidgets import QWidget, QSplashScreen, QApplication, QGroupBox, QPushButton, QLabel, QHBoxLayout, QVBoxLayout, QGridLayout\n'), ((3741, 
3749), 'PyQt5.QtWidgets.QLabel', 'QLabel', ([], {}), '()\n', (3747, 3749), False, 'from PyQt5.QtWidgets import QWidget, QSplashScreen, QApplication, QGroupBox, QPushButton, QLabel, QHBoxLayout, QVBoxLayout, QGridLayout\n'), ((3884, 3901), 'PyQt5.QtWidgets.QPushButton', 'QPushButton', (['"""姓名"""'], {}), "('姓名')\n", (3895, 3901), False, 'from PyQt5.QtWidgets import QWidget, QSplashScreen, QApplication, QGroupBox, QPushButton, QLabel, QHBoxLayout, QVBoxLayout, QGridLayout\n'), ((3986, 3998), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""学号"""'], {}), "('学号')\n", (3992, 3998), False, 'from PyQt5.QtWidgets import QWidget, QSplashScreen, QApplication, QGroupBox, QPushButton, QLabel, QHBoxLayout, QVBoxLayout, QGridLayout\n'), ((4081, 4094), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""识别率"""'], {}), "('识别率')\n", (4087, 4094), False, 'from PyQt5.QtWidgets import QWidget, QSplashScreen, QApplication, QGroupBox, QPushButton, QLabel, QHBoxLayout, QVBoxLayout, QGridLayout\n'), ((4749, 4779), 'PyQt5.QtGui.QPixmap', 'QPixmap', (['"""../data/loading.jpg"""'], {}), "('../data/loading.jpg')\n", (4756, 4779), False, 'from PyQt5.QtGui import QPixmap\n'), ((4972, 4983), 'PyQt5.QtWidgets.QGroupBox', 'QGroupBox', ([], {}), '()\n', (4981, 4983), False, 'from PyQt5.QtWidgets import QWidget, QSplashScreen, QApplication, QGroupBox, QPushButton, QLabel, QHBoxLayout, QVBoxLayout, QGridLayout\n'), ((5012, 5025), 'PyQt5.QtWidgets.QVBoxLayout', 'QVBoxLayout', ([], {}), '()\n', (5023, 5025), False, 'from PyQt5.QtWidgets import QWidget, QSplashScreen, QApplication, QGroupBox, QPushButton, QLabel, QHBoxLayout, QVBoxLayout, QGridLayout\n'), ((5109, 5117), 'PyQt5.QtWidgets.QLabel', 'QLabel', ([], {}), '()\n', (5115, 5117), False, 'from PyQt5.QtWidgets import QWidget, QSplashScreen, QApplication, QGroupBox, QPushButton, QLabel, QHBoxLayout, QVBoxLayout, QGridLayout\n'), ((5751, 5781), 'PyQt5.QtGui.QPixmap', 'QPixmap', (['"""../data/loading.jpg"""'], {}), "('../data/loading.jpg')\n", (5758, 5781), False, 
'from PyQt5.QtGui import QPixmap\n'), ((10544, 10565), 'imutils.resize', 'imutils.resize', (['frame'], {}), '(frame)\n', (10558, 10565), False, 'import imutils\n'), ((10588, 10599), 'time.time', 'time.time', ([], {}), '()\n', (10597, 10599), False, 'import time\n'), ((10754, 10765), 'time.time', 'time.time', ([], {}), '()\n', (10763, 10765), False, 'import time\n'), ((11997, 12012), 'time.sleep', 'time.sleep', (['(0.4)'], {}), '(0.4)\n', (12007, 12012), False, 'import time\n'), ((16559, 16574), 'numpy.sqrt', 'np.sqrt', (['x.size'], {}), '(x.size)\n', (16566, 16574), True, 'import numpy as np\n'), ((18968, 19009), 'cv2.cvtColor', 'cv2.cvtColor', (['crop_img', 'cv2.COLOR_BGR2RGB'], {}), '(crop_img, cv2.COLOR_BGR2RGB)\n', (18980, 19009), False, 'import cv2\n'), ((19361, 19396), 'imutils.resize', 'imutils.resize', (['crop_img'], {'width': '(100)'}), '(crop_img, width=100)\n', (19375, 19396), False, 'import imutils\n'), ((20095, 20130), 'imutils.resize', 'imutils.resize', (['crop_img'], {'width': '(160)'}), '(crop_img, width=160)\n', (20109, 20130), False, 'import imutils\n'), ((20964, 20986), 'random.randint', 'random.randint', (['(70)', '(96)'], {}), '(70, 96)\n', (20978, 20986), False, 'import random\n'), ((7213, 7223), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (7221, 7223), True, 'import tensorflow as tf\n'), ((7389, 7411), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (7409, 7411), True, 'import tensorflow as tf\n'), ((7472, 7494), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (7492, 7494), True, 'import tensorflow as tf\n'), ((7573, 7595), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (7593, 7595), True, 'import tensorflow as tf\n'), ((20373, 20402), 'PyQt5.QtGui.QPixmap.fromImage', 'QPixmap.fromImage', (['temp_image'], {}), '(temp_image)\n', (20390, 20402), False, 'from PyQt5.QtGui import QPixmap\n'), ((21432, 21467), 'imutils.resize', 'imutils.resize', (['crop_img'], 
{'width': '(160)'}), '(crop_img, width=160)\n', (21446, 21467), False, 'import imutils\n'), ((21577, 21612), 'imutils.resize', 'imutils.resize', (['crop_img'], {'width': '(100)'}), '(crop_img, width=100)\n', (21591, 21612), False, 'import imutils\n'), ((9051, 9098), 'PyQt5.QtGui.QPixmap', 'QPixmap', (["(dir + '/' + file + '/' + file + '.png')"], {}), "(dir + '/' + file + '/' + file + '.png')\n", (9058, 9098), False, 'from PyQt5.QtGui import QPixmap\n'), ((11198, 11227), 'PyQt5.QtGui.QPixmap.fromImage', 'QPixmap.fromImage', (['temp_image'], {}), '(temp_image)\n', (11215, 11227), False, 'from PyQt5.QtGui import QPixmap\n'), ((19571, 19600), 'PyQt5.QtGui.QPixmap.fromImage', 'QPixmap.fromImage', (['temp_image'], {}), '(temp_image)\n', (19588, 19600), False, 'from PyQt5.QtGui import QPixmap\n'), ((21799, 21828), 'PyQt5.QtGui.QPixmap.fromImage', 'QPixmap.fromImage', (['temp_image'], {}), '(temp_image)\n', (21816, 21828), False, 'from PyQt5.QtGui import QPixmap\n')] |
#!/usr/bin/env python
# license removed for brevity
import rospy
from std_msgs.msg import String
from visualization_msgs.msg import Marker
from sensor_msgs.msg import LaserScan
from geometry_msgs.msg import Point
import numpy as np
class LaserDownSample:
    """Republishes nearby LaserScan returns as an RViz SPHERE_LIST Marker.

    Subscribes to 'scan'; every scan's returns closer than ``max_range``
    are converted to Cartesian points and published on '/marker'.
    """
    def __init__(self, max_range=10.0):
        # Returns at or beyond this distance are dropped (was a hard-coded 10).
        self.max_range = max_range
        self.pub = rospy.Publisher('/marker', Marker, queue_size=10)
        self.sub = rospy.Subscriber('scan', LaserScan, self.callback)
    def callback(self, laser):
        """Convert one LaserScan message into a Marker and publish it."""
        angle_vector = np.arange(laser.angle_min, laser.angle_max, laser.angle_increment)
        marker = Marker()
        # Set the header unconditionally so the marker is stamped/framed even
        # when every return is filtered out (originally set only inside the loop).
        marker.header = laser.header
        # zip() guards against np.arange yielding one element more or fewer
        # than len(laser.ranges) due to floating-point endpoint behaviour.
        for theta, d in zip(angle_vector, laser.ranges):
            if d < self.max_range:
                point = Point()
                point.x = d * np.cos(theta)
                point.y = d * np.sin(theta)
                point.z = 0
                marker.points.append(point)
        marker.color.r = 1.0
        marker.color.g = 0.1
        marker.color.b = 0.1
        marker.color.a = 1.0
        marker.scale.x = 0.1
        marker.scale.y = 0.1
        marker.scale.z = 0.1
        marker.pose.orientation.w = 1
        marker.type = marker.SPHERE_LIST
        marker.ns = 'laser'
        self.pub.publish(marker)
if __name__ == '__main__':
    try:
        # Register the node; anonymous=True allows multiple instances.
        rospy.init_node('LaserDownSample', anonymous=True)
        laser = LaserDownSample()
        # Block here; callbacks run until shutdown.
        rospy.spin()
    except rospy.ROSInterruptException:
pass | [
"visualization_msgs.msg.Marker",
"rospy.Subscriber",
"rospy.init_node",
"geometry_msgs.msg.Point",
"rospy.spin",
"numpy.cos",
"numpy.sin",
"rospy.Publisher",
"numpy.arange"
] | [((299, 348), 'rospy.Publisher', 'rospy.Publisher', (['"""/marker"""', 'Marker'], {'queue_size': '(10)'}), "('/marker', Marker, queue_size=10)\n", (314, 348), False, 'import rospy\n'), ((368, 418), 'rospy.Subscriber', 'rospy.Subscriber', (['"""scan"""', 'LaserScan', 'self.callback'], {}), "('scan', LaserScan, self.callback)\n", (384, 418), False, 'import rospy\n'), ((482, 548), 'numpy.arange', 'np.arange', (['laser.angle_min', 'laser.angle_max', 'laser.angle_increment'], {}), '(laser.angle_min, laser.angle_max, laser.angle_increment)\n', (491, 548), True, 'import numpy as np\n'), ((566, 574), 'visualization_msgs.msg.Marker', 'Marker', ([], {}), '()\n', (572, 574), False, 'from visualization_msgs.msg import Marker\n'), ((1355, 1405), 'rospy.init_node', 'rospy.init_node', (['"""LaserDownSample"""'], {'anonymous': '(True)'}), "('LaserDownSample', anonymous=True)\n", (1370, 1405), False, 'import rospy\n'), ((1448, 1460), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (1458, 1460), False, 'import rospy\n'), ((697, 704), 'geometry_msgs.msg.Point', 'Point', ([], {}), '()\n', (702, 704), False, 'from geometry_msgs.msg import Point\n'), ((775, 788), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (781, 788), True, 'import numpy as np\n'), ((819, 832), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (825, 832), True, 'import numpy as np\n')] |
# Copyright 2019-2021 Canaan Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""System test: test conv2d act"""
# pylint: disable=invalid-name, unused-argument, import-outside-toplevel
import pytest
import os
import tensorflow as tf
import numpy as np
import sys
from tflite_test_runner import TfliteTestRunner
def _make_module(n, i_channels, i_size, k_size, o_channels, strides, padding, dilations, act):
    """Build a tf.Module computing conv2d + bias + the given activation.

    Args:
        n: batch size.
        i_channels: input channel count.
        i_size: [height, width] of the input.
        k_size: [kh, kw] of the convolution kernel.
        o_channels: output channel count.
        strides, padding, dilations: forwarded to tf.nn.conv2d.
        act: one of 'relu', 'relu6', 'leaky'; any other value applies no activation.
    """
    class Conv2DActModule(tf.Module):
        def __init__(self):
            # BUG FIX: the original called super(Conv2DActModule).__init__(),
            # which initializes an *unbound* super object and never runs
            # tf.Module.__init__ (so the module's base-class setup is skipped).
            super().__init__()
            # Random weights/bias, shifted to be centred around zero.
            self.w = tf.constant(np.random.rand(
                *k_size, i_channels, o_channels).astype(np.float32) - 0.5)
            self.b = tf.constant(np.random.rand(o_channels).astype(np.float32) - 0.5)

        @tf.function(input_signature=[tf.TensorSpec([n, *i_size, i_channels], tf.float32)])
        def __call__(self, x):
            out = tf.nn.conv2d(x, self.w, strides, padding,
                               dilations=dilations) + self.b
            if act == 'relu':
                out = tf.nn.relu(out)
            elif act == 'relu6':
                out = tf.nn.relu6(out)
            elif act == 'leaky':
                out = tf.nn.leaky_relu(out)
            return out
    return Conv2DActModule()
# Parameter grids: pytest runs test_conv2d_act once for every combination below.
# Batch sizes.
n = [
    1
]
# Input channel counts.
i_channels = [
    1,
    16
]
# Input spatial sizes [height, width].
i_sizes = [
    [33, 65]
]
# Convolution kernel sizes [kh, kw].
k_sizes = [
    [3, 3]
]
# Output channel counts.
o_channels = [
    1,
    8
]
# Convolution strides.
strides = [
    [1, 1],
    [1, 3],
]
# Padding modes.
paddings = [
    'SAME',
    'VALID'
]
# Dilation rates.
dilations = [
    [1, 1]
]
# Activation functions appended after the convolution.
acts = [
    'relu',
    'relu6',
    'leaky'
]
@pytest.mark.parametrize('n', n)
@pytest.mark.parametrize('i_channels', i_channels)
@pytest.mark.parametrize('i_size', i_sizes)
@pytest.mark.parametrize('k_size', k_sizes)
@pytest.mark.parametrize('o_channels', o_channels)
@pytest.mark.parametrize('strides', strides)
@pytest.mark.parametrize('padding', paddings)
@pytest.mark.parametrize('dilations', dilations)
@pytest.mark.parametrize('act', acts)
def test_conv2d_act(n, i_channels, i_size, k_size, o_channels, strides, padding, dilations, act, request):
    """Round-trip a conv2d+activation module through TFLite and compare outputs."""
    # Guard clause: with VALID padding, a kernel larger than the input in
    # either dimension produces no output positions, so skip that combination.
    if padding == 'VALID' and (k_size[0] > i_size[0] or k_size[1] > i_size[1]):
        return
    module = _make_module(n, i_channels, i_size, k_size, o_channels,
                          strides, padding, dilations, act)
    runner = TfliteTestRunner(request.node.name)
    model_file = runner.from_tensorflow(module)
    runner.run(model_file)
if __name__ == "__main__":
    # Allow running this test module directly: `python test_conv2d_act.py`.
    pytest.main(['-vv', 'test_conv2d_act.py'])
| [
"tensorflow.nn.conv2d",
"numpy.random.rand",
"tensorflow.nn.relu",
"tflite_test_runner.TfliteTestRunner",
"tensorflow.nn.leaky_relu",
"pytest.main",
"tensorflow.TensorSpec",
"pytest.mark.parametrize",
"tensorflow.nn.relu6"
] | [((2040, 2071), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""n"""', 'n'], {}), "('n', n)\n", (2063, 2071), False, 'import pytest\n'), ((2073, 2122), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""i_channels"""', 'i_channels'], {}), "('i_channels', i_channels)\n", (2096, 2122), False, 'import pytest\n'), ((2124, 2166), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""i_size"""', 'i_sizes'], {}), "('i_size', i_sizes)\n", (2147, 2166), False, 'import pytest\n'), ((2168, 2210), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""k_size"""', 'k_sizes'], {}), "('k_size', k_sizes)\n", (2191, 2210), False, 'import pytest\n'), ((2212, 2261), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""o_channels"""', 'o_channels'], {}), "('o_channels', o_channels)\n", (2235, 2261), False, 'import pytest\n'), ((2263, 2306), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""strides"""', 'strides'], {}), "('strides', strides)\n", (2286, 2306), False, 'import pytest\n'), ((2308, 2352), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""padding"""', 'paddings'], {}), "('padding', paddings)\n", (2331, 2352), False, 'import pytest\n'), ((2354, 2401), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dilations"""', 'dilations'], {}), "('dilations', dilations)\n", (2377, 2401), False, 'import pytest\n'), ((2403, 2439), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""act"""', 'acts'], {}), "('act', acts)\n", (2426, 2439), False, 'import pytest\n'), ((2936, 2978), 'pytest.main', 'pytest.main', (["['-vv', 'test_conv2d_act.py']"], {}), "(['-vv', 'test_conv2d_act.py'])\n", (2947, 2978), False, 'import pytest\n'), ((2784, 2819), 'tflite_test_runner.TfliteTestRunner', 'TfliteTestRunner', (['request.node.name'], {}), '(request.node.name)\n', (2800, 2819), False, 'from tflite_test_runner import TfliteTestRunner\n'), ((1376, 1438), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['x', 'self.w', 'strides', 
'padding'], {'dilations': 'dilations'}), '(x, self.w, strides, padding, dilations=dilations)\n', (1388, 1438), True, 'import tensorflow as tf\n'), ((1531, 1546), 'tensorflow.nn.relu', 'tf.nn.relu', (['out'], {}), '(out)\n', (1541, 1546), True, 'import tensorflow as tf\n'), ((1602, 1618), 'tensorflow.nn.relu6', 'tf.nn.relu6', (['out'], {}), '(out)\n', (1613, 1618), True, 'import tensorflow as tf\n'), ((1273, 1324), 'tensorflow.TensorSpec', 'tf.TensorSpec', (['[n, *i_size, i_channels]', 'tf.float32'], {}), '([n, *i_size, i_channels], tf.float32)\n', (1286, 1324), True, 'import tensorflow as tf\n'), ((1674, 1695), 'tensorflow.nn.leaky_relu', 'tf.nn.leaky_relu', (['out'], {}), '(out)\n', (1690, 1695), True, 'import tensorflow as tf\n'), ((1057, 1104), 'numpy.random.rand', 'np.random.rand', (['*k_size', 'i_channels', 'o_channels'], {}), '(*k_size, i_channels, o_channels)\n', (1071, 1104), True, 'import numpy as np\n'), ((1181, 1207), 'numpy.random.rand', 'np.random.rand', (['o_channels'], {}), '(o_channels)\n', (1195, 1207), True, 'import numpy as np\n')] |
# Copyright 2022 The GPflow Contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import (
Any,
Callable,
DefaultDict,
Generic,
Iterator,
List,
Set,
Tuple,
Type,
TypeVar,
cast,
)
import numpy as np
import pytest
import tensorflow as tf
from _pytest.fixtures import SubRequest
import gpflow
from gpflow.base import RegressionData
from gpflow.config import default_float
from gpflow.inducing_variables import InducingPoints, InducingVariables
from gpflow.kernels import Kernel, Matern52
from gpflow.likelihoods import Exponential, Likelihood
from gpflow.models import GPR, SGPR, SVGP, VGP, GPModel, training_loss_closure
from gpflow.models.vgp import update_vgp_data
from gpflow.posteriors import AbstractPosterior, PrecomputeCacheType
# Model type produced by a factory; covariant so factories building GPModel
# subclasses are assignable where a GPModel factory is expected.
_M = TypeVar("_M", bound=GPModel, covariant=True)
# A factory takes regression data and builds a model of type _M.
_CreateModel = Callable[[RegressionData], _M]
# I'd like to make this a `dataclass`, but mypy get confused about `create_model` being a function
# member, but that doesn't take `self`.
class _ModelFactory(Generic[_M]):
    """Pairs a model-construction callable with its comparison tolerances.

    ``create_model`` builds a model of type ``_M`` from regression data;
    ``multi_output`` records whether that model is multi-output; ``atol`` and
    ``rtol`` are the tolerances used when comparing its predictions.
    (Kept a plain class rather than a dataclass: mypy mishandles the bare
    function member ``create_model``.)
    """

    def __init__(
        self, create_model: _CreateModel[_M], multi_output: bool, atol: float, rtol: float
    ) -> None:
        self.atol = atol
        self.rtol = rtol
        self.multi_output = multi_output
        self.create_model = create_model
# Registry populated by the model_factory() decorator below.
_MODEL_FACTORIES: List[_ModelFactory[Any]] = []

# This exists to make it easy to disable tf.function, for debugging.
_COMPILE = True
# Number of optimisation iterations used when training test models.
_MAXITER = 10
# Default comparison tolerances; individual factories may override them.
_DEFAULT_ATOL = 1e-10
_DEFAULT_RTOL = 1e-7
@pytest.fixture(name="register_posterior_bo_integration_test")
def _register_posterior_bo_integration_test(
    request: SubRequest,
    tested_posteriors: DefaultDict[str, Set[Type[AbstractPosterior]]],
) -> Callable[[AbstractPosterior], None]:
    """Return a callback that records which posterior class a test exercised.

    Registrations are keyed by the requesting test function's name.
    """

    def _record(posterior: AbstractPosterior) -> None:
        tested_posteriors[request.function.__name__].add(type(posterior))

    return _record
def model_factory(
    multi_output: bool = False, atol: float = _DEFAULT_ATOL, rtol: float = _DEFAULT_RTOL
) -> Callable[[_CreateModel[_M]], _ModelFactory[_M]]:
    """Decorator registering a model-construction function in `_MODEL_FACTORIES`.

    The decorated function is wrapped in a `_ModelFactory` carrying the given
    tolerances, appended to the global registry, and the factory is returned
    in place of the original function.
    """

    def _wrap(create_model: _CreateModel[_M]) -> _ModelFactory[_M]:
        factory = _ModelFactory(create_model, multi_output, atol, rtol)
        _MODEL_FACTORIES.append(factory)
        return factory

    return _wrap
def create_kernel() -> Kernel:
    # Matern 5/2 is the single kernel flavour shared by all model factories below.
    return Matern52()
def create_likelihood() -> Likelihood:
    # Exponential (non-Gaussian) likelihood shared by the variational models below.
    return Exponential()
def create_inducing_points(data: RegressionData) -> InducingPoints:
    """Build 5 random inducing points matching the feature dimension of ``data``."""
    num_inducing = 5
    num_features = data[0].shape[1]
    rng = np.random.default_rng(20220208)
    locations = tf.constant(rng.random((num_inducing, num_features)))
    return InducingPoints(locations)
def create_q(
    inducing_variable: InducingVariables, *, row_scale: int = 1, column_scale: int = 1
) -> Tuple[bool, tf.Tensor, tf.Tensor]:
    """Create random variational parameters sized to ``inducing_variable``.

    Returns ``(q_diag, q_mu, q_sqrt)`` where ``q_diag`` is always True, and the
    two tensors have shape ``(row_scale * num_inducing, column_scale)``.
    """
    rng = np.random.default_rng(20220133)
    n_rows = row_scale * inducing_variable.num_inducing
    # Draw order matters for seeded reproducibility: q_mu first, then q_sqrt.
    q_mu = tf.constant(rng.random((n_rows, column_scale)))
    # Squaring keeps the (diagonal) scale strictly positive.
    q_sqrt = tf.constant(rng.random((n_rows, column_scale))) ** 2
    return True, q_mu, q_sqrt
@model_factory(rtol=1e-3)
def create_gpr(data: RegressionData) -> GPR:
    # Exact GP regression with the shared Matern 5/2 kernel.
    return GPR(data=data, kernel=create_kernel())
@model_factory(rtol=1e-4)
def create_sgpr(data: RegressionData) -> SGPR:
    # Sparse GP regression; inducing points come from create_inducing_points.
    return SGPR(data=data, kernel=create_kernel(), inducing_variable=create_inducing_points(data))
@model_factory(rtol=5e-3)
def create_vgp(data: RegressionData) -> VGP:
    # Variational GP with the shared Exponential likelihood.
    return VGP(data=data, kernel=create_kernel(), likelihood=create_likelihood())
@model_factory()
def create_svgp__independent_single_output(data: RegressionData) -> SVGP:
    """SVGP with a single-output kernel and plain inducing points."""
    iv = create_inducing_points(data)
    q_diag, q_mu, q_sqrt = create_q(iv)
    return SVGP(
        kernel=create_kernel(),
        likelihood=create_likelihood(),
        inducing_variable=iv,
        q_diag=q_diag,
        q_mu=q_mu,
        q_sqrt=q_sqrt,
    )
@model_factory(multi_output=True)
def create_svgp__fully_correlated_multi_output(data: RegressionData) -> SVGP:
    """Create a multi-output SVGP with a fully correlated variational posterior."""
    n_outputs = data[1].shape[1]
    shared_kernel = gpflow.kernels.SharedIndependent(create_kernel(), output_dim=n_outputs)
    inducing = create_inducing_points(data)
    # row_scale stacks all outputs into the rows of q_mu / q_sqrt.
    q_diag, q_mu, q_sqrt = create_q(inducing, row_scale=n_outputs)
    return SVGP(
        kernel=shared_kernel,
        likelihood=create_likelihood(),
        inducing_variable=inducing,
        q_diag=q_diag,
        q_mu=q_mu,
        q_sqrt=q_sqrt,
    )
@model_factory(multi_output=True)
def create_svgp__independent_multi_output(data: RegressionData) -> SVGP:
    """Create a multi-output SVGP with independent outputs and shared inducing points."""
    n_outputs = data[1].shape[1]
    shared_kernel = gpflow.kernels.SharedIndependent(create_kernel(), output_dim=n_outputs)
    inducing = gpflow.inducing_variables.SharedIndependentInducingVariables(
        create_inducing_points(data)
    )
    # column_scale gives each output its own column in q_mu / q_sqrt.
    q_diag, q_mu, q_sqrt = create_q(inducing, column_scale=n_outputs)
    return SVGP(
        kernel=shared_kernel,
        likelihood=create_likelihood(),
        inducing_variable=inducing,
        q_diag=q_diag,
        q_mu=q_mu,
        q_sqrt=q_sqrt,
    )
@model_factory(multi_output=True)
def create_svgp__fallback_independent_latent_posterior(data: RegressionData) -> SVGP:
    """Create a multi-output SVGP exercising the fallback separate-independent inducing path."""
    n_outputs = data[1].shape[1]
    generator = np.random.default_rng(20220131)  # fixed seed: deterministic mixing matrix
    mixing_kernel = gpflow.kernels.LinearCoregionalization(
        [create_kernel()],
        W=tf.constant(generator.standard_normal((n_outputs, 1))),
    )
    inducing = gpflow.inducing_variables.FallbackSeparateIndependentInducingVariables(
        [create_inducing_points(data)]
    )
    q_diag, q_mu, q_sqrt = create_q(inducing)
    return SVGP(
        kernel=mixing_kernel,
        likelihood=create_likelihood(),
        inducing_variable=inducing,
        q_diag=q_diag,
        q_mu=q_mu,
        q_sqrt=q_sqrt,
    )
@model_factory(multi_output=True)
def create_svgp__linear_coregionalization(data: RegressionData) -> SVGP:
    """Create a multi-output SVGP using a linear-coregionalization kernel."""
    n_outputs = data[1].shape[1]
    generator = np.random.default_rng(20220131)  # fixed seed: deterministic mixing matrix
    mixing_kernel = gpflow.kernels.LinearCoregionalization(
        [create_kernel()], W=tf.constant(generator.standard_normal((n_outputs, 1)))
    )
    inducing = gpflow.inducing_variables.SharedIndependentInducingVariables(
        create_inducing_points(data)
    )
    q_diag, q_mu, q_sqrt = create_q(inducing)
    return SVGP(
        kernel=mixing_kernel,
        likelihood=create_likelihood(),
        inducing_variable=inducing,
        q_diag=q_diag,
        q_mu=q_mu,
        q_sqrt=q_sqrt,
    )
@pytest.fixture(params=_MODEL_FACTORIES)
def _model_factory(request: SubRequest) -> _ModelFactory[Any]:
    """Parametrised fixture that yields every registered model factory in turn."""
    factory = cast(_ModelFactory[Any], request.param)
    return factory
@pytest.fixture
def _f_minimum(_model_factory: _ModelFactory[Any]) -> tf.Tensor:
    """Minimum location(s) of the synthetic objective, one row per output."""
    if _model_factory.multi_output:
        return tf.constant(
            [
                [0.2, 0.4],
                [0.4, 0.6],
                [0.6, 0.8],
            ],
            dtype=default_float(),
        )
    return tf.constant([[0.3, 0.5]], dtype=default_float())
@pytest.fixture
def _f(_f_minimum: tf.Tensor) -> Callable[[tf.Tensor], tf.Tensor]:
    """Synthetic objective: squared distance from each row of `X` to each minimum."""
    def objective(X: tf.Tensor) -> tf.Tensor:
        delta = X[:, None, :] - _f_minimum[None, :, :]
        return tf.reduce_sum(delta ** 2, axis=-1)

    return objective
@pytest.fixture
def _data(
    _f: Callable[[tf.Tensor], tf.Tensor], _f_minimum: tf.Tensor
) -> Tuple[tf.Variable, tf.Variable]:
    """Initial (X, Y) training data as dynamically-shaped, non-trainable variables."""
    n_initial_data = 3
    n_outputs, n_features = _f_minimum.shape
    generator = np.random.default_rng(20220126)  # fixed seed: deterministic initial data
    # shape=[None, ...] leaves the leading dimension unknown so rows can be
    # appended later via `assign`.
    X = tf.Variable(
        generator.random((n_initial_data, n_features)),
        shape=[None, n_features],
        dtype=default_float(),
        trainable=False,
    )
    Y = tf.Variable(
        _f(X),
        shape=[None, n_outputs],
        dtype=default_float(),
        trainable=False,
    )
    return X, Y
@pytest.fixture
def _extend_data(
    _data: Tuple[tf.Variable, tf.Variable], _f: Callable[[tf.Tensor], tf.Tensor]
) -> Callable[[GPModel], Iterator[int]]:
    """Return a generator function that grows the data set one row at a time."""
    n_iterations = 3
    generator = np.random.default_rng(20220127)  # fixed seed: deterministic new rows
    X, Y = _data
    n_features = X.shape[1]

    def extend(model: GPModel) -> Iterator[int]:
        for step in range(n_iterations):
            X_new = tf.constant(generator.random((1, n_features)))
            Y_new = _f(X_new)
            X_all = tf.concat([X, X_new], axis=0)
            Y_all = tf.concat([Y, Y_new], axis=0)
            if isinstance(model, VGP):
                # VGP's data must be replaced through the dedicated helper.
                update_vgp_data(model, (X_all, Y_all))
            else:
                X.assign(X_all)
                Y.assign(Y_all)
            yield step

    return extend
@pytest.fixture
def _X_new(_data: Tuple[tf.Variable, tf.Variable]) -> tf.Tensor:
    """Three fresh query points with the same feature dimension as the data."""
    generator = np.random.default_rng(20220128)  # fixed seed: deterministic queries
    X, _ = _data
    return tf.constant(generator.random((3, X.shape[1])))
@pytest.fixture
def _optimize(_data: Tuple[tf.Variable, tf.Variable]) -> Callable[[GPModel], None]:
    """Return a function that fits a model to the current data with Scipy/BFGS."""
    def run(model: GPModel) -> None:
        loss = training_loss_closure(model, _data, compile=_COMPILE)
        gpflow.optimizers.Scipy().minimize(
            loss,
            variables=model.trainable_variables,
            options=dict(maxiter=_MAXITER),
            method="BFGS",
            compile=_COMPILE,
        )

    return run
def test_posterior_bo_integration__predict_f(
    register_posterior_bo_integration_test: Callable[[AbstractPosterior], None],
    _model_factory: _ModelFactory[Any],
    _data: Tuple[tf.Variable, tf.Variable],
    _extend_data: Callable[[GPModel], Iterator[int]],
    _X_new: tf.Tensor,
) -> None:
    """
    Check that data added incrementally is correctly reflected in `predict_f`.

    A posterior with a VARIABLE precompute cache is created once; the training
    data is then grown row by row, and after each extension the (possibly
    tf.function-compiled) `predict_f` must produce predictions of the right
    shape.  Finally, the last compiled predictions are compared against a
    freshly built eager model on the fully extended data.
    """
    _X, Y = _data
    n_rows_new = _X_new.shape[0]
    n_outputs = Y.shape[1]
    model = _model_factory.create_model(_data)
    # VARIABLE cache: can be refreshed in-place via `update_cache()` after the
    # data variables change — presumably without retracing; see GPflow docs.
    posterior = model.posterior(PrecomputeCacheType.VARIABLE)
    register_posterior_bo_integration_test(posterior)
    predict_f = posterior.predict_f
    if _COMPILE:
        predict_f = tf.function(predict_f)
    for _ in _extend_data(model):
        # Refresh the cache so the posterior reflects the newly added data.
        posterior.update_cache()
        compiled_mean, compiled_var = predict_f(_X_new)
        np.testing.assert_equal((n_rows_new, n_outputs), compiled_mean.shape)
        np.testing.assert_equal((n_rows_new, n_outputs), compiled_var.shape)
    # Reference: an eager model built directly on the final data must agree
    # with the last compiled predictions, within the factory's tolerances.
    eager_model = _model_factory.create_model(_data)
    eager_mean, eager_var = eager_model.predict_f(_X_new)
    np.testing.assert_allclose(
        eager_mean, compiled_mean, rtol=_model_factory.rtol, atol=_model_factory.atol
    )
    np.testing.assert_allclose(
        eager_var, compiled_var, rtol=_model_factory.rtol, atol=_model_factory.atol
    )
def test_posterior_bo_integration__optimization(
    register_posterior_bo_integration_test: Callable[[AbstractPosterior], None],
    _model_factory: _ModelFactory[Any],
    _data: Tuple[tf.Variable, tf.Variable],
    _extend_data: Callable[[GPModel], Iterator[int]],
    _X_new: tf.Tensor,
    _optimize: Callable[[GPModel], None],
) -> None:
    """
    Check that data added incrementally is considered when optimizing a model.

    The cached/compiled model and the eager reference model are each trained
    once on the fully extended data, then their predictions at `_X_new` are
    compared within the factory's tolerances.
    """
    _X, Y = _data
    n_rows_new = _X_new.shape[0]
    n_outputs = Y.shape[1]
    model = _model_factory.create_model(_data)
    posterior = model.posterior(PrecomputeCacheType.VARIABLE)
    register_posterior_bo_integration_test(posterior)
    predict_f = posterior.predict_f
    if _COMPILE:
        predict_f = tf.function(predict_f)
    # Add all the data first, and then `optimize`, so that both models are optimized the same number
    # of times and with the same data, so they converge to the same result.
    for _ in _extend_data(model):
        pass
    _optimize(model)
    # Refresh the cache after optimisation so predictions use the fitted state.
    posterior.update_cache()
    compiled_mean, compiled_var = predict_f(_X_new)
    np.testing.assert_equal((n_rows_new, n_outputs), compiled_mean.shape)
    np.testing.assert_equal((n_rows_new, n_outputs), compiled_var.shape)
    # Reference model: built eagerly on the final data, optimised identically.
    eager_model = _model_factory.create_model(_data)
    _optimize(eager_model)
    eager_mean, eager_var = eager_model.predict_f(_X_new)
    np.testing.assert_allclose(
        eager_mean, compiled_mean, rtol=_model_factory.rtol, atol=_model_factory.atol
    )
    np.testing.assert_allclose(
        eager_var, compiled_var, rtol=_model_factory.rtol, atol=_model_factory.atol
    )
| [
"gpflow.kernels.Matern52",
"numpy.random.default_rng",
"gpflow.likelihoods.Exponential",
"numpy.testing.assert_equal",
"gpflow.optimizers.Scipy",
"tensorflow.reduce_sum",
"numpy.testing.assert_allclose",
"tensorflow.concat",
"gpflow.models.vgp.update_vgp_data",
"tensorflow.function",
"gpflow.mod... | [((1331, 1375), 'typing.TypeVar', 'TypeVar', (['"""_M"""'], {'bound': 'GPModel', 'covariant': '(True)'}), "('_M', bound=GPModel, covariant=True)\n", (1338, 1375), False, 'from typing import Any, Callable, DefaultDict, Generic, Iterator, List, Set, Tuple, Type, TypeVar, cast\n'), ((2082, 2143), 'pytest.fixture', 'pytest.fixture', ([], {'name': '"""register_posterior_bo_integration_test"""'}), "(name='register_posterior_bo_integration_test')\n", (2096, 2143), False, 'import pytest\n'), ((7383, 7422), 'pytest.fixture', 'pytest.fixture', ([], {'params': '_MODEL_FACTORIES'}), '(params=_MODEL_FACTORIES)\n', (7397, 7422), False, 'import pytest\n'), ((3094, 3104), 'gpflow.kernels.Matern52', 'Matern52', ([], {}), '()\n', (3102, 3104), False, 'from gpflow.kernels import Kernel, Matern52\n'), ((3157, 3170), 'gpflow.likelihoods.Exponential', 'Exponential', ([], {}), '()\n', (3168, 3170), False, 'from gpflow.likelihoods import Exponential, Likelihood\n'), ((3311, 3342), 'numpy.random.default_rng', 'np.random.default_rng', (['(20220208)'], {}), '(20220208)\n', (3332, 3342), True, 'import numpy as np\n'), ((3419, 3436), 'gpflow.inducing_variables.InducingPoints', 'InducingPoints', (['Z'], {}), '(Z)\n', (3433, 3436), False, 'from gpflow.inducing_variables import InducingPoints, InducingVariables\n'), ((3645, 3676), 'numpy.random.default_rng', 'np.random.default_rng', (['(20220133)'], {}), '(20220133)\n', (3666, 3676), True, 'import numpy as np\n'), ((6109, 6140), 'numpy.random.default_rng', 'np.random.default_rng', (['(20220131)'], {}), '(20220131)\n', (6130, 6140), True, 'import numpy as np\n'), ((6831, 6862), 'numpy.random.default_rng', 'np.random.default_rng', (['(20220131)'], {}), '(20220131)\n', (6852, 6862), True, 'import numpy as np\n'), ((7497, 7536), 'typing.cast', 'cast', (['_ModelFactory[Any]', 'request.param'], {}), '(_ModelFactory[Any], request.param)\n', (7501, 7536), False, 'from typing import Any, Callable, DefaultDict, Generic, Iterator, List, 
Set, Tuple, Type, TypeVar, cast\n'), ((8391, 8422), 'numpy.random.default_rng', 'np.random.default_rng', (['(20220126)'], {}), '(20220126)\n', (8412, 8422), True, 'import numpy as np\n'), ((8927, 8958), 'numpy.random.default_rng', 'np.random.default_rng', (['(20220127)'], {}), '(20220127)\n', (8948, 8958), True, 'import numpy as np\n'), ((9582, 9613), 'numpy.random.default_rng', 'np.random.default_rng', (['(20220128)'], {}), '(20220128)\n', (9603, 9613), True, 'import numpy as np\n'), ((12666, 12735), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['(n_rows_new, n_outputs)', 'compiled_mean.shape'], {}), '((n_rows_new, n_outputs), compiled_mean.shape)\n', (12689, 12735), True, 'import numpy as np\n'), ((12740, 12808), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['(n_rows_new, n_outputs)', 'compiled_var.shape'], {}), '((n_rows_new, n_outputs), compiled_var.shape)\n', (12763, 12808), True, 'import numpy as np\n'), ((12953, 13063), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['eager_mean', 'compiled_mean'], {'rtol': '_model_factory.rtol', 'atol': '_model_factory.atol'}), '(eager_mean, compiled_mean, rtol=_model_factory.\n rtol, atol=_model_factory.atol)\n', (12979, 13063), True, 'import numpy as np\n'), ((13077, 13185), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['eager_var', 'compiled_var'], {'rtol': '_model_factory.rtol', 'atol': '_model_factory.atol'}), '(eager_var, compiled_var, rtol=_model_factory.\n rtol, atol=_model_factory.atol)\n', (13103, 13185), True, 'import numpy as np\n'), ((8136, 8166), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['err_sq'], {'axis': '(-1)'}), '(err_sq, axis=-1)\n', (8149, 8166), True, 'import tensorflow as tf\n'), ((10859, 10881), 'tensorflow.function', 'tf.function', (['predict_f'], {}), '(predict_f)\n', (10870, 10881), True, 'import tensorflow as tf\n'), ((11015, 11084), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['(n_rows_new, n_outputs)', 
'compiled_mean.shape'], {}), '((n_rows_new, n_outputs), compiled_mean.shape)\n', (11038, 11084), True, 'import numpy as np\n'), ((11093, 11161), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['(n_rows_new, n_outputs)', 'compiled_var.shape'], {}), '((n_rows_new, n_outputs), compiled_var.shape)\n', (11116, 11161), True, 'import numpy as np\n'), ((11291, 11401), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['eager_mean', 'compiled_mean'], {'rtol': '_model_factory.rtol', 'atol': '_model_factory.atol'}), '(eager_mean, compiled_mean, rtol=_model_factory.\n rtol, atol=_model_factory.atol)\n', (11317, 11401), True, 'import numpy as np\n'), ((11427, 11535), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['eager_var', 'compiled_var'], {'rtol': '_model_factory.rtol', 'atol': '_model_factory.atol'}), '(eager_var, compiled_var, rtol=_model_factory.\n rtol, atol=_model_factory.atol)\n', (11453, 11535), True, 'import numpy as np\n'), ((12309, 12331), 'tensorflow.function', 'tf.function', (['predict_f'], {}), '(predict_f)\n', (12320, 12331), True, 'import tensorflow as tf\n'), ((8542, 8557), 'gpflow.config.default_float', 'default_float', ([], {}), '()\n', (8555, 8557), False, 'from gpflow.config import default_float\n'), ((8673, 8688), 'gpflow.config.default_float', 'default_float', ([], {}), '()\n', (8686, 8688), False, 'from gpflow.config import default_float\n'), ((9202, 9231), 'tensorflow.concat', 'tf.concat', (['[X, X_new]'], {'axis': '(0)'}), '([X, X_new], axis=0)\n', (9211, 9231), True, 'import tensorflow as tf\n'), ((9250, 9279), 'tensorflow.concat', 'tf.concat', (['[Y, Y_new]'], {'axis': '(0)'}), '([Y, Y_new], axis=0)\n', (9259, 9279), True, 'import tensorflow as tf\n'), ((9912, 9965), 'gpflow.models.training_loss_closure', 'training_loss_closure', (['model', '_data'], {'compile': '_COMPILE'}), '(model, _data, compile=_COMPILE)\n', (9933, 9965), False, 'from gpflow.models import GPR, SGPR, SVGP, VGP, GPModel, 
training_loss_closure\n'), ((7785, 7800), 'gpflow.config.default_float', 'default_float', ([], {}), '()\n', (7798, 7800), False, 'from gpflow.config import default_float\n'), ((7896, 7911), 'gpflow.config.default_float', 'default_float', ([], {}), '()\n', (7909, 7911), False, 'from gpflow.config import default_float\n'), ((9336, 9370), 'gpflow.models.vgp.update_vgp_data', 'update_vgp_data', (['model', '(X_i, Y_i)'], {}), '(model, (X_i, Y_i))\n', (9351, 9370), False, 'from gpflow.models.vgp import update_vgp_data\n'), ((9864, 9889), 'gpflow.optimizers.Scipy', 'gpflow.optimizers.Scipy', ([], {}), '()\n', (9887, 9889), False, 'import gpflow\n')] |
import numpy as np
from mushroom_rl.algorithms.policy_search import REINFORCE, GPOMDP, eNAC
from mushroom_rl.approximators.parametric import LinearApproximator
from mushroom_rl.approximators.regressor import Regressor
from mushroom_rl.core import Core
from mushroom_rl.environments import LQR
from mushroom_rl.policy import StateStdGaussianPolicy
from mushroom_rl.utils.dataset import compute_J
from mushroom_rl.utils.parameters import AdaptiveParameter
from tqdm import tqdm
"""
This script aims to replicate the experiments on the LQR MDP using policy
gradient algorithms.
"""
tqdm.monitor_interval = 0
def experiment(alg, n_epochs, n_iterations, ep_per_run):
    """Run one policy-gradient algorithm on a 1-D LQR problem.

    :param alg: agent class (REINFORCE, GPOMDP or eNAC).
    :param n_epochs: number of evaluation epochs.
    :param n_iterations: fits per epoch.
    :param ep_per_run: episodes per fit / per evaluation.
    """
    np.random.seed()

    # MDP
    mdp = LQR.generate(dimensions=1)

    # Linear mean and linear state-dependent std for the Gaussian policy.
    mean_regressor = Regressor(LinearApproximator,
                              input_shape=mdp.info.observation_space.shape,
                              output_shape=mdp.info.action_space.shape)
    std_regressor = Regressor(LinearApproximator,
                             input_shape=mdp.info.observation_space.shape,
                             output_shape=mdp.info.action_space.shape)
    std_regressor.set_weights(np.ones(std_regressor.weights_size) * 2)
    policy = StateStdGaussianPolicy(mean_regressor, std_regressor)

    # Agent
    agent = alg(mdp.info, policy,
                learning_rate=AdaptiveParameter(value=.01))

    # Train
    core = Core(agent, mdp)

    def evaluate_and_report(prefix):
        # Evaluate the current policy and print its parameters and mean return.
        dataset = core.evaluate(n_episodes=ep_per_run)
        print('policy parameters: ', policy.get_weights())
        returns = compute_J(dataset, gamma=mdp.info.gamma)
        print(prefix + str(np.mean(returns)))

    evaluate_and_report('J at start : ')
    for epoch in range(n_epochs):
        core.learn(n_episodes=n_iterations * ep_per_run,
                   n_episodes_per_fit=ep_per_run)
        evaluate_and_report('J at iteration ' + str(epoch) + ': ')
if __name__ == '__main__':
    # Run every policy-gradient algorithm with the same budget.
    for algorithm in (REINFORCE, GPOMDP, eNAC):
        print(algorithm.__name__)
        experiment(algorithm, n_epochs=10, n_iterations=4, ep_per_run=100)
| [
"numpy.mean",
"mushroom_rl.environments.LQR.generate",
"numpy.ones",
"mushroom_rl.utils.parameters.AdaptiveParameter",
"mushroom_rl.utils.dataset.compute_J",
"mushroom_rl.approximators.regressor.Regressor",
"numpy.random.seed",
"mushroom_rl.core.Core",
"mushroom_rl.policy.StateStdGaussianPolicy"
] | [((674, 690), 'numpy.random.seed', 'np.random.seed', ([], {}), '()\n', (688, 690), True, 'import numpy as np\n'), ((712, 738), 'mushroom_rl.environments.LQR.generate', 'LQR.generate', ([], {'dimensions': '(1)'}), '(dimensions=1)\n', (724, 738), False, 'from mushroom_rl.environments import LQR\n'), ((759, 880), 'mushroom_rl.approximators.regressor.Regressor', 'Regressor', (['LinearApproximator'], {'input_shape': 'mdp.info.observation_space.shape', 'output_shape': 'mdp.info.action_space.shape'}), '(LinearApproximator, input_shape=mdp.info.observation_space.shape,\n output_shape=mdp.info.action_space.shape)\n', (768, 880), False, 'from mushroom_rl.approximators.regressor import Regressor\n'), ((948, 1069), 'mushroom_rl.approximators.regressor.Regressor', 'Regressor', (['LinearApproximator'], {'input_shape': 'mdp.info.observation_space.shape', 'output_shape': 'mdp.info.action_space.shape'}), '(LinearApproximator, input_shape=mdp.info.observation_space.shape,\n output_shape=mdp.info.action_space.shape)\n', (957, 1069), False, 'from mushroom_rl.approximators.regressor import Regressor\n'), ((1214, 1257), 'mushroom_rl.policy.StateStdGaussianPolicy', 'StateStdGaussianPolicy', (['approximator', 'sigma'], {}), '(approximator, sigma)\n', (1236, 1257), False, 'from mushroom_rl.policy import StateStdGaussianPolicy\n'), ((1291, 1320), 'mushroom_rl.utils.parameters.AdaptiveParameter', 'AdaptiveParameter', ([], {'value': '(0.01)'}), '(value=0.01)\n', (1308, 1320), False, 'from mushroom_rl.utils.parameters import AdaptiveParameter\n'), ((1455, 1471), 'mushroom_rl.core.Core', 'Core', (['agent', 'mdp'], {}), '(agent, mdp)\n', (1459, 1471), False, 'from mushroom_rl.core import Core\n'), ((1591, 1636), 'mushroom_rl.utils.dataset.compute_J', 'compute_J', (['dataset_eval'], {'gamma': 'mdp.info.gamma'}), '(dataset_eval, gamma=mdp.info.gamma)\n', (1600, 1636), False, 'from mushroom_rl.utils.dataset import compute_J\n'), ((1135, 1162), 'numpy.ones', 'np.ones', (['sigma.weights_size'], 
{}), '(sigma.weights_size)\n', (1142, 1162), True, 'import numpy as np\n'), ((1951, 1996), 'mushroom_rl.utils.dataset.compute_J', 'compute_J', (['dataset_eval'], {'gamma': 'mdp.info.gamma'}), '(dataset_eval, gamma=mdp.info.gamma)\n', (1960, 1996), False, 'from mushroom_rl.utils.dataset import compute_J\n'), ((1669, 1679), 'numpy.mean', 'np.mean', (['J'], {}), '(J)\n', (1676, 1679), True, 'import numpy as np\n'), ((2051, 2061), 'numpy.mean', 'np.mean', (['J'], {}), '(J)\n', (2058, 2061), True, 'import numpy as np\n')] |
import mmcv
import numpy as np
from numpy import random
from mmdet.core import poly2bbox
from mmdet.core.evaluation.bbox_overlaps import bbox_overlaps
from pycocotools.coco import maskUtils
import cv2
from functools import partial
import copy
def TuplePoly2Poly(poly):
    """Flatten four (x, y) corner points into one 8-element list.

    :param poly: sequence of four indexable points, e.g. from `cv2.boxPoints`.
    :return: [x1, y1, x2, y2, x3, y3, x4, y4]
    """
    return [poly[corner][axis] for corner in range(4) for axis in range(2)]
def mask2poly_single(binary_mask):
    """Convert one binary instance mask to a 4-point rotated-box polygon.

    The longest external contour of the mask is wrapped in a minimum-area
    rectangle whose corners are returned as a flat coordinate list.

    :param binary_mask: 2-D binary mask with non-zero pixels marking the instance.
    :return: flat list [x1, y1, x2, y2, x3, y3, x4, y4].
    :raises ValueError: if the mask contains no contour at all (empty mask).
    """
    # cv2.findContours returns (contours, hierarchy) in OpenCV 2.x/4.x but
    # (image, contours, hierarchy) in OpenCV 3.x; indexing [-2] works for all.
    contours = cv2.findContours(binary_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[-2]
    max_contour = max(contours, key=len)
    rect = cv2.minAreaRect(max_contour)
    poly = cv2.boxPoints(rect)
    poly = TuplePoly2Poly(poly)
    return poly
def mask2poly(binary_mask_list):
    """Convert a list of binary masks into a list of 4-point polygons."""
    return [mask2poly_single(mask) for mask in binary_mask_list]
def poly2mask_single(h, w, poly):
    """Rasterise one polygon into a binary (h, w) mask via COCO RLE utilities.

    :param h: mask height.
    :param w: mask width.
    :param poly: polygon in COCO format (list of flat coordinate lists).
    :return: decoded binary mask of shape (h, w).
    """
    # TODO: round-trip test (mask2poly -> poly2mask) and visualise the mask.
    encoded = maskUtils.frPyObjects(poly, h, w)
    merged = maskUtils.merge(encoded)
    return maskUtils.decode(merged)
def poly2mask(polys, h, w):
    """Rasterise a list of polygons into binary masks of size (h, w)."""
    # TODO: decide behaviour for empty `polys` (currently returns []).
    return [poly2mask_single(h, w, poly) for poly in polys]
def rotate_poly_single(h, w, new_h, new_w, rotate_matrix_T, poly):
    """Rotate one 8-coordinate polygon about the image centre.

    Coordinates are shifted to be centred on the original image, multiplied by
    the transposed rotation matrix, and re-centred on the rotated image.

    :param h: original image height.
    :param w: original image width.
    :param new_h: rotated image height.
    :param new_w: rotated image width.
    :param rotate_matrix_T: transposed 2x2 rotation matrix.
    :param poly: array-like of 8 values [x1, y1, ..., x4, y4].
    :return: rotated polygon as a flat Python list.
    """
    # Work on a copy: the previous version mutated the caller's array in place.
    poly = np.array(poly, copy=True)
    poly[::2] = poly[::2] - (w - 1) * 0.5
    poly[1::2] = poly[1::2] - (h - 1) * 0.5
    coords = poly.reshape(4, 2)
    new_coords = np.matmul(coords, rotate_matrix_T) + np.array([(new_w - 1) * 0.5, (new_h - 1) * 0.5])
    return new_coords.reshape(-1).tolist()
# TODO: refactor the single - map to whole numpy computation
def rotate_poly(h, w, new_h, new_w, rotate_matrix_T, polys):
    """Rotate every polygon in `polys`; see `rotate_poly_single`."""
    return [
        rotate_poly_single(h, w, new_h, new_w, rotate_matrix_T, poly)
        for poly in polys
    ]
class RotateAugmentation(object):
    """
    1. rotate image and polygons, transfer polygons to masks
    2. polygon 2 mask
    """
    def __init__(self,
                 # center=None,
                 CLASSES=None,
                 scale=1.0,
                 border_value=0,
                 auto_bound=True,
                 rotate_range=(-180, 180),
                 small_filter=4):
        # CLASSES: class names indexed by (label - 1); used to detect
        #   rotation-sensitive categories in __call__.
        # scale: isotropic scale passed to cv2.getRotationMatrix2D.
        # border_value: fill value for pixels outside the rotated image.
        # auto_bound: if True, enlarge the output canvas to fit the whole
        #   rotated image instead of cropping it.
        # rotate_range: (min, max) angle in degrees for the random rotation.
        # small_filter: minimum side length (in original-image pixels,
        #   presumably — see note in __call__) below which instances are dropped.
        self.CLASSES = CLASSES
        self.scale = scale
        self.border_value = border_value
        self.auto_bound = auto_bound
        self.rotate_range = rotate_range
        self.small_filter = small_filter
        # self.center = center
    def __call__(self, img, boxes, masks, labels, filename):
        """Randomly rotate `img` and its instance annotations.

        :param img: HxWxC image array.
        :param boxes: axis-aligned boxes (unused: boxes are recomputed from
            the rotated polygons).
        :param masks: list of binary instance masks.
        :param labels: 1-based class label array aligned with `masks`.
        :param filename: image filename (only referenced in debug comments).
        :return: (rotated_img, rotated_boxes, rotated_masks, labels).
        """
        # Draw a uniform random angle from rotate_range.
        angle = np.random.rand() * (self.rotate_range[1] - self.rotate_range[0]) + self.rotate_range[0]
        discrete_range = [90, 180, -90, -180]
        for label in labels:
            # print('label: ', label)
            cls = self.CLASSES[label-1]
            # print('cls: ', cls)
            # Restrict to multiples of 90 degrees when the image contains
            # these categories.
            if (cls == 'storage-tank') or (cls == 'roundabout') or (cls == 'airport'):
                random.shuffle(discrete_range)
                angle = discrete_range[0]
                break
        # rotate image, copy from mmcv.imrotate
        h, w = img.shape[:2]
        center = ((w - 1) * 0.5, (h - 1) * 0.5)
        # print('len boxes: ', len(boxes))
        # print('len masks: ', len(masks))
        # print('len labels: ', len(labels))
        matrix = cv2.getRotationMatrix2D(center, -angle, self.scale)
        # Keep a copy of the pure 2x2 rotation part (transposed) before the
        # translation column is adjusted below; used to rotate the polygons.
        matrix_T = copy.deepcopy(matrix[:2, :2]).T
        if self.auto_bound:
            # Grow the canvas so the whole rotated image fits, and shift the
            # translation so the result stays centred.
            cos = np.abs(matrix[0, 0])
            sin = np.abs(matrix[0, 1])
            new_w = h * sin + w * cos
            new_h = h * cos + w * sin
            matrix[0, 2] += (new_w - w) * 0.5
            matrix[1, 2] += (new_h - h) * 0.5
            w = int(np.round(new_w))
            h = int(np.round(new_h))
        rotated_img = cv2.warpAffine(img, matrix, (w, h), borderValue=self.border_value)
        # Convert masks -> 4-point polygons, rotate them, then rasterise back.
        polys = mask2poly(masks)
        rotated_polys = rotate_poly(img.shape[0], img.shape[1], h, w, matrix_T, np.array(polys))
        rotated_polys_np = np.array(rotated_polys)
        # add dimension in poly2mask
        # print('rotated_polys_np: ', rotated_polys_np)
        rotated_masks = poly2mask(rotated_polys_np[:, np.newaxis, :].tolist(), h, w)
        # print('rotated_masks: ', rotated_masks)
        # print('rotated masks sum: ', sum(sum(rotated_masks[0])))
        rotated_boxes = poly2bbox(rotated_polys_np).astype(np.float32)
        # print('len rotated boxes: ', len(rotated_boxes))
        # print('len rotaed polys: ', len(rotated_polys))
        # print('len rotated_masks: ', len(rotated_masks))
        # print('len labels: ', len(labels))
        # True rotated h, sqrt((x1-x2)^2 + (y1-y2)^2)
        rotated_h = np.sqrt(np.power(rotated_polys_np[:, 0] - rotated_polys_np[:, 2], 2)
                            + np.power(rotated_polys_np[:, 1] - rotated_polys_np[:, 3], 2) )
        # True rotated w, sqrt((x2 - x3)^2 + (y2 - y3)^2)
        rotated_w = np.sqrt(np.power(rotated_polys_np[:, 2] - rotated_polys_np[:, 4], 2)
                            + np.power(rotated_polys_np[:, 3] - rotated_polys_np[:, 5], 2) )
        min_w_h = np.minimum(rotated_h, rotated_w)
        # Drop instances whose shorter side (rescaled by img.shape[0] / h —
        # presumably back to original-image scale; TODO confirm) falls below
        # small_filter.
        keep_inds = (min_w_h * img.shape[0] / np.float32(h)) >= self.small_filter
        # NOTE(review): keep_inds is a boolean mask, so len(keep_inds) equals
        # the number of instances, not the number kept; the else branch only
        # triggers when there were no instances to begin with.
        if len(keep_inds) > 0:
            rotated_boxes = rotated_boxes[keep_inds].tolist()
            rotated_masks = np.array(rotated_masks)[keep_inds]
            labels = labels[keep_inds]
            # TODO: rm it after debug
            # ori_masks = np.array(masks)[keep_inds]
            # rotated_polys = rotated_polys_np[keep_inds]
            # ori_polys = np.array(polys)[keep_inds]
            # print('rotated masks: ', rotated_masks)
            # for idx, mask in enumerate(rotated_masks):
            #     if sum(sum(mask)) == 0:
            #         print('len rotated masks: ', len(rotated_masks))
            #         print('rotated mask: ', mask)
            #         print('rotated_poly ', rotated_polys[idx])
            #         print('sum ori mask ', sum(sum(ori_masks[idx])))
            #         print('ori poly ', ori_polys[idx])
            #         print('filename: ', filename)
            #         print('rotated mask shape: ', mask.shape)
            #         import pdb
            #         pdb.set_trace()
        else:
            rotated_boxes = np.zeros((0, 4), dtype=np.float32).tolist()
            rotated_masks = []
            labels = np.array([], dtype=np.int64)
        return rotated_img, rotated_boxes, rotated_masks, labels
class RotateTestAugmentation(object):
    """
    Rotate an image by a fixed angle (90/180/270) for test-time augmentation.
    """
    def __init__(self,
                 scale=1.0,
                 border_value=0,
                 auto_bound=True):
        # scale: isotropic scale passed to cv2.getRotationMatrix2D.
        # border_value: fill value for pixels outside the rotated image.
        # auto_bound: if True, enlarge the output canvas to fit the rotation.
        self.scale = scale
        self.border_value = border_value
        self.auto_bound = auto_bound
    def __call__(self, img, angle=None):
        """Rotate `img` by `angle` degrees.

        :param img: HxW(xC) image array.
        :param angle: rotation angle in degrees; must be 90, 180 or 270.
        :return: (rotated_img, img_shape, pad_shape, scale) where both shape
            entries are the rotated image's shape.
        """
        assert angle in [90, 180, 270]
        # Rotate about the image centre (mirrors mmcv.imrotate).
        height, width = img.shape[:2]
        pivot = ((width - 1) * 0.5, (height - 1) * 0.5)
        matrix = cv2.getRotationMatrix2D(pivot, -angle, self.scale)
        if self.auto_bound:
            # Expand the canvas so the whole rotated image fits.
            cos_a = np.abs(matrix[0, 0])
            sin_a = np.abs(matrix[0, 1])
            bound_w = height * sin_a + width * cos_a
            bound_h = height * cos_a + width * sin_a
            matrix[0, 2] += (bound_w - width) * 0.5
            matrix[1, 2] += (bound_h - height) * 0.5
            width = int(np.round(bound_w))
            height = int(np.round(bound_h))
        rotated = cv2.warpAffine(img, matrix, (width, height),
                                 borderValue=self.border_value)
        rotated_shape = rotated.shape
        return rotated, rotated_shape, rotated_shape, self.scale
"numpy.random.rand",
"pycocotools.coco.maskUtils.merge",
"numpy.array",
"copy.deepcopy",
"cv2.minAreaRect",
"pycocotools.coco.maskUtils.frPyObjects",
"numpy.matmul",
"numpy.round",
"numpy.abs",
"cv2.warpAffine",
"cv2.boxPoints",
"pycocotools.coco.maskUtils.decode",
"mmdet.core.poly2bbox",
... | [((620, 691), 'cv2.findContours', 'cv2.findContours', (['binary_mask', 'cv2.RETR_EXTERNAL', 'cv2.CHAIN_APPROX_NONE'], {}), '(binary_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n', (636, 691), False, 'import cv2\n'), ((744, 772), 'cv2.minAreaRect', 'cv2.minAreaRect', (['max_contour'], {}), '(max_contour)\n', (759, 772), False, 'import cv2\n'), ((784, 803), 'cv2.boxPoints', 'cv2.boxPoints', (['rect'], {}), '(rect)\n', (797, 803), False, 'import cv2\n'), ((1134, 1167), 'pycocotools.coco.maskUtils.frPyObjects', 'maskUtils.frPyObjects', (['poly', 'h', 'w'], {}), '(poly, h, w)\n', (1155, 1167), False, 'from pycocotools.coco import maskUtils\n'), ((1178, 1199), 'pycocotools.coco.maskUtils.merge', 'maskUtils.merge', (['rles'], {}), '(rles)\n', (1193, 1199), False, 'from pycocotools.coco import maskUtils\n'), ((1211, 1232), 'pycocotools.coco.maskUtils.decode', 'maskUtils.decode', (['rle'], {}), '(rle)\n', (1227, 1232), False, 'from pycocotools.coco import maskUtils\n'), ((1298, 1329), 'functools.partial', 'partial', (['poly2mask_single', 'h', 'w'], {}), '(poly2mask_single, h, w)\n', (1305, 1329), False, 'from functools import partial\n'), ((1941, 2005), 'functools.partial', 'partial', (['rotate_poly_single', 'h', 'w', 'new_h', 'new_w', 'rotate_matrix_T'], {}), '(rotate_poly_single, h, w, new_h, new_w, rotate_matrix_T)\n', (1948, 2005), False, 'from functools import partial\n'), ((1630, 1664), 'numpy.matmul', 'np.matmul', (['coords', 'rotate_matrix_T'], {}), '(coords, rotate_matrix_T)\n', (1639, 1664), True, 'import numpy as np\n'), ((1668, 1716), 'numpy.array', 'np.array', (['[(new_w - 1) * 0.5, (new_h - 1) * 0.5]'], {}), '([(new_w - 1) * 0.5, (new_h - 1) * 0.5])\n', (1676, 1716), True, 'import numpy as np\n'), ((3554, 3605), 'cv2.getRotationMatrix2D', 'cv2.getRotationMatrix2D', (['center', '(-angle)', 'self.scale'], {}), '(center, -angle, self.scale)\n', (3577, 3605), False, 'import cv2\n'), ((4027, 4093), 'cv2.warpAffine', 'cv2.warpAffine', (['img', 'matrix', 
'(w, h)'], {'borderValue': 'self.border_value'}), '(img, matrix, (w, h), borderValue=self.border_value)\n', (4041, 4093), False, 'import cv2\n'), ((4254, 4277), 'numpy.array', 'np.array', (['rotated_polys'], {}), '(rotated_polys)\n', (4262, 4277), True, 'import numpy as np\n'), ((5361, 5393), 'numpy.minimum', 'np.minimum', (['rotated_h', 'rotated_w'], {}), '(rotated_h, rotated_w)\n', (5371, 5393), True, 'import numpy as np\n'), ((7555, 7606), 'cv2.getRotationMatrix2D', 'cv2.getRotationMatrix2D', (['center', '(-angle)', 'self.scale'], {}), '(center, -angle, self.scale)\n', (7578, 7606), False, 'import cv2\n'), ((7977, 8043), 'cv2.warpAffine', 'cv2.warpAffine', (['img', 'matrix', '(w, h)'], {'borderValue': 'self.border_value'}), '(img, matrix, (w, h), borderValue=self.border_value)\n', (7991, 8043), False, 'import cv2\n'), ((3625, 3654), 'copy.deepcopy', 'copy.deepcopy', (['matrix[:2, :2]'], {}), '(matrix[:2, :2])\n', (3638, 3654), False, 'import copy\n'), ((3703, 3723), 'numpy.abs', 'np.abs', (['matrix[0, 0]'], {}), '(matrix[0, 0])\n', (3709, 3723), True, 'import numpy as np\n'), ((3742, 3762), 'numpy.abs', 'np.abs', (['matrix[0, 1]'], {}), '(matrix[0, 1])\n', (3748, 3762), True, 'import numpy as np\n'), ((4209, 4224), 'numpy.array', 'np.array', (['polys'], {}), '(polys)\n', (4217, 4224), True, 'import numpy as np\n'), ((6670, 6698), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.int64'}), '([], dtype=np.int64)\n', (6678, 6698), True, 'import numpy as np\n'), ((7653, 7673), 'numpy.abs', 'np.abs', (['matrix[0, 0]'], {}), '(matrix[0, 0])\n', (7659, 7673), True, 'import numpy as np\n'), ((7692, 7712), 'numpy.abs', 'np.abs', (['matrix[0, 1]'], {}), '(matrix[0, 1])\n', (7698, 7712), True, 'import numpy as np\n'), ((2805, 2821), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (2819, 2821), True, 'import numpy as np\n'), ((3184, 3214), 'numpy.random.shuffle', 'random.shuffle', (['discrete_range'], {}), '(discrete_range)\n', (3198, 3214), False, 'from numpy 
import random\n'), ((3951, 3966), 'numpy.round', 'np.round', (['new_w'], {}), '(new_w)\n', (3959, 3966), True, 'import numpy as np\n'), ((3988, 4003), 'numpy.round', 'np.round', (['new_h'], {}), '(new_h)\n', (3996, 4003), True, 'import numpy as np\n'), ((4597, 4624), 'mmdet.core.poly2bbox', 'poly2bbox', (['rotated_polys_np'], {}), '(rotated_polys_np)\n', (4606, 4624), False, 'from mmdet.core import poly2bbox\n'), ((4949, 5009), 'numpy.power', 'np.power', (['(rotated_polys_np[:, 0] - rotated_polys_np[:, 2])', '(2)'], {}), '(rotated_polys_np[:, 0] - rotated_polys_np[:, 2], 2)\n', (4957, 5009), True, 'import numpy as np\n'), ((5040, 5100), 'numpy.power', 'np.power', (['(rotated_polys_np[:, 1] - rotated_polys_np[:, 3])', '(2)'], {}), '(rotated_polys_np[:, 1] - rotated_polys_np[:, 3], 2)\n', (5048, 5100), True, 'import numpy as np\n'), ((5189, 5249), 'numpy.power', 'np.power', (['(rotated_polys_np[:, 2] - rotated_polys_np[:, 4])', '(2)'], {}), '(rotated_polys_np[:, 2] - rotated_polys_np[:, 4], 2)\n', (5197, 5249), True, 'import numpy as np\n'), ((5280, 5340), 'numpy.power', 'np.power', (['(rotated_polys_np[:, 3] - rotated_polys_np[:, 5])', '(2)'], {}), '(rotated_polys_np[:, 3] - rotated_polys_np[:, 5], 2)\n', (5288, 5340), True, 'import numpy as np\n'), ((5440, 5453), 'numpy.float32', 'np.float32', (['h'], {}), '(h)\n', (5450, 5453), True, 'import numpy as np\n'), ((5597, 5620), 'numpy.array', 'np.array', (['rotated_masks'], {}), '(rotated_masks)\n', (5605, 5620), True, 'import numpy as np\n'), ((7901, 7916), 'numpy.round', 'np.round', (['new_w'], {}), '(new_w)\n', (7909, 7916), True, 'import numpy as np\n'), ((7938, 7953), 'numpy.round', 'np.round', (['new_h'], {}), '(new_h)\n', (7946, 7953), True, 'import numpy as np\n'), ((6574, 6608), 'numpy.zeros', 'np.zeros', (['(0, 4)'], {'dtype': 'np.float32'}), '((0, 4), dtype=np.float32)\n', (6582, 6608), True, 'import numpy as np\n')] |
import random
import time
from copy import deepcopy
from functools import total_ordering
from multiprocessing.queues import Queue
from queue import PriorityQueue
import numpy as np
import math
from scipy.linalg import cholesky, cho_solve, solve_triangular, LinAlgError
from scipy.optimize import linear_sum_assignment
from sklearn.metrics.pairwise import rbf_kernel
from autokeras.constant import Constant
from autokeras.net_transformer import transform
from autokeras.nn.layers import is_layer, LayerType
def layer_distance(a, b):
    """Return a dissimilarity score in [0, 1] between two layers.

    Layers of different types are maximally distant (1.0).  For
    convolution and pooling layers the score is the averaged relative
    difference of their attributes; any other matching pair scores 0.
    """
    if type(a) != type(b):
        return 1.0
    if is_layer(a, LayerType.CONV):
        pairs = [
            (a.filters, b.filters),
            (a.kernel_size, b.kernel_size),
            (a.stride, b.stride),
        ]
    elif is_layer(a, LayerType.POOL):
        pairs = [
            (a.padding, b.padding),
            (a.kernel_size, b.kernel_size),
            (a.stride, b.stride),
        ]
    else:
        return 0.0
    return attribute_difference(pairs)
def attribute_difference(att_diff):
    """Average the relative differences of ``(a, b)`` attribute pairs.

    Each pair contributes ``|a - b| / max(a, b)``; pairs whose maximum
    is zero contribute nothing.
    """
    total = 0.0
    for a_value, b_value in att_diff:
        largest = max(a_value, b_value)
        if largest != 0:
            total += abs(a_value - b_value) / largest
    return total / len(att_diff)
def layers_distance(list_a, list_b):
    """The distance between the layers of two neural networks.

    A Levenshtein-style edit distance where insertions and deletions
    cost 1 and the substitution cost of two layers is given by
    ``layer_distance``.
    """
    len_a, len_b = len(list_a), len(list_b)
    # dp[i][j]: distance between the first i layers of list_a and the
    # first j layers of list_b.
    dp = np.zeros((len_a + 1, len_b + 1))
    dp[:, 0] = np.arange(len_a + 1)
    dp[0, :] = np.arange(len_b + 1)
    for i in range(1, len_a + 1):
        for j in range(1, len_b + 1):
            substitute = dp[i - 1][j - 1] + layer_distance(list_a[i - 1], list_b[j - 1])
            dp[i][j] = min(dp[i][j - 1] + 1, dp[i - 1][j] + 1, substitute)
    return dp[len_a][len_b]
def skip_connection_distance(a, b):
    """The distance between two skip-connections.

    A skip-connection is a ``(start, end, type)`` triple.  Connections of
    different types are maximally distant (1.0); otherwise the distance
    blends the offset of the start points with the difference in span
    lengths, normalized by the larger start and span.
    """
    if a[2] != b[2]:
        return 1.0
    span_a = abs(a[1] - a[0])
    span_b = abs(b[1] - b[0])
    numerator = abs(a[0] - b[0]) + abs(span_a - span_b)
    return numerator / (max(a[0], b[0]) + max(span_a, span_b))
def skip_connections_distance(list_a, list_b):
    """The distance between the skip-connections of two neural networks."""
    cost = np.zeros((len(list_a), len(list_b)))
    for i, conn_a in enumerate(list_a):
        for j, conn_b in enumerate(list_b):
            cost[i][j] = skip_connection_distance(conn_a, conn_b)
    # Cost of the optimal one-to-one pairing, plus a unit penalty for
    # every unmatched connection.
    matched_cost = cost[linear_sum_assignment(cost)].sum()
    return matched_cost + abs(len(list_a) - len(list_b))
def edit_distance(x, y):
    """The distance between two neural networks.
    Args:
        x: An instance of NetworkDescriptor.
        y: An instance of NetworkDescriptor
    Returns:
        The edit-distance between x and y: the layer edit-distance plus
        the skip-connection distance weighted by Constant.KERNEL_LAMBDA.
    """
    skip_term = skip_connections_distance(x.skip_connections, y.skip_connections)
    return layers_distance(x.layers, y.layers) + Constant.KERNEL_LAMBDA * skip_term
class IncrementalGaussianProcess:
    """Gaussian process regressor over neural-architecture edit-distances.

    The kernel is an RBF over a Bourgain embedding of the pairwise
    edit-distance matrix; new training points can be appended without
    recomputing the distances between previously seen architectures.

    Attributes:
        alpha: Regularization added to the kernel diagonal for numerical
            stability.
    """
    def __init__(self):
        self.alpha = 1e-10
        self._distance_matrix = None  # pairwise edit-distances of _x
        self._x = None                # fitted architectures
        self._y = None                # fitted metric values
        self._first_fitted = False
        self._l_matrix = None         # lower Cholesky factor of the kernel
        self._alpha_vector = None     # K^-1 y, reused at prediction time
    @property
    def kernel_matrix(self):
        return self._distance_matrix
    def fit(self, train_x, train_y):
        """ Fit the regressor with more data.
        Args:
            train_x: A list of NetworkDescriptor.
            train_y: A list of metric values.
        """
        if self.first_fitted:
            self.incremental_fit(train_x, train_y)
        else:
            self.first_fit(train_x, train_y)
    def incremental_fit(self, train_x, train_y):
        """ Incrementally fit the regressor. """
        if not self._first_fitted:
            raise ValueError("The first_fit function needs to be called first.")
        train_x, train_y = np.array(train_x), np.array(train_y)
        # Incrementally compute K: only the blocks involving the new points
        # are computed; the previously stored block is reused.
        up_right_k = edit_distance_matrix(self._x, train_x)
        down_left_k = np.transpose(up_right_k)
        down_right_k = edit_distance_matrix(train_x)
        up_k = np.concatenate((self._distance_matrix, up_right_k), axis=1)
        down_k = np.concatenate((down_left_k, down_right_k), axis=1)
        temp_distance_matrix = np.concatenate((up_k, down_k), axis=0)
        k_matrix = bourgain_embedding_matrix(temp_distance_matrix)
        # Regularize only the rows added in this call; the old diagonal
        # entries were already regularized in a previous fit.
        diagonal = np.diag_indices_from(k_matrix)
        diagonal = (diagonal[0][-len(train_x):], diagonal[1][-len(train_x):])
        k_matrix[diagonal] += self.alpha
        try:
            self._l_matrix = cholesky(k_matrix, lower=True)  # Line 2
        except LinAlgError:
            # Kernel not positive definite: keep the previous state.
            return self
        self._x = np.concatenate((self._x, train_x), axis=0)
        self._y = np.concatenate((self._y, train_y), axis=0)
        self._distance_matrix = temp_distance_matrix
        self._alpha_vector = cho_solve((self._l_matrix, True), self._y)  # Line 3
        return self
    @property
    def first_fitted(self):
        return self._first_fitted
    def first_fit(self, train_x, train_y):
        """ Fit the regressor for the first time. """
        train_x, train_y = np.array(train_x), np.array(train_y)
        self._x = np.copy(train_x)
        self._y = np.copy(train_y)
        self._distance_matrix = edit_distance_matrix(self._x)
        k_matrix = bourgain_embedding_matrix(self._distance_matrix)
        k_matrix[np.diag_indices_from(k_matrix)] += self.alpha
        self._l_matrix = cholesky(k_matrix, lower=True)  # Line 2
        self._alpha_vector = cho_solve((self._l_matrix, True), self._y)  # Line 3
        self._first_fitted = True
        return self
    def predict(self, train_x):
        """Predict the result.
        Args:
            train_x: A list of NetworkDescriptor.
        Returns:
            y_mean: The predicted mean.
            y_std: The predicted standard deviation.
        """
        k_trans = np.exp(-np.power(edit_distance_matrix(train_x, self._x), 2))
        y_mean = k_trans.dot(self._alpha_vector)  # Line 4 (y_mean = f_star)
        # compute inverse K_inv of K based on its Cholesky
        # decomposition L and its inverse L_inv
        l_inv = solve_triangular(self._l_matrix.T, np.eye(self._l_matrix.shape[0]))
        k_inv = l_inv.dot(l_inv.T)
        # Compute variance of predictive distribution
        # NOTE: dtype was ``np.float``, an alias deprecated in NumPy 1.20
        # and removed in 1.24; the builtin ``float`` is the documented
        # replacement and is what the alias always meant.
        y_var = np.ones(len(train_x), dtype=float)
        y_var -= np.einsum("ij,ij->i", np.dot(k_trans, k_inv), k_trans)
        # Check if any of the variances is negative because of
        # numerical issues. If yes: set the variance to 0.
        y_var_negative = y_var < 0
        if np.any(y_var_negative):
            y_var[y_var_negative] = 0.0
        return y_mean, np.sqrt(y_var)
def edit_distance_matrix(train_x, train_y=None):
    """Calculate the edit distance.
    Args:
        train_x: A list of neural architectures.
        train_y: An optional second list; when omitted, the symmetric
            pairwise distances within ``train_x`` are computed.
    Returns:
        An edit-distance matrix.
    """
    if train_y is None:
        n = train_x.shape[0]
        ret = np.zeros((n, n))
        # The metric is symmetric: fill the strict upper triangle and
        # mirror it; the diagonal stays zero.
        for i in range(n):
            for j in range(i + 1, n):
                d = edit_distance(train_x[i], train_x[j])
                ret[i][j] = d
                ret[j][i] = d
        return ret
    ret = np.zeros((train_x.shape[0], train_y.shape[0]))
    for i, arch_a in enumerate(train_x):
        for j, arch_b in enumerate(train_y):
            ret[i][j] = edit_distance(arch_a, arch_b)
    return ret
def vector_distance(a, b):
    """Return the Euclidean distance between two vectors."""
    difference = np.array(a) - np.array(b)
    return np.linalg.norm(difference)
def bourgain_embedding_matrix(distance_matrix):
    """Use Bourgain algorithm to embed the neural architectures based on their edit-distance.
    Args:
        distance_matrix: A matrix of edit-distances.
    Returns:
        A matrix of distances after embedding (an RBF kernel over the
        embedded coordinates).
    """
    distance_matrix = np.array(distance_matrix)
    n = len(distance_matrix)
    if n == 1:
        return distance_matrix
    # Fixed seed so the embedding is deterministic across calls.
    np.random.seed(123)
    distort_elements = []
    r = range(n)
    k = int(math.ceil(math.log(n) / math.log(2) - 1))
    t = int(math.ceil(math.log(n)))
    counter = 0
    for i in range(0, k + 1):
        # NOTE(review): the inner loop variable shadows ``t``, so the
        # number of repetitions shrinks on each outer iteration.  This
        # looks unintended, but fixing it would change the embedding, so
        # it is documented rather than changed — confirm against upstream.
        for t in range(t):
            # Random anchor set of size 2**i.
            s = np.random.choice(r, 2 ** i)
            for j in r:
                # Distance from point j to its nearest anchor.  The
                # comprehension's loop variable does not leak in Python 3,
                # so ``s`` is still the anchor array afterwards.
                d = min([distance_matrix[j][s] for s in s])
                counter += len(s)
                if i == 0 and t == 0:
                    distort_elements.append([d])
                else:
                    distort_elements[j].append(d)
    return rbf_kernel(distort_elements, distort_elements)
class BayesianOptimizer:
    """ A Bayesian optimizer for neural architectures.
    Attributes:
        searcher: The Searcher which is calling the Bayesian optimizer.
        t_min: The minimum temperature for simulated annealing.
        metric: An instance of the Metric subclasses.
        gpr: A GaussianProcessRegressor for bayesian optimization.
        beta: The beta in acquisition function. (refer to our paper)
        search_tree: The network morphism search tree.
    """
    def __init__(self, searcher, t_min, metric, beta=None):
        self.searcher = searcher
        self.t_min = t_min
        self.metric = metric
        self.gpr = IncrementalGaussianProcess()
        self.beta = beta if beta is not None else Constant.BETA
        self.search_tree = SearchTree()
    def fit(self, x_queue, y_queue):
        """ Fit the optimizer with new architectures and performances.
        Args:
            x_queue: A list of NetworkDescriptor.
            y_queue: A list of metric values.
        """
        self.gpr.fit(x_queue, y_queue)
    def generate(self, descriptors, timeout, sync_message=None):
        """Generate new architecture via simulated annealing over morphs.
        Args:
            descriptors: All the searched neural architectures.
            timeout: An integer. The time limit in seconds.
            sync_message: the Queue for multiprocessing return value.
        Returns:
            graph: An instance of Graph. A morphed neural network with weights.
            father_id: The father node ID in the search tree.
        """
        model_ids = self.search_tree.adj_list.keys()
        start_time = time.time()
        target_graph = None
        father_id = None
        # Work on a copy so the caller's descriptor list is not mutated.
        descriptors = deepcopy(descriptors)
        # ReverseElem flips the ordering so the priority queue always pops
        # the best candidate first, regardless of metric direction.
        elem_class = Elem
        if self.metric.higher_better():
            elem_class = ReverseElem
        # Initialize the priority queue.
        pq = PriorityQueue()
        temp_list = []
        for model_id in model_ids:
            metric_value = self.searcher.get_metric_value_by_id(model_id)
            temp_list.append((metric_value, model_id))
        temp_list = sorted(temp_list)
        for metric_value, model_id in temp_list:
            graph = self.searcher.load_model_by_id(model_id)
            graph.clear_operation_history()
            graph.clear_weights()
            pq.put(elem_class(metric_value, model_id, graph))
        # Simulated-annealing parameters: temperature t cools by factor
        # alpha each round until it reaches t_min.
        t = 1.0
        t_min = self.t_min
        alpha = 0.9
        opt_acq = self._get_init_opt_acq_value()
        remaining_time = timeout
        while not pq.empty() and remaining_time > 0 and t > t_min:
            # A non-empty sync_message means another process finished first.
            if isinstance(sync_message, Queue) and sync_message.qsize() != 0:
                break
            elem = pq.get()
            if self.metric.higher_better():
                temp_exp = min((elem.metric_value - opt_acq) / t, 1.0)
            else:
                temp_exp = min((opt_acq - elem.metric_value) / t, 1.0)
            # Metropolis-style acceptance probability (>= 1 for improvements).
            ap = math.exp(temp_exp)
            if ap >= random.uniform(0, 1):
                # Expand the accepted node: try all morphs of its graph.
                for temp_graph in transform(elem.graph):
                    if contain(descriptors, temp_graph.extract_descriptor()):
                        continue
                    temp_acq_value = self.acq(temp_graph)
                    pq.put(elem_class(temp_acq_value, elem.father_id, temp_graph))
                    descriptors.append(temp_graph.extract_descriptor())
                    if self._accept_new_acq_value(opt_acq, temp_acq_value):
                        opt_acq = temp_acq_value
                        father_id = elem.father_id
                        target_graph = deepcopy(temp_graph)
            t *= alpha
            remaining_time = timeout - (time.time() - start_time)
        if remaining_time < 0:
            raise TimeoutError
        # Did not find a non-duplicated architecture.
        if father_id is None:
            return None, None
        # Replay the morph operations of the best candidate onto a freshly
        # loaded copy of its father so the returned graph carries weights.
        nm_graph = self.searcher.load_model_by_id(father_id)
        for args in target_graph.operation_history:
            getattr(nm_graph, args[0])(*list(args[1:]))
        return nm_graph, father_id
    def acq(self, graph):
        """Acquisition value: upper (or lower) confidence bound of the GP."""
        mean, std = self.gpr.predict(np.array([graph.extract_descriptor()]))
        if self.metric.higher_better():
            return mean + self.beta * std
        return mean - self.beta * std
    def _get_init_opt_acq_value(self):
        # Worst possible value for the metric's direction.
        if self.metric.higher_better():
            return -np.inf
        return np.inf
    def _accept_new_acq_value(self, opt_acq, temp_acq_value):
        # True when temp_acq_value improves on opt_acq in the metric's direction.
        if temp_acq_value > opt_acq and self.metric.higher_better():
            return True
        if temp_acq_value < opt_acq and not self.metric.higher_better():
            return True
        return False
    def add_child(self, father_id, model_id):
        """Record model_id as a child of father_id in the search tree."""
        self.search_tree.add_child(father_id, model_id)
@total_ordering
class Elem:
    """Elements to be sorted according to metric value.

    ``total_ordering`` derives the remaining comparisons from ``__eq__``
    and ``__lt__``; instances order ascending by ``metric_value``.
    """
    def __init__(self, metric_value, father_id, graph):
        self.father_id = father_id  # search-tree node this candidate descends from
        self.graph = graph  # the (morphed) network graph
        self.metric_value = metric_value  # value used for priority-queue ordering
    def __eq__(self, other):
        return self.metric_value == other.metric_value
    def __lt__(self, other):
        return self.metric_value < other.metric_value
class ReverseElem(Elem):
    """Elements to be reversely sorted according to metric value.

    Used when higher metric values are better, so a min-oriented
    PriorityQueue still pops the best candidate first.
    """
    def __lt__(self, other):
        return self.metric_value > other.metric_value
def contain(descriptors, target_descriptor):
    """Check if the target descriptor is in the descriptors.

    Two descriptors count as the same when their edit-distance is
    (numerically) zero, i.e. below 1e-5.
    """
    return any(
        edit_distance(descriptor, target_descriptor) < 1e-5
        for descriptor in descriptors
    )
class SearchTree:
    """The network morphism search tree.

    Nodes are model IDs.  ``adj_list`` maps each node to the list of its
    children; ``root`` is the node added with father ID ``-1``.
    """
    def __init__(self):
        self.root = None
        self.adj_list = {}
    def add_child(self, u, v):
        """Attach ``v`` under ``u``; ``u == -1`` makes ``v`` the root."""
        if u == -1:
            self.root = v
            self.adj_list[v] = []
            return
        children = self.adj_list[u]
        if v not in children:
            children.append(v)
        self.adj_list.setdefault(v, [])
    def get_dict(self, u=None):
        """ A recursive function to return the content of the tree in a dict."""
        if u is None:
            return self.get_dict(self.root)
        return {
            'name': u,
            'children': [self.get_dict(v) for v in self.adj_list[u]],
        }
| [
"numpy.sqrt",
"sklearn.metrics.pairwise.rbf_kernel",
"math.log",
"autokeras.net_transformer.transform",
"numpy.array",
"scipy.linalg.cholesky",
"copy.deepcopy",
"numpy.linalg.norm",
"math.exp",
"numpy.diag_indices_from",
"scipy.linalg.cho_solve",
"scipy.optimize.linear_sum_assignment",
"nump... | [((632, 659), 'autokeras.nn.layers.is_layer', 'is_layer', (['a', 'LayerType.CONV'], {}), '(a, LayerType.CONV)\n', (640, 659), False, 'from autokeras.nn.layers import is_layer, LayerType\n'), ((852, 879), 'autokeras.nn.layers.is_layer', 'is_layer', (['a', 'LayerType.POOL'], {}), '(a, LayerType.POOL)\n', (860, 879), False, 'from autokeras.nn.layers import is_layer, LayerType\n'), ((1512, 1544), 'numpy.zeros', 'np.zeros', (['(len_a + 1, len_b + 1)'], {}), '((len_a + 1, len_b + 1))\n', (1520, 1544), True, 'import numpy as np\n'), ((7666, 7712), 'numpy.zeros', 'np.zeros', (['(train_x.shape[0], train_y.shape[0])'], {}), '((train_x.shape[0], train_y.shape[0]))\n', (7674, 7712), True, 'import numpy as np\n'), ((7963, 7974), 'numpy.array', 'np.array', (['a'], {}), '(a)\n', (7971, 7974), True, 'import numpy as np\n'), ((7983, 7994), 'numpy.array', 'np.array', (['b'], {}), '(b)\n', (7991, 7994), True, 'import numpy as np\n'), ((8006, 8027), 'numpy.linalg.norm', 'np.linalg.norm', (['(a - b)'], {}), '(a - b)\n', (8020, 8027), True, 'import numpy as np\n'), ((8328, 8353), 'numpy.array', 'np.array', (['distance_matrix'], {}), '(distance_matrix)\n', (8336, 8353), True, 'import numpy as np\n'), ((8433, 8452), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (8447, 8452), True, 'import numpy as np\n'), ((8991, 9037), 'sklearn.metrics.pairwise.rbf_kernel', 'rbf_kernel', (['distort_elements', 'distort_elements'], {}), '(distort_elements, distort_elements)\n', (9001, 9037), False, 'from sklearn.metrics.pairwise import rbf_kernel\n'), ((4188, 4212), 'numpy.transpose', 'np.transpose', (['up_right_k'], {}), '(up_right_k)\n', (4200, 4212), True, 'import numpy as np\n'), ((4281, 4340), 'numpy.concatenate', 'np.concatenate', (['(self._distance_matrix, up_right_k)'], {'axis': '(1)'}), '((self._distance_matrix, up_right_k), axis=1)\n', (4295, 4340), True, 'import numpy as np\n'), ((4358, 4409), 'numpy.concatenate', 'np.concatenate', (['(down_left_k, 
down_right_k)'], {'axis': '(1)'}), '((down_left_k, down_right_k), axis=1)\n', (4372, 4409), True, 'import numpy as np\n'), ((4441, 4479), 'numpy.concatenate', 'np.concatenate', (['(up_k, down_k)'], {'axis': '(0)'}), '((up_k, down_k), axis=0)\n', (4455, 4479), True, 'import numpy as np\n'), ((4566, 4596), 'numpy.diag_indices_from', 'np.diag_indices_from', (['k_matrix'], {}), '(k_matrix)\n', (4586, 4596), True, 'import numpy as np\n'), ((4871, 4913), 'numpy.concatenate', 'np.concatenate', (['(self._x, train_x)'], {'axis': '(0)'}), '((self._x, train_x), axis=0)\n', (4885, 4913), True, 'import numpy as np\n'), ((4932, 4974), 'numpy.concatenate', 'np.concatenate', (['(self._y, train_y)'], {'axis': '(0)'}), '((self._y, train_y), axis=0)\n', (4946, 4974), True, 'import numpy as np\n'), ((5058, 5100), 'scipy.linalg.cho_solve', 'cho_solve', (['(self._l_matrix, True)', 'self._y'], {}), '((self._l_matrix, True), self._y)\n', (5067, 5100), False, 'from scipy.linalg import cholesky, cho_solve, solve_triangular, LinAlgError\n'), ((5390, 5406), 'numpy.copy', 'np.copy', (['train_x'], {}), '(train_x)\n', (5397, 5406), True, 'import numpy as np\n'), ((5425, 5441), 'numpy.copy', 'np.copy', (['train_y'], {}), '(train_y)\n', (5432, 5441), True, 'import numpy as np\n'), ((5662, 5692), 'scipy.linalg.cholesky', 'cholesky', (['k_matrix'], {'lower': '(True)'}), '(k_matrix, lower=True)\n', (5670, 5692), False, 'from scipy.linalg import cholesky, cho_solve, solve_triangular, LinAlgError\n'), ((5733, 5775), 'scipy.linalg.cho_solve', 'cho_solve', (['(self._l_matrix, True)', 'self._y'], {}), '((self._l_matrix, True), self._y)\n', (5742, 5775), False, 'from scipy.linalg import cholesky, cho_solve, solve_triangular, LinAlgError\n'), ((6826, 6848), 'numpy.any', 'np.any', (['y_var_negative'], {}), '(y_var_negative)\n', (6832, 6848), True, 'import numpy as np\n'), ((7217, 7263), 'numpy.zeros', 'np.zeros', (['(train_x.shape[0], train_x.shape[0])'], {}), '((train_x.shape[0], train_x.shape[0]))\n', 
(7225, 7263), True, 'import numpy as np\n'), ((10659, 10670), 'time.time', 'time.time', ([], {}), '()\n', (10668, 10670), False, 'import time\n'), ((10746, 10767), 'copy.deepcopy', 'deepcopy', (['descriptors'], {}), '(descriptors)\n', (10754, 10767), False, 'from copy import deepcopy\n'), ((10926, 10941), 'queue.PriorityQueue', 'PriorityQueue', ([], {}), '()\n', (10939, 10941), False, 'from queue import PriorityQueue\n'), ((4034, 4051), 'numpy.array', 'np.array', (['train_x'], {}), '(train_x)\n', (4042, 4051), True, 'import numpy as np\n'), ((4053, 4070), 'numpy.array', 'np.array', (['train_y'], {}), '(train_y)\n', (4061, 4070), True, 'import numpy as np\n'), ((4759, 4789), 'scipy.linalg.cholesky', 'cholesky', (['k_matrix'], {'lower': '(True)'}), '(k_matrix, lower=True)\n', (4767, 4789), False, 'from scipy.linalg import cholesky, cho_solve, solve_triangular, LinAlgError\n'), ((5334, 5351), 'numpy.array', 'np.array', (['train_x'], {}), '(train_x)\n', (5342, 5351), True, 'import numpy as np\n'), ((5353, 5370), 'numpy.array', 'np.array', (['train_y'], {}), '(train_y)\n', (5361, 5370), True, 'import numpy as np\n'), ((5590, 5620), 'numpy.diag_indices_from', 'np.diag_indices_from', (['k_matrix'], {}), '(k_matrix)\n', (5610, 5620), True, 'import numpy as np\n'), ((6409, 6440), 'numpy.eye', 'np.eye', (['self._l_matrix.shape[0]'], {}), '(self._l_matrix.shape[0])\n', (6415, 6440), True, 'import numpy as np\n'), ((6624, 6646), 'numpy.dot', 'np.dot', (['k_trans', 'k_inv'], {}), '(k_trans, k_inv)\n', (6630, 6646), True, 'import numpy as np\n'), ((6913, 6927), 'numpy.sqrt', 'np.sqrt', (['y_var'], {}), '(y_var)\n', (6920, 6927), True, 'import numpy as np\n'), ((8572, 8583), 'math.log', 'math.log', (['n'], {}), '(n)\n', (8580, 8583), False, 'import math\n'), ((8675, 8702), 'numpy.random.choice', 'np.random.choice', (['r', '(2 ** i)'], {}), '(r, 2 ** i)\n', (8691, 8702), True, 'import numpy as np\n'), ((11979, 11997), 'math.exp', 'math.exp', (['temp_exp'], {}), '(temp_exp)\n', 
(11987, 11997), False, 'import math\n'), ((12019, 12039), 'random.uniform', 'random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (12033, 12039), False, 'import random\n'), ((12075, 12096), 'autokeras.net_transformer.transform', 'transform', (['elem.graph'], {}), '(elem.graph)\n', (12084, 12096), False, 'from autokeras.net_transformer import transform\n'), ((2516, 2554), 'scipy.optimize.linear_sum_assignment', 'linear_sum_assignment', (['distance_matrix'], {}), '(distance_matrix)\n', (2537, 2554), False, 'from scipy.optimize import linear_sum_assignment\n'), ((8518, 8529), 'math.log', 'math.log', (['n'], {}), '(n)\n', (8526, 8529), False, 'import math\n'), ((8532, 8543), 'math.log', 'math.log', (['(2)'], {}), '(2)\n', (8540, 8543), False, 'import math\n'), ((12722, 12733), 'time.time', 'time.time', ([], {}), '()\n', (12731, 12733), False, 'import time\n'), ((12638, 12658), 'copy.deepcopy', 'deepcopy', (['temp_graph'], {}), '(temp_graph)\n', (12646, 12658), False, 'from copy import deepcopy\n')] |
from sympy import ( symbols, solve, diff, integrate, exp, sqrt, lambdify, pprint )
import matplotlib.pyplot as plt
import numpy as np
# The difference between the true value of an integral and the value given by the trapezoidal rule or Simpson's rule is known as the error.
# In numerical analysis, the error is studied to determine how large n must be for the error to be smaller than some specified amount.
#
# For both rules, the error is inversely proportional to a power of n, the number of subdivisions.
# In other words, the error is roughly StartFraction k / n^p
# where k is a constant that depends on the function and the interval, and p is a power that depends only on the method used.
# With a little experimentation, you can find out what the power p is for the trapezoidal rule and for Simpson's rule.
def simpsons_rule( f, a, b, n, var=None ):
    """Approximate the integral of ``f`` over ``[a, b]`` with Simpson's rule.

    Args:
        f: A sympy expression (or any object exposing ``subs`` and,
            when ``var`` is omitted, ``free_symbols``).
        a, b: Integration limits.
        n: Number of subdivisions (even for the classic composite rule).
        var: The integration variable.  Generalized from the original,
            which always substituted the module-level symbol ``x``: when
            omitted, the expression's single free symbol is used, with
            ``x`` as the fallback, so existing calls behave identically.

    Returns:
        The composite Simpson approximation
        ``(b - a) / (3 n) * sum(w_i * f(x_i))`` with weights 1, 4, 2, ..., 4, 1.
    """
    if var is None:
        free = getattr( f, 'free_symbols', set() )
        var = next( iter( free ) ) if len( free ) == 1 else x
    area = 0
    steps = np.linspace( a, b, n + 1, endpoint = True )
    for i in range( 0, n + 1 ):
        v = f.subs( { var: steps[ i ] } )
        if i == 0 or i == n:
            area += v          # endpoint weight 1
        elif i % 2 == 0:
            area += 2 * v      # interior even node weight 2
        else:
            area += 4 * v      # interior odd node weight 4
    return area * ( b - a ) / ( 3*n )
# Function under study: F(x) = 11*x**4.
x = symbols( 'x' )
F = 11*x**4
# What does the function look like?
g_xlim = [ -.01, 2 ]
lam_p = lambdify( x, F, np )
x_vals = np.linspace( g_xlim[0], g_xlim[1], 1000, endpoint=True )
y_vals = lam_p( x_vals )
plt.plot( x_vals, y_vals )
x_min, x_max = 0, 1
# Mark the integration limits with vertical lines up to the curve.
plt.vlines( x = x_min, ymin = 0, ymax = F.subs( { x: x_min } ), color = 'Black', zorder = 1 )
plt.vlines( x = x_max, ymin = 0, ymax = F.subs( { x: x_max } ), color = 'Red', zorder = 1 )
plt.show()
# Find the exact value of the integral of F over [0, 1] (= 11/5).
a, b = 0, 1
area = integrate( F, ( x, a, b ) ).evalf()
# Bare expressions below are notebook-style: they display the value.
area
# Approximate the integral in part a using Simpson's rule with n = 4, 8, 16, and 32
n = 4
n4 = round( simpsons_rule( F, a, b, n ), 7 )
n4
e4 = round( abs( area - n4 ), 7 )
'{:.7f}'.format( abs( n4 - area ) )
n = 8
n8 = round( simpsons_rule( F, a, b, n ), 7 )
n8
e8 = abs( area - n8 )
'{:.7f}'.format( abs( n8 - area ) )
n = 16
n16 = round( simpsons_rule( F, a, b, n ), 7 )
n16
e16 = abs( n16 - area )
'{:.7f}'.format( e16 )
n = 32
n32 = round( simpsons_rule( F, a, b, n ), 7 )
n32
e32 = abs( n32 - area )
'{:.7f}'.format( e32 )
# c
# If the error is k/n**p, then the error times n Superscript p should be approximately a constant. Multiply the errors in part b times n**p for p=1, 2, etc.,
# until you find a power p yielding approximately the same answer for all four values of n.
# For Simpson's rule the error scales as 1/n**4, so p = 4 below should
# yield roughly the same product for every n.
p = 4
e4 * 4 ** p
e8 * 8 ** p
e16 * 16 ** p
e32 * 32 ** p | [
"sympy.lambdify",
"matplotlib.pyplot.plot",
"sympy.symbols",
"numpy.linspace",
"sympy.integrate",
"matplotlib.pyplot.show"
] | [((1145, 1157), 'sympy.symbols', 'symbols', (['"""x"""'], {}), "('x')\n", (1152, 1157), False, 'from sympy import symbols, solve, diff, integrate, exp, sqrt, lambdify, pprint\n'), ((1250, 1268), 'sympy.lambdify', 'lambdify', (['x', 'F', 'np'], {}), '(x, F, np)\n', (1258, 1268), False, 'from sympy import symbols, solve, diff, integrate, exp, sqrt, lambdify, pprint\n'), ((1280, 1334), 'numpy.linspace', 'np.linspace', (['g_xlim[0]', 'g_xlim[1]', '(1000)'], {'endpoint': '(True)'}), '(g_xlim[0], g_xlim[1], 1000, endpoint=True)\n', (1291, 1334), True, 'import numpy as np\n'), ((1362, 1386), 'matplotlib.pyplot.plot', 'plt.plot', (['x_vals', 'y_vals'], {}), '(x_vals, y_vals)\n', (1370, 1386), True, 'import matplotlib.pyplot as plt\n'), ((1598, 1608), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1606, 1608), True, 'import matplotlib.pyplot as plt\n'), ((885, 924), 'numpy.linspace', 'np.linspace', (['a', 'b', '(n + 1)'], {'endpoint': '(True)'}), '(a, b, n + 1, endpoint=True)\n', (896, 924), True, 'import numpy as np\n'), ((1656, 1679), 'sympy.integrate', 'integrate', (['F', '(x, a, b)'], {}), '(F, (x, a, b))\n', (1665, 1679), False, 'from sympy import symbols, solve, diff, integrate, exp, sqrt, lambdify, pprint\n')] |
import numpy as np
import numpy.testing as npt
import unittest
import sys
import os
import torch
import torch.legacy.nn as nn
from _test_utils import _INPUT_SHAPE
sys.path.append(
os.path.dirname(os.path.realpath(__file__)) + "/../torch2coreml/"
)
class SingleLayerTest(unittest.TestCase):
    """Round-trip tests: a single torch.legacy.nn layer is converted to
    Core ML and both backends must produce (nearly) the same output for
    the same random input."""
    def setUp(self):
        # Fresh random input per test; shape comes from the shared fixture.
        self.input = np.random.ranf(_INPUT_SHAPE)
        # Whether torch expects a leading batch dimension to be added.
        self.torch_batch_mode = True
        self.output_count = 1
    def _forward_torch(self, torch_model):
        """Run the model in torch; strips the batch dim from outputs when
        torch_batch_mode is set.  self.input may be one array or a list."""
        if isinstance(self.input, list):
            inputs = [
                torch.from_numpy(
                    np.asarray([inp] if self.torch_batch_mode else inp)
                ).float()
                for inp in self.input
            ]
            result = torch_model.forward(inputs)
        else:
            input_tensor = torch.from_numpy(
                np.asarray(
                    [self.input] if self.torch_batch_mode else self.input
                )
            ).float()
            result = torch_model.forward(input_tensor)
        if isinstance(result, list):
            return [
                (r.numpy()[0] if self.torch_batch_mode else r.numpy())
                for r in result
            ]
        else:
            r = result.numpy()
            return r[0] if self.torch_batch_mode else r
    def _forward_coreml(self, torch_model):
        """Convert the model with the project converter and run Core ML
        prediction on CPU; returns one array, or a list when
        output_count > 1."""
        from _torch_converter import convert
        output_names = ['output']
        if self.output_count > 1:
            output_names = [
                'output_' + str(i)
                for i in range(self.output_count)
            ]
        if isinstance(self.input, list):
            input_shapes = [inp.shape for inp in self.input]
            input_names = ['input_' + str(i) for i in range(len(self.input))]
            coreml_model = convert(
                torch_model,
                input_shapes,
                input_names=input_names,
                output_names=output_names
            )
            result = coreml_model.predict(
                dict(zip(input_names, self.input)), useCPUOnly=True
            )
        else:
            coreml_model = convert(
                torch_model,
                [self.input.shape],
                output_names=output_names
            )
            result = coreml_model.predict(
                {'input': self.input}, useCPUOnly=True
            )
        if self.output_count > 1:
            return [result[name] for name in output_names]
        else:
            return result['output']
    def _assert_outputs(self, torch_output, coreml_output, decimal):
        """Assert shape equality and element-wise closeness (to ``decimal``
        places) between the torch and Core ML outputs."""
        if isinstance(torch_output, list):
            self.assertTrue(isinstance(coreml_output, list))
            self.assertEqual(len(torch_output), len(coreml_output))
            for i in range(len(torch_output)):
                tout = torch_output[i]
                cout = coreml_output[i]
                self.assertEqual(tout.shape, cout.shape)
                npt.assert_almost_equal(cout, tout, decimal=decimal)
        else:
            self.assertEqual(torch_output.shape, coreml_output.shape)
            npt.assert_almost_equal(
                coreml_output, torch_output, decimal=decimal
            )
    def _test_single_layer(self, layer, decimal=7):
        """Wrap ``layer`` in a Sequential, run both backends, compare."""
        torch_model = nn.Sequential()
        torch_model.add(layer)
        # Convert/predict first, while the torch model is still untouched.
        coreml_output = self._forward_coreml(torch_model)
        if not isinstance(coreml_output, list):
            coreml_output = coreml_output.copy()
        # XXX: pytorch legacy.nn has problem with state clearing, so we need to
        # do it manually
        for l in torch_model.modules:
            if isinstance(l.output, torch.Tensor):
                l.output = l.output.new()
        torch_output = self._forward_torch(torch_model)
        if not isinstance(torch_output, list):
            torch_output = torch_output.copy()
        self._assert_outputs(torch_output, coreml_output, decimal)
    def test_elu(self):
        self._test_single_layer(nn.ELU())
    def test_relu(self):
        self._test_single_layer(nn.ReLU())
    def test_softmax(self):
        self._test_single_layer(nn.SoftMax())
        self._test_single_layer(nn.SpatialSoftMax())
    def test_convolution(self):
        # Looser tolerance: convolution accumulates float rounding error.
        self._test_single_layer(
            nn.SpatialConvolution(3, 64, 7, 7, 2, 2, 3, 3),
            decimal=6
        )
    def test_max_pooling(self):
        self._test_single_layer(nn.SpatialMaxPooling(3, 3, 1, 1, 1, 1))
    def test_avg_pooling(self):
        self._test_single_layer(
            nn.SpatialAveragePooling(5, 5, 1, 1, 2, 2),
            decimal=6
        )
    def test_linear(self):
        # Linear expects a flat vector, not the default image-shaped input.
        self.input = self.input.flatten()
        input_size = self.input.shape[0]
        self._test_single_layer(nn.Linear(input_size, 3, True), decimal=5)
    def test_tanh(self):
        self._test_single_layer(nn.Tanh())
    def test_mul_constant(self):
        self._test_single_layer(nn.MulConstant(3.0))
    def test_zero_padding(self):
        self._test_single_layer(nn.SpatialZeroPadding(1, 2, 3, 4))
        # Negative padding crops instead of padding.
        self._test_single_layer(nn.SpatialZeroPadding(-2, -2, -2, -2))
    def test_full_convolution(self):
        self._test_single_layer(
            nn.SpatialFullConvolution(3, 1, 7, 7, 5, 5, 2, 2, 2, 2)
        )
    def test_batch_norm(self):
        self._test_single_layer(nn.SpatialBatchNormalization(3))
    def test_narrow(self):
        # Narrow's dim index assumes no extra batch dimension.
        self.torch_batch_mode = False
        self._test_single_layer(nn.Narrow(1, 1, 1))
    def test_reflection_padding(self):
        self._test_single_layer(nn.SpatialReflectionPadding(1, 2, 3, 4))
    def test_upsample_nearest(self):
        self._test_single_layer(nn.SpatialUpSamplingNearest(2))
    def test_cadd_table(self):
        # CAddTable takes a table (list) of equal-shaped inputs.
        self.input = [self.input] * 5
        self._test_single_layer(nn.CAddTable())
    def test_split_table(self):
        # SplitTable along dim 0 yields one output per slice.
        self.output_count = 3
        self.torch_batch_mode = False
        self._test_single_layer(nn.SplitTable(0))
    def test_sigmoid(self):
        self._test_single_layer(nn.Sigmoid())
    def test_power(self):
        self._test_single_layer(nn.Power(2))
| [
"torch.legacy.nn.SpatialUpSamplingNearest",
"torch.legacy.nn.SpatialMaxPooling",
"torch.legacy.nn.SpatialFullConvolution",
"torch.legacy.nn.SpatialReflectionPadding",
"torch.legacy.nn.SpatialConvolution",
"torch.legacy.nn.Sigmoid",
"torch.legacy.nn.SpatialSoftMax",
"torch.legacy.nn.Tanh",
"_torch_co... | [((341, 369), 'numpy.random.ranf', 'np.random.ranf', (['_INPUT_SHAPE'], {}), '(_INPUT_SHAPE)\n', (355, 369), True, 'import numpy as np\n'), ((3285, 3300), 'torch.legacy.nn.Sequential', 'nn.Sequential', ([], {}), '()\n', (3298, 3300), True, 'import torch.legacy.nn as nn\n'), ((203, 229), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (219, 229), False, 'import os\n'), ((1804, 1895), '_torch_converter.convert', 'convert', (['torch_model', 'input_shapes'], {'input_names': 'input_names', 'output_names': 'output_names'}), '(torch_model, input_shapes, input_names=input_names, output_names=\n output_names)\n', (1811, 1895), False, 'from _torch_converter import convert\n'), ((2135, 2202), '_torch_converter.convert', 'convert', (['torch_model', '[self.input.shape]'], {'output_names': 'output_names'}), '(torch_model, [self.input.shape], output_names=output_names)\n', (2142, 2202), False, 'from _torch_converter import convert\n'), ((3110, 3179), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['coreml_output', 'torch_output'], {'decimal': 'decimal'}), '(coreml_output, torch_output, decimal=decimal)\n', (3133, 3179), True, 'import numpy.testing as npt\n'), ((4006, 4014), 'torch.legacy.nn.ELU', 'nn.ELU', ([], {}), '()\n', (4012, 4014), True, 'import torch.legacy.nn as nn\n'), ((4074, 4083), 'torch.legacy.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (4081, 4083), True, 'import torch.legacy.nn as nn\n'), ((4146, 4158), 'torch.legacy.nn.SoftMax', 'nn.SoftMax', ([], {}), '()\n', (4156, 4158), True, 'import torch.legacy.nn as nn\n'), ((4192, 4211), 'torch.legacy.nn.SpatialSoftMax', 'nn.SpatialSoftMax', ([], {}), '()\n', (4209, 4211), True, 'import torch.legacy.nn as nn\n'), ((4291, 4337), 'torch.legacy.nn.SpatialConvolution', 'nn.SpatialConvolution', (['(3)', '(64)', '(7)', '(7)', '(2)', '(2)', '(3)', '(3)'], {}), '(3, 64, 7, 7, 2, 2, 3, 3)\n', (4312, 4337), True, 'import torch.legacy.nn as nn\n'), ((4436, 4474), 
'torch.legacy.nn.SpatialMaxPooling', 'nn.SpatialMaxPooling', (['(3)', '(3)', '(1)', '(1)', '(1)', '(1)'], {}), '(3, 3, 1, 1, 1, 1)\n', (4456, 4474), True, 'import torch.legacy.nn as nn\n'), ((4554, 4596), 'torch.legacy.nn.SpatialAveragePooling', 'nn.SpatialAveragePooling', (['(5)', '(5)', '(1)', '(1)', '(2)', '(2)'], {}), '(5, 5, 1, 1, 2, 2)\n', (4578, 4596), True, 'import torch.legacy.nn as nn\n'), ((4773, 4803), 'torch.legacy.nn.Linear', 'nn.Linear', (['input_size', '(3)', '(True)'], {}), '(input_size, 3, True)\n', (4782, 4803), True, 'import torch.legacy.nn as nn\n'), ((4874, 4883), 'torch.legacy.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (4881, 4883), True, 'import torch.legacy.nn as nn\n'), ((4951, 4970), 'torch.legacy.nn.MulConstant', 'nn.MulConstant', (['(3.0)'], {}), '(3.0)\n', (4965, 4970), True, 'import torch.legacy.nn as nn\n'), ((5038, 5071), 'torch.legacy.nn.SpatialZeroPadding', 'nn.SpatialZeroPadding', (['(1)', '(2)', '(3)', '(4)'], {}), '(1, 2, 3, 4)\n', (5059, 5071), True, 'import torch.legacy.nn as nn\n'), ((5105, 5142), 'torch.legacy.nn.SpatialZeroPadding', 'nn.SpatialZeroPadding', (['(-2)', '(-2)', '(-2)', '(-2)'], {}), '(-2, -2, -2, -2)\n', (5126, 5142), True, 'import torch.legacy.nn as nn\n'), ((5227, 5282), 'torch.legacy.nn.SpatialFullConvolution', 'nn.SpatialFullConvolution', (['(3)', '(1)', '(7)', '(7)', '(5)', '(5)', '(2)', '(2)', '(2)', '(2)'], {}), '(3, 1, 7, 7, 5, 5, 2, 2, 2, 2)\n', (5252, 5282), True, 'import torch.legacy.nn as nn\n'), ((5357, 5388), 'torch.legacy.nn.SpatialBatchNormalization', 'nn.SpatialBatchNormalization', (['(3)'], {}), '(3)\n', (5385, 5388), True, 'import torch.legacy.nn as nn\n'), ((5488, 5506), 'torch.legacy.nn.Narrow', 'nn.Narrow', (['(1)', '(1)', '(1)'], {}), '(1, 1, 1)\n', (5497, 5506), True, 'import torch.legacy.nn as nn\n'), ((5580, 5619), 'torch.legacy.nn.SpatialReflectionPadding', 'nn.SpatialReflectionPadding', (['(1)', '(2)', '(3)', '(4)'], {}), '(1, 2, 3, 4)\n', (5607, 5619), True, 'import torch.legacy.nn as 
nn\n'), ((5691, 5721), 'torch.legacy.nn.SpatialUpSamplingNearest', 'nn.SpatialUpSamplingNearest', (['(2)'], {}), '(2)\n', (5718, 5721), True, 'import torch.legacy.nn as nn\n'), ((5825, 5839), 'torch.legacy.nn.CAddTable', 'nn.CAddTable', ([], {}), '()\n', (5837, 5839), True, 'import torch.legacy.nn as nn\n'), ((5974, 5990), 'torch.legacy.nn.SplitTable', 'nn.SplitTable', (['(0)'], {}), '(0)\n', (5987, 5990), True, 'import torch.legacy.nn as nn\n'), ((6053, 6065), 'torch.legacy.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (6063, 6065), True, 'import torch.legacy.nn as nn\n'), ((6126, 6137), 'torch.legacy.nn.Power', 'nn.Power', (['(2)'], {}), '(2)\n', (6134, 6137), True, 'import torch.legacy.nn as nn\n'), ((2961, 3013), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['cout', 'tout'], {'decimal': 'decimal'}), '(cout, tout, decimal=decimal)\n', (2984, 3013), True, 'import numpy.testing as npt\n'), ((853, 918), 'numpy.asarray', 'np.asarray', (['([self.input] if self.torch_batch_mode else self.input)'], {}), '([self.input] if self.torch_batch_mode else self.input)\n', (863, 918), True, 'import numpy as np\n'), ((599, 650), 'numpy.asarray', 'np.asarray', (['([inp] if self.torch_batch_mode else inp)'], {}), '([inp] if self.torch_batch_mode else inp)\n', (609, 650), True, 'import numpy as np\n')] |
import sys
import typing
import numba as nb
import numpy as np
@nb.njit((nb.i8, nb.i8[:, :]), cache=True)
def solve(n: int, xy: np.ndarray) -> typing.NoReturn:
    """Subset-DP over bitmasks: count the orders in which all n items can
    be placed such that item j is never placed while any item flagged in
    blocked[j] is already placed.  Prints the resulting count.
    """
    # blocked[a] holds a bitmask of items that must not precede a.
    blocked = np.zeros(n, np.int64)
    for k in range(len(xy)):
        a, b = xy[k]
        blocked[a] |= 1 << b
    full = 1 << n
    # count[s] = number of valid placement orders using exactly the items in s.
    count = np.zeros(full, np.int64)
    for j in range(n):
        count[1 << j] = 1  # a single item alone is always a valid prefix
    for subset in range(full):
        for j in range(n):
            if subset >> j & 1:
                continue  # j already placed
            if subset & blocked[j]:
                continue  # some forbidden item precedes j
            count[subset | 1 << j] += count[subset]
    print(count[-1])
def main() -> typing.NoReturn:
    """Read the input from stdin and run the solver.

    First line: n and m.  The remaining tokens are m (x, y) pairs;
    indices are shifted from 1-based input to 0-based.
    """
    n, m = map(int, input().split())
    raw = np.array(
        sys.stdin.read().split(),
        dtype=np.int64,
    )
    pairs = raw.reshape(m, 2) - 1
    solve(n, pairs)
# Run only when executed as a script, not when imported as a module.
if __name__ == '__main__':
    main()
| [
"numpy.zeros",
"numba.njit",
"sys.stdin.read"
] | [((74, 115), 'numba.njit', 'nb.njit', (['(nb.i8, nb.i8[:, :])'], {'cache': '(True)'}), '((nb.i8, nb.i8[:, :]), cache=True)\n', (81, 115), True, 'import numba as nb\n'), ((201, 222), 'numpy.zeros', 'np.zeros', (['n', 'np.int64'], {}), '(n, np.int64)\n', (209, 222), True, 'import numpy as np\n'), ((309, 335), 'numpy.zeros', 'np.zeros', (['(1 << n)', 'np.int64'], {}), '(1 << n, np.int64)\n', (317, 335), True, 'import numpy as np\n'), ((713, 729), 'sys.stdin.read', 'sys.stdin.read', ([], {}), '()\n', (727, 729), False, 'import sys\n')] |
# Copyright 2019 NREL
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use
# this file except in compliance with the License. You may obtain a copy of the
# License at http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
from ..utilities import Vec3
import matplotlib.pyplot as plt
from matplotlib import rcParams
import numpy as np
def plot_turbines(ax, layout_x, layout_y, yaw_angles, D):
    """
    Plot wind plant layout from turbine locations.

    Each turbine is drawn as a black line segment of length D centered
    at the turbine location and rotated by its yaw angle.

    Args:
        ax (:py:class:`matplotlib.pyplot.axes`): figure axes.
        layout_x (np.array): wind turbine locations (east-west).
        layout_y (np.array): wind turbine locations (north-south).
        yaw_angles (np.array): yaw angles of each wind turbine, in degrees.
        D (float): wind turbine rotor diameter.
    """
    # Hoist invariants: R does not change per turbine, and the original
    # computed deg2rad/sin/cos four times per turbine for the same yaw.
    R = D / 2.
    for x, y, yaw in zip(layout_x, layout_y, yaw_angles):
        yaw_rad = np.deg2rad(yaw)
        dx = np.sin(yaw_rad) * R
        dy = np.cos(yaw_rad) * R
        ax.plot([x + dx, x - dx], [y - dy, y + dy], color='k')
def line_contour_cut_plane(cut_plane,
                           ax=None,
                           levels=None,
                           colors=None,
                           **kwargs):
    """
    Draw the cut plane as a simple line-contour plot.

    Args:
        cut_plane (:py:class:`floris.tools.cut_plane._CutPlane`):
            CutPlane Object.
        ax (:py:class:`matplotlib.pyplot.axes`): axes to draw into;
            a new figure/axes pair is created when omitted.
        levels (np.array, optional): contour levels for the plot.
            Defaults to None.
        colors (list, optional): strings of color specification info.
            Defaults to None.
    """
    if not ax:
        fig, ax = plt.subplots()

    # Arrange the flat velocity samples into the 2D grid of the plane
    # and mask missing samples so they are not contoured.
    n_x2, n_x1 = cut_plane.resolution[1], cut_plane.resolution[0]
    u_grid = cut_plane.u_mesh.reshape(n_x2, n_x1)
    masked = np.ma.masked_where(np.isnan(u_grid), u_grid)

    # Negative contour levels should be drawn solid, not dashed.
    rcParams['contour.negative_linestyle'] = 'solid'

    ax.contour(cut_plane.x1_lin, cut_plane.x2_lin, masked,
               levels=levels, colors=colors, **kwargs)

    ax.set_aspect('equal')
def visualize_cut_plane(cut_plane,
                        ax=None,
                        minSpeed=None,
                        maxSpeed=None,
                        cmap='coolwarm'):
    """
    Render the cut plane as a pseudocolor mesh with overlaid contours.

    Args:
        cut_plane (:py:class:`floris.tools.cut_plane._CutPlane`): 2D
            plane through wind plant.
        ax (:py:class:`matplotlib.pyplot.axes`): axes to draw into;
            a new figure/axes pair is created when omitted.
        minSpeed (float, optional): Minimum value of wind speed for
            contours. Defaults to None.
        maxSpeed (float, optional): Maximum value of wind speed for
            contours. Defaults to None.
        cmap (str, optional): Colormap specifier. Defaults to
            'coolwarm'.

    Returns:
        im (plt.pcolormesh): image handle
    """
    if not ax:
        fig, ax = plt.subplots()

    # Fall back to the data range when explicit bounds are not given.
    if minSpeed is None:
        minSpeed = cut_plane.u_mesh.min()
    if maxSpeed is None:
        maxSpeed = cut_plane.u_mesh.max()

    # Arrange the flat velocity samples into the plane's 2D grid and
    # mask missing samples so they render as gaps.
    u_grid = cut_plane.u_mesh.reshape(cut_plane.resolution[1],
                                      cut_plane.resolution[0])
    masked = np.ma.masked_where(np.isnan(u_grid), u_grid)

    im = ax.pcolormesh(cut_plane.x1_lin, cut_plane.x2_lin, masked,
                       cmap=cmap, vmin=minSpeed, vmax=maxSpeed)

    # Overlay faint white contour lines for readability.
    line_contour_cut_plane(cut_plane,
                           ax=ax,
                           levels=None,
                           colors='w',
                           linewidths=0.8,
                           alpha=0.3)

    ax.set_aspect('equal')
    return im
def reverse_cut_plane_x_axis_in_plot(ax):
    """
    Flip the direction of the x-axis on the given axes.

    Args:
        ax (:py:class:`matplotlib.pyplot.axes`): figure axes.
    """
    ax.invert_xaxis()
"numpy.deg2rad",
"numpy.isnan",
"matplotlib.pyplot.subplots"
] | [((2105, 2119), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2117, 2119), True, 'import matplotlib.pyplot as plt\n'), ((2306, 2322), 'numpy.isnan', 'np.isnan', (['u_mesh'], {}), '(u_mesh)\n', (2314, 2322), True, 'import numpy as np\n'), ((3494, 3508), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (3506, 3508), True, 'import matplotlib.pyplot as plt\n'), ((3829, 3845), 'numpy.isnan', 'np.isnan', (['u_mesh'], {}), '(u_mesh)\n', (3837, 3845), True, 'import numpy as np\n'), ((1215, 1230), 'numpy.deg2rad', 'np.deg2rad', (['yaw'], {}), '(yaw)\n', (1225, 1230), True, 'import numpy as np\n'), ((1261, 1276), 'numpy.deg2rad', 'np.deg2rad', (['yaw'], {}), '(yaw)\n', (1271, 1276), True, 'import numpy as np\n'), ((1307, 1322), 'numpy.deg2rad', 'np.deg2rad', (['yaw'], {}), '(yaw)\n', (1317, 1322), True, 'import numpy as np\n'), ((1353, 1368), 'numpy.deg2rad', 'np.deg2rad', (['yaw'], {}), '(yaw)\n', (1363, 1368), True, 'import numpy as np\n')] |
"""Manifold test invariants."""
import tensorflow as tf
import numpy as np
def random_constant(shape, dtype):
    """Return a tf constant of the given shape/dtype with small uniform values."""
    values = np.random.uniform(size=shape, high=1e-1)
    return tf.constant(values, dtype=dtype.as_numpy_dtype)
class TestInvariants(tf.test.TestCase):
    """Reusable invariant checks shared by per-manifold test suites.

    Each ``check_*`` method verifies one mathematical property that a
    manifold implementation must satisfy.
    """

    def check_random(self, manifold, shape, dtype):
        """Check random point generator"""
        with self.cached_session(use_gpu=True):
            x = manifold.random(shape=shape, dtype=dtype)
            self.assertEqual(list(shape), x.shape.as_list())
            x_on_manifold = manifold.check_point_on_manifold(x)
            if not tf.executing_eagerly():
                x_on_manifold = self.evaluate(x_on_manifold)
            self.assertTrue(x_on_manifold)

    def check_dist(self, manifold, shape, dtype):
        """Check the distance axioms"""
        with self.cached_session(use_gpu=True):
            x_rand = random_constant(shape=shape, dtype=dtype)
            x = manifold.projx(x_rand)
            y_rand = random_constant(shape=shape, dtype=dtype)
            # Fix: project y_rand (the original projected x_rand again),
            # otherwise y == x and the symmetry check below is vacuous.
            y = manifold.projx(y_rand)
            dist_xy = manifold.dist(x, y)
            dist_yx = manifold.dist(y, x)
            self.assertAllCloseAccordingToType(dist_xy, dist_yx)
            dist_xx = manifold.dist(x, x)
            # low precision comparison for manifolds which use trigonometric functions
            self.assertAllClose(dist_xx, tf.zeros_like(dist_xx), atol=1e-3)
            orig_shape = x.shape.as_list()
            keepdims_shape = manifold.dist(x, x, keepdims=True).shape.as_list()
            nokeepdims_shape = manifold.dist(
                x, x, keepdims=False
            ).shape.as_list()
            self.assertEqual(len(keepdims_shape), len(orig_shape))
            self.assertEqual(
                len(nokeepdims_shape), len(orig_shape) - manifold.ndims
            )
            if manifold.ndims > 0:
                self.assertEqual(
                    keepdims_shape[-manifold.ndims :], [1] * manifold.ndims
                )

    def check_inner(self, manifold, shape, dtype):
        """Check the inner product axioms"""
        with self.cached_session(use_gpu=True):
            x_rand = random_constant(shape=shape, dtype=dtype)
            x = manifold.projx(x_rand)
            u = manifold.proju(x, x_rand)
            orig_shape = u.shape.as_list()
            keepdims_shape = manifold.inner(
                x, u, u, keepdims=True
            ).shape.as_list()
            nokeepdims_shape = manifold.inner(
                x, u, u, keepdims=False
            ).shape.as_list()
            self.assertEqual(len(keepdims_shape), len(orig_shape))
            self.assertEqual(
                len(nokeepdims_shape), len(orig_shape) - manifold.ndims
            )
            if manifold.ndims > 0:
                self.assertEqual(
                    keepdims_shape[-manifold.ndims :], [1] * manifold.ndims
                )

    def check_proj(self, manifold, shape, dtype):
        """Check projection from the ambient space"""
        with self.cached_session(use_gpu=True):
            x_rand = random_constant(shape=shape, dtype=dtype)
            x = manifold.projx(x_rand)
            x_on_manifold = manifold.check_point_on_manifold(x)
            if not tf.executing_eagerly():
                x_on_manifold = self.evaluate(x_on_manifold)
            self.assertTrue(x_on_manifold)
            u_rand = random_constant(shape=shape, dtype=dtype)
            u = manifold.proju(x, u_rand)
            u_on_tangent = manifold.check_vector_on_tangent(x, u)
            if not tf.executing_eagerly():
                u_on_tangent = self.evaluate(u_on_tangent)
            self.assertTrue(u_on_tangent)

    def check_exp_log_inverse(self, manifold, shape, dtype):
        """Check that logarithmic map is the inverse of exponential map"""
        with self.cached_session(use_gpu=True):
            x = manifold.projx(random_constant(shape, dtype))
            u = manifold.proju(x, random_constant(shape, dtype))
            y = manifold.exp(x, u)
            y_on_manifold = manifold.check_point_on_manifold(y)
            if not tf.executing_eagerly():
                y_on_manifold = self.evaluate(y_on_manifold)
            self.assertTrue(y_on_manifold)
            v = manifold.log(x, y)
            v_on_tangent = manifold.check_vector_on_tangent(x, v)
            if not tf.executing_eagerly():
                v_on_tangent = self.evaluate(v_on_tangent)
            self.assertTrue(v_on_tangent)
            self.assertAllCloseAccordingToType(u, v)

    def check_transp_retr(self, manifold, shape, dtype):
        """Test that vector transport is compatible with retraction"""
        with self.cached_session(use_gpu=True):
            x = manifold.projx(random_constant(shape, dtype))
            u = manifold.proju(x, random_constant(shape, dtype))
            y = manifold.retr(x, u)
            y_on_manifold = manifold.check_point_on_manifold(y)
            if not tf.executing_eagerly():
                y_on_manifold = self.evaluate(y_on_manifold)
            # Fix: the original evaluated this check but never asserted it,
            # unlike every other check_* method.
            self.assertTrue(y_on_manifold)
            v = manifold.proju(x, random_constant(shape, dtype))
            v_ = manifold.transp(x, y, v)
            v_on_tangent = manifold.check_vector_on_tangent(y, v_)
            if not tf.executing_eagerly():
                v_on_tangent = self.evaluate(v_on_tangent)
            self.assertTrue(v_on_tangent)
            # Transport must be linear: transp(v + w) == transp(v) + transp(w).
            w = manifold.proju(x, random_constant(shape, dtype))
            w_ = manifold.transp(x, y, w)
            w_v = v + w
            w_v_ = manifold.transp(x, y, w_v)
            self.assertAllCloseAccordingToType(w_v_, w_ + v_)

    def check_ptransp_inverse(self, manifold, shape, dtype):
        """Test that parallel transport is an invertible operation"""
        with self.cached_session(use_gpu=True):
            x = manifold.projx(random_constant(shape, dtype))
            y = manifold.projx(random_constant(shape, dtype))
            u = manifold.proju(x, random_constant(shape, dtype))
            v = manifold.ptransp(x, y, u)
            v_on_tangent = manifold.check_vector_on_tangent(y, v)
            if not tf.executing_eagerly():
                v_on_tangent = self.evaluate(v_on_tangent)
            self.assertTrue(v_on_tangent)
            w = manifold.ptransp(y, x, v)
            w_on_tangent = manifold.check_vector_on_tangent(x, w)
            if not tf.executing_eagerly():
                w_on_tangent = self.evaluate(w_on_tangent)
            self.assertTrue(w_on_tangent)
            self.assertAllCloseAccordingToType(u, w)

    def check_ptransp_inner(self, manifold, shape, dtype):
        """Check that parallel transport preserves the inner product"""
        with self.cached_session(use_gpu=True):
            x = manifold.projx(random_constant(shape, dtype))
            y = manifold.projx(random_constant(shape, dtype))
            u = manifold.proju(x, random_constant(shape, dtype))
            v = manifold.proju(x, random_constant(shape, dtype))
            uv = manifold.inner(x, u, v)
            u_ = manifold.ptransp(x, y, u)
            v_ = manifold.ptransp(x, y, v)
            u_v_ = manifold.inner(y, u_, v_)
            self.assertAllCloseAccordingToType(uv, u_v_)

    def check_geodesic(self, manifold, shape, dtype):
        """Check that the exponential map lies on a geodesic"""
        with self.cached_session(use_gpu=True):
            x = manifold.projx(random_constant(shape=shape, dtype=dtype))
            u = manifold.proju(x, random_constant(shape=shape, dtype=dtype))
            y = manifold.geodesic(x, u, 1.0)
            y_on_manifold = manifold.check_point_on_manifold(y)
            if not tf.executing_eagerly():
                y_on_manifold = self.evaluate(y_on_manifold)
            self.assertTrue(y_on_manifold)
            y_ = manifold.exp(x, u)
            self.assertAllCloseAccordingToType(y, y_)

    def check_pairmean(self, manifold, shape, dtype):
        """Check that the Riemannian mean is equidistant from points"""
        with self.cached_session(use_gpu=True):
            x_rand = random_constant(shape=shape, dtype=dtype)
            x = manifold.projx(x_rand)
            y_rand = random_constant(shape=shape, dtype=dtype)
            # Fix: project y_rand (the original projected x_rand again),
            # otherwise y == x and the equidistance check is vacuous.
            y = manifold.projx(y_rand)
            m = manifold.pairmean(x, y)
            m_on_manifold = manifold.check_point_on_manifold(m)
            if not tf.executing_eagerly():
                m_on_manifold = self.evaluate(m_on_manifold)
            self.assertTrue(m_on_manifold)
            dist_x_m = manifold.dist(x, m)
            dist_y_m = manifold.dist(y, m)
            self.assertAllCloseAccordingToType(dist_x_m, dist_y_m)
| [
"tensorflow.zeros_like",
"tensorflow.executing_eagerly",
"numpy.random.uniform"
] | [((144, 183), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'shape', 'high': '(0.1)'}), '(size=shape, high=0.1)\n', (161, 183), True, 'import numpy as np\n'), ((615, 637), 'tensorflow.executing_eagerly', 'tf.executing_eagerly', ([], {}), '()\n', (635, 637), True, 'import tensorflow as tf\n'), ((1409, 1431), 'tensorflow.zeros_like', 'tf.zeros_like', (['dist_xx'], {}), '(dist_xx)\n', (1422, 1431), True, 'import tensorflow as tf\n'), ((3273, 3295), 'tensorflow.executing_eagerly', 'tf.executing_eagerly', ([], {}), '()\n', (3293, 3295), True, 'import tensorflow as tf\n'), ((3591, 3613), 'tensorflow.executing_eagerly', 'tf.executing_eagerly', ([], {}), '()\n', (3611, 3613), True, 'import tensorflow as tf\n'), ((4146, 4168), 'tensorflow.executing_eagerly', 'tf.executing_eagerly', ([], {}), '()\n', (4166, 4168), True, 'import tensorflow as tf\n'), ((4394, 4416), 'tensorflow.executing_eagerly', 'tf.executing_eagerly', ([], {}), '()\n', (4414, 4416), True, 'import tensorflow as tf\n'), ((4995, 5017), 'tensorflow.executing_eagerly', 'tf.executing_eagerly', ([], {}), '()\n', (5015, 5017), True, 'import tensorflow as tf\n'), ((5273, 5295), 'tensorflow.executing_eagerly', 'tf.executing_eagerly', ([], {}), '()\n', (5293, 5295), True, 'import tensorflow as tf\n'), ((6133, 6155), 'tensorflow.executing_eagerly', 'tf.executing_eagerly', ([], {}), '()\n', (6153, 6155), True, 'import tensorflow as tf\n'), ((6385, 6407), 'tensorflow.executing_eagerly', 'tf.executing_eagerly', ([], {}), '()\n', (6405, 6407), True, 'import tensorflow as tf\n'), ((7672, 7694), 'tensorflow.executing_eagerly', 'tf.executing_eagerly', ([], {}), '()\n', (7692, 7694), True, 'import tensorflow as tf\n'), ((8392, 8414), 'tensorflow.executing_eagerly', 'tf.executing_eagerly', ([], {}), '()\n', (8412, 8414), True, 'import tensorflow as tf\n')] |
# -*- coding: utf-8 -*-
import os
import psutil
import traceback
import h5py
import numpy as np
from buffalo.data import prepro
from buffalo.misc import aux, log
from buffalo.data.base import Data, DataOption
class MatrixMarketOptions(DataOption):
    """Option schema for matrix-market-backed data."""

    def get_default_option(self) -> aux.Option:
        """Build the default option tree for matrix market input."""
        input_opt = {
            'main': '',
            'uid': '',  # row-id is used as userid when left empty.
            'iid': ''  # col-id is used as itemid when left empty.
        }
        data_opt = {
            'internal_data_type': 'matrix',
            'validation': {
                'name': 'sample',
                'p': 0.01,
                'max_samples': 500
            },
            'batch_mb': 1024,
            'use_cache': False,
            'tmp_dir': '/tmp/',
            'path': './mm.h5py'
        }
        return aux.Option({
            'type': 'matrix_market',
            'input': input_opt,
            'data': data_opt
        })

    def is_valid_option(self, opt) -> bool:
        """Validate the option tree, raising RuntimeError on type mismatches."""
        assert super().is_valid_option(opt)
        if opt['type'] != 'matrix_market':
            raise RuntimeError('Invalid data type: %s' % opt['type'])
        if opt['data']['internal_data_type'] != 'matrix':
            raise RuntimeError('MatrixMarket only support internal data type(matrix)')
        return True
class MatrixMarket(Data):
    """Data backend that materializes a matrix market file into an HDF5 DB."""
    def __init__(self, opt, *args, **kwargs):
        super().__init__(opt, *args, **kwargs)
        self.name = 'MatrixMarket'
        self.logger = log.get_logger('MatrixMarket')
        # SPPMI value preprocessing is explicitly unsupported for this backend.
        if isinstance(self.value_prepro,
                      (prepro.SPPMI)):
            raise RuntimeError(f'{self.opt.data.value_prepro.name} does not support MatrixMarket')
        self.data_type = 'matrix'
    def _create(self, data_path, P, H):
        """Create the HDF5 database and fill its user/item id maps.

        Args:
            data_path: destination path of the HDF5 database.
            P: dict with 'main_path', 'uid_path', 'iid_path' entries.
            H: matrix market size line ("num_users num_items num_nnz").

        Returns:
            (db, num_header_lines): the open database handle and the number
            of '%'-prefixed header lines found at the top of the main file.
        """
        def get_max_column_length(fname):
            # Longest raw line (newline included) — used to size the
            # fixed-width byte-string id columns.
            with open(fname) as fin:
                max_col = 0
                for l in fin:
                    max_col = max(max_col, len(l))
            return max_col
        uid_path, iid_path, main_path = P['uid_path'], P['iid_path'], P['main_path']
        num_users, num_items, num_nnz = map(int, H.split())
        # Manually updating progress bar is a bit naive
        with log.ProgressBar(log.DEBUG, total=5, mininterval=30) as pbar:
            # Default widths assume sequential numeric ids ("1".."N").
            uid_max_col = len(str(num_users)) + 1
            if uid_path:
                uid_max_col = get_max_column_length(uid_path) + 1
            pbar.update(1)
            iid_max_col = len(str(num_items)) + 1
            if iid_path:
                iid_max_col = get_max_column_length(iid_path) + 1
            pbar.update(1)
            try:
                db = self._create_database(data_path,
                                           num_users=num_users,
                                           num_items=num_items,
                                           num_nnz=num_nnz,
                                           uid_max_col=uid_max_col,
                                           iid_max_col=iid_max_col)
                idmap = db['idmap']
                # if not given, assume id as is
                if uid_path:
                    with open(uid_path) as fin:
                        idmap['rows'][:] = np.loadtxt(fin, dtype=f'S{uid_max_col}')
                else:
                    idmap['rows'][:] = np.array([str(i) for i in range(1, num_users + 1)],
                                                dtype=f'S{uid_max_col}')
                pbar.update(1)
                if iid_path:
                    with open(iid_path) as fin:
                        idmap['cols'][:] = np.loadtxt(fin, dtype=f'S{iid_max_col}')
                else:
                    idmap['cols'][:] = np.array([str(i) for i in range(1, num_items + 1)],
                                                dtype=f'S{iid_max_col}')
                pbar.update(1)
                # Count '%'-prefixed comment lines so they can be skipped later.
                num_header_lines = 0
                with open(main_path) as fin:
                    for line in fin:
                        if line.strip().startswith('%'):
                            num_header_lines += 1
                        else:
                            break
                pbar.update(1)
            except Exception as e:
                self.logger.error('Cannot create db: %s' % (str(e)))
                self.logger.error(traceback.format_exc())
                raise
        return db, num_header_lines
    def _create_working_data(self, db, source_path, ignore_lines):
        """
        Copy the raw data into a temporary working file, extracting the
        sampled validation lines on the fly.

        Args:
            source_path: source data file path
            ignore_lines: number of lines to skip from start line
        """
        vali_indexes = [] if 'vali' not in db else db['vali']['indexes']
        vali_lines = []
        file_path = aux.get_temporary_file(self.opt.data.tmp_dir)
        with open(file_path, 'w') as w:
            # NOTE(review): fin is not closed if an exception escapes below;
            # consider a with-block.
            fin = open(source_path, mode='r')
            file_size = fin.seek(0, 2)  # seek to EOF to learn the size
            fin.seek(0, 0)
            for _ in range(ignore_lines):
                fin.readline()
            total = file_size - fin.tell()
            buffered = ''
            CHUNK_SIZE = 4096 * 1000
            total_lines = 0
            # Walk the validation line numbers in ascending order, one at a time.
            vali_indexes = sorted(vali_indexes)
            target_index = vali_indexes[0] if vali_indexes else -1
            vali_indexes = vali_indexes[1:]
            with log.ProgressBar(log.INFO, total=total, mininterval=10) as pbar:
                while True:
                    buffered += fin.read(CHUNK_SIZE)
                    if buffered == '':
                        break
                    current_file_position = fin.tell()
                    pbar.update(CHUNK_SIZE)
                    num_lines_on_buffer = buffered.count('\n')
                    # search the position of validation sample and extract
                    # it from training data
                    while target_index >= 0 and target_index <= (total_lines + num_lines_on_buffer):
                        no_line = total_lines
                        new_buffered = ''
                        from_index = 0
                        for idx, c in enumerate(buffered):
                            if c == '\n':
                                if no_line == target_index:
                                    # Found the sampled line: keep it for
                                    # validation, write everything before it
                                    # to the working file, and continue with
                                    # the remainder of the buffer.
                                    vali_lines.append(buffered[from_index:idx])
                                    if from_index > 0:
                                        w.write(buffered[0:from_index])
                                    new_buffered = buffered[idx + 1:]
                                    no_line += 1
                                    total_lines += 1
                                    num_lines_on_buffer -= 1
                                    break
                                no_line += 1
                                total_lines += 1
                                from_index = idx + 1
                                num_lines_on_buffer -= 1
                        buffered = new_buffered
                        if vali_indexes:
                            target_index, vali_indexes = vali_indexes[0], vali_indexes[1:]
                        else:
                            target_index = -1
                    # Flush complete lines to the working file; keep the
                    # trailing partial line buffered for the next chunk.
                    where = buffered.rfind('\n')
                    total_lines += num_lines_on_buffer
                    if where != -1:
                        w.write(buffered[:where + 1])
                        buffered = buffered[where + 1:]
                    elif current_file_position == file_size:
                        w.write(buffered)
                        buffered = ''
            w.close()
            fin.close()
        return w.name, vali_lines
    def create(self) -> h5py.File:
        """Build (or reopen from cache) the HDF5 database for the input."""
        mm_main_path = self.opt.input.main
        mm_uid_path = self.opt.input.uid
        mm_iid_path = self.opt.input.iid
        data_path = self.opt.data.path
        # Reuse an existing database file when caching is enabled.
        if os.path.isfile(data_path) and self.opt.data.use_cache:
            self.logger.info('Use cached DB on %s' % data_path)
            self.open(data_path)
            return
        self.logger.info('Create the database from matrix market file.')
        # Skip '%' comment lines; the first non-comment line is the size header.
        with open(mm_main_path) as fin:
            header = '%'
            while header.startswith('%'):
                header = fin.readline()
        self.logger.debug('Building meta part...')
        db, num_header_lines = self._create(data_path,
                                            {'main_path': mm_main_path,
                                             'uid_path': mm_uid_path,
                                             'iid_path': mm_iid_path},
                                            header)
        try:
            num_header_lines += 1  # add metaline
            self.logger.info('Creating working data...')
            tmp_main, validation_data = self._create_working_data(db,
                                                                  mm_main_path,
                                                                  num_header_lines)
            self.logger.debug(f'Working data is created on {tmp_main}')
            self.logger.info('Building data part...')
            self._build_data(db, tmp_main, validation_data)
            db.attrs['completed'] = 1
            db.close()
            self.handle = h5py.File(data_path, 'r')
            self.path = data_path
        except Exception as e:
            self.logger.error('Cannot create db: %s' % (str(e)))
            self.logger.error(traceback.format_exc().splitlines())
            raise
        finally:
            # NOTE(review): 'patr' looks like a typo, so this cleanup is dead
            # code.  Simply changing it to 'path' would be wrong too: finally
            # also runs on success and would delete the freshly built DB.
            # An error-only cleanup flag seems intended — confirm and fix.
            if hasattr(self, 'patr'):
                if os.path.isfile(self.path):
                    os.remove(self.path)
        self.logger.info('DB built on %s' % data_path)
| [
"traceback.format_exc",
"buffalo.misc.log.get_logger",
"buffalo.misc.log.ProgressBar",
"h5py.File",
"buffalo.misc.aux.Option",
"os.path.isfile",
"os.remove",
"numpy.loadtxt",
"buffalo.misc.aux.get_temporary_file"
] | [((964, 979), 'buffalo.misc.aux.Option', 'aux.Option', (['opt'], {}), '(opt)\n', (974, 979), False, 'from buffalo.misc import aux, log\n'), ((1554, 1584), 'buffalo.misc.log.get_logger', 'log.get_logger', (['"""MatrixMarket"""'], {}), "('MatrixMarket')\n", (1568, 1584), False, 'from buffalo.misc import aux, log\n'), ((4778, 4823), 'buffalo.misc.aux.get_temporary_file', 'aux.get_temporary_file', (['self.opt.data.tmp_dir'], {}), '(self.opt.data.tmp_dir)\n', (4800, 4823), False, 'from buffalo.misc import aux, log\n'), ((2269, 2320), 'buffalo.misc.log.ProgressBar', 'log.ProgressBar', (['log.DEBUG'], {'total': '(5)', 'mininterval': '(30)'}), '(log.DEBUG, total=5, mininterval=30)\n', (2284, 2320), False, 'from buffalo.misc import aux, log\n'), ((7872, 7897), 'os.path.isfile', 'os.path.isfile', (['data_path'], {}), '(data_path)\n', (7886, 7897), False, 'import os\n'), ((5359, 5413), 'buffalo.misc.log.ProgressBar', 'log.ProgressBar', (['log.INFO'], {'total': 'total', 'mininterval': '(10)'}), '(log.INFO, total=total, mininterval=10)\n', (5374, 5413), False, 'from buffalo.misc import aux, log\n'), ((9334, 9359), 'h5py.File', 'h5py.File', (['data_path', '"""r"""'], {}), "(data_path, 'r')\n", (9343, 9359), False, 'import h5py\n'), ((9681, 9706), 'os.path.isfile', 'os.path.isfile', (['self.path'], {}), '(self.path)\n', (9695, 9706), False, 'import os\n'), ((3265, 3305), 'numpy.loadtxt', 'np.loadtxt', (['fin'], {'dtype': 'f"""S{uid_max_col}"""'}), "(fin, dtype=f'S{uid_max_col}')\n", (3275, 3305), True, 'import numpy as np\n'), ((3643, 3683), 'numpy.loadtxt', 'np.loadtxt', (['fin'], {'dtype': 'f"""S{iid_max_col}"""'}), "(fin, dtype=f'S{iid_max_col}')\n", (3653, 3683), True, 'import numpy as np\n'), ((4360, 4382), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (4380, 4382), False, 'import traceback\n'), ((9732, 9752), 'os.remove', 'os.remove', (['self.path'], {}), '(self.path)\n', (9741, 9752), False, 'import os\n'), ((9536, 9558), 'traceback.format_exc', 
'traceback.format_exc', ([], {}), '()\n', (9556, 9558), False, 'import traceback\n')] |
import functools
import time

import numpy as np
def time_it(func):
    """Decorator that prints the wall-clock duration of each call.

    Args:
        func: the callable to instrument.

    Returns:
        A wrapper that forwards all arguments and the return value,
        printing the elapsed time of every invocation.
    """
    # functools.wraps preserves func's __name__/__doc__ on the wrapper,
    # which the original version lost.
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        # perf_counter is monotonic and high-resolution, unlike time.time.
        start = time.perf_counter()
        ret = func(*args, **kwargs)
        elapsed = time.perf_counter() - start
        print(f'Function took {elapsed}s')
        return ret
    return wrapper
# Seeded RNG so the demo is reproducible from run to run.
rng = np.random.RandomState(0)

# A large array of floats gives the sort something measurable to do.
nums = rng.random(10_000_000)

# Wrap np.sort so every call reports its wall-clock duration,
# then run the instrumented sort once.
timed_sort = time_it(np.sort)
timed_sort(nums)
"time.time",
"numpy.random.RandomState"
] | [((269, 293), 'numpy.random.RandomState', 'np.random.RandomState', (['(0)'], {}), '(0)\n', (290, 293), True, 'import numpy as np\n'), ((102, 113), 'time.time', 'time.time', ([], {}), '()\n', (111, 113), False, 'import time\n'), ((164, 175), 'time.time', 'time.time', ([], {}), '()\n', (173, 175), False, 'import time\n')] |
import numpy as np
import cv2
from scipy.interpolate import *
from matplotlib.pyplot import *
from sklearn.linear_model import LinearRegression
import math
class SlideWindow:
    """Sliding-window lane detector operating on a binary image.

    ``w_slidewindow`` scans a horizontal band to locate the starting
    columns of the left/right lane lines; ``h_slidewindow`` then tracks
    both lines upward and derives a steering angle from the fitted lane
    center.  Both methods also return an annotated debug image.
    """

    def __init__(self):
        # Fit results / lane x-positions; not used internally — presumably
        # populated by callers (TODO confirm).
        self.left_fit = None
        self.right_fit = None
        self.leftx = None
        self.rightx = None

    def w_slidewindow(self, img, dist_threshold=10):
        """Search a horizontal band for the left/right lane start columns.

        Args:
            img: single-channel binary image (nonzero pixels = lane marks).
            dist_threshold: kept for interface compatibility; the lane
                distance check that used it is currently disabled.

        Returns:
            (found, left_start_x, right_start_x, debug_img)
        """
        height, width = img.shape
        # Work on a 50-pixel-high band near the bottom of the frame.
        roi_img = img[height - 150:height - 100, :].copy()
        roi_height, roi_width = roi_img.shape
        cf_img = np.dstack((roi_img, roi_img, roi_img))
        window_height = 20
        window_width = 30
        minpix = 10  # minimum hits for a window to count as a lane
        n_windows = roi_width // window_width // 2
        # Draw the vertical center line for debugging.
        pts_center = np.array([[roi_width // 2, 0], [roi_width // 2, roi_height]], np.int32)
        cv2.polylines(cf_img, [pts_center], False, (0, 120, 120), 1)
        nonzero = roi_img.nonzero()
        nonzeroy = np.array(nonzero[0])
        nonzerox = np.array(nonzero[1])
        x_center = roi_width // 2
        y_center = roi_height // 2
        left_idx = 0
        right_idx = 0
        find_left = False
        find_right = False
        left_start_x = None
        left_start_y = None
        right_start_x = None
        right_start_y = None
        for i in range(0, n_windows):
            # Slide each search window outward until its lane is found.
            if find_left is False:
                win_left_y_low = y_center - window_height // 2
                win_left_y_high = y_center + window_height // 2
                win_left_x_high = x_center - left_idx * window_width
                win_left_x_low = x_center - (left_idx + 1) * window_width
            if find_right is False:
                win_right_y_low = y_center - window_height // 2
                win_right_y_high = y_center + window_height // 2
                win_right_x_low = x_center + right_idx * window_width
                win_right_x_high = x_center + (right_idx + 1) * window_width
            cv2.rectangle(cf_img, (win_left_x_low, win_left_y_low), (win_left_x_high, win_left_y_high), (0, 255, 0), 1)
            cv2.rectangle(cf_img, (win_right_x_low, win_right_y_low), (win_right_x_high, win_right_y_high), (0, 0, 255), 1)
            good_left_inds = ((nonzeroy >= win_left_y_low) & (nonzeroy < win_left_y_high) & (nonzerox >= win_left_x_low) & (nonzerox < win_left_x_high)).nonzero()[0]
            good_right_inds = ((nonzeroy >= win_right_y_low) & (nonzeroy < win_right_y_high) & (nonzerox >= win_right_x_low) & (nonzerox < win_right_x_high)).nonzero()[0]
            if len(good_left_inds) > minpix and find_left is False:
                find_left = True
                # Fix: np.int was removed in NumPy 1.24 (it was an alias of
                # the builtin int), so use int() directly.
                left_start_x = int(np.mean(nonzerox[good_left_inds]))
                left_start_y = roi_height // 2
                # Fix: inner loop variable renamed to j — the original reused
                # i, shadowing the window index.
                for j in range(len(good_left_inds)):
                    cv2.circle(cf_img, (nonzerox[good_left_inds[j]], nonzeroy[good_left_inds[j]]), 1, (0, 255, 0), -1)
            else:
                left_idx += 1
            if len(good_right_inds) > minpix and find_right is False:
                find_right = True
                right_start_x = int(np.mean(nonzerox[good_right_inds]))
                right_start_y = roi_height // 2
                for j in range(len(good_right_inds)):
                    cv2.circle(cf_img, (nonzerox[good_right_inds[j]], nonzeroy[good_right_inds[j]]), 1, (0, 0, 255), -1)
            else:
                right_idx += 1
            if left_start_x is not None and right_start_x is not None:
                # Both lanes located: mark them and return immediately.
                cv2.circle(cf_img, (right_start_x, right_start_y), 3, (255, 0, 0), -1)
                cv2.circle(cf_img, (left_start_x, left_start_y), 3, (255, 0, 0), -1)
                return True, left_start_x, right_start_x, cf_img
        return False, left_start_x, right_start_x, cf_img

    def h_slidewindow(self, img, x_left_start, x_right_start):
        """Track both lane lines upward from their start columns.

        Args:
            img: single-channel binary image.
            x_left_start: left lane start column from ``w_slidewindow``.
            x_right_start: right lane start column from ``w_slidewindow``.

        Returns:
            (x_left_start, x_right_start, debug_img, steer_theta) where
            steer_theta is the lane-center deviation angle in degrees.
        """
        h, w = img.shape
        output_img = np.dstack((img, img, img))
        nonzero = img.nonzero()
        nonzeroy = np.array(nonzero[0])
        nonzerox = np.array(nonzero[1])
        x_left = x_left_start
        x_right = x_right_start
        # NOTE(review): h/2 is a float on Python 3; older OpenCV builds
        # accept float coordinates — confirm the target cv2 version.
        y_start = h/2 + 100
        window_height = 30
        window_width = 26
        minpix = window_width * window_height / 30
        n_windows = 5
        center_x = np.zeros(5, dtype='f')
        center_y = np.zeros(5, dtype='f')
        size_center = 0
        for i in range(n_windows):
            cv2.rectangle(output_img, (x_left - window_width, y_start - i * window_height), (x_left + window_width, y_start - (i + 1) * window_height), (0, 255, 255), 1)
            cv2.rectangle(output_img, (x_right - window_width, y_start - i * window_height), (x_right + window_width, y_start - (i + 1) * window_height), (255, 0, 255), 1)
            good_left_inds = ((nonzerox >= x_left - window_width) & (nonzerox < x_left + window_width) & (nonzeroy < y_start - i * window_height) & (nonzeroy >= y_start - (i + 1) * window_height)).nonzero()[0]
            good_right_inds = ((nonzerox >= x_right - window_width) & (nonzerox <= x_right + window_width) & (nonzeroy < y_start - i * window_height) & (nonzeroy >= y_start - (i + 1) * window_height)).nonzero()[0]
            # Re-center each window on the mean x of the pixels it captured.
            if len(good_left_inds) > minpix:
                # Fix: np.int -> int (removed from NumPy 1.24).
                x_left = int(np.mean(nonzerox[good_left_inds]))
            if len(good_right_inds) > minpix:
                x_right = int(np.mean(nonzerox[good_right_inds]))
            center_point = int((x_left + x_right) / 2)
            if i < 5:
                center_x[i] = np.array([center_point])
                center_y[i] = np.array([y_start - i * window_height - 10])
                size_center += 1
        # Fit a straight line through the tracked center points.
        fp1 = np.polyfit(center_y, center_x, 1)
        f1 = np.poly1d(fp1)
        returns_x = f1(center_y)
        cv2.line(output_img, (returns_x[0], center_y[0]), (returns_x[size_center - 1], center_y[size_center - 1]), (0, 0, 255))
        for i in range(5):
            cv2.circle(output_img, (returns_x[i], center_y[i]), 3, (0, 0, 255), -1)
        cv2.circle(output_img, (center_x[0], center_y[0]), 9, (255, 50, 0), -1)
        # Deviation angle between the bottom center point and the far end
        # of the fitted center line.
        steer_theta = math.degrees(math.atan((center_x[0] - returns_x[size_center - 1]) / (center_y[size_center - 1] - center_y[0])))
        return x_left_start, x_right_start, output_img, steer_theta
| [
"cv2.rectangle",
"numpy.dstack",
"numpy.mean",
"numpy.polyfit",
"cv2.polylines",
"cv2.line",
"numpy.array",
"numpy.zeros",
"cv2.circle",
"math.atan",
"numpy.poly1d",
"numpy.int",
"sklearn.linear_model.LinearRegression"
] | [((523, 561), 'numpy.dstack', 'np.dstack', (['(roi_img, roi_img, roi_img)'], {}), '((roi_img, roi_img, roi_img))\n', (532, 561), True, 'import numpy as np\n'), ((703, 774), 'numpy.array', 'np.array', (['[[roi_width // 2, 0], [roi_width // 2, roi_height]]', 'np.int32'], {}), '([[roi_width // 2, 0], [roi_width // 2, roi_height]], np.int32)\n', (711, 774), True, 'import numpy as np\n'), ((777, 837), 'cv2.polylines', 'cv2.polylines', (['cf_img', '[pts_center]', '(False)', '(0, 120, 120)', '(1)'], {}), '(cf_img, [pts_center], False, (0, 120, 120), 1)\n', (790, 837), False, 'import cv2\n'), ((915, 935), 'numpy.array', 'np.array', (['nonzero[0]'], {}), '(nonzero[0])\n', (923, 935), True, 'import numpy as np\n'), ((982, 1002), 'numpy.array', 'np.array', (['nonzero[1]'], {}), '(nonzero[1])\n', (990, 1002), True, 'import numpy as np\n'), ((4164, 4182), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (4180, 4182), False, 'from sklearn.linear_model import LinearRegression\n'), ((4229, 4255), 'numpy.dstack', 'np.dstack', (['(img, img, img)'], {}), '((img, img, img))\n', (4238, 4255), True, 'import numpy as np\n'), ((4305, 4325), 'numpy.array', 'np.array', (['nonzero[0]'], {}), '(nonzero[0])\n', (4313, 4325), True, 'import numpy as np\n'), ((4345, 4365), 'numpy.array', 'np.array', (['nonzero[1]'], {}), '(nonzero[1])\n', (4353, 4365), True, 'import numpy as np\n'), ((4737, 4759), 'numpy.zeros', 'np.zeros', (['(5)'], {'dtype': '"""f"""'}), "(5, dtype='f')\n", (4745, 4759), True, 'import numpy as np\n'), ((4782, 4804), 'numpy.zeros', 'np.zeros', (['(5)'], {'dtype': '"""f"""'}), "(5, dtype='f')\n", (4790, 4804), True, 'import numpy as np\n'), ((4823, 4850), 'numpy.zeros', 'np.zeros', (['(5, 2)'], {'dtype': '"""f"""'}), "((5, 2), dtype='f')\n", (4831, 4850), True, 'import numpy as np\n'), ((6858, 6891), 'numpy.polyfit', 'np.polyfit', (['center_y', 'center_x', '(1)'], {}), '(center_y, center_x, 1)\n', (6868, 6891), True, 'import numpy as np\n'), 
((6903, 6917), 'numpy.poly1d', 'np.poly1d', (['fp1'], {}), '(fp1)\n', (6912, 6917), True, 'import numpy as np\n'), ((6959, 7083), 'cv2.line', 'cv2.line', (['output_img', '(returns_x[0], center_y[0])', '(returns_x[size_center - 1], center_y[size_center - 1])', '(0, 0, 255)'], {}), '(output_img, (returns_x[0], center_y[0]), (returns_x[size_center - \n 1], center_y[size_center - 1]), (0, 0, 255))\n', (6967, 7083), False, 'import cv2\n'), ((7181, 7252), 'cv2.circle', 'cv2.circle', (['output_img', '(center_x[0], center_y[0])', '(9)', '(255, 50, 0)', '(-1)'], {}), '(output_img, (center_x[0], center_y[0]), 9, (255, 50, 0), -1)\n', (7191, 7252), False, 'import cv2\n'), ((2125, 2236), 'cv2.rectangle', 'cv2.rectangle', (['cf_img', '(win_left_x_low, win_left_y_low)', '(win_left_x_high, win_left_y_high)', '(0, 255, 0)', '(1)'], {}), '(cf_img, (win_left_x_low, win_left_y_low), (win_left_x_high,\n win_left_y_high), (0, 255, 0), 1)\n', (2138, 2236), False, 'import cv2\n'), ((2243, 2358), 'cv2.rectangle', 'cv2.rectangle', (['cf_img', '(win_right_x_low, win_right_y_low)', '(win_right_x_high, win_right_y_high)', '(0, 0, 255)', '(1)'], {}), '(cf_img, (win_right_x_low, win_right_y_low), (win_right_x_high,\n win_right_y_high), (0, 0, 255), 1)\n', (2256, 2358), False, 'import cv2\n'), ((4919, 5084), 'cv2.rectangle', 'cv2.rectangle', (['output_img', '(x_left - window_width, y_start - i * window_height)', '(x_left + window_width, y_start - (i + 1) * window_height)', '(0, 255, 255)', '(1)'], {}), '(output_img, (x_left - window_width, y_start - i *\n window_height), (x_left + window_width, y_start - (i + 1) *\n window_height), (0, 255, 255), 1)\n', (4932, 5084), False, 'import cv2\n'), ((5085, 5252), 'cv2.rectangle', 'cv2.rectangle', (['output_img', '(x_right - window_width, y_start - i * window_height)', '(x_right + window_width, y_start - (i + 1) * window_height)', '(255, 0, 255)', '(1)'], {}), '(output_img, (x_right - window_width, y_start - i *\n window_height), (x_right + window_width, 
y_start - (i + 1) *\n window_height), (255, 0, 255), 1)\n', (5098, 5252), False, 'import cv2\n'), ((6350, 6380), 'numpy.int', 'np.int', (['((x_left + x_right) / 2)'], {}), '((x_left + x_right) / 2)\n', (6356, 6380), True, 'import numpy as np\n'), ((7108, 7179), 'cv2.circle', 'cv2.circle', (['output_img', '(returns_x[i], center_y[i])', '(3)', '(0, 0, 255)', '(-1)'], {}), '(output_img, (returns_x[i], center_y[i]), 3, (0, 0, 255), -1)\n', (7118, 7179), False, 'import cv2\n'), ((7280, 7382), 'math.atan', 'math.atan', (['((center_x[0] - returns_x[size_center - 1]) / (center_y[size_center - 1] -\n center_y[0]))'], {}), '((center_x[0] - returns_x[size_center - 1]) / (center_y[\n size_center - 1] - center_y[0]))\n', (7289, 7382), False, 'import math\n'), ((3803, 3873), 'cv2.circle', 'cv2.circle', (['cf_img', '(right_start_x, right_start_y)', '(3)', '(255, 0, 0)', '(-1)'], {}), '(cf_img, (right_start_x, right_start_y), 3, (255, 0, 0), -1)\n', (3813, 3873), False, 'import cv2\n'), ((3886, 3954), 'cv2.circle', 'cv2.circle', (['cf_img', '(left_start_x, left_start_y)', '(3)', '(255, 0, 0)', '(-1)'], {}), '(cf_img, (left_start_x, left_start_y), 3, (255, 0, 0), -1)\n', (3896, 3954), False, 'import cv2\n'), ((6516, 6540), 'numpy.array', 'np.array', (['[center_point]'], {}), '([center_point])\n', (6524, 6540), True, 'import numpy as np\n'), ((6571, 6615), 'numpy.array', 'np.array', (['[y_start - i * window_height - 10]'], {}), '([y_start - i * window_height - 10])\n', (6579, 6615), True, 'import numpy as np\n'), ((2832, 2865), 'numpy.mean', 'np.mean', (['nonzerox[good_left_inds]'], {}), '(nonzerox[good_left_inds])\n', (2839, 2865), True, 'import numpy as np\n'), ((2986, 3089), 'cv2.circle', 'cv2.circle', (['cf_img', '(nonzerox[good_left_inds[i]], nonzeroy[good_left_inds[i]])', '(1)', '(0, 255, 0)', '(-1)'], {}), '(cf_img, (nonzerox[good_left_inds[i]], nonzeroy[good_left_inds[i]\n ]), 1, (0, 255, 0), -1)\n', (2996, 3089), False, 'import cv2\n'), ((3278, 3312), 'numpy.mean', 
'np.mean', (['nonzerox[good_right_inds]'], {}), '(nonzerox[good_right_inds])\n', (3285, 3312), True, 'import numpy as np\n'), ((3435, 3540), 'cv2.circle', 'cv2.circle', (['cf_img', '(nonzerox[good_right_inds[i]], nonzeroy[good_right_inds[i]])', '(1)', '(0, 0, 255)', '(-1)'], {}), '(cf_img, (nonzerox[good_right_inds[i]], nonzeroy[good_right_inds[\n i]]), 1, (0, 0, 255), -1)\n', (3445, 3540), False, 'import cv2\n'), ((6135, 6168), 'numpy.mean', 'np.mean', (['nonzerox[good_left_inds]'], {}), '(nonzerox[good_left_inds])\n', (6142, 6168), True, 'import numpy as np\n'), ((6286, 6320), 'numpy.mean', 'np.mean', (['nonzerox[good_right_inds]'], {}), '(nonzerox[good_right_inds])\n', (6293, 6320), True, 'import numpy as np\n')] |
from os.path import join, dirname, abspath
from numpy import mean
from joblib import load as joblib_load
from pickle import load as pickle_load
from ase.io import read
from pymatgen.io.ase import AseAtomsAdaptor
from ffp4mof.featurize import get_features
# Names of all force-field precursors (FFPs) for which trained models ship
# with the package; also the default set computed by get_ffps().
AVAILABLE_FORCE_FIELD_PRECURSORS = [
    "partial_charge",
    "fluctuating_polarizability",
    "FF_polarizability",
    "C6_coefficient",
    "QDO_mass",
    "QDO_charge",
    "QDO_frequency",
    "a_electron_parameter",
    "b_electron_parameter",
]
# FFPs whose models were trained on log10-transformed targets; their
# predictions are mapped back with 10 ** value in get_ffps().
LOG10_FORCE_FIELD_PRECURSORS = [
    "fluctuating_polarizability",
    "FF_polarizability",
    "C6_coefficient",
]
def _get_ffp(features, ffp_type):
    """Predict one force-field precursor for every site described by *features*.

    Loads the fitted scaler and an ensemble of five pickled models for
    *ffp_type* from the package directory, scales the features, and returns
    the ensemble-averaged prediction.

    Args:
        features: 2-D array-like of per-site feature vectors (one row per site).
        ffp_type: Name of the precursor, one of AVAILABLE_FORCE_FIELD_PRECURSORS.

    Returns:
        numpy array of predicted values, averaged over the five models.
    """
    # Hoist the loop-invariant package directory lookup out of the loop.
    base_dir = dirname(abspath(__file__))
    scaler = joblib_load(join(base_dir, "scalers", ffp_type, "scaler.gz"))
    scaled_features = scaler.transform(features)
    targets = []
    for i in range(5):  # ensemble of 5 independently trained models
        model_path = join(base_dir, "models", ffp_type, f"best_model_{i}.pickle")
        # Use a context manager so the file handle is closed (the original
        # passed a bare open() to pickle.load and leaked the handle).
        with open(model_path, "rb") as model_file:
            model = pickle_load(model_file)
        targets.append(model.predict(scaled_features))
    return mean(targets, axis=0)
def get_ffps(filename, ffps_to_calc=None):
    """Predict force-field precursors for a structure and write them to JSON.

    Args:
        filename: Path to a structure file readable by ASE (e.g. a CIF).
        ffps_to_calc: Optional list of precursor names to compute; defaults to
            AVAILABLE_FORCE_FIELD_PRECURSORS. Every entry must be one of the
            available precursors.

    Side effects:
        Writes "<structure_name>.json" (a pymatgen Structure JSON with the
        predicted values attached as per-site properties) to the current
        working directory.
    """
    adaptor = AseAtomsAdaptor()
    structure = adaptor.get_structure(read(filename))
    # Assumes a 3-character extension (e.g. ".cif") — TODO confirm for other formats.
    structure_name = filename.split("/")[-1][:-4]
    features = get_features(structure)
    if ffps_to_calc is None:
        ffps_to_calc = AVAILABLE_FORCE_FIELD_PRECURSORS
    for ffp_type in ffps_to_calc:
        assert ffp_type in AVAILABLE_FORCE_FIELD_PRECURSORS
        ffp_values = _get_ffp(features, ffp_type)
        if ffp_type == "partial_charge":
            # Shift so the predicted charges sum to zero (charge neutrality).
            ffp_values = ffp_values - sum(ffp_values) / ffp_values.size
        if ffp_type in LOG10_FORCE_FIELD_PRECURSORS:
            # These models are trained on log10 targets; undo the transform.
            ffp_values = 10 ** (ffp_values)
        structure.add_site_property(ffp_type, ffp_values.tolist())
    structure.to("json", f"{structure_name}.json")
| [
"numpy.mean",
"ffp4mof.featurize.get_features",
"pymatgen.io.ase.AseAtomsAdaptor",
"ase.io.read",
"os.path.abspath"
] | [((1038, 1059), 'numpy.mean', 'mean', (['targets'], {'axis': '(0)'}), '(targets, axis=0)\n', (1042, 1059), False, 'from numpy import mean\n'), ((1133, 1150), 'pymatgen.io.ase.AseAtomsAdaptor', 'AseAtomsAdaptor', ([], {}), '()\n', (1148, 1150), False, 'from pymatgen.io.ase import AseAtomsAdaptor\n'), ((1264, 1287), 'ffp4mof.featurize.get_features', 'get_features', (['structure'], {}), '(structure)\n', (1276, 1287), False, 'from ffp4mof.featurize import get_features\n'), ((1183, 1197), 'ase.io.read', 'read', (['filename'], {}), '(filename)\n', (1187, 1197), False, 'from ase.io import read\n'), ((703, 720), 'os.path.abspath', 'abspath', (['__file__'], {}), '(__file__)\n', (710, 720), False, 'from os.path import join, dirname, abspath\n'), ((894, 911), 'os.path.abspath', 'abspath', (['__file__'], {}), '(__file__)\n', (901, 911), False, 'from os.path import join, dirname, abspath\n')] |
import datetime
import logging
import numpy as np
from dateutil.parser import parse
from great_expectations.core import ExpectationSuite
from great_expectations.core.batch import Batch
from great_expectations.core.expectation_configuration import ExpectationConfiguration
from great_expectations.dataset import Dataset, PandasDataset
from great_expectations.exceptions import ProfilerError
from great_expectations.execution_engine import (
PandasExecutionEngine,
SparkDFExecutionEngine,
SqlAlchemyExecutionEngine,
)
from great_expectations.expectations.metrics.util import attempt_allowing_relative_error
from great_expectations.profile.base import (
OrderedProfilerCardinality,
ProfilerTypeMapping,
profiler_data_types_with_mapping,
profiler_semantic_types,
)
from great_expectations.validator.validation_graph import MetricConfiguration
from great_expectations.validator.validator import Validator
# Module-level logger, named after the module for hierarchical configuration.
logger = logging.getLogger(__name__)
class UserConfigurableProfiler:
"""
The UserConfigurableProfiler is used to build an expectation suite from a dataset. The expectations built are
strict - they can be used to determine whether two tables are the same.
The profiler may be instantiated with or without a number of configuration arguments. Once a profiler is
instantiated, if these arguments change, a new profiler will be needed.
A profiler is used to build a suite without a config as follows:
profiler = UserConfigurableProfiler(dataset)
suite = profiler.build_suite()
A profiler is used to build a suite with a semantic_types dict, as follows:
semantic_types_dict = {
"numeric": ["c_acctbal"],
"string": ["c_address","c_custkey"],
"value_set": ["c_nationkey","c_mktsegment", 'c_custkey', 'c_name', 'c_address', 'c_phone'],
}
profiler = UserConfigurableProfiler(dataset, semantic_types_dict=semantic_types_dict)
suite = profiler.build_suite()
"""
def __init__(
self,
profile_dataset,
excluded_expectations: list = None,
ignored_columns: list = None,
not_null_only: bool = False,
primary_or_compound_key: list = False,
semantic_types_dict: dict = None,
table_expectations_only: bool = False,
value_set_threshold: str = "MANY",
):
"""
The UserConfigurableProfiler is used to build an expectation suite from a dataset. The profiler may be
instantiated with or without a config. The config may contain a semantic_types dict or not. Once a profiler is
instantiated, if config items change, a new profiler will be needed.
Write an entry on how to use the profiler for the GE docs site
Args:
profile_dataset: A Great Expectations Dataset or Validator object
excluded_expectations: A list of expectations to not include in the suite
ignored_columns: A list of columns for which you would like to NOT create expectations
not_null_only: Boolean, default False. By default, each column is evaluated for nullity. If the column
values contain fewer than 50% null values, then the profiler will add
`expect_column_values_to_not_be_null`; if greater than 50% it will add
`expect_column_values_to_be_null`. If not_null_only is set to True, the profiler will add a
not_null expectation irrespective of the percent nullity (and therefore will not add an
`expect_column_values_to_be_null`
primary_or_compound_key: A list containing one or more columns which are a dataset's primary or
compound key. This will create an `expect_column_values_to_be_unique` or
`expect_compound_columns_to_be_unique` expectation. This will occur even if one or more of the
primary_or_compound_key columns are specified in ignored_columns
semantic_types_dict: A dictionary where the keys are available semantic_types (see profiler.base.profiler_semantic_types)
and the values are lists of columns for which you would like to create semantic_type specific
expectations e.g.:
"semantic_types": { "value_set": ["state","country"], "numeric":["age", "amount_due"]}
table_expectations_only: Boolean, default False. If True, this will only create the two table level expectations
available to this profiler (`expect_table_columns_to_match_ordered_list` and
`expect_table_row_count_to_be_between`). If a primary_or_compound key is specified, it will create
a uniqueness expectation for that column as well
value_set_threshold: Takes a string from the following ordered list - "none", "one", "two",
"very_few", "few", "many", "very_many", "unique". When the profiler runs without a semantic_types
dict, each column is profiled for cardinality. This threshold determines the greatest cardinality
for which to add `expect_column_values_to_be_in_set`. For example, if value_set_threshold is set to
"unique", it will add a value_set expectation for every included column. If set to "few", it will
add a value_set expectation for columns whose cardinality is one of "one", "two", "very_few" or
"few". The default value is "many". For the purposes of comparing whether two tables are identical,
it might make the most sense to set this to "unique"
"""
self.column_info = {}
self.profile_dataset = profile_dataset
assert isinstance(self.profile_dataset, (Dataset, Validator, Batch))
if isinstance(self.profile_dataset, Batch):
self.profile_dataset = Validator(
execution_engine=self.profile_dataset.data.execution_engine,
batches=[self.profile_dataset],
)
self.all_table_columns = self.profile_dataset.get_metric(
MetricConfiguration("table.columns", dict())
)
elif isinstance(self.profile_dataset, Validator):
self.all_table_columns = self.profile_dataset.get_metric(
MetricConfiguration("table.columns", dict())
)
else:
self.all_table_columns = self.profile_dataset.get_table_columns()
self.semantic_types_dict = semantic_types_dict
assert isinstance(self.semantic_types_dict, (dict, type(None)))
self.ignored_columns = ignored_columns or []
assert isinstance(self.ignored_columns, list)
self.excluded_expectations = excluded_expectations or []
assert isinstance(self.excluded_expectations, list)
self.value_set_threshold = value_set_threshold.upper()
assert isinstance(self.value_set_threshold, str)
self.not_null_only = not_null_only
assert isinstance(self.not_null_only, bool)
self.table_expectations_only = table_expectations_only
assert isinstance(self.table_expectations_only, bool)
if self.table_expectations_only is True:
logger.info(
"table_expectations_only is set to True. When used to build a suite, this profiler will ignore all"
"columns and create expectations only at the table level. If you would also like to create expectations "
"at the column level, you can instantiate a new profiler with table_expectations_only set to False"
)
self.primary_or_compound_key = primary_or_compound_key or []
assert isinstance(self.primary_or_compound_key, list)
if self.table_expectations_only:
self.ignored_columns = self.all_table_columns
if self.primary_or_compound_key:
for column in self.primary_or_compound_key:
if column not in self.all_table_columns:
raise ValueError(
f"Column {column} not found. Please ensure that this column is in the {type(profile_dataset).__name__} "
f"if you would like to use it as a primary_or_compound_key."
)
included_columns = [
column_name
for column_name in self.all_table_columns
if column_name not in self.ignored_columns
]
for column_name in included_columns:
self._add_column_cardinality_to_column_info(
self.profile_dataset, column_name
)
self._add_column_type_to_column_info(self.profile_dataset, column_name)
if self.semantic_types_dict is not None:
self._validate_semantic_types_dict(self.profile_dataset)
for column_name in included_columns:
self._add_semantic_types_by_column_from_config_to_column_info(
column_name
)
self.semantic_type_functions = {
"DATETIME": self._build_expectations_datetime,
"NUMERIC": self._build_expectations_numeric,
"STRING": self._build_expectations_string,
"VALUE_SET": self._build_expectations_value_set,
"BOOLEAN": self._build_expectations_value_set,
}
def build_suite(self):
"""
User-facing expectation-suite building function. Works with an instantiated UserConfigurableProfiler object.
Args:
Returns:
An expectation suite built either with or without a semantic_types dict
"""
if len(self.profile_dataset.get_expectation_suite().expectations) > 0:
suite_name = self.profile_dataset._expectation_suite.expectation_suite_name
self.profile_dataset._expectation_suite = ExpectationSuite(suite_name)
if self.semantic_types_dict:
return self._build_expectation_suite_from_semantic_types_dict()
return self._profile_and_build_expectation_suite()
def _build_expectation_suite_from_semantic_types_dict(self):
"""
Uses a semantic_type dict to determine which expectations to add to the suite, then builds the suite
Args:
Returns:
An expectation suite built from a semantic_types dict
"""
if not self.semantic_types_dict:
raise ValueError(
"A config with a semantic_types dict must be included in order to use this profiler."
)
self._build_expectations_table(self.profile_dataset)
if self.value_set_threshold:
logger.info(
"Using this profiler with a semantic_types dict will ignore the value_set_threshold parameter. If "
"you would like to include value_set expectations, you can include a 'value_set' entry in your "
"semantic_types dict with any columns for which you would like a value_set expectation, or you can "
"remove the semantic_types dict from the config."
)
if self.primary_or_compound_key:
self._build_expectations_primary_or_compound_key(
self.profile_dataset, self.primary_or_compound_key
)
for column_name, column_info in self.column_info.items():
semantic_types = column_info.get("semantic_types")
for semantic_type in semantic_types:
semantic_type_fn = self.semantic_type_functions.get(semantic_type)
semantic_type_fn(
profile_dataset=self.profile_dataset, column=column_name
)
for column_name in self.column_info.keys():
self._build_expectations_for_all_column_types(
self.profile_dataset, column_name
)
expectation_suite = self._build_column_description_metadata(
self.profile_dataset
)
self._display_suite_by_column(suite=expectation_suite)
return expectation_suite
def _profile_and_build_expectation_suite(self):
"""
Profiles the provided dataset to determine which expectations to add to the suite, then builds the suite
Args:
Returns:
An expectation suite built after profiling the dataset
"""
if self.primary_or_compound_key:
self._build_expectations_primary_or_compound_key(
profile_dataset=self.profile_dataset,
column_list=self.primary_or_compound_key,
)
self._build_expectations_table(profile_dataset=self.profile_dataset)
for column_name, column_info in self.column_info.items():
data_type = column_info.get("type")
cardinality = column_info.get("cardinality")
if data_type in ("FLOAT", "INT", "NUMERIC"):
self._build_expectations_numeric(
profile_dataset=self.profile_dataset,
column=column_name,
)
if data_type == "DATETIME":
self._build_expectations_datetime(
profile_dataset=self.profile_dataset,
column=column_name,
)
if (
OrderedProfilerCardinality[self.value_set_threshold]
>= OrderedProfilerCardinality[cardinality]
):
self._build_expectations_value_set(
profile_dataset=self.profile_dataset, column=column_name
)
self._build_expectations_for_all_column_types(
profile_dataset=self.profile_dataset, column=column_name
)
expectation_suite = self._build_column_description_metadata(
self.profile_dataset
)
self._display_suite_by_column(
suite=expectation_suite
) # include in the actual profiler
return expectation_suite
def _validate_semantic_types_dict(self, profile_dataset):
"""
Validates a semantic_types dict to ensure correct formatting, that all semantic_types are recognized, and that
the semantic_types align with the column data types
Args:
profile_dataset: A GE dataset
config: A config dictionary
Returns:
The validated semantic_types dictionary
"""
if not isinstance(self.semantic_types_dict, dict):
raise ValueError(
f"The semantic_types dict in the config must be a dictionary, but is currently a "
f"{type(self.semantic_types_dict)}. Please reformat."
)
for k, v in self.semantic_types_dict.items():
assert isinstance(v, list), (
"Entries in semantic type dict must be lists of column names e.g. "
"{'semantic_types': {'numeric': ['number_of_transactions']}}"
)
if k.upper() not in profiler_semantic_types:
raise ValueError(
f"{k} is not a recognized semantic_type. Please only include one of "
f"{profiler_semantic_types}"
)
selected_columns = [
column
for column_list in self.semantic_types_dict.values()
for column in column_list
]
if selected_columns:
for column in selected_columns:
if column not in self.all_table_columns:
raise ProfilerError(f"Column {column} does not exist.")
elif column in self.ignored_columns:
raise ValueError(
f"Column {column} is specified in both the semantic_types_dict and the list of "
f"ignored columns. Please remove one of these entries to proceed."
)
for semantic_type, column_list in self.semantic_types_dict.items():
for column_name in column_list:
processed_column = self.column_info.get(column_name)
if semantic_type == "datetime":
assert processed_column.get("type") in ("DATETIME", "STRING",), (
f"Column {column_name} must be a datetime column or a string but appears to be "
f"{processed_column.get('type')}"
)
elif semantic_type == "numeric":
assert processed_column.get("type") in (
"INT",
"FLOAT",
"NUMERIC",
), f"Column {column_name} must be an int or a float but appears to be {processed_column.get('type')}"
elif semantic_type in ("STRING", "VALUE_SET"):
pass
return self.semantic_types_dict
def _add_column_type_to_column_info(self, profile_dataset, column_name):
"""
Adds the data type of a column to the column_info dictionary on self
Args:
profile_dataset: A GE dataset
column_name: The name of the column for which to retrieve the data type
Returns:
The type of the column
"""
if "expect_column_values_to_be_in_type_list" in self.excluded_expectations:
logger.info(
"expect_column_values_to_be_in_type_list is in the excluded_expectations list. This"
"expectation is required to establish column data, so it will be run and then removed from the"
"expectation suite."
)
column_info_entry = self.column_info.get(column_name)
if not column_info_entry:
column_info_entry = {}
self.column_info[column_name] = column_info_entry
column_type = column_info_entry.get("type")
if not column_type:
column_type = self._get_column_type(profile_dataset, column_name)
column_info_entry["type"] = column_type
return column_type
    def _get_column_type(self, profile_dataset, column):
        """
        Determines the data type of a column by evaluating the success of `expect_column_values_to_be_in_type_list`.
        In the case of type Decimal, this data type is returned as NUMERIC, which contains the type lists for both INTs
        and FLOATs.
        The type_list expectation used here is removed, since it will need to be built once the build_suite function is
        actually called. This is because calling build_suite wipes any existing expectations, so expectations called
        during the init of the profiler do not persist.
        Args:
            profile_dataset: A GE dataset
            column: The column for which to get the data type
        Returns:
            The data type of the specified column as a string, one of
            "NUMERIC", "INT", "FLOAT", "STRING", "BOOLEAN", "DATETIME",
            "UNKNOWN" (no type list matched), or "unknown" (backend does not
            implement the type-list expectation)
        """
        # list of types is used to support pandas and sqlalchemy
        type_ = None
        try:
            # NUMERIC: the column satisfies BOTH the INT and the FLOAT type
            # lists (e.g. Decimal) — checked first so it wins over INT/FLOAT.
            if (
                profile_dataset.expect_column_values_to_be_in_type_list(
                    column, type_list=sorted(list(ProfilerTypeMapping.INT_TYPE_NAMES))
                ).success
                and profile_dataset.expect_column_values_to_be_in_type_list(
                    column, type_list=sorted(list(ProfilerTypeMapping.FLOAT_TYPE_NAMES))
                ).success
            ):
                type_ = "NUMERIC"
            elif profile_dataset.expect_column_values_to_be_in_type_list(
                column, type_list=sorted(list(ProfilerTypeMapping.INT_TYPE_NAMES))
            ).success:
                type_ = "INT"
            elif profile_dataset.expect_column_values_to_be_in_type_list(
                column, type_list=sorted(list(ProfilerTypeMapping.FLOAT_TYPE_NAMES))
            ).success:
                type_ = "FLOAT"
            elif profile_dataset.expect_column_values_to_be_in_type_list(
                column, type_list=sorted(list(ProfilerTypeMapping.STRING_TYPE_NAMES))
            ).success:
                type_ = "STRING"
            elif profile_dataset.expect_column_values_to_be_in_type_list(
                column, type_list=sorted(list(ProfilerTypeMapping.BOOLEAN_TYPE_NAMES))
            ).success:
                type_ = "BOOLEAN"
            elif profile_dataset.expect_column_values_to_be_in_type_list(
                column, type_list=sorted(list(ProfilerTypeMapping.DATETIME_TYPE_NAMES))
            ).success:
                type_ = "DATETIME"
            else:
                type_ = "UNKNOWN"
        except NotImplementedError:
            # NOTE(review): lowercase "unknown" here vs uppercase "UNKNOWN"
            # above — confirm downstream comparisons tolerate both casings.
            type_ = "unknown"

        if type_ == "NUMERIC":
            # Re-probe with the combined INT+FLOAT list so the expectation
            # left in the suite reflects the union of both type lists.
            profile_dataset.expect_column_values_to_be_in_type_list(
                column,
                type_list=sorted(list(ProfilerTypeMapping.INT_TYPE_NAMES))
                + sorted(list(ProfilerTypeMapping.FLOAT_TYPE_NAMES)),
            )

        # Drop the probe expectation; build_suite rebuilds it later (see docstring).
        profile_dataset._expectation_suite.remove_expectation(
            ExpectationConfiguration(
                expectation_type="expect_column_values_to_be_in_type_list",
                kwargs={"column": column},
            )
        )
        return type_
def _add_column_cardinality_to_column_info(self, profile_dataset, column_name):
"""
Adds the cardinality of a column to the column_info dictionary on self
Args:
profile_dataset: A GE Dataset
column_name: The name of the column for which to add cardinality
Returns:
The cardinality of the column
"""
column_info_entry = self.column_info.get(column_name)
if not column_info_entry:
column_info_entry = {}
self.column_info[column_name] = column_info_entry
column_cardinality = column_info_entry.get("cardinality")
if not column_cardinality:
column_cardinality = self._get_column_cardinality(
profile_dataset, column_name
)
column_info_entry["cardinality"] = column_cardinality
# remove the expectations
profile_dataset._expectation_suite.remove_expectation(
ExpectationConfiguration(
expectation_type="expect_column_unique_value_count_to_be_between",
kwargs={"column": column_name},
)
)
profile_dataset._expectation_suite.remove_expectation(
ExpectationConfiguration(
expectation_type="expect_column_proportion_of_unique_values_to_be_between",
kwargs={"column": column_name},
)
)
return column_cardinality
def _get_column_cardinality(self, profile_dataset, column):
"""
Determines the cardinality of a column using the get_basic_column_cardinality method from
OrderedProfilerCardinality
Args:
profile_dataset: A GE Dataset
column: The column for which to get cardinality
Returns:
The cardinality of the specified column
"""
num_unique = None
pct_unique = None
try:
num_unique = profile_dataset.expect_column_unique_value_count_to_be_between(
column, None, None
).result["observed_value"]
pct_unique = (
profile_dataset.expect_column_proportion_of_unique_values_to_be_between(
column, None, None
).result["observed_value"]
)
except KeyError: # if observed_value value is not set
logger.error(
"Failed to get cardinality of column {:s} - continuing...".format(
column
)
)
# Previously, if we had 25 possible categories out of 1000 rows, this would comes up as many, because of its
# percentage, so it was tweaked here, but is still experimental.
cardinality = OrderedProfilerCardinality.get_basic_column_cardinality(
num_unique, pct_unique
)
return cardinality.name
def _add_semantic_types_by_column_from_config_to_column_info(self, column_name):
"""
Adds the semantic type of a column to the column_info dict on self, for display purposes after suite creation
Args:
column_name: The name of the column
Returns:
A list of semantic_types for a given colum
"""
column_info_entry = self.column_info.get(column_name)
if not column_info_entry:
column_info_entry = {}
self.column_info[column_name] = column_info_entry
semantic_types = column_info_entry.get("semantic_types")
if not semantic_types:
assert isinstance(
self.semantic_types_dict, dict
), f"The semantic_types dict in the config must be a dictionary, but is currently a {type(self.semantic_types_dict)}. Please reformat."
semantic_types = []
for semantic_type, column_list in self.semantic_types_dict.items():
if column_name in column_list:
semantic_types.append(semantic_type.upper())
column_info_entry["semantic_types"] = semantic_types
if all(
i in column_info_entry.get("semantic_types")
for i in ["BOOLEAN", "VALUE_SET"]
):
logger.info(
f"Column {column_name} has both 'BOOLEAN' and 'VALUE_SET' specified as semantic_types."
f"As these are currently the same in function, the 'VALUE_SET' type will be removed."
)
column_info_entry["semantic_types"].remove("VALUE_SET")
self.column_info[column_name] = column_info_entry
return semantic_types
def _build_column_description_metadata(self, profile_dataset):
"""
Adds column description metadata to the suite on a Dataset object
Args:
profile_dataset: A GE Dataset
Returns:
An expectation suite with column description metadata
"""
columns = self.all_table_columns
expectation_suite = profile_dataset.get_expectation_suite(
suppress_warnings=True, discard_failed_expectations=False
)
meta_columns = {}
for column in columns:
meta_columns[column] = {"description": ""}
if not expectation_suite.meta:
expectation_suite.meta = {"columns": meta_columns, "notes": {""}}
else:
expectation_suite.meta["columns"] = meta_columns
return expectation_suite
    def _display_suite_by_column(self, suite):
        """
        Displays the expectations of a suite by column, along with the column cardinality, and semantic or data type so
        that a user can easily see which expectations were created for which columns
        Args:
            suite: An ExpectationSuite
        Returns:
            True (output is printed to stdout, not returned)
        """
        expectations = suite.expectations
        # Group expectations by their "column" kwarg; expectations without one
        # are table-level.
        expectations_by_column = {}
        for expectation in expectations:
            domain = expectation["kwargs"].get("column") or "table_level_expectations"
            if expectations_by_column.get(domain) is None:
                expectations_by_column[domain] = [expectation]
            else:
                expectations_by_column[domain].append(expectation)
        if not expectations_by_column:
            print("No expectations included in suite.")
        else:
            print("Creating an expectation suite with the following expectations:\n")

            # Print table-level expectations first, removing them from the map
            # so only column-level groups remain below.
            if "table_level_expectations" in expectations_by_column:
                table_level_expectations = expectations_by_column.pop(
                    "table_level_expectations"
                )
                print("Table-Level Expectations")
                for expectation in sorted(
                    table_level_expectations, key=lambda x: x.expectation_type
                ):
                    print(expectation.expectation_type)

            if expectations_by_column:
                print("\nExpectations by Column")

            # If ANY column has semantic types, the per-column header shows
            # semantic types; otherwise it falls back to the data type.
            contains_semantic_types = [
                v for v in self.column_info.values() if v.get("semantic_types")
            ]
            for column in sorted(expectations_by_column):
                info_column = self.column_info.get(column) or {}

                semantic_types = info_column.get("semantic_types") or "not_specified"
                type_ = info_column.get("type")
                cardinality = info_column.get("cardinality")

                if len(contains_semantic_types) > 0:
                    type_string = f" | Semantic Type: {semantic_types[0] if len(semantic_types)==1 else semantic_types}"
                elif type_:
                    type_string = f" | Column Data Type: {type_}"
                else:
                    type_string = ""

                if cardinality:
                    cardinality_string = f" | Cardinality: {cardinality}"
                else:
                    cardinality_string = ""

                column_string = (
                    f"Column Name: {column}{type_string or ''}{cardinality_string or ''}"
                )
                print(column_string)

                # Within a column, list expectations alphabetically by type.
                for expectation in sorted(
                    expectations_by_column.get(column), key=lambda x: x.expectation_type
                ):
                    print(expectation.expectation_type)
                print("\n")
        return True
def _build_expectations_value_set(self, profile_dataset, column, **kwargs):
"""
Adds a value_set expectation for a given column
Args:
profile_dataset: A GE Dataset
column: The column for which to add an expectation
**kwargs:
Returns:
The GE Dataset
"""
if "expect_column_values_to_be_in_set" not in self.excluded_expectations:
value_set = profile_dataset.expect_column_distinct_values_to_be_in_set(
column, value_set=None, result_format="SUMMARY"
).result["observed_value"]
profile_dataset._expectation_suite.remove_expectation(
ExpectationConfiguration(
expectation_type="expect_column_distinct_values_to_be_in_set",
kwargs={"column": column},
),
match_type="domain",
)
profile_dataset.expect_column_values_to_be_in_set(
column, value_set=value_set
)
return profile_dataset
def _build_expectations_numeric(self, profile_dataset, column, **kwargs):
"""
Adds a set of numeric expectations for a given column
Args:
profile_dataset: A GE Dataset
column: The column for which to add expectations
**kwargs:
Returns:
The GE Dataset
"""
# min
if "expect_column_min_to_be_between" not in self.excluded_expectations:
observed_min = profile_dataset.expect_column_min_to_be_between(
column, min_value=None, max_value=None, result_format="SUMMARY"
).result["observed_value"]
if not self._is_nan(observed_min):
profile_dataset.expect_column_min_to_be_between(
column,
min_value=observed_min,
max_value=observed_min,
)
else:
profile_dataset._expectation_suite.remove_expectation(
ExpectationConfiguration(
expectation_type="expect_column_min_to_be_between",
kwargs={"column": column},
),
match_type="domain",
)
logger.debug(
f"Skipping expect_column_min_to_be_between because observed value is nan: {observed_min}"
)
# max
if "expect_column_max_to_be_between" not in self.excluded_expectations:
observed_max = profile_dataset.expect_column_max_to_be_between(
column, min_value=None, max_value=None, result_format="SUMMARY"
).result["observed_value"]
if not self._is_nan(observed_max):
profile_dataset.expect_column_max_to_be_between(
column,
min_value=observed_max,
max_value=observed_max,
)
else:
profile_dataset._expectation_suite.remove_expectation(
ExpectationConfiguration(
expectation_type="expect_column_max_to_be_between",
kwargs={"column": column},
),
match_type="domain",
)
logger.debug(
f"Skipping expect_column_max_to_be_between because observed value is nan: {observed_max}"
)
# mean
if "expect_column_mean_to_be_between" not in self.excluded_expectations:
observed_mean = profile_dataset.expect_column_mean_to_be_between(
column, min_value=None, max_value=None, result_format="SUMMARY"
).result["observed_value"]
if not self._is_nan(observed_mean):
profile_dataset.expect_column_mean_to_be_between(
column,
min_value=observed_mean,
max_value=observed_mean,
)
else:
profile_dataset._expectation_suite.remove_expectation(
ExpectationConfiguration(
expectation_type="expect_column_mean_to_be_between",
kwargs={"column": column},
),
match_type="domain",
)
logger.debug(
f"Skipping expect_column_mean_to_be_between because observed value is nan: {observed_mean}"
)
# median
if "expect_column_median_to_be_between" not in self.excluded_expectations:
observed_median = profile_dataset.expect_column_median_to_be_between(
column, min_value=None, max_value=None, result_format="SUMMARY"
).result["observed_value"]
if not self._is_nan(observed_median):
profile_dataset.expect_column_median_to_be_between(
column,
min_value=observed_median,
max_value=observed_median,
)
else:
profile_dataset._expectation_suite.remove_expectation(
ExpectationConfiguration(
expectation_type="expect_column_median_to_be_between",
kwargs={"column": column},
),
match_type="domain",
)
logger.debug(
f"Skipping expect_column_median_to_be_between because observed value is nan: {observed_median}"
)
if (
"expect_column_quantile_values_to_be_between"
not in self.excluded_expectations
):
if isinstance(profile_dataset, Dataset):
if isinstance(profile_dataset, PandasDataset):
allow_relative_error = "lower"
else:
allow_relative_error = (
profile_dataset.attempt_allowing_relative_error()
)
elif isinstance(profile_dataset, Validator):
if isinstance(profile_dataset.execution_engine, PandasExecutionEngine):
allow_relative_error = "lower"
if isinstance(profile_dataset.execution_engine, SparkDFExecutionEngine):
allow_relative_error = 0.0
if isinstance(
profile_dataset.execution_engine, SqlAlchemyExecutionEngine
):
allow_relative_error = attempt_allowing_relative_error(
profile_dataset.execution_engine.engine.dialect
)
quantile_result = (
profile_dataset.expect_column_quantile_values_to_be_between(
column,
quantile_ranges={
"quantiles": [0.05, 0.25, 0.5, 0.75, 0.95],
"value_ranges": [
[None, None],
[None, None],
[None, None],
[None, None],
[None, None],
],
},
allow_relative_error=allow_relative_error,
result_format="SUMMARY",
)
)
if quantile_result.exception_info and (
quantile_result.exception_info["exception_traceback"]
or quantile_result.exception_info["exception_message"]
):
profile_dataset._expectation_suite.remove_expectation(
ExpectationConfiguration(
expectation_type="expect_column_quantile_values_to_be_between",
kwargs={"column": column},
),
match_type="domain",
)
logger.debug(quantile_result.exception_info["exception_traceback"])
logger.debug(quantile_result.exception_info["exception_message"])
else:
profile_dataset.expect_column_quantile_values_to_be_between(
column,
quantile_ranges={
"quantiles": quantile_result.result["observed_value"][
"quantiles"
],
"value_ranges": [
[v, v]
for v in quantile_result.result["observed_value"]["values"]
],
},
allow_relative_error=allow_relative_error,
)
return profile_dataset
def _build_expectations_primary_or_compound_key(
self, profile_dataset, column_list, **kwargs
):
"""
Adds a uniqueness expectation for a given column or set of columns
Args:
profile_dataset: A GE Dataset
column_list: A list containing one or more columns for which to add a uniqueness expectation
**kwargs:
Returns:
The GE Dataset
"""
# uniqueness
if (
len(column_list) > 1
and "expect_compound_columns_to_be_unique" not in self.excluded_expectations
):
profile_dataset.expect_compound_columns_to_be_unique(column_list)
elif len(column_list) < 1:
raise ValueError(
"When specifying a primary or compound key, column_list must not be empty"
)
else:
[column] = column_list
if "expect_column_values_to_be_unique" not in self.excluded_expectations:
profile_dataset.expect_column_values_to_be_unique(column)
return profile_dataset
def _build_expectations_string(self, profile_dataset, column, **kwargs):
"""
Adds a set of string expectations for a given column. Currently does not do anything.
With the 0.12 API there isn't a quick way to introspect for value_lengths - if we did that, we could build a
potentially useful value_lengths expectation here.
Args:
profile_dataset: A GE Dataset
column: The column for which to add expectations
**kwargs:
Returns:
The GE Dataset
"""
if (
"expect_column_value_lengths_to_be_between"
not in self.excluded_expectations
):
pass
return profile_dataset
def _build_expectations_datetime(self, profile_dataset, column, **kwargs):
"""
Adds `expect_column_values_to_be_between` for a given column
Args:
profile_dataset: A GE Dataset
column: The column for which to add the expectation
**kwargs:
Returns:
The GE Dataset
"""
if "expect_column_values_to_be_between" not in self.excluded_expectations:
min_value = profile_dataset.expect_column_min_to_be_between(
column,
min_value=None,
max_value=None,
result_format="SUMMARY",
parse_strings_as_datetimes=True,
).result["observed_value"]
if min_value is not None:
try:
min_value = parse(min_value)
except TypeError:
pass
profile_dataset._expectation_suite.remove_expectation(
ExpectationConfiguration(
expectation_type="expect_column_min_to_be_between",
kwargs={"column": column},
),
match_type="domain",
)
max_value = profile_dataset.expect_column_max_to_be_between(
column,
min_value=None,
max_value=None,
result_format="SUMMARY",
parse_strings_as_datetimes=True,
).result["observed_value"]
if max_value is not None:
try:
max_value = parse(max_value)
except TypeError:
pass
profile_dataset._expectation_suite.remove_expectation(
ExpectationConfiguration(
expectation_type="expect_column_max_to_be_between",
kwargs={"column": column},
),
match_type="domain",
)
if min_value is not None or max_value is not None:
profile_dataset.expect_column_values_to_be_between(
column,
min_value=min_value,
max_value=max_value,
parse_strings_as_datetimes=True,
)
return profile_dataset
    def _build_expectations_for_all_column_types(
        self, profile_dataset, column, **kwargs
    ):
        """
        Adds these expectations for all included columns irrespective of type. Includes:
            - `expect_column_values_to_not_be_null` (or `expect_column_values_to_be_null`)
            - `expect_column_proportion_of_unique_values_to_be_between`
            - `expect_column_values_to_be_in_type_list`
        Args:
            profile_dataset: A GE Dataset
            column: The column for which to add the expectations
            **kwargs:
        Returns:
            The GE Dataset
        """
        # NOTE(review): the docstring promises the GE Dataset is returned,
        # but this method returns None — confirm against callers.
        if "expect_column_values_to_not_be_null" not in self.excluded_expectations:
            not_null_result = profile_dataset.expect_column_values_to_not_be_null(
                column
            )
            if not not_null_result.success:
                unexpected_percent = float(not_null_result.result["unexpected_percent"])
                # Mostly-null column: replace the failed not-null expectation
                # with its inverse, unless profiling was configured not_null_only.
                if unexpected_percent >= 50 and not self.not_null_only:
                    potential_mostly_value = unexpected_percent / 100.0
                    # Drop the failed not-null expectation before adding the
                    # null expectation for the same column.
                    profile_dataset._expectation_suite.remove_expectation(
                        ExpectationConfiguration(
                            expectation_type="expect_column_values_to_not_be_null",
                            kwargs={"column": column},
                        ),
                        match_type="domain",
                    )
                    if (
                        "expect_column_values_to_be_null"
                        not in self.excluded_expectations
                    ):
                        profile_dataset.expect_column_values_to_be_null(
                            column, mostly=potential_mostly_value
                        )
                else:
                    potential_mostly_value = (100.0 - unexpected_percent) / 100.0
                    # Clamp to at least 0.001 so `mostly` never rounds to zero.
                    safe_mostly_value = round(max(0.001, potential_mostly_value), 3)
                    profile_dataset.expect_column_values_to_not_be_null(
                        column, mostly=safe_mostly_value
                    )
        if (
            "expect_column_proportion_of_unique_values_to_be_between"
            not in self.excluded_expectations
        ):
            # Probe with unbounded limits to read the observed proportion.
            pct_unique = (
                profile_dataset.expect_column_proportion_of_unique_values_to_be_between(
                    column, None, None
                ).result["observed_value"]
            )
            if not self._is_nan(pct_unique):
                # Pin the proportion of unique values at the observed value.
                profile_dataset.expect_column_proportion_of_unique_values_to_be_between(
                    column, min_value=pct_unique, max_value=pct_unique
                )
            else:
                # NaN observed value: the probe expectation cannot be kept.
                profile_dataset._expectation_suite.remove_expectation(
                    ExpectationConfiguration(
                        expectation_type="expect_column_proportion_of_unique_values_to_be_between",
                        kwargs={"column": column},
                    ),
                    match_type="domain",
                )
                logger.debug(
                    f"Skipping expect_column_proportion_of_unique_values_to_be_between because observed value is nan: {pct_unique}"
                )
        if "expect_column_values_to_be_in_type_list" not in self.excluded_expectations:
            col_type = self.column_info.get(column).get("type")
            if col_type != "UNKNOWN":
                # Map the profiled type name to the list of accepted backend types.
                type_list = profiler_data_types_with_mapping.get(col_type)
                profile_dataset.expect_column_values_to_be_in_type_list(
                    column, type_list=type_list
                )
            else:
                logger.info(
                    f"Column type for column {column} is unknown. "
                    f"Skipping expect_column_values_to_be_in_type_list for this column."
                )
def _build_expectations_table(self, profile_dataset, **kwargs):
"""
Adds two table level expectations to the dataset
Args:
profile_dataset: A GE Dataset
**kwargs:
Returns:
The GE Dataset
"""
if (
"expect_table_columns_to_match_ordered_list"
not in self.excluded_expectations
):
columns = self.all_table_columns
profile_dataset.expect_table_columns_to_match_ordered_list(columns)
if "expect_table_row_count_to_be_between" not in self.excluded_expectations:
row_count = profile_dataset.expect_table_row_count_to_be_between(
min_value=0, max_value=None
).result["observed_value"]
min_value = max(0, int(row_count))
max_value = int(row_count)
profile_dataset.expect_table_row_count_to_be_between(
min_value=min_value, max_value=max_value
)
def _is_nan(self, value):
"""
If value is an array, test element-wise for NaN and return result as a boolean array.
If value is a scalar, return boolean.
Args:
value: The value to test
Returns:
The results of the test
"""
try:
return np.isnan(value)
except TypeError:
return True
| [
"logging.getLogger",
"dateutil.parser.parse",
"great_expectations.validator.validator.Validator",
"great_expectations.profile.base.profiler_data_types_with_mapping.get",
"great_expectations.expectations.metrics.util.attempt_allowing_relative_error",
"great_expectations.profile.base.OrderedProfilerCardinal... | [((940, 967), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (957, 967), False, 'import logging\n'), ((24174, 24253), 'great_expectations.profile.base.OrderedProfilerCardinality.get_basic_column_cardinality', 'OrderedProfilerCardinality.get_basic_column_cardinality', (['num_unique', 'pct_unique'], {}), '(num_unique, pct_unique)\n', (24229, 24253), False, 'from great_expectations.profile.base import OrderedProfilerCardinality, ProfilerTypeMapping, profiler_data_types_with_mapping, profiler_semantic_types\n'), ((6130, 6236), 'great_expectations.validator.validator.Validator', 'Validator', ([], {'execution_engine': 'self.profile_dataset.data.execution_engine', 'batches': '[self.profile_dataset]'}), '(execution_engine=self.profile_dataset.data.execution_engine,\n batches=[self.profile_dataset])\n', (6139, 6236), False, 'from great_expectations.validator.validator import Validator\n'), ((10098, 10126), 'great_expectations.core.ExpectationSuite', 'ExpectationSuite', (['suite_name'], {}), '(suite_name)\n', (10114, 10126), False, 'from great_expectations.core import ExpectationSuite\n'), ((21186, 21302), 'great_expectations.core.expectation_configuration.ExpectationConfiguration', 'ExpectationConfiguration', ([], {'expectation_type': '"""expect_column_values_to_be_in_type_list"""', 'kwargs': "{'column': column}"}), "(expectation_type=\n 'expect_column_values_to_be_in_type_list', kwargs={'column': column})\n", (21210, 21302), False, 'from great_expectations.core.expectation_configuration import ExpectationConfiguration\n'), ((47752, 47767), 'numpy.isnan', 'np.isnan', (['value'], {}), '(value)\n', (47760, 47767), True, 'import numpy as np\n'), ((22360, 22492), 'great_expectations.core.expectation_configuration.ExpectationConfiguration', 'ExpectationConfiguration', ([], {'expectation_type': '"""expect_column_unique_value_count_to_be_between"""', 'kwargs': "{'column': column_name}"}), 
"(expectation_type=\n 'expect_column_unique_value_count_to_be_between', kwargs={'column':\n column_name})\n", (22384, 22492), False, 'from great_expectations.core.expectation_configuration import ExpectationConfiguration\n'), ((22640, 22782), 'great_expectations.core.expectation_configuration.ExpectationConfiguration', 'ExpectationConfiguration', ([], {'expectation_type': '"""expect_column_proportion_of_unique_values_to_be_between"""', 'kwargs': "{'column': column_name}"}), "(expectation_type=\n 'expect_column_proportion_of_unique_values_to_be_between', kwargs={\n 'column': column_name})\n", (22664, 22782), False, 'from great_expectations.core.expectation_configuration import ExpectationConfiguration\n'), ((30353, 30472), 'great_expectations.core.expectation_configuration.ExpectationConfiguration', 'ExpectationConfiguration', ([], {'expectation_type': '"""expect_column_distinct_values_to_be_in_set"""', 'kwargs': "{'column': column}"}), "(expectation_type=\n 'expect_column_distinct_values_to_be_in_set', kwargs={'column': column})\n", (30377, 30472), False, 'from great_expectations.core.expectation_configuration import ExpectationConfiguration\n'), ((41221, 41328), 'great_expectations.core.expectation_configuration.ExpectationConfiguration', 'ExpectationConfiguration', ([], {'expectation_type': '"""expect_column_min_to_be_between"""', 'kwargs': "{'column': column}"}), "(expectation_type='expect_column_min_to_be_between',\n kwargs={'column': column})\n", (41245, 41328), False, 'from great_expectations.core.expectation_configuration import ExpectationConfiguration\n'), ((41978, 42085), 'great_expectations.core.expectation_configuration.ExpectationConfiguration', 'ExpectationConfiguration', ([], {'expectation_type': '"""expect_column_max_to_be_between"""', 'kwargs': "{'column': column}"}), "(expectation_type='expect_column_max_to_be_between',\n kwargs={'column': column})\n", (42002, 42085), False, 'from great_expectations.core.expectation_configuration import 
ExpectationConfiguration\n'), ((46015, 46061), 'great_expectations.profile.base.profiler_data_types_with_mapping.get', 'profiler_data_types_with_mapping.get', (['col_type'], {}), '(col_type)\n', (46051, 46061), False, 'from great_expectations.profile.base import OrderedProfilerCardinality, ProfilerTypeMapping, profiler_data_types_with_mapping, profiler_semantic_types\n'), ((15757, 15806), 'great_expectations.exceptions.ProfilerError', 'ProfilerError', (['f"""Column {column} does not exist."""'], {}), "(f'Column {column} does not exist.')\n", (15770, 15806), False, 'from great_expectations.exceptions import ProfilerError\n'), ((31728, 31835), 'great_expectations.core.expectation_configuration.ExpectationConfiguration', 'ExpectationConfiguration', ([], {'expectation_type': '"""expect_column_min_to_be_between"""', 'kwargs': "{'column': column}"}), "(expectation_type='expect_column_min_to_be_between',\n kwargs={'column': column})\n", (31752, 31835), False, 'from great_expectations.core.expectation_configuration import ExpectationConfiguration\n'), ((32767, 32874), 'great_expectations.core.expectation_configuration.ExpectationConfiguration', 'ExpectationConfiguration', ([], {'expectation_type': '"""expect_column_max_to_be_between"""', 'kwargs': "{'column': column}"}), "(expectation_type='expect_column_max_to_be_between',\n kwargs={'column': column})\n", (32791, 32874), False, 'from great_expectations.core.expectation_configuration import ExpectationConfiguration\n'), ((33814, 33923), 'great_expectations.core.expectation_configuration.ExpectationConfiguration', 'ExpectationConfiguration', ([], {'expectation_type': '"""expect_column_mean_to_be_between"""', 'kwargs': "{'column': column}"}), "(expectation_type=\n 'expect_column_mean_to_be_between', kwargs={'column': column})\n", (33838, 33923), False, 'from great_expectations.core.expectation_configuration import ExpectationConfiguration\n'), ((34881, 34992), 
'great_expectations.core.expectation_configuration.ExpectationConfiguration', 'ExpectationConfiguration', ([], {'expectation_type': '"""expect_column_median_to_be_between"""', 'kwargs': "{'column': column}"}), "(expectation_type=\n 'expect_column_median_to_be_between', kwargs={'column': column})\n", (34905, 34992), False, 'from great_expectations.core.expectation_configuration import ExpectationConfiguration\n'), ((37359, 37479), 'great_expectations.core.expectation_configuration.ExpectationConfiguration', 'ExpectationConfiguration', ([], {'expectation_type': '"""expect_column_quantile_values_to_be_between"""', 'kwargs': "{'column': column}"}), "(expectation_type=\n 'expect_column_quantile_values_to_be_between', kwargs={'column': column})\n", (37383, 37479), False, 'from great_expectations.core.expectation_configuration import ExpectationConfiguration\n'), ((41061, 41077), 'dateutil.parser.parse', 'parse', (['min_value'], {}), '(min_value)\n', (41066, 41077), False, 'from dateutil.parser import parse\n'), ((41818, 41834), 'dateutil.parser.parse', 'parse', (['max_value'], {}), '(max_value)\n', (41823, 41834), False, 'from dateutil.parser import parse\n'), ((45356, 45493), 'great_expectations.core.expectation_configuration.ExpectationConfiguration', 'ExpectationConfiguration', ([], {'expectation_type': '"""expect_column_proportion_of_unique_values_to_be_between"""', 'kwargs': "{'column': column}"}), "(expectation_type=\n 'expect_column_proportion_of_unique_values_to_be_between', kwargs={\n 'column': column})\n", (45380, 45493), False, 'from great_expectations.core.expectation_configuration import ExpectationConfiguration\n'), ((36247, 36332), 'great_expectations.expectations.metrics.util.attempt_allowing_relative_error', 'attempt_allowing_relative_error', (['profile_dataset.execution_engine.engine.dialect'], {}), '(profile_dataset.execution_engine.engine.dialect\n )\n', (36278, 36332), False, 'from great_expectations.expectations.metrics.util import 
attempt_allowing_relative_error\n'), ((43742, 43854), 'great_expectations.core.expectation_configuration.ExpectationConfiguration', 'ExpectationConfiguration', ([], {'expectation_type': '"""expect_column_values_to_not_be_null"""', 'kwargs': "{'column': column}"}), "(expectation_type=\n 'expect_column_values_to_not_be_null', kwargs={'column': column})\n", (43766, 43854), False, 'from great_expectations.core.expectation_configuration import ExpectationConfiguration\n')] |
import numpy as np
def gini(labels):
    """Return the Gini impurity of a sequence of class labels.

    Computed as 1 - sum(p_i^2) over the per-class probabilities p_i.

    Args:
        labels: sequence (or array) of hashable/comparable class labels.
    Returns:
        float impurity in [0, 1); 0.0 for a pure or empty set.
    """
    total_size = len(labels)
    # Robustness fix: the original divided by len(labels) and crashed on
    # empty input; define the impurity of an empty set as 0.0.
    if total_size == 0:
        return 0.0
    _, counts = np.unique(labels, return_counts=True)
    probs = counts / total_size
    # Vectorized replacement for the original Python-level accumulation loop.
    return 1.0 - float(np.sum(probs ** 2))
| [
"numpy.unique"
] | [((92, 129), 'numpy.unique', 'np.unique', (['labels'], {'return_counts': '(True)'}), '(labels, return_counts=True)\n', (101, 129), True, 'import numpy as np\n')] |
import collections
import cv2
import numpy as np
import matplotlib.pyplot as plt
import gym
def plot_learning_curve(x, scores, epsilons, filename, lines=None):
    # Plot epsilon (left axis, C0) and a trailing 20-step running average of
    # scores (right axis, C1) against training steps, then save to *filename*.
    # `lines`, if given, is an iterable of x positions for vertical markers.
    fig=plt.figure()
    ax=fig.add_subplot(111, label="1")
    # Second axes overlaid on the first; frame_on=False avoids a doubled frame.
    ax2=fig.add_subplot(111, label="2", frame_on=False)
    ax.plot(x, epsilons, color="C0")
    ax.set_xlabel("Training Steps", color="C0")
    ax.set_ylabel("Epsilon", color="C0")
    ax.tick_params(axis='x', colors="C0")
    ax.tick_params(axis='y', colors="C0")
    N = len(scores)
    running_avg = np.empty(N)
    for t in range(N):
        # Mean over a trailing window of up to 20 scores.
        running_avg[t] = np.mean(scores[max(0, t-20):(t+1)])
    ax2.scatter(x, running_avg, color="C1")
    # Hide the overlay's x axis (shared with the primary axes).
    ax2.axes.get_xaxis().set_visible(False)
    ax2.yaxis.tick_right()
    ax2.set_ylabel('Score', color="C1")
    ax2.yaxis.set_label_position('right')
    ax2.tick_params(axis='y', colors="C1")
    if lines is not None:
        # Optional vertical markers (e.g. checkpoints or phase boundaries).
        for line in lines:
            plt.axvline(x=line)
    plt.savefig(filename)
class RepeatActionAndMaxFrame(gym.Wrapper):
    """ modified from:
        https://github.com/PacktPublishing/Deep-Reinforcement-Learning-Hands-On/blob/master/Chapter06/lib/wrappers.py

    Repeats each action for `repeat` env frames and returns the element-wise
    max of the two most recent frames. Optionally clips rewards to [-1, 1],
    performs up to `no_ops` random no-op steps on reset, and presses FIRE
    first when the game requires it.
    """
    def __init__(self, env=None, repeat=4, clip_reward=False,
                 no_ops=0, fire_first=False):
        super(RepeatActionAndMaxFrame, self).__init__(env)
        self.repeat = repeat
        self.shape = env.observation_space.low.shape
        # BUGFIX: was np.zeros_like((2, self.shape)) — that builds an array
        # from a ragged tuple rather than a (2, *obs_shape) buffer; allocate
        # a real buffer with the env's observation dtype so np.maximum keeps
        # the original frame dtype.
        self.frame_buffer = np.zeros((2, *self.shape),
                                     dtype=env.observation_space.dtype)
        self.clip_reward = clip_reward
        # BUGFIX: was hard-coded `self.no_ops = 0`, silently ignoring the
        # `no_ops` constructor argument.
        self.no_ops = no_ops
        self.fire_first = fire_first

    def step(self, action):
        """Apply `action` for self.repeat frames.

        Returns (max_of_last_two_frames, summed_reward, done, info).
        """
        t_reward = 0.0
        done = False
        for i in range(self.repeat):
            obs, reward, done, info = self.env.step(action)
            if self.clip_reward:
                reward = np.clip(np.array([reward]), -1, 1)[0]
            t_reward += reward
            # Alternate between the two buffer slots.
            idx = i % 2
            self.frame_buffer[idx] = obs
            if done:
                break
        # Element-wise max removes Atari sprite flicker between frames.
        max_frame = np.maximum(self.frame_buffer[0], self.frame_buffer[1])
        return max_frame, t_reward, done, info

    def reset(self):
        """Reset the env, optionally running random no-ops and a FIRE action."""
        obs = self.env.reset()
        no_ops = np.random.randint(self.no_ops) + 1 if self.no_ops > 0 else 0
        for _ in range(no_ops):
            _, _, done, _ = self.env.step(0)
            if done:
                self.env.reset()
        if self.fire_first:
            assert self.env.unwrapped.get_action_meanings()[1] == 'FIRE'
            obs, _, _, _ = self.env.step(1)
        self.frame_buffer = np.zeros_like(self.frame_buffer)
        self.frame_buffer[0] = obs
        return obs
class PreprocessFrame(gym.ObservationWrapper):
    # Converts RGB frames to grayscale, resizes them, reorders to
    # channels-first, and rescales pixel values into [0, 1].
    def __init__(self, shape, env=None):
        super(PreprocessFrame, self).__init__(env)
        # `shape` arrives as (H, W, C); stored internally as (C, H, W).
        self.shape=(shape[2], shape[0], shape[1])
        self.observation_space = gym.spaces.Box(low=0, high=1.0,
                                    shape=self.shape,dtype=np.float32)
    def observation(self, obs):
        new_frame = cv2.cvtColor(obs, cv2.COLOR_RGB2GRAY)
        # NOTE(review): cv2.resize expects (width, height) but self.shape[1:]
        # is (H, W) — equivalent only for square targets such as 84x84.
        resized_screen = cv2.resize(new_frame, self.shape[1:],
                                    interpolation=cv2.INTER_AREA)
        new_obs = np.array(resized_screen, dtype=np.uint8).reshape(self.shape)
        # Normalize to [0, 1] (float result).
        new_obs = new_obs / 255.0
        return new_obs
class StackFrames(gym.ObservationWrapper):
    """Keeps a rolling stack of the last `repeat` observations, concatenated
    along the leading (channel) axis."""
    def __init__(self, env, repeat):
        super(StackFrames, self).__init__(env)
        low = env.observation_space.low.repeat(repeat, axis=0)
        high = env.observation_space.high.repeat(repeat, axis=0)
        self.observation_space = gym.spaces.Box(low, high, dtype=np.float32)
        self.stack = collections.deque(maxlen=repeat)

    def _stacked(self):
        # Flatten the deque into the stacked observation-space shape.
        return np.array(self.stack).reshape(self.observation_space.low.shape)

    def reset(self):
        """Reset the env and fill the stack with copies of the first frame."""
        self.stack.clear()
        first = self.env.reset()
        for _ in range(self.stack.maxlen):
            self.stack.append(first)
        return self._stacked()

    def observation(self, observation):
        """Push the newest frame and return the stacked observation."""
        self.stack.append(observation)
        return self._stacked()
def make_env(env_name, shape=(84,84,1), repeat=4, clip_rewards=False,
             no_ops=0, fire_first=False):
    """Build a gym environment wrapped with frame skip/max, grayscale
    preprocessing, and frame stacking (in that order)."""
    base = gym.make(env_name)
    skipped = RepeatActionAndMaxFrame(base, repeat, clip_rewards, no_ops,
                                      fire_first)
    preprocessed = PreprocessFrame(shape, skipped)
    return StackFrames(preprocessed, repeat)
| [
"matplotlib.pyplot.savefig",
"cv2.resize",
"collections.deque",
"gym.spaces.Box",
"matplotlib.pyplot.axvline",
"matplotlib.pyplot.figure",
"numpy.random.randint",
"numpy.empty",
"numpy.array",
"cv2.cvtColor",
"numpy.maximum",
"numpy.zeros_like",
"gym.make"
] | [((169, 181), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (179, 181), True, 'import matplotlib.pyplot as plt\n'), ((527, 538), 'numpy.empty', 'np.empty', (['N'], {}), '(N)\n', (535, 538), True, 'import numpy as np\n'), ((952, 973), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {}), '(filename)\n', (963, 973), True, 'import matplotlib.pyplot as plt\n'), ((4303, 4321), 'gym.make', 'gym.make', (['env_name'], {}), '(env_name)\n', (4311, 4321), False, 'import gym\n'), ((1445, 1475), 'numpy.zeros_like', 'np.zeros_like', (['(2, self.shape)'], {}), '((2, self.shape))\n', (1458, 1475), True, 'import numpy as np\n'), ((2001, 2055), 'numpy.maximum', 'np.maximum', (['self.frame_buffer[0]', 'self.frame_buffer[1]'], {}), '(self.frame_buffer[0], self.frame_buffer[1])\n', (2011, 2055), True, 'import numpy as np\n'), ((2538, 2568), 'numpy.zeros_like', 'np.zeros_like', (['(2, self.shape)'], {}), '((2, self.shape))\n', (2551, 2568), True, 'import numpy as np\n'), ((2845, 2912), 'gym.spaces.Box', 'gym.spaces.Box', ([], {'low': '(0)', 'high': '(1.0)', 'shape': 'self.shape', 'dtype': 'np.float32'}), '(low=0, high=1.0, shape=self.shape, dtype=np.float32)\n', (2859, 2912), False, 'import gym\n'), ((3010, 3047), 'cv2.cvtColor', 'cv2.cvtColor', (['obs', 'cv2.COLOR_RGB2GRAY'], {}), '(obs, cv2.COLOR_RGB2GRAY)\n', (3022, 3047), False, 'import cv2\n'), ((3073, 3140), 'cv2.resize', 'cv2.resize', (['new_frame', 'self.shape[1:]'], {'interpolation': 'cv2.INTER_AREA'}), '(new_frame, self.shape[1:], interpolation=cv2.INTER_AREA)\n', (3083, 3140), False, 'import cv2\n'), ((3717, 3749), 'collections.deque', 'collections.deque', ([], {'maxlen': 'repeat'}), '(maxlen=repeat)\n', (3734, 3749), False, 'import collections\n'), ((927, 946), 'matplotlib.pyplot.axvline', 'plt.axvline', ([], {'x': 'line'}), '(x=line)\n', (938, 946), True, 'import matplotlib.pyplot as plt\n'), ((2173, 2203), 'numpy.random.randint', 'np.random.randint', (['self.no_ops'], {}), '(self.no_ops)\n', 
(2190, 2203), True, 'import numpy as np\n'), ((3195, 3235), 'numpy.array', 'np.array', (['resized_screen'], {'dtype': 'np.uint8'}), '(resized_screen, dtype=np.uint8)\n', (3203, 3235), True, 'import numpy as np\n'), ((3940, 3960), 'numpy.array', 'np.array', (['self.stack'], {}), '(self.stack)\n', (3948, 3960), True, 'import numpy as np\n'), ((4097, 4117), 'numpy.array', 'np.array', (['self.stack'], {}), '(self.stack)\n', (4105, 4117), True, 'import numpy as np\n'), ((1811, 1829), 'numpy.array', 'np.array', (['[reward]'], {}), '([reward])\n', (1819, 1829), True, 'import numpy as np\n')] |
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
try:
from collections.abc import Sequence
except Exception:
from collections import Sequence
from ppdet.core.workspace import register, serializable
from ppdet.utils.download import get_dataset_path
@serializable
class DataSet(object):
    """
    Dataset, e.g., coco, pascal voc
    Args:
        annotation (str): annotation file path
        image_dir (str): directory where image files are stored
        shuffle (bool): shuffle samples
    """
    def __init__(self,
                 dataset_dir=None,
                 image_dir=None,
                 anno_path=None,
                 sample_num=-1,
                 with_background=True,
                 use_default_label=False,
                 **kwargs):
        super(DataSet, self).__init__()
        self.anno_path = anno_path
        self.image_dir = image_dir if image_dir is not None else ''
        self.dataset_dir = dataset_dir if dataset_dir is not None else ''
        self.sample_num = sample_num
        self.with_background = with_background
        self.use_default_label = use_default_label
        self.cname2cid = None
        self._imid2path = None
        # BUGFIX: get_roidb() reads self.roidbs before any subclass assigns
        # it; initialize here so a base instance fails with the intended
        # NotImplementedError instead of an AttributeError.
        self.roidbs = None
    def load_roidb_and_cname2cid(self):
        """load dataset"""
        raise NotImplementedError('%s.load_roidb_and_cname2cid not available' %
                                  (self.__class__.__name__))
    def get_roidb(self):
        # Lazily resolve the dataset location, then load records on first use.
        if not self.roidbs:
            data_dir = get_dataset_path(self.dataset_dir, self.anno_path,
                                        self.image_dir)
            if data_dir:
                self.dataset_dir = data_dir
            self.load_roidb_and_cname2cid()
        return self.roidbs
    def get_cname2cid(self):
        # Class-name -> class-id mapping, loaded on demand.
        if not self.cname2cid:
            self.load_roidb_and_cname2cid()
        return self.cname2cid
    def get_anno(self):
        # Full annotation path, or None when no annotation is configured.
        if self.anno_path is None:
            return
        return os.path.join(self.dataset_dir, self.anno_path)
    def get_imid2path(self):
        return self._imid2path
def _is_valid_file(f, extensions=('.jpg', '.jpeg', '.png', '.bmp')):
    """Case-insensitively check whether path *f* ends with an accepted extension."""
    lowered = f.lower()
    return lowered.endswith(extensions)
def _make_dataset(data_dir):
    """Recursively collect valid image files under *data_dir* (sorted walk).

    Args:
        data_dir: directory to scan; '~' is expanded.
    Returns:
        list of file paths accepted by _is_valid_file.
    Raises:
        ValueError: if *data_dir* is not a directory.
    """
    data_dir = os.path.expanduser(data_dir)
    if not os.path.isdir(data_dir):
        # BUGFIX: previously `raise ('...')` raised a plain string, which is
        # itself a TypeError at runtime; raise a proper exception instead.
        raise ValueError('{} should be a dir'.format(data_dir))
    images = []
    for root, _, fnames in sorted(os.walk(data_dir, followlinks=True)):
        for fname in sorted(fnames):
            file_path = os.path.join(root, fname)
            if _is_valid_file(file_path):
                images.append(file_path)
    return images
@register
@serializable
class ImageFolder(DataSet):
    """
    Args:
        dataset_dir (str): root directory for dataset.
        image_dir(list|str): list of image folders or list of image files
        anno_path (str): annotation file path.
        samples (int): number of samples to load, -1 means all
    """
    def __init__(self,
                 dataset_dir=None,
                 image_dir=None,
                 anno_path=None,
                 sample_num=-1,
                 with_background=True,
                 use_default_label=False,
                 **kwargs):
        super(ImageFolder, self).__init__(dataset_dir, image_dir, anno_path,
                                          sample_num, with_background,
                                          use_default_label)
        # Records are built lazily from the image files on first access.
        self.roidbs = None
        # Maps im_id -> image path, filled while loading records.
        self._imid2path = {}
    def get_roidb(self):
        # Lazily load one record per image file.
        if not self.roidbs:
            self.roidbs = self._load_images()
        return self.roidbs
    def set_images(self, images):
        # Repoint the dataset at an explicit image list and rebuild records.
        self.image_dir = images
        self.roidbs = self._load_images()
    def _parse(self):
        # Expand self.image_dir (a single path or a list of paths) into image
        # files: directories are walked recursively, plain files kept as-is.
        image_dir = self.image_dir
        if not isinstance(image_dir, Sequence):
            image_dir = [image_dir]
        images = []
        for im_dir in image_dir:
            if os.path.isdir(im_dir):
                im_dir = os.path.join(self.dataset_dir, im_dir)
                images.extend(_make_dataset(im_dir))
            elif os.path.isfile(im_dir) and _is_valid_file(im_dir):
                images.append(im_dir)
        return images
    def _load_images(self):
        # Build records of the form {'im_id': ndarray([i]), 'im_file': path},
        # honoring the sample_num cap (a value <= 0 means unlimited).
        images = self._parse()
        ct = 0
        records = []
        for image in images:
            assert image != '' and os.path.isfile(image), \
                    "Image {} not found".format(image)
            if self.sample_num > 0 and ct >= self.sample_num:
                break
            rec = {'im_id': np.array([ct]), 'im_file': image}
            self._imid2path[ct] = image
            ct += 1
            records.append(rec)
        assert len(records) > 0, "No image file found"
        return records
| [
"os.path.expanduser",
"os.path.join",
"ppdet.utils.download.get_dataset_path",
"os.path.isfile",
"numpy.array",
"os.path.isdir",
"os.walk"
] | [((2812, 2840), 'os.path.expanduser', 'os.path.expanduser', (['data_dir'], {}), '(data_dir)\n', (2830, 2840), False, 'import os\n'), ((2545, 2591), 'os.path.join', 'os.path.join', (['self.dataset_dir', 'self.anno_path'], {}), '(self.dataset_dir, self.anno_path)\n', (2557, 2591), False, 'import os\n'), ((2852, 2875), 'os.path.isdir', 'os.path.isdir', (['data_dir'], {}), '(data_dir)\n', (2865, 2875), False, 'import os\n'), ((2981, 3016), 'os.walk', 'os.walk', (['data_dir'], {'followlinks': '(True)'}), '(data_dir, followlinks=True)\n', (2988, 3016), False, 'import os\n'), ((2068, 2134), 'ppdet.utils.download.get_dataset_path', 'get_dataset_path', (['self.dataset_dir', 'self.anno_path', 'self.image_dir'], {}), '(self.dataset_dir, self.anno_path, self.image_dir)\n', (2084, 2134), False, 'from ppdet.utils.download import get_dataset_path\n'), ((3080, 3105), 'os.path.join', 'os.path.join', (['root', 'fname'], {}), '(root, fname)\n', (3092, 3105), False, 'import os\n'), ((4503, 4524), 'os.path.isdir', 'os.path.isdir', (['im_dir'], {}), '(im_dir)\n', (4516, 4524), False, 'import os\n'), ((4551, 4589), 'os.path.join', 'os.path.join', (['self.dataset_dir', 'im_dir'], {}), '(self.dataset_dir, im_dir)\n', (4563, 4589), False, 'import os\n'), ((4931, 4952), 'os.path.isfile', 'os.path.isfile', (['image'], {}), '(image)\n', (4945, 4952), False, 'import os\n'), ((5123, 5137), 'numpy.array', 'np.array', (['[ct]'], {}), '([ct])\n', (5131, 5137), True, 'import numpy as np\n'), ((4660, 4682), 'os.path.isfile', 'os.path.isfile', (['im_dir'], {}), '(im_dir)\n', (4674, 4682), False, 'import os\n')] |
import numpy as np
from sklearn.ensemble import RandomForestRegressor
from sklearn.externals import joblib
def read_data():
    # TODO: stub — must return a (features, targets) pair; the caller
    # unpacks two values, so the current `pass` (which returns None)
    # makes `xtr, ytr = read_data()` fail at runtime.
    pass
xtr, ytr = read_data()
# Hyperparameters previously saved with joblib (kwargs for the forest).
params = joblib.load('params.pkl')
# Count zero-valued targets versus everything else.
n_neg = len(ytr[np.isclose(ytr, 0)])
n_ot = len(ytr) - n_neg
# Weight the non-zero samples so both groups contribute equally in total.
# NOTE(review): divides by n_ot — fails if every target is zero; confirm inputs.
ot_weight = n_neg / n_ot
sw = np.ones_like(ytr)
sw[~np.isclose(ytr, 0)] = ot_weight
rf = RandomForestRegressor(**params)
rf.fit(xtr, ytr, sample_weight=sw)
| [
"numpy.ones_like",
"sklearn.externals.joblib.load",
"numpy.isclose",
"sklearn.ensemble.RandomForestRegressor"
] | [((169, 194), 'sklearn.externals.joblib.load', 'joblib.load', (['"""params.pkl"""'], {}), "('params.pkl')\n", (180, 194), False, 'from sklearn.externals import joblib\n'), ((286, 303), 'numpy.ones_like', 'np.ones_like', (['ytr'], {}), '(ytr)\n', (298, 303), True, 'import numpy as np\n'), ((345, 376), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {}), '(**params)\n', (366, 376), False, 'from sklearn.ensemble import RandomForestRegressor\n'), ((211, 229), 'numpy.isclose', 'np.isclose', (['ytr', '(0)'], {}), '(ytr, 0)\n', (221, 229), True, 'import numpy as np\n'), ((308, 326), 'numpy.isclose', 'np.isclose', (['ytr', '(0)'], {}), '(ytr, 0)\n', (318, 326), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import numpy as np
import pandas as pd
import matplotlib.pylab as plt
from astropy.io import fits
path_to_file = '/Users/enoto/drop/01_enoto/research/nicer/analysis/proximacen/data/200718_Notsu_TESS/add_nicer/'
# TESS sector 11 and 12 light curves of the same target, with NICER columns
# appended ('TRBLIST' extension).
hdu1 = fits.open(path_to_file + 'tess2019112060037-s0011-0000000388857263-0143-s_lc_nicer.fits')
hdu2 = fits.open(path_to_file + 'tess2019140104343-s0012-0000000388857263-0144-s_lc_nicer.fits')
df1 = pd.DataFrame(hdu1['TRBLIST'].data)
#df1.drop(index='cow', columns='small')
# Normalize the TESS flux by its mean over valid (positive-flux) samples.
mask_tess1 = (df1['PDCSAP_FLUX'] > 0)
PDCSAP_FLUX_mean1 = np.mean(np.array(df1['PDCSAP_FLUX'][mask_tess1]))
df1['PDCSAP_FLUX_normalized'] = df1['PDCSAP_FLUX'] / PDCSAP_FLUX_mean1
df1_sel = df1.loc[:,['MJD','cps_0.3_1.0keV','exp_1.0_3.0keV','PDCSAP_FLUX','PDCSAP_FLUX_normalized']]
df2 = pd.DataFrame(hdu2['TRBLIST'].data)
# Same normalization for the second sector.
mask_tess2 = (df2['PDCSAP_FLUX'] > 0)
PDCSAP_FLUX_mean2 = np.mean(np.array(df2['PDCSAP_FLUX'][mask_tess2]))
df2['PDCSAP_FLUX_normalized'] = df2['PDCSAP_FLUX'] / PDCSAP_FLUX_mean2
df2_sel = df2.loc[:,['MJD','cps_0.3_1.0keV','exp_1.0_3.0keV','PDCSAP_FLUX','PDCSAP_FLUX_normalized']]
# Concatenate both sectors into a single time series.
df = pd.concat([df1_sel, df2_sel])
# Keep rows that have NICER exposure in the 1.0-3.0 keV band.
mask = (df['exp_1.0_3.0keV'] > 0)
fontsize = 14
# Two stacked panels sharing the MJD axis: NICER rate (top), TESS flux (bottom).
fig, axs = plt.subplots(2,1, figsize=(11.69,8.27),
	sharex=True, gridspec_kw={'hspace': 0})
axs[0].step(df['MJD'][mask],df['cps_0.3_1.0keV'][mask],
	'o',mec='k',markersize=4,where='mid',color='#FF5733')
axs[0].set_ylabel('NICER rate (cps) 0.3-1.0 keV',fontsize=fontsize)
axs[0].set_yscale('log')
axs[1].step(df['MJD'],df['PDCSAP_FLUX_normalized'],
	'o',mec='k',markersize=4,where='mid',color='#1565C0')
#axs[1].set_yscale('log')
axs[1].set_ylabel('TESS optical intensity (arbitary unit)',fontsize=fontsize)
axs[1].set_ylim(0.995,1.08)
axs[1].set_xlabel('MJD (day)',fontsize=fontsize)
# Shared cosmetics for both panels: grids, inward ticks, outer labels only.
for ax in axs:
	ax.label_outer()
	ax.minorticks_on()
	ax.xaxis.grid(True)
	ax.xaxis.grid(which='major', linestyle='--', color='#000000')
	ax.xaxis.grid(which='minor', linestyle='-.')
	#ax.xaxis.set_minor_locator(dates.HourLocator())
	ax.tick_params(axis="both", which='major', direction='in', length=5)
	ax.tick_params(axis="both", which='minor', direction='in', length=3)
fig.align_ylabels(axs)
plt.tight_layout(pad=2)
plt.rcParams["font.family"] = "serif"
plt.rcParams["mathtext.fontset"] = "dejavuserif"
# Full light curve over both sectors.
plt.savefig('curve.pdf')
# Zoom into MJD 58622-58624 and save a second copy of the same figure.
for i in range(len(axs)):
	axs[i].set_xlim(58622,58624)
plt.savefig("tmp1.pdf")
# Scatter plot: TESS optical intensity vs NICER X-ray rate (simultaneous bins).
fig, axs = plt.subplots(1,1, figsize=(8.27,8.27))
plt.plot(df['PDCSAP_FLUX_normalized'][mask],df['cps_0.3_1.0keV'][mask],'o')
#	'o',mec='k',markersize=4,where='mid',color='#FF5733')
plt.xlabel('TESS optical intensity (arbitary)',fontsize=fontsize)
#axs[0].set_yscale('log')
plt.ylabel('NICER (cps) 0.3-1.0 keV',fontsize=fontsize)
plt.tight_layout(pad=2)
plt.rcParams["font.family"] = "serif"
plt.rcParams["mathtext.fontset"] = "dejavuserif"
plt.savefig('scatter.pdf')
| [
"matplotlib.pylab.subplots",
"matplotlib.pylab.savefig",
"matplotlib.pylab.tight_layout",
"matplotlib.pylab.xlabel",
"numpy.array",
"astropy.io.fits.open",
"pandas.DataFrame",
"matplotlib.pylab.plot",
"pandas.concat",
"matplotlib.pylab.ylabel"
] | [((243, 336), 'astropy.io.fits.open', 'fits.open', (["(path_to_file + 'tess2019112060037-s0011-0000000388857263-0143-s_lc_nicer.fits'\n )"], {}), "(path_to_file +\n 'tess2019112060037-s0011-0000000388857263-0143-s_lc_nicer.fits')\n", (252, 336), False, 'from astropy.io import fits\n'), ((340, 433), 'astropy.io.fits.open', 'fits.open', (["(path_to_file + 'tess2019140104343-s0012-0000000388857263-0144-s_lc_nicer.fits'\n )"], {}), "(path_to_file +\n 'tess2019140104343-s0012-0000000388857263-0144-s_lc_nicer.fits')\n", (349, 433), False, 'from astropy.io import fits\n'), ((437, 471), 'pandas.DataFrame', 'pd.DataFrame', (["hdu1['TRBLIST'].data"], {}), "(hdu1['TRBLIST'].data)\n", (449, 471), True, 'import pandas as pd\n'), ((801, 835), 'pandas.DataFrame', 'pd.DataFrame', (["hdu2['TRBLIST'].data"], {}), "(hdu2['TRBLIST'].data)\n", (813, 835), True, 'import pandas as pd\n'), ((1123, 1152), 'pandas.concat', 'pd.concat', (['[df1_sel, df2_sel]'], {}), '([df1_sel, df2_sel])\n', (1132, 1152), True, 'import pandas as pd\n'), ((1213, 1299), 'matplotlib.pylab.subplots', 'plt.subplots', (['(2)', '(1)'], {'figsize': '(11.69, 8.27)', 'sharex': '(True)', 'gridspec_kw': "{'hspace': 0}"}), "(2, 1, figsize=(11.69, 8.27), sharex=True, gridspec_kw={\n 'hspace': 0})\n", (1225, 1299), True, 'import matplotlib.pylab as plt\n'), ((2185, 2208), 'matplotlib.pylab.tight_layout', 'plt.tight_layout', ([], {'pad': '(2)'}), '(pad=2)\n', (2201, 2208), True, 'import matplotlib.pylab as plt\n'), ((2296, 2320), 'matplotlib.pylab.savefig', 'plt.savefig', (['"""curve.pdf"""'], {}), "('curve.pdf')\n", (2307, 2320), True, 'import matplotlib.pylab as plt\n'), ((2378, 2401), 'matplotlib.pylab.savefig', 'plt.savefig', (['"""tmp1.pdf"""'], {}), "('tmp1.pdf')\n", (2389, 2401), True, 'import matplotlib.pylab as plt\n'), ((2415, 2455), 'matplotlib.pylab.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(8.27, 8.27)'}), '(1, 1, figsize=(8.27, 8.27))\n', (2427, 2455), True, 'import matplotlib.pylab as 
plt\n'), ((2454, 2531), 'matplotlib.pylab.plot', 'plt.plot', (["df['PDCSAP_FLUX_normalized'][mask]", "df['cps_0.3_1.0keV'][mask]", '"""o"""'], {}), "(df['PDCSAP_FLUX_normalized'][mask], df['cps_0.3_1.0keV'][mask], 'o')\n", (2462, 2531), True, 'import matplotlib.pylab as plt\n'), ((2586, 2652), 'matplotlib.pylab.xlabel', 'plt.xlabel', (['"""TESS optical intensity (arbitary)"""'], {'fontsize': 'fontsize'}), "('TESS optical intensity (arbitary)', fontsize=fontsize)\n", (2596, 2652), True, 'import matplotlib.pylab as plt\n'), ((2678, 2734), 'matplotlib.pylab.ylabel', 'plt.ylabel', (['"""NICER (cps) 0.3-1.0 keV"""'], {'fontsize': 'fontsize'}), "('NICER (cps) 0.3-1.0 keV', fontsize=fontsize)\n", (2688, 2734), True, 'import matplotlib.pylab as plt\n'), ((2734, 2757), 'matplotlib.pylab.tight_layout', 'plt.tight_layout', ([], {'pad': '(2)'}), '(pad=2)\n', (2750, 2757), True, 'import matplotlib.pylab as plt\n'), ((2845, 2871), 'matplotlib.pylab.savefig', 'plt.savefig', (['"""scatter.pdf"""'], {}), "('scatter.pdf')\n", (2856, 2871), True, 'import matplotlib.pylab as plt\n'), ((578, 618), 'numpy.array', 'np.array', (["df1['PDCSAP_FLUX'][mask_tess1]"], {}), "(df1['PDCSAP_FLUX'][mask_tess1])\n", (586, 618), True, 'import numpy as np\n'), ((902, 942), 'numpy.array', 'np.array', (["df2['PDCSAP_FLUX'][mask_tess2]"], {}), "(df2['PDCSAP_FLUX'][mask_tess2])\n", (910, 942), True, 'import numpy as np\n')] |
""" Code for the MAML algorithm and network definitions. """
from __future__ import print_function
import numpy as np
import sys
import tensorflow as tf
try:
import special_grads
except KeyError as e:
print('WARN: Cannot define MaxPoolGrad, likely already defined for this version of tensorflow: %s' % e,
file=sys.stderr)
from tensorflow.python.platform import flags
from utils import mse, xent, conv_block, normalize
FLAGS = flags.FLAGS
class MAML:
    """Model-Agnostic Meta-Learning (MAML), TensorFlow-1 graph-mode version.

    `construct_model()` builds the whole meta-learning graph: an inner loop
    (`task_metalearn`) that adapts a copy of the weights with a few gradient
    steps per task, mapped over the meta-batch with `tf.map_fn`, and an outer
    Adam optimizer that trains the initial weights through those updates.
    All hyperparameters come from the module-level `FLAGS`.
    """
    def __init__(self, dim_input=1, dim_output=1, test_num_updates=5):
        """ must call construct_model() after initializing MAML! """
        self.dim_input = dim_input
        self.dim_output = dim_output
        # Inner-loop (per-task) step size.
        self.update_lr = FLAGS.update_lr
        # Placeholder with a default so the meta learning rate can be
        # overridden per session.run() call.
        self.meta_lr = tf.placeholder_with_default(FLAGS.meta_lr, ())
        self.classification = False
        self.test_num_updates = test_num_updates
        # Keep probability shared by dropout / drop-connect forward passes.
        self.keep_prob = FLAGS.keep_prob
        if FLAGS.datasource == 'sinusoid':
            # Regression task: small fully-connected net with MSE loss.
            self.dim_hidden = [40, 40]
            self.loss_func = mse
            if FLAGS.drop_connect == True:
                self.forward = self.forward_fc_dropconnect
            else:
                self.forward = self.forward_fc
            self.construct_weights = self.construct_fc_weights
        elif FLAGS.datasource == 'omniglot' or FLAGS.datasource == 'miniimagenet':
            # Classification tasks: cross-entropy loss, conv or FC network.
            self.loss_func = xent
            self.classification = True
            if FLAGS.conv:
                self.dim_hidden = FLAGS.num_filters
                self.forward = self.forward_conv
                self.construct_weights = self.construct_conv_weights
            else:
                self.dim_hidden = [256, 128, 64, 64]
                self.forward=self.forward_fc
                self.construct_weights = self.construct_fc_weights
            if FLAGS.datasource == 'miniimagenet':
                self.channels = 3
            else:
                self.channels = 1
            # Inputs arrive flattened; recover the square image side length.
            self.img_size = int(np.sqrt(self.dim_input/self.channels))
        else:
            raise ValueError('Unrecognized data source.')

    def construct_model(self, input_tensors=None, prefix='metatrain_'):
        """Build the meta-train or meta-val graph.

        Args:
            input_tensors: optional dict with 'inputa'/'inputb'/'labela'/'labelb'
                tensors; if None, plain float32 placeholders are created.
            prefix: summary-name prefix; 'train' in it selects the training
                branch (optimizer ops), otherwise the metaval branch.
        """
        # a: training data for inner gradient, b: test data for meta gradient
        if input_tensors is None:
            self.inputa = tf.placeholder(tf.float32)
            self.inputb = tf.placeholder(tf.float32)
            self.labela = tf.placeholder(tf.float32)
            self.labelb = tf.placeholder(tf.float32)
        else:
            self.inputa = input_tensors['inputa']
            self.inputb = input_tensors['inputb']
            self.labela = input_tensors['labela']
            self.labelb = input_tensors['labelb']

        with tf.variable_scope('model', reuse=None) as training_scope:
            # Reuse existing variables if construct_model() was called before
            # (e.g. a metaval graph built after the metatrain graph).
            if 'weights' in dir(self):
                training_scope.reuse_variables()
                weights = self.weights
            else:
                # Define the weights
                self.weights = weights = self.construct_weights()

            # outputbs[i] and lossesb[i] is the output and loss after i+1 gradient updates
            lossesa, outputas, lossesb, outputbs = [], [], [], []
            accuraciesa, accuraciesb = [], []
            num_updates = max(self.test_num_updates, FLAGS.num_updates)
            outputbs = [[]]*num_updates
            lossesb = [[]]*num_updates
            accuraciesb = [[]]*num_updates

            def task_metalearn(inp, reuse=True):
                """ Perform gradient descent for one task in the meta-batch. """
                inputa, inputb, labela, labelb = inp
                task_outputbs, task_lossesb = [], []
                if self.classification:
                    task_accuraciesb = []
                task_outputa = self.forward(inputa, weights, reuse=reuse, keep_prob=self.keep_prob)  # only reuse on the first iter
                task_lossa = self.loss_func(task_outputa, labela)
                # First inner-loop step: gradient of the task loss w.r.t. the
                # meta-parameters.
                grads = tf.gradients(task_lossa, list(weights.values()))
                if FLAGS.stop_grad:
                    # Blocks backprop through the inner gradients
                    # (first-order approximation of MAML).
                    grads = [tf.stop_gradient(grad) for grad in grads]
                gradients = dict(zip(weights.keys(), grads))
                fast_weights = dict(zip(weights.keys(), [weights[key] - self.update_lr*gradients[key] for key in weights.keys()]))
                output = self.forward(inputb, fast_weights, reuse=True, keep_prob=self.keep_prob)
                task_outputbs.append(output)
                task_lossesb.append(self.loss_func(output, labelb))
                # Remaining inner-loop steps, each starting from fast_weights.
                for j in range(num_updates - 1):
                    loss = self.loss_func(self.forward(inputa, fast_weights, reuse=True, keep_prob=self.keep_prob), labela)
                    grads = tf.gradients(loss, list(fast_weights.values()))
                    if FLAGS.stop_grad:
                        grads = [tf.stop_gradient(grad) for grad in grads]
                    gradients = dict(zip(fast_weights.keys(), grads))
                    fast_weights = dict(zip(fast_weights.keys(), [fast_weights[key] - self.update_lr*gradients[key] for key in fast_weights.keys()]))
                    output = self.forward(inputb, fast_weights, reuse=True, keep_prob=self.keep_prob)
                    task_outputbs.append(output)
                    task_lossesb.append(self.loss_func(output, labelb))
                task_output = [task_outputa, task_outputbs, task_lossa, task_lossesb]
                if self.classification:
                    task_accuracya = tf.contrib.metrics.accuracy(tf.argmax(tf.nn.softmax(task_outputa), 1), tf.argmax(labela, 1))
                    for j in range(num_updates):
                        task_accuraciesb.append(tf.contrib.metrics.accuracy(tf.argmax(tf.nn.softmax(task_outputbs[j]), 1), tf.argmax(labelb, 1)))
                    task_output.extend([task_accuracya, task_accuraciesb])
                return task_output

            # if FLAGS.norm is not 'None':
            #     # to initialize the batch norm vars, might want to combine this, and not run idx 0 twice.
            #     unused = task_metalearn((self.inputa[0], self.inputb[0], self.labela[0], self.labelb[0]), False)

            # Output structure of task_metalearn, required by tf.map_fn.
            out_dtype = [tf.float32, [tf.float32]*num_updates, tf.float32, [tf.float32]*num_updates]
            if self.classification:
                out_dtype.extend([tf.float32, [tf.float32]*num_updates])
            # Run the inner loop independently for every task in the meta-batch.
            result = tf.map_fn(task_metalearn, elems=(self.inputa, self.inputb, self.labela, self.labelb), dtype=out_dtype, parallel_iterations=FLAGS.meta_batch_size)
            if self.classification:
                outputas, outputbs, lossesa, lossesb, accuraciesa, accuraciesb = result
            else:
                outputas, outputbs, lossesa, lossesb = result
            self.lossesb = lossesb
            self.result = result

        ## Performance & Optimization
        if 'train' in prefix:
            # Meta-batch-averaged pre-update and post-update losses.
            self.total_loss1 = total_loss1 = tf.reduce_sum(lossesa) / tf.to_float(FLAGS.meta_batch_size)
            self.total_losses2 = total_losses2 = [tf.reduce_sum(lossesb[j]) / tf.to_float(FLAGS.meta_batch_size) for j in range(num_updates)]
            # after the map_fn
            self.outputas, self.outputbs = outputas, outputbs
            if self.classification:
                self.total_accuracy1 = total_accuracy1 = tf.reduce_sum(accuraciesa) / tf.to_float(FLAGS.meta_batch_size)
                self.total_accuracies2 = total_accuracies2 = [tf.reduce_sum(accuraciesb[j]) / tf.to_float(FLAGS.meta_batch_size) for j in range(num_updates)]
            # Baseline op: trains on the pre-update loss only (no inner loop).
            self.pretrain_op = tf.train.AdamOptimizer(self.meta_lr).minimize(total_loss1)

            if FLAGS.metatrain_iterations > 0:
                optimizer = tf.train.AdamOptimizer(self.meta_lr)
                # Meta-objective: loss after FLAGS.num_updates inner steps.
                self.gvs = gvs = optimizer.compute_gradients(self.total_losses2[FLAGS.num_updates-1])
                if FLAGS.datasource == 'miniimagenet':
                    # Clip meta-gradients to [-10, 10] for stability.
                    gvs = [(tf.clip_by_value(grad, -10, 10), var) for grad, var in gvs]
                self.metatrain_op = optimizer.apply_gradients(gvs)
        else:
            # Evaluation branch: same statistics under metaval_* names,
            # no optimizer ops.
            self.metaval_total_loss1 = total_loss1 = tf.reduce_sum(lossesa) / tf.to_float(FLAGS.meta_batch_size)
            self.metaval_total_losses2 = total_losses2 = [tf.reduce_sum(lossesb[j]) / tf.to_float(FLAGS.meta_batch_size) for j in range(num_updates)]
            if self.classification:
                self.metaval_total_accuracy1 = total_accuracy1 = tf.reduce_sum(accuraciesa) / tf.to_float(FLAGS.meta_batch_size)
                self.metaval_total_accuracies2 = total_accuracies2 =[tf.reduce_sum(accuraciesb[j]) / tf.to_float(FLAGS.meta_batch_size) for j in range(num_updates)]

        ## Summaries
        tf.summary.scalar(prefix+'Pre-update loss', total_loss1)
        if self.classification:
            tf.summary.scalar(prefix+'Pre-update accuracy', total_accuracy1)
        for j in range(num_updates):
            tf.summary.scalar(prefix+'Post-update loss, step ' + str(j+1), total_losses2[j])
            if self.classification:
                tf.summary.scalar(prefix+'Post-update accuracy, step ' + str(j+1), total_accuracies2[j])

    ### Network construction functions (fc networks and conv networks)
    def construct_fc_weights(self):
        """Create fully-connected weights: truncated-normal W, zero biases."""
        weights = {}
        weights['w1'] = tf.Variable(tf.truncated_normal([self.dim_input, self.dim_hidden[0]], stddev=0.01))
        weights['b1'] = tf.Variable(tf.zeros([self.dim_hidden[0]]))
        for i in range(1,len(self.dim_hidden)):
            weights['w'+str(i+1)] = tf.Variable(tf.truncated_normal([self.dim_hidden[i-1], self.dim_hidden[i]], stddev=0.01))
            weights['b'+str(i+1)] = tf.Variable(tf.zeros([self.dim_hidden[i]]))
        # Final linear layer mapping the last hidden layer to the output.
        weights['w'+str(len(self.dim_hidden)+1)] = tf.Variable(tf.truncated_normal([self.dim_hidden[-1], self.dim_output], stddev=0.01))
        weights['b'+str(len(self.dim_hidden)+1)] = tf.Variable(tf.zeros([self.dim_output]))
        return weights

    def forward_fc(self, inp, weights, reuse=False, keep_prob=None):
        """FC forward pass; applies dropout after each hidden layer when
        keep_prob is given. The final layer is linear (no activation)."""
        hidden = normalize(tf.matmul(inp, weights['w1']) + weights['b1'], activation=tf.nn.relu, reuse=reuse, scope='0')
        if keep_prob is not None:
            hidden = tf.nn.dropout(hidden, keep_prob=keep_prob)
        for i in range(1,len(self.dim_hidden)):
            hidden = normalize(tf.matmul(hidden, weights['w'+str(i+1)]) + weights['b'+str(i+1)], activation=tf.nn.relu, reuse=reuse, scope=str(i+1))
            if keep_prob is not None:
                hidden = tf.nn.dropout(hidden, keep_prob=keep_prob)
        return tf.matmul(hidden, weights['w'+str(len(self.dim_hidden)+1)]) + weights['b'+str(len(self.dim_hidden)+1)]

    def forward_fc_dropconnect(self, inp, weights, reuse=False, keep_prob=None):
        """FC forward pass with drop-connect: individual weight entries are
        dropped (then rescaled by keep_prob) instead of activations."""
        if keep_prob is not None:
            # shape_w1 = weights['w1'].shape
            # shape_b1 = weights['b1'].shape
            # tf.nn.dropout scales kept entries by 1/keep_prob; multiplying
            # by keep_prob restores the original expected magnitude.
            drop_weights_w1 = tf.nn.dropout(weights['w1'], keep_prob=keep_prob) * keep_prob
            drop_weights_b1 = tf.nn.dropout(weights['b1'], keep_prob=keep_prob) * keep_prob
            # weights_w1 = tf.reshape(drop_weights_w1, shape=shape_w1)
            # weights_b1 = tf.reshape(drop_weights_b1, shape=shape_b1)
            # hidden = normalize(tf.matmul(inp, weights_w1) + weights_b1, activation=tf.nn.relu, reuse=reuse, scope='0')
            hidden = normalize(tf.matmul(inp, drop_weights_w1) + drop_weights_b1, activation=tf.nn.relu, reuse=reuse, scope='0')
        else:
            hidden = normalize(tf.matmul(inp, weights['w1']) + weights['b1'], activation=tf.nn.relu, reuse=reuse, scope='0')
        for i in range(1,len(self.dim_hidden)):
            if keep_prob is not None:
                # shape_w_i = weights['w'+str(i+1)].shape
                # shape_b_i = weights['b'+str(i+1)].shape
                drop_weights_w_i = tf.nn.dropout(weights['w'+str(i+1)], keep_prob=keep_prob) * keep_prob
                drop_weights_b_i = tf.nn.dropout(weights['b'+str(i+1)], keep_prob=keep_prob) * keep_prob
                # weights_w_i = tf.reshape(drop_weights_w_i, shape=shape_w_i)
                # weights_b_i = tf.reshape(drop_weights_b_i, shape=shape_b_i)
                # hidden = normalize(tf.matmul(hidden, weights_w_i) + weights_b_i, activation=tf.nn.relu, reuse=reuse, scope=str(i+1))
                hidden = normalize(tf.matmul(hidden, drop_weights_w_i) + drop_weights_b_i, activation=tf.nn.relu, reuse=reuse, scope=str(i+1))
            else:
                # NOTE(review): dropout with keep_prob=None in the else branch
                # looks inconsistent with the first-layer else branch above
                # (which applies no dropout) -- verify this path is intended.
                hidden = tf.nn.dropout(hidden, keep_prob=keep_prob)
                hidden = normalize(tf.matmul(hidden, weights['w'+str(i+1)]) + weights['b'+str(i+1)], activation=tf.nn.relu, reuse=reuse, scope=str(i+1))
        return tf.matmul(hidden, weights['w'+str(len(self.dim_hidden)+1)]) + weights['b'+str(len(self.dim_hidden)+1)]

    def construct_conv_weights(self):
        """Create 4-layer conv-net weights (3x3 Xavier convs + final FC)."""
        weights = {}
        dtype = tf.float32
        conv_initializer = tf.contrib.layers.xavier_initializer_conv2d(dtype=dtype)
        fc_initializer = tf.contrib.layers.xavier_initializer(dtype=dtype)
        k = 3
        weights['conv1'] = tf.get_variable('conv1', [k, k, self.channels, self.dim_hidden], initializer=conv_initializer, dtype=dtype)
        weights['b1'] = tf.Variable(tf.zeros([self.dim_hidden]))
        weights['conv2'] = tf.get_variable('conv2', [k, k, self.dim_hidden, self.dim_hidden], initializer=conv_initializer, dtype=dtype)
        weights['b2'] = tf.Variable(tf.zeros([self.dim_hidden]))
        weights['conv3'] = tf.get_variable('conv3', [k, k, self.dim_hidden, self.dim_hidden], initializer=conv_initializer, dtype=dtype)
        weights['b3'] = tf.Variable(tf.zeros([self.dim_hidden]))
        weights['conv4'] = tf.get_variable('conv4', [k, k, self.dim_hidden, self.dim_hidden], initializer=conv_initializer, dtype=dtype)
        weights['b4'] = tf.Variable(tf.zeros([self.dim_hidden]))
        if FLAGS.datasource == 'miniimagenet':
            # assumes max pooling
            weights['w5'] = tf.get_variable('w5', [self.dim_hidden*5*5, self.dim_output], initializer=fc_initializer)
            weights['b5'] = tf.Variable(tf.zeros([self.dim_output]), name='b5')
        else:
            weights['w5'] = tf.Variable(tf.random_normal([self.dim_hidden, self.dim_output]), name='w5')
            weights['b5'] = tf.Variable(tf.zeros([self.dim_output]), name='b5')
        return weights

    def forward_conv(self, inp, weights, reuse=False, scope=''):
        """Conv forward pass: four conv blocks, then flatten (miniimagenet)
        or global average pool, then a final linear layer.

        NOTE(review): this signature has no `keep_prob`, yet task_metalearn
        calls self.forward(..., keep_prob=...) -- confirm the conv path is
        exercised with a forward that accepts that keyword.
        """
        # reuse is for the normalization parameters.
        channels = self.channels
        inp = tf.reshape(inp, [-1, self.img_size, self.img_size, channels])
        hidden1 = conv_block(inp, weights['conv1'], weights['b1'], reuse, scope+'0')
        hidden2 = conv_block(hidden1, weights['conv2'], weights['b2'], reuse, scope+'1')
        hidden3 = conv_block(hidden2, weights['conv3'], weights['b3'], reuse, scope+'2')
        hidden4 = conv_block(hidden3, weights['conv4'], weights['b4'], reuse, scope+'3')
        if FLAGS.datasource == 'miniimagenet':
            # last hidden layer is 6x6x64-ish, reshape to a vector
            hidden4 = tf.reshape(hidden4, [-1, np.prod([int(dim) for dim in hidden4.get_shape()[1:]])])
        else:
            hidden4 = tf.reduce_mean(hidden4, [1, 2])
        return tf.matmul(hidden4, weights['w5']) + weights['b5']
| [
"numpy.sqrt",
"tensorflow.get_variable",
"tensorflow.reduce_sum",
"tensorflow.nn.dropout",
"tensorflow.nn.softmax",
"tensorflow.reduce_mean",
"tensorflow.random_normal",
"tensorflow.placeholder",
"utils.conv_block",
"tensorflow.contrib.layers.xavier_initializer_conv2d",
"tensorflow.matmul",
"t... | [((747, 793), 'tensorflow.placeholder_with_default', 'tf.placeholder_with_default', (['FLAGS.meta_lr', '()'], {}), '(FLAGS.meta_lr, ())\n', (774, 793), True, 'import tensorflow as tf\n'), ((8680, 8738), 'tensorflow.summary.scalar', 'tf.summary.scalar', (["(prefix + 'Pre-update loss')", 'total_loss1'], {}), "(prefix + 'Pre-update loss', total_loss1)\n", (8697, 8738), True, 'import tensorflow as tf\n'), ((12878, 12934), 'tensorflow.contrib.layers.xavier_initializer_conv2d', 'tf.contrib.layers.xavier_initializer_conv2d', ([], {'dtype': 'dtype'}), '(dtype=dtype)\n', (12921, 12934), True, 'import tensorflow as tf\n'), ((12961, 13010), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {'dtype': 'dtype'}), '(dtype=dtype)\n', (12997, 13010), True, 'import tensorflow as tf\n'), ((13053, 13164), 'tensorflow.get_variable', 'tf.get_variable', (['"""conv1"""', '[k, k, self.channels, self.dim_hidden]'], {'initializer': 'conv_initializer', 'dtype': 'dtype'}), "('conv1', [k, k, self.channels, self.dim_hidden],\n initializer=conv_initializer, dtype=dtype)\n", (13068, 13164), True, 'import tensorflow as tf\n'), ((13253, 13366), 'tensorflow.get_variable', 'tf.get_variable', (['"""conv2"""', '[k, k, self.dim_hidden, self.dim_hidden]'], {'initializer': 'conv_initializer', 'dtype': 'dtype'}), "('conv2', [k, k, self.dim_hidden, self.dim_hidden],\n initializer=conv_initializer, dtype=dtype)\n", (13268, 13366), True, 'import tensorflow as tf\n'), ((13455, 13568), 'tensorflow.get_variable', 'tf.get_variable', (['"""conv3"""', '[k, k, self.dim_hidden, self.dim_hidden]'], {'initializer': 'conv_initializer', 'dtype': 'dtype'}), "('conv3', [k, k, self.dim_hidden, self.dim_hidden],\n initializer=conv_initializer, dtype=dtype)\n", (13470, 13568), True, 'import tensorflow as tf\n'), ((13657, 13770), 'tensorflow.get_variable', 'tf.get_variable', (['"""conv4"""', '[k, k, self.dim_hidden, self.dim_hidden]'], {'initializer': 'conv_initializer', 
'dtype': 'dtype'}), "('conv4', [k, k, self.dim_hidden, self.dim_hidden],\n initializer=conv_initializer, dtype=dtype)\n", (13672, 13770), True, 'import tensorflow as tf\n'), ((14499, 14560), 'tensorflow.reshape', 'tf.reshape', (['inp', '[-1, self.img_size, self.img_size, channels]'], {}), '(inp, [-1, self.img_size, self.img_size, channels])\n', (14509, 14560), True, 'import tensorflow as tf\n'), ((14580, 14648), 'utils.conv_block', 'conv_block', (['inp', "weights['conv1']", "weights['b1']", 'reuse', "(scope + '0')"], {}), "(inp, weights['conv1'], weights['b1'], reuse, scope + '0')\n", (14590, 14648), False, 'from utils import mse, xent, conv_block, normalize\n'), ((14665, 14737), 'utils.conv_block', 'conv_block', (['hidden1', "weights['conv2']", "weights['b2']", 'reuse', "(scope + '1')"], {}), "(hidden1, weights['conv2'], weights['b2'], reuse, scope + '1')\n", (14675, 14737), False, 'from utils import mse, xent, conv_block, normalize\n'), ((14754, 14826), 'utils.conv_block', 'conv_block', (['hidden2', "weights['conv3']", "weights['b3']", 'reuse', "(scope + '2')"], {}), "(hidden2, weights['conv3'], weights['b3'], reuse, scope + '2')\n", (14764, 14826), False, 'from utils import mse, xent, conv_block, normalize\n'), ((14843, 14915), 'utils.conv_block', 'conv_block', (['hidden3', "weights['conv4']", "weights['b4']", 'reuse', "(scope + '3')"], {}), "(hidden3, weights['conv4'], weights['b4'], reuse, scope + '3')\n", (14853, 14915), False, 'from utils import mse, xent, conv_block, normalize\n'), ((2292, 2318), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {}), '(tf.float32)\n', (2306, 2318), True, 'import tensorflow as tf\n'), ((2345, 2371), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {}), '(tf.float32)\n', (2359, 2371), True, 'import tensorflow as tf\n'), ((2398, 2424), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {}), '(tf.float32)\n', (2412, 2424), True, 'import tensorflow as tf\n'), ((2451, 2477), 
'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {}), '(tf.float32)\n', (2465, 2477), True, 'import tensorflow as tf\n'), ((2706, 2744), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""model"""'], {'reuse': 'None'}), "('model', reuse=None)\n", (2723, 2744), True, 'import tensorflow as tf\n'), ((6384, 6533), 'tensorflow.map_fn', 'tf.map_fn', (['task_metalearn'], {'elems': '(self.inputa, self.inputb, self.labela, self.labelb)', 'dtype': 'out_dtype', 'parallel_iterations': 'FLAGS.meta_batch_size'}), '(task_metalearn, elems=(self.inputa, self.inputb, self.labela,\n self.labelb), dtype=out_dtype, parallel_iterations=FLAGS.meta_batch_size)\n', (6393, 6533), True, 'import tensorflow as tf\n'), ((8781, 8847), 'tensorflow.summary.scalar', 'tf.summary.scalar', (["(prefix + 'Pre-update accuracy')", 'total_accuracy1'], {}), "(prefix + 'Pre-update accuracy', total_accuracy1)\n", (8798, 8847), True, 'import tensorflow as tf\n'), ((9283, 9353), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['[self.dim_input, self.dim_hidden[0]]'], {'stddev': '(0.01)'}), '([self.dim_input, self.dim_hidden[0]], stddev=0.01)\n', (9302, 9353), True, 'import tensorflow as tf\n'), ((9391, 9421), 'tensorflow.zeros', 'tf.zeros', (['[self.dim_hidden[0]]'], {}), '([self.dim_hidden[0]])\n', (9399, 9421), True, 'import tensorflow as tf\n'), ((9740, 9812), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['[self.dim_hidden[-1], self.dim_output]'], {'stddev': '(0.01)'}), '([self.dim_hidden[-1], self.dim_output], stddev=0.01)\n', (9759, 9812), True, 'import tensorflow as tf\n'), ((9877, 9904), 'tensorflow.zeros', 'tf.zeros', (['[self.dim_output]'], {}), '([self.dim_output])\n', (9885, 9904), True, 'import tensorflow as tf\n'), ((10175, 10217), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['hidden'], {'keep_prob': 'keep_prob'}), '(hidden, keep_prob=keep_prob)\n', (10188, 10217), True, 'import tensorflow as tf\n'), ((13197, 13224), 'tensorflow.zeros', 'tf.zeros', 
(['[self.dim_hidden]'], {}), '([self.dim_hidden])\n', (13205, 13224), True, 'import tensorflow as tf\n'), ((13399, 13426), 'tensorflow.zeros', 'tf.zeros', (['[self.dim_hidden]'], {}), '([self.dim_hidden])\n', (13407, 13426), True, 'import tensorflow as tf\n'), ((13601, 13628), 'tensorflow.zeros', 'tf.zeros', (['[self.dim_hidden]'], {}), '([self.dim_hidden])\n', (13609, 13628), True, 'import tensorflow as tf\n'), ((13803, 13830), 'tensorflow.zeros', 'tf.zeros', (['[self.dim_hidden]'], {}), '([self.dim_hidden])\n', (13811, 13830), True, 'import tensorflow as tf\n'), ((13941, 14038), 'tensorflow.get_variable', 'tf.get_variable', (['"""w5"""', '[self.dim_hidden * 5 * 5, self.dim_output]'], {'initializer': 'fc_initializer'}), "('w5', [self.dim_hidden * 5 * 5, self.dim_output],\n initializer=fc_initializer)\n", (13956, 14038), True, 'import tensorflow as tf\n'), ((15168, 15199), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['hidden4', '[1, 2]'], {}), '(hidden4, [1, 2])\n', (15182, 15199), True, 'import tensorflow as tf\n'), ((15216, 15249), 'tensorflow.matmul', 'tf.matmul', (['hidden4', "weights['w5']"], {}), "(hidden4, weights['w5'])\n", (15225, 15249), True, 'import tensorflow as tf\n'), ((6918, 6940), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['lossesa'], {}), '(lossesa)\n', (6931, 6940), True, 'import tensorflow as tf\n'), ((6943, 6977), 'tensorflow.to_float', 'tf.to_float', (['FLAGS.meta_batch_size'], {}), '(FLAGS.meta_batch_size)\n', (6954, 6977), True, 'import tensorflow as tf\n'), ((7694, 7730), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['self.meta_lr'], {}), '(self.meta_lr)\n', (7716, 7730), True, 'import tensorflow as tf\n'), ((8110, 8132), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['lossesa'], {}), '(lossesa)\n', (8123, 8132), True, 'import tensorflow as tf\n'), ((8135, 8169), 'tensorflow.to_float', 'tf.to_float', (['FLAGS.meta_batch_size'], {}), '(FLAGS.meta_batch_size)\n', (8146, 8169), True, 'import tensorflow as tf\n'), ((9519, 9597), 
'tensorflow.truncated_normal', 'tf.truncated_normal', (['[self.dim_hidden[i - 1], self.dim_hidden[i]]'], {'stddev': '(0.01)'}), '([self.dim_hidden[i - 1], self.dim_hidden[i]], stddev=0.01)\n', (9538, 9597), True, 'import tensorflow as tf\n'), ((9645, 9675), 'tensorflow.zeros', 'tf.zeros', (['[self.dim_hidden[i]]'], {}), '([self.dim_hidden[i]])\n', (9653, 9675), True, 'import tensorflow as tf\n'), ((10026, 10055), 'tensorflow.matmul', 'tf.matmul', (['inp', "weights['w1']"], {}), "(inp, weights['w1'])\n", (10035, 10055), True, 'import tensorflow as tf\n'), ((10478, 10520), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['hidden'], {'keep_prob': 'keep_prob'}), '(hidden, keep_prob=keep_prob)\n', (10491, 10520), True, 'import tensorflow as tf\n'), ((10876, 10925), 'tensorflow.nn.dropout', 'tf.nn.dropout', (["weights['w1']"], {'keep_prob': 'keep_prob'}), "(weights['w1'], keep_prob=keep_prob)\n", (10889, 10925), True, 'import tensorflow as tf\n'), ((10968, 11017), 'tensorflow.nn.dropout', 'tf.nn.dropout', (["weights['b1']"], {'keep_prob': 'keep_prob'}), "(weights['b1'], keep_prob=keep_prob)\n", (10981, 11017), True, 'import tensorflow as tf\n'), ((12450, 12492), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['hidden'], {'keep_prob': 'keep_prob'}), '(hidden, keep_prob=keep_prob)\n', (12463, 12492), True, 'import tensorflow as tf\n'), ((14071, 14098), 'tensorflow.zeros', 'tf.zeros', (['[self.dim_output]'], {}), '([self.dim_output])\n', (14079, 14098), True, 'import tensorflow as tf\n'), ((14165, 14217), 'tensorflow.random_normal', 'tf.random_normal', (['[self.dim_hidden, self.dim_output]'], {}), '([self.dim_hidden, self.dim_output])\n', (14181, 14217), True, 'import tensorflow as tf\n'), ((14270, 14297), 'tensorflow.zeros', 'tf.zeros', (['[self.dim_output]'], {}), '([self.dim_output])\n', (14278, 14297), True, 'import tensorflow as tf\n'), ((1970, 2009), 'numpy.sqrt', 'np.sqrt', (['(self.dim_input / self.channels)'], {}), '(self.dim_input / self.channels)\n', (1977, 2009), True, 
'import numpy as np\n'), ((7028, 7053), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['lossesb[j]'], {}), '(lossesb[j])\n', (7041, 7053), True, 'import tensorflow as tf\n'), ((7056, 7090), 'tensorflow.to_float', 'tf.to_float', (['FLAGS.meta_batch_size'], {}), '(FLAGS.meta_batch_size)\n', (7067, 7090), True, 'import tensorflow as tf\n'), ((7306, 7332), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['accuraciesa'], {}), '(accuraciesa)\n', (7319, 7332), True, 'import tensorflow as tf\n'), ((7335, 7369), 'tensorflow.to_float', 'tf.to_float', (['FLAGS.meta_batch_size'], {}), '(FLAGS.meta_batch_size)\n', (7346, 7369), True, 'import tensorflow as tf\n'), ((7559, 7595), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['self.meta_lr'], {}), '(self.meta_lr)\n', (7581, 7595), True, 'import tensorflow as tf\n'), ((8228, 8253), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['lossesb[j]'], {}), '(lossesb[j])\n', (8241, 8253), True, 'import tensorflow as tf\n'), ((8256, 8290), 'tensorflow.to_float', 'tf.to_float', (['FLAGS.meta_batch_size'], {}), '(FLAGS.meta_batch_size)\n', (8267, 8290), True, 'import tensorflow as tf\n'), ((8421, 8447), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['accuraciesa'], {}), '(accuraciesa)\n', (8434, 8447), True, 'import tensorflow as tf\n'), ((8450, 8484), 'tensorflow.to_float', 'tf.to_float', (['FLAGS.meta_batch_size'], {}), '(FLAGS.meta_batch_size)\n', (8461, 8484), True, 'import tensorflow as tf\n'), ((11324, 11355), 'tensorflow.matmul', 'tf.matmul', (['inp', 'drop_weights_w1'], {}), '(inp, drop_weights_w1)\n', (11333, 11355), True, 'import tensorflow as tf\n'), ((11467, 11496), 'tensorflow.matmul', 'tf.matmul', (['inp', "weights['w1']"], {}), "(inp, weights['w1'])\n", (11476, 11496), True, 'import tensorflow as tf\n'), ((4068, 4090), 'tensorflow.stop_gradient', 'tf.stop_gradient', (['grad'], {}), '(grad)\n', (4084, 4090), True, 'import tensorflow as tf\n'), ((5557, 5577), 'tensorflow.argmax', 'tf.argmax', (['labela', '(1)'], {}), '(labela, 
1)\n', (5566, 5577), True, 'import tensorflow as tf\n'), ((7432, 7461), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['accuraciesb[j]'], {}), '(accuraciesb[j])\n', (7445, 7461), True, 'import tensorflow as tf\n'), ((7464, 7498), 'tensorflow.to_float', 'tf.to_float', (['FLAGS.meta_batch_size'], {}), '(FLAGS.meta_batch_size)\n', (7475, 7498), True, 'import tensorflow as tf\n'), ((8554, 8583), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['accuraciesb[j]'], {}), '(accuraciesb[j])\n', (8567, 8583), True, 'import tensorflow as tf\n'), ((8586, 8620), 'tensorflow.to_float', 'tf.to_float', (['FLAGS.meta_batch_size'], {}), '(FLAGS.meta_batch_size)\n', (8597, 8620), True, 'import tensorflow as tf\n'), ((12299, 12334), 'tensorflow.matmul', 'tf.matmul', (['hidden', 'drop_weights_w_i'], {}), '(hidden, drop_weights_w_i)\n', (12308, 12334), True, 'import tensorflow as tf\n'), ((4836, 4858), 'tensorflow.stop_gradient', 'tf.stop_gradient', (['grad'], {}), '(grad)\n', (4852, 4858), True, 'import tensorflow as tf\n'), ((5524, 5551), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['task_outputa'], {}), '(task_outputa)\n', (5537, 5551), True, 'import tensorflow as tf\n'), ((7916, 7947), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['grad', '(-10)', '(10)'], {}), '(grad, -10, 10)\n', (7932, 7947), True, 'import tensorflow as tf\n'), ((5751, 5771), 'tensorflow.argmax', 'tf.argmax', (['labelb', '(1)'], {}), '(labelb, 1)\n', (5760, 5771), True, 'import tensorflow as tf\n'), ((5714, 5745), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['task_outputbs[j]'], {}), '(task_outputbs[j])\n', (5727, 5745), True, 'import tensorflow as tf\n')] |
#!/usr/bin/env python3
# Standard imports
from typing import Union
from pathlib import Path
import functools
# External imports
import tqdm
import numpy as np
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
# Default on-disk location of the torchvision dataset cache.
_DEFAULT_DATASET_ROOT = "/opt/Datasets"
# Presumably the digit class used when restricting MNIST to a single
# class elsewhere in this module -- not referenced in the visible code.
_DEFAULT_MNIST_DIGIT = 6
# Per-channel normalization statistics applied by transforms.Normalize:
# pixels in [0, 1] are mapped to roughly [-1, 1].
_IMG_MEAN = 0.5
_IMG_STD = 0.5
def get_dataloaders(dataset_root: Union[str, Path],
                    cuda: bool,
                    batch_size: int = 64,
                    n_threads: int = 4,
                    dataset: str = "MNIST",
                    val_size: float = 0.2,
                    small_experiment: bool = False):
    """
    Build and return the pytorch train/validation dataloaders.

    Train and test splits of the chosen torchvision dataset are concatenated,
    shuffled and re-split into train/validation according to `val_size`.

    Args:
        dataset_root (str, Path): the root path of the datasets
        cuda (bool): whether or not to use cuda
        batch_size (int): the size of the minibatches
        n_threads (int): the number of threads to use for dataloading
        dataset (str): the dataset to load (one of MNIST, FashionMNIST,
                       EMNIST, SVHN, CelebA)
        val_size (float): the proportion of data for the validation set
        small_experiment (bool): whether or not to use a small
                                 dataset (useful for debugging)

    Returns:
        (train_loader, valid_loader, img_shape) where img_shape is the
        (C, H, W) shape of a single sample.
    """
    # NOTE(review): `cuda` is currently unused — presumably pin_memory=cuda
    # was intended on the DataLoaders; confirm before relying on it.
    datasets = ["MNIST", "FashionMNIST", "EMNIST", "SVHN", "CelebA"]
    if dataset not in datasets:
        raise NotImplementedError(f"Cannot import the dataset {dataset}."
                                  f" Available datasets are {datasets}")
    # Resolve the dataset constructor by name, e.g. torchvision.datasets.MNIST
    dataset_loader = getattr(torchvision.datasets, f"{dataset}")

    # The MNIST-family datasets select their split with a `train` bool while
    # SVHN/CelebA use a `split` string, hence the two kwargs dictionaries.
    train_kwargs = {}
    test_kwargs = {}
    if dataset in ["MNIST", "FashionMNIST", "EMNIST"]:
        train_kwargs['train'] = True
        test_kwargs['train'] = False
        if dataset == "EMNIST":
            train_kwargs['split'] = 'balanced'
    elif dataset in ["SVHN", 'CelebA']:
        train_kwargs['split'] = 'train'
        test_kwargs['split'] = 'test'

    # Get the two datasets, make them tensors in [0, 1]
    # then normalize them to roughly [-1, 1] with the module-level constants.
    transform= transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize( (_IMG_MEAN,), (_IMG_STD,))
    ]
    )
    if dataset == 'CelebA':
        # CelebA images are larger and not square: resize + center-crop to 64x64.
        transform = transforms.Compose([
            transforms.Resize(64),
            transforms.CenterCrop(64),
            transform
        ])
    train_dataset = dataset_loader(root=dataset_root,
                                   **train_kwargs,
                                   download=True,
                                   transform=transform
                                   )
    test_dataset = dataset_loader(root=dataset_root,
                                  **test_kwargs,
                                  download=True,
                                  transform=transform
                                  )
    # NOTE(review): rebinding `dataset` (the str parameter) to the
    # ConcatDataset shadows the original name; works, but is confusing.
    dataset = torch.utils.data.ConcatDataset([train_dataset,
                                              test_dataset])

    # Compute the channel-wise normalization coefficients
    # mean = std = 0
    # img, _ = dataset[0]
    # print(img.shape)
    # N = len(dataset) * img.shape[1] * img.shape[2]
    # for img, _ in tqdm.tqdm(dataset):
    #     mean += img.sum()/N
    # for img, _ in tqdm.tqdm(dataset):
    #     std += ((img - mean)**2).sum()/N
    # std = np.sqrt(std)
    # print(mean, std)

    if small_experiment:
        # Keep only one minibatch worth of samples for quick debugging runs.
        dataset = torch.utils.data.Subset(dataset, range(batch_size))

    # Split the dataset in train/valid
    # NOTE: uses the global numpy RNG — seed np.random for reproducible splits.
    indices = np.arange(len(dataset))
    np.random.shuffle(indices)
    split_idx = int(val_size * len(dataset))
    valid_indices, train_indices = indices[:split_idx], indices[split_idx:]

    train_dataset = torch.utils.data.Subset(dataset, train_indices)
    valid_dataset = torch.utils.data.Subset(dataset, valid_indices)

    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=batch_size,
                                               shuffle=True,
                                               num_workers=n_threads)
    valid_loader = torch.utils.data.DataLoader(valid_dataset,
                                               batch_size=batch_size,
                                               shuffle=False,
                                               num_workers=n_threads)

    img_shape = dataset[0][0].shape  # C, H, W
    return train_loader, valid_loader, img_shape
def test_mnist():
    """Smoke-test MNIST loading and display one minibatch as an image grid."""
    import matplotlib.pyplot as plt

    loaders = get_dataloaders(dataset_root=_DEFAULT_DATASET_ROOT,
                              batch_size=16,
                              cuda=False,
                              dataset="MNIST")
    train_loader, valid_loader, img_shape = loaders
    print(f"I loaded {len(train_loader)} train minibatches. The images"
          f" are of shape {img_shape}")

    batch_images, batch_labels = next(iter(train_loader))
    grid = torchvision.utils.make_grid(batch_images, nrow=4)
    print(grid.min(), grid.max())
    print(grid.shape)

    plt.figure()
    plt.imshow(np.transpose(grid.numpy(), (1, 2, 0)), cmap='gray_r')
    plt.show()
def test_celeba():
    """Smoke-test CelebA loading and display one denormalized minibatch."""
    import matplotlib.pyplot as plt

    loaders = get_dataloaders(dataset_root=_DEFAULT_DATASET_ROOT,
                              batch_size=16,
                              cuda=False,
                              dataset="CelebA")
    train_loader, valid_loader, img_shape = loaders
    print(f"I loaded {len(train_loader)} train minibatches. The images"
          f" are of shape {img_shape}")

    batch_images, batch_labels = next(iter(train_loader))
    grid = torchvision.utils.make_grid(batch_images, nrow=4)
    print(grid.min(), grid.max())
    print(grid.shape)

    plt.figure()
    # Undo the (x - mean) / std normalization before displaying.
    plt.imshow(np.transpose(grid.numpy(), (1, 2, 0)) * _IMG_STD + _IMG_MEAN)
    plt.show()
if __name__ == '__main__':
    # Runs the CelebA smoke test by default; uncomment to also check MNIST.
    # test_mnist()
    test_celeba()
| [
"torch.utils.data.ConcatDataset",
"torchvision.transforms.CenterCrop",
"matplotlib.pyplot.show",
"torch.utils.data.Subset",
"matplotlib.pyplot.figure",
"torchvision.transforms.Normalize",
"torch.utils.data.DataLoader",
"torchvision.transforms.Resize",
"torchvision.utils.make_grid",
"torchvision.tr... | [((2761, 2822), 'torch.utils.data.ConcatDataset', 'torch.utils.data.ConcatDataset', (['[train_dataset, test_dataset]'], {}), '([train_dataset, test_dataset])\n', (2791, 2822), False, 'import torch\n'), ((3430, 3456), 'numpy.random.shuffle', 'np.random.shuffle', (['indices'], {}), '(indices)\n', (3447, 3456), True, 'import numpy as np\n'), ((3599, 3646), 'torch.utils.data.Subset', 'torch.utils.data.Subset', (['dataset', 'train_indices'], {}), '(dataset, train_indices)\n', (3622, 3646), False, 'import torch\n'), ((3667, 3714), 'torch.utils.data.Subset', 'torch.utils.data.Subset', (['dataset', 'valid_indices'], {}), '(dataset, valid_indices)\n', (3690, 3714), False, 'import torch\n'), ((3735, 3842), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['train_dataset'], {'batch_size': 'batch_size', 'shuffle': '(True)', 'num_workers': 'n_threads'}), '(train_dataset, batch_size=batch_size, shuffle=\n True, num_workers=n_threads)\n', (3762, 3842), False, 'import torch\n'), ((3998, 4106), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['valid_dataset'], {'batch_size': 'batch_size', 'shuffle': '(False)', 'num_workers': 'n_threads'}), '(valid_dataset, batch_size=batch_size, shuffle=\n False, num_workers=n_threads)\n', (4025, 4106), False, 'import torch\n'), ((4879, 4917), 'torchvision.utils.make_grid', 'torchvision.utils.make_grid', (['X'], {'nrow': '(4)'}), '(X, nrow=4)\n', (4906, 4917), False, 'import torchvision\n'), ((4979, 4991), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4989, 4991), True, 'import matplotlib.pyplot as plt\n'), ((5065, 5075), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5073, 5075), True, 'import matplotlib.pyplot as plt\n'), ((5616, 5654), 'torchvision.utils.make_grid', 'torchvision.utils.make_grid', (['X'], {'nrow': '(4)'}), '(X, nrow=4)\n', (5643, 5654), False, 'import torchvision\n'), ((5716, 5728), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5726, 5728), 
True, 'import matplotlib.pyplot as plt\n'), ((5810, 5820), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5818, 5820), True, 'import matplotlib.pyplot as plt\n'), ((1993, 2014), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2012, 2014), True, 'import torchvision.transforms as transforms\n'), ((2024, 2071), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(_IMG_MEAN,)', '(_IMG_STD,)'], {}), '((_IMG_MEAN,), (_IMG_STD,))\n', (2044, 2071), True, 'import torchvision.transforms as transforms\n'), ((2166, 2187), 'torchvision.transforms.Resize', 'transforms.Resize', (['(64)'], {}), '(64)\n', (2183, 2187), True, 'import torchvision.transforms as transforms\n'), ((2201, 2226), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['(64)'], {}), '(64)\n', (2222, 2226), True, 'import torchvision.transforms as transforms\n')] |
"""
Copyright (c) 2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from functools import partial
from typing import Dict
from typing import List
from typing import Optional
from typing import Tuple
from typing import Type
import math
import numpy as np
from nncf.common.graph import NNCFGraph
from nncf.common.graph import NNCFNode
from nncf.common.graph import NNCFNodeName
from nncf.common.tensor import NNCFTensor
from nncf.common.graph.layer_attributes import ConvolutionLayerAttributes
from nncf.common.graph.operator_metatypes import OperatorMetatype
from nncf.common.pruning.clusterization import Cluster
from nncf.common.pruning.clusterization import Clusterization
from nncf.common.pruning.structs import PrunedLayerInfoBase
from nncf.common.utils.registry import Registry
def is_grouped_conv(node: NNCFNode) -> bool:
    """Return True if `node` is a convolution whose `groups` attribute is not 1."""
    attrs = node.layer_attributes
    if not isinstance(attrs, ConvolutionLayerAttributes):
        return False
    return attrs.groups != 1
def get_sources_of_node(nncf_node: NNCFNode, graph: NNCFGraph, sources_types: List[str]) -> List[NNCFNode]:
    """
    Collect the source nodes of `nncf_node`: nodes with a type from
    `sources_types` reachable backwards from `nncf_node` such that no other
    node of these types lies on the path.

    :param sources_types: List of sources types.
    :param nncf_node: NNCFNode to get sources.
    :param graph: NNCF graph to work with.
    :return: List of all sources nodes.
    """
    visited = dict.fromkeys(graph.get_all_node_ids(), False)
    collect_fn = partial(traverse_function,
                         type_check_fn=lambda op_type: op_type in sources_types,
                         visited=visited)

    # If the start node itself is a source type, begin from its predecessors
    # so the traversal does not stop immediately on the start node.
    start_nodes = (graph.get_previous_nodes(nncf_node)
                   if nncf_node.node_type in sources_types else [nncf_node])

    sources = []
    for start in start_nodes:
        sources.extend(graph.traverse_graph(start, collect_fn, False))
    return sources
def find_next_nodes_not_of_types(graph: NNCFGraph, nncf_node: NNCFNode, types: List[str]) -> List[NNCFNode]:
    """
    Traverse forward from `nncf_node` and return the first nodes whose type is
    NOT in `types`: nodes reachable from `nncf_node` such that no other node
    satisfying that condition lies on the path.

    :param graph: Graph to work with.
    :param nncf_node: NNCFNode to start search.
    :param types: List of types.
    :return: List of next nodes for nncf_node of type not from types list.
    """
    visited = dict.fromkeys(graph.get_all_node_ids(), False)
    collect_fn = partial(traverse_function,
                         type_check_fn=lambda op_type: op_type not in types,
                         visited=visited)

    # When the start node already matches the condition, begin from its
    # successors instead so it is not returned as its own "next" node.
    start_nodes = (graph.get_next_nodes(nncf_node)
                   if nncf_node.node_type not in types else [nncf_node])

    found = []
    for start in start_nodes:
        found.extend(graph.traverse_graph(start, collect_fn))
    return found
def get_next_nodes_of_types(graph: NNCFGraph, nncf_node: NNCFNode, types: List[str]) -> List[NNCFNode]:
    """
    Traverse forward from `nncf_node` and return nodes with a type from
    `types` such that no other node of these types lies on the path from
    `nncf_node` to them.

    :param graph: Graph to work with.
    :param nncf_node: NNCFNode to start search.
    :param types: List of types to find.
    :return: List of next nodes of nncf_node with type from types list.
    """
    visited = dict.fromkeys(graph.get_all_node_ids(), False)
    collect_fn = partial(traverse_function,
                         type_check_fn=lambda op_type: op_type in types,
                         visited=visited)

    # A start node of a matching type would stop the traversal at itself,
    # so begin from its successors in that case.
    start_nodes = (graph.get_next_nodes(nncf_node)
                   if nncf_node.node_type in types else [nncf_node])

    found = []
    for start in start_nodes:
        found.extend(graph.traverse_graph(start, collect_fn))
    return found
def get_rounded_pruned_element_number(total: int, sparsity_rate: float, multiple_of: int = 8) -> int:
    """
    Calculate how many of `total` elements to zero so that the sparsity is
    approximately `sparsity_rate` while the number of REMAINING elements is a
    multiple of `multiple_of`. The remaining count is always rounded up.

    :param total: Total elements number.
    :param sparsity_rate: Proportion of zero elements in total.
    :param multiple_of: Number of remaining elements must be a multiple of `multiple_of`.
    :return: Number of elements to be zeroed (never negative).
    """
    kept = total - total * sparsity_rate
    kept_rounded_up = math.ceil(kept / multiple_of) * multiple_of
    return max(total - kept_rounded_up, 0)
def traverse_function(node: NNCFNode, output: List[NNCFNode], type_check_fn, visited) \
        -> Tuple[bool, List[NNCFNode]]:
    """
    Traversal callback: collect `node` into `output` when its type passes
    `type_check_fn`, marking it in `visited`. Returns (stop_traversal, output).
    """
    # An already-visited node stops the traversal along this path.
    if visited[node.node_id]:
        return True, output
    visited[node.node_id] = True

    if type_check_fn(node.node_type):
        output.append(node)
        return True, output
    return False, output
def get_first_nodes_of_type(graph: NNCFGraph, op_types: List[str]) -> List[NNCFNode]:
    """
    Find the first nodes of a type from `op_types`: nodes reachable from the
    graph inputs such that no other node with a type from `op_types` lies on
    the path from an input to them.

    :param op_types: Types of modules to track.
    :param graph: Graph to work with.
    :return: List of all first nodes with type in `op_types`.
    """
    roots = graph.get_input_nodes()  # NNCFNodes here
    visited = dict.fromkeys(graph.get_all_node_ids(), False)
    collect_fn = partial(traverse_function,
                         type_check_fn=lambda op_type: op_type in op_types,
                         visited=visited)

    first_nodes = []
    for root in roots:
        first_nodes.extend(graph.traverse_graph(root, collect_fn))
    return first_nodes
def get_last_nodes_of_type(graph: NNCFGraph, op_types: List[str]) -> List[NNCFNode]:
    """
    Find the last nodes of a type from `op_types`: nodes from which a path to
    a graph output exists with no other node of a type from `op_types` on it.
    The traversal runs backwards from the outputs.

    :param op_types: Types of modules to track.
    :param graph: Graph to work with.
    :return: List of all last pruned nodes.
    """
    sinks = graph.get_output_nodes()  # NNCFNodes here
    visited = dict.fromkeys(graph.get_all_node_ids(), False)
    collect_fn = partial(traverse_function,
                         type_check_fn=lambda op_type: op_type in op_types,
                         visited=visited)

    last_nodes = []
    for sink in sinks:
        # The final False argument requests a backward traversal.
        last_nodes.extend(graph.traverse_graph(sink, collect_fn, False))
    return last_nodes
def get_previous_convs(graph: NNCFGraph, nncf_node: NNCFNode,
                       pruning_types: List[str], stop_propagation_ops: List[str]) -> Optional[NNCFNode]:
    """
    Return the source convolutions of `nncf_node`: backward sources whose
    type is in `pruning_types`, stopping at `stop_propagation_ops`.

    :return: List of source convolutions of node.
    """
    boundary_types = pruning_types + stop_propagation_ops
    candidates = get_sources_of_node(nncf_node, graph, boundary_types)
    return [candidate for candidate in candidates
            if candidate.node_type in pruning_types]
def get_conv_in_out_channels(graph: NNCFGraph):
    """
    Collect the number of input and output channels for each convolution
    in the graph.

    :param graph: NNCFGraph
    :return Dictionary with the number of input channels to convolution layers:
            {node_name: input_channels_num}
            Dictionary with the number of output channels from convolution layers:
            {node_name: output_channels_num}
    """
    in_channels = {}
    out_channels = {}
    for node in graph.get_all_nodes():
        attrs = node.layer_attributes
        if not isinstance(attrs, ConvolutionLayerAttributes):
            continue
        name = node.node_name
        # First occurrence wins; duplicated names are recorded only once.
        if name not in in_channels or name not in out_channels:
            in_channels[name] = attrs.in_channels
            out_channels[name] = attrs.out_channels
    return in_channels, out_channels
def get_cluster_next_nodes(graph: NNCFGraph, pruned_groups_info: Clusterization[PrunedLayerInfoBase],
                           prunable_types: List[str]) -> Dict[int, List[NNCFNodeName]]:
    """
    Find nodes of `prunable_types` types that consume the output of each
    pruned cluster, excluding the cluster's own members.

    :param graph: NNCFGraph.
    :param pruned_groups_info: `Clusterization` of pruning groups.
    :param prunable_types: Types of nodes that will be returned.
    :return Dictionary of next node names by cluster {cluster_id: [node_name]}.
    """
    next_nodes = {}
    for cluster in pruned_groups_info.get_all_clusters():
        successor_names = set()
        member_names = set()
        for layer_info in cluster.elements:
            member_node = graph.get_node_by_id(layer_info.nncf_node_id)
            member_names.add(member_node.node_name)
            successor_names.update(
                successor.node_name
                for successor in get_next_nodes_of_types(graph, member_node, prunable_types))
        # Nodes inside the cluster are not "next" nodes of that cluster.
        next_nodes[cluster.id] = list(successor_names - member_names)
    return next_nodes
def count_flops_and_weights(graph: NNCFGraph,
                            input_shapes: Dict[NNCFNodeName, List[int]],
                            output_shapes: Dict[NNCFNodeName, List[int]],
                            conv_op_metatypes: List[Type[OperatorMetatype]],
                            linear_op_metatypes: List[Type[OperatorMetatype]],
                            input_channels: Dict[NNCFNodeName, int] = None,
                            output_channels: Dict[NNCFNodeName, int] = None) -> Tuple[int, int]:
    """
    Count the total number of weights and FLOPs in the model for convolution
    and fully connected layers, by summing the per-node counts.

    :param graph: NNCFGraph.
    :param input_shapes: Dictionary of input dimension shapes for convolutions and
        fully connected layers. E.g {node_name: (height, width)}
    :param output_shapes: Dictionary of output dimension shapes for convolutions and
        fully connected layers. E.g {node_name: (height, width)}
    :param conv_op_metatypes: List of metatypes defining convolution operations.
    :param linear_op_metatypes: List of metatypes defining linear/fully connected operations.
    :param input_channels: Dictionary of input channels number in convolutions.
        If not specified, taken from the graph. {node_name: channels_num}
    :param output_channels: Dictionary of output channels number in convolutions.
        If not specified, taken from the graph. {node_name: channels_num}
    :return number of FLOPs for the model
            number of weights (params) in the model
    """
    per_node_flops, per_node_weights = count_flops_and_weights_per_node(
        graph,
        input_shapes, output_shapes,
        conv_op_metatypes, linear_op_metatypes,
        input_channels, output_channels)
    total_flops = sum(per_node_flops.values())
    total_weights = sum(per_node_weights.values())
    return total_flops, total_weights
def count_flops_and_weights_per_node(graph: NNCFGraph,
                                     input_shapes: Dict[NNCFNodeName, List[int]],
                                     output_shapes: Dict[NNCFNodeName, List[int]],
                                     conv_op_metatypes: List[Type[OperatorMetatype]],
                                     linear_op_metatypes: List[Type[OperatorMetatype]],
                                     input_channels: Dict[NNCFNodeName, int] = None,
                                     output_channels: Dict[NNCFNodeName, int] = None) -> \
        Tuple[Dict[NNCFNodeName, int], Dict[NNCFNodeName, int]]:
    """
    Count the number of weights and FLOPs per node in the model for
    convolution and fully connected layers.

    :param graph: NNCFGraph.
    :param input_shapes: Dictionary of input dimension shapes for convolutions and
        fully connected layers. E.g {node_name: (height, width)}
    :param output_shapes: Dictionary of output dimension shapes for convolutions and
        fully connected layers. E.g {node_name: (height, width)}
    :param conv_op_metatypes: List of metatypes defining convolution operations.
    :param linear_op_metatypes: List of metatypes defining linear/fully connected operations.
    :param input_channels: Dictionary of input channels number in convolutions.
        If not specified, taken from the graph. {node_name: channels_num}
    :param output_channels: Dictionary of output channels number in convolutions.
        If not specified, taken from the graph. {node_name: channels_num}
    :return Dictionary of FLOPs number {node_name: flops_num}
            Dictionary of weights number {node_name: weights_num}
    """
    flops = {}
    weights = {}
    input_channels = input_channels or {}
    output_channels = output_channels or {}

    for node in graph.get_nodes_by_metatypes(conv_op_metatypes):
        name = node.node_name
        attrs = node.layer_attributes
        # Pruned channel counts (if provided) override the graph's counts.
        in_ch = input_channels.get(name, attrs.in_channels)
        out_ch = output_channels.get(name, attrs.out_channels)
        filters_per_channel = out_ch // attrs.groups
        kernel_elems = np.prod(attrs.kernel_size)
        weights_numpy = kernel_elems * in_ch * filters_per_channel
        # One multiply-accumulate = 2 FLOPs, applied per output position.
        flops_numpy = 2 * weights_numpy * np.prod(output_shapes[name])
        flops[name] = flops_numpy.astype(int).item()
        weights[name] = weights_numpy.astype(int).item()

    for node in graph.get_nodes_by_metatypes(linear_op_metatypes):
        name = node.node_name
        weights_numpy = np.prod(input_shapes[name]) * np.prod(output_shapes[name])
        flops_numpy = 2 * weights_numpy
        flops[name] = flops_numpy.astype(int).item()
        weights[name] = weights_numpy.astype(int).item()

    return flops, weights
def calculate_in_out_channels_in_uniformly_pruned_model(pruning_groups: List[Cluster[PrunedLayerInfoBase]],
                                                        pruning_rate: float,
                                                        full_input_channels: Dict[str, int],
                                                        full_output_channels: Dict[str, int],
                                                        pruning_groups_next_nodes: Dict[int, List[str]]):
    """
    Imitate filter pruning: remove `pruning_rate` percent of output filters in
    each pruning group and shrink the input channels of the nodes listed in
    `pruning_groups_next_nodes` accordingly.

    :param pruning_groups: A list of pruning groups.
    :param pruning_rate: Target pruning rate.
    :param full_input_channels: A dictionary of input channels number in original model.
    :param full_output_channels: A dictionary of output channels number in original model.
    :param pruning_groups_next_nodes: A dictionary of next nodes of each pruning group.
    :return Dictionary of new input channels number {node_name: channels_num}
    :return Dictionary of new output channels number {node_name: channels_num}
    """
    new_in_channels = full_input_channels.copy()
    new_out_channels = full_output_channels.copy()

    for group in pruning_groups:
        first_name = group.elements[0].node_name
        # All members of a pruning group must share the same output width.
        assert all(new_out_channels[first_name] == new_out_channels[member.node_name]
                   for member in group.elements)

        # Prune every node in the cluster by output channels.
        original_out = full_output_channels[first_name]
        pruned_count = get_rounded_pruned_element_number(original_out, pruning_rate)
        remaining = original_out - pruned_count
        for member in group.elements:
            new_out_channels[member.node_name] = remaining

        # Shrink the input channels of every consumer of this cluster.
        for consumer_name in pruning_groups_next_nodes[group.id]:
            new_in_channels[consumer_name] -= pruned_count

    return new_in_channels, new_out_channels
class PruningOperationsMetatypeRegistry(Registry):
    """Registry of pruning operation metatypes, indexed by framework op name."""

    def __init__(self, name):
        super().__init__(name)
        # Maps each framework operator alias to the metatype class handling it.
        self._op_name_to_op_class = {}

    def register(self, name=None):
        """Return a class decorator registering a metatype under `name`
        (or the class's own __name__) and under all of its op aliases."""
        name_ = name
        # Bind the parent method here: inside `wrap` a zero-arg super() would
        # not resolve against this class's MRO.
        super_register = super()._register

        def wrap(obj):
            cls_name = name_
            if cls_name is None:
                cls_name = obj.__name__

            super_register(obj, cls_name)
            op_names = obj.get_all_op_aliases()
            # NOTE: the loop variable `name` shadows the decorator parameter;
            # harmless because `name_` was captured above.
            for name in op_names:
                if name not in self._op_name_to_op_class:
                    self._op_name_to_op_class[name] = obj
                else:
                    # Each op alias may map to exactly one metatype.
                    assert self._op_name_to_op_class[name] == obj, \
                        'Inconsistent operator type registry - single patched op name maps to multiple metatypes!'
            return obj

        return wrap

    def get_operator_metatype_by_op_name(self, op_name: str):
        """Return the metatype registered for `op_name`, or None if unknown."""
        if op_name in self._op_name_to_op_class:
            return self._op_name_to_op_class[op_name]
        return None
def is_depthwise_conv(node: NNCFNode) -> bool:
    """Return True if `node` is a depthwise convolution: groups equals the
    input channel count (> 1) and the output channels are a multiple of it."""
    attrs = node.layer_attributes
    if not isinstance(attrs, ConvolutionLayerAttributes):
        return False
    return (attrs.groups == attrs.in_channels
            and attrs.out_channels % attrs.in_channels == 0
            and attrs.in_channels > 1)
def is_conv_with_downsampling(node: NNCFNode) -> bool:
    """Return True if `node` is a non-transposed convolution with a stride
    greater than 1 along at least one dimension."""
    attrs = node.layer_attributes
    if not isinstance(attrs, ConvolutionLayerAttributes):
        return False
    has_non_unit_stride = not np.all(np.array(attrs.stride) == 1)
    return has_non_unit_stride and not attrs.transpose
def get_input_masks(node: NNCFNode, graph: NNCFGraph) -> List[Optional[NNCFTensor]]:
    """
    Return the 'output_mask' of every predecessor of `node`, in predecessor order.

    :param node: Given NNCFNode.
    :param graph: Graph to work with.
    :return: Input masks.
    """
    predecessors = graph.get_previous_nodes(node)
    return [predecessor.data['output_mask'] for predecessor in predecessors]
def identity_mask_propagation(node: NNCFNode, graph: NNCFGraph) -> None:
    """
    Propagate the single input mask of `node` unchanged to its output.

    :param node: Graph node to perform identity mask propagation on.
    :param graph: Graph to work with.
    """
    # A disconnected NNCFGraph yields no predecessors; substitute a None mask.
    masks = get_input_masks(node, graph) or [None]
    assert len(masks) == 1
    node.data['input_masks'] = masks
    node.data['output_mask'] = masks[0]
| [
"math.ceil",
"numpy.prod",
"numpy.array",
"functools.partial"
] | [((2023, 2114), 'functools.partial', 'partial', (['traverse_function'], {'type_check_fn': '(lambda x: x in sources_types)', 'visited': 'visited'}), '(traverse_function, type_check_fn=lambda x: x in sources_types,\n visited=visited)\n', (2030, 2114), False, 'from functools import partial\n'), ((3217, 3305), 'functools.partial', 'partial', (['traverse_function'], {'type_check_fn': '(lambda x: x not in types)', 'visited': 'visited'}), '(traverse_function, type_check_fn=lambda x: x not in types, visited=\n visited)\n', (3224, 3305), False, 'from functools import partial\n'), ((4238, 4329), 'functools.partial', 'partial', (['traverse_function'], {'type_check_fn': '(lambda x: x in sources_types)', 'visited': 'visited'}), '(traverse_function, type_check_fn=lambda x: x in sources_types,\n visited=visited)\n', (4245, 4329), False, 'from functools import partial\n'), ((6325, 6412), 'functools.partial', 'partial', (['traverse_function'], {'type_check_fn': '(lambda x: x in op_types)', 'visited': 'visited'}), '(traverse_function, type_check_fn=lambda x: x in op_types, visited=\n visited)\n', (6332, 6412), False, 'from functools import partial\n'), ((7304, 7391), 'functools.partial', 'partial', (['traverse_function'], {'type_check_fn': '(lambda x: x in op_types)', 'visited': 'visited'}), '(traverse_function, type_check_fn=lambda x: x in op_types, visited=\n visited)\n', (7311, 7391), False, 'from functools import partial\n'), ((5236, 5292), 'math.ceil', 'math.ceil', (['((total - total * sparsity_rate) / multiple_of)'], {}), '((total - total * sparsity_rate) / multiple_of)\n', (5245, 5292), False, 'import math\n'), ((14548, 14576), 'numpy.prod', 'np.prod', (['output_shapes[name]'], {}), '(output_shapes[name])\n', (14555, 14576), True, 'import numpy as np\n'), ((14948, 14976), 'numpy.prod', 'np.prod', (['output_shapes[name]'], {}), '(output_shapes[name])\n', (14955, 14976), True, 'import numpy as np\n'), ((15001, 15028), 'numpy.prod', 'np.prod', (['input_shapes[name]'], {}), 
'(input_shapes[name])\n', (15008, 15028), True, 'import numpy as np\n'), ((15031, 15059), 'numpy.prod', 'np.prod', (['output_shapes[name]'], {}), '(output_shapes[name])\n', (15038, 15059), True, 'import numpy as np\n'), ((14601, 14643), 'numpy.prod', 'np.prod', (['node.layer_attributes.kernel_size'], {}), '(node.layer_attributes.kernel_size)\n', (14608, 14643), True, 'import numpy as np\n'), ((14918, 14945), 'numpy.prod', 'np.prod', (['input_shapes[name]'], {}), '(input_shapes[name])\n', (14925, 14945), True, 'import numpy as np\n'), ((14439, 14481), 'numpy.prod', 'np.prod', (['node.layer_attributes.kernel_size'], {}), '(node.layer_attributes.kernel_size)\n', (14446, 14481), True, 'import numpy as np\n'), ((18941, 18969), 'numpy.array', 'np.array', (['layer_attrs.stride'], {}), '(layer_attrs.stride)\n', (18949, 18969), True, 'import numpy as np\n')] |
__author__ = "<NAME>"
"""
The aim of this class is to produce an implementation of Boson Sampling algorithm
as in [2], but for uniform losses. This would allow us to apply the losses to the
k non-approximated particles and, by that, use generalized Cliffords strategy on
the non-lossy interferometer. We won't need to expand the matrix into 2m x 2m!
"""
from .simulation_strategy_interface import SimulationStrategyInterface
from .generalized_cliffords_b_simulation_strategy import (
GeneralizedCliffordsBSimulationStrategy,
BSPermanentCalculatorInterface,
)
from numpy import (
ndarray,
hstack,
zeros_like,
complex128,
eye,
pi,
ones,
exp,
diag,
arange,
)
from numpy.random import choice, rand, shuffle
from typing import List
from scipy.special import binom
from ..boson_sampling_utilities.boson_sampling_utilities import generate_lossy_inputs
from multiprocessing import cpu_count
import multiprocessing
from concurrent.futures import ProcessPoolExecutor as Pool
from copy import deepcopy
from ..quantum_computations_utilities import compute_qft_matrix
class LossyStateApproximationSimulationStrategy(SimulationStrategyInterface):
def __init__(
self,
bs_permanent_calculator: BSPermanentCalculatorInterface,
uniform_transmissivity: float,
hierarchy_level: int,
threads_number: int = -1,
):
# Required for lossy approximated state preparation
self._approximated_input_state_part_possibilities = None
self._approximated_input_state_part_possibilities_weights = None
self._not_approximated_lossy_mixed_state_parts = None
self._not_approximated_lossy_mixed_state_parts_weights = None
# Required for general simulation
self._hierarchy_level = hierarchy_level
self._uniform_transmissivity = uniform_transmissivity
self._threads_number = self._get_proper_threads_number(threads_number)
self._permanent_calculator = (
bs_permanent_calculator # Should contain an UNITARY (no losses here!)
)
self._qft_matrix = self._get_qft_matrix()
@staticmethod
def _get_proper_threads_number(threads_number: int) -> int:
if threads_number < 1 or threads_number > cpu_count():
return cpu_count()
else:
return threads_number
def simulate(self, input_state: ndarray, samples_number: int = 1) -> List[ndarray]:
if samples_number < 1:
return []
self._prepare_not_approximated_lossy_mixed_state(
input_state[: self._hierarchy_level] # Not approximated state part
)
self._prepare_approximated_input_state(
input_state[self._hierarchy_level :] # Approximated state part
)
number_of_samples_for_each_thread = self._compute_number_of_samples_for_each_thread(
samples_number
)
# Context is required on Linux systems, as the default (fork) produces undesired results! Spawn is default
# on osX and Windows and works as expected.
multiprocessing_context = multiprocessing.get_context("spawn")
with Pool(mp_context=multiprocessing_context) as p:
samples_lists = p.map(
self._simulate_in_parallel, number_of_samples_for_each_thread
)
samples = [sample for samples_list in samples_lists for sample in samples_list]
return samples
def _prepare_not_approximated_lossy_mixed_state(
self, not_approximated_input_state_part: ndarray
) -> None:
self._prepare_not_approximated_lossy_mixed_state_parts(
not_approximated_input_state_part
)
self._prepare_not_approximated_lossy_mixed_state_parts_weights()
def _prepare_not_approximated_lossy_mixed_state_parts(
self, not_approximated_input_state_part: ndarray
) -> None:
self._not_approximated_lossy_mixed_state_parts = []
for number_of_particles_left in range(
sum(not_approximated_input_state_part) + 1
):
self._not_approximated_lossy_mixed_state_parts.extend(
generate_lossy_inputs(
not_approximated_input_state_part, number_of_particles_left
)
)
def _prepare_not_approximated_lossy_mixed_state_parts_weights(self) -> None:
# Do note that this method HAS TO be called after lossy mixed state parts are
# computed.
possible_weights = self._get_possible_lossy_inputs_weights(
# Last part is always whole (not approximated) input state part
self._not_approximated_lossy_mixed_state_parts[-1]
)
self._not_approximated_lossy_mixed_state_parts_weights = []
n = sum(self._not_approximated_lossy_mixed_state_parts[-1])
for state_part in self._not_approximated_lossy_mixed_state_parts:
self._not_approximated_lossy_mixed_state_parts_weights.append(
possible_weights[int(sum(state_part))] / binom(n, sum(state_part))
)
def _get_possible_lossy_inputs_weights(self, input_state: ndarray) -> List[float]:
weights = []
# I'll use the same notation as in [1], for readability.
n = int(sum(input_state)) # Initial number of particles.
eta = self._uniform_transmissivity
for l in range(n + 1):
# l denotes number of particles left in the state
weights.append(binom(n, l) * eta ** l * (1 - eta) ** (n - l))
return weights
def _prepare_approximated_input_state(
self, approximated_input_state_part: ndarray
) -> None:
# Assume exact simulation if hierarchy level is not specified.
if not 0 <= self._hierarchy_level < self._permanent_calculator.matrix.shape[0]:
self._approximated_input_state_part_possibilities = [[]]
self._approximated_input_state_part_possibilities_weights = [1]
return
self._prepare_approximated_input_state_parts(approximated_input_state_part)
self._prepare_approximated_input_state_parts_weights()
def _prepare_approximated_input_state_parts(
self, approximated_input_state_part: ndarray
) -> None:
self._approximated_input_state_part_possibilities = []
for number_of_particles_left in range(
int(sum(approximated_input_state_part)) + 1
):
state_part_possibility = zeros_like(approximated_input_state_part)
state_part_possibility[0] = number_of_particles_left
self._approximated_input_state_part_possibilities.append(
state_part_possibility
)
def _prepare_approximated_input_state_parts_weights(self):
self._approximated_input_state_part_possibilities_weights = self._get_possible_lossy_inputs_weights(
# Last part contains all possible particles.
self._approximated_input_state_part_possibilities[-1]
)
@staticmethod
def _distribute_uniformly(val: int, bins: int) -> List[int]:
# TODO TR: Might be but in a more general file.
distributed_values = []
for v in range(bins):
distributed_values.append(val // bins)
for i in range(val % bins):
distributed_values[i] += 1
return distributed_values
def _compute_number_of_samples_for_each_thread(
self, samples_number: int
) -> List[int]:
return self._distribute_uniformly(samples_number, self._threads_number)
def _simulate_in_parallel(self, samples_number: int = 1) -> List[ndarray]:
""" This method produces given number of samples from lossy approximated
(separable) state. It's meant to be run in parallel.
"""
samples = []
helper_strategy = GeneralizedCliffordsBSimulationStrategy(
deepcopy(self._permanent_calculator)
)
for _ in range(samples_number):
lossy_input = self._get_input_state_for_sampling()
approximate_sampling_matrix = self._get_matrix_for_approximate_sampling()
helper_strategy.set_new_matrix(approximate_sampling_matrix)
samples.append(helper_strategy.simulate(lossy_input)[0])
return samples
def _get_input_state_for_sampling(self):
approximated_part = self._approximated_input_state_part_possibilities[
choice(
range(len(self._approximated_input_state_part_possibilities)),
p=self._approximated_input_state_part_possibilities_weights,
)
]
not_approximated_part = self._not_approximated_lossy_mixed_state_parts[
choice(
range(len(self._not_approximated_lossy_mixed_state_parts)),
p=self._not_approximated_lossy_mixed_state_parts_weights,
)
]
return hstack([not_approximated_part, approximated_part])
# Symmetrization fix
def _permuted_interferometer_matrix(self) -> ndarray:
permutation = arange(
self._permanent_calculator.matrix.shape[0]
) # We work with unitary matrices.
shuffle(permutation)
return self._permanent_calculator.matrix[:, permutation]
def _get_matrix_for_approximate_sampling(self) -> ndarray:
# TODO TR: THIS WILL BE REWRITTEN AFTER MERGING WITH BRUTE-FORCE BRANCH
random_phases_matrix = self._get_random_phases_matrix()
return (
self._permuted_interferometer_matrix()
@ random_phases_matrix
@ self._qft_matrix
)
def _get_qft_matrix(self):
modes_number = self._permanent_calculator.matrix.shape[0]
small_qft_matrix = compute_qft_matrix(modes_number - self._hierarchy_level)
qft_matrix = eye(modes_number, dtype=complex128)
qft_matrix[
self._hierarchy_level : modes_number, self._hierarchy_level : modes_number
] = small_qft_matrix
return qft_matrix
def _get_random_phases_matrix(self) -> ndarray:
modes_number = self._permanent_calculator.matrix.shape[0]
random_phases = ones(modes_number, dtype=complex128)
random_phases[self._hierarchy_level : modes_number] = exp(
1j * 2 * pi * rand(modes_number - self._hierarchy_level)
)
return diag(random_phases)
| [
"numpy.eye",
"numpy.ones",
"numpy.random.rand",
"numpy.hstack",
"scipy.special.binom",
"multiprocessing.get_context",
"multiprocessing.cpu_count",
"numpy.diag",
"concurrent.futures.ProcessPoolExecutor",
"copy.deepcopy",
"numpy.zeros_like",
"numpy.arange",
"numpy.random.shuffle"
] | [((3137, 3173), 'multiprocessing.get_context', 'multiprocessing.get_context', (['"""spawn"""'], {}), "('spawn')\n", (3164, 3173), False, 'import multiprocessing\n'), ((8938, 8988), 'numpy.hstack', 'hstack', (['[not_approximated_part, approximated_part]'], {}), '([not_approximated_part, approximated_part])\n', (8944, 8988), False, 'from numpy import ndarray, hstack, zeros_like, complex128, eye, pi, ones, exp, diag, arange\n'), ((9095, 9145), 'numpy.arange', 'arange', (['self._permanent_calculator.matrix.shape[0]'], {}), '(self._permanent_calculator.matrix.shape[0])\n', (9101, 9145), False, 'from numpy import ndarray, hstack, zeros_like, complex128, eye, pi, ones, exp, diag, arange\n'), ((9210, 9230), 'numpy.random.shuffle', 'shuffle', (['permutation'], {}), '(permutation)\n', (9217, 9230), False, 'from numpy.random import choice, rand, shuffle\n'), ((9851, 9886), 'numpy.eye', 'eye', (['modes_number'], {'dtype': 'complex128'}), '(modes_number, dtype=complex128)\n', (9854, 9886), False, 'from numpy import ndarray, hstack, zeros_like, complex128, eye, pi, ones, exp, diag, arange\n'), ((10194, 10230), 'numpy.ones', 'ones', (['modes_number'], {'dtype': 'complex128'}), '(modes_number, dtype=complex128)\n', (10198, 10230), False, 'from numpy import ndarray, hstack, zeros_like, complex128, eye, pi, ones, exp, diag, arange\n'), ((10394, 10413), 'numpy.diag', 'diag', (['random_phases'], {}), '(random_phases)\n', (10398, 10413), False, 'from numpy import ndarray, hstack, zeros_like, complex128, eye, pi, ones, exp, diag, arange\n'), ((2317, 2328), 'multiprocessing.cpu_count', 'cpu_count', ([], {}), '()\n', (2326, 2328), False, 'from multiprocessing import cpu_count\n'), ((3188, 3228), 'concurrent.futures.ProcessPoolExecutor', 'Pool', ([], {'mp_context': 'multiprocessing_context'}), '(mp_context=multiprocessing_context)\n', (3192, 3228), True, 'from concurrent.futures import ProcessPoolExecutor as Pool\n'), ((6497, 6538), 'numpy.zeros_like', 'zeros_like', 
(['approximated_input_state_part'], {}), '(approximated_input_state_part)\n', (6507, 6538), False, 'from numpy import ndarray, hstack, zeros_like, complex128, eye, pi, ones, exp, diag, arange\n'), ((7920, 7956), 'copy.deepcopy', 'deepcopy', (['self._permanent_calculator'], {}), '(self._permanent_calculator)\n', (7928, 7956), False, 'from copy import deepcopy\n'), ((2285, 2296), 'multiprocessing.cpu_count', 'cpu_count', ([], {}), '()\n', (2294, 2296), False, 'from multiprocessing import cpu_count\n'), ((10325, 10367), 'numpy.random.rand', 'rand', (['(modes_number - self._hierarchy_level)'], {}), '(modes_number - self._hierarchy_level)\n', (10329, 10367), False, 'from numpy.random import choice, rand, shuffle\n'), ((5510, 5521), 'scipy.special.binom', 'binom', (['n', 'l'], {}), '(n, l)\n', (5515, 5521), False, 'from scipy.special import binom\n')] |
# -*- coding: utf-8 -*-
"""Tests of early stopping."""
import unittest
from typing import List
import numpy
import pytest
import torch
from torch.optim import Adam
from pykeen.datasets import Nations
from pykeen.evaluation import RankBasedEvaluator
from pykeen.models import Model, TransE
from pykeen.stoppers.early_stopping import EarlyStopper, is_improvement
from pykeen.trackers import MLFlowResultTracker
from pykeen.training import SLCWATrainingLoop
from tests.mocks import MockEvaluator, MockModel
try:
import mlflow
except ImportError:
mlflow = None
class TestRandom(unittest.TestCase):
    """Random tests for early stopper."""
    def test_is_improvement(self):
        """Test is_improvement()."""
        # (best, current, larger_is_better, relative_delta, expected)
        cases = [
            # equal value; larger is better
            (1.0, 1.0, True, 0.0, False),
            # equal value; smaller is better
            (1.0, 1.0, False, 0.0, False),
            # larger is better; improvement
            (1.0, 1.1, True, 0.0, True),
            # larger is better; improvement; but not significant
            (1.0, 1.1, True, 0.1, False),
        ]
        for best_value, current_value, larger_is_better, relative_delta, is_better in cases:
            with self.subTest(
                best_value=best_value,
                current_value=current_value,
                larger_is_better=larger_is_better,
                relative_delta=relative_delta,
                is_better=is_better,
            ):
                actual = is_improvement(
                    best_value=best_value,
                    current_value=current_value,
                    larger_is_better=larger_is_better,
                    relative_delta=relative_delta,
                )
                self.assertEqual(is_better, actual)
class LogCallWrapper:
    """An object which wraps functions and checks whether they have been called."""

    def __init__(self):
        # ids of wrapped callables that have been invoked at least once
        self.called = set()

    def wrap(self, func):
        """Return a proxy for ``func`` that records every invocation."""
        func_id = id(func)

        def proxy(*args, **kwargs):
            self.called.add(func_id)
            return func(*args, **kwargs)

        return proxy

    def was_called(self, func) -> bool:
        """Report whether the previously wrapped function has been called."""
        return id(func) in self.called
class TestEarlyStopping(unittest.TestCase):
    """Tests for early stopping."""
    #: The window size used by the early stopper
    patience: int = 2
    #: The mock losses the mock evaluator will return
    mock_losses: List[float] = [10.0, 9.0, 8.0, 8.0, 8.0, 8.0]
    #: The (zeroed) index - 1 at which stopping will occur
    stop_constant: int = 4
    #: The minimum improvement
    delta: float = 0.0
    #: The best results
    best_results: List[float] = [10.0, 9.0, 8.0, 8.0, 8.0]
    def setUp(self):
        """Prepare for testing the early stopper."""
        # Set automatic_memory_optimization to false for tests
        self.mock_evaluator = MockEvaluator(self.mock_losses, automatic_memory_optimization=False)
        nations = Nations()
        self.model = MockModel(triples_factory=nations.training)
        # Stopper under test: watches the mocked metric; smaller is better.
        self.stopper = EarlyStopper(
            model=self.model,
            evaluator=self.mock_evaluator,
            training_triples_factory=nations.training,
            evaluation_triples_factory=nations.validation,
            patience=self.patience,
            relative_delta=self.delta,
            larger_is_better=False,
        )
    def test_initialization(self):
        """Test warm-up phase."""
        # During the first `patience` epochs the stopper must never trigger.
        for epoch in range(self.patience):
            should_stop = self.stopper.should_stop(epoch=epoch)
            assert not should_stop
    def test_result_processing(self):
        """Test that the mock evaluation of the early stopper always gives the right loss."""
        for epoch in range(len(self.mock_losses)):
            # Step early stopper
            should_stop = self.stopper.should_stop(epoch=epoch)
            if not should_stop:
                # check storing of results
                assert self.stopper.results == self.mock_losses[:epoch + 1]
                # check ring buffer
                if epoch >= self.patience:
                    assert self.stopper.best_metric == self.best_results[epoch]
    def test_should_stop(self):
        """Test that the stopper knows when to stop."""
        for epoch in range(self.stop_constant):
            self.assertFalse(self.stopper.should_stop(epoch=epoch))
        # The next call sees no sufficient improvement within the patience window.
        self.assertTrue(self.stopper.should_stop(epoch=epoch))
    @unittest.skipUnless(mlflow is not None, reason='MLFlow not installed')
    def test_result_logging_with_mlflow(self):
        """Test whether the MLFLow result logger works."""
        self.stopper.result_tracker = MLFlowResultTracker()
        wrapper = LogCallWrapper()
        # Replace the tracker's log_metrics with a wrapper that records calls.
        real_log_metrics = self.stopper.result_tracker.mlflow.log_metrics
        self.stopper.result_tracker.mlflow.log_metrics = wrapper.wrap(real_log_metrics)
        self.stopper.should_stop(epoch=0)
        assert wrapper.was_called(real_log_metrics)
class TestDeltaEarlyStopping(TestEarlyStopping):
    """Test early stopping with a tiny delta."""
    # Losses keep improving, but by less than the required relative delta,
    # so the stopper is still expected to trigger at the same epoch.
    mock_losses: List[float] = [10.0, 9.0, 8.0, 7.99, 7.98, 7.97]
    stop_constant: int = 4
    delta: float = 0.1
    best_results: List[float] = [10.0, 9.0, 8.0, 8.0, 8.0]
class TestEarlyStoppingRealWorld(unittest.TestCase):
    """Test early stopping on a real-world use case of training TransE with Adam."""
    #: The window size used by the early stopper
    patience: int = 2
    #: The (zeroed) index - 1 at which stopping will occur
    stop_constant: int = 4
    #: The minimum improvement
    relative_delta: float = 0.1
    #: The random seed to use for reproducibility
    seed: int = 42
    #: The maximum number of epochs to train. Should be large enough to allow for early stopping.
    max_num_epochs: int = 1000
    #: The epoch at which the stop should happen. Depends on the choice of random seed.
    stop_epoch: int = 30
    #: The batch size to use.
    batch_size: int = 128
    def setUp(self) -> None:
        """Set up the real world early stopping test."""
        # Fix seed for reproducibility
        torch.manual_seed(seed=self.seed)
        numpy.random.seed(seed=self.seed)
    @pytest.mark.slow
    def test_early_stopping(self):
        """Tests early stopping."""
        # Set automatic_memory_optimization to false during testing
        nations = Nations()
        model: Model = TransE(triples_factory=nations.training)
        evaluator = RankBasedEvaluator(automatic_memory_optimization=False)
        stopper = EarlyStopper(
            model=model,
            evaluator=evaluator,
            training_triples_factory=nations.training,
            evaluation_triples_factory=nations.validation,
            patience=self.patience,
            relative_delta=self.relative_delta,
            metric='mean_rank',
        )
        training_loop = SLCWATrainingLoop(
            model=model,
            triples_factory=nations.training,
            optimizer=Adam(params=model.get_grad_params()),
            automatic_memory_optimization=False,
        )
        losses = training_loop.train(
            triples_factory=nations.training,
            num_epochs=self.max_num_epochs,
            batch_size=self.batch_size,
            stopper=stopper,
            use_tqdm=False,
        )
        # One result per evaluation; training runs `patience` extra windows
        # past the best epoch before stopping.
        self.assertEqual(stopper.number_results, (len(losses) + self.patience * stopper.frequency) // stopper.frequency)
        self.assertEqual(
            self.stop_epoch,
            # FIX: use self.patience instead of the magic constant 2, so this
            # check stays consistent with the assertion above if patience
            # ever changes (the value is unchanged: patience == 2).
            (len(losses) + self.patience * stopper.frequency),
            msg='Did not stop early like it should have',
        )
| [
"pykeen.datasets.Nations",
"torch.manual_seed",
"tests.mocks.MockEvaluator",
"pykeen.stoppers.early_stopping.EarlyStopper",
"unittest.skipUnless",
"pykeen.models.TransE",
"pykeen.evaluation.RankBasedEvaluator",
"tests.mocks.MockModel",
"numpy.random.seed",
"pykeen.stoppers.early_stopping.is_improv... | [((4527, 4597), 'unittest.skipUnless', 'unittest.skipUnless', (['(mlflow is not None)'], {'reason': '"""MLFlow not installed"""'}), "(mlflow is not None, reason='MLFlow not installed')\n", (4546, 4597), False, 'import unittest\n'), ((2941, 3009), 'tests.mocks.MockEvaluator', 'MockEvaluator', (['self.mock_losses'], {'automatic_memory_optimization': '(False)'}), '(self.mock_losses, automatic_memory_optimization=False)\n', (2954, 3009), False, 'from tests.mocks import MockEvaluator, MockModel\n'), ((3028, 3037), 'pykeen.datasets.Nations', 'Nations', ([], {}), '()\n', (3035, 3037), False, 'from pykeen.datasets import Nations\n'), ((3059, 3102), 'tests.mocks.MockModel', 'MockModel', ([], {'triples_factory': 'nations.training'}), '(triples_factory=nations.training)\n', (3068, 3102), False, 'from tests.mocks import MockEvaluator, MockModel\n'), ((3126, 3365), 'pykeen.stoppers.early_stopping.EarlyStopper', 'EarlyStopper', ([], {'model': 'self.model', 'evaluator': 'self.mock_evaluator', 'training_triples_factory': 'nations.training', 'evaluation_triples_factory': 'nations.validation', 'patience': 'self.patience', 'relative_delta': 'self.delta', 'larger_is_better': '(False)'}), '(model=self.model, evaluator=self.mock_evaluator,\n training_triples_factory=nations.training, evaluation_triples_factory=\n nations.validation, patience=self.patience, relative_delta=self.delta,\n larger_is_better=False)\n', (3138, 3365), False, 'from pykeen.stoppers.early_stopping import EarlyStopper, is_improvement\n'), ((4742, 4763), 'pykeen.trackers.MLFlowResultTracker', 'MLFlowResultTracker', ([], {}), '()\n', (4761, 4763), False, 'from pykeen.trackers import MLFlowResultTracker\n'), ((6194, 6227), 'torch.manual_seed', 'torch.manual_seed', ([], {'seed': 'self.seed'}), '(seed=self.seed)\n', (6211, 6227), False, 'import torch\n'), ((6236, 6269), 'numpy.random.seed', 'numpy.random.seed', ([], {'seed': 'self.seed'}), '(seed=self.seed)\n', (6253, 6269), 
False, 'import numpy\n'), ((6450, 6459), 'pykeen.datasets.Nations', 'Nations', ([], {}), '()\n', (6457, 6459), False, 'from pykeen.datasets import Nations\n'), ((6483, 6523), 'pykeen.models.TransE', 'TransE', ([], {'triples_factory': 'nations.training'}), '(triples_factory=nations.training)\n', (6489, 6523), False, 'from pykeen.models import Model, TransE\n'), ((6544, 6599), 'pykeen.evaluation.RankBasedEvaluator', 'RankBasedEvaluator', ([], {'automatic_memory_optimization': '(False)'}), '(automatic_memory_optimization=False)\n', (6562, 6599), False, 'from pykeen.evaluation import RankBasedEvaluator\n'), ((6618, 6848), 'pykeen.stoppers.early_stopping.EarlyStopper', 'EarlyStopper', ([], {'model': 'model', 'evaluator': 'evaluator', 'training_triples_factory': 'nations.training', 'evaluation_triples_factory': 'nations.validation', 'patience': 'self.patience', 'relative_delta': 'self.relative_delta', 'metric': '"""mean_rank"""'}), "(model=model, evaluator=evaluator, training_triples_factory=\n nations.training, evaluation_triples_factory=nations.validation,\n patience=self.patience, relative_delta=self.relative_delta, metric=\n 'mean_rank')\n", (6630, 6848), False, 'from pykeen.stoppers.early_stopping import EarlyStopper, is_improvement\n'), ((1498, 1634), 'pykeen.stoppers.early_stopping.is_improvement', 'is_improvement', ([], {'best_value': 'best_value', 'current_value': 'current_value', 'larger_is_better': 'larger_is_better', 'relative_delta': 'relative_delta'}), '(best_value=best_value, current_value=current_value,\n larger_is_better=larger_is_better, relative_delta=relative_delta)\n', (1512, 1634), False, 'from pykeen.stoppers.early_stopping import EarlyStopper, is_improvement\n')] |
from dataset import Dataset
import sqlite3
import dlib
import cv2
import os
import numpy as np
import pandas as pd
from loggers import Log
class AflwDataset(Dataset):
    """Class that abstracts Aflw dataset.
    """
    def __init__(self, config):
        """Initialize the dataset abstraction.

        :param config: configuration object providing dataset_dir,
            image_shape and the label type.
        """
        # self.conn = sqlite3.connect("/home/mtk/dataset/aflw-files/aflw/data/aflw.sqlite")
        super(AflwDataset, self).__init__(config)

    def resize_down_image(self, img, max_img_shape):
        """Resize an image down so that it fits within max_img_shape.

        Images that are already small enough are returned unchanged.
        e.g
        img = np.zeros((200,300))
        img = resize_down_image(img,(100,100))
        img.shape # (66,100)

        :returns: tuple (resized image, scale factor from resized back to original size).
        """
        img_h, img_w = img.shape[0:2]
        w, h = img_w, img_h
        if max_img_shape[0] < h:
            w = (max_img_shape[0] / float(h)) * w
            h = max_img_shape[0]
        if max_img_shape[1] < w:
            h = (max_img_shape[1] / float(w)) * h
            w = max_img_shape[1]
        if h == img_h:
            return img, 1
        else:
            scale = img_h / h
            img = cv2.resize(img, (int(w), int(h)))
            return img, scale

    def selective_search(self, img, min_size=(2200), max_img_size=(500, 500), debug=False):
        """Run dlib's selective search and display the candidate boxes.

        Candidates are found on a downscaled copy and mapped back to the
        original resolution; the boxes are only drawn and shown, not returned.
        """
        cand_rects = []
        img, scale = self.resize_down_image(img, max_img_size)
        dlib.find_candidate_object_locations(img, cand_rects, min_size=min_size)
        rects = [(int(crect.left() * scale),
                  int(crect.top() * scale),
                  int(crect.right() * scale),
                  int(crect.bottom() * scale),
                  ) for crect in cand_rects]
        for rect in rects:
            cv2.rectangle(img, (rect[0], rect[1]), (rect[2], rect[3]), (255, 0, 0), 2)
        cv2.imshow("Image", img)
        cv2.waitKey(0)
        cv2.destroyAllWindows()

    def load_dataset(self):
        """Load train/test/validation splits and their images into memory.

        Only the "detection" label type is supported; any other label type
        raises NotImplementedError.
        """
        # FIX: these names were used below but never imported at module level,
        # which made the missing-validation branch raise NameError.
        from inspect import currentframe, getframeinfo
        if self.config.label == "detection":
            if not self.contain_dataset_files():
                self.meet_convention()
            Log.DEBUG_OUT = True
            Log.DEBUG("Loading pickle files")
            Log.DEBUG_OUT = False
            self.train_dataset = self.get_meta(os.path.join(self.config.dataset_dir, "train.pkl"))
            self.test_dataset = self.get_meta(os.path.join(self.config.dataset_dir, "test.pkl"))
            if os.path.exists(os.path.join(self.config.dataset_dir, "validation.pkl")):
                self.validation_dataset = self.get_meta(os.path.join(self.config.dataset_dir, "validation.pkl"))
            else:
                self.validation_dataset = None
                frameinfo = getframeinfo(currentframe())
                Log.WARNING("Unable to find validation dataset", file_name=__name__, line_number=frameinfo.lineno)
            self.train_dataset = self.fix_labeling_issue(self.train_dataset)
            self.test_dataset = self.fix_labeling_issue(self.test_dataset)
            self.validation_dataset = self.fix_labeling_issue(self.validation_dataset)
            Log.DEBUG_OUT = True
            Log.DEBUG("Loaded train, test and validation dataset")
            Log.DEBUG_OUT = False
            # Shuffle, then keep only a subset of test/validation rows to
            # bound memory use when the images are loaded below.
            test_indexes = np.arange(len(self.test_dataset))
            np.random.shuffle(test_indexes)
            validation_indexes = np.arange(len(self.validation_dataset))
            np.random.shuffle(validation_indexes)
            self.test_dataset = self.test_dataset.iloc[test_indexes].reset_index(drop=True)
            self.validation_dataset = self.validation_dataset.iloc[validation_indexes].reset_index(drop=True)
            self.test_dataset = self.test_dataset[:1000]
            self.validation_dataset = self.validation_dataset[:100]
            Log.DEBUG_OUT = True
            Log.DEBUG("Loading test images")
            Log.DEBUG_OUT = False
            self.test_dataset_images = self.load_images(self.test_dataset).astype(np.float32) / 255
            Log.DEBUG_OUT = True
            Log.DEBUG("Loading validation images")
            Log.DEBUG_OUT = False
            self.validation_dataset_images = self.load_images(self.validation_dataset).astype(np.float32) / 255
            # FIX: DataFrame.as_matrix() was removed from pandas; .values is
            # the long-standing equivalent.
            self.test_detection = self.test_dataset["is_face"].values
            self.dataset_loaded = True
            Log.DEBUG_OUT = True
            Log.DEBUG("Loaded all dataset and images")
            Log.DEBUG_OUT = False
        else:
            raise NotImplementedError("Not implemented for labels:" + str(self.labels))

    def generator(self, batch_size):
        """Generic batch generator; not supported for this dataset."""
        raise NotImplementedError("Not implmented!")

    def detection_data_genenerator(self, batch_size):
        """Yield endless (images, one-hot detection labels) training batches."""
        while True:
            indexes = np.arange(len(self.train_dataset))
            np.random.shuffle(indexes)
            for i in range(0, len(indexes) - batch_size, batch_size):
                current_indexes = indexes[i:i + batch_size]
                current_dataframe = self.train_dataset.iloc[current_indexes].reset_index(drop=True)
                current_images = self.load_images(current_dataframe)
                X = current_images.astype(np.float32) / 255
                X = X.reshape(-1, self.config.image_shape[0], self.config.image_shape[1], self.config.image_shape[2])
                detection = self.get_column(current_dataframe, "is_face").astype(np.uint8)
                # One-hot encode the binary face/non-face label.
                detection = np.eye(2)[detection]
                yield X, detection

    def load_images(self, dataframe):
        """Load every image referenced by ``dataframe`` as grayscale.

        Unreadable files are skipped, leaving zeros at their index.
        """
        output_images = np.zeros((len(dataframe), self.config.image_shape[0], self.config.image_shape[1], self.config.image_shape[2]))
        for index, row in dataframe.iterrows():
            file_location = row["file_location"]
            img = cv2.imread(file_location)
            if img is None:
                print("Unable to read image from ", file_location)
                continue
            img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            img = cv2.resize(img, (self.config.image_shape[0], self.config.image_shape[1]))
            output_images[index] = img.reshape(self.config.image_shape)
        return output_images

    def meet_convention(self):
        """Ensure train/test/validation pickles exist, creating them if needed."""
        if self.contain_dataset_files():
            return
        elif os.path.exists(os.path.join(self.config.dataset_dir, "all.pkl")):
            dataframe = pd.read_pickle(os.path.join(self.config.dataset_dir, "all.pkl"))
            train, test, validation = self.split_train_test_validation(dataframe)
            train.to_pickle(os.path.join(self.config.dataset_dir, "train.pkl"))
            test.to_pickle(os.path.join(self.config.dataset_dir, "test.pkl"))
            validation.to_pickle(os.path.join(self.config.dataset_dir, "validation.pkl"))
        else:
            dataframe = self.load_face_non_face_dataset()
            train, test, validation = self.split_train_test_validation(dataframe)
            train.to_pickle(os.path.join(self.config.dataset_dir, "train.pkl"))
            test.to_pickle(os.path.join(self.config.dataset_dir, "test.pkl"))
            validation.to_pickle(os.path.join(self.config.dataset_dir, "validation.pkl"))
            dataframe.to_pickle(os.path.join(self.config.dataset_dir, "all.pkl"))

    def load_face_non_face_dataset(self):
        """Build a DataFrame from the face/ and non-face/ image folders."""
        output_file_locations = []
        output_is_face = []
        for img_path in os.listdir(os.path.join(self.config.dataset_dir, "face")):
            output_file_locations += [os.path.join(self.config.dataset_dir, "face", img_path)]
            output_is_face += [1]
        for img_path in os.listdir(os.path.join(self.config.dataset_dir, "non-face")):
            output_file_locations += [os.path.join(self.config.dataset_dir, "non-face", img_path)]
            output_is_face += [0]
        output_df = pd.DataFrame(columns=["file_location", "is_face"])
        output_df["file_location"] = output_file_locations
        output_df["is_face"] = output_is_face
        return output_df

    def fix_labeling_issue(self, dataset):
        """Hook for label clean-up; identity for this dataset."""
        return dataset

    def rect_intersection(self, rect1, rect2):
        """Return the overlap area of two (x1, y1, x2, y2) rectangles."""
        x_overlap = max(0, min(rect1[2], rect2[2]) - max(rect1[0], rect2[0]))
        y_overlap = max(0, min(rect1[3], rect2[3]) - max(rect1[1], rect2[1]))
        overlapArea = x_overlap * y_overlap
        return overlapArea

    def rect_union(self, rect1, rect2):
        """Return the union area of two (x1, y1, x2, y2) rectangle arrays."""
        assert rect1.shape == (4,), "rect1 shape should be (4,) and it is " + str(rect1.shape)
        assert rect2.shape == (4,), "rect2 shape should be (4,) and it is " + str(rect2.shape)
        width1 = np.abs(rect1[0] - rect1[2])
        height1 = np.abs(rect1[1] - rect1[3])
        width2 = np.abs(rect2[0] - rect2[2])
        height2 = np.abs(rect2[1] - rect2[3])
        area1 = width1 * height1
        area2 = width2 * height2
        return area1 + area2 - self.rect_intersection(rect1, rect2)

    def bb_intersection_over_union(self, boxA, boxB):
        """Return the intersection-over-union of two bounding boxes."""
        intr = self.rect_intersection(boxA, boxB)
        if intr <= 0:
            return 0
        # FIX: the original called the bare name `rect_union`, which is
        # undefined at module level and always raised NameError.
        runion = self.rect_union(boxA, boxB)
        if runion <= 0:
            return 0
        iou = intr / float(runion)
        return iou

    def get_dataset_name(self):
        """Return the canonical short name of this dataset."""
        return "aflw"
class Rect(object):
    """Axis-aligned rectangle: top-left corner (x, y), width w, height h."""

    def __init__(self, x, y, w, h):
        self.x = x
        self.y = y
        self.w = w
        self.h = h

    def area(self):
        """Return the area of the rectangle."""
        return self.w * self.h

    def intersection(self, rect):
        """Return the overlap area between this rectangle and ``rect``.

        FIX: the original body referenced undefined names ``rect1``/``rect2``
        (copied from AflwDataset.rect_intersection) and always raised
        NameError.
        """
        x_overlap = max(0, min(self.x + self.w, rect.x + rect.w) - max(self.x, rect.x))
        y_overlap = max(0, min(self.y + self.h, rect.y + rect.h) - max(self.y, rect.y))
        return x_overlap * y_overlap

    def union(self, rect):
        """Return the union area (previously an unimplemented stub)."""
        return self.area() + rect.area() - self.intersection(rect)

    def iou(self, rect):
        """Return intersection-over-union (previously an unimplemented stub)."""
        union_area = self.union(rect)
        if union_area <= 0:
            return 0
        return self.intersection(rect) / float(union_area)

    def __str__(self):
        # FIX: the original concatenated non-string attributes directly
        # (e.g. "," + self.y), raising TypeError for numeric coordinates.
        return "(" + str(self.x) + "," + str(self.y) + ") (" + str(self.w) + "," + str(self.h) + ")"
"cv2.rectangle",
"numpy.abs",
"numpy.eye",
"cv2.imread",
"dlib.find_candidate_object_locations",
"os.path.join",
"loggers.Log.WARNING",
"cv2.imshow",
"loggers.Log.DEBUG",
"cv2.destroyAllWindows",
"cv2.cvtColor",
"pandas.DataFrame",
"cv2.resize",
"cv2.waitKey",
"numpy.random.shuffle"
] | [((1376, 1448), 'dlib.find_candidate_object_locations', 'dlib.find_candidate_object_locations', (['img', 'cand_rects'], {'min_size': 'min_size'}), '(img, cand_rects, min_size=min_size)\n', (1412, 1448), False, 'import dlib\n'), ((1765, 1789), 'cv2.imshow', 'cv2.imshow', (['"""Image"""', 'img'], {}), "('Image', img)\n", (1775, 1789), False, 'import cv2\n'), ((1797, 1811), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (1808, 1811), False, 'import cv2\n'), ((1820, 1843), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1841, 1843), False, 'import cv2\n'), ((7671, 7721), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['file_location', 'is_face']"}), "(columns=['file_location', 'is_face'])\n", (7683, 7721), True, 'import pandas as pd\n'), ((8455, 8482), 'numpy.abs', 'np.abs', (['(rect1[0] - rect1[2])'], {}), '(rect1[0] - rect1[2])\n', (8461, 8482), True, 'import numpy as np\n'), ((8499, 8526), 'numpy.abs', 'np.abs', (['(rect1[1] - rect1[3])'], {}), '(rect1[1] - rect1[3])\n', (8505, 8526), True, 'import numpy as np\n'), ((8543, 8570), 'numpy.abs', 'np.abs', (['(rect2[0] - rect2[2])'], {}), '(rect2[0] - rect2[2])\n', (8549, 8570), True, 'import numpy as np\n'), ((8587, 8614), 'numpy.abs', 'np.abs', (['(rect2[1] - rect2[3])'], {}), '(rect2[1] - rect2[3])\n', (8593, 8614), True, 'import numpy as np\n'), ((1690, 1764), 'cv2.rectangle', 'cv2.rectangle', (['img', '(rect[0], rect[1])', '(rect[2], rect[3])', '(255, 0, 0)', '(2)'], {}), '(img, (rect[0], rect[1]), (rect[2], rect[3]), (255, 0, 0), 2)\n', (1703, 1764), False, 'import cv2\n'), ((2050, 2083), 'loggers.Log.DEBUG', 'Log.DEBUG', (['"""Loading pickle files"""'], {}), "('Loading pickle files')\n", (2059, 2083), False, 'from loggers import Log\n'), ((3029, 3083), 'loggers.Log.DEBUG', 'Log.DEBUG', (['"""Loaded train, test and validation dataset"""'], {}), "('Loaded train, test and validation dataset')\n", (3038, 3083), False, 'from loggers import Log\n'), ((3190, 3221), 
'numpy.random.shuffle', 'np.random.shuffle', (['test_indexes'], {}), '(test_indexes)\n', (3207, 3221), True, 'import numpy as np\n'), ((3307, 3344), 'numpy.random.shuffle', 'np.random.shuffle', (['validation_indexes'], {}), '(validation_indexes)\n', (3324, 3344), True, 'import numpy as np\n'), ((3719, 3751), 'loggers.Log.DEBUG', 'Log.DEBUG', (['"""Loading test images"""'], {}), "('Loading test images')\n", (3728, 3751), False, 'from loggers import Log\n'), ((3928, 3966), 'loggers.Log.DEBUG', 'Log.DEBUG', (['"""Loading validation images"""'], {}), "('Loading validation images')\n", (3937, 3966), False, 'from loggers import Log\n'), ((4269, 4311), 'loggers.Log.DEBUG', 'Log.DEBUG', (['"""Loaded all dataset and images"""'], {}), "('Loaded all dataset and images')\n", (4278, 4311), False, 'from loggers import Log\n'), ((4698, 4724), 'numpy.random.shuffle', 'np.random.shuffle', (['indexes'], {}), '(indexes)\n', (4715, 4724), True, 'import numpy as np\n'), ((5647, 5672), 'cv2.imread', 'cv2.imread', (['file_location'], {}), '(file_location)\n', (5657, 5672), False, 'import cv2\n'), ((5810, 5847), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (5822, 5847), False, 'import cv2\n'), ((5865, 5938), 'cv2.resize', 'cv2.resize', (['img', '(self.config.image_shape[0], self.config.image_shape[1])'], {}), '(img, (self.config.image_shape[0], self.config.image_shape[1]))\n', (5875, 5938), False, 'import cv2\n'), ((7268, 7313), 'os.path.join', 'os.path.join', (['self.config.dataset_dir', '"""face"""'], {}), "(self.config.dataset_dir, 'face')\n", (7280, 7313), False, 'import os\n'), ((7473, 7522), 'os.path.join', 'os.path.join', (['self.config.dataset_dir', '"""non-face"""'], {}), "(self.config.dataset_dir, 'non-face')\n", (7485, 7522), False, 'import os\n'), ((2164, 2214), 'os.path.join', 'os.path.join', (['self.config.dataset_dir', '"""train.pkl"""'], {}), "(self.config.dataset_dir, 'train.pkl')\n", (2176, 2214), False, 'import 
os\n'), ((2261, 2310), 'os.path.join', 'os.path.join', (['self.config.dataset_dir', '"""test.pkl"""'], {}), "(self.config.dataset_dir, 'test.pkl')\n", (2273, 2310), False, 'import os\n'), ((2341, 2396), 'os.path.join', 'os.path.join', (['self.config.dataset_dir', '"""validation.pkl"""'], {}), "(self.config.dataset_dir, 'validation.pkl')\n", (2353, 2396), False, 'import os\n'), ((2648, 2750), 'loggers.Log.WARNING', 'Log.WARNING', (['"""Unable to find validation dataset"""'], {'file_name': '__name__', 'line_number': 'frameinfo.lineno'}), "('Unable to find validation dataset', file_name=__name__,\n line_number=frameinfo.lineno)\n", (2659, 2750), False, 'from loggers import Log\n'), ((6157, 6205), 'os.path.join', 'os.path.join', (['self.config.dataset_dir', '"""all.pkl"""'], {}), "(self.config.dataset_dir, 'all.pkl')\n", (6169, 6205), False, 'import os\n'), ((7351, 7406), 'os.path.join', 'os.path.join', (['self.config.dataset_dir', '"""face"""', 'img_path'], {}), "(self.config.dataset_dir, 'face', img_path)\n", (7363, 7406), False, 'import os\n'), ((7560, 7619), 'os.path.join', 'os.path.join', (['self.config.dataset_dir', '"""non-face"""', 'img_path'], {}), "(self.config.dataset_dir, 'non-face', img_path)\n", (7572, 7619), False, 'import os\n'), ((2454, 2509), 'os.path.join', 'os.path.join', (['self.config.dataset_dir', '"""validation.pkl"""'], {}), "(self.config.dataset_dir, 'validation.pkl')\n", (2466, 2509), False, 'import os\n'), ((5309, 5318), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (5315, 5318), True, 'import numpy as np\n'), ((6246, 6294), 'os.path.join', 'os.path.join', (['self.config.dataset_dir', '"""all.pkl"""'], {}), "(self.config.dataset_dir, 'all.pkl')\n", (6258, 6294), False, 'import os\n'), ((6403, 6453), 'os.path.join', 'os.path.join', (['self.config.dataset_dir', '"""train.pkl"""'], {}), "(self.config.dataset_dir, 'train.pkl')\n", (6415, 6453), False, 'import os\n'), ((6487, 6536), 'os.path.join', 'os.path.join', (['self.config.dataset_dir', 
'"""test.pkl"""'], {}), "(self.config.dataset_dir, 'test.pkl')\n", (6499, 6536), False, 'import os\n'), ((6576, 6631), 'os.path.join', 'os.path.join', (['self.config.dataset_dir', '"""validation.pkl"""'], {}), "(self.config.dataset_dir, 'validation.pkl')\n", (6588, 6631), False, 'import os\n'), ((6812, 6862), 'os.path.join', 'os.path.join', (['self.config.dataset_dir', '"""train.pkl"""'], {}), "(self.config.dataset_dir, 'train.pkl')\n", (6824, 6862), False, 'import os\n'), ((6896, 6945), 'os.path.join', 'os.path.join', (['self.config.dataset_dir', '"""test.pkl"""'], {}), "(self.config.dataset_dir, 'test.pkl')\n", (6908, 6945), False, 'import os\n'), ((6985, 7040), 'os.path.join', 'os.path.join', (['self.config.dataset_dir', '"""validation.pkl"""'], {}), "(self.config.dataset_dir, 'validation.pkl')\n", (6997, 7040), False, 'import os\n'), ((7079, 7127), 'os.path.join', 'os.path.join', (['self.config.dataset_dir', '"""all.pkl"""'], {}), "(self.config.dataset_dir, 'all.pkl')\n", (7091, 7127), False, 'import os\n')] |
import json
import os
import random
import unittest
import numpy as np
from property_set.property_set import PropertySet
class ZTest(unittest.TestCase):
    """Check PropertySet's compressibility factor Z against tabulated reference data."""
    def setUp(self):
        """Load the reference table and pick random sample points and intervals."""
        self.ps = PropertySet()
        # 16.04 g/mol converted to kg/mol (molar weight of methane -- TODO confirm).
        self.ps.MW = 16.04 / 1000
        # Reference table lives next to the test package, under databases/z.json.
        address = os.path.join(
            os.path.dirname(__file__), os.pardir, 'databases', "z.json")
        with open(address) as fp:
            points = json.load(fp)
        # 20 random reference points for the direct check.
        self.points = [random.choice(points) for _ in range(20)]
        # Pairs of adjacent table rows for the interpolation check.
        self.interp = []
        for i in range(20):
            index = random.randint(0, len(points) - 1)
            if index + 1 == len(points):
                # Last row has no right neighbour to pair with; skip it.
                continue
            self.interp.append([points[index], points[index + 1]])
    def testZ(self):
        """Z at tabulated points must match within 0.1% relative tolerance."""
        for i in self.points:
            # Rows appear to be (pressure, temperature, Z); the conversions
            # below suggest bar -> Pa and deg C -> K -- TODO confirm units.
            self.ps.P = i[0] * 10**5
            self.ps.T = i[1] + 273.15
            self.ps.invalidate_cache()
            self.assertLessEqual(abs(self.ps.Z - i[2]), i[2] * 0.001)
    def testZInterp(self):
        """Z at a midpoint must lie between the Z values of its two bounds."""
        for i in self.interp:
            # Midpoint of the two adjacent table rows (component-wise average).
            p = [np.average([i[0][j], i[1][j]]) for j in range(3)]
            self.ps.P = p[0] * 10**5
            self.ps.T = p[1] + 273.15
            self.ps.invalidate_cache()
            # we can't check exactly; just check the interpolated value is between the two bounds.
            # check to make sure the calculated z is between the two points.
            one = i[0][2] - self.ps.Z
            two = self.ps.Z - i[1][2]
            self.assertGreater(one * two, 0,
                               "z:{},P:{},T:{} not between {k[0][0]}-{k[1][0]},{k[0][1]}-{k[1][1]}"
                               .format(self.ps.Z, p[0], p[1], k=i))
| [
"random.choice",
"numpy.average",
"os.path.dirname",
"json.load",
"property_set.property_set.PropertySet"
] | [((196, 209), 'property_set.property_set.PropertySet', 'PropertySet', ([], {}), '()\n', (207, 209), False, 'from property_set.property_set import PropertySet\n'), ((288, 313), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (303, 313), False, 'import os\n'), ((405, 418), 'json.load', 'json.load', (['fp'], {}), '(fp)\n', (414, 418), False, 'import json\n'), ((443, 464), 'random.choice', 'random.choice', (['points'], {}), '(points)\n', (456, 464), False, 'import random\n'), ((1037, 1067), 'numpy.average', 'np.average', (['[i[0][j], i[1][j]]'], {}), '([i[0][j], i[1][j]])\n', (1047, 1067), True, 'import numpy as np\n')] |
#
# Generate matrix with transition times
#
import numpy as np
import pandas as pd
import pyEpiabm as pe
from pyEpiabm.property import InfectionStatus
from pyEpiabm.utility import InverseCdf
class TransitionTimeMatrix:
    """Generates the matrix of transition-time distributions.

    Rows and columns are labelled with the names of the
    :class:`InfectionStatus` members.  The element ij describes the time
    taken to move from the status of row i to the status of column j,
    either as an :class:`InverseCdf` object or as -1.0 for transitions we
    do not expect to happen, so accidental access will not pass silently.
    """

    # Maps (row status name, column status name) to the attribute names of
    # the (mean, iCDF) parameter pair on pe.Parameters.instance() used to
    # build the corresponding InverseCdf entry.  Replaces 14 hand-written,
    # near-identical assignments.
    _TRANSITION_PARAMS = {
        ('Exposed', 'InfectASympt'): ('latent_period', 'latent_period_iCDF'),
        ('Exposed', 'InfectMild'): ('latent_period', 'latent_period_iCDF'),
        ('Exposed', 'InfectGP'): ('latent_period', 'latent_period_iCDF'),
        ('InfectASympt', 'Recovered'): ('asympt_infect_period',
                                        'asympt_infect_icdf'),
        ('InfectMild', 'Recovered'): ('mean_mild_to_recov',
                                      'mild_to_recov_icdf'),
        ('InfectGP', 'Recovered'): ('mean_gp_to_recov', 'gp_to_recov_icdf'),
        ('InfectGP', 'InfectHosp'): ('mean_gp_to_hosp', 'gp_to_hosp_icdf'),
        ('InfectGP', 'Dead'): ('mean_gp_to_death', 'gp_to_death_icdf'),
        ('InfectHosp', 'Recovered'): ('mean_hosp_to_recov',
                                      'hosp_to_recov_icdf'),
        ('InfectHosp', 'InfectICU'): ('mean_hosp_to_icu', 'hosp_to_icu_icdf'),
        ('InfectHosp', 'Dead'): ('mean_hosp_to_death', 'hosp_to_death_icdf'),
        ('InfectICU', 'InfectICURecov'): ('mean_icu_to_icurecov',
                                          'icu_to_icurecov_icdf'),
        ('InfectICU', 'Dead'): ('mean_icu_to_death', 'icu_to_death_icdf'),
        ('InfectICURecov', 'Recovered'): ('mean_icurecov_to_recov',
                                          'icurecov_to_recov'),
    }

    def __init__(self):
        """Initialises the transition time matrix the same way as for the
        :class:`StateTransitionMatrix`, i.e. with the right labels for
        the rows and the columns, but with -1 as the default value.
        """
        nb_states = len(InfectionStatus)
        default_values = np.full((nb_states, nb_states), -1.0)
        labels = [status.name for status in InfectionStatus]
        self.matrix = pd.DataFrame(default_values,
                                   columns=labels,
                                   index=labels,
                                   dtype='object')

    def create_transition_time_matrix(self):
        """Fills a fresh transition time matrix with :class:`InverseCdf`
        objects, where the distributions of times of transition are defined.

        The element ij in the matrix is the :class:`InverseCdf` object for
        defining the transition time of someone with the infection status
        of row i to move to the infection status of column j.  Transitions
        that we do not expect to happen keep the default value of -1.0.

        Returns
        -------
        pd.DataFrame
            Matrix in the form of a dataframe
        """
        matrix = TransitionTimeMatrix().matrix
        params = pe.Parameters.instance()  # singleton, fetch once
        for (row, column), (mean_attr, icdf_attr) in \
                self._TRANSITION_PARAMS.items():
            matrix.loc[row, column] = InverseCdf(getattr(params, mean_attr),
                                                  getattr(params, icdf_attr))
        return matrix

    @staticmethod
    def _validate_statuses(row_status, column_status):
        """Raises ValueError unless both arguments are members of the
        InfectionStatus enum (shared by both update methods).
        """
        try:
            if (row_status not in InfectionStatus) or \
                    (column_status not in InfectionStatus):
                raise ValueError('Row and column inputs must be contained in' +
                                 ' the InfectionStatus enum')
        except TypeError:
            # `in` raises TypeError for inputs incomparable with the enum.
            raise ValueError('Row and column inputs must be contained in' +
                             ' the InfectionStatus enum')

    def update_transition_time_with_float(
            self,
            current_infection_status_row: InfectionStatus,
            next_infection_status_column: InfectionStatus,
            new_transition_time: float):
        """Method to manually update a transition time in the
        transition time matrix with a single deterministic value.

        Parameters
        ----------
        current_infection_status_row : InfectionStatus
            Infection status corresponding to
            the row where the transition time will be updated
        next_infection_status_column : InfectionStatus
            Infection status corresponding to
            the column where the transition time will be updated
        new_transition_time : float
            Updated transition time value (must be non-negative)
        """
        self._validate_statuses(current_infection_status_row,
                                next_infection_status_column)
        if new_transition_time < 0:
            raise ValueError('New transition time must be larger than' +
                             ' or equal to 0')
        # Extract row and column names from the enum members and update
        # the matrix with the single value.
        row = current_infection_status_row.name
        column = next_infection_status_column.name
        self.matrix.loc[row, column] = new_transition_time

    def update_transition_time_with_icdf(
            self,
            current_infection_status_row: InfectionStatus,
            next_infection_status_column: InfectionStatus,
            new_transition_time_icdf: np.ndarray,
            new_transition_time_icdf_mean: float):
        """Method to manually update a transition time in the
        transition time matrix with a full distribution.

        Parameters
        ----------
        current_infection_status_row : InfectionStatus
            Infection status corresponding to
            the row where the transition time will be updated
        next_infection_status_column : InfectionStatus
            Infection status corresponding to
            the column where the transition time will be updated
        new_transition_time_icdf : list
            The associated list of icdf values specifying the new
            distribution for the transition time (at least two
            non-negative elements)
        new_transition_time_icdf_mean : float
            The mean of the icdf (must be non-negative)
        """
        self._validate_statuses(current_infection_status_row,
                                next_infection_status_column)
        if new_transition_time_icdf_mean < 0:
            raise ValueError('New transition time mean must be larger than' +
                             ' or equal to 0')
        if len(new_transition_time_icdf) in [0, 1]:
            raise ValueError('List of icdf values must have at least two' +
                             ' elements')
        for elem in new_transition_time_icdf:
            if elem < 0:
                raise ValueError('List of icdf values must only contain' +
                                 ' non-negative numbers')
        # Extract row and column names from the enum members and update
        # the matrix with an :class:`InverseCdf` object.
        row = current_infection_status_row.name
        column = next_infection_status_column.name
        self.matrix.loc[row, column] = InverseCdf(
            new_transition_time_icdf_mean,
            new_transition_time_icdf)
| [
"pandas.DataFrame",
"numpy.full",
"pyEpiabm.utility.InverseCdf",
"pyEpiabm.Parameters.instance"
] | [((601, 638), 'numpy.full', 'np.full', (['(nb_states, nb_states)', '(-1.0)'], {}), '((nb_states, nb_states), -1.0)\n', (608, 638), True, 'import numpy as np\n'), ((722, 792), 'pandas.DataFrame', 'pd.DataFrame', (['zero_trans'], {'columns': 'labels', 'index': 'labels', 'dtype': '"""object"""'}), "(zero_trans, columns=labels, index=labels, dtype='object')\n", (734, 792), True, 'import pandas as pd\n'), ((8355, 8422), 'pyEpiabm.utility.InverseCdf', 'InverseCdf', (['new_transition_time_icdf_mean', 'new_transition_time_icdf'], {}), '(new_transition_time_icdf_mean, new_transition_time_icdf)\n', (8365, 8422), False, 'from pyEpiabm.utility import InverseCdf\n'), ((1760, 1784), 'pyEpiabm.Parameters.instance', 'pe.Parameters.instance', ([], {}), '()\n', (1782, 1784), True, 'import pyEpiabm as pe\n'), ((1823, 1847), 'pyEpiabm.Parameters.instance', 'pe.Parameters.instance', ([], {}), '()\n', (1845, 1847), True, 'import pyEpiabm as pe\n'), ((1938, 1962), 'pyEpiabm.Parameters.instance', 'pe.Parameters.instance', ([], {}), '()\n', (1960, 1962), True, 'import pyEpiabm as pe\n'), ((2001, 2025), 'pyEpiabm.Parameters.instance', 'pe.Parameters.instance', ([], {}), '()\n', (2023, 2025), True, 'import pyEpiabm as pe\n'), ((2114, 2138), 'pyEpiabm.Parameters.instance', 'pe.Parameters.instance', ([], {}), '()\n', (2136, 2138), True, 'import pyEpiabm as pe\n'), ((2177, 2201), 'pyEpiabm.Parameters.instance', 'pe.Parameters.instance', ([], {}), '()\n', (2199, 2201), True, 'import pyEpiabm as pe\n'), ((2296, 2320), 'pyEpiabm.Parameters.instance', 'pe.Parameters.instance', ([], {}), '()\n', (2318, 2320), True, 'import pyEpiabm as pe\n'), ((2366, 2390), 'pyEpiabm.Parameters.instance', 'pe.Parameters.instance', ([], {}), '()\n', (2388, 2390), True, 'import pyEpiabm as pe\n'), ((2483, 2507), 'pyEpiabm.Parameters.instance', 'pe.Parameters.instance', ([], {}), '()\n', (2505, 2507), True, 'import pyEpiabm as pe\n'), ((2551, 2575), 'pyEpiabm.Parameters.instance', 'pe.Parameters.instance', ([], 
{}), '()\n', (2573, 2575), True, 'import pyEpiabm as pe\n'), ((2666, 2690), 'pyEpiabm.Parameters.instance', 'pe.Parameters.instance', ([], {}), '()\n', (2688, 2690), True, 'import pyEpiabm as pe\n'), ((2732, 2756), 'pyEpiabm.Parameters.instance', 'pe.Parameters.instance', ([], {}), '()\n', (2754, 2756), True, 'import pyEpiabm as pe\n'), ((2846, 2870), 'pyEpiabm.Parameters.instance', 'pe.Parameters.instance', ([], {}), '()\n', (2868, 2870), True, 'import pyEpiabm as pe\n'), ((2911, 2935), 'pyEpiabm.Parameters.instance', 'pe.Parameters.instance', ([], {}), '()\n', (2933, 2935), True, 'import pyEpiabm as pe\n'), ((3018, 3042), 'pyEpiabm.Parameters.instance', 'pe.Parameters.instance', ([], {}), '()\n', (3040, 3042), True, 'import pyEpiabm as pe\n'), ((3084, 3108), 'pyEpiabm.Parameters.instance', 'pe.Parameters.instance', ([], {}), '()\n', (3106, 3108), True, 'import pyEpiabm as pe\n'), ((3199, 3223), 'pyEpiabm.Parameters.instance', 'pe.Parameters.instance', ([], {}), '()\n', (3221, 3223), True, 'import pyEpiabm as pe\n'), ((3267, 3291), 'pyEpiabm.Parameters.instance', 'pe.Parameters.instance', ([], {}), '()\n', (3289, 3291), True, 'import pyEpiabm as pe\n'), ((3384, 3408), 'pyEpiabm.Parameters.instance', 'pe.Parameters.instance', ([], {}), '()\n', (3406, 3408), True, 'import pyEpiabm as pe\n'), ((3450, 3474), 'pyEpiabm.Parameters.instance', 'pe.Parameters.instance', ([], {}), '()\n', (3472, 3474), True, 'import pyEpiabm as pe\n'), ((3560, 3584), 'pyEpiabm.Parameters.instance', 'pe.Parameters.instance', ([], {}), '()\n', (3582, 3584), True, 'import pyEpiabm as pe\n'), ((3628, 3652), 'pyEpiabm.Parameters.instance', 'pe.Parameters.instance', ([], {}), '()\n', (3650, 3652), True, 'import pyEpiabm as pe\n'), ((3749, 3773), 'pyEpiabm.Parameters.instance', 'pe.Parameters.instance', ([], {}), '()\n', (3771, 3773), True, 'import pyEpiabm as pe\n'), ((3819, 3843), 'pyEpiabm.Parameters.instance', 'pe.Parameters.instance', ([], {}), '()\n', (3841, 3843), True, 'import pyEpiabm as 
pe\n'), ((3932, 3956), 'pyEpiabm.Parameters.instance', 'pe.Parameters.instance', ([], {}), '()\n', (3954, 3956), True, 'import pyEpiabm as pe\n'), ((3999, 4023), 'pyEpiabm.Parameters.instance', 'pe.Parameters.instance', ([], {}), '()\n', (4021, 4023), True, 'import pyEpiabm as pe\n'), ((4119, 4143), 'pyEpiabm.Parameters.instance', 'pe.Parameters.instance', ([], {}), '()\n', (4141, 4143), True, 'import pyEpiabm as pe\n'), ((4191, 4215), 'pyEpiabm.Parameters.instance', 'pe.Parameters.instance', ([], {}), '()\n', (4213, 4215), True, 'import pyEpiabm as pe\n')] |
# python3
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for reverb-based adders."""
from typing import Dict, Sequence, Union
from acme import types
from acme.adders.reverb import base
from acme.tf import utils as tf2_utils
import jax.numpy as jnp
import numpy as np
import tree
def zeros_like(x: Union[np.ndarray, int, float, np.number]):
  """Returns a zero-filled object of the same (d)type and shape as the input.

  Unlike `np.zeros_like()`, this handles `np.number`, `int`, `float`, and
  `jax.numpy.DeviceArray` inputs without first converting them to
  `np.ndarray`s.

  Args:
    x: The object to replace with 0s.

  Returns:
    A zero-filed object of the same (d)type and shape as the input.

  Raises:
    ValueError: if `x` is not a numpy array, jax array, int, or float.
  """
  # Guard-clause dispatch: scalars first, then array types.
  if isinstance(x, (int, float, np.number)):
    return type(x)(0)
  if isinstance(x, jnp.DeviceArray):
    return jnp.zeros_like(x)
  if isinstance(x, np.ndarray):
    return np.zeros_like(x)
  raise ValueError(
      f'Input ({type(x)}) must be either a numpy array, an int, or a float.')
def final_step_like(step: base.Step,
                    next_observation: types.NestedArray) -> base.Step:
  """Returns a terminal Step that carries only `next_observation`.

  Every non-observation component (action, reward, discount, extras) is
  replaced with a zero-filled object of the matching structure and dtype,
  mirroring the shapes found in `step`.
  """
  blanks = tree.map_structure(
      zeros_like, (step.action, step.reward, step.discount, step.extras))
  blank_action, blank_reward, blank_discount, blank_extras = blanks
  return base.Step(
      observation=next_observation,
      action=blank_action,
      reward=blank_reward,
      discount=blank_discount,
      start_of_episode=False,
      extras=blank_extras)
def calculate_priorities(
    priority_fns: base.PriorityFnMapping,
    steps: Union[base.Step, Sequence[base.Step]]) -> Dict[str, float]:
  """Computes a per-table priority for a sequence of steps.

  A list of steps is first stacked along the time dimension into a single
  PriorityFnInput tuple.  Priorities are computed for the sequence or
  transition starting from step[0].next_observation: the observation stack
  spans steps[0:], while actions, rewards, discounts and extras correspond
  to steps[1:].  Components other than the observation in step[0] are
  therefore ignored, so step[0] may be an "initial step" whose action,
  reward, discount and extras are each None.

  Args:
    priority_fns: a mapping from table names to priority functions (i.e. a
      callable of type PriorityFn).  The given function will be used to
      generate the priority (a float) for the given table.
    steps: a list of Step objects used to compute the priorities.

  Returns:
    A dictionary mapping from table names to the priority (a float) for the
    given collection of steps.
  """
  if isinstance(steps, list):
    steps = tf2_utils.stack_sequence_fields(steps)

  # Only build the (potentially expensive) stacked input when at least one
  # table actually has a priority function.
  if any(fn is not None for fn in priority_fns.values()):
    fn_input = base.PriorityFnInput(*steps)

  priorities = {}
  for table, priority_fn in priority_fns.items():
    priorities[table] = priority_fn(fn_input) if priority_fn else 1.0
  return priorities
| [
"acme.tf.utils.stack_sequence_fields",
"acme.adders.reverb.base.Step",
"jax.numpy.zeros_like",
"acme.adders.reverb.base.PriorityFnInput",
"tree.map_structure",
"numpy.zeros_like"
] | [((1934, 2024), 'tree.map_structure', 'tree.map_structure', (['zeros_like', '(step.action, step.reward, step.discount, step.extras)'], {}), '(zeros_like, (step.action, step.reward, step.discount,\n step.extras))\n', (1952, 2024), False, 'import tree\n'), ((2094, 2251), 'acme.adders.reverb.base.Step', 'base.Step', ([], {'observation': 'next_observation', 'action': 'zero_action', 'reward': 'zero_reward', 'discount': 'zero_discount', 'start_of_episode': '(False)', 'extras': 'zero_extras'}), '(observation=next_observation, action=zero_action, reward=\n zero_reward, discount=zero_discount, start_of_episode=False, extras=\n zero_extras)\n', (2103, 2251), False, 'from acme.adders.reverb import base\n'), ((3668, 3706), 'acme.tf.utils.stack_sequence_fields', 'tf2_utils.stack_sequence_fields', (['steps'], {}), '(steps)\n', (3699, 3706), True, 'from acme.tf import utils as tf2_utils\n'), ((3856, 3884), 'acme.adders.reverb.base.PriorityFnInput', 'base.PriorityFnInput', (['*steps'], {}), '(*steps)\n', (3876, 3884), False, 'from acme.adders.reverb import base\n'), ((1447, 1464), 'jax.numpy.zeros_like', 'jnp.zeros_like', (['x'], {}), '(x)\n', (1461, 1464), True, 'import jax.numpy as jnp\n'), ((1510, 1526), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (1523, 1526), True, 'import numpy as np\n')] |
import numpy as np
import scipy.io.wavfile as wav
import librosa
import os,sys,shutil,argparse,copy,pickle
import math,scipy
from faceformer import Faceformer
from transformers import Wav2Vec2FeatureExtractor,Wav2Vec2Processor
import torch
import torch.nn as nn
import torch.nn.functional as F
import cv2
import tempfile
from subprocess import call
os.environ['PYOPENGL_PLATFORM'] = 'osmesa' # egl
import pyrender
from psbody.mesh import Mesh
import trimesh
@torch.no_grad()
def test_model(args):
    """Runs FaceFormer inference on one wav file and saves the vertices.

    Loads the trained weights and the personalized templates, extracts
    wav2vec features from ``args.wav_path``, predicts the per-frame face
    vertices conditioned on ``args.condition``, and writes them to
    ``<args.result_path>/<wav basename>.npy``.

    :param args: parsed command-line namespace (see ``main``).
    """
    if not os.path.exists(args.result_path):
        os.makedirs(args.result_path)

    # Build the model and restore the trained weights.
    model = Faceformer(args)
    model.load_state_dict(
        torch.load(os.path.join(args.dataset, '{}.pth'.format(args.model_name))))
    model = model.to(torch.device(args.device))
    model.eval()

    # Personalized neutral-face templates, one entry per subject.
    template_file = os.path.join(args.dataset, args.template_path)
    with open(template_file, 'rb') as fin:
        templates = pickle.load(fin, encoding='latin1')

    # One-hot encoding of the conditioning training subject.
    train_subjects_list = [i for i in args.train_subjects.split(" ")]
    one_hot_labels = np.eye(len(train_subjects_list))
    # Renamed from `iter`, which shadowed the builtin of the same name.
    condition_idx = train_subjects_list.index(args.condition)
    one_hot = one_hot_labels[condition_idx]
    one_hot = np.reshape(one_hot, (-1, one_hot.shape[0]))
    one_hot = torch.FloatTensor(one_hot).to(device=args.device)

    # Flattened neutral template for the chosen test subject.
    temp = templates[args.subject]
    template = temp.reshape((-1))
    template = np.reshape(template, (-1, template.shape[0]))
    template = torch.FloatTensor(template).to(device=args.device)

    # Extract wav2vec input features from the audio at 16 kHz.
    wav_path = args.wav_path
    test_name = os.path.basename(wav_path).split(".")[0]
    speech_array, sampling_rate = librosa.load(os.path.join(wav_path), sr=16000)
    processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
    audio_feature = np.squeeze(processor(speech_array,
                                     sampling_rate=16000).input_values)
    audio_feature = np.reshape(audio_feature, (-1, audio_feature.shape[0]))
    audio_feature = torch.FloatTensor(audio_feature).to(device=args.device)

    prediction = model.predict(audio_feature, template, one_hot)
    prediction = prediction.squeeze()  # (seq_len, V*3)
    np.save(os.path.join(args.result_path, test_name),
            prediction.detach().cpu().numpy())
# The implementation of rendering is borrowed from VOCA: https://github.com/TimoBolkart/voca/blob/master/utils/rendering.py
def render_mesh_helper(args, mesh, t_center, rot=None, tex_img=None, z_offset=0):
    """Renders a single mesh frame to an 800x800 image with pyrender.

    :param args: command-line namespace; uses ``args.dataset`` (camera
        intrinsics) and ``args.background_black``.
    :param mesh: psbody Mesh to render.
    :param t_center: center point the rotation is applied around.
    :param rot: Rodrigues rotation vector; defaults to no rotation.
        (Default changed from ``np.zeros(3)`` evaluated at import time to
        ``None`` to avoid the shared-mutable-default-argument pitfall.)
    :param tex_img: unused texture placeholder, kept for API compatibility.
    :param z_offset: camera distance offset along z.
    :return: rendered frame with channels reversed (BGR, for OpenCV).
    :raises ValueError: if ``args.dataset`` is neither "BIWI" nor "vocaset".
    """
    if rot is None:
        rot = np.zeros(3)

    # Camera intrinsics differ between the two datasets.
    if args.dataset == "BIWI":
        camera_params = {'c': np.array([400, 400]),
                         'k': np.array([-0.19816071, 0.92822711, 0, 0, 0]),
                         'f': np.array([4754.97941935 / 8, 4754.97941935 / 8])}
    elif args.dataset == "vocaset":
        camera_params = {'c': np.array([400, 400]),
                         'k': np.array([-0.19816071, 0.92822711, 0, 0, 0]),
                         'f': np.array([4754.97941935 / 2, 4754.97941935 / 2])}
    else:
        # Previously an unknown dataset fell through to a NameError below.
        raise ValueError('Unknown dataset: {}'.format(args.dataset))

    frustum = {'near': 0.01, 'far': 3.0, 'height': 800, 'width': 800}

    # Rotate a copy about t_center so the caller's mesh is left untouched.
    mesh_copy = Mesh(mesh.v, mesh.f)
    mesh_copy.v[:] = cv2.Rodrigues(rot)[0].dot(
        (mesh_copy.v - t_center).T).T + t_center

    intensity = 2.0
    rgb_per_v = None

    primitive_material = pyrender.material.MetallicRoughnessMaterial(
        alphaMode='BLEND',
        baseColorFactor=[0.3, 0.3, 0.3, 1.0],
        metallicFactor=0.8,
        roughnessFactor=0.8
    )

    tri_mesh = trimesh.Trimesh(vertices=mesh_copy.v, faces=mesh_copy.f,
                               vertex_colors=rgb_per_v)
    render_mesh = pyrender.Mesh.from_trimesh(tri_mesh,
                                              material=primitive_material,
                                              smooth=True)

    bg_color = [0, 0, 0] if args.background_black else [255, 255, 255]
    scene = pyrender.Scene(ambient_light=[.2, .2, .2], bg_color=bg_color)

    camera = pyrender.IntrinsicsCamera(fx=camera_params['f'][0],
                                       fy=camera_params['f'][1],
                                       cx=camera_params['c'][0],
                                       cy=camera_params['c'][1],
                                       znear=frustum['near'],
                                       zfar=frustum['far'])

    scene.add(render_mesh, pose=np.eye(4))

    camera_pose = np.eye(4)
    camera_pose[:3, 3] = np.array([0, 0, 1.0 - z_offset])
    scene.add(camera, pose=[[1, 0, 0, 0],
                            [0, 1, 0, 0],
                            [0, 0, 1, 1],
                            [0, 0, 0, 1]])

    # One directional light at the camera plus four rotated around it.
    angle = np.pi / 6.0
    pos = camera_pose[:3, 3]
    light_color = np.array([1., 1., 1.])
    light = pyrender.DirectionalLight(color=light_color, intensity=intensity)

    light_pose = np.eye(4)
    light_pose[:3, 3] = pos
    scene.add(light, pose=light_pose.copy())

    for axis_rot in (np.array([angle, 0, 0]), np.array([-angle, 0, 0]),
                     np.array([0, -angle, 0]), np.array([0, angle, 0])):
        light_pose[:3, 3] = cv2.Rodrigues(axis_rot)[0].dot(pos)
        scene.add(light, pose=light_pose.copy())

    flags = pyrender.RenderFlags.SKIP_CULL_FACES
    try:
        r = pyrender.OffscreenRenderer(viewport_width=frustum['width'],
                                         viewport_height=frustum['height'])
        color, _ = r.render(scene, flags=flags)
    except Exception:  # was a bare except:, which also caught KeyboardInterrupt
        print('pyrender: Failed rendering frame')
        color = np.zeros((frustum['height'], frustum['width'], 3), dtype='uint8')

    return color[..., ::-1]
def render_sequence(args):
    """Renders the predicted vertex sequence to an .mp4 video.

    Reads ``<args.result_path>/<wav basename>.npy`` produced by
    ``test_model``, renders every frame with ``render_mesh_helper``, and
    re-encodes the result with ffmpeg into
    ``<args.output_path>/<name>_<subject>_condition_<condition>.mp4``.

    :param args: parsed command-line namespace (see ``main``).
    :raises ValueError: if ``args.dataset`` is neither "BIWI" nor "vocaset".
    """
    wav_path = args.wav_path
    test_name = os.path.basename(wav_path).split(".")[0]
    predicted_vertices_path = os.path.join(args.result_path, test_name + ".npy")

    if args.dataset == "BIWI":
        template_file = os.path.join(args.dataset, args.render_template_path,
                                     "BIWI.ply")
    elif args.dataset == "vocaset":
        template_file = os.path.join(args.dataset, args.render_template_path,
                                     "FLAME_sample.ply")
    else:
        # Previously an unknown dataset left template_file unbound (NameError).
        raise ValueError('Unknown dataset: {}'.format(args.dataset))

    print("rendering: ", test_name)

    template = Mesh(filename=template_file)
    predicted_vertices = np.load(predicted_vertices_path)
    predicted_vertices = np.reshape(predicted_vertices,
                                    (-1, args.vertice_dim // 3, 3))

    output_path = args.output_path
    if not os.path.exists(output_path):
        os.makedirs(output_path)

    num_frames = predicted_vertices.shape[0]
    # Frames go to a temporary mp4 first, then ffmpeg re-encodes it.
    tmp_video_file = tempfile.NamedTemporaryFile('w', suffix='.mp4', dir=output_path)
    try:
        writer = cv2.VideoWriter(tmp_video_file.name,
                                 cv2.VideoWriter_fourcc(*'mp4v'),
                                 args.fps, (800, 800), True)
        center = np.mean(predicted_vertices[0], axis=0)
        for i_frame in range(num_frames):
            render_mesh = Mesh(predicted_vertices[i_frame], template.f)
            pred_img = render_mesh_helper(args, render_mesh, center)
            pred_img = pred_img.astype(np.uint8)
            writer.write(pred_img)
        writer.release()

        file_name = test_name + "_" + args.subject + "_condition_" + args.condition
        video_fname = os.path.join(output_path, file_name + '.mp4')
        cmd = ('ffmpeg' + ' -i {0} -pix_fmt yuv420p -qscale 0 {1}'.format(
            tmp_video_file.name, video_fname)).split()
        call(cmd)
    finally:
        # Closing deletes the temp file; previously it was never closed
        # explicitly and lingered until garbage collection.
        tmp_video_file.close()
def main():
    """Parses command-line options, runs inference, then renders the video."""
    arg_parser = argparse.ArgumentParser(description='FaceFormer: Speech-Driven 3D Facial Animation with Transformers')
    add = arg_parser.add_argument
    add("--model_name", type=str, default="biwi")
    add("--dataset", type=str, default="BIWI", help='vocaset or BIWI')
    add("--fps", type=float, default=25, help='frame rate - 30 for vocaset; 25 for BIWI')
    add("--feature_dim", type=int, default=128, help='64 for vocaset; 128 for BIWI')
    add("--period", type=int, default=25, help='period in PPE - 30 for vocaset; 25 for BIWI')
    add("--vertice_dim", type=int, default=23370*3, help='number of vertices - 5023*3 for vocaset; 23370*3 for BIWI')
    add("--device", type=str, default="cuda")
    add("--train_subjects", type=str, default="F2 F3 F4 M3 M4 M5")
    add("--test_subjects", type=str, default="F1 F5 F6 F7 F8 M1 M2 M6")
    add("--output_path", type=str, default="demo/output", help='path of the rendered video sequence')
    add("--wav_path", type=str, default="demo/wav/test.wav", help='path of the input audio signal')
    add("--result_path", type=str, default="demo/result", help='path of the predictions')
    add("--condition", type=str, default="M3", help='select a conditioning subject from train_subjects')
    add("--subject", type=str, default="M1", help='select a subject from test_subjects or train_subjects')
    add("--background_black", type=bool, default=True, help='whether to use black background')
    add("--template_path", type=str, default="templates.pkl", help='path of the personalized templates')
    add("--render_template_path", type=str, default="templates", help='path of the mesh in BIWI/FLAME topology')
    args = arg_parser.parse_args()

    test_model(args)
    render_sequence(args)
if __name__=="__main__":
main()
| [
"numpy.array",
"pyrender.material.MetallicRoughnessMaterial",
"psbody.mesh.Mesh",
"pyrender.IntrinsicsCamera",
"numpy.mean",
"os.path.exists",
"numpy.reshape",
"argparse.ArgumentParser",
"subprocess.call",
"cv2.VideoWriter_fourcc",
"tempfile.NamedTemporaryFile",
"transformers.Wav2Vec2Processor... | [((462, 477), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (475, 477), False, 'import torch\n'), ((613, 629), 'faceformer.Faceformer', 'Faceformer', (['args'], {}), '(args)\n', (623, 629), False, 'from faceformer import Faceformer\n'), ((816, 862), 'os.path.join', 'os.path.join', (['args.dataset', 'args.template_path'], {}), '(args.dataset, args.template_path)\n', (828, 862), False, 'import os, sys, shutil, argparse, copy, pickle\n'), ((1189, 1232), 'numpy.reshape', 'np.reshape', (['one_hot', '(-1, one_hot.shape[0])'], {}), '(one_hot, (-1, one_hot.shape[0]))\n', (1199, 1232), True, 'import numpy as np\n'), ((1394, 1439), 'numpy.reshape', 'np.reshape', (['template', '(-1, template.shape[0])'], {}), '(template, (-1, template.shape[0]))\n', (1404, 1439), True, 'import numpy as np\n'), ((1688, 1752), 'transformers.Wav2Vec2Processor.from_pretrained', 'Wav2Vec2Processor.from_pretrained', (['"""facebook/wav2vec2-base-960h"""'], {}), "('facebook/wav2vec2-base-960h')\n", (1721, 1752), False, 'from transformers import Wav2Vec2FeatureExtractor, Wav2Vec2Processor\n'), ((1862, 1917), 'numpy.reshape', 'np.reshape', (['audio_feature', '(-1, audio_feature.shape[0])'], {}), '(audio_feature, (-1, audio_feature.shape[0]))\n', (1872, 1917), True, 'import numpy as np\n'), ((2376, 2387), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (2384, 2387), True, 'import numpy as np\n'), ((2987, 3007), 'psbody.mesh.Mesh', 'Mesh', (['mesh.v', 'mesh.f'], {}), '(mesh.v, mesh.f)\n', (2991, 3007), False, 'from psbody.mesh import Mesh\n'), ((3159, 3308), 'pyrender.material.MetallicRoughnessMaterial', 'pyrender.material.MetallicRoughnessMaterial', ([], {'alphaMode': '"""BLEND"""', 'baseColorFactor': '[0.3, 0.3, 0.3, 1.0]', 'metallicFactor': '(0.8)', 'roughnessFactor': '(0.8)'}), "(alphaMode='BLEND',\n baseColorFactor=[0.3, 0.3, 0.3, 1.0], metallicFactor=0.8,\n roughnessFactor=0.8)\n", (3202, 3308), False, 'import pyrender\n'), ((3397, 3483), 
'trimesh.Trimesh', 'trimesh.Trimesh', ([], {'vertices': 'mesh_copy.v', 'faces': 'mesh_copy.f', 'vertex_colors': 'rgb_per_v'}), '(vertices=mesh_copy.v, faces=mesh_copy.f, vertex_colors=\n rgb_per_v)\n', (3412, 3483), False, 'import trimesh\n'), ((3497, 3575), 'pyrender.Mesh.from_trimesh', 'pyrender.Mesh.from_trimesh', (['tri_mesh'], {'material': 'primitive_material', 'smooth': '(True)'}), '(tri_mesh, material=primitive_material, smooth=True)\n', (3523, 3575), False, 'import pyrender\n'), ((3793, 3976), 'pyrender.IntrinsicsCamera', 'pyrender.IntrinsicsCamera', ([], {'fx': "camera_params['f'][0]", 'fy': "camera_params['f'][1]", 'cx': "camera_params['c'][0]", 'cy': "camera_params['c'][1]", 'znear': "frustum['near']", 'zfar': "frustum['far']"}), "(fx=camera_params['f'][0], fy=camera_params['f'][1\n ], cx=camera_params['c'][0], cy=camera_params['c'][1], znear=frustum[\n 'near'], zfar=frustum['far'])\n", (3818, 3976), False, 'import pyrender\n'), ((4220, 4229), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (4226, 4229), True, 'import numpy as np\n'), ((4254, 4286), 'numpy.array', 'np.array', (['[0, 0, 1.0 - z_offset]'], {}), '([0, 0, 1.0 - z_offset])\n', (4262, 4286), True, 'import numpy as np\n'), ((4525, 4550), 'numpy.array', 'np.array', (['[1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0])\n', (4533, 4550), True, 'import numpy as np\n'), ((4560, 4625), 'pyrender.DirectionalLight', 'pyrender.DirectionalLight', ([], {'color': 'light_color', 'intensity': 'intensity'}), '(color=light_color, intensity=intensity)\n', (4585, 4625), False, 'import pyrender\n'), ((4644, 4653), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (4650, 4653), True, 'import numpy as np\n'), ((5744, 5794), 'os.path.join', 'os.path.join', (['args.result_path', "(test_name + '.npy')"], {}), "(args.result_path, test_name + '.npy')\n", (5756, 5794), False, 'import os, sys, shutil, argparse, copy, pickle\n'), ((6126, 6154), 'psbody.mesh.Mesh', 'Mesh', ([], {'filename': 'template_file'}), 
'(filename=template_file)\n', (6130, 6154), False, 'from psbody.mesh import Mesh\n'), ((6180, 6212), 'numpy.load', 'np.load', (['predicted_vertices_path'], {}), '(predicted_vertices_path)\n', (6187, 6212), True, 'import numpy as np\n'), ((6238, 6300), 'numpy.reshape', 'np.reshape', (['predicted_vertices', '(-1, args.vertice_dim // 3, 3)'], {}), '(predicted_vertices, (-1, args.vertice_dim // 3, 3))\n', (6248, 6300), True, 'import numpy as np\n'), ((6472, 6536), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', (['"""w"""'], {'suffix': '""".mp4"""', 'dir': 'output_path'}), "('w', suffix='.mp4', dir=output_path)\n", (6499, 6536), False, 'import tempfile\n'), ((6666, 6704), 'numpy.mean', 'np.mean', (['predicted_vertices[0]'], {'axis': '(0)'}), '(predicted_vertices[0], axis=0)\n', (6673, 6704), True, 'import numpy as np\n'), ((7065, 7110), 'os.path.join', 'os.path.join', (['output_path', "(file_name + '.mp4')"], {}), "(output_path, file_name + '.mp4')\n", (7077, 7110), False, 'import os, sys, shutil, argparse, copy, pickle\n'), ((7234, 7243), 'subprocess.call', 'call', (['cmd'], {}), '(cmd)\n', (7238, 7243), False, 'from subprocess import call\n'), ((7270, 7377), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""FaceFormer: Speech-Driven 3D Facial Animation with Transformers"""'}), "(description=\n 'FaceFormer: Speech-Driven 3D Facial Animation with Transformers')\n", (7293, 7377), False, 'import os, sys, shutil, argparse, copy, pickle\n'), ((511, 543), 'os.path.exists', 'os.path.exists', (['args.result_path'], {}), '(args.result_path)\n', (525, 543), False, 'import os, sys, shutil, argparse, copy, pickle\n'), ((553, 582), 'os.makedirs', 'os.makedirs', (['args.result_path'], {}), '(args.result_path)\n', (564, 582), False, 'import os, sys, shutil, argparse, copy, pickle\n'), ((751, 776), 'torch.device', 'torch.device', (['args.device'], {}), '(args.device)\n', (763, 776), False, 'import torch\n'), ((926, 961), 'pickle.load', 
'pickle.load', (['fin'], {'encoding': '"""latin1"""'}), "(fin, encoding='latin1')\n", (937, 961), False, 'import os, sys, shutil, argparse, copy, pickle\n'), ((1638, 1660), 'os.path.join', 'os.path.join', (['wav_path'], {}), '(wav_path)\n', (1650, 1660), False, 'import os, sys, shutil, argparse, copy, pickle\n'), ((2125, 2166), 'os.path.join', 'os.path.join', (['args.result_path', 'test_name'], {}), '(args.result_path, test_name)\n', (2137, 2166), False, 'import os, sys, shutil, argparse, copy, pickle\n'), ((3622, 3687), 'pyrender.Scene', 'pyrender.Scene', ([], {'ambient_light': '[0.2, 0.2, 0.2]', 'bg_color': '[0, 0, 0]'}), '(ambient_light=[0.2, 0.2, 0.2], bg_color=[0, 0, 0])\n', (3636, 3687), False, 'import pyrender\n'), ((3711, 3782), 'pyrender.Scene', 'pyrender.Scene', ([], {'ambient_light': '[0.2, 0.2, 0.2]', 'bg_color': '[255, 255, 255]'}), '(ambient_light=[0.2, 0.2, 0.2], bg_color=[255, 255, 255])\n', (3725, 3782), False, 'import pyrender\n'), ((5284, 5383), 'pyrender.OffscreenRenderer', 'pyrender.OffscreenRenderer', ([], {'viewport_width': "frustum['width']", 'viewport_height': "frustum['height']"}), "(viewport_width=frustum['width'], viewport_height\n =frustum['height'])\n", (5310, 5383), False, 'import pyrender\n'), ((5847, 5912), 'os.path.join', 'os.path.join', (['args.dataset', 'args.render_template_path', '"""BIWI.ply"""'], {}), "(args.dataset, args.render_template_path, 'BIWI.ply')\n", (5859, 5912), False, 'import os, sys, shutil, argparse, copy, pickle\n'), ((6343, 6370), 'os.path.exists', 'os.path.exists', (['output_path'], {}), '(output_path)\n', (6357, 6370), False, 'import os, sys, shutil, argparse, copy, pickle\n'), ((6380, 6404), 'os.makedirs', 'os.makedirs', (['output_path'], {}), '(output_path)\n', (6391, 6404), False, 'import os, sys, shutil, argparse, copy, pickle\n'), ((6592, 6623), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'mp4v'"], {}), "(*'mp4v')\n", (6614, 6623), False, 'import cv2\n'), ((6766, 6811), 'psbody.mesh.Mesh', 
'Mesh', (['predicted_vertices[i_frame]', 'template.f'], {}), '(predicted_vertices[i_frame], template.f)\n', (6770, 6811), False, 'from psbody.mesh import Mesh\n'), ((1245, 1271), 'torch.FloatTensor', 'torch.FloatTensor', (['one_hot'], {}), '(one_hot)\n', (1262, 1271), False, 'import torch\n'), ((1453, 1480), 'torch.FloatTensor', 'torch.FloatTensor', (['template'], {}), '(template)\n', (1470, 1480), False, 'import torch\n'), ((1936, 1968), 'torch.FloatTensor', 'torch.FloatTensor', (['audio_feature'], {}), '(audio_feature)\n', (1953, 1968), False, 'import torch\n'), ((2477, 2497), 'numpy.array', 'np.array', (['[400, 400]'], {}), '([400, 400])\n', (2485, 2497), True, 'import numpy as np\n'), ((2529, 2573), 'numpy.array', 'np.array', (['[-0.19816071, 0.92822711, 0, 0, 0]'], {}), '([-0.19816071, 0.92822711, 0, 0, 0])\n', (2537, 2573), True, 'import numpy as np\n'), ((2605, 2653), 'numpy.array', 'np.array', (['[4754.97941935 / 8, 4754.97941935 / 8]'], {}), '([4754.97941935 / 8, 4754.97941935 / 8])\n', (2613, 2653), True, 'import numpy as np\n'), ((4190, 4199), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (4196, 4199), True, 'import numpy as np\n'), ((5505, 5570), 'numpy.zeros', 'np.zeros', (["(frustum['height'], frustum['width'], 3)"], {'dtype': '"""uint8"""'}), "((frustum['height'], frustum['width'], 3), dtype='uint8')\n", (5513, 5570), True, 'import numpy as np\n'), ((5973, 6046), 'os.path.join', 'os.path.join', (['args.dataset', 'args.render_template_path', '"""FLAME_sample.ply"""'], {}), "(args.dataset, args.render_template_path, 'FLAME_sample.ply')\n", (5985, 6046), False, 'import os, sys, shutil, argparse, copy, pickle\n'), ((1550, 1576), 'os.path.basename', 'os.path.basename', (['wav_path'], {}), '(wav_path)\n', (1566, 1576), False, 'import os, sys, shutil, argparse, copy, pickle\n'), ((2721, 2741), 'numpy.array', 'np.array', (['[400, 400]'], {}), '([400, 400])\n', (2729, 2741), True, 'import numpy as np\n'), ((2773, 2817), 'numpy.array', 'np.array', 
(['[-0.19816071, 0.92822711, 0, 0, 0]'], {}), '([-0.19816071, 0.92822711, 0, 0, 0])\n', (2781, 2817), True, 'import numpy as np\n'), ((2849, 2897), 'numpy.array', 'np.array', (['[4754.97941935 / 2, 4754.97941935 / 2]'], {}), '([4754.97941935 / 2, 4754.97941935 / 2])\n', (2857, 2897), True, 'import numpy as np\n'), ((5673, 5699), 'os.path.basename', 'os.path.basename', (['wav_path'], {}), '(wav_path)\n', (5689, 5699), False, 'import os, sys, shutil, argparse, copy, pickle\n'), ((4768, 4791), 'numpy.array', 'np.array', (['[angle, 0, 0]'], {}), '([angle, 0, 0])\n', (4776, 4791), True, 'import numpy as np\n'), ((4889, 4913), 'numpy.array', 'np.array', (['[-angle, 0, 0]'], {}), '([-angle, 0, 0])\n', (4897, 4913), True, 'import numpy as np\n'), ((5010, 5034), 'numpy.array', 'np.array', (['[0, -angle, 0]'], {}), '([0, -angle, 0])\n', (5018, 5034), True, 'import numpy as np\n'), ((5131, 5154), 'numpy.array', 'np.array', (['[0, angle, 0]'], {}), '([0, angle, 0])\n', (5139, 5154), True, 'import numpy as np\n'), ((3029, 3047), 'cv2.Rodrigues', 'cv2.Rodrigues', (['rot'], {}), '(rot)\n', (3042, 3047), False, 'import cv2\n')] |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
import astropy.units as u
from astropy.visualization import quantity_support
from gammapy.maps import MapAxis, MapAxes
from .core import IRF
__all__ = ["EffectiveAreaTable2D"]
class EffectiveAreaTable2D(IRF):
    """2D effective area table.

    Data format specification: :ref:`gadf:aeff_2d`

    Parameters
    ----------
    energy_axis_true : `MapAxis`
        True energy axis
    offset_axis : `MapAxis`
        Field of view offset axis.
    data : `~astropy.units.Quantity`
        Effective area
    meta : dict
        Meta data

    Examples
    --------
    Here's an example you can use to learn about this class:

    >>> from gammapy.irf import EffectiveAreaTable2D
    >>> filename = '$GAMMAPY_DATA/cta-1dc/caldb/data/cta/1dc/bcf/South_z20_50h/irf_file.fits'
    >>> aeff = EffectiveAreaTable2D.read(filename, hdu='EFFECTIVE AREA')
    >>> print(aeff)
    EffectiveAreaTable2D
    --------------------
    <BLANKLINE>
    axes : ['energy_true', 'offset']
    shape : (42, 6)
    ndim : 2
    unit : m2
    dtype : >f4
    <BLANKLINE>

    Here's another one, created from scratch, without reading a file:

    >>> from gammapy.irf import EffectiveAreaTable2D
    >>> from gammapy.maps import MapAxis
    >>> energy_axis_true = MapAxis.from_energy_bounds("0.1 TeV", "100 TeV", nbin=30, name="energy_true")
    >>> offset_axis = MapAxis.from_bounds(0, 5, nbin=4, name="offset")
    >>> aeff = EffectiveAreaTable2D(axes=[energy_axis_true, offset_axis], data=1e10, unit="cm2")
    >>> print(aeff)
    EffectiveAreaTable2D
    --------------------
    <BLANKLINE>
    axes : ['energy_true', 'offset']
    shape : (30, 4)
    ndim : 2
    unit : cm2
    dtype : float64
    <BLANKLINE>
    """

    tag = "aeff_2d"
    required_axes = ["energy_true", "offset"]

    def plot_energy_dependence(self, ax=None, offset=None, **kwargs):
        """Plot effective area versus energy for a given offset.

        Parameters
        ----------
        ax : `~matplotlib.axes.Axes`, optional
            Axis
        offset : `~astropy.coordinates.Angle`
            Offset
        kwargs : dict
            Forwarded tp plt.plot()

        Returns
        -------
        ax : `~matplotlib.axes.Axes`
            Axis
        """
        import matplotlib.pyplot as plt

        if ax is None:
            ax = plt.gca()
        if offset is None:
            # Default: sample four offsets across the full offset range.
            lo, hi = self.axes["offset"].bounds
            offset = np.linspace(lo, hi, 4)
        energy_axis = self.axes["energy_true"]
        for off in offset:
            aeff = self.evaluate(offset=off, energy_true=energy_axis.center)
            # quantity_support lets matplotlib consume Quantity arrays.
            with quantity_support():
                ax.plot(energy_axis.center, aeff,
                        label=f"offset = {off:.1f}", **kwargs)
        energy_axis.format_plot_xaxis(ax=ax)
        ax.set_ylabel(f"Effective Area [{ax.yaxis.units}]")
        ax.legend()
        return ax

    def plot_offset_dependence(self, ax=None, energy=None, **kwargs):
        """Plot effective area versus offset for a given energy.

        Parameters
        ----------
        ax : `~matplotlib.axes.Axes`, optional
            Axis
        energy : `~astropy.units.Quantity`
            Energy
        **kwargs : dict
            Keyword argument passed to `~matplotlib.pyplot.plot`

        Returns
        -------
        ax : `~matplotlib.axes.Axes`
            Axis
        """
        import matplotlib.pyplot as plt

        if ax is None:
            ax = plt.gca()
        if energy is None:
            # Default: four energies log-spaced between the first and
            # last true-energy bin centers.
            centers = self.axes["energy_true"].center
            energy = np.geomspace(centers[0], centers[-1], 4)
        offset_axis = self.axes["offset"]
        for ee in energy:
            area = self.evaluate(offset=offset_axis.center, energy_true=ee)
            # Normalize to the peak so curves at different energies are
            # comparable on one relative scale.
            area /= np.nanmax(area)
            if np.isnan(area).all():
                continue
            with quantity_support():
                ax.plot(offset_axis.center, area,
                        label=f"energy = {ee:.1f}", **kwargs)
        offset_axis.format_plot_xaxis(ax=ax)
        ax.set_ylim(0, 1.1)
        ax.set_ylabel("Relative Effective Area")
        ax.legend(loc="best")
        return ax

    def plot(self, ax=None, add_cbar=True, **kwargs):
        """Plot effective area image."""
        import matplotlib.pyplot as plt

        if ax is None:
            ax = plt.gca()
        energy_axis = self.axes["energy_true"]
        offset_axis = self.axes["offset"]
        # Broadcast energy over a new axis to evaluate the full 2D grid.
        aeff = self.evaluate(
            offset=offset_axis.center,
            energy_true=energy_axis.center[:, np.newaxis],
        )
        kwargs.setdefault("cmap", "GnBu")
        kwargs.setdefault("edgecolors", "face")
        kwargs.setdefault("vmin", np.nanmin(aeff.value))
        kwargs.setdefault("vmax", np.nanmax(aeff.value))
        with quantity_support():
            # Transpose so energy runs along x and offset along y.
            caxes = ax.pcolormesh(energy_axis.edges, offset_axis.edges,
                                  aeff.value.T, **kwargs)
        energy_axis.format_plot_xaxis(ax=ax)
        offset_axis.format_plot_yaxis(ax=ax)
        if add_cbar:
            ax.figure.colorbar(caxes, ax=ax,
                               label=f"Effective Area [{aeff.unit}]")
        return ax

    def peek(self, figsize=(15, 5)):
        """Quick-look summary plots."""
        import matplotlib.pyplot as plt

        fig, axes = plt.subplots(nrows=1, ncols=3, figsize=figsize)
        self.plot_energy_dependence(ax=axes[0])
        self.plot_offset_dependence(ax=axes[1])
        self.plot(ax=axes[2])
        plt.tight_layout()

    @classmethod
    def from_parametrization(cls, energy_axis_true=None, instrument="HESS"):
        r"""Create parametrized effective area.

        Parametrizations of the effective areas of different Cherenkov
        telescopes taken from Appendix B of Abramowski et al. (2010), see
        https://ui.adsabs.harvard.edu/abs/2010MNRAS.402.1342A .

        .. math::
            A_{eff}(E) = g_1 \left(\frac{E}{\mathrm{MeV}}\right)^{-g_2}\exp{\left(-\frac{g_3}{E}\right)}

        This method does not model the offset dependence of the effective area,
        but just assumes that it is constant.

        Parameters
        ----------
        energy_axis_true : `MapAxis`
            Energy binning, analytic function is evaluated at log centers
        instrument : {'HESS', 'HESS2', 'CTA'}
            Instrument name

        Returns
        -------
        aeff : `EffectiveAreaTable2D`
            Effective area table
        """
        # Parametrization constants per instrument.
        # Units: g1 (cm^2), g2 (), g3 (MeV)
        known_pars = {
            "HESS": [6.85e9, 0.0891, 5e5],
            "HESS2": [2.05e9, 0.0891, 1e5],
            "CTA": [1.71e11, 0.0891, 1e5],
        }
        if instrument not in known_pars:
            raise ValueError(
                f"Unknown instrument: {instrument}\n"
                f"Valid instruments: {list(known_pars.keys())}"
            )
        if energy_axis_true is None:
            energy_axis_true = MapAxis.from_energy_bounds(
                "2 GeV", "200 TeV", nbin=20, per_decade=True, name="energy_true"
            )
        g1, g2, g3 = known_pars[instrument]
        offset_axis = MapAxis.from_edges([0., 5.] * u.deg, name="offset")
        axes = MapAxes([energy_axis_true, offset_axis])
        coords = axes.get_coord()
        energy_mev = coords["energy_true"].to_value("MeV")
        offset = coords["offset"]
        # Constant in offset: broadcast the analytic form over the grid.
        data = np.ones_like(offset.value) * g1 * energy_mev ** (-g2) \
            * np.exp(-g3 / energy_mev)
        # TODO: fake offset dependence?
        return cls(axes=axes, data=data, unit="cm2",
                   meta={"TELESCOP": instrument})
| [
"numpy.ones_like",
"astropy.visualization.quantity_support",
"gammapy.maps.MapAxis.from_edges",
"matplotlib.pyplot.gca",
"gammapy.maps.MapAxis.from_energy_bounds",
"numpy.geomspace",
"numpy.exp",
"numpy.linspace",
"numpy.isnan",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.tight_layout",
"... | [((5452, 5499), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(3)', 'figsize': 'figsize'}), '(nrows=1, ncols=3, figsize=figsize)\n', (5464, 5499), True, 'import matplotlib.pyplot as plt\n'), ((5634, 5652), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (5650, 5652), True, 'import matplotlib.pyplot as plt\n'), ((7291, 7344), 'gammapy.maps.MapAxis.from_edges', 'MapAxis.from_edges', (['([0.0, 5.0] * u.deg)'], {'name': '"""offset"""'}), "([0.0, 5.0] * u.deg, name='offset')\n", (7309, 7344), False, 'from gammapy.maps import MapAxis, MapAxes\n'), ((7358, 7398), 'gammapy.maps.MapAxes', 'MapAxes', (['[energy_axis_true, offset_axis]'], {}), '([energy_axis_true, offset_axis])\n', (7365, 7398), False, 'from gammapy.maps import MapAxis, MapAxes\n'), ((2399, 2408), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2406, 2408), True, 'import matplotlib.pyplot as plt\n'), ((2538, 2570), 'numpy.linspace', 'np.linspace', (['off_min', 'off_max', '(4)'], {}), '(off_min, off_max, 4)\n', (2549, 2570), True, 'import numpy as np\n'), ((3563, 3572), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3570, 3572), True, 'import matplotlib.pyplot as plt\n'), ((3750, 3779), 'numpy.geomspace', 'np.geomspace', (['e_min', 'e_max', '(4)'], {}), '(e_min, e_max, 4)\n', (3762, 3779), True, 'import numpy as np\n'), ((3946, 3961), 'numpy.nanmax', 'np.nanmax', (['area'], {}), '(area)\n', (3955, 3961), True, 'import numpy as np\n'), ((4496, 4505), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (4503, 4505), True, 'import matplotlib.pyplot as plt\n'), ((4745, 4766), 'numpy.nanmin', 'np.nanmin', (['aeff.value'], {}), '(aeff.value)\n', (4754, 4766), True, 'import numpy as np\n'), ((4768, 4789), 'numpy.nanmax', 'np.nanmax', (['aeff.value'], {}), '(aeff.value)\n', (4777, 4789), True, 'import numpy as np\n'), ((4975, 4993), 'astropy.visualization.quantity_support', 'quantity_support', ([], {}), '()\n', (4991, 4993), False, 'from 
astropy.visualization import quantity_support\n'), ((7106, 7202), 'gammapy.maps.MapAxis.from_energy_bounds', 'MapAxis.from_energy_bounds', (['"""2 GeV"""', '"""200 TeV"""'], {'nbin': '(20)', 'per_decade': '(True)', 'name': '"""energy_true"""'}), "('2 GeV', '200 TeV', nbin=20, per_decade=True,\n name='energy_true')\n", (7132, 7202), False, 'from gammapy.maps import MapAxis, MapAxes\n'), ((7582, 7602), 'numpy.exp', 'np.exp', (['(-g3 / energy)'], {}), '(-g3 / energy)\n', (7588, 7602), True, 'import numpy as np\n'), ((2783, 2801), 'astropy.visualization.quantity_support', 'quantity_support', ([], {}), '()\n', (2799, 2801), False, 'from astropy.visualization import quantity_support\n'), ((4082, 4100), 'astropy.visualization.quantity_support', 'quantity_support', ([], {}), '()\n', (4098, 4100), False, 'from astropy.visualization import quantity_support\n'), ((3977, 3991), 'numpy.isnan', 'np.isnan', (['area'], {}), '(area)\n', (3985, 3991), True, 'import numpy as np\n'), ((7530, 7556), 'numpy.ones_like', 'np.ones_like', (['offset.value'], {}), '(offset.value)\n', (7542, 7556), True, 'import numpy as np\n')] |
import os
import time
import h5py
import multiprocessing as mp
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import astropy.units as u
from matplotlib.backends.backend_pdf import PdfPages
from astropy.io import fits
# from astropy.wcs import WCS
# from astropy.coordinates import Angle
from astropy.constants import c, N_A
# from corner import corner
from sklearn.linear_model import LinearRegression
# from .idc_voronoi import voronoi_m
from .idc_functions import SEMBB, BEMBB, WD, PowerLaw, B_fast
from .idc_functions import best_fit_and_error, normalize_pdf, save_fits_gz
from .idc_fitting_old import kappa_calibration as kc_old
from .z0mg_RSRF import z0mg_RSRF
# Non-interactive matplotlib: figures are only written to disk, never shown.
plt.ioff()
# Model properties
# Number of free parameters of each fitting method (SE/FB/BE/WD/PL).
ndims = {'SE': 3, 'FB': 2, 'WD': 3, 'BE': 3, 'PL': 4}
# Parameters for each model
parameters = {'SE': ['dust.surface.density', 'dust.temperature', 'beta'],
              'FB': ['dust.surface.density', 'dust.temperature'],
              'BE': ['dust.surface.density', 'dust.temperature', 'beta2'],
              'WD': ['dust.surface.density', 'dust.temperature',
                     'warm.dust.fraction'],
              'PL': ['dust.surface.density', 'alpha', 'gamma', 'logUmin']}
# Axis index of each parameter in the precomputed model grid, used to
# marginalize the likelihood cube along one parameter at a time.
axis_ids = {'SE': {'dust.surface.density': 1, 'dust.temperature': 0,
                   'beta': 2},
            'FB': {'dust.surface.density': 1, 'dust.temperature': 0},
            'BE': {'dust.surface.density': 1, 'dust.temperature': 0,
                   'beta2': 2},
            'WD': {'dust.surface.density': 1, 'dust.temperature': 0,
                   'warm.dust.fraction': 2},
            'PL': {'dust.surface.density': 1, 'alpha': 0, 'gamma': 2,
                   'logUmin': 3}}
# Grid parameters: min, max, step
# NOTE(review): the bare string below is an older, finer grid kept for
# reference only; it is never executed.
"""
grid_para = {'dust.surface.density': [-4., 1., 0.025],
             'dust.temperature': [5., 50., 0.5],
             'beta': [-1.0, 4.0, 0.1],
             'beta2': [-1.0, 4.0, 0.1],
             'warm.dust.fraction': [0.0, 0.05, 0.002],
             'alpha': [1.1, 3.0, 0.1], # Remember to avoid alpha==1
             'gamma': [-4, 0, 0.2],
             'logUmin': [-2, 1.5, 0.1]}
"""
grid_para = {'dust.surface.density': [-4., 1., 0.025],
             'dust.temperature': [5., 50., 0.5],
             'beta': [-1.0, 4.0, 0.1],
             'beta2': [-1.0, 4.0, 0.25],
             'warm.dust.fraction': [0.0, 0.05, 0.002],
             'alpha': [1.1, 5.1, 0.2], # Remember to avoid alpha==1
             'gamma': [-3, 0, 0.2],
             'logUmin': [-1.5, 1.5, 0.2]}
# Parameter properties
# 0: is log; 1: 1d array; 2: units[normal/log]
# For each parameter: [fit/report in log10 space?, placeholder that is
# replaced below by the 1d grid values, axis-label string(s) for plotting
# (linear form and, when logged, the log form)].
v_prop = {'dust.surface.density':
          [True, -1,
           [r'$\Sigma_d$ $[M_\odot {\rm pc}^{-2}]$',
            r'$\log(\Sigma_d$ $[M_\odot {\rm pc}^{-2}])$']],
          'dust.temperature': [False, -1, [r'$T_d$ [K]']],
          'beta': [False, -1, [r'$\beta$']],
          'beta2': [False, -1, [r'$\beta_2$']],
          'warm.dust.fraction': [False, -1, [r'$f_w$']],
          'alpha': [False, -1, [r'$\alpha$']],
          'gamma': [True, -1, [r'$\gamma$', r'$\log(\gamma)$']],
          'logUmin': [False, -1, [r'$\log(U)_{min}$']],
          'chi2': [False, -1, [r'$\chi^2$']]}
# Fill slot 1 of v_prop with the actual 1d sample grid of each parameter.
for p in grid_para.keys():
    v_prop[p][1] = np.arange(grid_para[p][0], grid_para[p][1], grid_para[p][2])
# Band and instrument properties
all_instr = ['pacs', 'spire', 'mips']
# Reference wavelength of each band (presumably micron, per band naming).
band_wl = {'pacs70': 70.0, 'pacs100': 100.0, 'pacs160': 160.0,
           'spire250': 250.0, 'spire350': 350.0, 'spire500': 500.0,
           'mips24': 24.0, 'mips70': 70.0, 'mips160': 160.0}
# Capitalized band names (presumably for composing file names -- confirm).
band_cap = {'pacs70': 'PACS_70', 'pacs100': 'PACS_100', 'pacs160': 'PACS_160',
            'spire250': 'SPIRE_250', 'spire350': 'SPIRE_350',
            'spire500': 'SPIRE_500',
            'mips24': 'MIPS_24', 'mips70': 'MIPS_70', 'mips160': 'MIPS_160'}
# Instrument that owns each band.
band_instr = {'pacs70': 'pacs', 'pacs100': 'pacs', 'pacs160': 'pacs',
              'spire250': 'spire', 'spire350': 'spire', 'spire500': 'spire',
              'mips24': 'mips', 'mips70': 'mips', 'mips160': 'mips'}
# For calibration error
# MIPS: from Spitzer cookbook
# cau: fractional calibration uncertainty shared by all bands of one
# instrument; cru: additional fractional per-band uncertainty (both enter
# the construction of cali_mat2 in fit_dust_density).
cau = {'pacs': 10.0 / 100.0, 'spire': 8.0 / 100.0, 'mips': 2.0 / 100.0}
cru = {'pacs70': 2.0 / 100, 'pacs100': 2.0 / 100, 'pacs160': 2.0 / 100,
       'spire250': 1.5 / 100, 'spire350': 1.5 / 100, 'spire500': 1.5 / 100,
       'mips24': 4.0 / 100, 'mips70': 7.0 / 100, 'mips160': 12.0 / 100}
# For integrals
# Beam FWHM per map (presumably arcsec -- confirm against the
# resolution-element computation in fit_dust_density).
FWHM = {'SPIRE_500': 36.09, 'SPIRE_350': 24.88, 'SPIRE_250': 18.15,
        'Gauss_25': 25, 'PACS_160': 11.18, 'PACS_100': 7.04,
        'HERACLES': 13}
def diskmask_UTOMO18(name, ress,
                     bands=['pacs100', 'pacs160', 'spire250', 'spire350',
                            'spire500'],
                     datapath='data/UTOMO18_dust/',
                     projectpath='Projects/UTOMO18/'):
    """Build and save a combined disk mask for each resolution.

    For every resolution in `ress`, read the per-band ``*_mask.fits`` files
    under ``datapath/name/res/``, AND them together, and write the result
    to ``<name>_diskmask.fits`` in the same directory (0/1 integer map,
    reusing the header of the last band mask read).
    """
    for res in ress:
        res_dir = datapath + name + '/' + res + '/'
        filenames = os.listdir(res_dir)
        band_masks = []
        for band in bands:
            for filename in filenames:
                parts = filename.split('_')
                # File names of interest look like ..._<band>_..._mask.fits
                if len(parts) < 4:
                    continue
                if parts[-4] != band or parts[-1] != 'mask.fits':
                    continue
                mask_data, hdr = fits.getdata(res_dir + filename,
                                              header=True)
                band_masks.append(mask_data.astype(bool))
        # Exactly one mask per band must have been found.
        assert len(band_masks) == len(bands)
        combined = np.all(band_masks, axis=0)
        out_fn = res_dir + name + '_diskmask.fits'
        fits.writeto(out_fn, combined.astype(int), hdr, overwrite=True)
def _skip_entry(name, i, j):
    """Return True for covariance entries that are excluded from the
    power-law extrapolation (and forced to zero) for this galaxy."""
    if name == 'm31':
        return [i, j] in ([0, 3], [0, 4], [3, 0], [4, 0])
    if name == 'm33':
        return i != j
    return False


def _bkgcov_from_maps(name, res, bands, datapath, res_good):
    """Measure the background covariance matrix of the band maps at one
    resolution from off-disk pixels, with 3-MAD outlier rejection.

    Reads the per-band maps and the disk mask under
    ``datapath/name/res/`` (building the disk mask first if missing) and
    returns the (nwl, nwl) covariance matrix of the background pixels.
    """
    nwl = len(bands)
    respath = datapath + name + '/' + res + '/'
    mask_fn = respath + name + '_diskmask.fits'
    if not os.path.isfile(mask_fn):
        diskmask_UTOMO18(name, res_good)
    diskmask = fits.getdata(mask_fn).astype(bool)
    fns = os.listdir(respath)
    seds = []
    for band in bands:
        for fn in fns:
            temp = fn.split('_')
            # Band maps look like ..._<band>_...; skip the mask files.
            if len(temp) < 4:
                continue
            elif temp[-4] == band:
                if temp[-1] != 'mask.fits':
                    seds.append(fits.getdata(respath + fn))
    assert len(seds) == nwl
    seds = np.array(seds)
    # Background = finite in every band AND outside the disk.
    non_nanmask = np.all(np.isfinite(seds), axis=0)
    bkgmask = (~diskmask) * non_nanmask
    # Reject pixels deviating by more than 3 MAD in any band.
    outliermask = np.zeros_like(bkgmask, dtype=bool)
    for i in range(nwl):
        AD = np.abs(seds[i] - np.median(seds[i][bkgmask]))
        MAD = np.median(AD[bkgmask])
        with np.errstate(invalid='ignore'):
            outliermask += AD > 3 * MAD
    bkgmask = bkgmask * (~outliermask)
    return np.cov(seds[:, bkgmask])


def bkgcov_UTOMO18(name, res_good, res_all,
                   bands=['pacs100', 'pacs160', 'spire250', 'spire350',
                          'spire500'],
                   datapath='data/UTOMO18_dust/',
                   projectpath='Projects/UTOMO18/'):
    """Measure, extrapolate and save the background covariance matrices.

    The covariance is measured directly at the trusted resolutions
    (`res_good`), a linear model of log10|cov| vs. log10(resolution) is
    fitted per matrix entry, and that model is used to predict the
    covariance at every resolution in `res_all`. Each matrix is written
    to ``<name>_bkgcov.fits`` next to the data; a diagnostic plot of the
    fit against directly-measured check values goes to ``output/``.
    """
    print('Generating bkgcov for', name)
    res_good_num = [int(r.strip('res_').strip('pc')) for r in res_good]
    res_all_num = [int(r.strip('res_').strip('pc')) for r in res_all]
    nwl = len(bands)
    bands_wl = ['100', '160', '250', '350', '500']
    # Measure at the trusted resolutions and save.
    bkgcov_good = []
    for res in res_good:
        print('--good res:', res)
        bkgcov = _bkgcov_from_maps(name, res, bands, datapath, res_good)
        # Zero the entries known to be unreliable for these galaxies
        # (the same entries are skipped in the extrapolation below).
        for i in range(nwl):
            for j in range(nwl):
                if _skip_entry(name, i, j):
                    bkgcov[i, j] = 0
        bkgcov_good.append(bkgcov)
        respath = datapath + name + '/' + res + '/'
        fits.writeto(respath + name + '_bkgcov.fits', bkgcov, overwrite=True)
    # Measure at the remaining resolutions only to check the fit.
    bkgcov_check = []
    for res in res_all[:-1]:
        print('--check res:', res)
        bkgcov_check.append(_bkgcov_from_maps(name, res, bands, datapath,
                                              res_good))
    bkgcov_check = np.array(bkgcov_check)
    # Fit log10|cov(i, j)| as a linear function of log10(resolution).
    bkgcov_good = np.array(bkgcov_good)
    models = np.empty([nwl, nwl], dtype=object)
    for i in range(nwl):
        for j in range(nwl):
            models[i, j] = LinearRegression(fit_intercept=True)
            if _skip_entry(name, i, j):
                continue
            models[i, j].fit(np.log10(res_good_num).reshape([-1, 1]),
                             np.log10(np.abs(bkgcov_good[:, i, j]))
                             .reshape([-1, 1]))
    # Predict the covariance at all requested resolutions.
    bkgcov_fit = []
    for r, res in enumerate(res_all):
        print('----fit res:', res)
        res_num = res_all_num[r]
        bkgcov = np.zeros([nwl, nwl])
        for i in range(nwl):
            for j in range(nwl):
                if _skip_entry(name, i, j):
                    # BUG FIX: skipped entries now stay 0; previously
                    # 10**0 = 1 was applied to them as well.
                    continue
                # BUG FIX: LinearRegression.predict() requires a 2D
                # array; the old scalar argument raises ValueError.
                logcov = models[i, j].predict(
                    np.array([[np.log10(res_num)]]))[0, 0]
                bkgcov[i, j] = 10**logcov
        bkgcov_fit.append(bkgcov)
        if res not in res_good:
            respath = datapath + name + '/' + res + '/'
            fits.writeto(respath + name + '_bkgcov.fits', bkgcov,
                         overwrite=True)
    bkgcov_fit = np.array(bkgcov_fit)
    # Diagnostic plot: fitted (blue) vs directly measured (red) entries.
    ylims = {'lmc': (5*10**(-4), 10**(1)),
             'smc': (2*10**(-7), 7*10**(0)),
             'm31': (10**(-5), 1.5*10**(1)),
             'm33': (10**(-3), 10**(1))}
    fig, ax = plt.subplots(nwl, nwl, figsize=(10, 10))
    for i in range(nwl):
        for j in range(nwl):
            ax[i, j].loglog(res_all_num, bkgcov_fit[:, i, j],
                            marker='+', ms=10, color='b', lw=0.5)
            ax[i, j].scatter(res_all_num[:-1], bkgcov_check[:, i, j],
                             s=20, marker='x', color='r')
            ax[i, j].set_ylim(ylims[name])
            # Mark the coarsest trusted resolution.
            ax[i, j].plot([res_good_num[-1]] * 2, ax[i, j].get_ylim(), 'k--',
                          alpha=0.3)
            ax[i, j].set_title(bands_wl[i] + '-' + bands_wl[j], color='k',
                               x=0.95, y=0.8, ha='right')
            if i == 4:
                ax[i, j].set_xlabel('resolution (pc)')
            else:
                ax[i, j].set_xticklabels([])
            if j != 0:
                ax[i, j].set_yticklabels([])
    fig.tight_layout()
    fig.savefig('output/' + name + '.png')
    plt.close('all')
def fit_dust_density(name, beta_f, bands,
lambdac_f=300.0, method_abbr='FB', del_model=False,
fake=False, nop=5, targetSN=5, Voronoi=False,
project_name='UTOMO18', save_pdfs=True, rand_cube=False,
observe_fns=[], mask_fn='', subdir=None,
notes='', galactic_integrated=False,
better_bkgcov=None, res_arcsec=None,
import_beta=False, beta_in=None, input_avg_SED=False,
avg_SED=[]):
assert len(observe_fns) == len(bands)
randcubesize = 100
#
nwl = len(bands)
diskmask, hdr = fits.getdata(mask_fn, header=True)
diskmask = diskmask.astype(bool)
list_shape = list(diskmask.shape)
sed = np.empty(list_shape + [nwl])
for i in range(nwl):
sed[:, :, i] = fits.getdata(observe_fns[i], header=False)
non_nanmask = np.all(np.isfinite(sed), axis=-1)
diskmask = diskmask * non_nanmask
bkgmask = (~diskmask) * non_nanmask
# method_abbr: SE, FB, BE, WD, PL
#
# Reading wavelength #
#
wl = np.array([band_wl[b] for b in bands])
# Define cali_mat2
cali_mat2 = np.zeros([nwl, nwl])
for instr in all_instr:
instr_bands = [bi for bi in range(nwl) if
band_instr[bands[bi]] == instr]
for bi in instr_bands:
cali_mat2[bi, bi] += cru[bands[bi]]
for bj in instr_bands:
cali_mat2[bi, bj] += cau[instr]
cali_mat2 = cali_mat2**2
#
# Reading calibration #
#
if import_beta:
beta_in = np.round_(beta_in, 2)
beta_unique = np.unique(beta_in[diskmask])
assert len(beta_unique) < 100 # Please not too many...
kappa160s = {}
for b in beta_unique:
fn = 'hdf5_MBBDust/Calibration_' + str(round(b, 2)) + '.h5'
try:
with h5py.File(fn, 'r') as hf:
grp = hf[method_abbr]
kappa160s[b] = grp['kappa160'][()]
except (KeyError, NameError, OSError):
print('This method is not calibrated yet!!',
'Starting calibration...')
kc_old(method_abbr, beta_f=b, lambdac_f=lambdac_f,
nop=nop)
with h5py.File(fn, 'r') as hf:
grp = hf[method_abbr]
kappa160s[b] = grp['kappa160'][()]
else:
fn = 'hdf5_MBBDust/Calibration_' + str(round(beta_f, 2)) + '.h5'
try:
with h5py.File(fn, 'r') as hf:
grp = hf[method_abbr]
kappa160 = grp['kappa160'][()]
except (KeyError, NameError, OSError):
print('This method is not calibrated yet!!',
'Starting calibration...')
kappa_calibration(method_abbr, beta_f=beta_f, lambdac_f=lambdac_f,
nop=nop)
with h5py.File(fn, 'r') as hf:
grp = hf[method_abbr]
kappa160 = grp['kappa160'][()]
#
""" Read HERSCHEL SED and diskmask """
#
betastr = 'free' if method_abbr == 'SE' else str(round(beta_f, 2))
longname = name + ' ' + method_abbr + '.beta=' + betastr + ' ' + notes
#
print('################################################')
print(longname + ' fitting (' + time.ctime() + ')')
print('################################################')
# Dust density in Solar Mass / pc^2
# kappa_lambda in cm^2 / g
# SED in MJy / sr
if better_bkgcov is None:
# implement outlier rejection
outliermask = np.zeros_like(bkgmask, dtype=bool)
for i in range(nwl):
AD = np.abs(sed[:, :, i] - np.median(sed[bkgmask][i]))
MAD = np.median(AD[bkgmask])
with np.errstate(invalid='ignore'):
outliermask += AD > 3 * MAD
bkgmask = bkgmask * (~outliermask)
new_bkgmask = bkgmask * (~outliermask)
# assert np.sum(bkgmask) > 10
bkgcov = np.cov(sed[new_bkgmask].T)
else:
bkgcov = better_bkgcov
#
if galactic_integrated:
# bkgcov = better_bkgcov
bkgcov = np.zeros([nwl, nwl]) # Power law approximation
if input_avg_SED:
sed_avg = np.array(avg_SED)
else:
sed_avg = np.array([np.mean(sed[:, :, i][diskmask]) for i in
range(nwl)])
sed = sed_avg.reshape(1, 1, nwl)
diskmask = np.ones([1, 1]).astype(int)
list_shape = [1, 1]
"""
spire500_beamsize = 1804.31
ctr = np.array(list_shape) // 2
#
num_pix_inte = np.sum(diskmask)
sed_avg = np.array([np.mean(sed[:, :, i][diskmask]) for i in
range(nwl)])
sed = sed_avg.reshape(1, 1, nwl)
diskmask = np.ones([1, 1]).astype(int)
list_shape = [1, 1]
#
ps = np.zeros(2)
w = WCS(hdr, naxis=2)
xs, ys = \
w.wcs_pix2world([ctr[0] - 1, ctr[0] + 1, ctr[0], ctr[0]],
[ctr[1], ctr[1], ctr[1] - 1, ctr[1] + 1], 1)
ps[0] = np.abs(xs[0] - xs[1]) / 2 * \
np.cos(Angle((ys[0] + ys[1]) * u.deg).rad / 2)
ps[1] = np.abs(ys[3] - ys[2]) / 2
ps *= u.degree.to(u.arcsec)
ps = np.mean(ps)
resolution_element = np.pi * (FWHM['SPIRE_500'] / 2)**2 / ps**2
num_res = num_pix_inte / resolution_element
if num_res > 1:
bkgcov /= num_res
"""
#
""" Voronoi binning """
#
if Voronoi:
assert False
#
""" Build or load SED models """
# Should be a for loop or something like that here
if import_beta:
modelss = {}
for be in beta_unique:
models = []
if del_model:
for b in bands:
fn = 'models/' + b + '_' + method_abbr + '.beta=' + \
str(round(be, 2)) + '.fits.gz'
if os.path.isfile(fn):
os.remove(fn)
for b in bands:
fn = 'models/' + b + '_' + method_abbr + '.beta=' + \
str(round(be, 2)) + '.fits.gz'
if not os.path.isfile(fn):
if method_abbr in ['SE']:
filelist = os.listdir('models')
new_fn = ''
for f in filelist:
temp = f.split('_')
if len(temp) > 1:
if (temp[0] == b) and \
(temp[1][:2] == method_abbr):
new_fn = f
break
if new_fn == '':
models_creation(method_abbr, be, lambdac_f,
band_instr[b], kappa160s[be], nop)
else:
fn = new_fn
else:
models_creation(method_abbr, be, lambdac_f,
band_instr[b], kappa160s[be], nop)
models.append(fits.getdata(fn))
models = np.array(models)
models = np.moveaxis(models, 0, -1)
modelss[be] = models
else:
models = []
if del_model:
for b in bands:
fn = 'models/' + b + '_' + method_abbr + '.beta=' + \
str(round(beta_f, 2)) + '.fits.gz'
if os.path.isfile(fn):
os.remove(fn)
for b in bands:
fn = 'models/' + b + '_' + method_abbr + '.beta=' + \
str(round(beta_f, 2)) + '.fits.gz'
if not os.path.isfile(fn):
if method_abbr in ['SE']:
filelist = os.listdir('models')
new_fn = ''
for f in filelist:
temp = f.split('_')
if len(temp) > 1:
if (temp[0] == b) and (temp[1][:2] == method_abbr):
new_fn = f
break
if new_fn == '':
models_creation(method_abbr, beta_f, lambdac_f,
band_instr[b], kappa160, nop)
else:
fn = new_fn
else:
models_creation(method_abbr, beta_f, lambdac_f,
band_instr[b], kappa160, nop)
models.append(fits.getdata(fn))
models = np.array(models)
models = np.moveaxis(models, 0, -1)
#
""" Real fitting starts """
#
axis_id = axis_ids[method_abbr]
#
temp_log = np.full([3] + list_shape, np.nan)
temp_linear = np.full([2] + list_shape, np.nan)
recovered_sed = np.full([nwl] + list_shape, np.nan)
chi2_map = np.full(sed.shape[:2], np.nan)
v_map, v_min, v_max = {}, {}, {}
for p in parameters[method_abbr]:
v_map[p] = np.full_like(temp_log, np.nan) if v_prop[p][0] else \
np.full_like(temp_linear, np.nan)
v_min[p] = np.full(list_shape, np.nan)
v_max[p] = np.full(list_shape, np.nan)
del temp_log, temp_linear
#
if (method_abbr == 'PL') and galactic_integrated:
logSigmads, alphas, gammas, Umins = \
np.meshgrid(v_prop['dust.surface.density'][1],
v_prop['alpha'][1],
10**v_prop['gamma'][1],
10**v_prop['logUmin'][1])
logUbars = np.log10((1 - gammas) * Umins + gammas * Umins *
np.log(10**3 / Umins) / (1 - Umins / 10**3))
idx = np.argsort(logUbars.flatten())
logUbars_sorted = logUbars.flatten()[idx]
logUbar_map = np.full(list_shape, np.nan)
logUbar_min = np.full(list_shape, np.nan)
logUbar_max = np.full(list_shape, np.nan)
del logSigmads, alphas, gammas, Umins, logUbars
#
if save_pdfs:
v_pdf = {}
for p in parameters[method_abbr]:
v_pdf[p] = np.full([len(v_prop[p][1])] + list_shape, np.nan)
if rand_cube:
v_real = {}
for p in parameters[method_abbr]:
v_real[p] = np.full([randcubesize] + list_shape, np.nan)
#
# Pre fitting variable definitions
#
if Voronoi:
pass # haha...
else:
steps = np.arange(diskmask.size)[diskmask.flatten() == 1]
total_steps = len(steps)
    def mp_fitting(mpid, mp_var, mp_pdf, mp_rec_sed, mp_chi2, mp_realcube,
                   mp_logUbar):
        """Worker for one of `nop` parallel pixel-fitting processes.

        Process `mpid` handles the contiguous slice [qi, qf) of the
        flattened pixel list `steps` and writes its results into the
        shared Manager lists passed as the mp_* arguments, indexed by the
        global step number q.  All fitting inputs (sed, bkgcov,
        cali_mat2, models, v_prop, parameters, axis_id, ...) are read
        from the enclosing scope.
        """
        qi = int(total_steps * mpid / nop)
        qf = int(total_steps * (mpid + 1) / nop)
        progress = 0.0
        for q in range(qi, qf):
            # Print a progress line roughly every 10% of this worker's slice.
            if (q - qi) / (qf - qi) > progress:
                print('  --mpid', mpid, 'at', str(int(progress * 100)) +
                      '% (' + time.ctime() + ')')
                progress += 0.1
            k = steps[q]
            if Voronoi:
                pass  # haha...
            else:
                # Map the flat step index back to 2-d map coordinates.
                i, j = np.unravel_index(k, list_shape)
            #
            """ Calculate covariance matrix """
            #
            # Per-pixel covariance: calibration term scales with the SED
            # itself; invert (bkg + cal) once per pixel.
            sed_vec = sed[i, j].reshape(1, nwl)
            calcov = sed_vec.T * cali_mat2 * sed_vec
            cov_n1 = np.linalg.inv(bkgcov + calcov)
            #
            """ Calculate chi^2 values """
            #
            if import_beta:
                diff = modelss[beta_in[i, j]] - sed[i, j]
            else:
                diff = models - sed[i, j]
            # chi2 over the whole model grid: diff^T . cov^-1 . diff per
            # grid point, done with batched matmuls.
            shape0 = list(diff.shape)[:-1]
            shape1 = shape0 + [1, nwl]
            shape2 = shape0 + [nwl, 1]
            chi2 = np.matmul(np.matmul(diff.reshape(shape1), cov_n1),
                             diff.reshape(shape2)).reshape(shape0)
            if np.any(chi2 < 0):
                # Negative chi2 indicates a non-positive-definite
                # covariance; dump diagnostics for this worker.
                mask = chi2 < 0
                print(mpid, 'chi2 < 0!!!\n',
                      mpid, 'chi2 =', chi2[mask][0], '\n',
                      mpid, 'diff =', diff[mask][0], '\n',
                      mpid, 'model =', models[mask][0], '\n',
                      mpid, 'SED =', sed[i, j], '\n',
                      mpid, 'cov_n1 =', cov_n1)
            pr = np.exp(-0.5 * chi2)
            #
            """ Save fitting results """
            #
            _vars = []
            _pdfs = []
            for p in parameters[method_abbr]:
                # Marginalize the likelihood onto each parameter axis and
                # extract expectation value / error / min-max.
                temp_pdf = normalize_pdf(pr, axis_id[p])
                _vars.append(best_fit_and_error(v_prop[p][1], temp_pdf,
                                                islog=v_prop[p][0],
                                                minmax=True))
                if save_pdfs:
                    _pdfs.append(temp_pdf)
            mp_var[q] = _vars
            """ Save Ubar """
            if (method_abbr == 'PL') and galactic_integrated:
                # logUbars_sorted / idx come from the enclosing scope;
                # pr is reordered the same way to compute the expectation
                # and the 16th/84th percentiles of log Ubar.
                pr_sorted = pr.flatten()[idx] / np.sum(pr)
                # exp
                pexp = np.sum(logUbars_sorted * pr_sorted)
                # 1684
                csp = np.cumsum(pr_sorted)
                csp = csp / csp[-1]
                p16, p84 = np.interp([0.16, 0.84], csp, logUbars_sorted)
                _logubar = [pexp, p16, p84]
                mp_logUbar[q] = _logubar
                del pr_sorted
            """ Save PDFs """
            if save_pdfs:
                mp_pdf[q] = _pdfs
            """ Save randomly chosen randcubesize points from cube """
            if rand_cube:
                # realization pool
                rs = np.random.choice(np.arange(chi2.size), randcubesize,
                                      replace=True,
                                      p=pr.flatten() / np.sum(pr))
                npara = len(parameters[method_abbr])
                _randcube = np.full([npara, randcubesize], np.nan)
                for ri in range(randcubesize):
                    coor = np.unravel_index(rs[ri], chi2.shape)
                    for pi in range(npara):
                        p = parameters[method_abbr][pi]
                        _randcube[pi, ri] = v_prop[p][1][coor[axis_id[p]]]
                """ Save the cube """
                mp_realcube[q] = _randcube
            #
            """ Recover SED from best fit. Save SED and chi^2 values """
            #
            # Evaluate the chosen emission model at the best-fit
            # parameters; _vars[k][0] is the expectation value of the
            # k-th parameter in parameters[method_abbr].
            if import_beta:
                if method_abbr == 'SE':
                    rec_sed = SEMBB(wl, _vars[0][0], _vars[1][0], _vars[2][0],
                                    kappa160=kappa160s[beta_in[i, j]])
                elif method_abbr == 'FB':
                    rec_sed = SEMBB(wl, _vars[0][0], _vars[1][0],
                                    beta_in[i, j],
                                    kappa160=kappa160s[beta_in[i, j]])
                elif method_abbr == 'BE':
                    rec_sed = BEMBB(wl, _vars[0][0], _vars[1][0],
                                    beta_in[i, j], lambdac_f, _vars[2][0],
                                    kappa160=kappa160s[beta_in[i, j]])
                elif method_abbr == 'WD':
                    rec_sed = WD(wl, _vars[0][0], _vars[1][0], beta_in[i, j],
                                 _vars[2][0],
                                 kappa160=kappa160s[beta_in[i, j]])
                elif method_abbr == 'PL':
                    rec_sed = PowerLaw(wl, _vars[0][0], _vars[1][0],
                                       _vars[2][0], _vars[3][0],
                                       beta=beta_in[i, j],
                                       kappa160=kappa160s[beta_in[i, j]])
            else:
                if method_abbr == 'SE':
                    rec_sed = SEMBB(wl, _vars[0][0], _vars[1][0], _vars[2][0],
                                    kappa160=kappa160)
                elif method_abbr == 'FB':
                    rec_sed = SEMBB(wl, _vars[0][0], _vars[1][0], beta_f,
                                    kappa160=kappa160)
                elif method_abbr == 'BE':
                    rec_sed = BEMBB(wl, _vars[0][0], _vars[1][0], beta_f,
                                    lambdac_f, _vars[2][0], kappa160=kappa160)
                elif method_abbr == 'WD':
                    rec_sed = WD(wl, _vars[0][0], _vars[1][0], beta_f,
                                 _vars[2][0], kappa160=kappa160)
                elif method_abbr == 'PL':
                    rec_sed = PowerLaw(wl, _vars[0][0], _vars[1][0],
                                       _vars[2][0], _vars[3][0], beta=beta_f,
                                       kappa160=kappa160)
            mp_rec_sed[q] = rec_sed
            # chi^2 of the recovered (best-fit) SED against the data.
            diff = rec_sed - sed[i, j]
            shape1 = [1, nwl]
            shape2 = [nwl, 1]
            chi2 = np.matmul(np.matmul(diff.reshape(shape1), cov_n1),
                             diff.reshape(shape2)).reshape(1)
            mp_chi2[q] = chi2
print("Start fitting", longname, "dust surface density... (" +
time.ctime() + ')')
print("Total steps:", total_steps)
print("Total number of cores:", nop)
mp_chi2 = mp.Manager().list([0.] * total_steps)
mp_logUbar = mp.Manager().list([0., 0., 0.] * total_steps)
mp_rec_sed = mp.Manager().list([[0., 0., 0., 0., 0.]] * total_steps)
mp_var = mp.Manager().list([[0., 0., 0.]] * total_steps)
mp_pdf = mp.Manager().list([[0., 0., 0.]] * total_steps) \
if save_pdfs else -1
mp_realcube = mp.Manager().list([[0., 0., 0.]] * total_steps) \
if rand_cube else -1
processes = [mp.Process(target=mp_fitting,
args=(mpid, mp_var, mp_pdf, mp_rec_sed, mp_chi2,
mp_realcube, mp_logUbar))
for mpid in range(nop)]
for p in processes:
p.start()
for p in processes:
p.join()
del processes
print("Fitting finished. Start loading results from cores.")
progress = 0
for q in range(total_steps):
if (q + 1) / total_steps > progress:
print(' --', str(int(progress * 100)) +
'% (' + time.ctime() + ')')
progress += 0.1
k = steps[q]
i, j = np.unravel_index(k, diskmask.shape)
rid = 0
for p in parameters[method_abbr]:
v_map[p][0, i, j] = mp_var[q][rid][0]
v_min[p][i, j] = mp_var[q][rid][1]
v_max[p][i, j] = mp_var[q][rid][2]
rid += 1
#
""" Save Ubar """
if (method_abbr == 'PL') and galactic_integrated:
logUbar_map[i, j] = mp_logUbar[q][0]
logUbar_min[i, j] = mp_logUbar[q][1]
logUbar_max[i, j] = mp_logUbar[q][2]
#
""" Save PDFs """
#
rid = 0
if save_pdfs:
for p in parameters[method_abbr]:
v_pdf[p][:, i, j] = mp_pdf[q][rid]
rid += 1
#
""" Save the randomly selected cube """
#
rid = 0
if rand_cube:
for p in parameters[method_abbr]:
v_real[p][:, i, j] = mp_realcube[q][rid]
rid += 1
#
""" Recover SED from best fit. Save SED and chi^2 values """
#
recovered_sed[:, i, j] = mp_rec_sed[q]
chi2_map[i, j] = mp_chi2[q]
# build error map
for p in parameters[method_abbr]:
if v_prop[p][0]: # log case
v_map[p][1] = np.max([np.log10(v_max[p]) - np.log10(v_map[p][0]),
np.log10(v_map[p][0]) - np.log10(v_min[p])],
axis=0)
v_map[p][2] = np.max([v_max[p] - v_map[p][0],
v_map[p][0] - v_min[p]], axis=0)
else: # linear case
v_map[p][1] = np.max([v_max[p] - v_map[p][0],
v_map[p][0] - v_min[p]], axis=0)
print("Loading finished.")
#
outputpath = 'Projects/' + project_name + '/'
if not os.path.isdir(outputpath):
os.mkdir(outputpath)
outputpath += name + '/'
if not os.path.isdir(outputpath):
os.mkdir(outputpath)
if subdir is not None:
outputpath += subdir + '/'
if not os.path.isdir(outputpath):
os.mkdir(outputpath)
#
fnhead = outputpath + name + '_'
fnend = '_' + method_abbr + '.beta=' + betastr + '_' + notes + '.fits'
os.system('find ' + outputpath + ' -name "*.gz" -delete')
os.system('find ' + outputpath + ' -name "*.png" -delete')
#
if galactic_integrated:
df = pd.DataFrame()
for bi in range(nwl):
df[bands[bi]] = [sed[0, 0, bi]]
for p in parameters[method_abbr]:
df[p] = [v_map[p][0, 0, 0]]
df[p + '.err'] = [v_map[p][1, 0, 0]]
df[p + '.max'] = [v_max[p][0, 0]]
df[p + '.min'] = [v_min[p][0, 0]]
if method_abbr == 'PL':
df['Tbar'] = 18 * 10**(logUbar_map[0, 0] / (4 + beta_f))
df['Tbar.max'] = 18 * 10**(logUbar_max[0, 0] / (4 + beta_f))
df['Tbar.min'] = 18 * 10**(logUbar_min[0, 0] / (4 + beta_f))
fn = fnhead + 'integrated.csv'
df.to_csv(fn, index=False)
#
for p in parameters[method_abbr]:
if save_pdfs:
hdr2 = hdr.copy()
hdr2['NAXIS'] = 3
hdr2['NAXIS3'] = v_pdf[p].shape[0]
hdr2['BUNIT'] = ''
t1, t2, t3 = round(grid_para[p][0], 1), \
round(grid_para[p][1], 1), round(grid_para[p][2], 3)
hdr2['PDF_MIN'] = t1
hdr2['PDF_MAX'] = t2
hdr2.comments['PDF_MAX'] = 'EXCLUSIVE'
hdr2['PDF_STEP'] = t3
if v_prop[p][0]: # log case
hdr2.comments['PDF_MIN'] = 'IN LOG.'
hdr2.comments['PDF_MAX'] = 'IN LOG. EXCLUSIVE.'
hdr2.comments['PDF_STEP'] = 'IN LOG. EXCLUSIVE.'
hdr2['PDFARRAY'] = '10**np.arange(' + str(t1) + ', ' + \
str(t2) + ', ' + str(t3) + ')'
else: # linear case
hdr2.comments['PDF_MIN'] = 'LINEAR.'
hdr2.comments['PDF_MAX'] = 'LINEAR. EXCLUSIVE.'
hdr2.comments['PDF_STEP'] = 'LINEAR. EXCLUSIVE.'
hdr2['PDFARRAY'] = 'np.arange(' + str(t1) + ', ' + \
str(t2) + ', ' + str(t3) + ')'
fn = fnhead + p + '.pdf' + fnend
save_fits_gz(fn, v_pdf[p], hdr2)
#
if rand_cube:
hdr2 = hdr.copy()
hdr2['NAXIS'] = 3
hdr2['NAXIS3'] = randcubesize
hdr2['BUNIT'] = v_prop[p][2][0]
fn = fnhead + p + '.rlcube' + fnend
if v_prop[p][0]: # log case
with np.errstate(invalid='ignore'):
save_fits_gz(fn, 10**v_real[p], hdr2)
else: # linear case
save_fits_gz(fn, v_real[p], hdr2)
else:
for p in parameters[method_abbr]:
hdr2 = hdr.copy()
hdr2['NAXIS'] = 3
hdr2['NAXIS3'] = 3 if v_prop[p][0] else 2
hdr2['PLANE0'] = 'Expectation value (linear)'
if v_prop[p][0]: # log case
hdr2['PLANE1'] = 'Error map (dex)'
hdr2['PLANE2'] = 'Error map (linear)'
else: # linear case
hdr2['PLANE1'] = 'Error map'
hdr2['BUNIT'] = v_prop[p][2][0]
fn = fnhead + p + fnend
save_fits_gz(fn, v_map[p], hdr2)
#
hdr2 = hdr.copy()
hdr2['NAXIS'] = 2
hdr2['PLANE0'] = 'Maximum possible value (linear)'
hdr2['BUNIT'] = v_prop[p][2][0]
fn = fnhead + p + '.max' + fnend
save_fits_gz(fn, v_max[p], hdr2)
hdr2['PLANE0'] = 'Minimum possible value (linear)'
fn = fnhead + p + '.min' + fnend
save_fits_gz(fn, v_min[p], hdr2)
#
if save_pdfs:
hdr2 = hdr.copy()
hdr2['NAXIS'] = 3
hdr2['NAXIS3'] = v_pdf[p].shape[0]
hdr2['BUNIT'] = ''
t1, t2, t3 = round(grid_para[p][0], 1), \
round(grid_para[p][1], 1), round(grid_para[p][2], 3)
hdr2['PDF_MIN'] = t1
hdr2['PDF_MAX'] = t2
hdr2.comments['PDF_MAX'] = 'EXCLUSIVE'
hdr2['PDF_STEP'] = t3
if v_prop[p][0]: # log case
hdr2.comments['PDF_MIN'] = 'IN LOG.'
hdr2.comments['PDF_MAX'] = 'IN LOG. EXCLUSIVE.'
hdr2.comments['PDF_STEP'] = 'IN LOG. EXCLUSIVE.'
hdr2['PDFARRAY'] = '10**np.arange(' + str(t1) + ', ' + \
str(t2) + ', ' + str(t3) + ')'
else: # linear case
hdr2.comments['PDF_MIN'] = 'LINEAR.'
hdr2.comments['PDF_MAX'] = 'LINEAR. EXCLUSIVE.'
hdr2.comments['PDF_STEP'] = 'LINEAR. EXCLUSIVE.'
hdr2['PDFARRAY'] = 'np.arange(' + str(t1) + ', ' + \
str(t2) + ', ' + str(t3) + ')'
fn = fnhead + p + '.pdf' + fnend
save_fits_gz(fn, v_pdf[p], hdr2)
#
if rand_cube:
hdr2 = hdr.copy()
hdr2['NAXIS'] = 3
hdr2['NAXIS3'] = randcubesize
hdr2['BUNIT'] = v_prop[p][2][0]
fn = fnhead + p + '.rlcube' + fnend
if v_prop[p][0]: # log case
with np.errstate(invalid='ignore'):
save_fits_gz(fn, 10**v_real[p], hdr2)
else: # linear case
save_fits_gz(fn, v_real[p], hdr2)
#
hdr2 = hdr.copy()
hdr2['NAXIS'] = 2
hdr2['PLANE0'] = 'chi-2 map'
hdr2['BUNIT'] = ''
fn = fnhead + 'chi2' + fnend
save_fits_gz(fn, chi2_map, hdr2)
#
hdr2 = hdr.copy()
hdr2['NAXIS'] = 2
hdr2['PLANE0'] = 'bkg mask'
hdr2['BUNIT'] = ''
fn = fnhead + 'bkgmask' + fnend
save_fits_gz(fn, bkgmask.astype(int), hdr2)
#
print(longname, "Datasets saved.")
def models_creation(method_abbr, beta_f, lambdac_f, instr, kappa160, nop):
    """Build and cache RSRF-convolved model SEDs for one instrument.

    The model grid of `method_abbr` is evaluated at every wavelength of
    the instrument's relative spectral response function (RSRF) table in
    `nop` parallel processes, integrated against each band's response,
    and saved to 'models/<band>_<method_abbr>.beta=<beta_f>.fits' via
    `save_fits_gz`.

    Parameters
    ----------
    method_abbr : str
        Fitting method: 'SE', 'FB', 'BE', 'WD' or 'PL'.
    beta_f : float
        Fixed emissivity index used by the fixed-beta methods.
    lambdac_f : float
        Break wavelength for the 'BE' (broken emissivity) model.
    instr : str
        Instrument name: 'spire', 'pacs' or 'mips'.
    kappa160 : float
        Dust opacity at 160 um passed to the model functions.
    nop : int
        Number of parallel worker processes.

    Raises
    ------
    ValueError
        If `instr` or `method_abbr` is not recognized (previously this
        surfaced much later as a NameError).
    """
    if instr == 'spire':
        bands = ['spire250', 'spire350', 'spire500']
    elif instr == 'pacs':
        bands = ['pacs100', 'pacs160']
    elif instr == 'mips':
        bands = ['mips160']
    else:
        raise ValueError("Unknown instr: " + repr(instr))
    betas, lambdacs = beta_f, lambdac_f
    # Placeholders so every grid name exists regardless of the branch below.
    Tds, beta2s, wdfracs, alphas, loggammas, logUmins = 0, 0, 0, 0, 0, 0
    if method_abbr == 'PL':
        logSigmads, alphas, loggammas, logUmins = \
            np.meshgrid(v_prop['dust.surface.density'][1],
                        v_prop['alpha'][1],
                        v_prop['gamma'][1],
                        v_prop['logUmin'][1])

        def fitting_model(wl):
            return PowerLaw(wl, 10**logSigmads, alphas, 10**loggammas,
                            logUmins, beta=betas, kappa160=kappa160)
    elif method_abbr == 'SE':
        logSigmads, Tds, betas = \
            np.meshgrid(v_prop['dust.surface.density'][1],
                        v_prop['dust.temperature'][1],
                        v_prop['beta'][1])

        def fitting_model(wl):
            return SEMBB(wl, 10**logSigmads, Tds, betas,
                         kappa160=kappa160)
    elif method_abbr == 'FB':
        logSigmads, Tds = \
            np.meshgrid(v_prop['dust.surface.density'][1],
                        v_prop['dust.temperature'][1])

        def fitting_model(wl):
            return SEMBB(wl, 10**logSigmads, Tds, betas,
                         kappa160=kappa160)
    elif method_abbr == 'BE':
        logSigmads, Tds, beta2s = \
            np.meshgrid(v_prop['dust.surface.density'][1],
                        v_prop['dust.temperature'][1],
                        v_prop['beta2'][1])

        def fitting_model(wl):
            return BEMBB(wl, 10**logSigmads, Tds, betas, lambdacs, beta2s,
                         kappa160=kappa160)
    elif method_abbr == 'WD':
        logSigmads, Tds, wdfracs = \
            np.meshgrid(v_prop['dust.surface.density'][1],
                        v_prop['dust.temperature'][1],
                        v_prop['warm.dust.fraction'][1])

        def fitting_model(wl):
            return WD(wl, 10**logSigmads, Tds, betas, wdfracs,
                      kappa160=kappa160)
    else:
        raise ValueError("Unknown method_abbr: " + repr(method_abbr))
    print("  --Constructing", instr, "RSRF model... (" + time.ctime() + ")")
    _rsrf = pd.read_csv("data/RSRF/" + instr + "_rsrf.csv")
    _wl = _rsrf['wavelength'].values

    def mp_models_creation(mpid, mp_model, _wl):
        # Worker: evaluate the model grid at this process's share of the
        # RSRF wavelengths, writing into the shared Manager list mp_model.
        qi = int(len(_wl) * mpid / nop)
        qf = int(len(_wl) * (mpid + 1) / nop)
        progress = 0.0
        for q in range(qi, qf):
            if (q - qi) / (qf - qi) > progress:
                print('  --mpid', mpid, 'at', str(int(progress * 100)) +
                      '% (' + time.ctime() + ')')
                progress += 0.1
            w = _wl[q]
            mp_model[q] = fitting_model(w)
    mp_model = mp.Manager().list([0.] * len(_wl))
    processes = [mp.Process(target=mp_models_creation,
                            args=(mpid, mp_model, _wl)) for mpid in range(nop)]
    for p in processes:
        p.start()
    for p in processes:
        p.join()
    del processes
    #
    print("Fitting finished. Start loading results from cores.")
    h_models = np.zeros(list(logSigmads.shape) + [len(_wl)])
    # BUG FIX: `progress` used to be reset to 0.0 inside this loop, so the
    # "0%" line was printed once per wavelength. Initialize it once so the
    # progress report is printed roughly every 10%, as in the other loops.
    progress = 0.0
    for q in range(len(_wl)):
        if (q + 1) / len(_wl) > progress:
            print('  --', str(int(progress * 100)) +
                  '% (' + time.ctime() + ')')
            progress += 0.1
        h_models[..., q] = mp_model[q]
    del mp_model
    #
    for b in bands:
        print("Calculating", b, "RSRF.")
        rsps = _rsrf[b].values
        # Band-averaged model: response-weighted integral, normalized by
        # the response weighted to the band's reference wavelength.
        models = \
            np.sum(h_models * rsps, axis=-1) / \
            np.sum(rsps * _wl / band_wl[b])
        fn = 'models/' + b + '_' + method_abbr + '.beta=' + \
            str(round(beta_f, 2)) + '.fits'
        save_fits_gz(fn, models, None)
    print("Models saved.")
def kappa_calibration(method_abbr, beta_f, lambdac_f=300.0,
                      nop=6, quiet=True):
    """Calibrate kappa160 for one fitting method against the MW reference SED.

    Fits the Milky-Way reference SED with the model grid of
    `method_abbr` ('SE', 'FB', 'BE', 'WD' or 'PL'), refining the grid in
    a second iteration, then stores the best-fit kappa160 and model
    parameters in 'hdf5_MBBDust/Calibration_<beta>.h5' and saves a
    comparison plot of the fitted and literature models.

    Parameters
    ----------
    method_abbr : str
        Fitting-method abbreviation: 'SE', 'FB', 'BE', 'WD' or 'PL'.
    beta_f : float
        Fixed emissivity index used by the fixed-beta methods.
    lambdac_f : float, optional
        Break wavelength [um] for the 'BE' model.
    nop : int, optional
        Number of parallel processes for the RSRF convolution.
    quiet : bool, optional
        Suppress progress printing when True.

    Raises
    ------
    NotImplementedError
        Always, for now: the routine has not been updated to the current
        variable conventions.  (This guard was `assert False`, which is
        silently stripped under ``python -O``.)
    """
    raise NotImplementedError("Haven't corrected the variables yet")
    MWSED = np.array([0.71, 1.53, 1.08, 0.56, 0.25]) * 0.97
    bands_small = np.array(['pacs100', 'pacs160', 'spire250', 'spire350',
                            'spire500'])
    nwl = len(bands_small)
    # BUG FIX: np.array(<generator>) builds a 0-d object array wrapping the
    # generator itself; list comprehensions yield the intended 1-d arrays.
    wl = np.array([band_wl[b] for b in bands_small])
    bands = np.array([band_cap[b] for b in bands_small])
    # Correct mode should be 100-Sum_square with Fixen values
    print('################################################')
    print('    Calibrating ' + method_abbr + ' (' + time.ctime() + ')')
    print('################################################')
    # Coarse first-pass grid (step / min / max per parameter); refined
    # around the first-iteration best fit before the second pass.
    logSigmad_step = 0.025
    logSigmad_min = -4.
    logSigmad_max = 1.
    Td_step = 0.5
    Td_min = 5.
    Td_max = 50.
    beta_step = 0.1
    beta_min = -1.0
    beta_max = 4.0
    beta2_step = 0.25
    beta2_min = -1.0
    beta2_max = 4.0
    wdfrac_step = 0.002
    wdfrac_min = 0.0
    wdfrac_max = 0.05
    alpha_step = 0.1  # Remember to avoid alpha==1
    alpha_min = 1.1
    alpha_max = 3.0
    loggamma_step = 0.2
    loggamma_min = -4
    loggamma_max = 0
    logUmin_step = 0.1
    logUmin_min = -2.
    logUmin_max = 1.5
    # Due to memory limit
    if method_abbr == 'PL':
        logSigmad_min = -2.
        logSigmad_max = 0.
        logUmin_min = -1.
        logUmin_max = 0.
        loggamma_max = -1.
    #
    # MW measurement dataset
    #
    # Calibration covariance: decorrelated (D*) and fully-correlated (F*)
    # fractional uncertainties, squared into a relative covariance matrix.
    DCOU = 10.0 / 100.0
    DUNU = 1.0 / 100.0
    FCOU = 2.0 / 100.0
    FUNU = 0.5 / 100.0
    MWcali_mat2 = np.array([[DUNU + DCOU, 0, 0, 0, 0],
                            [0, FCOU + FUNU, FCOU, FCOU, FCOU],
                            [0, FCOU, FCOU + FUNU, FCOU, FCOU],
                            [0, FCOU, FCOU, FCOU + FUNU, FCOU],
                            [0, FCOU, FCOU, FCOU, FCOU + FUNU]])**2
    MWSigmaD = (1e20 * 1.0079 * u.g / N_A.value).to(u.M_sun).value * \
        ((1 * u.pc).to(u.cm).value)**2 / 150.
    #
    # Build fitting grid
    #
    for iter_ in range(2):
        logSigmads_1d = np.arange(logSigmad_min, logSigmad_max, logSigmad_step)
        betas = beta_f
        if method_abbr == 'PL':
            alphas_1d = np.arange(alpha_min, alpha_max, alpha_step)
            logUmins_1d = np.arange(logUmin_min, logUmin_max, logUmin_step)
            loggammas_1d = np.arange(loggamma_min, loggamma_max, loggamma_step)
            logSigmads, alphas, loggammas, logUmins = \
                np.meshgrid(logSigmads_1d, alphas_1d, loggammas_1d,
                            logUmins_1d)
        else:
            Tds_1d = np.arange(Td_min, Td_max, Td_step)
            if method_abbr == 'SE':
                betas_1d = np.arange(beta_min, beta_max, beta_step)
                logSigmads, Tds, betas = np.meshgrid(logSigmads_1d, Tds_1d,
                                                     betas_1d)
            elif method_abbr == 'FB':
                logSigmads, Tds = np.meshgrid(logSigmads_1d, Tds_1d)
            elif method_abbr == 'BE':
                beta2s_1d = np.arange(beta2_min, beta2_max, beta2_step)
                logSigmads, Tds, beta2s = \
                    np.meshgrid(logSigmads_1d, Tds_1d, beta2s_1d)
                lambdacs = np.full(Tds.shape, lambdac_f)
            elif method_abbr == 'WD':
                wdfracs_1d = np.arange(wdfrac_min, wdfrac_max, wdfrac_step)
                logSigmads, Tds, wdfracs = np.meshgrid(logSigmads_1d, Tds_1d,
                                                       wdfracs_1d)
        sigmas = 10**logSigmads
        #
        # Build models
        #
        # (A dead `models = np.zeros(...)` used to sit here; it was always
        # overwritten below before first use and has been removed.)
        # Applying RSRFs to generate fake-observed models
        if method_abbr in ['BEMFB', 'BE']:
            def fitting_model(wl):
                return BEMBB(wl, sigmas, Tds, betas, lambdacs, beta2s,
                             kappa160=1.)
        elif method_abbr in ['WD']:
            def fitting_model(wl):
                return WD(wl, sigmas, Tds, betas, wdfracs,
                          kappa160=1.)
        elif method_abbr in ['PL']:
            def fitting_model(wl):
                return PowerLaw(wl, sigmas, alphas, 10**loggammas,
                                logUmins, beta=betas, kappa160=1.)
        else:
            def fitting_model(wl):
                return SEMBB(wl, sigmas, Tds, betas,
                             kappa160=1.)

        def split_herschel(ri, r_, rounds, _wl, wlr, output):
            # Worker: evaluate the model grid on its slice of the RSRF
            # wavelengths and push (ri, rw, result) onto the queue.
            # NOTE: time.clock() was removed in Python 3.8; use the
            # monotonic perf_counter for elapsed-time reporting.
            tic = time.perf_counter()
            rw = ri + r_ * nop
            lenwls = wlr[rw + 1] - wlr[rw]
            last_time = time.perf_counter()
            result = np.zeros(list(logSigmads.shape) + [lenwls])
            if not quiet:
                print("  --process", ri, "starts... (" + time.ctime() +
                      ") (round", (r_ + 1), "of", str(rounds) + ")")
            for i in range(lenwls):
                result[..., i] = fitting_model(_wl[i + wlr[rw]])
                current_time = time.perf_counter()
                # print progress every 10 mins
                if (current_time > last_time + 600.) and (not quiet):
                    last_time = current_time
                    print("  --process", ri,
                          str(round(100. * (i + 1) / lenwls, 1)) +
                          "% Done. (round", (r_ + 1), "of", str(rounds) + ")")
            output.put((ri, rw, result))
            if not quiet:
                print("  --process", ri, "Done. Elapsed time:",
                      round(time.perf_counter() - tic, 3),
                      "s. (" + time.ctime() + ")")
        models = np.zeros(list(logSigmads.shape) + [nwl])
        timeout = 1e-6
        # Applying RSRFs to generate fake-observed models
        instrs = ['PACS', 'SPIRE']
        parallel_rounds = {'SE': 3, 'FB': 1, 'BE': 3, 'WD': 3, 'PL': 12}
        rounds = parallel_rounds[method_abbr]
        for instr in range(2):
            if not quiet:
                print("  --Constructing", instrs[instr], "RSRF model... (" +
                      time.ctime() + ")")
            ttic = time.perf_counter()
            _rsrf = pd.read_csv("data/RSRF/" + instrs[instr] + "_RSRF.csv")
            _wl = _rsrf['Wavelength'].values
            h_models = np.zeros(list(logSigmads.shape) + [len(_wl)])
            wlr = [int(ri * len(_wl) / float(nop * rounds)) for ri in
                   range(nop * rounds + 1)]
            if instr == 0:
                rsps = [_rsrf['PACS_100'].values,
                        _rsrf['PACS_160'].values]
                range_ = range(0, 2)
            elif instr == 1:
                rsps = [[], [], _rsrf['SPIRE_250'].values,
                        _rsrf['SPIRE_350'].values,
                        _rsrf['SPIRE_500'].values]
                range_ = range(2, 5)
            del _rsrf
            # Parallel code
            for r_ in range(rounds):
                if not quiet:
                    print("\n  --" + method_abbr, instrs[instr] + ":Round",
                          (r_ + 1), "of", rounds, '\n')
                q = mp.Queue()
                processes = [mp.Process(target=split_herschel,
                                        args=(ri, r_, rounds, _wl, wlr, q))
                             for ri in range(nop)]
                for p in processes:
                    p.start()
                for p in processes:
                    # Tiny timeout: results arrive via the queue; q.get()
                    # below blocks until each worker has produced output.
                    p.join(timeout)
                for p in processes:
                    ri, rw, result = q.get()
                    if not quiet:
                        print("  --Got result from process", ri)
                    h_models[..., wlr[rw]:wlr[rw+1]] = result
                    del ri, rw, result
                del processes, q, p
            # Parallel code ends
            if not quiet:
                print("  --Calculating response function integrals")
            for i in range_:
                models[..., i] = \
                    np.sum(h_models * rsps[i], axis=-1) / \
                    np.sum(rsps[i] * _wl / wl[i])
            del _wl, rsps, h_models, range_
            if not quiet:
                print("  --Done. Elapsed time:",
                      round(time.perf_counter() - ttic, 3),
                      "s.\n")
        #
        # Start fitting
        #
        tic = time.perf_counter()
        temp_matrix = np.empty_like(models)
        diff = models - MWSED
        sed_vec = MWSED.reshape(1, 5)
        yerr = MWSED * np.sqrt(np.diagonal(MWcali_mat2))
        cov_n1 = np.linalg.inv(sed_vec.T * MWcali_mat2 * sed_vec)
        for j in range(nwl):
            temp_matrix[..., j] = np.sum(diff * cov_n1[:, j], axis=-1)
        chi2 = np.sum(temp_matrix * diff, axis=-1)
        r_chi2 = chi2 / (nwl - ndims[method_abbr])
        """ Find the (s, t) that gives Maximum likelihood """
        am_idx = np.unravel_index(chi2.argmin(), chi2.shape)
        """ Probability and mask """
        mask = r_chi2 <= np.nanmin(r_chi2) + 50.
        pr = np.exp(-0.5 * chi2)
        print('\nIteration', str(iter_ + 1))
        print('Best fit r_chi^2:', r_chi2[am_idx])
        """ kappa 160 """
        logkappa160s = logSigmads - np.log10(MWSigmaD)
        logkappa160, logkappa160_err, _1, _2, _3, _4 = \
            best_fit_and_error(logkappa160s, pr, 'logkappa_160')
        kappa160 = 10**logkappa160
        logSigmad, _, _1, _2, _3, _4 = \
            best_fit_and_error(logSigmads, pr, 'logSigmads')
        #
        # Refine the grid around the first-pass best fit for iteration 2.
        logSigmad_min = logSigmad - 0.2
        logSigmad_max = logSigmad + 0.2
        # All steps
        logSigmad_step = 0.002
        Td_step = 0.1
        beta_step = 0.02
        beta2_step = 0.02
        wdfrac_step = 0.0005
        alpha_step = 0.01  # Remember to avoid alpha==1
        loggamma_step = 0.1
        logUmin_step = 0.01
        print('Best fit kappa160:', kappa160)
        wl_complete = np.linspace(1, 1000, 1000)
    #
    fn = 'hdf5_MBBDust/Calibration_' + str(round(beta_f, 2)) + '.h5'
    hf = h5py.File(fn, 'a')
    try:
        del hf[method_abbr]
    except KeyError:
        pass
    grp = hf.create_group(method_abbr)
    grp['kappa160'] = kappa160
    grp['logkappa160'], grp['logkappa160_err'] = \
        logkappa160, logkappa160_err
    if method_abbr == 'SE':
        samples = np.array([logkappa160s[mask], Tds[mask], betas[mask],
                            r_chi2[mask]])
        labels = [r'$\log\kappa_{160}$', r'$T$', r'$\beta$',
                  r'$\tilde{\chi}^2$']
        T, T_err, _1, _2, _3, _4 = \
            best_fit_and_error(Tds, pr, 'T')
        beta, beta_err, _1, _2, _3, _4 = \
            best_fit_and_error(betas, pr, 'beta')
        Td_min = T - 1.5
        Td_max = T + 1.5
        beta_min = beta - 0.3
        beta_max = beta + 0.3
        grp['T'], grp['T_err'] = T, T_err
        grp['beta'], grp['beta_err'] = beta, beta_err
        mode_integrated = \
            z0mg_RSRF(wl_complete, SEMBB(wl_complete, MWSigmaD, T, beta,
                                         kappa160=kappa160), bands)
        model_complete = SEMBB(wl_complete, MWSigmaD, T, beta,
                               kappa160=kappa160)
        gordon_integrated = \
            z0mg_RSRF(wl_complete, SEMBB(wl_complete, MWSigmaD, 17.2, 1.96,
                                         9.6 * np.pi), bands)
        model_gordon = SEMBB(wl_complete, MWSigmaD, 17.2,
                             1.96, 9.6 * np.pi)
    elif method_abbr == 'FB':
        samples = np.array([logkappa160s[mask], Tds[mask], r_chi2[mask]])
        labels = [r'$\log\kappa_{160}$', r'$T$', r'$\tilde{\chi}^2$']
        T, T_err, _1, _2, _3, _4 = \
            best_fit_and_error(Tds, pr, 'T')
        Td_min = T - 1.5
        Td_max = T + 1.5
        grp['T'], grp['T_err'] = T, T_err
        mode_integrated = \
            z0mg_RSRF(wl_complete, SEMBB(wl_complete, MWSigmaD, T, beta_f,
                                         kappa160=kappa160), bands)
        model_complete = SEMBB(wl_complete, MWSigmaD, T, beta_f,
                               kappa160=kappa160)
        gordon_integrated = \
            z0mg_RSRF(wl_complete, SEMBB(wl_complete, MWSigmaD, 17.2, 1.96,
                                         9.6 * np.pi), bands)
        model_gordon = SEMBB(wl_complete, MWSigmaD, 17.2,
                             1.96, 9.6 * np.pi)
    elif method_abbr == 'BE':
        samples = np.array([logkappa160s[mask], Tds[mask], beta2s[mask],
                            r_chi2[mask]])
        labels = [r'$\log\kappa_{160}$', r'$T$', r'$\beta_2$',
                  r'$\tilde{\chi}^2$']
        T, T_err, _1, _2, _3, _4 = \
            best_fit_and_error(Tds, pr, 'T')
        beta2, beta2_err, _1, _2, _3, _4 = \
            best_fit_and_error(beta2s, pr, 'beta2')
        Td_min = T - 1.5
        Td_max = T + 1.5
        beta2_min = beta2 - 0.3
        beta2_max = beta2 + 0.3
        grp['T'], grp['T_err'] = T, T_err
        grp['beta2'], grp['beta2_err'] = beta2, beta2_err
        mode_integrated = \
            z0mg_RSRF(wl_complete, BEMBB(wl_complete, MWSigmaD, T, beta_f,
                                         lambdac_f, beta2,
                                         kappa160=kappa160), bands)
        model_complete = BEMBB(wl_complete, MWSigmaD, T, beta_f,
                               lambdac_f, beta2, kappa160=kappa160)
        e500 = 0.48
        gbeta2 = np.log(e500 + 1) / np.log(294. / 500.) + 2.27
        gordon_integrated = \
            z0mg_RSRF(wl_complete, BEMBB(wl_complete, MWSigmaD, 16.8, 2.27,
                                         294, gbeta2,
                                         11.6 * np.pi), bands)
        model_gordon = BEMBB(wl_complete, MWSigmaD, 16.8, 2.27, 294,
                             gbeta2, 11.6 * np.pi)
    elif method_abbr == 'WD':
        samples = np.array([logkappa160s[mask], Tds[mask], wdfracs[mask],
                            r_chi2[mask]])
        labels = [r'$\log\kappa_{160}$', r'$T$', r'wdfrac',
                  r'$\tilde{\chi}^2$']
        T, T_err, _1, _2, _3, _4 = \
            best_fit_and_error(Tds, pr, 'T')
        wdfrac, wdfrac_err, _1, _2, _3, _4 = \
            best_fit_and_error(wdfracs, pr, 'wdfrac')
        Td_min = T - 1.5
        Td_max = T + 1.5
        wdfrac_min = 0.0
        wdfrac_max = wdfrac + 0.006
        grp['T'], grp['T_err'] = T, T_err
        grp['wdfrac'], grp['wdfrac_err'] = wdfrac, wdfrac_err
        mode_integrated = \
            z0mg_RSRF(wl_complete, WD(wl_complete, MWSigmaD, T, beta_f,
                                      wdfrac, kappa160=kappa160), bands)
        model_complete = WD(wl_complete, MWSigmaD, T, beta_f, wdfrac,
                            kappa160=kappa160)
        e500 = 0.91
        nu500 = (c / 500 / u.um).to(u.Hz).value
        gwdfrac = e500 * B_fast(15., nu500) / B_fast(6., nu500)
        gordon_integrated = \
            z0mg_RSRF(wl_complete, WD(wl_complete, MWSigmaD, 15., 2.9,
                                      gwdfrac, kappa160=517. * np.pi,
                                      WDT=6.), bands)
        model_gordon = WD(wl_complete, MWSigmaD, 15., 2.9, gwdfrac,
                          kappa160=517. * np.pi, WDT=6.)
    elif method_abbr == 'PL':
        samples = np.array([logkappa160s[mask], loggammas[mask],
                            alphas[mask], logUmins[mask], r_chi2[mask]])
        labels = [r'$\log\kappa_{160}$', r'$\log\gamma$', r'$\alpha$',
                  r'\log U_{min}', r'$\tilde{\chi}^2$']
        loggamma, loggamma_err, _1, _2, _3, _4 = \
            best_fit_and_error(loggammas, pr, 'loggamma')
        alpha, alpha_err, _1, _2, _3, _4 = \
            best_fit_and_error(alphas, pr, 'alpha')
        logUmin, logUmin_err, _1, _2, _3, _4 = \
            best_fit_and_error(logUmins, pr, 'logUmin')
        alpha_min = max(alpha - 0.3, 1.1)
        alpha_max = alpha + 0.3
        loggamma_min = loggamma - 0.3
        loggamma_max = min(loggamma + 0.3, 0.)
        logUmin_min = logUmin - 0.1
        logUmin_max = logUmin + 0.1
        grp['loggamma'], grp['loggamma_err'] = loggamma, loggamma_err
        grp['alpha'], grp['alpha_err'] = alpha, alpha_err
        grp['logUmin'], grp['logUmin_err'] = logUmin, logUmin_err
        mode_integrated = \
            z0mg_RSRF(wl_complete, PowerLaw(wl_complete, MWSigmaD, alpha,
                                            10**loggamma, logUmin,
                                            beta=beta_f,
                                            kappa160=kappa160), bands)
        model_complete = PowerLaw(wl_complete, MWSigmaD, alpha,
                                  10**loggamma, logUmin, beta=beta_f,
                                  kappa160=kappa160)
        gordon_integrated = \
            z0mg_RSRF(wl_complete, SEMBB(wl_complete, MWSigmaD, 17.2, 1.96,
                                         9.6 * np.pi), bands)
        model_gordon = SEMBB(wl_complete, MWSigmaD, 17.2, 1.96,
                             9.6 * np.pi)
    hf.close()
    #
    del samples, labels
    """
    fig = corner(samples.T, labels=labels, quantities=(0.16, 0.84),
                 show_titles=True, title_kwargs={"fontsize": 12})
    with PdfPages('output/_CALI_' + method_abbr + '.pdf') as pp:
        pp.savefig(fig)
    """
    fig, ax = plt.subplots(figsize=(10, 7.5))
    ax.loglog(wl_complete, model_gordon, label='G14EXP')
    ax.loglog(wl, mode_integrated, 'x', ms=15, label='fitting (int)')
    ax.loglog(wl_complete, model_complete, label='fitting')
    ax.errorbar(wl, MWSED, yerr, label='MWSED')
    ax.loglog(wl, gordon_integrated, 'x', ms=15, label='G14 (int)')
    ax.legend()
    ax.set_ylim(0.03, 3.0)
    ax.set_xlim(80, 1000)
    # BUG FIX: the axis labels were swapped -- x is wavelength, y is the SED.
    ax.set_xlabel(r'Wavelength ($\mu m$)')
    ax.set_ylabel(r'SED [$MJy\,sr^{-1}\,(10^{20}(H\,Atom)\,cm^{-2})^{-1}$]')
    with PdfPages('output/_CALI_' + method_abbr + '_MODEL.pdf') as pp:
        pp.savefig(fig)
    print("  --Done. Elapsed time:", round(time.perf_counter() - tic, 3), "s.")
| [
"numpy.log10",
"pandas.read_csv",
"time.clock",
"multiprocessing.Process",
"numpy.log",
"numpy.array",
"numpy.isfinite",
"numpy.cumsum",
"numpy.moveaxis",
"numpy.nanmin",
"numpy.cov",
"numpy.arange",
"os.remove",
"numpy.mean",
"time.ctime",
"os.listdir",
"numpy.full_like",
"numpy.m... | [((690, 700), 'matplotlib.pyplot.ioff', 'plt.ioff', ([], {}), '()\n', (698, 700), True, 'import matplotlib.pyplot as plt\n'), ((3213, 3273), 'numpy.arange', 'np.arange', (['grid_para[p][0]', 'grid_para[p][1]', 'grid_para[p][2]'], {}), '(grid_para[p][0], grid_para[p][1], grid_para[p][2])\n', (3222, 3273), True, 'import numpy as np\n'), ((9057, 9079), 'numpy.array', 'np.array', (['bkgcov_check'], {}), '(bkgcov_check)\n', (9065, 9079), True, 'import numpy as np\n'), ((9124, 9145), 'numpy.array', 'np.array', (['bkgcov_good'], {}), '(bkgcov_good)\n', (9132, 9145), True, 'import numpy as np\n'), ((9159, 9193), 'numpy.empty', 'np.empty', (['[nwl, nwl]'], {'dtype': 'object'}), '([nwl, nwl], dtype=object)\n', (9167, 9193), True, 'import numpy as np\n'), ((10635, 10655), 'numpy.array', 'np.array', (['bkgcov_fit'], {}), '(bkgcov_fit)\n', (10643, 10655), True, 'import numpy as np\n'), ((10872, 10912), 'matplotlib.pyplot.subplots', 'plt.subplots', (['nwl', 'nwl'], {'figsize': '(10, 10)'}), '(nwl, nwl, figsize=(10, 10))\n', (10884, 10912), True, 'import matplotlib.pyplot as plt\n'), ((11793, 11809), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (11802, 11809), True, 'import matplotlib.pyplot as plt\n'), ((12471, 12505), 'astropy.io.fits.getdata', 'fits.getdata', (['mask_fn'], {'header': '(True)'}), '(mask_fn, header=True)\n', (12483, 12505), False, 'from astropy.io import fits\n'), ((12591, 12619), 'numpy.empty', 'np.empty', (['(list_shape + [nwl])'], {}), '(list_shape + [nwl])\n', (12599, 12619), True, 'import numpy as np\n'), ((12927, 12964), 'numpy.array', 'np.array', (['[band_wl[b] for b in bands]'], {}), '([band_wl[b] for b in bands])\n', (12935, 12964), True, 'import numpy as np\n'), ((13004, 13024), 'numpy.zeros', 'np.zeros', (['[nwl, nwl]'], {}), '([nwl, nwl])\n', (13012, 13024), True, 'import numpy as np\n'), ((20664, 20697), 'numpy.full', 'np.full', (['([3] + list_shape)', 'np.nan'], {}), '([3] + list_shape, np.nan)\n', (20671, 
20697), True, 'import numpy as np\n'), ((20716, 20749), 'numpy.full', 'np.full', (['([2] + list_shape)', 'np.nan'], {}), '([2] + list_shape, np.nan)\n', (20723, 20749), True, 'import numpy as np\n'), ((20770, 20805), 'numpy.full', 'np.full', (['([nwl] + list_shape)', 'np.nan'], {}), '([nwl] + list_shape, np.nan)\n', (20777, 20805), True, 'import numpy as np\n'), ((20821, 20851), 'numpy.full', 'np.full', (['sed.shape[:2]', 'np.nan'], {}), '(sed.shape[:2], np.nan)\n', (20828, 20851), True, 'import numpy as np\n'), ((32201, 32258), 'os.system', 'os.system', (['(\'find \' + outputpath + \' -name "*.gz" -delete\')'], {}), '(\'find \' + outputpath + \' -name "*.gz" -delete\')\n', (32210, 32258), False, 'import os\n'), ((32263, 32321), 'os.system', 'os.system', (['(\'find \' + outputpath + \' -name "*.png" -delete\')'], {}), '(\'find \' + outputpath + \' -name "*.png" -delete\')\n', (32272, 32321), False, 'import os\n'), ((40433, 40480), 'pandas.read_csv', 'pd.read_csv', (["('data/RSRF/' + instr + '_rsrf.csv')"], {}), "('data/RSRF/' + instr + '_rsrf.csv')\n", (40444, 40480), True, 'import pandas as pd\n'), ((42297, 42365), 'numpy.array', 'np.array', (["['pacs100', 'pacs160', 'spire250', 'spire350', 'spire500']"], {}), "(['pacs100', 'pacs160', 'spire250', 'spire350', 'spire500'])\n", (42305, 42365), True, 'import numpy as np\n'), ((42430, 42471), 'numpy.array', 'np.array', (['(band_wl[b] for b in bands_small)'], {}), '(band_wl[b] for b in bands_small)\n', (42438, 42471), True, 'import numpy as np\n'), ((42484, 42526), 'numpy.array', 'np.array', (['(band_cap[b] for b in bands_small)'], {}), '(band_cap[b] for b in bands_small)\n', (42492, 42526), True, 'import numpy as np\n'), ((59791, 59822), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(10, 7.5)'}), '(figsize=(10, 7.5))\n', (59803, 59822), True, 'import matplotlib.pyplot as plt\n'), ((4887, 4906), 'os.listdir', 'os.listdir', (['respath'], {}), '(respath)\n', (4897, 4906), False, 'import os\n'), ((5339, 
5360), 'numpy.all', 'np.all', (['masks'], {'axis': '(0)'}), '(masks, axis=0)\n', (5345, 5360), True, 'import numpy as np\n'), ((6346, 6365), 'os.listdir', 'os.listdir', (['respath'], {}), '(respath)\n', (6356, 6365), False, 'import os\n'), ((6718, 6732), 'numpy.array', 'np.array', (['seds'], {}), '(seds)\n', (6726, 6732), True, 'import numpy as np\n'), ((6893, 6927), 'numpy.zeros_like', 'np.zeros_like', (['bkgmask'], {'dtype': 'bool'}), '(bkgmask, dtype=bool)\n', (6906, 6927), True, 'import numpy as np\n'), ((7251, 7275), 'numpy.cov', 'np.cov', (['seds[:, bkgmask]'], {}), '(seds[:, bkgmask])\n', (7257, 7275), True, 'import numpy as np\n'), ((7670, 7710), 'astropy.io.fits.writeto', 'fits.writeto', (['fn', 'bkgcov'], {'overwrite': '(True)'}), '(fn, bkgcov, overwrite=True)\n', (7682, 7710), False, 'from astropy.io import fits\n'), ((8072, 8091), 'os.listdir', 'os.listdir', (['respath'], {}), '(respath)\n', (8082, 8091), False, 'import os\n'), ((8444, 8458), 'numpy.array', 'np.array', (['seds'], {}), '(seds)\n', (8452, 8458), True, 'import numpy as np\n'), ((8619, 8653), 'numpy.zeros_like', 'np.zeros_like', (['bkgmask'], {'dtype': 'bool'}), '(bkgmask, dtype=bool)\n', (8632, 8653), True, 'import numpy as np\n'), ((8977, 9001), 'numpy.cov', 'np.cov', (['seds[:, bkgmask]'], {}), '(seds[:, bkgmask])\n', (8983, 9001), True, 'import numpy as np\n'), ((9918, 9938), 'numpy.zeros', 'np.zeros', (['[nwl, nwl]'], {}), '([nwl, nwl])\n', (9926, 9938), True, 'import numpy as np\n'), ((12668, 12710), 'astropy.io.fits.getdata', 'fits.getdata', (['observe_fns[i]'], {'header': '(False)'}), '(observe_fns[i], header=False)\n', (12680, 12710), False, 'from astropy.io import fits\n'), ((12736, 12752), 'numpy.isfinite', 'np.isfinite', (['sed'], {}), '(sed)\n', (12747, 12752), True, 'import numpy as np\n'), ((13427, 13448), 'numpy.round_', 'np.round_', (['beta_in', '(2)'], {}), '(beta_in, 2)\n', (13436, 13448), True, 'import numpy as np\n'), ((13471, 13499), 'numpy.unique', 'np.unique', 
(['beta_in[diskmask]'], {}), '(beta_in[diskmask])\n', (13480, 13499), True, 'import numpy as np\n'), ((15443, 15477), 'numpy.zeros_like', 'np.zeros_like', (['bkgmask'], {'dtype': 'bool'}), '(bkgmask, dtype=bool)\n', (15456, 15477), True, 'import numpy as np\n'), ((15852, 15878), 'numpy.cov', 'np.cov', (['sed[new_bkgmask].T'], {}), '(sed[new_bkgmask].T)\n', (15858, 15878), True, 'import numpy as np\n'), ((16004, 16024), 'numpy.zeros', 'np.zeros', (['[nwl, nwl]'], {}), '([nwl, nwl])\n', (16012, 16024), True, 'import numpy as np\n'), ((20502, 20518), 'numpy.array', 'np.array', (['models'], {}), '(models)\n', (20510, 20518), True, 'import numpy as np\n'), ((20536, 20562), 'numpy.moveaxis', 'np.moveaxis', (['models', '(0)', '(-1)'], {}), '(models, 0, -1)\n', (20547, 20562), True, 'import numpy as np\n'), ((21065, 21092), 'numpy.full', 'np.full', (['list_shape', 'np.nan'], {}), '(list_shape, np.nan)\n', (21072, 21092), True, 'import numpy as np\n'), ((21112, 21139), 'numpy.full', 'np.full', (['list_shape', 'np.nan'], {}), '(list_shape, np.nan)\n', (21119, 21139), True, 'import numpy as np\n'), ((21288, 21412), 'numpy.meshgrid', 'np.meshgrid', (["v_prop['dust.surface.density'][1]", "v_prop['alpha'][1]", "(10 ** v_prop['gamma'][1])", "(10 ** v_prop['logUmin'][1])"], {}), "(v_prop['dust.surface.density'][1], v_prop['alpha'][1], 10 **\n v_prop['gamma'][1], 10 ** v_prop['logUmin'][1])\n", (21299, 21412), True, 'import numpy as np\n'), ((21735, 21762), 'numpy.full', 'np.full', (['list_shape', 'np.nan'], {}), '(list_shape, np.nan)\n', (21742, 21762), True, 'import numpy as np\n'), ((21785, 21812), 'numpy.full', 'np.full', (['list_shape', 'np.nan'], {}), '(list_shape, np.nan)\n', (21792, 21812), True, 'import numpy as np\n'), ((21835, 21862), 'numpy.full', 'np.full', (['list_shape', 'np.nan'], {}), '(list_shape, np.nan)\n', (21842, 21862), True, 'import numpy as np\n'), ((29373, 29481), 'multiprocessing.Process', 'mp.Process', ([], {'target': 'mp_fitting', 'args': '(mpid, 
mp_var, mp_pdf, mp_rec_sed, mp_chi2, mp_realcube, mp_logUbar)'}), '(target=mp_fitting, args=(mpid, mp_var, mp_pdf, mp_rec_sed,\n mp_chi2, mp_realcube, mp_logUbar))\n', (29383, 29481), True, 'import multiprocessing as mp\n'), ((30004, 30039), 'numpy.unravel_index', 'np.unravel_index', (['k', 'diskmask.shape'], {}), '(k, diskmask.shape)\n', (30020, 30039), True, 'import numpy as np\n'), ((31790, 31815), 'os.path.isdir', 'os.path.isdir', (['outputpath'], {}), '(outputpath)\n', (31803, 31815), False, 'import os\n'), ((31825, 31845), 'os.mkdir', 'os.mkdir', (['outputpath'], {}), '(outputpath)\n', (31833, 31845), False, 'import os\n'), ((31886, 31911), 'os.path.isdir', 'os.path.isdir', (['outputpath'], {}), '(outputpath)\n', (31899, 31911), False, 'import os\n'), ((31921, 31941), 'os.mkdir', 'os.mkdir', (['outputpath'], {}), '(outputpath)\n', (31929, 31941), False, 'import os\n'), ((32369, 32383), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (32381, 32383), True, 'import pandas as pd\n'), ((38582, 38695), 'numpy.meshgrid', 'np.meshgrid', (["v_prop['dust.surface.density'][1]", "v_prop['alpha'][1]", "v_prop['gamma'][1]", "v_prop['logUmin'][1]"], {}), "(v_prop['dust.surface.density'][1], v_prop['alpha'][1], v_prop[\n 'gamma'][1], v_prop['logUmin'][1])\n", (38593, 38695), True, 'import numpy as np\n'), ((41045, 41110), 'multiprocessing.Process', 'mp.Process', ([], {'target': 'mp_models_creation', 'args': '(mpid, mp_model, _wl)'}), '(target=mp_models_creation, args=(mpid, mp_model, _wl))\n', (41055, 41110), True, 'import multiprocessing as mp\n'), ((42231, 42271), 'numpy.array', 'np.array', (['[0.71, 1.53, 1.08, 0.56, 0.25]'], {}), '([0.71, 1.53, 1.08, 0.56, 0.25])\n', (42239, 42271), True, 'import numpy as np\n'), ((43663, 43852), 'numpy.array', 'np.array', (['[[DUNU + DCOU, 0, 0, 0, 0], [0, FCOU + FUNU, FCOU, FCOU, FCOU], [0, FCOU, \n FCOU + FUNU, FCOU, FCOU], [0, FCOU, FCOU, FCOU + FUNU, FCOU], [0, FCOU,\n FCOU, FCOU, FCOU + FUNU]]'], {}), '([[DUNU + DCOU, 0, 0, 
0, 0], [0, FCOU + FUNU, FCOU, FCOU, FCOU], [0,\n FCOU, FCOU + FUNU, FCOU, FCOU], [0, FCOU, FCOU, FCOU + FUNU, FCOU], [0,\n FCOU, FCOU, FCOU, FCOU + FUNU]])\n', (43671, 43852), True, 'import numpy as np\n'), ((44165, 44220), 'numpy.arange', 'np.arange', (['logSigmad_min', 'logSigmad_max', 'logSigmad_step'], {}), '(logSigmad_min, logSigmad_max, logSigmad_step)\n', (44174, 44220), True, 'import numpy as np\n'), ((50276, 50288), 'time.clock', 'time.clock', ([], {}), '()\n', (50286, 50288), False, 'import time\n'), ((50311, 50332), 'numpy.empty_like', 'np.empty_like', (['models'], {}), '(models)\n', (50324, 50332), True, 'import numpy as np\n'), ((50475, 50523), 'numpy.linalg.inv', 'np.linalg.inv', (['(sed_vec.T * MWcali_mat2 * sed_vec)'], {}), '(sed_vec.T * MWcali_mat2 * sed_vec)\n', (50488, 50523), True, 'import numpy as np\n'), ((50639, 50674), 'numpy.sum', 'np.sum', (['(temp_matrix * diff)'], {'axis': '(-1)'}), '(temp_matrix * diff, axis=-1)\n', (50645, 50674), True, 'import numpy as np\n'), ((50948, 50967), 'numpy.exp', 'np.exp', (['(-0.5 * chi2)'], {}), '(-0.5 * chi2)\n', (50954, 50967), True, 'import numpy as np\n'), ((51827, 51853), 'numpy.linspace', 'np.linspace', (['(1)', '(1000)', '(1000)'], {}), '(1, 1000, 1000)\n', (51838, 51853), True, 'import numpy as np\n'), ((51950, 51968), 'h5py.File', 'h5py.File', (['fn', '"""a"""'], {}), "(fn, 'a')\n", (51959, 51968), False, 'import h5py\n'), ((60324, 60378), 'matplotlib.backends.backend_pdf.PdfPages', 'PdfPages', (["('output/_CALI_' + method_abbr + '_MODEL.pdf')"], {}), "('output/_CALI_' + method_abbr + '_MODEL.pdf')\n", (60332, 60378), False, 'from matplotlib.backends.backend_pdf import PdfPages\n'), ((6208, 6231), 'os.path.isfile', 'os.path.isfile', (['mask_fn'], {}), '(mask_fn)\n', (6222, 6231), False, 'import os\n'), ((6762, 6779), 'numpy.isfinite', 'np.isfinite', (['seds'], {}), '(seds)\n', (6773, 6779), True, 'import numpy as np\n'), ((7038, 7060), 'numpy.median', 'np.median', (['AD[bkgmask]'], {}), 
'(AD[bkgmask])\n', (7047, 7060), True, 'import numpy as np\n'), ((7934, 7957), 'os.path.isfile', 'os.path.isfile', (['mask_fn'], {}), '(mask_fn)\n', (7948, 7957), False, 'import os\n'), ((8488, 8505), 'numpy.isfinite', 'np.isfinite', (['seds'], {}), '(seds)\n', (8499, 8505), True, 'import numpy as np\n'), ((8764, 8786), 'numpy.median', 'np.median', (['AD[bkgmask]'], {}), '(AD[bkgmask])\n', (8773, 8786), True, 'import numpy as np\n'), ((9275, 9311), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {'fit_intercept': '(True)'}), '(fit_intercept=True)\n', (9291, 9311), False, 'from sklearn.linear_model import LinearRegression\n'), ((10577, 10617), 'astropy.io.fits.writeto', 'fits.writeto', (['fn', 'bkgcov'], {'overwrite': '(True)'}), '(fn, bkgcov, overwrite=True)\n', (10589, 10617), False, 'from astropy.io import fits\n'), ((15592, 15614), 'numpy.median', 'np.median', (['AD[bkgmask]'], {}), '(AD[bkgmask])\n', (15601, 15614), True, 'import numpy as np\n'), ((16100, 16117), 'numpy.array', 'np.array', (['avg_SED'], {}), '(avg_SED)\n', (16108, 16117), True, 'import numpy as np\n'), ((19076, 19092), 'numpy.array', 'np.array', (['models'], {}), '(models)\n', (19084, 19092), True, 'import numpy as np\n'), ((19114, 19140), 'numpy.moveaxis', 'np.moveaxis', (['models', '(0)', '(-1)'], {}), '(models, 0, -1)\n', (19125, 19140), True, 'import numpy as np\n'), ((20946, 20976), 'numpy.full_like', 'np.full_like', (['temp_log', 'np.nan'], {}), '(temp_log, np.nan)\n', (20958, 20976), True, 'import numpy as np\n'), ((21012, 21045), 'numpy.full_like', 'np.full_like', (['temp_linear', 'np.nan'], {}), '(temp_linear, np.nan)\n', (21024, 21045), True, 'import numpy as np\n'), ((22181, 22225), 'numpy.full', 'np.full', (['([randcubesize] + list_shape)', 'np.nan'], {}), '([randcubesize] + list_shape, np.nan)\n', (22188, 22225), True, 'import numpy as np\n'), ((22343, 22367), 'numpy.arange', 'np.arange', (['diskmask.size'], {}), '(diskmask.size)\n', (22352, 22367), True, 'import 
numpy as np\n'), ((23231, 23261), 'numpy.linalg.inv', 'np.linalg.inv', (['(bkgcov + calcov)'], {}), '(bkgcov + calcov)\n', (23244, 23261), True, 'import numpy as np\n'), ((23752, 23768), 'numpy.any', 'np.any', (['(chi2 < 0)'], {}), '(chi2 < 0)\n', (23758, 23768), True, 'import numpy as np\n'), ((24146, 24165), 'numpy.exp', 'np.exp', (['(-0.5 * chi2)'], {}), '(-0.5 * chi2)\n', (24152, 24165), True, 'import numpy as np\n'), ((28932, 28944), 'multiprocessing.Manager', 'mp.Manager', ([], {}), '()\n', (28942, 28944), True, 'import multiprocessing as mp\n'), ((28987, 28999), 'multiprocessing.Manager', 'mp.Manager', ([], {}), '()\n', (28997, 28999), True, 'import multiprocessing as mp\n'), ((29050, 29062), 'multiprocessing.Manager', 'mp.Manager', ([], {}), '()\n', (29060, 29062), True, 'import multiprocessing as mp\n'), ((29119, 29131), 'multiprocessing.Manager', 'mp.Manager', ([], {}), '()\n', (29129, 29131), True, 'import multiprocessing as mp\n'), ((31438, 31502), 'numpy.max', 'np.max', (['[v_max[p] - v_map[p][0], v_map[p][0] - v_min[p]]'], {'axis': '(0)'}), '([v_max[p] - v_map[p][0], v_map[p][0] - v_min[p]], axis=0)\n', (31444, 31502), True, 'import numpy as np\n'), ((31592, 31656), 'numpy.max', 'np.max', (['[v_max[p] - v_map[p][0], v_map[p][0] - v_min[p]]'], {'axis': '(0)'}), '([v_max[p] - v_map[p][0], v_map[p][0] - v_min[p]], axis=0)\n', (31598, 31656), True, 'import numpy as np\n'), ((32019, 32044), 'os.path.isdir', 'os.path.isdir', (['outputpath'], {}), '(outputpath)\n', (32032, 32044), False, 'import os\n'), ((32058, 32078), 'os.mkdir', 'os.mkdir', (['outputpath'], {}), '(outputpath)\n', (32066, 32078), False, 'import os\n'), ((39012, 39113), 'numpy.meshgrid', 'np.meshgrid', (["v_prop['dust.surface.density'][1]", "v_prop['dust.temperature'][1]", "v_prop['beta'][1]"], {}), "(v_prop['dust.surface.density'][1], v_prop['dust.temperature'][1\n ], v_prop['beta'][1])\n", (39023, 39113), True, 'import numpy as np\n'), ((40993, 41005), 'multiprocessing.Manager', 
'mp.Manager', ([], {}), '()\n', (41003, 41005), True, 'import multiprocessing as mp\n'), ((41802, 41834), 'numpy.sum', 'np.sum', (['(h_models * rsps)'], {'axis': '(-1)'}), '(h_models * rsps, axis=-1)\n', (41808, 41834), True, 'import numpy as np\n'), ((41851, 41882), 'numpy.sum', 'np.sum', (['(rsps * _wl / band_wl[b])'], {}), '(rsps * _wl / band_wl[b])\n', (41857, 41882), True, 'import numpy as np\n'), ((44300, 44343), 'numpy.arange', 'np.arange', (['alpha_min', 'alpha_max', 'alpha_step'], {}), '(alpha_min, alpha_max, alpha_step)\n', (44309, 44343), True, 'import numpy as np\n'), ((44370, 44419), 'numpy.arange', 'np.arange', (['logUmin_min', 'logUmin_max', 'logUmin_step'], {}), '(logUmin_min, logUmin_max, logUmin_step)\n', (44379, 44419), True, 'import numpy as np\n'), ((44447, 44499), 'numpy.arange', 'np.arange', (['loggamma_min', 'loggamma_max', 'loggamma_step'], {}), '(loggamma_min, loggamma_max, loggamma_step)\n', (44456, 44499), True, 'import numpy as np\n'), ((44572, 44636), 'numpy.meshgrid', 'np.meshgrid', (['logSigmads_1d', 'alphas_1d', 'loggammas_1d', 'logUmins_1d'], {}), '(logSigmads_1d, alphas_1d, loggammas_1d, logUmins_1d)\n', (44583, 44636), True, 'import numpy as np\n'), ((44700, 44734), 'numpy.arange', 'np.arange', (['Td_min', 'Td_max', 'Td_step'], {}), '(Td_min, Td_max, Td_step)\n', (44709, 44734), True, 'import numpy as np\n'), ((44790, 44830), 'numpy.arange', 'np.arange', (['beta_min', 'beta_max', 'beta_step'], {}), '(beta_min, beta_max, beta_step)\n', (44799, 44830), True, 'import numpy as np\n'), ((44868, 44912), 'numpy.meshgrid', 'np.meshgrid', (['logSigmads_1d', 'Tds_1d', 'betas_1d'], {}), '(logSigmads_1d, Tds_1d, betas_1d)\n', (44879, 44912), True, 'import numpy as np\n'), ((46542, 46554), 'time.clock', 'time.clock', ([], {}), '()\n', (46552, 46554), False, 'import time\n'), ((46653, 46665), 'time.clock', 'time.clock', ([], {}), '()\n', (46663, 46665), False, 'import time\n'), ((48099, 48111), 'time.clock', 'time.clock', ([], {}), '()\n', 
(48109, 48111), False, 'import time\n'), ((48132, 48187), 'pandas.read_csv', 'pd.read_csv', (["('data/RSRF/' + instrs[instr] + '_RSRF.csv')"], {}), "('data/RSRF/' + instrs[instr] + '_RSRF.csv')\n", (48143, 48187), True, 'import pandas as pd\n'), ((50587, 50623), 'numpy.sum', 'np.sum', (['(diff * cov_n1[:, j])'], {'axis': '(-1)'}), '(diff * cov_n1[:, j], axis=-1)\n', (50593, 50623), True, 'import numpy as np\n'), ((51126, 51144), 'numpy.log10', 'np.log10', (['MWSigmaD'], {}), '(MWSigmaD)\n', (51134, 51144), True, 'import numpy as np\n'), ((52284, 52352), 'numpy.array', 'np.array', (['[logkappa160s[mask], Tds[mask], betas[mask], r_chi2[mask]]'], {}), '([logkappa160s[mask], Tds[mask], betas[mask], r_chi2[mask]])\n', (52292, 52352), True, 'import numpy as np\n'), ((6297, 6318), 'astropy.io.fits.getdata', 'fits.getdata', (['mask_fn'], {}), '(mask_fn)\n', (6309, 6318), False, 'from astropy.io import fits\n'), ((7078, 7107), 'numpy.errstate', 'np.errstate', ([], {'invalid': '"""ignore"""'}), "(invalid='ignore')\n", (7089, 7107), True, 'import numpy as np\n'), ((8023, 8044), 'astropy.io.fits.getdata', 'fits.getdata', (['mask_fn'], {}), '(mask_fn)\n', (8035, 8044), False, 'from astropy.io import fits\n'), ((8804, 8833), 'numpy.errstate', 'np.errstate', ([], {'invalid': '"""ignore"""'}), "(invalid='ignore')\n", (8815, 8833), True, 'import numpy as np\n'), ((14367, 14385), 'h5py.File', 'h5py.File', (['fn', '"""r"""'], {}), "(fn, 'r')\n", (14376, 14385), False, 'import h5py\n'), ((15178, 15190), 'time.ctime', 'time.ctime', ([], {}), '()\n', (15188, 15190), False, 'import time\n'), ((15632, 15661), 'numpy.errstate', 'np.errstate', ([], {'invalid': '"""ignore"""'}), "(invalid='ignore')\n", (15643, 15661), True, 'import numpy as np\n'), ((16310, 16325), 'numpy.ones', 'np.ones', (['[1, 1]'], {}), '([1, 1])\n', (16317, 16325), True, 'import numpy as np\n'), ((19398, 19416), 'os.path.isfile', 'os.path.isfile', (['fn'], {}), '(fn)\n', (19412, 19416), False, 'import os\n'), ((19612, 
19630), 'os.path.isfile', 'os.path.isfile', (['fn'], {}), '(fn)\n', (19626, 19630), False, 'import os\n'), ((20467, 20483), 'astropy.io.fits.getdata', 'fits.getdata', (['fn'], {}), '(fn)\n', (20479, 20483), False, 'from astropy.io import fits\n'), ((23001, 23032), 'numpy.unravel_index', 'np.unravel_index', (['k', 'list_shape'], {}), '(k, list_shape)\n', (23017, 23032), True, 'import numpy as np\n'), ((24885, 24920), 'numpy.sum', 'np.sum', (['(logUbars_sorted * pr_sorted)'], {}), '(logUbars_sorted * pr_sorted)\n', (24891, 24920), True, 'import numpy as np\n'), ((24966, 24986), 'numpy.cumsum', 'np.cumsum', (['pr_sorted'], {}), '(pr_sorted)\n', (24975, 24986), True, 'import numpy as np\n'), ((25050, 25095), 'numpy.interp', 'np.interp', (['[0.16, 0.84]', 'csp', 'logUbars_sorted'], {}), '([0.16, 0.84], csp, logUbars_sorted)\n', (25059, 25095), True, 'import numpy as np\n'), ((25707, 25745), 'numpy.full', 'np.full', (['[npara, randcubesize]', 'np.nan'], {}), '([npara, randcubesize], np.nan)\n', (25714, 25745), True, 'import numpy as np\n'), ((28817, 28829), 'time.ctime', 'time.ctime', ([], {}), '()\n', (28827, 28829), False, 'import time\n'), ((29180, 29192), 'multiprocessing.Manager', 'mp.Manager', ([], {}), '()\n', (29190, 29192), True, 'import multiprocessing as mp\n'), ((29277, 29289), 'multiprocessing.Manager', 'mp.Manager', ([], {}), '()\n', (29287, 29289), True, 'import multiprocessing as mp\n'), ((39360, 39437), 'numpy.meshgrid', 'np.meshgrid', (["v_prop['dust.surface.density'][1]", "v_prop['dust.temperature'][1]"], {}), "(v_prop['dust.surface.density'][1], v_prop['dust.temperature'][1])\n", (39371, 39437), True, 'import numpy as np\n'), ((40401, 40413), 'time.ctime', 'time.ctime', ([], {}), '()\n', (40411, 40413), False, 'import time\n'), ((42703, 42715), 'time.ctime', 'time.ctime', ([], {}), '()\n', (42713, 42715), False, 'import time\n'), ((45026, 45060), 'numpy.meshgrid', 'np.meshgrid', (['logSigmads_1d', 'Tds_1d'], {}), '(logSigmads_1d, Tds_1d)\n', (45037, 
45060), True, 'import numpy as np\n'), ((47031, 47043), 'time.clock', 'time.clock', ([], {}), '()\n', (47041, 47043), False, 'import time\n'), ((49077, 49087), 'multiprocessing.Queue', 'mp.Queue', ([], {}), '()\n', (49085, 49087), True, 'import multiprocessing as mp\n'), ((50432, 50456), 'numpy.diagonal', 'np.diagonal', (['MWcali_mat2'], {}), '(MWcali_mat2)\n', (50443, 50456), True, 'import numpy as np\n'), ((50911, 50928), 'numpy.nanmin', 'np.nanmin', (['r_chi2'], {}), '(r_chi2)\n', (50920, 50928), True, 'import numpy as np\n'), ((53566, 53621), 'numpy.array', 'np.array', (['[logkappa160s[mask], Tds[mask], r_chi2[mask]]'], {}), '([logkappa160s[mask], Tds[mask], r_chi2[mask]])\n', (53574, 53621), True, 'import numpy as np\n'), ((60452, 60464), 'time.clock', 'time.clock', ([], {}), '()\n', (60462, 60464), False, 'import time\n'), ((6991, 7018), 'numpy.median', 'np.median', (['seds[i][bkgmask]'], {}), '(seds[i][bkgmask])\n', (7000, 7018), True, 'import numpy as np\n'), ((8717, 8744), 'numpy.median', 'np.median', (['seds[i][bkgmask]'], {}), '(seds[i][bkgmask])\n', (8726, 8744), True, 'import numpy as np\n'), ((13727, 13745), 'h5py.File', 'h5py.File', (['fn', '"""r"""'], {}), "(fn, 'r')\n", (13736, 13745), False, 'import h5py\n'), ((14762, 14780), 'h5py.File', 'h5py.File', (['fn', '"""r"""'], {}), "(fn, 'r')\n", (14771, 14780), False, 'import h5py\n'), ((15546, 15572), 'numpy.median', 'np.median', (['sed[bkgmask][i]'], {}), '(sed[bkgmask][i])\n', (15555, 15572), True, 'import numpy as np\n'), ((16164, 16195), 'numpy.mean', 'np.mean', (['sed[:, :, i][diskmask]'], {}), '(sed[:, :, i][diskmask])\n', (16171, 16195), True, 'import numpy as np\n'), ((17836, 17854), 'os.path.isfile', 'os.path.isfile', (['fn'], {}), '(fn)\n', (17850, 17854), False, 'import os\n'), ((18066, 18084), 'os.path.isfile', 'os.path.isfile', (['fn'], {}), '(fn)\n', (18080, 18084), False, 'import os\n'), ((19037, 19053), 'astropy.io.fits.getdata', 'fits.getdata', (['fn'], {}), '(fn)\n', (19049, 19053), 
False, 'from astropy.io import fits\n'), ((19438, 19451), 'os.remove', 'os.remove', (['fn'], {}), '(fn)\n', (19447, 19451), False, 'import os\n'), ((19705, 19725), 'os.listdir', 'os.listdir', (['"""models"""'], {}), "('models')\n", (19715, 19725), False, 'import os\n'), ((24829, 24839), 'numpy.sum', 'np.sum', (['pr'], {}), '(pr)\n', (24835, 24839), True, 'import numpy as np\n'), ((25471, 25491), 'numpy.arange', 'np.arange', (['chi2.size'], {}), '(chi2.size)\n', (25480, 25491), True, 'import numpy as np\n'), ((25820, 25856), 'numpy.unravel_index', 'np.unravel_index', (['rs[ri]', 'chi2.shape'], {}), '(rs[ri], chi2.shape)\n', (25836, 25856), True, 'import numpy as np\n'), ((39673, 39775), 'numpy.meshgrid', 'np.meshgrid', (["v_prop['dust.surface.density'][1]", "v_prop['dust.temperature'][1]", "v_prop['beta2'][1]"], {}), "(v_prop['dust.surface.density'][1], v_prop['dust.temperature'][1\n ], v_prop['beta2'][1])\n", (39684, 39775), True, 'import numpy as np\n'), ((45119, 45162), 'numpy.arange', 'np.arange', (['beta2_min', 'beta2_max', 'beta2_step'], {}), '(beta2_min, beta2_max, beta2_step)\n', (45128, 45162), True, 'import numpy as np\n'), ((45219, 45264), 'numpy.meshgrid', 'np.meshgrid', (['logSigmads_1d', 'Tds_1d', 'beta2s_1d'], {}), '(logSigmads_1d, Tds_1d, beta2s_1d)\n', (45230, 45264), True, 'import numpy as np\n'), ((45288, 45317), 'numpy.full', 'np.full', (['Tds.shape', 'lambdac_f'], {}), '(Tds.shape, lambdac_f)\n', (45295, 45317), True, 'import numpy as np\n'), ((49117, 49186), 'multiprocessing.Process', 'mp.Process', ([], {'target': 'split_herschel', 'args': '(ri, r_, rounds, _wl, wlr, q)'}), '(target=split_herschel, args=(ri, r_, rounds, _wl, wlr, q))\n', (49127, 49186), True, 'import multiprocessing as mp\n'), ((49949, 49984), 'numpy.sum', 'np.sum', (['(h_models * rsps[i])'], {'axis': '(-1)'}), '(h_models * rsps[i], axis=-1)\n', (49955, 49984), True, 'import numpy as np\n'), ((50009, 50038), 'numpy.sum', 'np.sum', (['(rsps[i] * _wl / wl[i])'], {}), '(rsps[i] * 
_wl / wl[i])\n', (50015, 50038), True, 'import numpy as np\n'), ((54546, 54615), 'numpy.array', 'np.array', (['[logkappa160s[mask], Tds[mask], beta2s[mask], r_chi2[mask]]'], {}), '([logkappa160s[mask], Tds[mask], beta2s[mask], r_chi2[mask]])\n', (54554, 54615), True, 'import numpy as np\n'), ((14131, 14149), 'h5py.File', 'h5py.File', (['fn', '"""r"""'], {}), "(fn, 'r')\n", (14140, 14149), False, 'import h5py\n'), ((17880, 17893), 'os.remove', 'os.remove', (['fn'], {}), '(fn)\n', (17889, 17893), False, 'import os\n'), ((18167, 18187), 'os.listdir', 'os.listdir', (['"""models"""'], {}), "('models')\n", (18177, 18187), False, 'import os\n'), ((21573, 21596), 'numpy.log', 'np.log', (['(10 ** 3 / Umins)'], {}), '(10 ** 3 / Umins)\n', (21579, 21596), True, 'import numpy as np\n'), ((29920, 29932), 'time.ctime', 'time.ctime', ([], {}), '()\n', (29930, 29932), False, 'import time\n'), ((31248, 31266), 'numpy.log10', 'np.log10', (['v_max[p]'], {}), '(v_max[p])\n', (31256, 31266), True, 'import numpy as np\n'), ((31269, 31290), 'numpy.log10', 'np.log10', (['v_map[p][0]'], {}), '(v_map[p][0])\n', (31277, 31290), True, 'import numpy as np\n'), ((31326, 31347), 'numpy.log10', 'np.log10', (['v_map[p][0]'], {}), '(v_map[p][0])\n', (31334, 31347), True, 'import numpy as np\n'), ((31350, 31368), 'numpy.log10', 'np.log10', (['v_min[p]'], {}), '(v_min[p])\n', (31358, 31368), True, 'import numpy as np\n'), ((34684, 34713), 'numpy.errstate', 'np.errstate', ([], {'invalid': '"""ignore"""'}), "(invalid='ignore')\n", (34695, 34713), True, 'import numpy as np\n'), ((37509, 37538), 'numpy.errstate', 'np.errstate', ([], {'invalid': '"""ignore"""'}), "(invalid='ignore')\n", (37520, 37538), True, 'import numpy as np\n'), ((40049, 40164), 'numpy.meshgrid', 'np.meshgrid', (["v_prop['dust.surface.density'][1]", "v_prop['dust.temperature'][1]", "v_prop['warm.dust.fraction'][1]"], {}), "(v_prop['dust.surface.density'][1], v_prop['dust.temperature'][1\n ], v_prop['warm.dust.fraction'][1])\n", 
(40060, 40164), True, 'import numpy as np\n'), ((41569, 41581), 'time.ctime', 'time.ctime', ([], {}), '()\n', (41579, 41581), False, 'import time\n'), ((45377, 45423), 'numpy.arange', 'np.arange', (['wdfrac_min', 'wdfrac_max', 'wdfrac_step'], {}), '(wdfrac_min, wdfrac_max, wdfrac_step)\n', (45386, 45423), True, 'import numpy as np\n'), ((45463, 45509), 'numpy.meshgrid', 'np.meshgrid', (['logSigmads_1d', 'Tds_1d', 'wdfracs_1d'], {}), '(logSigmads_1d, Tds_1d, wdfracs_1d)\n', (45474, 45509), True, 'import numpy as np\n'), ((56092, 56162), 'numpy.array', 'np.array', (['[logkappa160s[mask], Tds[mask], wdfracs[mask], r_chi2[mask]]'], {}), '([logkappa160s[mask], Tds[mask], wdfracs[mask], r_chi2[mask]])\n', (56100, 56162), True, 'import numpy as np\n'), ((5184, 5223), 'astropy.io.fits.getdata', 'fits.getdata', (['(respath + fn)'], {'header': '(True)'}), '(respath + fn, header=True)\n', (5196, 5223), False, 'from astropy.io import fits\n'), ((22827, 22839), 'time.ctime', 'time.ctime', ([], {}), '()\n', (22837, 22839), False, 'import time\n'), ((25614, 25624), 'numpy.sum', 'np.sum', (['pr'], {}), '(pr)\n', (25620, 25624), True, 'import numpy as np\n'), ((40859, 40871), 'time.ctime', 'time.ctime', ([], {}), '()\n', (40869, 40871), False, 'import time\n'), ((46815, 46827), 'time.ctime', 'time.ctime', ([], {}), '()\n', (46825, 46827), False, 'import time\n'), ((47560, 47572), 'time.clock', 'time.clock', ([], {}), '()\n', (47570, 47572), False, 'import time\n'), ((47591, 47603), 'time.ctime', 'time.ctime', ([], {}), '()\n', (47601, 47603), False, 'import time\n'), ((48060, 48072), 'time.ctime', 'time.ctime', ([], {}), '()\n', (48070, 48072), False, 'import time\n'), ((50165, 50177), 'time.clock', 'time.clock', ([], {}), '()\n', (50175, 50177), False, 'import time\n'), ((55623, 55639), 'numpy.log', 'np.log', (['(e500 + 1)'], {}), '(e500 + 1)\n', (55629, 55639), True, 'import numpy as np\n'), ((55642, 55663), 'numpy.log', 'np.log', (['(294.0 / 500.0)'], {}), '(294.0 / 500.0)\n', 
(55648, 55663), True, 'import numpy as np\n'), ((57624, 57719), 'numpy.array', 'np.array', (['[logkappa160s[mask], loggammas[mask], alphas[mask], logUmins[mask], r_chi2[\n mask]]'], {}), '([logkappa160s[mask], loggammas[mask], alphas[mask], logUmins[mask],\n r_chi2[mask]])\n', (57632, 57719), True, 'import numpy as np\n'), ((6643, 6669), 'astropy.io.fits.getdata', 'fits.getdata', (['(respath + fn)'], {}), '(respath + fn)\n', (6655, 6669), False, 'from astropy.io import fits\n'), ((8369, 8395), 'astropy.io.fits.getdata', 'fits.getdata', (['(respath + fn)'], {}), '(respath + fn)\n', (8381, 8395), False, 'from astropy.io import fits\n'), ((9555, 9577), 'numpy.log10', 'np.log10', (['res_good_num'], {}), '(res_good_num)\n', (9563, 9577), True, 'import numpy as np\n'), ((10317, 10334), 'numpy.log10', 'np.log10', (['res_num'], {}), '(res_num)\n', (10325, 10334), True, 'import numpy as np\n'), ((9638, 9666), 'numpy.abs', 'np.abs', (['bkgcov_good[:, i, j]'], {}), '(bkgcov_good[:, i, j])\n', (9644, 9666), True, 'import numpy as np\n')] |
__author__ = "<NAME>"
__copyright__ = "Copyright (c) 2021 MSAM Lab - University of Waterloo"
__license__ = "BSD-3-Clause"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
from typing import Any, List, Callable, Dict
from PyQt5.QtCore import QObject, QSettings, pyqtSlot, pyqtSignal
from PyQt5 import QtWidgets
import numpy as np
class SingleEntry(QObject):
    """Base class for a single configurable value with Qt UI support.

    Keeps a committed value and a pending (temporary) value, persists
    itself through ``QSettings`` and builds a labelled row widget that
    subclasses extend with a concrete input widget.
    """

    # Emitted as (entry_name, parent_key) whenever the committed value changes.
    update_val = pyqtSignal(str, str)

    def __init__(self, name: str, ui_dict: Dict[str, str] = None, default_val: Any = None, parent_key: str = None) -> None:
        QObject.__init__(self, None)
        self.name = name
        self.parent_key = parent_key
        self.ui = {} if ui_dict is None else ui_dict
        self._value = default_val
        self._temp_val = default_val

    def copy(self):
        """Return a new instance of the same concrete class with identical state."""
        return type(self)(self.name, self.ui, self._value, self.parent_key)

    @property
    def value(self) -> Any:
        """The committed value of this entry."""
        return self._value

    @value.setter
    def value(self, value: Any) -> None:
        # Only react (and notify listeners) when the value actually changes.
        if self._value == value:
            return
        self._temp_val = value
        self._value = value
        self.update_val.emit(self.name, self.parent_key)

    def _update_temp(self, val: Any):
        # Concrete subclasses provide the slot that stages an edited value.
        raise NotImplementedError

    @pyqtSlot()
    def commit_value(self) -> None:
        """Promote the staged value to the committed one and notify listeners."""
        self._value = self._temp_val
        self.update_val.emit(self.name, self.parent_key)

    def write_settings(self, q_path: str) -> None:
        """Persist the committed value and the UI metadata under *q_path*."""
        store = QSettings()
        store.setValue("%s/%s/%s" % (q_path, self.name, 'value'), self._value)
        self.write_ui_settings(q_path)

    def write_ui_settings(self, q_path: str) -> None:
        """Persist every UI metadata entry under ``<q_path>/<name>/ui``."""
        store = QSettings()
        for ui_key, ui_value in self.ui.items():
            store.setValue("%s/%s/%s/%s" % (q_path, self.name, 'ui', ui_key), ui_value)

    def load_ui_settings(self, q_path: str) -> None:
        """Restore the UI metadata previously saved by write_ui_settings()."""
        store = QSettings()
        store.beginGroup("%s/%s/%s" % (q_path, self.name, 'ui'))
        for ui_key in store.childKeys():
            self.ui[ui_key] = store.value(ui_key)

    def load_entry(self, q_path: str):
        # Concrete subclasses restore the typed value from QSettings.
        raise NotImplementedError

    def create_ui_entry(self) -> QtWidgets.QWidget:
        """Build the labelled row; subclasses insert their input widget at index 1."""
        row = QtWidgets.QHBoxLayout()
        row.insertWidget(0, QtWidgets.QLabel(self.ui['label']))
        if 'unit' in self.ui:
            row.insertWidget(2, QtWidgets.QLabel(self.ui['unit']))
        row.insertSpacing(-1, 20)
        container = QtWidgets.QWidget()
        container.setLayout(row)
        return container
class IntEntry(SingleEntry):
    """An integer-valued entry edited through a spin box."""

    @pyqtSlot(int)
    def _update_temp(self, val: int) -> None:
        # Stage the edited value; it becomes current on commit_value().
        self._temp_val = val

    def write_settings(self, q_path: str) -> None:
        """Persist the value plus an 'int' type tag for later reconstruction."""
        SingleEntry.write_settings(self, q_path)
        QSettings().setValue("%s/%s/%s" % (q_path, self.name, 'type'), 'int')

    def load_entry(self, q_path: str) -> None:
        """Restore the stored integer value and the saved UI metadata."""
        stored = QSettings().value(
            "%s/%s/%s" % (q_path, self.name, 'value'), type=int)
        self._value = stored
        self._temp_val = stored
        self.load_ui_settings(q_path)

    def create_ui_entry(self) -> QtWidgets.QWidget:
        """Build the labelled row with an integer spin box at index 1."""
        container = SingleEntry.create_ui_entry(self)
        spin = QtWidgets.QSpinBox()
        if 'range' in self.ui:
            spin.setMinimum(int(self.ui['range'][0]))
            spin.setMaximum(int(self.ui['range'][1]))
        spin.setValue(self._value)
        spin.valueChanged.connect(self._update_temp)
        container.layout().insertWidget(1, spin)
        return container
class FloatEntry(SingleEntry):
    """A float-valued entry edited through a double spin box."""

    @pyqtSlot(float)
    def _update_temp(self, val: float) -> None:
        # Stage the edited value; it becomes current on commit_value().
        self._temp_val = val

    def write_settings(self, q_path: str) -> None:
        """Persist the value plus a 'float' type tag for later reconstruction."""
        SingleEntry.write_settings(self, q_path)
        QSettings().setValue("%s/%s/%s" % (q_path, self.name, 'type'), 'float')

    def load_entry(self, q_path: str) -> None:
        """Restore the stored float value and the saved UI metadata."""
        stored = QSettings().value(
            "%s/%s/%s" % (q_path, self.name, 'value'), type=float)
        self._value = stored
        self._temp_val = stored
        self.load_ui_settings(q_path)

    def create_ui_entry(self) -> QtWidgets.QWidget:
        """Build the labelled row with a double spin box at index 1."""
        container = SingleEntry.create_ui_entry(self)
        box = QtWidgets.QDoubleSpinBox()
        if 'range' in self.ui:
            box.setMinimum(float(self.ui['range'][0]))
            box.setMaximum(float(self.ui['range'][1]))
        box.setValue(self._value)
        box.valueChanged.connect(self._update_temp)
        container.layout().insertWidget(1, box)
        return container
class EnumEntry(SingleEntry):
    """Entry choosing one option from a fixed list, edited via a combo box.

    The stored value is the integer index into ``ui['enum_list']``.
    """

    @pyqtSlot(int)
    def _update_temp(self, val: int) -> None:
        # Stage the selected index until commit_value() is called.
        self._temp_val = val

    def write_settings(self, q_path: str) -> None:
        """Persist the index plus an 'enum' type tag for later reconstruction."""
        SingleEntry.write_settings(self, q_path)
        QSettings().setValue("%s/%s/%s" % (q_path, self.name, 'type'), 'enum')

    def load_entry(self, q_path: str) -> None:
        """Restore the stored index and the saved UI metadata."""
        stored = QSettings().value(
            "%s/%s/%s" % (q_path, self.name, 'value'), type=int)
        self._value = stored
        self._temp_val = stored
        self.load_ui_settings(q_path)

    def create_ui_entry(self) -> QtWidgets.QWidget:
        """Build the labelled row with a combo box listing the choices."""
        container = SingleEntry.create_ui_entry(self)
        combo = QtWidgets.QComboBox()
        combo.addItems(self.ui['enum_list'])
        combo.setCurrentIndex(self._value)
        combo.currentIndexChanged.connect(self._update_temp)
        container.layout().insertWidget(1, combo)
        return container
class ArrayEntry(SingleEntry):
    """Base class for entries whose value is a numpy array.

    Overrides the change detection of SingleEntry, whose scalar ``!=``
    comparison is ambiguous for arrays.
    """

    def __init__(self, name: str, ui_dict: Dict[str, str] = None, parent_key: str = None):
        QObject.__init__(self, None)
        self.parent_key = parent_key
        self.name = name
        # Concrete subclasses assign the real array; initialise here so
        # the value setter never hits an AttributeError on a bare instance.
        self._value = None
        self._temp_val = None
        if ui_dict is None:
            self.ui = {}
        else:
            self.ui = ui_dict

    @property
    def value(self) -> np.array:
        """The committed array value of this entry."""
        return self._value

    @value.setter
    def value(self, value):
        # BUG FIX: the original used ``np.all(self._value != value)``, which
        # only fires when *every* element differs — a change to some elements
        # was silently dropped.  Any difference must trigger an update;
        # np.array_equal also handles mismatched shapes and None safely.
        if not np.array_equal(self._value, value):
            self._value = value
            self._temp_val = value
            self.update_val.emit(self.name, self.parent_key)
class ArrayIntEntry(ArrayEntry):
    """Array-of-integers entry, edited with one spin box per element.

    Persisted as a Python list under ``<q_path>/<name>/value`` with the
    type tag ``'array<int>'``.
    """

    def __init__(self, name: str, ui: Dict[str, str] = None, default_val: List[int] = None,
                 parent_key: str = None) -> None:
        ArrayEntry.__init__(self, name, ui, parent_key)
        if isinstance(default_val, list):
            self._value = np.array(default_val, dtype=int)
        else:
            self._value = default_val
        # BUG FIX: the original aliased _temp_val to the same ndarray as
        # _value, so a per-element UI edit mutated the committed value before
        # commit_value() was called.  Keep an independent working copy, as
        # load_entry() already does.
        self._temp_val = None if self._value is None else self._value.copy()

    def _update_temp(self, idx: int) -> Callable[[int], None]:
        """Return a slot that stages a new value for element *idx*."""
        @pyqtSlot(int)
        def f(val: int):
            self._temp_val[idx] = val
        return f

    def write_settings(self, q_path: str) -> None:
        """Persist the array (as a list), its type tag and the UI metadata."""
        settings = QSettings()
        settings.setValue("%s/%s/%s" %
                          (q_path, self.name, 'value'), self._value.tolist())
        settings.setValue("%s/%s/%s" %
                          (q_path, self.name, 'type'), 'array<int>')
        self.write_ui_settings(q_path)

    def load_entry(self, q_path: str) -> None:
        """Restore the array and the UI metadata from QSettings."""
        settings = QSettings()
        val = settings.value("%s/%s/%s" % (q_path, self.name, 'value'))
        self._value = np.array(val, dtype=int)
        self._temp_val = self._value.copy()
        self.load_ui_settings(q_path)

    def create_ui_entry(self) -> QtWidgets.QWidget:
        """Build the labelled row with one spin box per array element."""
        widget = ArrayEntry.create_ui_entry(self)
        row_layout = QtWidgets.QHBoxLayout()
        for idx, val in enumerate(self._value):
            i_input = QtWidgets.QSpinBox()
            if 'range' in self.ui.keys():
                # 'range' is expected to hold one (min, max) pair per element.
                i_input.setMinimum(int(self.ui['range'][idx][0]))
                i_input.setMaximum(int(self.ui['range'][idx][1]))
            i_input.setValue(val)
            i_input.valueChanged.connect(self._update_temp(idx))
            row_layout.addWidget(i_input)
        widget.layout().insertLayout(1, row_layout)
        return widget
class ArrayFloatEntry(ArrayEntry):
    """Array-of-floats entry, edited with one double spin box per element.

    Persisted as a Python list under ``<q_path>/<name>/value`` with the
    type tag ``'array<float>'``.
    """

    def __init__(self, name: str, ui: Dict[str, str] = None, default_val: List[float] = None,
                 parent_key: str = None) -> None:
        ArrayEntry.__init__(self, name, ui, parent_key)
        if isinstance(default_val, list):
            # BUG FIX: was dtype=int, which silently truncated float defaults.
            self._value = np.array(default_val, dtype=float)
        else:
            self._value = default_val
        # BUG FIX: keep an independent working copy — the original aliased
        # _temp_val to _value, so UI edits mutated the committed value before
        # commit_value() was called.
        self._temp_val = None if self._value is None else self._value.copy()

    def _update_temp(self, idx: int) -> Callable[[float], None]:
        """Return a slot that stages a new value for element *idx*."""
        @pyqtSlot(float)
        def f(val):
            self._temp_val[idx] = val
        return f

    def write_settings(self, q_path: str) -> None:
        """Persist the array (as a list), its type tag and the UI metadata."""
        settings = QSettings()
        settings.setValue("%s/%s/%s" %
                          (q_path, self.name, 'value'), self._value.tolist())
        settings.setValue("%s/%s/%s" %
                          (q_path, self.name, 'type'), 'array<float>')
        self.write_ui_settings(q_path)

    def load_entry(self, q_path: str) -> None:
        """Restore the array and the UI metadata from QSettings."""
        settings = QSettings()
        val = settings.value("%s/%s/%s" % (q_path, self.name, 'value'))
        self._value = np.array(val, dtype=float)
        self._temp_val = self._value.copy()
        self.load_ui_settings(q_path)

    def create_ui_entry(self) -> QtWidgets.QWidget:
        """Build the labelled row with one double spin box per array element."""
        widget = ArrayEntry.create_ui_entry(self)
        row_layout = QtWidgets.QHBoxLayout()
        for idx, val in enumerate(self._value):
            # BUG FIX: was QtWidgets.QSpinBox, which only accepts integers
            # and made setMinimum/setMaximum/setValue truncate the floats.
            i_input = QtWidgets.QDoubleSpinBox()
            if 'range' in self.ui.keys():
                # 'range' is expected to hold one (min, max) pair per element.
                i_input.setMinimum(float(self.ui['range'][idx][0]))
                i_input.setMaximum(float(self.ui['range'][idx][1]))
            i_input.setValue(val)
            i_input.valueChanged.connect(self._update_temp(idx))
            row_layout.addWidget(i_input)
        widget.layout().insertLayout(1, row_layout)
        return widget
# Maps the ``type`` tag stored in QSettings by each entry's write_settings
# back to the entry class used to rebuild it on load.
MAP_TYPE = {
    'int': IntEntry,
    'float': FloatEntry,
    'enum': EnumEntry,
    'array<int>': ArrayIntEntry,
    'array<float>': ArrayFloatEntry
}
| [
"PyQt5.QtWidgets.QWidget",
"PyQt5.QtCore.pyqtSignal",
"PyQt5.QtWidgets.QSpinBox",
"PyQt5.QtWidgets.QDoubleSpinBox",
"PyQt5.QtWidgets.QComboBox",
"PyQt5.QtCore.QObject.__init__",
"PyQt5.QtCore.pyqtSlot",
"PyQt5.QtWidgets.QHBoxLayout",
"numpy.array",
"PyQt5.QtCore.QSettings",
"PyQt5.QtWidgets.QLab... | [((378, 398), 'PyQt5.QtCore.pyqtSignal', 'pyqtSignal', (['str', 'str'], {}), '(str, str)\n', (388, 398), False, 'from PyQt5.QtCore import QObject, QSettings, pyqtSlot, pyqtSignal\n'), ((1280, 1290), 'PyQt5.QtCore.pyqtSlot', 'pyqtSlot', ([], {}), '()\n', (1288, 1290), False, 'from PyQt5.QtCore import QObject, QSettings, pyqtSlot, pyqtSignal\n'), ((2729, 2742), 'PyQt5.QtCore.pyqtSlot', 'pyqtSlot', (['int'], {}), '(int)\n', (2737, 2742), False, 'from PyQt5.QtCore import QObject, QSettings, pyqtSlot, pyqtSignal\n'), ((3811, 3826), 'PyQt5.QtCore.pyqtSlot', 'pyqtSlot', (['float'], {}), '(float)\n', (3819, 3826), False, 'from PyQt5.QtCore import QObject, QSettings, pyqtSlot, pyqtSignal\n'), ((4911, 4924), 'PyQt5.QtCore.pyqtSlot', 'pyqtSlot', (['int'], {}), '(int)\n', (4919, 4924), False, 'from PyQt5.QtCore import QObject, QSettings, pyqtSlot, pyqtSignal\n'), ((532, 560), 'PyQt5.QtCore.QObject.__init__', 'QObject.__init__', (['self', 'None'], {}), '(self, None)\n', (548, 560), False, 'from PyQt5.QtCore import QObject, QSettings, pyqtSlot, pyqtSignal\n'), ((1492, 1503), 'PyQt5.QtCore.QSettings', 'QSettings', ([], {}), '()\n', (1501, 1503), False, 'from PyQt5.QtCore import QObject, QSettings, pyqtSlot, pyqtSignal\n'), ((1725, 1736), 'PyQt5.QtCore.QSettings', 'QSettings', ([], {}), '()\n', (1734, 1736), False, 'from PyQt5.QtCore import QObject, QSettings, pyqtSlot, pyqtSignal\n'), ((1980, 1991), 'PyQt5.QtCore.QSettings', 'QSettings', ([], {}), '()\n', (1989, 1991), False, 'from PyQt5.QtCore import QObject, QSettings, pyqtSlot, pyqtSignal\n'), ((2307, 2330), 'PyQt5.QtWidgets.QHBoxLayout', 'QtWidgets.QHBoxLayout', ([], {}), '()\n', (2328, 2330), False, 'from PyQt5 import QtWidgets\n'), ((2347, 2381), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (["self.ui['label']"], {}), "(self.ui['label'])\n", (2363, 2381), False, 'from PyQt5 import QtWidgets\n'), ((2617, 2636), 'PyQt5.QtWidgets.QWidget', 'QtWidgets.QWidget', ([], {}), '()\n', (2634, 2636), False, 
'from PyQt5 import QtWidgets\n'), ((2938, 2949), 'PyQt5.QtCore.QSettings', 'QSettings', ([], {}), '()\n', (2947, 2949), False, 'from PyQt5.QtCore import QObject, QSettings, pyqtSlot, pyqtSignal\n'), ((3092, 3103), 'PyQt5.QtCore.QSettings', 'QSettings', ([], {}), '()\n', (3101, 3103), False, 'from PyQt5.QtCore import QObject, QSettings, pyqtSlot, pyqtSignal\n'), ((3409, 3429), 'PyQt5.QtWidgets.QSpinBox', 'QtWidgets.QSpinBox', ([], {}), '()\n', (3427, 3429), False, 'from PyQt5 import QtWidgets\n'), ((4024, 4035), 'PyQt5.QtCore.QSettings', 'QSettings', ([], {}), '()\n', (4033, 4035), False, 'from PyQt5.QtCore import QObject, QSettings, pyqtSlot, pyqtSignal\n'), ((4180, 4191), 'PyQt5.QtCore.QSettings', 'QSettings', ([], {}), '()\n', (4189, 4191), False, 'from PyQt5.QtCore import QObject, QSettings, pyqtSlot, pyqtSignal\n'), ((4499, 4525), 'PyQt5.QtWidgets.QDoubleSpinBox', 'QtWidgets.QDoubleSpinBox', ([], {}), '()\n', (4523, 4525), False, 'from PyQt5 import QtWidgets\n'), ((5120, 5131), 'PyQt5.QtCore.QSettings', 'QSettings', ([], {}), '()\n', (5129, 5131), False, 'from PyQt5.QtCore import QObject, QSettings, pyqtSlot, pyqtSignal\n'), ((5275, 5286), 'PyQt5.QtCore.QSettings', 'QSettings', ([], {}), '()\n', (5284, 5286), False, 'from PyQt5.QtCore import QObject, QSettings, pyqtSlot, pyqtSignal\n'), ((5592, 5613), 'PyQt5.QtWidgets.QComboBox', 'QtWidgets.QComboBox', ([], {}), '()\n', (5611, 5613), False, 'from PyQt5 import QtWidgets\n'), ((5993, 6021), 'PyQt5.QtCore.QObject.__init__', 'QObject.__init__', (['self', 'None'], {}), '(self, None)\n', (6009, 6021), False, 'from PyQt5.QtCore import QObject, QSettings, pyqtSlot, pyqtSignal\n'), ((6314, 6342), 'numpy.all', 'np.all', (['(self._value != value)'], {}), '(self._value != value)\n', (6320, 6342), True, 'import numpy as np\n'), ((6969, 6982), 'PyQt5.QtCore.pyqtSlot', 'pyqtSlot', (['int'], {}), '(int)\n', (6977, 6982), False, 'from PyQt5.QtCore import QObject, QSettings, pyqtSlot, pyqtSignal\n'), ((7134, 7145), 
'PyQt5.QtCore.QSettings', 'QSettings', ([], {}), '()\n', (7143, 7145), False, 'from PyQt5.QtCore import QObject, QSettings, pyqtSlot, pyqtSignal\n'), ((7477, 7488), 'PyQt5.QtCore.QSettings', 'QSettings', ([], {}), '()\n', (7486, 7488), False, 'from PyQt5.QtCore import QObject, QSettings, pyqtSlot, pyqtSignal\n'), ((7583, 7607), 'numpy.array', 'np.array', (['val'], {'dtype': 'int'}), '(val, dtype=int)\n', (7591, 7607), True, 'import numpy as np\n'), ((7814, 7837), 'PyQt5.QtWidgets.QHBoxLayout', 'QtWidgets.QHBoxLayout', ([], {}), '()\n', (7835, 7837), False, 'from PyQt5 import QtWidgets\n'), ((8818, 8833), 'PyQt5.QtCore.pyqtSlot', 'pyqtSlot', (['float'], {}), '(float)\n', (8826, 8833), False, 'from PyQt5.QtCore import QObject, QSettings, pyqtSlot, pyqtSignal\n'), ((8980, 8991), 'PyQt5.QtCore.QSettings', 'QSettings', ([], {}), '()\n', (8989, 8991), False, 'from PyQt5.QtCore import QObject, QSettings, pyqtSlot, pyqtSignal\n'), ((9325, 9336), 'PyQt5.QtCore.QSettings', 'QSettings', ([], {}), '()\n', (9334, 9336), False, 'from PyQt5.QtCore import QObject, QSettings, pyqtSlot, pyqtSignal\n'), ((9431, 9457), 'numpy.array', 'np.array', (['val'], {'dtype': 'float'}), '(val, dtype=float)\n', (9439, 9457), True, 'import numpy as np\n'), ((9664, 9687), 'PyQt5.QtWidgets.QHBoxLayout', 'QtWidgets.QHBoxLayout', ([], {}), '()\n', (9685, 9687), False, 'from PyQt5 import QtWidgets\n'), ((2482, 2515), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (["self.ui['unit']"], {}), "(self.ui['unit'])\n", (2498, 2515), False, 'from PyQt5 import QtWidgets\n'), ((6774, 6806), 'numpy.array', 'np.array', (['default_val'], {'dtype': 'int'}), '(default_val, dtype=int)\n', (6782, 6806), True, 'import numpy as np\n'), ((7908, 7928), 'PyQt5.QtWidgets.QSpinBox', 'QtWidgets.QSpinBox', ([], {}), '()\n', (7926, 7928), False, 'from PyQt5 import QtWidgets\n'), ((8623, 8655), 'numpy.array', 'np.array', (['default_val'], {'dtype': 'int'}), '(default_val, dtype=int)\n', (8631, 8655), True, 'import numpy as np\n'), 
((9758, 9778), 'PyQt5.QtWidgets.QSpinBox', 'QtWidgets.QSpinBox', ([], {}), '()\n', (9776, 9778), False, 'from PyQt5 import QtWidgets\n')] |
import os
from pathlib import Path
import joblib
import pandas as pd
import numpy as np
from multiprocessing import Pool
from collections import defaultdict
import functools
import re
import sys
sys.path.insert(0, './code')
from utils import DataLogger # noqa: E402
class DataNotFoundException(Exception):
    """Raised by feature loaders when ``fit`` runs before ``update_data``."""
    pass
def get_time_split(df):
    """Split *df* on its ``dt`` column into the named time windows.

    Returns a dict mapping window name -> filtered frame:
    months 1-12, months 13-16, and two three-month windows feeding the
    prediction targets for months 22 and 23.
    """
    dt = df['dt']
    window_masks = {
        "one_to_twelve": dt <= 12,
        "twelve_to_sixteen": (dt > 12) & (dt <= 16),
        "prev_three_months_22": (dt > 18) & (dt <= 21),
        "prev_three_months_23": (dt > 19) & (dt <= 22),
    }
    return {name: df[mask] for name, mask in window_masks.items()}
def get_merge_dict():
    """Return, per target month, the window names whose features get merged.

    Every target shares the two long windows and adds its own
    three-month window.
    """
    shared = ["one_to_twelve", "twelve_to_sixteen"]
    return {dt: shared + ["prev_three_months_%d" % dt] for dt in (22, 23)}
def get_time_split_result(a_func):
    """Decorator for per-window feature methods.

    ``a_func(self, df)`` must return a list of ``(name, value)`` pairs
    computed on a single time window.  The wrapper evaluates it on every
    window from ``get_time_split`` and, for each target month listed in
    ``get_merge_dict``, collects one value per feature from that month's
    three-month window plus the two shared windows (name-suffixed ``_12``
    and ``_16``).

    Returns a dict mapping feature name -> list of values, one entry per
    target month.
    """
    @functools.wraps(a_func)
    def wrapper(self, df):
        r_dict = defaultdict(list)
        # Run the wrapped feature function once per time window.
        df_dict = get_time_split(df)
        use_dict = {key: a_func(self, df_dict[key]) for key in df_dict.keys()}
        merge_dict = get_merge_dict()
        for dt in merge_dict.keys():
            # (name, value) pairs from the two shared windows ...
            vals_12 = use_dict[merge_dict[dt][0]]
            vals_16 = use_dict[merge_dict[dt][1]]
            # ... and from the month-specific three-month window.
            vals_prevs = use_dict[merge_dict[dt][2]]
            # The zip aligns features by position, so a_func must emit its
            # pairs in the same order for every window.
            for val, val_12, val_16 in zip(vals_prevs, vals_12, vals_16):
                name = val[0]
                name_12 = "{}_12".format(name)
                name_16 = "{}_16".format(name)
                r_dict[name].append(val[1])
                r_dict[name_12].append(val_12[1])
                r_dict[name_16].append(val_16[1])
        return r_dict
    return wrapper
class DataLoader():
    """Base I/O helper resolving ``input``/``output``/``model`` dirs under cwd."""

    def __init__(self):
        cwd = Path(os.path.abspath(os.getcwd()))
        self.output_path = cwd / 'output'
        self.input_path = cwd / 'input'
        self.model_path = cwd / 'model'

    def save_data(self, cls, data_name, message):
        """Persist *cls* to ``output/<data_name>`` via joblib, logging *message*."""
        logger = DataLogger()
        logger.save_data("Save data {} is generated from {}".format(
            data_name, message))
        joblib.dump(cls, self.output_path / data_name)
        logger.save_data("{} is sucessfully saved".format(data_name))

    def _load(self, base_path, data_name, data_type, **kwargs):
        # Shared reader behind load_data/load_result.
        if data_type == 'joblib':
            return joblib.load(base_path / data_name, **kwargs)
        if data_type == 'csv':
            return pd.read_csv(base_path / data_name, **kwargs)
        # BUG FIX: an unknown data_type previously fell through and raised an
        # opaque UnboundLocalError on ``return data``; fail loudly instead.
        raise ValueError("Unsupported data_type: {!r}".format(data_type))

    def load_data(self, data_name, data_type='joblib', **kwargs):
        """Load ``input/<data_name>`` as joblib (default) or csv.

        :raises ValueError: if *data_type* is not 'joblib' or 'csv'.
        """
        return self._load(self.input_path, data_name, data_type, **kwargs)

    def load_result(self, data_name, data_type='joblib', **kwargs):
        """Load ``output/<data_name>`` as joblib (default) or csv.

        :raises ValueError: if *data_type* is not 'joblib' or 'csv'.
        """
        return self._load(self.output_path, data_name, data_type, **kwargs)
class FeatLoader(DataLoader):
    """Base class for feature loaders: shared category/column-name constants."""

    def __init__(self):
        super(FeatLoader, self).__init__()
        # Shop tags whose values must be predicted downstream.
        self.required_cate = ('2', '6', '10', '12', '13', '15', '18', '19',
                              '21', '22', '25', '26', '36', '37', '39', '48')
        # All 48 numbered shop tags plus the catch-all 'other'.
        self.shop_cate = [str(i + 1) for i in range(48)] + ['other']
        self.shop_amt = [
            "shop_{}_amt".format(shop_tag) for shop_tag in self.shop_cate
        ]
        self.shop_cnt = [
            "shop_{}_cnt".format(shop_tag) for shop_tag in self.shop_cate
        ]
        # 14 numbered card categories plus 'other'.
        self.card_cate = [str(i + 1) for i in range(14)] + ['other']
        self.card_amt = [
            "card_{}_txn_amt".format(card_cate) for card_cate in self.card_cate
        ]
        self.card_cnt = [
            "card_{}_txn_cnt".format(card_cate) for card_cate in self.card_cate
        ]
        # Progress counter used by subclasses' update_a_df logging.
        self.count = 0
        self.profile_cate = [
            "masts",
            "educd",
            "trdtp",
            "naty",
            "poscd",
            "cuorg",
            "primary_card",
            "slam",
            "age",
            "gender_code",
        ]
        self.basic_info = [
            'masts',
            'educd',
            'trdtp',
            'naty',
            'poscd',
            'cuorg',
            'primary_card',
            'age',
            'gender_code',
        ]
        # Months observed in the raw data (idiom: was ``[dt for dt in range(1, 25)]``).
        self.dts = list(range(1, 25))

    def update_data(self, data):
        """Store a defensive copy of *data* for a subsequent ``fit``."""
        self.data = data.copy()
class AmtFeatLoader(FeatLoader):
    """Builds one row per user with per-month, per-shop-tag transaction amounts."""

    def __init__(self):
        super(AmtFeatLoader, self).__init__()
        self.get_feat_config()

    def update_a_df(self, df):
        """Build the feature dict for one user's history frame *df*."""
        result = {'chid': df['chid'].iloc[0]}
        # Progress logging every 10000 users.
        if self.count % 10000 == 0:
            print(result)
        self.count += 1
        for feat_func in self.feat_config:
            result.update(feat_func(df))
        # result = pd.DataFrame(result)
        return result

    def get_feat_config(self):
        """Register the feature functions run by ``update_a_df``."""
        self.feat_config = {self.get_amt_by_months}

    def get_amt_by_months(self, df):
        """Return ``shop_<tag>_amt_<dt>`` -> amount, zero-filled for all 24 months."""
        def get_shop_amt_cate(x):
            # Map a (dt, shop_tag) row to its wide-format column name.
            dt, shop_tag = x
            name = "shop_{}_amt_{}".format(shop_tag, dt)
            return name
        result = {}
        # Pre-fill every (tag, month) combination with 0 so absent months
        # still appear in the output row.
        for dt in range(1, 25):
            for shop_amt_cate in self.shop_amt:
                result.update({shop_amt_cate + '_{}'.format(dt): 0})
        if df.empty:
            return result
        else:
            df['shop_amt_cate'] = df[['dt',
                                      'shop_tag']].apply(get_shop_amt_cate,
                                                         axis=1)
            amt_dict = {
                shop_amt_cate: amt
                for amt, shop_amt_cate in zip(df['txn_amt'],
                                              df['shop_amt_cate'])
            }
            result.update(amt_dict)
            return result

    def fit(self):
        """Compute the wide feature table into ``self.feats``.

        :raises DataNotFoundException: if ``update_data`` was never called.
        """
        if not hasattr(self, 'data'):
            raise DataNotFoundException("Data not found! Please update data")
        df_group = self.data.groupby(['chid'])
        df_group = [df[1] for df in df_group]
        # Parallel per-user processing; maxtasksperchild bounds worker memory.
        pool = Pool(8, maxtasksperchild=1000)
        feat_group = pool.map(self.update_a_df, df_group)
        pool.close()
        self.feats = pd.DataFrame(feat_group)
class ProfileFeatLoader(FeatLoader):
    """Builds per-user profile features, two rows per user (target months 22/23)."""

    def __init__(self):
        super(ProfileFeatLoader, self).__init__()
        self.get_feat_config()
        self.card_cnt_pct = [
            "card_{}_cnt_pct".format(cate) for cate in self.card_cate
        ]
        self.card_avg_amt = [
            "card_{}_avg_amt".format(cate) for cate in self.card_cate
        ]

    def fit(self):
        """Compute profile features into ``self.feats``.

        :raises DataNotFoundException: if ``update_data`` was never called.
        """
        # run 500000 times loop
        if not hasattr(self, 'data'):
            raise DataNotFoundException("Data not found! Please update data")
        self.data = self.get_early_calculation(self.data)
        df_group = self.data.groupby(['chid'])
        df_group = [df[1] for df in df_group]
        pool = Pool(8, maxtasksperchild=1000)
        feat_group = pool.map(self.update_a_df, df_group)
        pool.close()
        self.feats = pd.concat(feat_group)

    def get_early_calculation(self, df):
        """Add derived ratio/average columns to *df* before grouping."""
        df['avg_amt'] = df['txn_amt'] / df['txn_cnt']
        df['offline_cnt_pct'] = df['txn_cnt'] / (df['domestic_offline_cnt'] +
                                                 df['overseas_offline_cnt'])
        df['online_cnt_pct'] = df['txn_cnt'] / (df['domestic_online_cnt'] +
                                                df['overseas_online_cnt'])
        df['domestic_cnt_pct'] = df['txn_cnt'] / (df['domestic_offline_cnt'] +
                                                  df['domestic_online_cnt'])
        df['overseas_cnt_pct'] = df['txn_cnt'] / (df['overseas_offline_cnt'] +
                                                  df['overseas_online_cnt'])
        # generate card amt
        for cate in self.card_cate:
            df['card_{}_txn_amt'.format(
                cate)] = df['card_{}_txn_amt_pct'.format(cate)] * df['txn_amt']
        # generate card cnt ratio
        for cate in self.card_cate:
            new_key = "card_{}_cnt_pct".format(cate)
            cnt_key = "card_{}_txn_cnt".format(cate)
            df[new_key] = df[cnt_key] / df['txn_cnt']
        # generate the avg for card cate
        for cate in self.card_cate:
            new_key = "card_{}_avg_amt".format(cate)
            amt_key = "card_{}_txn_amt".format(cate)
            cnt_key = "card_{}_txn_cnt".format(cate)
            df[new_key] = df[amt_key] / df[cnt_key]
        return df

    def update_a_df(self, df):
        """Build a two-row frame (dt 22 and 23) for one user's history *df*."""
        result = {
            'dt': [22, 23],
            'chid': [df['chid'].iloc[0]] * 2,
        }
        if self.count % 10000 == 0:
            print(result)
        self.count += 1
        for feat_func in self.feat_config:
            result.update(feat_func(df))
        result = pd.DataFrame(result)
        return result

    def get_feat_config(self):
        """Register the feature functions run by ``update_a_df``."""
        self.feat_config = {
            # month the card was first used / how long since first use
            self.get_start_use_dt,
            # how many shop tags / how many important shop tags were used
            self.get_how_many_tags,
            # # basic info
            self.get_basic_profile,
        }

    def get_basic_profile(self, df):
        """Return the static profile attributes, or -1 placeholders when empty."""
        if df.empty:
            # BUG FIX: was ``[-1] * 3`` — update_a_df emits exactly two rows
            # (dt 22 and 23, cf. get_start_use_dt's ``[-1] * 2``), so a
            # length-3 column made pd.DataFrame raise
            # "arrays must all be same length".
            r_dict = {
                profile_cate: [-1] * 2
                for profile_cate in self.profile_cate
            }
        else:
            r_dict = {
                profile_cate: df[profile_cate].iloc[0]
                for profile_cate in self.profile_cate
            }
        return r_dict

    @get_time_split_result
    def get_how_many_tags(self, df):
        """Per time window: number of distinct shop tags (all / important only)."""
        if df.empty:
            r_list = [("how_many_tag", -1), ("how_many_tag_imp", -1)]
        else:
            how_many_tag = len(df['shop_tag'].unique())
            how_many_tag_imp = len(df[df['shop_tag'].isin(
                self.required_cate)]['shop_tag'].unique())
            r_list = [("how_many_tag", how_many_tag),
                      ("how_many_tag_imp", how_many_tag_imp)]
        return r_list

    def get_start_use_dt(self, df):
        """First active month and its distance to the target months 24/25."""
        if df.empty:
            r_dict = {"start_dt": [-1] * 2, "how_long_dt": [-1] * 2}
        else:
            start_dt = df['dt'].iloc[0]
            how_long_dt = np.array([24, 25]) - np.array([start_dt] * 2)
            r_dict = {
                "start_dt": [start_dt] * 2,
                "how_long_dt": list(how_long_dt)
            }
        return r_dict
class CntFeatLoader(FeatLoader):
    """Builds one row per user with per-month, per-shop-tag transaction counts.

    Count-column twin of ``AmtFeatLoader``.
    """

    def __init__(self):
        super(CntFeatLoader, self).__init__()
        self.get_feat_config()

    def get_feat_config(self):
        """Register the feature functions run by ``update_a_df``."""
        self.feat_config = {self.get_cnt_by_months}

    def get_cnt_by_months(self, df):
        """Return ``shop_<tag>_cnt_<dt>`` -> count, zero-filled for all 24 months."""
        def get_shop_cnt_cate(x):
            # Map a (dt, shop_tag) row to its wide-format column name.
            dt, shop_tag = x
            name = "shop_{}_cnt_{}".format(shop_tag, dt)
            return name
        result = {}
        # Pre-fill every (tag, month) combination with 0 so absent months
        # still appear in the output row.
        for dt in range(1, 25):
            for shop_cnt_cate in self.shop_cnt:
                result.update({shop_cnt_cate + '_{}'.format(dt): 0})
        if df.empty:
            return result
        else:
            df['shop_cnt_cate'] = df[['dt',
                                      'shop_tag']].apply(get_shop_cnt_cate,
                                                         axis=1)
            cnt_dict = {
                shop_cnt_cate: cnt
                for cnt, shop_cnt_cate in zip(df['txn_cnt'],
                                              df['shop_cnt_cate'])
            }
            result.update(cnt_dict)
            return result

    def update_a_df(self, df):
        """Build the feature dict for one user's history frame *df*."""
        result = {'chid': df['chid'].iloc[0]}
        # Progress logging every 10000 users.
        if self.count % 10000 == 0:
            print(result)
        self.count += 1
        for feat_func in self.feat_config:
            result.update(feat_func(df))
        return result

    def fit(self):
        """Compute the wide feature table into ``self.feats``.

        :raises DataNotFoundException: if ``update_data`` was never called.
        """
        if not hasattr(self, 'data'):
            raise DataNotFoundException("Data not found! Please update data")
        df_group = self.data.groupby(['chid'])
        df_group = [df[1] for df in df_group]
        pool = Pool(8, maxtasksperchild=1000)
        feat_group = pool.map(self.update_a_df, df_group)
        pool.close()
        self.feats = pd.DataFrame(feat_group)
class RankTopFeatLoader(FeatLoader):
    """Derives per-month top-3 shop-tag features from a wide amt/cnt table."""

    def __init__(self):
        super(RankTopFeatLoader, self).__init__()
        self.get_feat_config()
        # Column position -> shop tag, matching the column order produced by
        # the wide tables (shop_cate order).
        self.shop_cate_map = {
            i: a_shop_cate
            for i, a_shop_cate in enumerate(self.shop_cate)
        }
        self.imp_cate_map = {
            i: imp_cate
            for i, imp_cate in enumerate(self.required_cate)
        }

    def update_a_df(self, df):
        """Compute all top-N features for one month's column slice *df*."""
        print(df.columns[0])
        result = []
        for feat_func in self.feat_config:
            result.append(feat_func(df))
        tops = pd.concat(result, axis=1)
        return tops

    def get_feat_config(self):
        """Register the feature functions run by ``update_a_df``."""
        self.feat_config = [
            self.get_tops_by_months,
            self.get_imp_tops_by_months,
        ]

    def get_tops_by_months(self, df):
        """Top-3 shop tags per user for the month encoded in the column names."""
        # The month is the trailing number of the first column name.
        dt = df.columns[0].split('_')[-1]
        # argsort per row: the last three positions are the indices of the
        # three largest values (ascending order, hence top3/top2/top1 below).
        top3 = df.apply(lambda x: np.argsort(x), axis=1).iloc[:, -3:]
        top3.columns = [
            'top3_{}'.format(dt), 'top2_{}'.format(dt), 'top1_{}'.format(dt)
        ]
        for col in top3.columns:
            top3[col] = top3[col].map(self.shop_cate_map)
        # Number of tags with a positive value this month.
        top3['how_many_cate_{}'.format(dt)] = df.gt(0).sum(axis=1)
        # Blank out rank slots that have no real (positive) backing value.
        top3.loc[
            top3['how_many_cate_{}'.format(dt)] == 0,
            ['top3_{}'.format(dt), 'top2_{}'.format(dt), 'top1_{}'.
             format(dt)]] = "-1"
        top3.loc[top3['how_many_cate_{}'.format(dt)] == 1,
                 ['top3_{}'.format(dt), 'top2_{}'.format(dt)]] = "-1"
        top3.loc[top3['how_many_cate_{}'.format(dt)] == 2,
                 ['top3_{}'.format(dt)]] = "-1"
        return top3

    def get_imp_tops_by_months(self, df):
        """Top-3 among the important tags only, same scheme as get_tops_by_months."""
        dt = df.columns[0].split('_')[-1]
        # Recover 'amt'/'cnt' (the fetch type) from a column name such as
        # ``shop_12_amt_7`` or ``shop_other_cnt_3``.
        reg = r"shop_(\d+_|other_)(.+)_\d+"
        fetch_type = re.findall(reg, df.columns[0])[0][1]
        imp_cols = [
            "shop_{}_{}_{}".format(a_cate, fetch_type, dt)
            for a_cate in self.required_cate
        ]
        imp_df = df[imp_cols].copy()
        imp_top3 = imp_df.apply(lambda x: np.argsort(x), axis=1).iloc[:, -3:]
        imp_top3.columns = [
            'imp_top3_{}'.format(dt), 'imp_top2_{}'.format(dt),
            'imp_top1_{}'.format(dt)
        ]
        for col in imp_top3.columns:
            imp_top3[col] = imp_top3[col].map(self.imp_cate_map)
        imp_top3['how_many_cate_imp_{}'.format(dt)] = imp_df.gt(0).sum(axis=1)
        # Blank out rank slots that have no real (positive) backing value.
        imp_top3.loc[imp_top3["how_many_cate_imp_{}".format(dt)] == 0, [
            "imp_top3_{}".format(dt), "imp_top2_{}".format(dt), "imp_top1_{}".
            format(dt)
        ]] = "-1"
        imp_top3.loc[
            imp_top3["how_many_cate_imp_{}".format(dt)] == 1,
            ["imp_top3_{}".format(dt), "imp_top2_{}".format(dt)]] = "-1"
        imp_top3.loc[imp_top3['how_many_cate_imp_{}'.format(dt)] == 2,
                     ['imp_top3_{}'.format(dt)]] = "-1"
        return imp_top3

    def fit(self):
        """Compute top-N features for all 24 months into ``self.feats``.

        :raises DataNotFoundException: if ``update_data`` was never called.
        """
        if not hasattr(self, 'data'):
            raise DataNotFoundException("Data not found! Please update data")
        feats = [self.data[['chid']].reset_index(drop=True)]
        df = self.data.drop("chid", axis=1).reset_index(drop=True)
        cols = list(df.columns)
        # 49 columns (48 tags + 'other') per month; one slice per month.
        cols_group = [cols[dt * 49:(1 + dt) * 49] for dt in range(24)]
        df_group = [df[col_seg] for col_seg in cols_group]
        pool = Pool(4, maxtasksperchild=1000)
        feat_group = pool.map(self.update_a_df, df_group)
        pool.close()
        self.feats = pd.concat(feats + feat_group, axis=1)
class ProfileShopFeatLoader(FeatLoader):
    """Derives per-month amount ranks of the important shop tags per user."""

    def __init__(self):
        super(ProfileShopFeatLoader, self).__init__()
        self.get_feat_config()

    def update_data(self, data):
        """Reshape the wide table into one long (chid, tag, amt) frame per month."""
        self.data = {}
        for dt in range(1, 25):
            use_cols = []
            for shop_tag in self.required_cate:
                col = "shop_{}_amt_{}".format(shop_tag, dt)
                use_cols.append(col)
            tmp_df = data[['chid'] + use_cols]
            tmp_df.columns = ['chid'] + list(self.required_cate)
            tmp_df['dt'] = dt
            tmp_df['query_id'] = tmp_df['chid'].apply(
                lambda x: str(x)) + tmp_df['dt'].apply(lambda x: str(x))
            tmp_df = tmp_df.melt(id_vars=['chid', 'dt', 'query_id'],
                                 var_name='shop_tag',
                                 value_name='txn_amt')
            self.data.update({dt: tmp_df})

    def get_feat_config(self):
        """Register the feature functions used by this loader."""
        self.feat_config = {self.get_rank_by_months}

    def get_rank_by_months(self, dt):
        """Rank the 16 important tags per user by amount for month *dt*.

        Rank 1 is the largest amount; zero-amount tags get rank 0.
        """
        print(dt)
        df = self.data[dt]
        df_group = df.groupby('query_id')
        df_group = [df[1] for df in df_group]
        df_group = [
            df.sort_values(by='txn_amt', ascending=False) for df in df_group
        ]
        df = pd.concat(df_group)
        # 16 tags per user; NOTE(review): 500000 hard-codes the user count —
        # presumably the fixed dataset size; confirm against the raw data.
        df['rank_{}'.format(dt)] = list(range(1, 17)) * 500000
        # Zero-amount tags carry no information: force their rank to 0.
        df['rank_{}'.format(dt)] = df[['rank_{}'.format(dt)
                                       ]].mask(df['txn_amt'] == 0, 0)
        df = df[['chid', 'shop_tag', 'rank_{}'.format(dt)]]
        return df

    def fit(self):
        """Compute the rank features for all 24 months into ``self.feats``."""
        feat_group = [self.get_rank_by_months(1)]
        for dt in range(2, 25):
            feat_group.append(self.get_rank_by_months(dt))
        self.feats = pd.concat(feat_group, axis=1)
class FreqFeatLoader(FeatLoader):
    """Builds buy-frequency features from a wide per-month table.

    Produces three aligned frames: two training views (targets dt 23 and 24)
    and one test view (dt 25), each with column names rebased so month
    offsets are relative to the target month.

    :param prefix: wide-column infix, e.g. 'amt' or 'cnt'
    :param fetch_type: 'total' for all shop tags, anything else for the
        important tags only; used as a suffix in output column names
    """

    def __init__(self, prefix, fetch_type):
        super(FreqFeatLoader, self).__init__()
        self.prefix = prefix
        self.fetch_type = fetch_type

    def update_data(self, data):
        """Slice *data* into train_23 / train_24 / test views with rebased columns."""
        self.get_cols(self.prefix, self.fetch_type)
        # Each view drops the months outside its 22-month history window.
        self.train_23 = data.drop(self.cols_24 + self.cols_23,
                                  axis=1).reset_index(drop=True)
        self.train_24 = data.drop(self.cols_1 + self.cols_24,
                                  axis=1).reset_index(drop=True)
        self.test = data.drop(self.cols_1 + self.cols_2,
                              axis=1).reset_index(drop=True)
        # Rebase month suffixes relative to each view's target month.
        self.train_23.columns = self.get_new_cols(self.train_23, 23)
        self.train_24.columns = self.get_new_cols(self.train_24, 24)
        self.test.columns = self.get_new_cols(self.test, 25)
        self.train_23['dt'] = 23
        self.train_24['dt'] = 24
        self.test['dt'] = 25
        # query_id = str(chid) + str(dt), the per-row join key.
        self.train_23['query_id'] = self.train_23['chid'].apply(
            lambda x: str(x)) + self.train_23['dt'].apply(lambda x: str(x))
        self.train_24['query_id'] = self.train_24['chid'].apply(
            lambda x: str(x)) + self.train_24['dt'].apply(lambda x: str(x))
        self.test['query_id'] = self.test['chid'].apply(
            lambda x: str(x)) + self.test['dt'].apply(lambda x: str(x))

    def get_cols(self, prefix='amt', fetch_type='total'):
        """Precompute the month-specific column-name lists used by update_data.

        NOTE(review): the *prefix* parameter is accepted but ignored — the
        format strings below use ``self.prefix`` instead; confirm this is
        intentional before calling with a prefix different from the instance's.
        """
        if fetch_type == 'total':
            use_cates = self.shop_cate
        else:
            use_cates = self.required_cate
        self.cols_24 = [
            "shop_{}_{}_24".format(shop_cate, self.prefix)
            for shop_cate in use_cates
        ]
        self.cols_23 = [
            "shop_{}_{}_23".format(shop_cate, self.prefix)
            for shop_cate in use_cates
        ]
        self.cols_1 = [
            "shop_{}_{}_1".format(shop_cate, self.prefix)
            for shop_cate in use_cates
        ]
        self.cols_2 = [
            "shop_{}_{}_2".format(shop_cate, self.prefix)
            for shop_cate in use_cates
        ]
        # Columns for the last three history months (offsets -3..-1).
        self.cols_less_3 = []
        for use_cate in use_cates:
            for dt in range(1, 4):
                col = "shop_{}_{}_{}".format(use_cate, self.prefix, dt - 4)
                self.cols_less_3.append(col)

    def get_new_cols(self, df, dt):
        """Return df's columns with month suffixes rebased to offsets from *dt*."""
        new_cols = ['chid']
        for col in df.columns[1:]:
            # Strip the trailing month number and append the relative offset.
            reg = r"(.+_)\d+"
            c_dt = col.split('_')[-1]
            new_dt = int(c_dt) - dt
            n_col = re.findall(reg, col)[0]
            n_col = n_col + str(new_dt)
            new_cols.append(n_col)
        return new_cols

    def get_freq_buy(self, df, start_dt=1):
        """Fraction of the 22 history months in which each important tag was active."""
        results = {
            'chid': df['chid'].to_list(),
            'dt': df['dt'].to_list(),
            'query_id': df['query_id'].to_list()
        }
        for shop_cate in self.required_cate:
            cols = [
                "shop_{}_{}_{}".format(shop_cate, self.prefix, dt - 23)
                for dt in range(start_dt, 23)
            ]
            # Count non-zero months; 22 is the history-window length.
            shop_ratio = df[cols][df[cols] != 0].count(1) / 22
            results[shop_cate] = shop_ratio
        feats = pd.DataFrame(results)
        feats = feats.melt(id_vars=['chid', 'dt', 'query_id'],
                           var_name='shop_tag',
                           value_name='freq_buy')
        return feats

    def get_relative_freq_buy(self, df, start_dt=1):
        """Per-tag activity ratio normalised by the user's overall activity ratio."""
        # NOTE(review): 1078 is presumably 49 tags * 22 months, the number of
        # value columns sliced by iloc[:, 1:-2]; verify against the view shape.
        total_ratio = (df.iloc[:, 1:-2] != 0.0).sum(axis=1) / 1078
        results = {
            'chid': df['chid'].to_list(),
            'dt': df['dt'].to_list(),
            'query_id': df['query_id'].to_list()
        }
        for shop_cate in self.required_cate:
            cols = [
                "shop_{}_{}_{}".format(shop_cate, self.prefix, dt - 23)
                for dt in range(start_dt, 23)
            ]
            shop_ratio = df[cols][df[cols] != 0].count(axis=1) / 22
            results[shop_cate] = shop_ratio / total_ratio
        feats = pd.DataFrame(results)
        feats = feats.melt(id_vars=['chid', 'dt', 'query_id'],
                           var_name='shop_tag',
                           value_name="relative_freq_buy_{}".format(
                               self.fetch_type))
        return feats

    def get_freq_value(self, df, start_dt=1):
        """Per-tag mean non-zero value normalised by the user's overall mean."""
        total_ratio = df.iloc[:, 1:-2][df.iloc[:, 1:-2] != 0.0].mean(axis=1)
        results = {
            'chid': df['chid'].to_list(),
            'dt': df['dt'].to_list(),
            'query_id': df['query_id'].to_list()
        }
        for shop_cate in self.required_cate:
            cols = [
                "shop_{}_{}_{}".format(shop_cate, self.prefix, dt - 23)
                for dt in range(start_dt, 23)
            ]
            shop_ratio = df[cols][df[cols] != 0].mean(axis=1)
            results[shop_cate] = shop_ratio / total_ratio
        feats = pd.DataFrame(results)
        feats = feats.melt(id_vars=['chid', 'dt', 'query_id'],
                           var_name='shop_tag',
                           value_name='relative_{}_buy_{}'.format(
                               self.prefix, self.fetch_type))
        return feats

    def fit_transform_single(self, func, start_dt=1):
        """Apply feature function *func* to all three views.

        Returns ``(feats_train, feats_test)`` with the two training views
        stacked and the chid/dt helper columns dropped.
        """
        feats_23 = func(self.train_23, start_dt)
        feats_24 = func(self.train_24, start_dt)
        feats_test = func(self.test, start_dt)
        feats_train = pd.concat([
            feats_23.drop(['chid', 'dt'], axis=1),
            feats_24.drop(['chid', 'dt'], axis=1)
        ])
        feats_test = feats_test.drop(['chid', 'dt'], axis=1)
        return feats_train, feats_test
| [
"sys.path.insert",
"pandas.read_csv",
"functools.wraps",
"os.getcwd",
"utils.DataLogger",
"numpy.array",
"numpy.argsort",
"collections.defaultdict",
"multiprocessing.Pool",
"pandas.concat",
"joblib.load",
"pandas.DataFrame",
"re.findall",
"joblib.dump"
] | [((199, 227), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""./code"""'], {}), "(0, './code')\n", (214, 227), False, 'import sys\n'), ((1670, 1693), 'functools.wraps', 'functools.wraps', (['a_func'], {}), '(a_func)\n', (1685, 1693), False, 'import functools\n'), ((1738, 1755), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (1749, 1755), False, 'from collections import defaultdict\n'), ((2813, 2825), 'utils.DataLogger', 'DataLogger', ([], {}), '()\n', (2823, 2825), False, 'from utils import DataLogger\n'), ((2936, 2982), 'joblib.dump', 'joblib.dump', (['cls', '(self.output_path / data_name)'], {}), '(cls, self.output_path / data_name)\n', (2947, 2982), False, 'import joblib\n'), ((6755, 6785), 'multiprocessing.Pool', 'Pool', (['(8)'], {'maxtasksperchild': '(1000)'}), '(8, maxtasksperchild=1000)\n', (6759, 6785), False, 'from multiprocessing import Pool\n'), ((6886, 6910), 'pandas.DataFrame', 'pd.DataFrame', (['feat_group'], {}), '(feat_group)\n', (6898, 6910), True, 'import pandas as pd\n'), ((7609, 7639), 'multiprocessing.Pool', 'Pool', (['(8)'], {'maxtasksperchild': '(1000)'}), '(8, maxtasksperchild=1000)\n', (7613, 7639), False, 'from multiprocessing import Pool\n'), ((7740, 7761), 'pandas.concat', 'pd.concat', (['feat_group'], {}), '(feat_group)\n', (7749, 7761), True, 'import pandas as pd\n'), ((9557, 9577), 'pandas.DataFrame', 'pd.DataFrame', (['result'], {}), '(result)\n', (9569, 9577), True, 'import pandas as pd\n'), ((12761, 12791), 'multiprocessing.Pool', 'Pool', (['(8)'], {'maxtasksperchild': '(1000)'}), '(8, maxtasksperchild=1000)\n', (12765, 12791), False, 'from multiprocessing import Pool\n'), ((12892, 12916), 'pandas.DataFrame', 'pd.DataFrame', (['feat_group'], {}), '(feat_group)\n', (12904, 12916), True, 'import pandas as pd\n'), ((13494, 13519), 'pandas.concat', 'pd.concat', (['result'], {'axis': '(1)'}), '(result, axis=1)\n', (13503, 13519), True, 'import pandas as pd\n'), ((16240, 16270), 'multiprocessing.Pool', 
'Pool', (['(4)'], {'maxtasksperchild': '(1000)'}), '(4, maxtasksperchild=1000)\n', (16244, 16270), False, 'from multiprocessing import Pool\n'), ((16371, 16408), 'pandas.concat', 'pd.concat', (['(feats + feat_group)'], {'axis': '(1)'}), '(feats + feat_group, axis=1)\n', (16380, 16408), True, 'import pandas as pd\n'), ((17690, 17709), 'pandas.concat', 'pd.concat', (['df_group'], {}), '(df_group)\n', (17699, 17709), True, 'import pandas as pd\n'), ((18163, 18192), 'pandas.concat', 'pd.concat', (['feat_group'], {'axis': '(1)'}), '(feat_group, axis=1)\n', (18172, 18192), True, 'import pandas as pd\n'), ((21363, 21384), 'pandas.DataFrame', 'pd.DataFrame', (['results'], {}), '(results)\n', (21375, 21384), True, 'import pandas as pd\n'), ((22188, 22209), 'pandas.DataFrame', 'pd.DataFrame', (['results'], {}), '(results)\n', (22200, 22209), True, 'import pandas as pd\n'), ((23077, 23098), 'pandas.DataFrame', 'pd.DataFrame', (['results'], {}), '(results)\n', (23089, 23098), True, 'import pandas as pd\n'), ((3173, 3223), 'joblib.load', 'joblib.load', (['(self.input_path / data_name)'], {}), '(self.input_path / data_name, **kwargs)\n', (3184, 3223), False, 'import joblib\n'), ((3469, 3520), 'joblib.load', 'joblib.load', (['(self.output_path / data_name)'], {}), '(self.output_path / data_name, **kwargs)\n', (3480, 3520), False, 'import joblib\n'), ((3276, 3326), 'pandas.read_csv', 'pd.read_csv', (['(self.input_path / data_name)'], {}), '(self.input_path / data_name, **kwargs)\n', (3287, 3326), True, 'import pandas as pd\n'), ((3573, 3624), 'pandas.read_csv', 'pd.read_csv', (['(self.output_path / data_name)'], {}), '(self.output_path / data_name, **kwargs)\n', (3584, 3624), True, 'import pandas as pd\n'), ((10965, 10983), 'numpy.array', 'np.array', (['[24, 25]'], {}), '([24, 25])\n', (10973, 10983), True, 'import numpy as np\n'), ((10986, 11010), 'numpy.array', 'np.array', (['([start_dt] * 2)'], {}), '([start_dt] * 2)\n', (10994, 11010), True, 'import numpy as np\n'), ((14689, 
14719), 're.findall', 're.findall', (['reg', 'df.columns[0]'], {}), '(reg, df.columns[0])\n', (14699, 14719), False, 'import re\n'), ((20714, 20734), 're.findall', 're.findall', (['reg', 'col'], {}), '(reg, col)\n', (20724, 20734), False, 'import re\n'), ((2578, 2589), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2587, 2589), False, 'import os\n'), ((2650, 2661), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2659, 2661), False, 'import os\n'), ((2721, 2732), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2730, 2732), False, 'import os\n'), ((13804, 13817), 'numpy.argsort', 'np.argsort', (['x'], {}), '(x)\n', (13814, 13817), True, 'import numpy as np\n'), ((14940, 14953), 'numpy.argsort', 'np.argsort', (['x'], {}), '(x)\n', (14950, 14953), True, 'import numpy as np\n')] |
import numpy as np
from scipy.spatial.qhull import ConvexHull, QhullError
# this two functions are from
# https://stackoverflow.com/questions/37117878/generating-a-filled-polygon-inside-a-numpy-array/37123933#37123933
def check(p1, p2, idxs):
    """Classify the index grid *idxs* against the directed edge p1 -> p2.

    Returns a boolean array, True on the inner side of the edge (the side
    kept when intersecting all edge half-planes of a polygon).
    """
    start = p1.astype(float)
    end = p2.astype(float)
    row_delta = end[0] - start[0]
    col_delta = end[1] - start[1]
    if row_delta == 0:
        # Edge with constant first coordinate: scale by the column count so
        # the threshold saturates, and orient by the column direction.
        threshold = (idxs[0] - start[0]) * idxs.shape[1]
        orientation = np.sign(col_delta)
    else:
        # Interpolate the edge's column coordinate at every row index.
        threshold = (idxs[0] - start[0]) / row_delta * col_delta + start[1]
        orientation = np.sign(row_delta)
    return idxs[1] * orientation <= threshold * orientation
def create_polygon(shape, vertices):
    """Rasterize the polygon given by *vertices* into an array of *shape*.

    :param shape: 2d output shape
    :param vertices: (n, 2) array of polygon vertices, consecutive rows
        forming the edges (the polygon is closed implicitly)
    :return: float array with 1.0 inside the polygon and 0.0 outside
    """
    idxs = np.indices(shape)  # Create 3D array of indices
    base_array = np.zeros(shape, dtype=float)  # Initialize your array of zeros
    # Start from an all-True mask; each edge check carves away the outside.
    # (Was ``np.ones(...) * True`` — a float array; use a real boolean mask.)
    fill = np.ones(shape, dtype=bool)
    # Create check array for each edge segment, combine into fill array
    for k in range(vertices.shape[0]):
        fill = np.all([fill, check(vertices[k - 1], vertices[k], idxs)], axis=0)
    # Set all values inside polygon to one
    base_array[fill] = 1
    return base_array
def _convex_fill(array: np.ndarray):
if array.ndim != 2:
raise ValueError("Convex fill need to be called on 2d array.")
points = np.transpose(np.nonzero(array))
try:
convex = ConvexHull(points)
convex_points = points[convex.vertices]
convex.close()
return create_polygon(array.shape, convex_points[::-1])
except (QhullError, ValueError):
return None
def convex_fill(array: np.ndarray):
arr_shape = array.shape
array = np.squeeze(array)
if array.ndim not in [2, 3]:
raise ValueError("Convex hull support only 2 and 3 dimension images")
# res = np.zeros(array.shape, array.dtype)
components = np.bincount(array.flat)
for i in range(1, components.size):
if components[i] == 0:
continue
component: np.ndarray = array == i
points = np.nonzero(component)
if len(points) == 0 or len(points[0]) == 0: # pylint: disable=R1714
continue
lower_bound = np.min(points, axis=1)
upper_bound = np.max(points, axis=1)
cut_area = tuple(slice(x, y + 1) for x, y in zip(lower_bound, upper_bound))
if array.ndim == 3:
cut_area = (slice(None),) + cut_area[1:]
component = component[cut_area]
if array.ndim == 2:
res = _convex_fill(component)
if res is None:
continue
array[cut_area][res > 0] = i
elif array.ndim == 3:
for j in range(lower_bound[0], upper_bound[0] + 1):
res = _convex_fill(component[j])
if res is None:
continue
new_cut = (j,) + cut_area[1:]
tmp = array[new_cut]
tmp[res > 0] = i
array[new_cut] = tmp
return array.reshape(arr_shape)
| [
"numpy.ones",
"numpy.indices",
"numpy.squeeze",
"scipy.spatial.qhull.ConvexHull",
"numpy.max",
"numpy.zeros",
"numpy.sign",
"numpy.nonzero",
"numpy.min",
"numpy.bincount"
] | [((1066, 1083), 'numpy.indices', 'np.indices', (['shape'], {}), '(shape)\n', (1076, 1083), True, 'import numpy as np\n'), ((1131, 1159), 'numpy.zeros', 'np.zeros', (['shape'], {'dtype': 'float'}), '(shape, dtype=float)\n', (1139, 1159), True, 'import numpy as np\n'), ((2066, 2083), 'numpy.squeeze', 'np.squeeze', (['array'], {}), '(array)\n', (2076, 2083), True, 'import numpy as np\n'), ((2260, 2283), 'numpy.bincount', 'np.bincount', (['array.flat'], {}), '(array.flat)\n', (2271, 2283), True, 'import numpy as np\n'), ((677, 699), 'numpy.sign', 'np.sign', (['(p2[1] - p1[1])'], {}), '(p2[1] - p1[1])\n', (684, 699), True, 'import numpy as np\n'), ((809, 831), 'numpy.sign', 'np.sign', (['(p2[0] - p1[0])'], {}), '(p2[0] - p1[0])\n', (816, 831), True, 'import numpy as np\n'), ((1206, 1231), 'numpy.ones', 'np.ones', (['base_array.shape'], {}), '(base_array.shape)\n', (1213, 1231), True, 'import numpy as np\n'), ((1732, 1749), 'numpy.nonzero', 'np.nonzero', (['array'], {}), '(array)\n', (1742, 1749), True, 'import numpy as np\n'), ((1777, 1795), 'scipy.spatial.qhull.ConvexHull', 'ConvexHull', (['points'], {}), '(points)\n', (1787, 1795), False, 'from scipy.spatial.qhull import ConvexHull, QhullError\n'), ((2436, 2457), 'numpy.nonzero', 'np.nonzero', (['component'], {}), '(component)\n', (2446, 2457), True, 'import numpy as np\n'), ((2578, 2600), 'numpy.min', 'np.min', (['points'], {'axis': '(1)'}), '(points, axis=1)\n', (2584, 2600), True, 'import numpy as np\n'), ((2623, 2645), 'numpy.max', 'np.max', (['points'], {'axis': '(1)'}), '(points, axis=1)\n', (2629, 2645), True, 'import numpy as np\n')] |
################################
########### Imports ############
################################
import sys
import os
sys.path.append('../../python/')
import warnings
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
#set up matplotlib
os.environ['MPLCONFIGDIR'] = '../mplstyles'
import matplotlib as mpl
from matplotlib import pyplot as plt
#got smarter about the mpl config: see mplstyles/ directory
plt.style.use('standard')
#fonts
# Set the font dictionaries (for plot title and axis titles)
title_font = {'fontname':'Arial', 'size':'16', 'color':'black', 'weight':'normal',
'verticalalignment':'bottom'} # Bottom vertical alignment for more space
axis_font = {'fontname':'Arial', 'size':'32'}
legend_font = {'fontname':'Arial', 'size':'22'}
#fonts global settings
mpl.rc('font',family=legend_font['fontname'])
import numpy as np
import pandas as pd
from scipy.optimize import minimize
import emcee
#For some reason, this speeds things up!
os.environ["OMP_NUM_THREADS"] = "1"
################################
######Module Variables##########
################################
#see: https://docstore.mik.ua/orelly/other/python/0596001886_pythonian-chp-7-sect-1.html
#for conventions on private variables (i.e. leading underscores)
refit=False
#fit parameters
_fitk=0.137
_fitq=1e-3
################################
########Fit Settings ###########
################################
#Construct a dictionary to store all the MCMC fit parameters and results
#These are all the settings a user will regularly change
########################## Data Settings ##########################
_mcmc_data={'g4_load_frac':1,
'cap_load_frac':1,
#'cap_sim_file':'/data/chocula/villaa/cascadeSimData/si28_R68_400k.pkl',
#'cap_rcapture':0.161,
'cap_sim_file':'../data/v3_400k.pkl',
'cap_rcapture':0.218,
########################## Spectrum Settings ##########################
'Emax': 2000, #[eVee]
'Ebins': np.linspace(0,2500,251), #np.linspace(0,2000,201),
'Efit_min':50, #[eVee]
'Efit_max':2000, #1750, #[eVee]
'spectrum_units':'reco-rate', #One of {'counts', 'reco-rate'}
########################## Yield Model Settings ##########################
#'Y_model':'Lind',
#'Y_labels': [r'k', r'$F_{NR}$'],
#'Y_bounds': [(0.05,0.3),(0,30)],
#'Y_model':'Chav',
#'Y_labels': [r'k', r'$a^{-1}$', r'$F_{NR}$'],
#'Y_bounds': [(0.05,0.3),(0,2e3),(0,30)],
'Y_model':'Sor',
'Y_labels': [r'k', r'q', r'$F_{NR}$'],
'Y_bounds': [(0.05,0.3),(0,3e-2),(0,30)],
#'Y_model':'AC',
#'Y_labels': [r'k', r'$\xi$', r'$F_{NR}$'],
#'Y_bounds': [(0.05,0.3),(0,2e3),(0,30)],
#'Y_model':'pchip',
#'Y_labels': [r'k', 'Er0', 'Er1', 'Er2', 'f1', r'$F_{NR}$'],
#'Y_bounds': [(0.05,0.3),(0,1e-3),(0,1e-3),(0,1e-3),(0,1),(0,10)],
#'Y_model':'Shexp',
#'Y_labels': [r'k', 'Yshelf', 'Ec', 'dE', 'alpha', r'$F_{NR}$'],
#'Y_bounds': [(0.05,0.3),(0,0.3),(0,1e3),(0,1e3),(0,100),(0,30)],
#'Y_model':'Pol3',
#'Y_labels': [r'p0', r'p1', r'p2', r'$F_{NR}$'],
#'Y_bounds': [(-0.5,0.5),(-5e-4,5e-4),(-5e-7,5e-7),(0,30)],
########################## Sim Spectra Settings ##########################
'ER_spec_model':'sim', #One of {'sim', 'flat') to selct from G4 simulation or flat
#'ER_par_labels':[r'$scale_{G4}$'],
'ER_par_labels':[r'$scale_{ER}$'],
'ER_par_bounds':[(0,20)], #Unitless scaling factor
#'ER_spec_model':'flat',
#'ER_par_labels':[r'$R0_{ER}$'],
#'ER_par_bounds':[(0,4e-2)], # Units are [Counts/sec/eVee bin] or [Counts/eVee bin] depending on spectrum_units
#
'NR_spec_model':'sim', #One of {'sim', 'flat', 'exp') to selct from G4 simulation, flat, or exponential
#'NR_par_labels':[r'$scale_{G4}$'],
'NR_par_labels':[r'$scale_{NR}$'],
'NR_par_bounds':[(0,20)], #Unitless scaling factor
#'NR_spec_model':'exp',
#'NR_par_labels':[r'$R0_{NR}$',r'$E0_{NR}$'], #R0*exp(-E/E0) gives NR spectrum (post-yield)
#'NR_par_bounds':[(0,0.1),(0,2e3)], # Units are [Counts/sec/eVee bin, eVee] or [Counts/eVee bin] depending on spectrum_units
#
'NG_spec_model':'sim', #Not going to implement anything other than sim for (n,gamma) yet
'NG_par_labels':[r'$scale_{ng}$'],
'NG_par_bounds':[(0,10)], #Unitless scaling factor
########################## Likelihood Settings ##########################
'likelihood':'SNorm', #One of {'Pois', 'Norm', 'SNorm'} Only SNorm uses sigmas, others assume Pois stats
########################## Uncertainty Settings ##########################
'doDetRes': True, #Include detector resolution effects
'fpeak':1, #0.753 -- 1.0
'doEffsyst':False, #Include systematics from cut efficiencies
'doBurstLeaksyst':False, #Include burst cut leakage systematic
########################## MCMC Settings ##########################
'nwalkers':128,
'nstep':500000,
'guesses':'Uniform', #Can either be uniform or shape (nwalkers, ndim),
'moves':'DE8020',#'Default': StretchMove, 'DE8020': 80/20 DEMove/DESnookerMove
'saveMCMC':True
}
################################
###########Functions############
################################
def parse_options():
_mcmc_data['labels']=_mcmc_data['Y_labels']+_mcmc_data['ER_par_labels']+_mcmc_data['NR_par_labels']+_mcmc_data['NG_par_labels']
_mcmc_data['bounds']=_mcmc_data['Y_bounds']+_mcmc_data['ER_par_bounds']+_mcmc_data['NR_par_bounds']+_mcmc_data['NG_par_bounds']
#Special case if ER and NR are both sim and we want to use the same G4 scaling factor for both:
#if (mcmc_data['ER_spec_model']=='sim') and (mcmc_data['NR_spec_model']=='sim'):
if (_mcmc_data['ER_par_labels']==[r'$scale_{G4}$']) and (_mcmc_data['NR_par_labels']==[r'$scale_{G4}$']):
_mcmc_data['labels']=_mcmc_data['Y_labels']+_mcmc_data['NR_par_labels']+_mcmc_data['NG_par_labels']
_mcmc_data['bounds']=_mcmc_data['Y_bounds']+_mcmc_data['NR_par_bounds']+_mcmc_data['NG_par_bounds']
_mcmc_data['ndim']=len(_mcmc_data['labels'])
return True
################################
######Execute on Load###########
################################
def printfunc():
print(_mcmc_data) #troubleshooting line
printfunc()
parse_options()
| [
"matplotlib.pyplot.style.use",
"numpy.linspace",
"matplotlib.rc",
"sys.path.append",
"warnings.filterwarnings"
] | [((121, 153), 'sys.path.append', 'sys.path.append', (['"""../../python/"""'], {}), "('../../python/')\n", (136, 153), False, 'import sys\n'), ((170, 239), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'message': '"""numpy.dtype size changed"""'}), "('ignore', message='numpy.dtype size changed')\n", (193, 239), False, 'import warnings\n'), ((240, 309), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'message': '"""numpy.ufunc size changed"""'}), "('ignore', message='numpy.ufunc size changed')\n", (263, 309), False, 'import warnings\n'), ((496, 521), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""standard"""'], {}), "('standard')\n", (509, 521), True, 'from matplotlib import pyplot as plt\n'), ((879, 925), 'matplotlib.rc', 'mpl.rc', (['"""font"""'], {'family': "legend_font['fontname']"}), "('font', family=legend_font['fontname'])\n", (885, 925), True, 'import matplotlib as mpl\n'), ((2094, 2119), 'numpy.linspace', 'np.linspace', (['(0)', '(2500)', '(251)'], {}), '(0, 2500, 251)\n', (2105, 2119), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
import sys
sys.path.append('..')
from neml import models, elasticity, drivers, damage, creep
import matplotlib.pyplot as plt
import numpy as np
if __name__ == "__main__":
s = 150.0
# We'll do it in terms of Hayhurst-Leckie
nu = 1.8
eta = 2.1
s0 = 7000.0
w0 = 3.25e-2
n = 4.0
e0 = 1.0 # This is key
# My parameters
xi = nu
phi = eta
A = s0 / (w0**(1.0/nu))
tf = 10000
# Hayhurst solution
times = np.linspace(0,tf,100)
dmg = (1-(eta+1)*w0*(s/s0)**nu*times)**(1.0/(eta+1.0))
strain = -e0 * (s/s0)**(-nu) * dmg**(eta+1) / ((1 + eta - n) * w0) * (s / (s0*dmg)
)**n + e0 * (s/s0)**(n-nu)/((1+eta-n)*w0)
E = 10000000.0
nu = 0.3
srate = 100.0
emodel = elasticity.IsotropicLinearElasticModel(E, "youngs", nu, "poissons")
bmodel = models.SmallStrainElasticity(emodel)
scmodel = creep.NormalizedPowerLawCreep(s0, n)
cfmodel = creep.J2CreepModel(scmodel)
cmodel = models.SmallStrainCreepPlasticity(emodel, bmodel, cfmodel)
model = damage.ModularCreepDamageModel_sd(emodel, A, xi, phi,
damage.VonMisesEffectiveStress(), cmodel)
res = drivers.creep(model, s, srate, tf, nsteps = 1000)
plt.plot(times, strain)
plt.plot(res['rtime'], res['rstrain'])
plt.show()
| [
"neml.models.SmallStrainCreepPlasticity",
"matplotlib.pyplot.plot",
"neml.creep.NormalizedPowerLawCreep",
"numpy.linspace",
"neml.creep.J2CreepModel",
"neml.drivers.creep",
"neml.elasticity.IsotropicLinearElasticModel",
"neml.damage.VonMisesEffectiveStress",
"sys.path.append",
"neml.models.SmallSt... | [((35, 56), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (50, 56), False, 'import sys\n'), ((456, 479), 'numpy.linspace', 'np.linspace', (['(0)', 'tf', '(100)'], {}), '(0, tf, 100)\n', (467, 479), True, 'import numpy as np\n'), ((729, 796), 'neml.elasticity.IsotropicLinearElasticModel', 'elasticity.IsotropicLinearElasticModel', (['E', '"""youngs"""', 'nu', '"""poissons"""'], {}), "(E, 'youngs', nu, 'poissons')\n", (767, 796), False, 'from neml import models, elasticity, drivers, damage, creep\n'), ((808, 844), 'neml.models.SmallStrainElasticity', 'models.SmallStrainElasticity', (['emodel'], {}), '(emodel)\n', (836, 844), False, 'from neml import models, elasticity, drivers, damage, creep\n'), ((857, 893), 'neml.creep.NormalizedPowerLawCreep', 'creep.NormalizedPowerLawCreep', (['s0', 'n'], {}), '(s0, n)\n', (886, 893), False, 'from neml import models, elasticity, drivers, damage, creep\n'), ((906, 933), 'neml.creep.J2CreepModel', 'creep.J2CreepModel', (['scmodel'], {}), '(scmodel)\n', (924, 933), False, 'from neml import models, elasticity, drivers, damage, creep\n'), ((945, 1003), 'neml.models.SmallStrainCreepPlasticity', 'models.SmallStrainCreepPlasticity', (['emodel', 'bmodel', 'cfmodel'], {}), '(emodel, bmodel, cfmodel)\n', (978, 1003), False, 'from neml import models, elasticity, drivers, damage, creep\n'), ((1125, 1172), 'neml.drivers.creep', 'drivers.creep', (['model', 's', 'srate', 'tf'], {'nsteps': '(1000)'}), '(model, s, srate, tf, nsteps=1000)\n', (1138, 1172), False, 'from neml import models, elasticity, drivers, damage, creep\n'), ((1178, 1201), 'matplotlib.pyplot.plot', 'plt.plot', (['times', 'strain'], {}), '(times, strain)\n', (1186, 1201), True, 'import matplotlib.pyplot as plt\n'), ((1204, 1242), 'matplotlib.pyplot.plot', 'plt.plot', (["res['rtime']", "res['rstrain']"], {}), "(res['rtime'], res['rstrain'])\n", (1212, 1242), True, 'import matplotlib.pyplot as plt\n'), ((1245, 1255), 
'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1253, 1255), True, 'import matplotlib.pyplot as plt\n'), ((1074, 1106), 'neml.damage.VonMisesEffectiveStress', 'damage.VonMisesEffectiveStress', ([], {}), '()\n', (1104, 1106), False, 'from neml import models, elasticity, drivers, damage, creep\n')] |
"""Runs model on data input"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import argparse
import time
import numpy as np
import tensorflow as tf
import sigpy.mri
from tqdm import tqdm
from matplotlib import pyplot
import model
import data
from utils import mri
from utils import fftc
from utils import tfmri
import utils.logging
logger = utils.logging.logger
class DeepRecon:
def __init__(self,
model_dir,
num_channels,
shape_z,
shape_y,
shape_scale=5,
num_maps=1,
batch_size=1,
tf_graph=None,
tf_sess=None,
debug_plot=False):
"""
Setup model for inference
Args:
model_dir: Directory with model files
num_channels: Number of channels for input data
shape_z: Shape of input data in Z
shape_y: Shape of input data in Y
shape_scale: Scale data with center k-space data
num_maps: Number of sets of sensitivity maps
"""
self.debug_plot = debug_plot
self.tf_graph = tf_graph
if self.tf_graph is None:
self.tf_graph = tf.Graph()
self.tf_sess = tf_sess
if self.tf_sess is None:
session_config = tf.ConfigProto()
session_config.gpu_options.allow_growth = True # pylint: disable=E1101
session_config.allow_soft_placement = True
self.tf_sess = tf.Session(
graph=self.tf_graph, config=session_config)
params = model.load_params(model_dir)
with self.tf_graph.as_default():
self.batch_size = batch_size
self.tf_kspace_input = tf.placeholder(
tf.complex64,
(self.batch_size, shape_z, shape_y, num_channels))
self.tf_sensemap_input = tf.placeholder(
tf.complex64,
(self.batch_size, shape_z, shape_y, num_maps, num_channels))
if shape_scale > 0:
scale = tf.image.resize_image_with_crop_or_pad(
self.tf_kspace_input, shape_scale, shape_scale)
scale = tf.reduce_mean(tf.square(tf.abs(scale)))
scale *= shape_scale * shape_scale / shape_y / shape_z
else:
logger.info('Turning off scaling...')
scale = 1.0
scale = tf.cast(1.0 / tf.sqrt(scale), dtype=tf.complex64)
tf_kspace_input_scaled = self.tf_kspace_input * scale
tf_image_output_scaled, tf_kspace_output_scaled, self.iter_out = model.unrolled_prox(
tf_kspace_input_scaled,
self.tf_sensemap_input,
num_grad_steps=params['unrolled_steps'],
resblock_num_features=params['unrolled_num_features'],
resblock_num_blocks=params['unrolled_num_resblocks'],
resblock_share=params['unrolled_share'],
training=False,
hard_projection=params['hard_projection'],
scope=params['recon_scope'])
self.tf_image_output = tf_image_output_scaled / scale
self.tf_kspace_output = tf_kspace_output_scaled / scale
if params['loss_adv'] > 0:
adv_scope = 'Adversarial'
tf_image_input_scaled = tfmri.model_transpose(
tf_kspace_input_scaled, self.tf_sensemap_input)
self.adv_output = model.adversarial(
tf_image_input_scaled, training=False, scope=adv_scope)
else:
self.adv_output = None
filename_latest_model = tf.train.latest_checkpoint(model_dir)
logger.info('Loading model ({})...'.format(filename_latest_model))
saver = tf.train.Saver()
saver.restore(self.tf_sess, filename_latest_model)
def run(self, kspace, sensemap):
"""
Run inference on dataset
Args
kspace: (channels, kz, ky, x)
sensemap: (maps, channels, z, y, x)
"""
logger.info('IFFT in x...')
kspace_input = fftc.ifftc(kspace, axis=-1)
# (channels, kz, ky, x) to (x, kz, ky, channels)
kspace_input = np.transpose(kspace_input, (3, 1, 2, 0))
kspace_output = np.zeros(kspace_input.shape, dtype=np.complex64)
if self.debug_plot:
image_input = fftc.ifftc(fftc.ifftc(kspace_input, axis=1), axis=2)
image_input = mri.sumofsq(image_input, axis=-1)
image_output = np.zeros(image_input.shape, dtype=np.float64)
# tranpose to (x, kz, ky, maps, channels)
sensemap_input = np.transpose(sensemap, (4, 2, 3, 0, 1))
num_x = kspace_input.shape[0]
num_batches = int(np.ceil(1.0 * num_x / self.batch_size))
logger.info('Running inference ({} batches)...'.format(num_batches))
def wrap(x):
return x
if logger.getEffectiveLevel() is utils.logging.logging.INFO:
wrap = tqdm
time_start = time.time()
for b in wrap(range(num_batches)):
x_start = b * self.batch_size
x_end = (b + 1) * self.batch_size
logger.debug(' batch {}/{}: ({}, {})'.format(
b, num_batches, x_start, x_end))
kspace_input_batch = kspace_input[x_start:x_end, :, :, :].copy()
sensemap_input_batch = sensemap_input[x_start:x_end, :, :, :]
x_act_end = kspace_input_batch.shape[0] + x_start
if x_end != x_act_end:
pad = x_end - x_act_end
zeropad = np.zeros((pad, ) + kspace_input_batch.shape[1:],
np.complex64)
kspace_input_batch = np.concatenate(
(kspace_input_batch, zeropad), axis=0)
zeropad = np.zeros((pad, ) + sensemap_input_batch.shape[1:],
np.complex64)
sensemap_input_batch = np.concatenate(
(sensemap_input_batch, zeropad), axis=0)
feed_dict = {
self.tf_kspace_input: kspace_input_batch,
self.tf_sensemap_input: sensemap_input_batch
}
out = self.tf_sess.run([self.tf_kspace_output],
feed_dict=feed_dict)[0]
kspace_output[x_start:x_act_end, :, :, :] = out
if self.debug_plot:
imout = fftc.ifftc(fftc.ifftc(out, axis=1), axis=2)
imout = mri.sumofsq(imout, axis=-1)
image_output[x_start:x_act_end, :, :] = imout
image_axial_disp = np.concatenate(
(image_input[x_start, :, :], image_output[x_start, :, :]),
axis=1)
image_sag_disp = np.concatenate(
(image_input[:, :, image_input.shape[-1] // 2],
image_output[:, :, image_output.shape[-1] // 2]),
axis=1)
pyplot.figure(1)
pyplot.subplot(2, 1, 1)
pyplot.imshow(image_axial_disp, cmap='gray')
pyplot.axis('off')
pyplot.title('Processed: {}/{}'.format(b, num_batches))
pyplot.subplot(2, 1, 2)
pyplot.imshow(image_sag_disp, cmap='gray')
pyplot.axis('off')
pyplot.pause(0.01)
time_end = time.time()
time_total = time_end - time_start
logger.info('Timer: ')
logger.info(' Per slice: {} s'.format(
time_total / num_batches / self.batch_size))
logger.info(' Per batch: {} s'.format(time_total / num_batches))
logger.info(' Total: {} s'.format(time_total))
# (x, kz, ky, channels) to (channels, kz, ky, x)
kspace_output = np.transpose(kspace_output, (3, 1, 2, 0))
logger.info('FFT in x...')
kspace_output = fftc.fftc(kspace_output, axis=-1)
return kspace_output
def has_adv(self):
return self.adv_output is not None
def run_adv(self, kspace, sensemap):
"""
Run inference on dataset
Args
kspace: (channels, kz, ky, x)
sensemap: (maps, channels, z, y, x)
"""
if self.adv_output is None:
logger.warning('No Adversarial network with model')
return None
logger.info('IFFT in x...')
kspace_input = fftc.ifftc(kspace, axis=-1)
# (channels, kz, ky, x) to (x, kz, ky, channels)
kspace_input = np.transpose(kspace_input, (3, 1, 2, 0))
adv_output = None
if self.debug_plot:
image_input = fftc.ifftc(fftc.ifftc(kspace_input, axis=1), axis=2)
image_input = mri.sumofsq(image_input, axis=-1)
image_output = np.zeros(image_input.shape, dtype=np.float64)
# tranpose to (x, kz, ky, maps, channels)
sensemap_input = np.transpose(sensemap, (4, 2, 3, 0, 1))
num_x = kspace_input.shape[0]
num_batches = int(np.ceil(1.0 * num_x / self.batch_size))
logger.info('Running inference ({} batches)...'.format(num_batches))
def wrap(x):
return x
if logger.getEffectiveLevel() is utils.logging.logging.INFO:
wrap = tqdm
time_start = time.time()
for b in wrap(range(num_batches)):
x_start = b * self.batch_size
x_end = (b + 1) * self.batch_size
logger.debug(' batch {}/{}: ({}, {})'.format(
b, num_batches, x_start, x_end))
kspace_input_batch = kspace_input[x_start:x_end, :, :, :].copy()
sensemap_input_batch = sensemap_input[x_start:x_end, :, :, :]
x_act_end = kspace_input_batch.shape[0] + x_start
if x_end != x_act_end:
pad = x_end - x_act_end
zeropad = np.zeros((pad, ) + kspace_input_batch.shape[1:],
np.complex64)
kspace_input_batch = np.concatenate(
(kspace_input_batch, zeropad), axis=0)
zeropad = np.zeros((pad, ) + sensemap_input_batch.shape[1:],
np.complex64)
sensemap_input_batch = np.concatenate(
(sensemap_input_batch, zeropad), axis=0)
feed_dict = {
self.tf_kspace_input: kspace_input_batch,
self.tf_sensemap_input: sensemap_input_batch
}
out = self.tf_sess.run([self.adv_output], feed_dict=feed_dict)[0]
if adv_output is None:
adv_output = np.zeros(
(kspace_input.shape[0], ) + out.shape[1:], np.complex64)
adv_output[x_start:x_act_end, :, :, :] = out
if self.debug_plot:
imout = fftc.ifftc(fftc.ifftc(out, axis=1), axis=2)
imout = mri.sumofsq(imout, axis=-1)
image_output[x_start:x_act_end, :, :] = imout
image_axial_disp = np.concatenate(
(image_input[x_start, :, :] / np.max(image_input),
image_output[x_start, :, :]) / np.max(image_output),
axis=1)
image_sag_disp = np.concatenate(
(image_input[:, :, image_input.shape[-1] // 2] /
np.max(image_input),
image_output[:, :, image_output.shape[-1] // 2]) /
np.max(image_output),
axis=1)
pyplot.figure(1)
pyplot.subplot(2, 1, 1)
pyplot.imshow(image_axial_disp, cmap='gray')
pyplot.axis('off')
pyplot.title('Processed: {}/{}'.format(b, num_batches))
pyplot.subplot(2, 1, 2)
pyplot.imshow(image_sag_disp, cmap='gray')
pyplot.axis('off')
pyplot.pause(0.01)
time_end = time.time()
time_total = time_end - time_start
logger.info('Timer: ')
logger.info(' Per slice: {} s'.format(
time_total / num_batches / self.batch_size))
logger.info(' Per batch: {} s'.format(time_total / num_batches))
logger.info(' Total: {} s'.format(time_total))
# (x, kz, ky, channels) to (channels, kz, ky, x)
adv_output = np.transpose(adv_output, (3, 1, 2, 0))
return adv_output
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Run inference')
parser.add_argument(
'model_dir', action='store', help='Location of trained model')
parser.add_argument(
'kspace_input', action='store', help='npy file of kspace input data')
parser.add_argument(
'kspace_output', action='store', help='npy file of kspace output data')
parser.add_argument(
'--sensemap', default=None, help='Insert sensemap as npy')
parser.add_argument('--device', default='0', help='GPU device to use')
parser.add_argument(
'--batch_size', default=1, type=int, help='Batch size for inference')
parser.add_argument(
'--verbose',
action='store_true',
help='Verbose printing (default: False)')
parser.add_argument(
'--plot',
action='store_true',
help='Plotting for debugging (default: False)')
args = parser.parse_args()
log_level = utils.logging.logging.INFO if args.verbose else utils.logging.logging.WARNING
logger.setLevel(log_level)
os.environ['CUDA_VISIBLE_DEVICES'] = args.device
logger.info('Using GPU device {}...'.format(args.device))
logger.info('Loading k-space data from {}...'.format(args.kspace_input))
kspace = np.load(args.kspace_input)
sensemap = None
if args.sensemap and os.path.isfile(args.sensemap):
logger.info('Loading sensitivity maps from {}...'.format(
args.sensemap))
sensemap = np.load(args.sensemap)
else:
logger.info('Estimating sensitivity maps...')
sensemap = mri.estimate_sense_maps(kspace)
if args.sensemap:
logger.info(' Saving sensitivity maps to {}...'.format(
args.sensemap))
np.save(args.sensemap, sensemap)
sensemap = np.squeeze(sensemap)
if sensemap.ndim != 5:
# (maps, channels, z, y, x)
sensemap = np.expand_dims(sensemap, axis=0)
logger.info('Setting up model from {}...'.format(args.model_dir))
num_channels = kspace.shape[0]
shape_z = kspace.shape[1]
shape_y = kspace.shape[2]
model = DeepRecon(
args.model_dir,
num_channels,
shape_z,
shape_y,
batch_size=args.batch_size,
debug_plot=args.plot)
logger.info('Running inference...')
kspace_output = model.run(kspace, sensemap)
logger.info('Writing output to {}...'.format(args.kspace_output))
np.save(args.kspace_output, kspace_output)
logger.info('Finished')
| [
"model.load_params",
"utils.tfmri.model_transpose",
"model.run",
"model.adversarial",
"numpy.save",
"matplotlib.pyplot.imshow",
"tensorflow.Graph",
"argparse.ArgumentParser",
"tensorflow.Session",
"tensorflow.placeholder",
"model.unrolled_prox",
"numpy.max",
"numpy.concatenate",
"tensorflo... | [((12580, 12632), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Run inference"""'}), "(description='Run inference')\n", (12603, 12632), False, 'import argparse\n'), ((13824, 13850), 'numpy.load', 'np.load', (['args.kspace_input'], {}), '(args.kspace_input)\n', (13831, 13850), True, 'import numpy as np\n'), ((14365, 14385), 'numpy.squeeze', 'np.squeeze', (['sensemap'], {}), '(sensemap)\n', (14375, 14385), True, 'import numpy as np\n'), ((14897, 14924), 'model.run', 'model.run', (['kspace', 'sensemap'], {}), '(kspace, sensemap)\n', (14906, 14924), False, 'import model\n'), ((15000, 15042), 'numpy.save', 'np.save', (['args.kspace_output', 'kspace_output'], {}), '(args.kspace_output, kspace_output)\n', (15007, 15042), True, 'import numpy as np\n'), ((1674, 1702), 'model.load_params', 'model.load_params', (['model_dir'], {}), '(model_dir)\n', (1691, 1702), False, 'import model\n'), ((4241, 4268), 'utils.fftc.ifftc', 'fftc.ifftc', (['kspace'], {'axis': '(-1)'}), '(kspace, axis=-1)\n', (4251, 4268), False, 'from utils import fftc\n'), ((4349, 4389), 'numpy.transpose', 'np.transpose', (['kspace_input', '(3, 1, 2, 0)'], {}), '(kspace_input, (3, 1, 2, 0))\n', (4361, 4389), True, 'import numpy as np\n'), ((4414, 4462), 'numpy.zeros', 'np.zeros', (['kspace_input.shape'], {'dtype': 'np.complex64'}), '(kspace_input.shape, dtype=np.complex64)\n', (4422, 4462), True, 'import numpy as np\n'), ((4780, 4819), 'numpy.transpose', 'np.transpose', (['sensemap', '(4, 2, 3, 0, 1)'], {}), '(sensemap, (4, 2, 3, 0, 1))\n', (4792, 4819), True, 'import numpy as np\n'), ((5161, 5172), 'time.time', 'time.time', ([], {}), '()\n', (5170, 5172), False, 'import time\n'), ((7537, 7548), 'time.time', 'time.time', ([], {}), '()\n', (7546, 7548), False, 'import time\n'), ((7944, 7985), 'numpy.transpose', 'np.transpose', (['kspace_output', '(3, 1, 2, 0)'], {}), '(kspace_output, (3, 1, 2, 0))\n', (7956, 7985), True, 'import numpy as np\n'), ((8046, 8079), 
'utils.fftc.fftc', 'fftc.fftc', (['kspace_output'], {'axis': '(-1)'}), '(kspace_output, axis=-1)\n', (8055, 8079), False, 'from utils import fftc\n'), ((8560, 8587), 'utils.fftc.ifftc', 'fftc.ifftc', (['kspace'], {'axis': '(-1)'}), '(kspace, axis=-1)\n', (8570, 8587), False, 'from utils import fftc\n'), ((8668, 8708), 'numpy.transpose', 'np.transpose', (['kspace_input', '(3, 1, 2, 0)'], {}), '(kspace_input, (3, 1, 2, 0))\n', (8680, 8708), True, 'import numpy as np\n'), ((9052, 9091), 'numpy.transpose', 'np.transpose', (['sensemap', '(4, 2, 3, 0, 1)'], {}), '(sensemap, (4, 2, 3, 0, 1))\n', (9064, 9091), True, 'import numpy as np\n'), ((9433, 9444), 'time.time', 'time.time', ([], {}), '()\n', (9442, 9444), False, 'import time\n'), ((12068, 12079), 'time.time', 'time.time', ([], {}), '()\n', (12077, 12079), False, 'import time\n'), ((12472, 12510), 'numpy.transpose', 'np.transpose', (['adv_output', '(3, 1, 2, 0)'], {}), '(adv_output, (3, 1, 2, 0))\n', (12484, 12510), True, 'import numpy as np\n'), ((13896, 13925), 'os.path.isfile', 'os.path.isfile', (['args.sensemap'], {}), '(args.sensemap)\n', (13910, 13925), False, 'import os\n'), ((14040, 14062), 'numpy.load', 'np.load', (['args.sensemap'], {}), '(args.sensemap)\n', (14047, 14062), True, 'import numpy as np\n'), ((14146, 14177), 'utils.mri.estimate_sense_maps', 'mri.estimate_sense_maps', (['kspace'], {}), '(kspace)\n', (14169, 14177), False, 'from utils import mri\n'), ((14468, 14500), 'numpy.expand_dims', 'np.expand_dims', (['sensemap'], {'axis': '(0)'}), '(sensemap, axis=0)\n', (14482, 14500), True, 'import numpy as np\n'), ((1297, 1307), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (1305, 1307), True, 'import tensorflow as tf\n'), ((1401, 1417), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (1415, 1417), True, 'import tensorflow as tf\n'), ((1584, 1638), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'self.tf_graph', 'config': 'session_config'}), '(graph=self.tf_graph, 
config=session_config)\n', (1594, 1638), True, 'import tensorflow as tf\n'), ((1821, 1900), 'tensorflow.placeholder', 'tf.placeholder', (['tf.complex64', '(self.batch_size, shape_z, shape_y, num_channels)'], {}), '(tf.complex64, (self.batch_size, shape_z, shape_y, num_channels))\n', (1835, 1900), True, 'import tensorflow as tf\n'), ((1971, 2064), 'tensorflow.placeholder', 'tf.placeholder', (['tf.complex64', '(self.batch_size, shape_z, shape_y, num_maps, num_channels)'], {}), '(tf.complex64, (self.batch_size, shape_z, shape_y, num_maps,\n num_channels))\n', (1985, 2064), True, 'import tensorflow as tf\n'), ((2708, 3077), 'model.unrolled_prox', 'model.unrolled_prox', (['tf_kspace_input_scaled', 'self.tf_sensemap_input'], {'num_grad_steps': "params['unrolled_steps']", 'resblock_num_features': "params['unrolled_num_features']", 'resblock_num_blocks': "params['unrolled_num_resblocks']", 'resblock_share': "params['unrolled_share']", 'training': '(False)', 'hard_projection': "params['hard_projection']", 'scope': "params['recon_scope']"}), "(tf_kspace_input_scaled, self.tf_sensemap_input,\n num_grad_steps=params['unrolled_steps'], resblock_num_features=params[\n 'unrolled_num_features'], resblock_num_blocks=params[\n 'unrolled_num_resblocks'], resblock_share=params['unrolled_share'],\n training=False, hard_projection=params['hard_projection'], scope=params\n ['recon_scope'])\n", (2727, 3077), False, 'import model\n'), ((3770, 3807), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['model_dir'], {}), '(model_dir)\n', (3796, 3807), True, 'import tensorflow as tf\n'), ((3907, 3923), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (3921, 3923), True, 'import tensorflow as tf\n'), ((4597, 4630), 'utils.mri.sumofsq', 'mri.sumofsq', (['image_input'], {'axis': '(-1)'}), '(image_input, axis=-1)\n', (4608, 4630), False, 'from utils import mri\n'), ((4658, 4703), 'numpy.zeros', 'np.zeros', (['image_input.shape'], {'dtype': 'np.float64'}), 
'(image_input.shape, dtype=np.float64)\n', (4666, 4703), True, 'import numpy as np\n'), ((4884, 4922), 'numpy.ceil', 'np.ceil', (['(1.0 * num_x / self.batch_size)'], {}), '(1.0 * num_x / self.batch_size)\n', (4891, 4922), True, 'import numpy as np\n'), ((8869, 8902), 'utils.mri.sumofsq', 'mri.sumofsq', (['image_input'], {'axis': '(-1)'}), '(image_input, axis=-1)\n', (8880, 8902), False, 'from utils import mri\n'), ((8930, 8975), 'numpy.zeros', 'np.zeros', (['image_input.shape'], {'dtype': 'np.float64'}), '(image_input.shape, dtype=np.float64)\n', (8938, 8975), True, 'import numpy as np\n'), ((9156, 9194), 'numpy.ceil', 'np.ceil', (['(1.0 * num_x / self.batch_size)'], {}), '(1.0 * num_x / self.batch_size)\n', (9163, 9194), True, 'import numpy as np\n'), ((14317, 14349), 'numpy.save', 'np.save', (['args.sensemap', 'sensemap'], {}), '(args.sensemap, sensemap)\n', (14324, 14349), True, 'import numpy as np\n'), ((2151, 2241), 'tensorflow.image.resize_image_with_crop_or_pad', 'tf.image.resize_image_with_crop_or_pad', (['self.tf_kspace_input', 'shape_scale', 'shape_scale'], {}), '(self.tf_kspace_input, shape_scale,\n shape_scale)\n', (2189, 2241), True, 'import tensorflow as tf\n'), ((3456, 3525), 'utils.tfmri.model_transpose', 'tfmri.model_transpose', (['tf_kspace_input_scaled', 'self.tf_sensemap_input'], {}), '(tf_kspace_input_scaled, self.tf_sensemap_input)\n', (3477, 3525), False, 'from utils import tfmri\n'), ((3581, 3654), 'model.adversarial', 'model.adversarial', (['tf_image_input_scaled'], {'training': '(False)', 'scope': 'adv_scope'}), '(tf_image_input_scaled, training=False, scope=adv_scope)\n', (3598, 3654), False, 'import model\n'), ((4529, 4561), 'utils.fftc.ifftc', 'fftc.ifftc', (['kspace_input'], {'axis': '(1)'}), '(kspace_input, axis=1)\n', (4539, 4561), False, 'from utils import fftc\n'), ((5727, 5788), 'numpy.zeros', 'np.zeros', (['((pad,) + kspace_input_batch.shape[1:])', 'np.complex64'], {}), '((pad,) + kspace_input_batch.shape[1:], np.complex64)\n', 
(5735, 5788), True, 'import numpy as np\n'), ((5862, 5915), 'numpy.concatenate', 'np.concatenate', (['(kspace_input_batch, zeropad)'], {'axis': '(0)'}), '((kspace_input_batch, zeropad), axis=0)\n', (5876, 5915), True, 'import numpy as np\n'), ((5963, 6026), 'numpy.zeros', 'np.zeros', (['((pad,) + sensemap_input_batch.shape[1:])', 'np.complex64'], {}), '((pad,) + sensemap_input_batch.shape[1:], np.complex64)\n', (5971, 6026), True, 'import numpy as np\n'), ((6102, 6157), 'numpy.concatenate', 'np.concatenate', (['(sensemap_input_batch, zeropad)'], {'axis': '(0)'}), '((sensemap_input_batch, zeropad), axis=0)\n', (6116, 6157), True, 'import numpy as np\n'), ((6642, 6669), 'utils.mri.sumofsq', 'mri.sumofsq', (['imout'], {'axis': '(-1)'}), '(imout, axis=-1)\n', (6653, 6669), False, 'from utils import mri\n'), ((6768, 6853), 'numpy.concatenate', 'np.concatenate', (['(image_input[x_start, :, :], image_output[x_start, :, :])'], {'axis': '(1)'}), '((image_input[x_start, :, :], image_output[x_start, :, :]),\n axis=1)\n', (6782, 6853), True, 'import numpy as np\n'), ((6924, 7049), 'numpy.concatenate', 'np.concatenate', (['(image_input[:, :, image_input.shape[-1] // 2], image_output[:, :, \n image_output.shape[-1] // 2])'], {'axis': '(1)'}), '((image_input[:, :, image_input.shape[-1] // 2], image_output\n [:, :, image_output.shape[-1] // 2]), axis=1)\n', (6938, 7049), True, 'import numpy as np\n'), ((7123, 7139), 'matplotlib.pyplot.figure', 'pyplot.figure', (['(1)'], {}), '(1)\n', (7136, 7139), False, 'from matplotlib import pyplot\n'), ((7156, 7179), 'matplotlib.pyplot.subplot', 'pyplot.subplot', (['(2)', '(1)', '(1)'], {}), '(2, 1, 1)\n', (7170, 7179), False, 'from matplotlib import pyplot\n'), ((7196, 7240), 'matplotlib.pyplot.imshow', 'pyplot.imshow', (['image_axial_disp'], {'cmap': '"""gray"""'}), "(image_axial_disp, cmap='gray')\n", (7209, 7240), False, 'from matplotlib import pyplot\n'), ((7257, 7275), 'matplotlib.pyplot.axis', 'pyplot.axis', (['"""off"""'], {}), 
"('off')\n", (7268, 7275), False, 'from matplotlib import pyplot\n'), ((7364, 7387), 'matplotlib.pyplot.subplot', 'pyplot.subplot', (['(2)', '(1)', '(2)'], {}), '(2, 1, 2)\n', (7378, 7387), False, 'from matplotlib import pyplot\n'), ((7404, 7446), 'matplotlib.pyplot.imshow', 'pyplot.imshow', (['image_sag_disp'], {'cmap': '"""gray"""'}), "(image_sag_disp, cmap='gray')\n", (7417, 7446), False, 'from matplotlib import pyplot\n'), ((7463, 7481), 'matplotlib.pyplot.axis', 'pyplot.axis', (['"""off"""'], {}), "('off')\n", (7474, 7481), False, 'from matplotlib import pyplot\n'), ((7498, 7516), 'matplotlib.pyplot.pause', 'pyplot.pause', (['(0.01)'], {}), '(0.01)\n', (7510, 7516), False, 'from matplotlib import pyplot\n'), ((8801, 8833), 'utils.fftc.ifftc', 'fftc.ifftc', (['kspace_input'], {'axis': '(1)'}), '(kspace_input, axis=1)\n', (8811, 8833), False, 'from utils import fftc\n'), ((9999, 10060), 'numpy.zeros', 'np.zeros', (['((pad,) + kspace_input_batch.shape[1:])', 'np.complex64'], {}), '((pad,) + kspace_input_batch.shape[1:], np.complex64)\n', (10007, 10060), True, 'import numpy as np\n'), ((10134, 10187), 'numpy.concatenate', 'np.concatenate', (['(kspace_input_batch, zeropad)'], {'axis': '(0)'}), '((kspace_input_batch, zeropad), axis=0)\n', (10148, 10187), True, 'import numpy as np\n'), ((10235, 10298), 'numpy.zeros', 'np.zeros', (['((pad,) + sensemap_input_batch.shape[1:])', 'np.complex64'], {}), '((pad,) + sensemap_input_batch.shape[1:], np.complex64)\n', (10243, 10298), True, 'import numpy as np\n'), ((10374, 10429), 'numpy.concatenate', 'np.concatenate', (['(sensemap_input_batch, zeropad)'], {'axis': '(0)'}), '((sensemap_input_batch, zeropad), axis=0)\n', (10388, 10429), True, 'import numpy as np\n'), ((10752, 10816), 'numpy.zeros', 'np.zeros', (['((kspace_input.shape[0],) + out.shape[1:])', 'np.complex64'], {}), '((kspace_input.shape[0],) + out.shape[1:], np.complex64)\n', (10760, 10816), True, 'import numpy as np\n'), ((11021, 11048), 'utils.mri.sumofsq', 
'mri.sumofsq', (['imout'], {'axis': '(-1)'}), '(imout, axis=-1)\n', (11032, 11048), False, 'from utils import mri\n'), ((11654, 11670), 'matplotlib.pyplot.figure', 'pyplot.figure', (['(1)'], {}), '(1)\n', (11667, 11670), False, 'from matplotlib import pyplot\n'), ((11687, 11710), 'matplotlib.pyplot.subplot', 'pyplot.subplot', (['(2)', '(1)', '(1)'], {}), '(2, 1, 1)\n', (11701, 11710), False, 'from matplotlib import pyplot\n'), ((11727, 11771), 'matplotlib.pyplot.imshow', 'pyplot.imshow', (['image_axial_disp'], {'cmap': '"""gray"""'}), "(image_axial_disp, cmap='gray')\n", (11740, 11771), False, 'from matplotlib import pyplot\n'), ((11788, 11806), 'matplotlib.pyplot.axis', 'pyplot.axis', (['"""off"""'], {}), "('off')\n", (11799, 11806), False, 'from matplotlib import pyplot\n'), ((11895, 11918), 'matplotlib.pyplot.subplot', 'pyplot.subplot', (['(2)', '(1)', '(2)'], {}), '(2, 1, 2)\n', (11909, 11918), False, 'from matplotlib import pyplot\n'), ((11935, 11977), 'matplotlib.pyplot.imshow', 'pyplot.imshow', (['image_sag_disp'], {'cmap': '"""gray"""'}), "(image_sag_disp, cmap='gray')\n", (11948, 11977), False, 'from matplotlib import pyplot\n'), ((11994, 12012), 'matplotlib.pyplot.axis', 'pyplot.axis', (['"""off"""'], {}), "('off')\n", (12005, 12012), False, 'from matplotlib import pyplot\n'), ((12029, 12047), 'matplotlib.pyplot.pause', 'pyplot.pause', (['(0.01)'], {}), '(0.01)\n', (12041, 12047), False, 'from matplotlib import pyplot\n'), ((2529, 2543), 'tensorflow.sqrt', 'tf.sqrt', (['scale'], {}), '(scale)\n', (2536, 2543), True, 'import tensorflow as tf\n'), ((6585, 6608), 'utils.fftc.ifftc', 'fftc.ifftc', (['out'], {'axis': '(1)'}), '(out, axis=1)\n', (6595, 6608), False, 'from utils import fftc\n'), ((10964, 10987), 'utils.fftc.ifftc', 'fftc.ifftc', (['out'], {'axis': '(1)'}), '(out, axis=1)\n', (10974, 10987), False, 'from utils import fftc\n'), ((2308, 2321), 'tensorflow.abs', 'tf.abs', (['scale'], {}), '(scale)\n', (2314, 2321), True, 'import tensorflow as 
tf\n'), ((11286, 11306), 'numpy.max', 'np.max', (['image_output'], {}), '(image_output)\n', (11292, 11306), True, 'import numpy as np\n'), ((11588, 11608), 'numpy.max', 'np.max', (['image_output'], {}), '(image_output)\n', (11594, 11608), True, 'import numpy as np\n'), ((11213, 11232), 'numpy.max', 'np.max', (['image_input'], {}), '(image_input)\n', (11219, 11232), True, 'import numpy as np\n'), ((11475, 11494), 'numpy.max', 'np.max', (['image_input'], {}), '(image_input)\n', (11481, 11494), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""Kwik GUI."""
#------------------------------------------------------------------------------
# Imports
#------------------------------------------------------------------------------
import logging
from pathlib import Path
import shutil
from tempfile import TemporaryDirectory
import numpy as np
from phylib.stats.clusters import get_waveform_amplitude
from phylib.utils import Bunch, connect
from phy.utils.context import Context
from phy.gui import create_app, run_app
from ..base import WaveformMixin, FeatureMixin, TraceMixin, BaseController
from phy.cluster.supervisor import Supervisor
logger = logging.getLogger(__name__)
try:
from klusta.kwik import KwikModel
from klusta.launch import cluster
except ImportError: # pragma: no cover
logger.debug("Package klusta not installed: the KwikGUI will not work.")
#------------------------------------------------------------------------------
# Kwik GUI
#------------------------------------------------------------------------------
def _backup(path):
"""Backup a file."""
assert path.exists()
path_backup = str(path) + '.bak'
if not Path(path_backup).exists():
logger.info("Backup `%s`.", path_backup)
shutil.copy(path, path_backup)
class KwikModelGUI(KwikModel):
    """KwikModel subclass exposing the accessors expected by the phy GUI."""

    @property
    def features(self):
        # Alias: the GUI expects a `features` attribute.
        return self.all_features

    def get_features(self, spike_ids, channel_ids):
        """Return the feature array restricted to the given spikes and channels."""
        selected = self.all_features[spike_ids]
        return selected[:, channel_ids, :]

    def get_waveforms(self, spike_ids, channel_ids):
        """Return the waveform array restricted to the given spikes and channels."""
        selected = self.all_waveforms[spike_ids]
        return selected[:, channel_ids, :]
class KwikController(WaveformMixin, FeatureMixin, TraceMixin, BaseController):
    """Controller for the Kwik GUI.
    Constructor
    -----------
    kwik_path : str or Path
        Path to the kwik file
    channel_group : int
        The default channel group to load
    clustering : str
        The default clustering to load
    config_dir : str or Path
        Path to the configuration directory
    model : Model
        Model object, optional (it is automatically created otherwise)
    plugins : list
        List of plugins to manually activate, optional (the plugins are automatically loaded from
        the user configuration directory).
    clear_cache : boolean
        Whether to clear the cache on startup.
    enable_threading : boolean
        Whether to enable threading in the views when selecting clusters.
    """
    gui_name = 'KwikGUI'
    # Classes to load by default, in that order. The view refresh follows the same order
    # when the cluster selection changes.
    default_views = (
        'CorrelogramView',
        'ISIView',
        'WaveformView',
        'FeatureView',
        'AmplitudeView',
        'FiringRateView',
        'TraceView',
    )
    def __init__(self, kwik_path=None, **kwargs):
        """Store the channel group / clustering options and delegate to BaseController."""
        assert kwik_path
        kwik_path = Path(kwik_path)
        dir_path = kwik_path.parent
        # Saved before the super() call so _set_cache / _create_model can use them.
        self.channel_group = kwargs.get('channel_group', None)
        self.clustering = kwargs.get('clustering', None)
        super(KwikController, self).__init__(kwik_path=kwik_path, dir_path=dir_path, **kwargs)
    # Internal methods
    # -------------------------------------------------------------------------
    def _set_cache(self, clear_cache=None):
        """Set up the cache, clear it if required, and create the Context instance."""
        # One cache subdirectory per channel group, so groups do not collide.
        self.cache_dir = self.dir_path / '.phy'
        if self.channel_group is not None:
            self.cache_dir = self.cache_dir / str(self.channel_group)
        if clear_cache:
            logger.warn("Deleting the cache directory %s.", self.cache_dir)
            shutil.rmtree(self.cache_dir, ignore_errors=True)
        self.context = Context(self.cache_dir)
    def _create_model(self, **kwargs):
        """Back up the kwik file and open it as a KwikModelGUI instance."""
        kwik_path = kwargs.get('kwik_path')
        _backup(kwik_path)
        # KwikModel only understands these two keyword arguments.
        kwargs = {k: v for k, v in kwargs.items() if k in ('clustering', 'channel_group')}
        return KwikModelGUI(str(kwik_path), **kwargs)
    def _set_supervisor(self):
        """Create the Supervisor instance."""
        # Load the new cluster id.
        new_cluster_id = self.context.load('new_cluster_id').get('new_cluster_id', None)
        # Cluster groups.
        cluster_groups = self.model.cluster_groups
        # Create the Supervisor instance.
        supervisor = Supervisor(
            spike_clusters=self.model.spike_clusters,
            cluster_groups=cluster_groups,
            cluster_metrics=self.cluster_metrics,
            similarity=self.similarity_functions[self.similarity],
            new_cluster_id=new_cluster_id,
            context=self.context,
        )
        # Connect the `save_clustering` event raised by the supervisor when saving
        # to the model's saving functions.
        connect(self.on_save_clustering, sender=supervisor)
        @connect(sender=supervisor)
        def on_attach_gui(sender):
            # Register a GUI action once the supervisor is attached to the GUI.
            @supervisor.actions.add(shortcut='shift+ctrl+k', set_busy=True)
            def recluster(cluster_ids=None):
                """Relaunch KlustaKwik on the selected clusters."""
                # Selected clusters.
                cluster_ids = supervisor.selected
                spike_ids = self.selector(None, cluster_ids)
                logger.info("Running KlustaKwik on %d spikes.", len(spike_ids))
                # Run KK2 in a temporary directory to avoid side effects.
                n = 10
                with TemporaryDirectory() as tempdir:
                    spike_clusters, metadata = cluster(
                        self.model,
                        spike_ids,
                        num_starting_clusters=n,
                        tempdir=tempdir,
                    )
                # Apply the new cluster assignments as a split.
                self.supervisor.split(spike_ids, spike_clusters)
        self.supervisor = supervisor
    def _get_masks(self, cluster_id):
        """Return the per-spike channel masks of a cluster (all ones if unavailable)."""
        spike_ids = self.selector(self.n_spikes_waveforms, [cluster_id])
        if self.model.all_masks is None:
            return np.ones((self.n_spikes_waveforms, self.model.n_channels))
        return self.model.all_masks[spike_ids]
    def _get_mean_masks(self, cluster_id):
        """Return the masks averaged across the selected spikes."""
        return np.mean(self._get_masks(cluster_id), axis=0)
    def _get_waveforms(self, cluster_id):
        """Return a selection of waveforms for a cluster."""
        pos = self.model.channel_positions
        spike_ids = self.selector(self.n_spikes_waveforms, [cluster_id])
        data = self.model.all_waveforms[spike_ids]
        mm = self._get_mean_masks(cluster_id)
        mw = np.mean(data, axis=0)
        amp = get_waveform_amplitude(mm, mw)
        masks = self._get_masks(cluster_id)
        # Find the best channels: sort by decreasing waveform amplitude.
        channel_ids = np.argsort(amp)[::-1]
        return Bunch(
            data=data[..., channel_ids],
            channel_ids=channel_ids,
            channel_positions=pos[channel_ids],
            masks=masks[:, channel_ids],
        )
    def _get_mean_waveforms(self, cluster_id):
        """Return the mean waveform of a cluster as a single-item Bunch."""
        b = self._get_waveforms(cluster_id).copy()
        b.data = np.mean(b.data, axis=0)[np.newaxis, ...]
        # The ** .1 boosts small mask values for display purposes.
        b.masks = np.mean(b.masks, axis=0)[np.newaxis, ...] ** .1
        b['alpha'] = 1.
        return b
    # Public methods
    # -------------------------------------------------------------------------
    def get_best_channels(self, cluster_id):
        """Get the best channels of a given cluster."""
        mm = self._get_mean_masks(cluster_id)
        channel_ids = np.argsort(mm)[::-1]
        # Keep channels whose mean mask exceeds .1; fall back to the top 4.
        ind = mm[channel_ids] > .1
        if np.sum(ind) > 0:
            channel_ids = channel_ids[ind]
        else:  # pragma: no cover
            channel_ids = channel_ids[:4]
        return channel_ids
    def get_channel_amplitudes(self, cluster_id):
        """Return the channel amplitudes of the best channels of a given cluster."""
        channel_ids = self.get_best_channels(cluster_id)
        # Kwik does not provide per-channel amplitudes; use a flat value of 1.
        return channel_ids, np.ones(len(channel_ids))
    def on_save_clustering(self, sender, spike_clusters, groups, *labels):
        """Save the modified data."""
        # The model expects capitalized group names.
        groups = {c: g.title() for c, g in groups.items()}
        self.model.save(spike_clusters, groups)
        self._save_cluster_info()
#------------------------------------------------------------------------------
# Kwik commands
#------------------------------------------------------------------------------
def kwik_gui(path, channel_group=None, clustering=None, **kwargs):  # pragma: no cover
    """Create and run the Kwik GUI for the dataset at `path`."""
    assert path
    create_app()
    ctl = KwikController(path, channel_group=channel_group, clustering=clustering, **kwargs)
    window = ctl.create_gui()
    window.show()
    run_app()
    window.close()
def kwik_describe(path, channel_group=None, clustering=None):
    """Print a description of the Kwik dataset at `path`."""
    assert path
    model = KwikModel(path, channel_group=channel_group, clustering=clustering)
    model.describe()
| [
"logging.getLogger",
"numpy.mean",
"phylib.utils.Bunch",
"tempfile.TemporaryDirectory",
"phy.utils.context.Context",
"numpy.ones",
"pathlib.Path",
"klusta.launch.cluster",
"shutil.rmtree",
"numpy.argsort",
"numpy.sum",
"phy.cluster.supervisor.Supervisor",
"phy.gui.run_app",
"shutil.copy",
... | [((636, 663), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (653, 663), False, 'import logging\n'), ((8539, 8551), 'phy.gui.create_app', 'create_app', ([], {}), '()\n', (8549, 8551), False, 'from phy.gui import create_app, run_app\n'), ((8714, 8723), 'phy.gui.run_app', 'run_app', ([], {}), '()\n', (8721, 8723), False, 'from phy.gui import create_app, run_app\n'), ((1239, 1269), 'shutil.copy', 'shutil.copy', (['path', 'path_backup'], {}), '(path, path_backup)\n', (1250, 1269), False, 'import shutil\n'), ((2898, 2913), 'pathlib.Path', 'Path', (['kwik_path'], {}), '(kwik_path)\n', (2902, 2913), False, 'from pathlib import Path\n'), ((3747, 3770), 'phy.utils.context.Context', 'Context', (['self.cache_dir'], {}), '(self.cache_dir)\n', (3754, 3770), False, 'from phy.utils.context import Context\n'), ((4372, 4615), 'phy.cluster.supervisor.Supervisor', 'Supervisor', ([], {'spike_clusters': 'self.model.spike_clusters', 'cluster_groups': 'cluster_groups', 'cluster_metrics': 'self.cluster_metrics', 'similarity': 'self.similarity_functions[self.similarity]', 'new_cluster_id': 'new_cluster_id', 'context': 'self.context'}), '(spike_clusters=self.model.spike_clusters, cluster_groups=\n cluster_groups, cluster_metrics=self.cluster_metrics, similarity=self.\n similarity_functions[self.similarity], new_cluster_id=new_cluster_id,\n context=self.context)\n', (4382, 4615), False, 'from phy.cluster.supervisor import Supervisor\n'), ((4820, 4871), 'phylib.utils.connect', 'connect', (['self.on_save_clustering'], {'sender': 'supervisor'}), '(self.on_save_clustering, sender=supervisor)\n', (4827, 4871), False, 'from phylib.utils import Bunch, connect\n'), ((4882, 4908), 'phylib.utils.connect', 'connect', ([], {'sender': 'supervisor'}), '(sender=supervisor)\n', (4889, 4908), False, 'from phylib.utils import Bunch, connect\n'), ((6566, 6587), 'numpy.mean', 'np.mean', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (6573, 6587), True, 'import numpy as 
np\n'), ((6602, 6632), 'phylib.stats.clusters.get_waveform_amplitude', 'get_waveform_amplitude', (['mm', 'mw'], {}), '(mm, mw)\n', (6624, 6632), False, 'from phylib.stats.clusters import get_waveform_amplitude\n'), ((6770, 6898), 'phylib.utils.Bunch', 'Bunch', ([], {'data': 'data[..., channel_ids]', 'channel_ids': 'channel_ids', 'channel_positions': 'pos[channel_ids]', 'masks': 'masks[:, channel_ids]'}), '(data=data[..., channel_ids], channel_ids=channel_ids,\n channel_positions=pos[channel_ids], masks=masks[:, channel_ids])\n', (6775, 6898), False, 'from phylib.utils import Bunch, connect\n'), ((3674, 3723), 'shutil.rmtree', 'shutil.rmtree', (['self.cache_dir'], {'ignore_errors': '(True)'}), '(self.cache_dir, ignore_errors=True)\n', (3687, 3723), False, 'import shutil\n'), ((6027, 6084), 'numpy.ones', 'np.ones', (['(self.n_spikes_waveforms, self.model.n_channels)'], {}), '((self.n_spikes_waveforms, self.model.n_channels))\n', (6034, 6084), True, 'import numpy as np\n'), ((6733, 6748), 'numpy.argsort', 'np.argsort', (['amp'], {}), '(amp)\n', (6743, 6748), True, 'import numpy as np\n'), ((7070, 7093), 'numpy.mean', 'np.mean', (['b.data'], {'axis': '(0)'}), '(b.data, axis=0)\n', (7077, 7093), True, 'import numpy as np\n'), ((7490, 7504), 'numpy.argsort', 'np.argsort', (['mm'], {}), '(mm)\n', (7500, 7504), True, 'import numpy as np\n'), ((7557, 7568), 'numpy.sum', 'np.sum', (['ind'], {}), '(ind)\n', (7563, 7568), True, 'import numpy as np\n'), ((8863, 8930), 'klusta.kwik.KwikModel', 'KwikModel', (['path'], {'channel_group': 'channel_group', 'clustering': 'clustering'}), '(path, channel_group=channel_group, clustering=clustering)\n', (8872, 8930), False, 'from klusta.kwik import KwikModel\n'), ((1154, 1171), 'pathlib.Path', 'Path', (['path_backup'], {}), '(path_backup)\n', (1158, 1171), False, 'from pathlib import Path\n'), ((7129, 7153), 'numpy.mean', 'np.mean', (['b.masks'], {'axis': '(0)'}), '(b.masks, axis=0)\n', (7136, 7153), True, 'import numpy as np\n'), ((5480, 
5500), 'tempfile.TemporaryDirectory', 'TemporaryDirectory', ([], {}), '()\n', (5498, 5500), False, 'from tempfile import TemporaryDirectory\n'), ((5560, 5632), 'klusta.launch.cluster', 'cluster', (['self.model', 'spike_ids'], {'num_starting_clusters': 'n', 'tempdir': 'tempdir'}), '(self.model, spike_ids, num_starting_clusters=n, tempdir=tempdir)\n', (5567, 5632), False, 'from klusta.launch import cluster\n')] |
import numpy as np
import pymc3 as pm
from sklearn.metrics import r2_score
import theano
import theano.tensor as T
from pymc3_models.exc import PyMC3ModelsError
from pymc3_models.models import BayesianModel
class LinearRegression(BayesianModel):
    """
    Linear Regression built using PyMC3.
    """
    def __init__(self):
        super(LinearRegression, self).__init__()
    def create_model(self):
        """
        Creates and returns the PyMC3 model.
        Note: The size of the shared variables must match the size of the training data. Otherwise, setting the shared variables later will raise an error. See http://docs.pymc.io/advanced_theano.html
        Returns
        ----------
        the PyMC3 model
        """
        # Shared variables sized to the training data; fit/predict swap in new
        # arrays via _set_shared_vars without rebuilding the model graph.
        model_input = theano.shared(np.zeros([self.num_training_samples, self.num_pred]))
        model_output = theano.shared(np.zeros(self.num_training_samples))
        self.shared_vars = {
            'model_input': model_input,
            'model_output': model_output,
        }
        model = pm.Model()
        with model:
            # Weakly-informative Normal priors on the intercept and coefficients.
            alpha = pm.Normal('alpha', mu=0, sd=100, shape=(1))
            betas = pm.Normal('betas', mu=0, sd=100, shape=(1, self.num_pred))
            # Prior on the observation noise scale.
            s = pm.HalfNormal('s', tau=1)
            mean = alpha + T.sum(betas * model_input, 1)
            # Gaussian likelihood around the linear predictor.
            y = pm.Normal('y', mu=mean, sd=s, observed=model_output)
        return model
    def fit(self, X, y, inference_type='advi', minibatch_size=None, inference_args=None):
        """
        Train the Linear Regression model
        Parameters
        ----------
        X : numpy array, shape [n_samples, n_features]
        y : numpy array, shape [n_samples, ]
        inference_type : string, specifies which inference method to call. Defaults to 'advi'. Currently, only 'advi' and 'nuts' are supported
        minibatch_size : number of samples to include in each minibatch for ADVI, defaults to None, so minibatch is not run by default
        inference_args : dict, arguments to be passed to the inference methods. Check the PyMC3 docs for permissable values. If no arguments are specified, default values will be set.
        """
        self.num_training_samples, self.num_pred = X.shape
        self.inference_type = inference_type
        # The likelihood expects a 1-D target vector.
        if y.ndim != 1:
            y = np.squeeze(y)
        if not inference_args:
            inference_args = self._set_default_inference_args()
        if self.cached_model is None:
            self.cached_model = self.create_model()
        if minibatch_size:
            with self.cached_model:
                # Replace the shared variables with minibatch generators for ADVI.
                minibatches = {
                    self.shared_vars['model_input']: pm.Minibatch(X, batch_size=minibatch_size),
                    self.shared_vars['model_output']: pm.Minibatch(y, batch_size=minibatch_size),
                }
                inference_args['more_replacements'] = minibatches
        else:
            self._set_shared_vars({'model_input': X, 'model_output': y})
        self._inference(inference_type, inference_args)
        return self
    def predict(self, X, return_std=False):
        """
        Predicts values of new data with a trained Linear Regression model
        Parameters
        ----------
        X : numpy array, shape [n_samples, n_features]
        return_std : Boolean flag of whether to return standard deviations with mean values. Defaults to False.
        """
        if self.trace is None:
            raise PyMC3ModelsError('Run fit on the model before predict.')
        num_samples = X.shape[0]
        if self.cached_model is None:
            self.cached_model = self.create_model()
        # The output placeholder is a dummy: only the input matters when
        # sampling from the posterior predictive.
        self._set_shared_vars({'model_input': X, 'model_output': np.zeros(num_samples)})
        ppc = pm.sample_ppc(self.trace, model=self.cached_model, samples=2000)
        if return_std:
            return ppc['y'].mean(axis=0), ppc['y'].std(axis=0)
        else:
            return ppc['y'].mean(axis=0)
    def score(self, X, y):
        """
        Scores new data with a trained model.
        Parameters
        ----------
        X : numpy array, shape [n_samples, n_features]
        y : numpy array, shape [n_samples, ]
        """
        # R^2 of the posterior predictive mean against the true targets.
        return r2_score(y, self.predict(X))
    def save(self, file_prefix):
        # Persist custom hyper-parameters alongside whatever the base class saves.
        params = {
            'inference_type': self.inference_type,
            'num_pred': self.num_pred,
            'num_training_samples': self.num_training_samples
        }
        super(LinearRegression, self).save(file_prefix, params)
    def load(self, file_prefix):
        # Restore the custom hyper-parameters written by `save`.
        params = super(LinearRegression, self).load(file_prefix, load_custom_params=True)
        self.inference_type = params['inference_type']
        self.num_pred = params['num_pred']
        self.num_training_samples = params['num_training_samples']
| [
"pymc3_models.exc.PyMC3ModelsError",
"pymc3.sample_ppc",
"theano.tensor.sum",
"pymc3.HalfNormal",
"pymc3.Minibatch",
"numpy.squeeze",
"numpy.zeros",
"pymc3.Model",
"pymc3.Normal"
] | [((1044, 1054), 'pymc3.Model', 'pm.Model', ([], {}), '()\n', (1052, 1054), True, 'import pymc3 as pm\n'), ((3750, 3814), 'pymc3.sample_ppc', 'pm.sample_ppc', (['self.trace'], {'model': 'self.cached_model', 'samples': '(2000)'}), '(self.trace, model=self.cached_model, samples=2000)\n', (3763, 3814), True, 'import pymc3 as pm\n'), ((776, 828), 'numpy.zeros', 'np.zeros', (['[self.num_training_samples, self.num_pred]'], {}), '([self.num_training_samples, self.num_pred])\n', (784, 828), True, 'import numpy as np\n'), ((868, 903), 'numpy.zeros', 'np.zeros', (['self.num_training_samples'], {}), '(self.num_training_samples)\n', (876, 903), True, 'import numpy as np\n'), ((1096, 1137), 'pymc3.Normal', 'pm.Normal', (['"""alpha"""'], {'mu': '(0)', 'sd': '(100)', 'shape': '(1)'}), "('alpha', mu=0, sd=100, shape=1)\n", (1105, 1137), True, 'import pymc3 as pm\n'), ((1160, 1218), 'pymc3.Normal', 'pm.Normal', (['"""betas"""'], {'mu': '(0)', 'sd': '(100)', 'shape': '(1, self.num_pred)'}), "('betas', mu=0, sd=100, shape=(1, self.num_pred))\n", (1169, 1218), True, 'import pymc3 as pm\n'), ((1236, 1261), 'pymc3.HalfNormal', 'pm.HalfNormal', (['"""s"""'], {'tau': '(1)'}), "('s', tau=1)\n", (1249, 1261), True, 'import pymc3 as pm\n'), ((1337, 1389), 'pymc3.Normal', 'pm.Normal', (['"""y"""'], {'mu': 'mean', 'sd': 's', 'observed': 'model_output'}), "('y', mu=mean, sd=s, observed=model_output)\n", (1346, 1389), True, 'import pymc3 as pm\n'), ((2320, 2333), 'numpy.squeeze', 'np.squeeze', (['y'], {}), '(y)\n', (2330, 2333), True, 'import numpy as np\n'), ((3463, 3519), 'pymc3_models.exc.PyMC3ModelsError', 'PyMC3ModelsError', (['"""Run fit on the model before predict."""'], {}), "('Run fit on the model before predict.')\n", (3479, 3519), False, 'from pymc3_models.exc import PyMC3ModelsError\n'), ((1290, 1319), 'theano.tensor.sum', 'T.sum', (['(betas * model_input)', '(1)'], {}), '(betas * model_input, 1)\n', (1295, 1319), True, 'import theano.tensor as T\n'), ((3711, 3732), 'numpy.zeros', 
'np.zeros', (['num_samples'], {}), '(num_samples)\n', (3719, 3732), True, 'import numpy as np\n'), ((2670, 2712), 'pymc3.Minibatch', 'pm.Minibatch', (['X'], {'batch_size': 'minibatch_size'}), '(X, batch_size=minibatch_size)\n', (2682, 2712), True, 'import pymc3 as pm\n'), ((2768, 2810), 'pymc3.Minibatch', 'pm.Minibatch', (['y'], {'batch_size': 'minibatch_size'}), '(y, batch_size=minibatch_size)\n', (2780, 2810), True, 'import pymc3 as pm\n')] |
# -*- coding: utf-8 -*-
"""
mosaicRaster.py
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = '<NAME>'
__date__ = '2021-01-12'
__copyright__ = '(C) 2021, <NAME>'
from PyQt5.QtCore import QCoreApplication, QVariant
from qgis.core import (QgsProcessing,
QgsFeatureSink,
QgsWkbTypes,
QgsFields,
QgsField,
QgsFeature,
QgsPointXY,
QgsGeometry,
QgsProcessingException,
QgsProcessingAlgorithm,
QgsProcessingParameterString,
QgsProcessingParameterNumber,
QgsProcessingParameterField,
QgsProcessingParameterBoolean,
QgsProcessingParameterCrs,
QgsProcessingParameterEnum,
QgsFeatureRequest,
QgsExpression,
QgsProcessingParameterFeatureSource,
QgsProcessingParameterFeatureSink,
QgsProcessingParameterFileDestination,
QgsProcessingParameterMultipleLayers,
QgsProcessingParameterVectorLayer,
QgsProcessingParameterRasterLayer,
QgsProcessingParameterRasterDestination,
QgsApplication,
QgsProject,
QgsRasterLayer,
QgsCoordinateTransform,
QgsCoordinateReferenceSystem)
from osgeo import osr, gdal_array, gdal #https://gdal.org/python/
from itertools import combinations
from matplotlib import path
import numpy as np
from pyproj.crs import CRS
from math import floor, ceil
from lftools.geocapt.imgs import Imgs
from lftools.geocapt.dip import Interpolar
import os
from qgis.PyQt.QtGui import QIcon
class MosaicRaster(QgsProcessingAlgorithm):
LOC = QgsApplication.locale()[:2]
def translate(self, string):
    # Delegate to Qt's translation machinery using the 'Processing' context.
    return QCoreApplication.translate('Processing', string)
def tr(self, *string):
    """Localize a string.

    Accepts the English text and, optionally, a Portuguese variant as a
    second argument. When the QGIS locale is 'pt' and the variant was
    supplied, it is returned directly; otherwise the English text goes
    through Qt's translation machinery.
    """
    if self.LOC == 'pt' and len(string) == 2:
        return string[1]
    return self.translate(string[0])
def createInstance(self):
    # Factory method required by the QGIS Processing framework.
    return MosaicRaster()
def name(self):
    # Unique algorithm identifier used internally by Processing.
    return 'mosaicraster'
def displayName(self):
    # Localized name shown in the Processing toolbox.
    return self.tr('Mosaic raster', 'Mosaicar raster')
def group(self):
    # Localized toolbox group label.
    return self.tr('Raster')
def groupId(self):
    # Unique identifier of the toolbox group.
    return 'raster'
def tags(self):
    # Search keywords (English and Portuguese) for the Processing toolbox.
    return self.tr('mosaic,merge,raster,combine,mosaik,mosaico,mesclar').split(',')
def icon(self):
    # Icon file shipped two directories above this module, under images/.
    return QIcon(os.path.join(os.path.dirname(os.path.dirname(__file__)), 'images/raster.png'))
txt_en = 'Creates raster mosaic: a combination or merge of two or more images.'
txt_pt = 'Cria um mosaico: uma combinação ou mesclagem de duas ou mais imagens.'
figure = 'images/tutorial/raster_mosaic.jpg'
def shortHelpString(self):
    # Build the help text shown in the Processing panel: localized
    # description plus an HTML footer (tutorial figure, author, social links).
    social_BW = Imgs().social_BW
    footer = '''<div align="center">
<img src="'''+ os.path.join(os.path.dirname(os.path.dirname(__file__)), self.figure) +'''">
</div>
<div align="right">
<p align="right">
<b>'''+self.tr('Author: <NAME>', 'Autor: <NAME>')+'''</b>
</p>'''+ social_BW + '''</div>
</div>'''
    return self.tr(self.txt_en, self.txt_pt) + footer
RASTERLIST ='RASTERLIST'
CHANGERESOLUTION = 'CHANGERESOLUTION'
RESOLUTION = 'RESOLUTION'
OVERLAP = 'OVERLAP'
NULLVALUE = 'NULLVALUE'
RESAMPLING = 'RESAMPLING'
CLIP = 'CLIP'
FRAME = 'FRAME'
MOSAIC = 'MOSAIC'
OPEN = 'OPEN'
def initAlgorithm(self, config=None):
    """Declare the algorithm's inputs and outputs for the Processing framework.

    Parameters are registered in the order they appear in the dialog.
    Fix: the English label of the OVERLAP parameter read 'Ovelap';
    corrected to 'Overlap' (the Portuguese variant 'Sobreposição'
    confirms the intended meaning).
    """
    # INPUT
    self.addParameter(
        QgsProcessingParameterMultipleLayers(
            self.RASTERLIST,
            self.tr('Raster List', 'Lista de Rasters'),
            layerType = QgsProcessing.TypeRaster
        )
    )
    self.addParameter(
        QgsProcessingParameterBoolean(
            self.CHANGERESOLUTION,
            self.tr('Change resolution', 'Alterar resolução'),
            defaultValue = False
        )
    )
    self.addParameter(
        QgsProcessingParameterNumber(
            self.RESOLUTION,
            self.tr('New Resolution (meters)', 'Nova resolução espacial (metros)'),
            type =1, #Double = 1 and Integer = 0
            defaultValue = 100,
            optional = True
        )
    )
    # Overlap-resolution strategies for pixels covered by several rasters.
    sobrep = [self.tr('First (faster)', 'Primeiro (mais rápido)'),
              self.tr('Average', 'Média'),
              self.tr('Median', 'Mediana'),
              self.tr('Maximum', 'Máximo'),
              self.tr('Minimum', 'Mínimo')]
    self.addParameter(
        QgsProcessingParameterEnum(
            self.OVERLAP,
            self.tr('Overlap', 'Sobreposição'),
            options = sobrep,
            defaultValue= 0
        )
    )
    # Resampling (interpolation) methods.
    interp = [self.tr('Nearest neighbor', 'Vizinho mais próximo'),
              self.tr('Bilinear'),
              self.tr('Bicubic', 'Bicúbica')]
    self.addParameter(
        QgsProcessingParameterEnum(
            self.RESAMPLING,
            self.tr('Interpolation', 'Interpolação'),
            options = interp,
            defaultValue= 0
        )
    )
    self.addParameter(
        QgsProcessingParameterNumber(
            self.NULLVALUE,
            self.tr('Null value', 'Valor nulo'),
            type =0, #Double = 1 and Integer = 0
            defaultValue = 0
        )
    )
    self.addParameter(
        QgsProcessingParameterBoolean(
            self.CLIP,
            self.tr('Clip by frame', 'Cortar pela moldura'),
            defaultValue = False
        )
    )
    self.addParameter(
        QgsProcessingParameterFeatureSource(
            self.FRAME,
            self.tr('Frame', 'Moldura'),
            [QgsProcessing.TypeVectorPolygon],
            optional = True
        )
    )
    # OUTPUT
    self.addParameter(
        QgsProcessingParameterFileDestination(
            self.MOSAIC,
            self.tr('Mosaic', 'Mosaico'),
            fileFilter = 'GeoTIFF (*.tif)'
        )
    )
    self.addParameter(
        QgsProcessingParameterBoolean(
            self.OPEN,
            self.tr('Load mosaic', 'Carregar mosaico'),
            defaultValue= True
        )
    )
def processAlgorithm(self, parameters, context, feedback):
# inputs
rasters = self.parameterAsLayerList(
parameters,
self.RASTERLIST,
context
)
if rasters is None:
raise QgsProcessingException(self.invalidSourceError(parameters, self.RASTERLIST))
reamostragem = self.parameterAsEnum(
parameters,
self.RESAMPLING,
context
)
reamostragem = ['nearest','bilinear','bicubic'][reamostragem]
sobrep = self.parameterAsEnum(
parameters,
self.OVERLAP,
context
)
muda_res = self.parameterAsBool(
parameters,
self.CHANGERESOLUTION,
context
)
resolucao = self.parameterAsDouble(
parameters,
self.RESOLUTION,
context
)
valor_nulo = self.parameterAsDouble(
parameters,
self.NULLVALUE,
context
)
moldura = self.parameterAsDouble(
parameters,
self.CLIP,
context
)
if moldura:
vlayer = self.parameterAsVectorLayer(
parameters,
self.FRAME,
context
)
if vlayer is None:
raise QgsProcessingException(self.invalidSourceError(parameters, self.FRAME))
# output
Output = self.parameterAsFileOutput(
parameters,
self.MOSAIC,
context
)
Carregar = self.parameterAsBool(
parameters,
self.OPEN,
context
)
lista = []
for raster_lyr in rasters:
lista += [raster_lyr.dataProvider().dataSourceUri()]
if len(lista) < 1:
raise QgsProcessingException(self.tr('At least one raster must be selected!', 'Pelo menos um raster deve ser selecionado!'))
if len(lista) == 1:
sobrep = 0 # apenas um raster (sem sobreposicao)
# Gerar geometria para cada raster
geoms = []
SRC = []
n_bands =[]
GDT = []
nulos = []
XRES, YRES = [], []
for item in lista:
image = gdal.Open(item)
SRC += [QgsCoordinateReferenceSystem(image.GetProjection())] # wkt
ulx, xres, xskew, uly, yskew, yres = image.GetGeoTransform()
cols = image.RasterXSize
rows = image.RasterYSize
n_bands += [image.RasterCount]
GDT += [image.GetRasterBand(1).DataType]
nulos += [image.GetRasterBand(1).GetNoDataValue()]
XRES += [xres]
YRES += [yres]
image=None # Close image
# Creating BBox
coord = [[QgsPointXY(ulx, uly),
QgsPointXY(ulx+cols*xres, uly),
QgsPointXY(ulx+cols*xres, uly+rows*yres),
QgsPointXY(ulx, uly+rows*yres),
QgsPointXY(ulx, uly)]]
geom = QgsGeometry.fromPolygonXY(coord)
geoms += [geom]
## Validar dados de entrada
# Mesmo numero de bandas
if not n_bands.count(n_bands[0]) == len(n_bands):
raise QgsProcessingException(self.tr('The images must have the same number of bands!', 'As imagens devem ter o mesmo número de bandas!'))
# Mesmo SRC
if not SRC.count(SRC[0]) == len(SRC):
raise QgsProcessingException(self.tr('The images must have the same CRS!', 'As imagens devem ter o mesmo SRC!'))
# Mesmo GDT
if not GDT.count(GDT[0]) == len(GDT):
raise QgsProcessingException(self.tr('The images must have the same data type!', 'As imagens devem ter o tipo de dado!'))
# Mesmo valor nulo
if not nulos.count(nulos[0]) == len(nulos):
raise QgsProcessingException(self.tr('The images must have the same definied null value!', 'As imagens devem ter o mesmo valor para definir pixel nulo!'))
# Dados para o raster de saída
prj = SRC[0].toWkt()
n_bands = n_bands[0]
GDT = GDT[0]
xres = np.mean(XRES)
yres = np.mean(YRES)
NULO = valor_nulo
if valor_nulo == -1:
valor_nulo = nulos[0] if nulos[0] is not None else 0
if moldura: # Pegar extensão X e Y da moldura
# SRC da moldura deve ser o mesmo dos raster
if vlayer.sourceCrs() != QgsCoordinateReferenceSystem(prj):
raise QgsProcessingException(self.tr("The frame's CRS must be iqual to the rasters' CRS!", 'O SRC da moldura deve ser igual ao SRC dos rasters!'))
for feat in vlayer.getFeatures():
moldura_geom = feat.geometry()
break
moldura_rect = moldura_geom.boundingBox()
y_min = moldura_rect.yMinimum()
y_max = moldura_rect.yMaximum()
x_min = moldura_rect.xMinimum()
x_max = moldura_rect.xMaximum()
else: # Mesclar geometrias e obter a extensão
new_geom = QgsGeometry()
new_geom = new_geom.unaryUnion(geoms)
extensao = new_geom.boundingBox()
# Coodenadas máxima e mínima da extensão
y_min = extensao.yMinimum()
y_max = extensao.yMaximum()
x_min = extensao.xMinimum()
x_max = extensao.xMaximum()
# Transformar resolucao de metros para graus, se o SRC for Geográfico
src_qgis = QgsCoordinateReferenceSystem(prj)
if src_qgis.isGeographic():
EPSG = int(src_qgis.authid().split(':')[-1])
proj_crs = CRS.from_epsg(EPSG)
a=proj_crs.ellipsoid.semi_major_metre
f=1/proj_crs.ellipsoid.inverse_flattening
e2 = f*(2-f)
N = a/np.sqrt(1-e2*(np.sin((y_min+y_max)/2))**2) # Raio de curvatura 1º vertical
M = a*(1-e2)/(1-e2*(np.sin((y_min+y_max)/2))**2)**(3/2.) # Raio de curvatura meridiana
R = np.sqrt(M*N) # Raio médio de Gauss
theta = resolucao/R
resolucao = np.degrees(theta) # Radianos para graus
# Definir n_col, n_lin e resolucao
if moldura:
if muda_res:
n_lin = round((y_max-y_min)/abs(resolucao))
n_col = round((x_max-x_min)/abs(resolucao))
else:
n_lin = round((y_max-y_min)/abs(yres))
n_col = round((x_max-x_min)/abs(xres))
xres = (x_max-x_min)/n_col
yres = -(y_max-y_min)/n_lin
else:
if muda_res:
n_lin = round((y_max-y_min)/abs(resolucao))
n_col = round((x_max-x_min)/abs(resolucao))
xres = resolucao
yres = -resolucao
else:
n_lin = round((y_max-y_min)/abs(yres))
n_col = round((x_max-x_min)/abs(xres))
xres = (x_max-x_min)/n_col
yres = -(y_max-y_min)/n_lin
feedback.pushInfo(self.tr('Size: ', 'Tamanho: ') + str(n_lin) +'x' + str(n_col))
# Geotransform do Mosaico
ulx = x_min
uly = y_max
xskew, yskew = 0, 0
geotransform = [ulx, xres, xskew, uly, yskew, yres]
origem = (ulx, uly)
resol_X = abs(xres)
resol_Y = abs(yres)
# Numeração das Imagens
valores = list(range(1,len(lista)+1))
# Definição de áreas de varredura
feedback.pushInfo(self.tr('Defining mosaic filling areas...', 'Definindo áreas de preenchimento do mosaico...'))
# Gerar combinações dos Rasters
if sobrep != 0:
combs = []
feedback.pushInfo(self.tr('Creating combinations...', 'Gerando combinações...'))
for k in range(1,5):
combs += list(combinations(valores,k))
if feedback.isCanceled():
break
# Armazenar geometrias exclusivas de cada combinação
classes = {}
feedback.pushInfo(self.tr('Indentifying combinations...', 'Identificando combinações...'))
Percent = 100.0/(len(combs))
current = 0
for comb in combs:
if len(comb)==1:
geom1 = geoms[comb[0]-1]
lista_outras = []
for geom in geoms:
if geom1 != geom:
lista_outras += [geom]
outras = QgsGeometry()
outras = outras.unaryUnion(lista_outras)
diferença = geom1.difference(outras)
if not diferença.isEmpty():
classes[comb] = {'geom': diferença}
elif len(comb) < len(valores):
intersecao = geoms[comb[0]-1]
sentinela = True
for ind in comb[1:]:
geom = geoms[ind-1]
if geom.intersects(intersecao):
intersecao = intersecao.intersection(geom)
else:
sentinela = False
continue
lista_outras = []
for valor in valores:
if valor not in comb:
lista_outras += [geoms[valor-1]]
outras = QgsGeometry()
outras = outras.unaryUnion(lista_outras)
if sentinela:
diferença = intersecao.difference(outras)
if not diferença.isEmpty():
classes[comb] = {'geom': diferença}
else:
intersecao = geoms[comb[0]-1]
sentinela = True
for ind in comb[1:]:
geom = geoms[ind-1]
if geom.intersects(intersecao):
intersecao = intersecao.intersection(geom)
else:
sentinela = False
continue
if sentinela:
classes[comb] = {'geom': intersecao}
if feedback.isCanceled():
break
current += 1
feedback.setProgress(int(current * Percent))
else:
# Gerar geometrias por área sem cálculo de sobreposição ("first")
combs = np.array(valores)[:,np.newaxis]
classes = {}
acumulado = geoms[combs[0][0]-1]
classes[(1,)] = {'geom': acumulado}
for k in range(1, len(combs)):
comb = combs[k]
geom = geoms[comb[0]-1]
diferenca = geom.difference(acumulado)
classes[(comb[0],)] = {'geom': diferenca}
acumulado = acumulado.combine(geom)
if feedback.isCanceled():
break
# Gerar lista com os valores classificados
Percent = 100.0/(len(classes))
current = 0
cont_px = 0
for classe in classes:
feedback.pushInfo((self.tr('Classifying class {}...', 'Classificando classe {}...')).format(str(classe)))
geom = classes[classe]['geom']
if moldura:
geom = geom.intersection(moldura_geom)
if geom.type() == 2:
if geom.isMultipart():
coords = geom.asMultiPolygon()[0][0]
else:
coords = geom.asPolygon()[0]
else:
del classes[classe]
continue
caminho = []
for ponto in coords:
linha = (origem[1]-ponto.y())/resol_Y
coluna = (ponto.x() - origem[0])/resol_X
caminho += [(linha, coluna)]
p = path.Path(caminho)
box = geom.boundingBox()
uly = box.yMaximum()
lry = box.yMinimum()
ulx = box.xMinimum()
lrx = box.xMaximum()
# Limites de Varredura
row_ini = int(round((origem[1]-uly)/resol_Y - 0.5))-1
row_fim = int(round((origem[1]-lry)/resol_Y - 0.5))+1
col_ini = int(round((ulx - origem[0])/resol_X - 0.5))-1
col_fim = int(round((lrx - origem[0])/resol_X - 0.5))+1
lin, col = np.meshgrid(np.arange(row_ini, row_fim),np.arange(col_ini, col_fim))
LIN = lin.flatten()[:,np.newaxis]
COL = col.flatten()[:,np.newaxis]
pixels_center = np.hstack((LIN + 0.5, COL + 0.5)) # centro do pixel
# Verificando pixels dentro de poligono
flags = p.contains_points(pixels_center)
pixels_x = (LIN+1).flatten()*flags # soma e subtrair 1 para evitar zero
pixels_y = (COL+1).flatten()*flags
pixels_x = (pixels_x[pixels_x>0]-1)[:,np.newaxis]
pixels_y = (pixels_y[pixels_y>0]-1)[:,np.newaxis]
pixels = np.hstack((pixels_x, pixels_y))
classes[classe]['pixels'] = pixels
cont_px += len(pixels)
current += 1
feedback.setProgress(int(current * Percent))
# Criar Raster
Driver = gdal.GetDriverByName('GTiff').Create(Output, n_col, n_lin, n_bands, GDT)
Driver.SetGeoTransform(geotransform)
Driver.SetProjection(prj)
# Mosaicar por banda
Percent = 100.0/(cont_px*n_bands)
current = 0
for k in range(n_bands):
feedback.pushInfo((self.tr('Creating band {}...', 'Criando banda {}...')).format(str(k+1)))
# Criar Array do mosaico
tipo = gdal_array.GDALTypeCodeToNumericTypeCode(GDT)
inteiro = True if GDT in (gdal.GDT_Byte,
gdal.GDT_UInt16,
gdal.GDT_Int16,
gdal.GDT_UInt32,
gdal.GDT_Int32) else False
banda = np.ones((n_lin,n_col), dtype = tipo) * (int(valor_nulo) if inteiro else valor_nulo)
imgs = {}
# Para cada classe abrir banda da(s) imagem(ns)
for classe in classes:
# Deixando somente imagens a serem utilizadas
for item in valores:
if (item not in classe) and (item in imgs):
del imgs[item]
# Preenchendo dados da imagem no dicionário
for img in classe:
if img not in imgs or len(lista) == 1:
img_path = lista[img-1]
image = gdal.Open(img_path)
ulx, xres, xskew, uly, yskew, yres = image.GetGeoTransform()
img_origem = (ulx, uly)
img_resol_X = abs(xres)
img_resol_Y = abs(yres)
img_band = image.GetRasterBand(k+1).ReadAsArray()
imgs[img] = {'band': img_band,
'xres': img_resol_X,
'yres': img_resol_Y,
'origem': img_origem }
image = None
if sobrep == 0: # Se for "primeiro", interpolar apenas da primeira img da comb, caso contrário
img = classe[0]
# Para cada pixel da classe
for px in classes[classe]['pixels']:
lin,col = px
X = origem[0] + resol_X*(col + 0.5)
Y = origem[1] - resol_Y*(lin + 0.5)
Interpolado = Interpolar(X, Y,
imgs[img]['band'],
imgs[img]['origem'],
imgs[img]['xres'],
imgs[img]['yres'],
reamostragem,
valor_nulo)
if Interpolado != valor_nulo:
banda[lin][col] = round(Interpolado) if inteiro else Interpolado
if feedback.isCanceled():
break
current += 1
feedback.setProgress(int(current * Percent))
else: # Para cada pixel da classe interpolar o valor da banda de cada img
for px in classes[classe]['pixels']:
lin,col = px
X = origem[0] + resol_X*(col + 0.5)
Y = origem[1] - resol_Y*(lin + 0.5)
interp_values = []
for img in imgs:
Interpolado = Interpolar(X, Y,
imgs[img]['band'],
imgs[img]['origem'],
imgs[img]['xres'],
imgs[img]['yres'],
reamostragem,
valor_nulo)
if Interpolado != valor_nulo:
interp_values += [Interpolado]
# Calcular o valor agregado (0:first, 1:average, 2:median, 3:min, 4:max) e inserir na banda (se byte, arredondar)
if interp_values:
if sobrep == 1:
result = np.mean(interp_values)
elif sobrep == 2:
result = np.median(interp_values)
elif sobrep == 3:
result = np.min(interp_values)
elif sobrep == 4:
result = np.max(interp_values)
banda[lin][col] = round(result) if inteiro else result
if feedback.isCanceled():
break
current += 1
feedback.setProgress(int(current * Percent))
# Salvar banda
outband = Driver.GetRasterBand(k+1)
feedback.pushInfo(self.tr('Writing Band {}...'.format(k+1), 'Escrevendo Banda {}...'.format(k+1)))
outband.WriteArray(banda)
if NULO != -1:
outband.SetNoDataValue(valor_nulo)
# Salvar e Fechar Raster
Driver.FlushCache() # Escrever no disco
Driver = None # Salvar e fechar
feedback.pushInfo(self.tr('Operation completed successfully!', 'Operação finalizada com sucesso!'))
feedback.pushInfo(self.tr('<NAME> - Cartographic Engineer', '<NAME> - Eng Cart'))
self.CAMINHO = Output
self.CARREGAR = Carregar
return {self.MOSAIC: Output}
    # Loading of the output file
def postProcessAlgorithm(self, context, feedback):
if self.CARREGAR:
rlayer = QgsRasterLayer(self.CAMINHO, self.tr('Mosaic', 'Mosaico'))
QgsProject.instance().addMapLayer(rlayer)
return {}
| [
"osgeo.gdal.Open",
"PyQt5.QtCore.QCoreApplication.translate",
"numpy.sqrt",
"numpy.hstack",
"qgis.core.QgsApplication.locale",
"numpy.array",
"qgis.core.QgsGeometry.fromPolygonXY",
"numpy.sin",
"numpy.arange",
"numpy.mean",
"matplotlib.path.Path",
"lftools.geocapt.dip.Interpolar",
"numpy.max... | [((2584, 2607), 'qgis.core.QgsApplication.locale', 'QgsApplication.locale', ([], {}), '()\n', (2605, 2607), False, 'from qgis.core import QgsProcessing, QgsFeatureSink, QgsWkbTypes, QgsFields, QgsField, QgsFeature, QgsPointXY, QgsGeometry, QgsProcessingException, QgsProcessingAlgorithm, QgsProcessingParameterString, QgsProcessingParameterNumber, QgsProcessingParameterField, QgsProcessingParameterBoolean, QgsProcessingParameterCrs, QgsProcessingParameterEnum, QgsFeatureRequest, QgsExpression, QgsProcessingParameterFeatureSource, QgsProcessingParameterFeatureSink, QgsProcessingParameterFileDestination, QgsProcessingParameterMultipleLayers, QgsProcessingParameterVectorLayer, QgsProcessingParameterRasterLayer, QgsProcessingParameterRasterDestination, QgsApplication, QgsProject, QgsRasterLayer, QgsCoordinateTransform, QgsCoordinateReferenceSystem\n'), ((2661, 2709), 'PyQt5.QtCore.QCoreApplication.translate', 'QCoreApplication.translate', (['"""Processing"""', 'string'], {}), "('Processing', string)\n", (2687, 2709), False, 'from PyQt5.QtCore import QCoreApplication, QVariant\n'), ((11788, 11801), 'numpy.mean', 'np.mean', (['XRES'], {}), '(XRES)\n', (11795, 11801), True, 'import numpy as np\n'), ((11817, 11830), 'numpy.mean', 'np.mean', (['YRES'], {}), '(YRES)\n', (11824, 11830), True, 'import numpy as np\n'), ((13141, 13174), 'qgis.core.QgsCoordinateReferenceSystem', 'QgsCoordinateReferenceSystem', (['prj'], {}), '(prj)\n', (13169, 13174), False, 'from qgis.core import QgsProcessing, QgsFeatureSink, QgsWkbTypes, QgsFields, QgsField, QgsFeature, QgsPointXY, QgsGeometry, QgsProcessingException, QgsProcessingAlgorithm, QgsProcessingParameterString, QgsProcessingParameterNumber, QgsProcessingParameterField, QgsProcessingParameterBoolean, QgsProcessingParameterCrs, QgsProcessingParameterEnum, QgsFeatureRequest, QgsExpression, QgsProcessingParameterFeatureSource, QgsProcessingParameterFeatureSink, QgsProcessingParameterFileDestination, 
QgsProcessingParameterMultipleLayers, QgsProcessingParameterVectorLayer, QgsProcessingParameterRasterLayer, QgsProcessingParameterRasterDestination, QgsApplication, QgsProject, QgsRasterLayer, QgsCoordinateTransform, QgsCoordinateReferenceSystem\n'), ((3847, 3853), 'lftools.geocapt.imgs.Imgs', 'Imgs', ([], {}), '()\n', (3851, 3853), False, 'from lftools.geocapt.imgs import Imgs\n'), ((9877, 9892), 'osgeo.gdal.Open', 'gdal.Open', (['item'], {}), '(item)\n', (9886, 9892), False, 'from osgeo import osr, gdal_array, gdal\n'), ((10678, 10710), 'qgis.core.QgsGeometry.fromPolygonXY', 'QgsGeometry.fromPolygonXY', (['coord'], {}), '(coord)\n', (10703, 10710), False, 'from qgis.core import QgsProcessing, QgsFeatureSink, QgsWkbTypes, QgsFields, QgsField, QgsFeature, QgsPointXY, QgsGeometry, QgsProcessingException, QgsProcessingAlgorithm, QgsProcessingParameterString, QgsProcessingParameterNumber, QgsProcessingParameterField, QgsProcessingParameterBoolean, QgsProcessingParameterCrs, QgsProcessingParameterEnum, QgsFeatureRequest, QgsExpression, QgsProcessingParameterFeatureSource, QgsProcessingParameterFeatureSink, QgsProcessingParameterFileDestination, QgsProcessingParameterMultipleLayers, QgsProcessingParameterVectorLayer, QgsProcessingParameterRasterLayer, QgsProcessingParameterRasterDestination, QgsApplication, QgsProject, QgsRasterLayer, QgsCoordinateTransform, QgsCoordinateReferenceSystem\n'), ((12720, 12733), 'qgis.core.QgsGeometry', 'QgsGeometry', ([], {}), '()\n', (12731, 12733), False, 'from qgis.core import QgsProcessing, QgsFeatureSink, QgsWkbTypes, QgsFields, QgsField, QgsFeature, QgsPointXY, QgsGeometry, QgsProcessingException, QgsProcessingAlgorithm, QgsProcessingParameterString, QgsProcessingParameterNumber, QgsProcessingParameterField, QgsProcessingParameterBoolean, QgsProcessingParameterCrs, QgsProcessingParameterEnum, QgsFeatureRequest, QgsExpression, QgsProcessingParameterFeatureSource, QgsProcessingParameterFeatureSink, 
QgsProcessingParameterFileDestination, QgsProcessingParameterMultipleLayers, QgsProcessingParameterVectorLayer, QgsProcessingParameterRasterLayer, QgsProcessingParameterRasterDestination, QgsApplication, QgsProject, QgsRasterLayer, QgsCoordinateTransform, QgsCoordinateReferenceSystem\n'), ((13291, 13310), 'pyproj.crs.CRS.from_epsg', 'CRS.from_epsg', (['EPSG'], {}), '(EPSG)\n', (13304, 13310), False, 'from pyproj.crs import CRS\n'), ((13648, 13662), 'numpy.sqrt', 'np.sqrt', (['(M * N)'], {}), '(M * N)\n', (13655, 13662), True, 'import numpy as np\n'), ((13739, 13756), 'numpy.degrees', 'np.degrees', (['theta'], {}), '(theta)\n', (13749, 13756), True, 'import numpy as np\n'), ((19532, 19550), 'matplotlib.path.Path', 'path.Path', (['caminho'], {}), '(caminho)\n', (19541, 19550), False, 'from matplotlib import path\n'), ((20235, 20268), 'numpy.hstack', 'np.hstack', (['(LIN + 0.5, COL + 0.5)'], {}), '((LIN + 0.5, COL + 0.5))\n', (20244, 20268), True, 'import numpy as np\n'), ((20668, 20699), 'numpy.hstack', 'np.hstack', (['(pixels_x, pixels_y)'], {}), '((pixels_x, pixels_y))\n', (20677, 20699), True, 'import numpy as np\n'), ((21344, 21389), 'osgeo.gdal_array.GDALTypeCodeToNumericTypeCode', 'gdal_array.GDALTypeCodeToNumericTypeCode', (['GDT'], {}), '(GDT)\n', (21384, 21389), False, 'from osgeo import osr, gdal_array, gdal\n'), ((12100, 12133), 'qgis.core.QgsCoordinateReferenceSystem', 'QgsCoordinateReferenceSystem', (['prj'], {}), '(prj)\n', (12128, 12133), False, 'from qgis.core import QgsProcessing, QgsFeatureSink, QgsWkbTypes, QgsFields, QgsField, QgsFeature, QgsPointXY, QgsGeometry, QgsProcessingException, QgsProcessingAlgorithm, QgsProcessingParameterString, QgsProcessingParameterNumber, QgsProcessingParameterField, QgsProcessingParameterBoolean, QgsProcessingParameterCrs, QgsProcessingParameterEnum, QgsFeatureRequest, QgsExpression, QgsProcessingParameterFeatureSource, QgsProcessingParameterFeatureSink, QgsProcessingParameterFileDestination, 
QgsProcessingParameterMultipleLayers, QgsProcessingParameterVectorLayer, QgsProcessingParameterRasterLayer, QgsProcessingParameterRasterDestination, QgsApplication, QgsProject, QgsRasterLayer, QgsCoordinateTransform, QgsCoordinateReferenceSystem\n'), ((18123, 18140), 'numpy.array', 'np.array', (['valores'], {}), '(valores)\n', (18131, 18140), True, 'import numpy as np\n'), ((20058, 20085), 'numpy.arange', 'np.arange', (['row_ini', 'row_fim'], {}), '(row_ini, row_fim)\n', (20067, 20085), True, 'import numpy as np\n'), ((20086, 20113), 'numpy.arange', 'np.arange', (['col_ini', 'col_fim'], {}), '(col_ini, col_fim)\n', (20095, 20113), True, 'import numpy as np\n'), ((20905, 20934), 'osgeo.gdal.GetDriverByName', 'gdal.GetDriverByName', (['"""GTiff"""'], {}), "('GTiff')\n", (20925, 20934), False, 'from osgeo import osr, gdal_array, gdal\n'), ((21692, 21727), 'numpy.ones', 'np.ones', (['(n_lin, n_col)'], {'dtype': 'tipo'}), '((n_lin, n_col), dtype=tipo)\n', (21699, 21727), True, 'import numpy as np\n'), ((3526, 3551), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (3541, 3551), False, 'import os\n'), ((10420, 10440), 'qgis.core.QgsPointXY', 'QgsPointXY', (['ulx', 'uly'], {}), '(ulx, uly)\n', (10430, 10440), False, 'from qgis.core import QgsProcessing, QgsFeatureSink, QgsWkbTypes, QgsFields, QgsField, QgsFeature, QgsPointXY, QgsGeometry, QgsProcessingException, QgsProcessingAlgorithm, QgsProcessingParameterString, QgsProcessingParameterNumber, QgsProcessingParameterField, QgsProcessingParameterBoolean, QgsProcessingParameterCrs, QgsProcessingParameterEnum, QgsFeatureRequest, QgsExpression, QgsProcessingParameterFeatureSource, QgsProcessingParameterFeatureSink, QgsProcessingParameterFileDestination, QgsProcessingParameterMultipleLayers, QgsProcessingParameterVectorLayer, QgsProcessingParameterRasterLayer, QgsProcessingParameterRasterDestination, QgsApplication, QgsProject, QgsRasterLayer, QgsCoordinateTransform, QgsCoordinateReferenceSystem\n'), 
((10464, 10498), 'qgis.core.QgsPointXY', 'QgsPointXY', (['(ulx + cols * xres)', 'uly'], {}), '(ulx + cols * xres, uly)\n', (10474, 10498), False, 'from qgis.core import QgsProcessing, QgsFeatureSink, QgsWkbTypes, QgsFields, QgsField, QgsFeature, QgsPointXY, QgsGeometry, QgsProcessingException, QgsProcessingAlgorithm, QgsProcessingParameterString, QgsProcessingParameterNumber, QgsProcessingParameterField, QgsProcessingParameterBoolean, QgsProcessingParameterCrs, QgsProcessingParameterEnum, QgsFeatureRequest, QgsExpression, QgsProcessingParameterFeatureSource, QgsProcessingParameterFeatureSink, QgsProcessingParameterFileDestination, QgsProcessingParameterMultipleLayers, QgsProcessingParameterVectorLayer, QgsProcessingParameterRasterLayer, QgsProcessingParameterRasterDestination, QgsApplication, QgsProject, QgsRasterLayer, QgsCoordinateTransform, QgsCoordinateReferenceSystem\n'), ((10518, 10566), 'qgis.core.QgsPointXY', 'QgsPointXY', (['(ulx + cols * xres)', '(uly + rows * yres)'], {}), '(ulx + cols * xres, uly + rows * yres)\n', (10528, 10566), False, 'from qgis.core import QgsProcessing, QgsFeatureSink, QgsWkbTypes, QgsFields, QgsField, QgsFeature, QgsPointXY, QgsGeometry, QgsProcessingException, QgsProcessingAlgorithm, QgsProcessingParameterString, QgsProcessingParameterNumber, QgsProcessingParameterField, QgsProcessingParameterBoolean, QgsProcessingParameterCrs, QgsProcessingParameterEnum, QgsFeatureRequest, QgsExpression, QgsProcessingParameterFeatureSource, QgsProcessingParameterFeatureSink, QgsProcessingParameterFileDestination, QgsProcessingParameterMultipleLayers, QgsProcessingParameterVectorLayer, QgsProcessingParameterRasterLayer, QgsProcessingParameterRasterDestination, QgsApplication, QgsProject, QgsRasterLayer, QgsCoordinateTransform, QgsCoordinateReferenceSystem\n'), ((10582, 10616), 'qgis.core.QgsPointXY', 'QgsPointXY', (['ulx', '(uly + rows * yres)'], {}), '(ulx, uly + rows * yres)\n', (10592, 10616), False, 'from qgis.core import QgsProcessing, 
QgsFeatureSink, QgsWkbTypes, QgsFields, QgsField, QgsFeature, QgsPointXY, QgsGeometry, QgsProcessingException, QgsProcessingAlgorithm, QgsProcessingParameterString, QgsProcessingParameterNumber, QgsProcessingParameterField, QgsProcessingParameterBoolean, QgsProcessingParameterCrs, QgsProcessingParameterEnum, QgsFeatureRequest, QgsExpression, QgsProcessingParameterFeatureSource, QgsProcessingParameterFeatureSink, QgsProcessingParameterFileDestination, QgsProcessingParameterMultipleLayers, QgsProcessingParameterVectorLayer, QgsProcessingParameterRasterLayer, QgsProcessingParameterRasterDestination, QgsApplication, QgsProject, QgsRasterLayer, QgsCoordinateTransform, QgsCoordinateReferenceSystem\n'), ((10636, 10656), 'qgis.core.QgsPointXY', 'QgsPointXY', (['ulx', 'uly'], {}), '(ulx, uly)\n', (10646, 10656), False, 'from qgis.core import QgsProcessing, QgsFeatureSink, QgsWkbTypes, QgsFields, QgsField, QgsFeature, QgsPointXY, QgsGeometry, QgsProcessingException, QgsProcessingAlgorithm, QgsProcessingParameterString, QgsProcessingParameterNumber, QgsProcessingParameterField, QgsProcessingParameterBoolean, QgsProcessingParameterCrs, QgsProcessingParameterEnum, QgsFeatureRequest, QgsExpression, QgsProcessingParameterFeatureSource, QgsProcessingParameterFeatureSink, QgsProcessingParameterFileDestination, QgsProcessingParameterMultipleLayers, QgsProcessingParameterVectorLayer, QgsProcessingParameterRasterLayer, QgsProcessingParameterRasterDestination, QgsApplication, QgsProject, QgsRasterLayer, QgsCoordinateTransform, QgsCoordinateReferenceSystem\n'), ((15458, 15482), 'itertools.combinations', 'combinations', (['valores', 'k'], {}), '(valores, k)\n', (15470, 15482), False, 'from itertools import combinations\n'), ((16118, 16131), 'qgis.core.QgsGeometry', 'QgsGeometry', ([], {}), '()\n', (16129, 16131), False, 'from qgis.core import QgsProcessing, QgsFeatureSink, QgsWkbTypes, QgsFields, QgsField, QgsFeature, QgsPointXY, QgsGeometry, QgsProcessingException, 
QgsProcessingAlgorithm, QgsProcessingParameterString, QgsProcessingParameterNumber, QgsProcessingParameterField, QgsProcessingParameterBoolean, QgsProcessingParameterCrs, QgsProcessingParameterEnum, QgsFeatureRequest, QgsExpression, QgsProcessingParameterFeatureSource, QgsProcessingParameterFeatureSink, QgsProcessingParameterFileDestination, QgsProcessingParameterMultipleLayers, QgsProcessingParameterVectorLayer, QgsProcessingParameterRasterLayer, QgsProcessingParameterRasterDestination, QgsApplication, QgsProject, QgsRasterLayer, QgsCoordinateTransform, QgsCoordinateReferenceSystem\n'), ((26941, 26962), 'qgis.core.QgsProject.instance', 'QgsProject.instance', ([], {}), '()\n', (26960, 26962), False, 'from qgis.core import QgsProcessing, QgsFeatureSink, QgsWkbTypes, QgsFields, QgsField, QgsFeature, QgsPointXY, QgsGeometry, QgsProcessingException, QgsProcessingAlgorithm, QgsProcessingParameterString, QgsProcessingParameterNumber, QgsProcessingParameterField, QgsProcessingParameterBoolean, QgsProcessingParameterCrs, QgsProcessingParameterEnum, QgsFeatureRequest, QgsExpression, QgsProcessingParameterFeatureSource, QgsProcessingParameterFeatureSink, QgsProcessingParameterFileDestination, QgsProcessingParameterMultipleLayers, QgsProcessingParameterVectorLayer, QgsProcessingParameterRasterLayer, QgsProcessingParameterRasterDestination, QgsApplication, QgsProject, QgsRasterLayer, QgsCoordinateTransform, QgsCoordinateReferenceSystem\n'), ((17033, 17046), 'qgis.core.QgsGeometry', 'QgsGeometry', ([], {}), '()\n', (17044, 17046), False, 'from qgis.core import QgsProcessing, QgsFeatureSink, QgsWkbTypes, QgsFields, QgsField, QgsFeature, QgsPointXY, QgsGeometry, QgsProcessingException, QgsProcessingAlgorithm, QgsProcessingParameterString, QgsProcessingParameterNumber, QgsProcessingParameterField, QgsProcessingParameterBoolean, QgsProcessingParameterCrs, QgsProcessingParameterEnum, QgsFeatureRequest, QgsExpression, QgsProcessingParameterFeatureSource, 
QgsProcessingParameterFeatureSink, QgsProcessingParameterFileDestination, QgsProcessingParameterMultipleLayers, QgsProcessingParameterVectorLayer, QgsProcessingParameterRasterLayer, QgsProcessingParameterRasterDestination, QgsApplication, QgsProject, QgsRasterLayer, QgsCoordinateTransform, QgsCoordinateReferenceSystem\n'), ((22329, 22348), 'osgeo.gdal.Open', 'gdal.Open', (['img_path'], {}), '(img_path)\n', (22338, 22348), False, 'from osgeo import osr, gdal_array, gdal\n'), ((23369, 23493), 'lftools.geocapt.dip.Interpolar', 'Interpolar', (['X', 'Y', "imgs[img]['band']", "imgs[img]['origem']", "imgs[img]['xres']", "imgs[img]['yres']", 'reamostragem', 'valor_nulo'], {}), "(X, Y, imgs[img]['band'], imgs[img]['origem'], imgs[img]['xres'],\n imgs[img]['yres'], reamostragem, valor_nulo)\n", (23379, 23493), False, 'from lftools.geocapt.dip import Interpolar\n'), ((24547, 24671), 'lftools.geocapt.dip.Interpolar', 'Interpolar', (['X', 'Y', "imgs[img]['band']", "imgs[img]['origem']", "imgs[img]['xres']", "imgs[img]['yres']", 'reamostragem', 'valor_nulo'], {}), "(X, Y, imgs[img]['band'], imgs[img]['origem'], imgs[img]['xres'],\n imgs[img]['yres'], reamostragem, valor_nulo)\n", (24557, 24671), False, 'from lftools.geocapt.dip import Interpolar\n'), ((13472, 13499), 'numpy.sin', 'np.sin', (['((y_min + y_max) / 2)'], {}), '((y_min + y_max) / 2)\n', (13478, 13499), True, 'import numpy as np\n'), ((13565, 13592), 'numpy.sin', 'np.sin', (['((y_min + y_max) / 2)'], {}), '((y_min + y_max) / 2)\n', (13571, 13592), True, 'import numpy as np\n'), ((25372, 25394), 'numpy.mean', 'np.mean', (['interp_values'], {}), '(interp_values)\n', (25379, 25394), True, 'import numpy as np\n'), ((25482, 25506), 'numpy.median', 'np.median', (['interp_values'], {}), '(interp_values)\n', (25491, 25506), True, 'import numpy as np\n'), ((3971, 3996), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (3986, 3996), False, 'import os\n'), ((25594, 25615), 'numpy.min', 'np.min', 
(['interp_values'], {}), '(interp_values)\n', (25600, 25615), True, 'import numpy as np\n'), ((25703, 25724), 'numpy.max', 'np.max', (['interp_values'], {}), '(interp_values)\n', (25709, 25724), True, 'import numpy as np\n')] |
import logging
from itertools import combinations
from collections import defaultdict
import numpy as np
import scipy.spatial as spatial
from opensfm import bow
from opensfm import context
logger = logging.getLogger(__name__)
def has_gps_info(exif):
    """Return True if *exif* carries both GPS latitude and longitude.

    Fix: the original chained-``and`` expression leaked non-boolean values
    (``None``, ``{}`` or the exif dict itself) from a ``has_``-named
    predicate; this version always returns a plain bool, which is
    backward-compatible in any truthiness context.
    """
    if not exif or 'gps' not in exif:
        return False
    gps = exif['gps']
    return 'latitude' in gps and 'longitude' in gps
def match_candidates_by_distance(images_ref, images_cand, exifs, reference,
                                 max_neighbors, max_distance):
    """Propose image pairs whose GPS positions are close.

    Altitude is deliberately zeroed out so that images taken at the same
    position but different heights (e.g. drone flights at several
    altitudes) still pair up with each other.
    """
    if max_neighbors <= 0 and max_distance <= 0:
        return set()
    if not max_neighbors:
        max_neighbors = 99999999
    if not max_distance:
        max_distance = 99999999.
    knn = min(len(images_cand), max_neighbors)

    def topocentric(name):
        # Project the image's GPS tags into the local reference frame,
        # dropping altitude.
        tags = exifs[name]['gps']
        return reference.to_topocentric(tags['latitude'], tags['longitude'], 0)

    coords = np.zeros((len(images_cand), 3))
    for row, name in enumerate(images_cand):
        coords[row] = topocentric(name)
    index = spatial.cKDTree(coords)

    n_cand = len(images_cand)
    result = set()
    for ref in images_ref:
        # Query one extra neighbor when the reference is also a candidate,
        # since it will match itself.
        count = knn + 1 if ref in images_cand else knn
        _, hits = index.query(topocentric(ref), k=count,
                              distance_upper_bound=max_distance)
        for hit in hits:
            # Missing neighbors are reported with index == n_cand.
            if hit < n_cand and images_cand[hit] != ref:
                result.add(tuple(sorted((ref, images_cand[hit]))))
    return result
def match_candidates_with_bow(data, images_ref, images_cand,
                              exifs, reference, max_neighbors,
                              max_gps_distance, max_gps_neighbors,
                              enforce_other_cameras):
    """Propose matching pairs ranked by bag-of-words similarity.

    When ``max_gps_distance`` or ``max_gps_neighbors`` is positive, each
    image's candidate set is first restricted to its GPS neighborhood.
    When ``enforce_other_cameras`` is True, up to ``max_neighbors`` images
    of the same camera AND ``max_neighbors`` images of every other camera
    are kept per image.
    """
    if max_neighbors <= 0:
        return set()

    # GPS-based pre-selection of candidates.
    preempted_cand = {image: images_cand for image in images_ref}
    if max_gps_distance > 0 or max_gps_neighbors > 0:
        gps_pairs = match_candidates_by_distance(images_ref, images_cand,
                                                 exifs, reference,
                                                 max_gps_neighbors,
                                                 max_gps_distance)
        preempted_cand = defaultdict(list)
        for first, second in gps_pairs:
            preempted_cand[first].append(second)
            preempted_cand[second].append(first)

    # Only load words for images actually involved (RAM saver).
    need_load = set(preempted_cand)
    for cand_list in preempted_cand.values():
        need_load.update(cand_list)

    logger.info("Computing %d BoW histograms" % len(need_load))
    histograms = load_histograms(data, need_load)
    args = list(match_bow_arguments(preempted_cand, histograms))

    # Parallel BoW neighbor computation.
    processes = processes_that_fit_in_memory(data.config['processes'])
    logger.info("Computing BoW candidates with %d processes" % processes)
    results = context.parallel_map(match_bow_unwrap_args, args, processes)

    # Assemble the final pair set.
    pairs = set()
    for image, order, others in results:
        if enforce_other_cameras:
            pairs = pairs.union(pairs_from_neighbors(image, exifs, order, others, max_neighbors))
        else:
            for idx in order[:max_neighbors]:
                pairs.add(tuple(sorted((image, others[idx]))))
    return pairs
def match_bow_arguments(candidates, histograms):
    """Yield one (image, candidate images, histograms) task per image."""
    return ((image, cand_images, histograms)
            for image, cand_images in candidates.items())
def match_bow_unwrap_args(args):
    """Unpack an (image, other_images, histograms) task and run the BoW distance computation."""
    return bow_distances(*args)
def match_candidates_by_time(images_ref, images_cand, exifs, max_neighbors):
    """Find candidate matching pairs by time difference."""
    if max_neighbors <= 0:
        return set()
    k = min(len(images_cand), max_neighbors)

    # Build a 1-D KD-tree over the candidates' capture timestamps.
    capture_times = np.zeros((len(images_cand), 1))
    for idx, image in enumerate(images_cand):
        capture_times[idx] = exifs[image]['capture_time']
    tree = spatial.cKDTree(capture_times)

    pairs = set()
    for image_ref in images_ref:
        # Query one extra neighbor when the reference is its own candidate.
        nn = k + 1 if image_ref in images_cand else k
        _, neighbors = tree.query(exifs[image_ref]['capture_time'], k=nn)
        for idx in neighbors:
            # cKDTree pads missing neighbors with an out-of-range index.
            if idx >= len(images_cand):
                continue
            image_cand = images_cand[idx]
            if image_cand != image_ref:
                pairs.add(tuple(sorted((image_ref, image_cand))))
    return pairs
def match_candidates_by_order(images_ref, images_cand, max_neighbors):
    """Find candidate matching pairs by sequence order.

    Each reference image is paired with up to ``max_neighbors`` candidate
    images around its own position in the candidate sequence.
    Returns a set of lexicographically sorted image-name pairs.
    """
    if max_neighbors <= 0:
        return set()
    n = (max_neighbors + 1) // 2  # half-window on each side

    pairs = set()
    for i, image_ref in enumerate(images_ref):
        a = max(0, i - n)
        b = min(len(images_cand), i + n)
        for j in range(a, b):
            image_cand = images_cand[j]
            if image_ref != image_cand:
                # Bug fix: the pair must contain the single candidate image
                # `image_cand`, not the whole `images_cand` list (which also
                # made sorted() fail comparing str against list).
                pairs.add(tuple(sorted((image_ref, image_cand))))
    return pairs
def match_candidates_from_metadata(images_ref, images_cand, exifs, data):
    """Compute candidate matching pairs between images_ref and images_cand.

    Combines GPS-distance, capture-time, sequence-order and BoW pair
    selection strategies; when all of them are deactivated, every
    ref/cand pair is matched.

    Returns a tuple (res, report) where res maps each reference image to
    its list of paired images and report counts pairs per strategy.
    """
    max_distance = data.config['matching_gps_distance']
    gps_neighbors = data.config['matching_gps_neighbors']
    time_neighbors = data.config['matching_time_neighbors']
    order_neighbors = data.config['matching_order_neighbors']
    bow_neighbors = data.config['matching_bow_neighbors']
    bow_gps_distance = data.config['matching_bow_gps_distance']
    bow_gps_neighbors = data.config['matching_bow_gps_neighbors']
    bow_other_cameras = data.config['matching_bow_other_cameras']

    if not data.reference_lla_exists():
        data.invent_reference_lla()
    reference = data.load_reference()

    # Without full GPS coverage, distance-based preselection is unusable.
    if not all(map(has_gps_info, exifs.values())):
        if gps_neighbors != 0:
            logger.warn("Not all images have GPS info. "
                        "Disabling matching_gps_neighbors.")
        gps_neighbors = 0
        max_distance = 0

    images_ref.sort()

    if max_distance == gps_neighbors == time_neighbors == order_neighbors == bow_neighbors == 0:
        # All pair selection strategies deactivated so we match all pairs
        d = set()
        t = set()
        o = set()
        b = set()
        # Bug fix: sorted(i, j) passed `j` as the key function and raised a
        # TypeError; the pair must be wrapped in a single iterable.
        pairs = set(tuple(sorted((i, j))) for i in images_ref for j in images_cand)
    else:
        d = match_candidates_by_distance(images_ref, images_cand, exifs, reference,
                                         gps_neighbors, max_distance)
        t = match_candidates_by_time(images_ref, images_cand, exifs, time_neighbors)
        o = match_candidates_by_order(images_ref, images_cand, order_neighbors)
        b = match_candidates_with_bow(data, images_ref, images_cand,
                                      exifs, reference, bow_neighbors,
                                      bow_gps_distance, bow_gps_neighbors,
                                      bow_other_cameras)
        pairs = d | t | o | b

    # Index each sorted pair under its first (lexicographically smaller) image.
    res = {im: [] for im in images_ref}
    for im1, im2 in pairs:
        res[im1].append(im2)

    report = {
        "num_pairs_distance": len(d),
        "num_pairs_time": len(t),
        "num_pairs_order": len(o),
        "num_pairs_bow": len(b)
    }
    return res, report
def bow_distances(image, other_images, histograms):
    """Compute BoW-based distance (L1 on histogram of words) between
    `image` and each of `other_images` that has a histogram.

    Returns (image, argsort of the distances, list of compared images).
    """
    if image not in histograms:
        return image, [], []

    reference = histograms[image]
    dists = []
    compared = []
    for candidate in other_images:
        if candidate == image or candidate not in histograms:
            continue
        dists.append(np.fabs(reference - histograms[candidate]).sum())
        compared.append(candidate)
    return image, np.argsort(dists), compared
def load_histograms(data, images):
    """Load BoW histograms of the given images.

    Returns a dict mapping image name -> histogram; images whose words
    cannot be loaded or that have too few features are skipped.
    """
    # Images with at most this many features are considered unreliable.
    min_num_feature = 8

    histograms = {}
    bows = bow.load_bows(data.config)
    for im in images:
        words = data.load_words(im)
        if words is None:
            logger.error("Could not load words for image {}".format(im))
            continue

        # NOTE(review): `data.load_masks(data, im)` passes `data` as an
        # explicit argument to what looks like a bound method — verify the
        # intended signature.
        mask = data.load_masks(data, im) if hasattr(data, 'load_masks') else None
        # NOTE(review): `if mask` is ambiguous when the mask is a numpy
        # array; this presumably relies on load_masks returning None or a
        # plain list — confirm.
        filtered_words = words[mask] if mask else words
        if len(filtered_words) <= min_num_feature:
            logger.warning("Too few filtered features in image {}: {}".format(
                im, len(filtered_words)))
            continue

        # NOTE(review): the histogram is built from the unfiltered `words`
        # (first column), not `filtered_words` — confirm this is intended.
        histograms[im] = bows.histogram(words[:, 0])
    return histograms
def pairs_from_neighbors(image, exifs, order, other, max_neighbors):
    """Construct matching pairs given closest ordered neighbors.

    Pairs are of the form (image, im2), with im2 taken from the closest
    max_neighbors neighbors sharing the camera of `image` plus the
    closest max_neighbors neighbors from any other camera.
    """
    same_camera = []
    other_cameras = []
    ref_camera = exifs[image]['camera']
    for idx in order:
        neighbor = other[idx]
        # Route the neighbor into its bucket, capped at max_neighbors each.
        bucket = same_camera if exifs[neighbor]['camera'] == ref_camera else other_cameras
        if len(bucket) < max_neighbors:
            bucket.append(neighbor)
        if len(same_camera) + len(other_cameras) >= 2 * max_neighbors:
            break

    return set(tuple(sorted((image, neighbor)))
               for neighbor in same_camera + other_cameras)
def processes_that_fit_in_memory(desired):
    """Amount of parallel BoW process that fit in memory."""
    # Empirical per-worker memory budget in MB.
    per_process_mem = 1.6 * 1024
    available_mem = context.memory_available()
    if available_mem is None:
        # Unknown memory budget: trust the caller's request.
        return desired
    return min(desired, max(1, int(available_mem / per_process_mem)))
| [
"logging.getLogger",
"numpy.fabs",
"opensfm.context.memory_available",
"scipy.spatial.cKDTree",
"opensfm.context.parallel_map",
"opensfm.bow.load_bows",
"numpy.argsort",
"collections.defaultdict"
] | [((202, 229), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (219, 229), False, 'import logging\n'), ((1245, 1268), 'scipy.spatial.cKDTree', 'spatial.cKDTree', (['points'], {}), '(points)\n', (1260, 1268), True, 'import scipy.spatial as spatial\n'), ((3712, 3772), 'opensfm.context.parallel_map', 'context.parallel_map', (['match_bow_unwrap_args', 'args', 'processes'], {}), '(match_bow_unwrap_args, args, processes)\n', (3732, 3772), False, 'from opensfm import context\n'), ((4894, 4916), 'scipy.spatial.cKDTree', 'spatial.cKDTree', (['times'], {}), '(times)\n', (4909, 4916), True, 'import scipy.spatial as spatial\n'), ((8868, 8894), 'opensfm.bow.load_bows', 'bow.load_bows', (['data.config'], {}), '(data.config)\n', (8881, 8894), False, 'from opensfm import bow\n'), ((10525, 10551), 'opensfm.context.memory_available', 'context.memory_available', ([], {}), '()\n', (10549, 10551), False, 'from opensfm import context\n'), ((2987, 3004), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (2998, 3004), False, 'from collections import defaultdict\n'), ((8698, 8719), 'numpy.argsort', 'np.argsort', (['distances'], {}), '(distances)\n', (8708, 8719), True, 'import numpy as np\n'), ((8627, 8642), 'numpy.fabs', 'np.fabs', (['(h - h2)'], {}), '(h - h2)\n', (8634, 8642), True, 'import numpy as np\n')] |
import os
import argparse
os.environ["OMP_NUM_THREADS"] = "1"
import numpy as np
import numpy.random as npr
import mimo
from mimo.distributions import NormalGamma
from mimo.distributions import MatrixNormalWishart
from mimo.distributions import GaussianWithNormalGamma
from mimo.distributions import LinearGaussianWithMatrixNormalWishart
from mimo.distributions import TruncatedStickBreaking
from mimo.distributions import Dirichlet
from mimo.distributions import CategoricalWithDirichlet
from mimo.distributions import CategoricalWithStickBreaking
from mimo.mixtures import BayesianMixtureOfLinearGaussians
import matplotlib.pyplot as plt
from tqdm import tqdm
import pathos
from pathos.pools import _ProcessPool as Pool
nb_cores = pathos.multiprocessing.cpu_count()
def _job(kwargs):
    """Fit one Bayesian mixture of linear Gaussians (ilr) model.

    Expects `kwargs` with keys 'arguments' (parsed CLI options), 'seed',
    'train_input' and 'train_target'; returns the fitted mixture.
    """
    args = kwargs.pop('arguments')
    seed = kwargs.pop('seed')
    # NOTE(review): `input` shadows the builtin of the same name.
    input = kwargs.pop('train_input')
    target = kwargs.pop('train_target')
    input_dim = input.shape[-1]
    target_dim = target.shape[-1]
    # set random seed
    np.random.seed(seed)
    # one extra regression parameter for the affine offset
    nb_params = input_dim
    if args.affine:
        nb_params += 1
    basis_prior = []
    models_prior = []
    # initialize Normal
    alpha_ng = 1.
    beta_ng = 1. / (2 * 1e2)
    kappas = 1e-2
    # initialize Matrix-Normal
    psi_mnw = 1e1
    K = 1e-2
    # one Normal-Gamma basis prior and one Matrix-Normal-Wishart model
    # prior per mixture component
    for n in range(args.nb_models):
        basis_hypparams = dict(mu=np.zeros((input_dim,)),
                               alphas=np.ones(input_dim) * alpha_ng,
                               betas=np.ones(input_dim) * beta_ng,
                               kappas=np.ones(input_dim) * kappas)
        aux = NormalGamma(**basis_hypparams)
        basis_prior.append(aux)
        models_hypparams = dict(M=np.zeros((target_dim, nb_params)),
                                K=np.eye(nb_params) * K, nu=target_dim + 1,
                                psi=np.eye(target_dim) * psi_mnw)
        aux = MatrixNormalWishart(**models_hypparams)
        models_prior.append(aux)
    # define gating: truncated stick-breaking or symmetric Dirichlet weights
    if args.prior == 'stick-breaking':
        gating_hypparams = dict(K=args.nb_models, gammas=np.ones((args.nb_models,)),
                                deltas=np.ones((args.nb_models,)) * args.alpha)
        gating_prior = TruncatedStickBreaking(**gating_hypparams)
        ilr = BayesianMixtureOfLinearGaussians(gating=CategoricalWithStickBreaking(gating_prior),
                                               basis=[GaussianWithNormalGamma(basis_prior[i])
                                                      for i in range(args.nb_models)],
                                               models=[LinearGaussianWithMatrixNormalWishart(models_prior[i], affine=args.affine)
                                                       for i in range(args.nb_models)])
    else:
        gating_hypparams = dict(K=args.nb_models, alphas=np.ones((args.nb_models,)) * args.alpha)
        gating_prior = Dirichlet(**gating_hypparams)
        ilr = BayesianMixtureOfLinearGaussians(gating=CategoricalWithDirichlet(gating_prior),
                                               basis=[GaussianWithNormalGamma(basis_prior[i])
                                                      for i in range(args.nb_models)],
                                               models=[LinearGaussianWithMatrixNormalWishart(models_prior[i], affine=args.affine)
                                                       for i in range(args.nb_models)])
    ilr.add_data(target, input, whiten=True,
                 transform_type='Standard')
    # Gibbs sampling
    ilr.resample(maxiter=args.gibbs_iters,
                 progprint=args.verbose)
    for _ in range(args.super_iters):
        if args.stochastic:
            # Stochastic meanfield VI
            ilr.meanfield_stochastic_descent(maxiter=args.svi_iters,
                                             stepsize=args.svi_stepsize,
                                             batchsize=args.svi_batchsize)
        if args.deterministic:
            # Meanfield VI
            ilr.meanfield_coordinate_descent(tol=args.earlystop,
                                             maxiter=args.meanfield_iters,
                                             progprint=args.verbose)
        # empirical-Bayes style update: the posterior becomes the prior
        # for the next super-iteration
        ilr.gating.prior = ilr.gating.posterior
        for i in range(ilr.likelihood.size):
            ilr.basis[i].prior = ilr.basis[i].posterior
            ilr.models[i].prior = ilr.models[i].posterior
    return ilr
def parallel_ilr_inference(nb_jobs=50, **kwargs):
    """Run `_job` for seeds 0..nb_jobs-1 in a process pool and return
    the list of fitted models (one per seed)."""
    job_kwargs = [dict(kwargs, seed=seed) for seed in range(nb_jobs)]
    with Pool(processes=min(nb_jobs, nb_cores),
              initializer=tqdm.set_lock,
              initargs=(tqdm.get_lock(),)) as pool:
        return pool.map(_job, job_kwargs)
if __name__ == "__main__":
    # Command-line options: data paths, prior choice and inference settings.
    parser = argparse.ArgumentParser(description='Evaluate ilr with a Stick-breaking prior')
    parser.add_argument('--datapath', help='path to dataset', default=os.path.abspath(mimo.__file__ + '/../../datasets'))
    parser.add_argument('--evalpath', help='path to evaluation', default=os.path.abspath(mimo.__file__ + '/../../evaluation/toy'))
    parser.add_argument('--nb_seeds', help='number of seeds', default=1, type=int)
    parser.add_argument('--prior', help='prior type', default='stick-breaking')
    parser.add_argument('--alpha', help='concentration parameter', default=5, type=float)
    parser.add_argument('--nb_models', help='max number of models', default=50, type=int)
    parser.add_argument('--affine', help='affine functions', action='store_true', default=True)
    parser.add_argument('--no_affine', help='non-affine functions', dest='affine', action='store_false')
    parser.add_argument('--super_iters', help='interleaving Gibbs/VI iterations', default=2, type=int)
    parser.add_argument('--gibbs_iters', help='Gibbs iterations', default=10, type=int)
    parser.add_argument('--stochastic', help='use stochastic VI', action='store_true', default=False)
    parser.add_argument('--no_stochastic', help='do not use stochastic VI', dest='stochastic', action='store_false')
    parser.add_argument('--deterministic', help='use deterministic VI', action='store_true', default=True)
    parser.add_argument('--no_deterministic', help='do not use deterministic VI', dest='deterministic', action='store_false')
    parser.add_argument('--meanfield_iters', help='max VI iterations', default=25, type=int)
    parser.add_argument('--svi_iters', help='SVI iterations', default=100, type=int)
    parser.add_argument('--svi_stepsize', help='SVI step size', default=1e-1, type=float)
    parser.add_argument('--svi_batchsize', help='SVI batch size', default=128, type=int)
    parser.add_argument('--prediction', help='prediction w/ mode or average', default='average')
    parser.add_argument('--earlystop', help='stopping criterion for VI', default=1e-2, type=float)
    parser.add_argument('--verbose', help='show learning progress', action='store_true', default=True)
    parser.add_argument('--mute', help='show no output', dest='verbose', action='store_false')
    parser.add_argument('--nb_train', help='size of train dataset', default=2000, type=int)
    parser.add_argument('--seed', help='choose seed', default=1337, type=int)
    args = parser.parse_args()
    np.random.seed(args.seed)
    # create Sine data
    nb_train = args.nb_train
    true_target = np.zeros((nb_train,))
    true_input = np.zeros((nb_train,))
    data = np.zeros((nb_train, 2))
    step = 10. * np.pi / nb_train
    # Noisy observations of a sine wave: x jittered with N(0, 0.1^2),
    # y = 3*sin(x) jittered with N(0, 0.3^2).
    for i in range(data.shape[0]):
        true_input[i] = i * step
        data[i, 0] = true_input[i] + 0.1 * npr.randn()
        true_target[i] = 3 * np.sin(true_input[i])
        data[i, 1] = true_target[i] + 0.3 * npr.randn()
    from itertools import chain
    # Keep two disjoint chunks for training, leaving a gap in the middle.
    r = list(chain(range(0, 500), range(1000, 1500)))
    train_data = data[r, :]
    # training data
    # NOTE(review): nb_train is re-assigned to the same value here.
    nb_train = args.nb_train
    input, target = data[:, :1], data[:, 1:]
    train_input, train_target = train_data[:, :1], train_data[:, 1:]
    # Fit one model per seed in parallel; keep only the first seed's model.
    ilr = parallel_ilr_inference(nb_jobs=args.nb_seeds,
                                 train_input=train_input,
                                 train_target=train_target,
                                 arguments=args)[0]
    # predict on training
    mu, var, std, nlpd = \
        ilr.meanfield_prediction(input, target, prediction=args.prediction)
    # metrics
    from sklearn.metrics import explained_variance_score, mean_squared_error, r2_score
    mse = mean_squared_error(target, mu)
    evar = explained_variance_score(target, mu, multioutput='variance_weighted')
    smse = 1. - r2_score(target, mu, multioutput='variance_weighted')
    print('TRAIN - EVAR:', evar, 'MSE:', mse, 'SMSE:', smse, 'NLPD:',
          nlpd.mean(), 'Compnents:', len(ilr.used_labels))
    fig, axes = plt.subplots(2, 1)
    # plot prediction
    sorter = np.argsort(input, axis=0).flatten()
    sorted_input, sorted_target = input[sorter, 0], target[sorter, 0]
    sorted_mu, sorted_std = mu[sorter, 0], std[sorter, 0]
    axes[0].plot(true_input, true_target, '--k')
    axes[0].scatter(train_input, train_target, marker='+', s=1.25, color='k')
    axes[0].plot(sorted_input, sorted_mu, color='crimson')
    # Shade one and two predictive standard deviations around the mean.
    for c in [1., 2.]:
        axes[0].fill_between(sorted_input,
                             sorted_mu - c * sorted_std,
                             sorted_mu + c * sorted_std,
                             edgecolor=(0, 0, 1, 0.1), facecolor=(0, 0, 1, 0.1))
    axes[0].set_ylabel('y')
    axes[0].set_ylim(-7.5, 7.5)
    # plot gaussian activations
    axes[1].set_xlabel('x')
    axes[1].set_ylabel('p(x)')
    activations = ilr.meanfield_predictive_activation(sorted_input)
    axes[1].plot(sorted_input, activations)
    plt.show()
| [
"mimo.distributions.TruncatedStickBreaking",
"sklearn.metrics.explained_variance_score",
"mimo.distributions.NormalGamma",
"numpy.argsort",
"numpy.sin",
"sklearn.metrics.r2_score",
"mimo.distributions.CategoricalWithDirichlet",
"argparse.ArgumentParser",
"mimo.distributions.Dirichlet",
"numpy.rand... | [((741, 775), 'pathos.multiprocessing.cpu_count', 'pathos.multiprocessing.cpu_count', ([], {}), '()\n', (773, 775), False, 'import pathos\n'), ((1034, 1054), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1048, 1054), True, 'import numpy as np\n'), ((4859, 4938), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Evaluate ilr with a Stick-breaking prior"""'}), "(description='Evaluate ilr with a Stick-breaking prior')\n", (4882, 4938), False, 'import argparse\n'), ((7337, 7362), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (7351, 7362), True, 'import numpy as np\n'), ((7435, 7456), 'numpy.zeros', 'np.zeros', (['(nb_train,)'], {}), '((nb_train,))\n', (7443, 7456), True, 'import numpy as np\n'), ((7474, 7495), 'numpy.zeros', 'np.zeros', (['(nb_train,)'], {}), '((nb_train,))\n', (7482, 7495), True, 'import numpy as np\n'), ((7508, 7531), 'numpy.zeros', 'np.zeros', (['(nb_train, 2)'], {}), '((nb_train, 2))\n', (7516, 7531), True, 'import numpy as np\n'), ((8546, 8576), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['target', 'mu'], {}), '(target, mu)\n', (8564, 8576), False, 'from sklearn.metrics import explained_variance_score, mean_squared_error, r2_score\n'), ((8588, 8657), 'sklearn.metrics.explained_variance_score', 'explained_variance_score', (['target', 'mu'], {'multioutput': '"""variance_weighted"""'}), "(target, mu, multioutput='variance_weighted')\n", (8612, 8657), False, 'from sklearn.metrics import explained_variance_score, mean_squared_error, r2_score\n'), ((8875, 8893), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {}), '(2, 1)\n', (8887, 8893), True, 'import matplotlib.pyplot as plt\n'), ((9813, 9823), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9821, 9823), True, 'import matplotlib.pyplot as plt\n'), ((1635, 1665), 'mimo.distributions.NormalGamma', 'NormalGamma', ([], {}), '(**basis_hypparams)\n', (1646, 1665), 
False, 'from mimo.distributions import NormalGamma\n'), ((1925, 1964), 'mimo.distributions.MatrixNormalWishart', 'MatrixNormalWishart', ([], {}), '(**models_hypparams)\n', (1944, 1964), False, 'from mimo.distributions import MatrixNormalWishart\n'), ((2246, 2288), 'mimo.distributions.TruncatedStickBreaking', 'TruncatedStickBreaking', ([], {}), '(**gating_hypparams)\n', (2268, 2288), False, 'from mimo.distributions import TruncatedStickBreaking\n'), ((2919, 2948), 'mimo.distributions.Dirichlet', 'Dirichlet', ([], {}), '(**gating_hypparams)\n', (2928, 2948), False, 'from mimo.distributions import Dirichlet\n'), ((8674, 8727), 'sklearn.metrics.r2_score', 'r2_score', (['target', 'mu'], {'multioutput': '"""variance_weighted"""'}), "(target, mu, multioutput='variance_weighted')\n", (8682, 8727), False, 'from sklearn.metrics import explained_variance_score, mean_squared_error, r2_score\n'), ((5009, 5059), 'os.path.abspath', 'os.path.abspath', (["(mimo.__file__ + '/../../datasets')"], {}), "(mimo.__file__ + '/../../datasets')\n", (5024, 5059), False, 'import os\n'), ((5134, 5190), 'os.path.abspath', 'os.path.abspath', (["(mimo.__file__ + '/../../evaluation/toy')"], {}), "(mimo.__file__ + '/../../evaluation/toy')\n", (5149, 5190), False, 'import os\n'), ((7718, 7739), 'numpy.sin', 'np.sin', (['true_input[i]'], {}), '(true_input[i])\n', (7724, 7739), True, 'import numpy as np\n'), ((8930, 8955), 'numpy.argsort', 'np.argsort', (['input'], {'axis': '(0)'}), '(input, axis=0)\n', (8940, 8955), True, 'import numpy as np\n'), ((1393, 1415), 'numpy.zeros', 'np.zeros', (['(input_dim,)'], {}), '((input_dim,))\n', (1401, 1415), True, 'import numpy as np\n'), ((1733, 1766), 'numpy.zeros', 'np.zeros', (['(target_dim, nb_params)'], {}), '((target_dim, nb_params))\n', (1741, 1766), True, 'import numpy as np\n'), ((2115, 2141), 'numpy.ones', 'np.ones', (['(args.nb_models,)'], {}), '((args.nb_models,))\n', (2122, 2141), True, 'import numpy as np\n'), ((2344, 2386), 
'mimo.distributions.CategoricalWithStickBreaking', 'CategoricalWithStickBreaking', (['gating_prior'], {}), '(gating_prior)\n', (2372, 2386), False, 'from mimo.distributions import CategoricalWithStickBreaking\n'), ((3004, 3042), 'mimo.distributions.CategoricalWithDirichlet', 'CategoricalWithDirichlet', (['gating_prior'], {}), '(gating_prior)\n', (3028, 3042), False, 'from mimo.distributions import CategoricalWithDirichlet\n'), ((7677, 7688), 'numpy.random.randn', 'npr.randn', ([], {}), '()\n', (7686, 7688), True, 'import numpy.random as npr\n'), ((7784, 7795), 'numpy.random.randn', 'npr.randn', ([], {}), '()\n', (7793, 7795), True, 'import numpy.random as npr\n'), ((1455, 1473), 'numpy.ones', 'np.ones', (['input_dim'], {}), '(input_dim)\n', (1462, 1473), True, 'import numpy as np\n'), ((1523, 1541), 'numpy.ones', 'np.ones', (['input_dim'], {}), '(input_dim)\n', (1530, 1541), True, 'import numpy as np\n'), ((1591, 1609), 'numpy.ones', 'np.ones', (['input_dim'], {}), '(input_dim)\n', (1598, 1609), True, 'import numpy as np\n'), ((1802, 1819), 'numpy.eye', 'np.eye', (['nb_params'], {}), '(nb_params)\n', (1808, 1819), True, 'import numpy as np\n'), ((1880, 1898), 'numpy.eye', 'np.eye', (['target_dim'], {}), '(target_dim)\n', (1886, 1898), True, 'import numpy as np\n'), ((2182, 2208), 'numpy.ones', 'np.ones', (['(args.nb_models,)'], {}), '((args.nb_models,))\n', (2189, 2208), True, 'import numpy as np\n'), ((2442, 2481), 'mimo.distributions.GaussianWithNormalGamma', 'GaussianWithNormalGamma', (['basis_prior[i]'], {}), '(basis_prior[i])\n', (2465, 2481), False, 'from mimo.distributions import GaussianWithNormalGamma\n'), ((2624, 2698), 'mimo.distributions.LinearGaussianWithMatrixNormalWishart', 'LinearGaussianWithMatrixNormalWishart', (['models_prior[i]'], {'affine': 'args.affine'}), '(models_prior[i], affine=args.affine)\n', (2661, 2698), False, 'from mimo.distributions import LinearGaussianWithMatrixNormalWishart\n'), ((2855, 2881), 'numpy.ones', 'np.ones', 
(['(args.nb_models,)'], {}), '((args.nb_models,))\n', (2862, 2881), True, 'import numpy as np\n'), ((3098, 3137), 'mimo.distributions.GaussianWithNormalGamma', 'GaussianWithNormalGamma', (['basis_prior[i]'], {}), '(basis_prior[i])\n', (3121, 3137), False, 'from mimo.distributions import GaussianWithNormalGamma\n'), ((3280, 3354), 'mimo.distributions.LinearGaussianWithMatrixNormalWishart', 'LinearGaussianWithMatrixNormalWishart', (['models_prior[i]'], {'affine': 'args.affine'}), '(models_prior[i], affine=args.affine)\n', (3317, 3354), False, 'from mimo.distributions import LinearGaussianWithMatrixNormalWishart\n'), ((4736, 4751), 'tqdm.tqdm.get_lock', 'tqdm.get_lock', ([], {}), '()\n', (4749, 4751), False, 'from tqdm import tqdm\n')] |
import pandas as pd
import numpy as np
import json

# Decimal precision kept for wavelengths and counts.
prec = 5

# Read the spectrum file: 13 instrument-header lines are skipped, columns
# are whitespace-separated and use ',' as the decimal separator.
# Fix: `sep` is a regex, so use a raw string to avoid the invalid
# escape-sequence warning of '\s+'.
df = pd.read_csv('spectrum.txt', skiprows=13, sep=r'\s+', decimal=',',
                 header=None, names=['Wavelength', 'Counts']).round(prec)

dct = {'model_name': 'umuarama', 'wavelength': df.Wavelength.tolist()}
# Synthesize 1000 samples; each adds a single random offset to the whole
# spectrum (one draw per sample, not per wavelength).
for i in range(1000):
    dct[f'sample_{i}'] = {
        "class": "soil",  # this will change to soil, rock, etc
        "count": (df.Counts + np.random.rand()).round(prec).tolist()
    }

with open('saida.json', 'w+') as f:
    json.dump(dct, f)

# this is an output example:
# dct2 = {x: 'soil' for x in dct.keys() if 'count' in x} | [
"numpy.random.rand",
"json.dump",
"pandas.read_csv"
] | [((489, 506), 'json.dump', 'json.dump', (['dct', 'f'], {}), '(dct, f)\n', (498, 506), False, 'import json\n'), ((67, 182), 'pandas.read_csv', 'pd.read_csv', (['"""spectrum.txt"""'], {'skiprows': '(13)', 'sep': '"""\\\\s+"""', 'decimal': '""","""', 'header': 'None', 'names': "['Wavelength', 'Counts']"}), "('spectrum.txt', skiprows=13, sep='\\\\s+', decimal=',', header=\n None, names=['Wavelength', 'Counts'])\n", (78, 182), True, 'import pandas as pd\n'), ((403, 419), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (417, 419), True, 'import numpy as np\n')] |
import numpy as np
def profile_ode(x, y, sol, p):
    """Right-hand side of the profile ODE system.

    `y` holds the state (v, e); `p` is a parameter dict with keys 'mu',
    'nu', 'Gamma' and 'e_minus'. `x` and `sol` are unused here but kept
    for the solver's callback signature.
    """
    v, e = y[0], y[1]
    gamma = p['Gamma']
    e_minus = p['e_minus']
    dv = (1 / p['mu']) * (v * (v - 1) + gamma * (e - v * e_minus))
    de = (v / p['nu']) * (-((v - 1) ** 2) / 2 + e - e_minus + (v - 1) * gamma * e_minus)
    out = np.vstack([dv, de])
    return out
"numpy.vstack"
] | [((94, 274), 'numpy.vstack', 'np.vstack', (["[1 / p['mu'] * (v * (v - 1) + p['Gamma'] * (e - v * p['e_minus'])), v / p[\n 'nu'] * (-(v - 1) ** 2 / 2 + e - p['e_minus'] + (v - 1) * p['Gamma'] *\n p['e_minus'])]"], {}), "([1 / p['mu'] * (v * (v - 1) + p['Gamma'] * (e - v * p['e_minus'])\n ), v / p['nu'] * (-(v - 1) ** 2 / 2 + e - p['e_minus'] + (v - 1) * p[\n 'Gamma'] * p['e_minus'])])\n", (103, 274), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# --------------------------------------------------------
# Faster R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by <NAME>
# --------------------------------------------------------
"""
Demo script showing detections in sample images.
See README.md for installation instructions before running.
"""
import _init_paths
from fast_rcnn.config import cfg, cfg_from_file
from fast_rcnn.test import im_detect
from fast_rcnn.nms_wrapper import nms
from utils.timer import Timer
import matplotlib.pyplot as plt
import numpy as np
import scipy.io as sio
import caffe, os, sys, cv2
import argparse
from quad.sort_points import sort_points
CLASSES = ('__background__',
'text')
NETS = {'vgg16': ('VGG16',
'VGG16_faster_rcnn_final.caffemodel'),
'zf': ('ZF',
'ZF_faster_rcnn_final.caffemodel')}
def vis_detections(im, class_name, dets, thresh=0.5):
    """Draw detected bounding boxes whose score is at least `thresh`."""
    keep = np.where(dets[:, -1] >= thresh)[0]
    if len(keep) == 0:
        return

    im = im[:, :, (2, 1, 0)]  # BGR -> RGB for matplotlib
    fig, ax = plt.subplots(figsize=(12, 12))
    ax.imshow(im, aspect='equal')
    for idx in keep:
        det = dets[idx]
        print(det)
        x1, y1, x2, y2 = det[:4]
        score = det[-1]
        ax.add_patch(
            plt.Rectangle((x1, y1), x2 - x1, y2 - y1,
                          fill=False, edgecolor='red', linewidth=3.5)
        )
        ax.text(x1, y1 - 2,
                '{:s} {:.3f}'.format(class_name, score),
                bbox=dict(facecolor='blue', alpha=0.5),
                fontsize=14, color='white')

    ax.set_title(('{} detections with '
                  'p({} | box) >= {:.1f}').format(class_name, class_name,
                                                  thresh),
                 fontsize=14)
    plt.axis('off')
    plt.tight_layout()
    plt.draw()
def vis_quads(im, class_name, dets):
    """Visual debugging of quadrilateral detections."""
    import matplotlib.pyplot as plt
    for quad in dets[:, :8]:
        # Draw the four edges of the quadrilateral in order.
        corners = [(quad[0], quad[1]), (quad[2], quad[3]),
                   (quad[4], quad[5]), (quad[6], quad[7])]
        for (x1, y1), (x2, y2) in zip(corners, corners[1:] + corners[:1]):
            cv2.line(im, (x1, y1), (x2, y2), (0, 255, 0), 3)
    plt.cla()
    plt.imshow(im[:, :, (2, 1, 0)])  # BGR -> RGB
def demo(net, image_name):
    """Detect object classes in an image using pre-computed object proposals.

    Loads data/demo/<image_name>, runs the detector and either draws
    axis-aligned boxes (4-coordinate output) or quadrilaterals
    (8-coordinate output, also written to a result text file).
    """
    # Load the demo image
    im_file = os.path.join(cfg.DATA_DIR, 'demo', image_name)
    im = cv2.imread(im_file)

    # Detect all object classes and regress object bounds
    timer = Timer()
    timer.tic()
    scores, boxes = im_detect(net, im)
    timer.toc()

    # Visualize detections for each class
    # (leftover debug prints removed)
    if boxes.shape[1] == 5:
        # Axis-aligned boxes: 4 coordinates per class.
        CONF_THRESH = 0.8
        NMS_THRESH = 0.3
        for cls_ind, cls in enumerate(CLASSES[1:]):
            cls_ind += 1  # because we skipped background
            cls_boxes = boxes[:, 4 * cls_ind:4 * (cls_ind + 1)]
            cls_scores = scores[:, cls_ind]
            dets = np.hstack((cls_boxes,
                              cls_scores[:, np.newaxis])).astype(np.float32)
            keep = nms(dets, NMS_THRESH)
            dets = dets[keep, :]
            # Bug fix: `CONF_TsHRESH` was a typo that raised NameError.
            vis_detections(im, cls, dets, thresh=CONF_THRESH)
    else:
        # Quadrilateral boxes: 8 coordinates per class.
        CONF_THRESH = 0.5
        for cls_ind, cls in enumerate(CLASSES[1:]):
            cls_ind += 1  # because we skipped background
            inds = np.where(scores[:, cls_ind] >= CONF_THRESH)[0]
            cls_scores = scores[inds, cls_ind]
            cls_boxes = boxes[inds, cls_ind * 8:(cls_ind + 1) * 8]
            cls_boxes = sort_points(cls_boxes)
            cls_dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])).astype(np.float32, copy=False)
            keep = nms(cls_dets, cfg.TEST.NMS)
            cls_dets = cls_dets[keep, :]
            print(cls_dets)
            # TODO: make the output directory configurable instead of hard-coded.
            np.savetxt('/home/wov/fots/crpn/data/res/' + 'res_' + image_name[:-4] + '.txt', cls_dets[:, :8], fmt='%d', delimiter=', ')
            vis_quads(im, cls, cls_dets)
def parse_args():
    """Parse command-line arguments for the demo script."""
    parser = argparse.ArgumentParser(description='Faster R-CNN demo')
    parser.add_argument('--gpu', dest='gpu_id',
                        help='GPU device id to use [0]', default=0, type=int)
    parser.add_argument('--cpu', dest='cpu_mode',
                        help='Use CPU mode (overrides --gpu)',
                        action='store_true')
    parser.add_argument('--net', dest='demo_net',
                        help='Network to use [vgg16]',
                        choices=NETS.keys(), default=None)
    parser.add_argument('--model', dest='model',
                        help='*.caffemodel file', default=None)
    return parser.parse_args()
if __name__ == '__main__':
    cfg.TEST.HAS_RPN = True  # Use RPN for proposals

    args = parse_args()
    #
    # NOTE(review): this branch looks inverted — when --net is omitted
    # (demo_net is None), NETS[None] raises a KeyError; confirm whether the
    # condition should be `is not None` with the branches swapped.
    if args.demo_net is None:
        prototxt = os.path.join(cfg.MODELS_DIR, NETS[args.demo_net][0], 'faster_rcnn_end2end', 'test.prototxt')
        cfg_file = None
    else:
        prototxt = os.path.join('./models', args.demo_net, 'test.pt')
        cfg_file = os.path.join('./models', args.demo_net, 'config.yml')
    if cfg_file is not None:
        cfg_from_file(cfg_file)

    if args.model is None:
        # NOTE(review): also fails with NETS[None] when --net is omitted.
        caffemodel = os.path.join(cfg.DATA_DIR, 'faster_rcnn_models', NETS[args.demo_net][1])
    else:
        caffemodel = args.model

    if not os.path.isfile(caffemodel):
        raise IOError(('{:s} not found.\nDid you run ./data/script/'
                       'fetch_faster_rcnn_models.sh?').format(caffemodel))

    # NOTE(review): unconditionally forces CPU mode, overriding --gpu/--cpu.
    args.cpu_mode = True
    if args.cpu_mode:
        caffe.set_mode_cpu()
    else:
        caffe.set_mode_gpu()
        caffe.set_device(args.gpu_id)
        cfg.GPU_ID = args.gpu_id
    net = caffe.Net(prototxt, caffemodel, caffe.TEST)

    print('\n\nLoaded network {:s}'.format(caffemodel))

    # Warmup on a dummy image
    # im = 128 * np.ones((300, 500, 3), dtype=np.uint8)
    # for i in xrange(2):
    #     _, _= im_detect(net, im)
    # im_names = ['img_10.jpg', 'img_14.jpg', 'img_45.jpg']
    # Run the detector on every image in the hard-coded demo folder.
    for im_name in os.listdir('/home/wov/fots/crpn/data/demo'):
        print ('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
        print ('Demo for data/demo/{}'.format(im_name))
        demo(net, im_name)
    # plt.show()
"fast_rcnn.test.im_detect",
"numpy.hstack",
"caffe.set_mode_cpu",
"quad.sort_points.sort_points",
"matplotlib.pyplot.imshow",
"os.listdir",
"argparse.ArgumentParser",
"numpy.where",
"utils.timer.Timer",
"cv2.line",
"matplotlib.pyplot.Rectangle",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.c... | [((1149, 1179), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(12, 12)'}), '(figsize=(12, 12))\n', (1161, 1179), True, 'import matplotlib.pyplot as plt\n'), ((1958, 1973), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (1966, 1973), True, 'import matplotlib.pyplot as plt\n'), ((1978, 1996), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1994, 1996), True, 'import matplotlib.pyplot as plt\n'), ((2001, 2011), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (2009, 2011), True, 'import matplotlib.pyplot as plt\n'), ((2560, 2569), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (2567, 2569), True, 'import matplotlib.pyplot as plt\n'), ((2574, 2588), 'matplotlib.pyplot.imshow', 'plt.imshow', (['im'], {}), '(im)\n', (2584, 2588), True, 'import matplotlib.pyplot as plt\n'), ((2756, 2802), 'os.path.join', 'os.path.join', (['cfg.DATA_DIR', '"""demo"""', 'image_name'], {}), "(cfg.DATA_DIR, 'demo', image_name)\n", (2768, 2802), False, 'import caffe, os, sys, cv2\n'), ((2812, 2831), 'cv2.imread', 'cv2.imread', (['im_file'], {}), '(im_file)\n', (2822, 2831), False, 'import caffe, os, sys, cv2\n'), ((2903, 2910), 'utils.timer.Timer', 'Timer', ([], {}), '()\n', (2908, 2910), False, 'from utils.timer import Timer\n'), ((2947, 2965), 'fast_rcnn.test.im_detect', 'im_detect', (['net', 'im'], {}), '(net, im)\n', (2956, 2965), False, 'from fast_rcnn.test import im_detect\n'), ((4463, 4519), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Faster R-CNN demo"""'}), "(description='Faster R-CNN demo')\n", (4486, 4519), False, 'import argparse\n'), ((6154, 6197), 'caffe.Net', 'caffe.Net', (['prototxt', 'caffemodel', 'caffe.TEST'], {}), '(prototxt, caffemodel, caffe.TEST)\n', (6163, 6197), False, 'import caffe, os, sys, cv2\n'), ((6483, 6526), 'os.listdir', 'os.listdir', (['"""/home/wov/fots/crpn/data/demo"""'], {}), "('/home/wov/fots/crpn/data/demo')\n", 
(6493, 6526), False, 'import caffe, os, sys, cv2\n'), ((1032, 1063), 'numpy.where', 'np.where', (['(dets[:, -1] >= thresh)'], {}), '(dets[:, -1] >= thresh)\n', (1040, 1063), True, 'import numpy as np\n'), ((2243, 2307), 'cv2.line', 'cv2.line', (['im', '(pts[0], pts[1])', '(pts[2], pts[3])', '(0, 255, 0)', '(3)'], {}), '(im, (pts[0], pts[1]), (pts[2], pts[3]), (0, 255, 0), 3)\n', (2251, 2307), False, 'import caffe, os, sys, cv2\n'), ((2316, 2380), 'cv2.line', 'cv2.line', (['im', '(pts[2], pts[3])', '(pts[4], pts[5])', '(0, 255, 0)', '(3)'], {}), '(im, (pts[2], pts[3]), (pts[4], pts[5]), (0, 255, 0), 3)\n', (2324, 2380), False, 'import caffe, os, sys, cv2\n'), ((2389, 2453), 'cv2.line', 'cv2.line', (['im', '(pts[4], pts[5])', '(pts[6], pts[7])', '(0, 255, 0)', '(3)'], {}), '(im, (pts[4], pts[5]), (pts[6], pts[7]), (0, 255, 0), 3)\n', (2397, 2453), False, 'import caffe, os, sys, cv2\n'), ((2462, 2526), 'cv2.line', 'cv2.line', (['im', '(pts[6], pts[7])', '(pts[0], pts[1])', '(0, 255, 0)', '(3)'], {}), '(im, (pts[6], pts[7]), (pts[0], pts[1]), (0, 255, 0), 3)\n', (2470, 2526), False, 'import caffe, os, sys, cv2\n'), ((5276, 5372), 'os.path.join', 'os.path.join', (['cfg.MODELS_DIR', 'NETS[args.demo_net][0]', '"""faster_rcnn_end2end"""', '"""test.prototxt"""'], {}), "(cfg.MODELS_DIR, NETS[args.demo_net][0], 'faster_rcnn_end2end',\n 'test.prototxt')\n", (5288, 5372), False, 'import caffe, os, sys, cv2\n'), ((5422, 5472), 'os.path.join', 'os.path.join', (['"""./models"""', 'args.demo_net', '"""test.pt"""'], {}), "('./models', args.demo_net, 'test.pt')\n", (5434, 5472), False, 'import caffe, os, sys, cv2\n'), ((5492, 5545), 'os.path.join', 'os.path.join', (['"""./models"""', 'args.demo_net', '"""config.yml"""'], {}), "('./models', args.demo_net, 'config.yml')\n", (5504, 5545), False, 'import caffe, os, sys, cv2\n'), ((5584, 5607), 'fast_rcnn.config.cfg_from_file', 'cfg_from_file', (['cfg_file'], {}), '(cfg_file)\n', (5597, 5607), False, 'from fast_rcnn.config import cfg, 
cfg_from_file\n'), ((5657, 5729), 'os.path.join', 'os.path.join', (['cfg.DATA_DIR', '"""faster_rcnn_models"""', 'NETS[args.demo_net][1]'], {}), "(cfg.DATA_DIR, 'faster_rcnn_models', NETS[args.demo_net][1])\n", (5669, 5729), False, 'import caffe, os, sys, cv2\n'), ((5784, 5810), 'os.path.isfile', 'os.path.isfile', (['caffemodel'], {}), '(caffemodel)\n', (5798, 5810), False, 'import caffe, os, sys, cv2\n'), ((6013, 6033), 'caffe.set_mode_cpu', 'caffe.set_mode_cpu', ([], {}), '()\n', (6031, 6033), False, 'import caffe, os, sys, cv2\n'), ((6052, 6072), 'caffe.set_mode_gpu', 'caffe.set_mode_gpu', ([], {}), '()\n', (6070, 6072), False, 'import caffe, os, sys, cv2\n'), ((6081, 6110), 'caffe.set_device', 'caffe.set_device', (['args.gpu_id'], {}), '(args.gpu_id)\n', (6097, 6110), False, 'import caffe, os, sys, cv2\n'), ((1346, 1465), 'matplotlib.pyplot.Rectangle', 'plt.Rectangle', (['(bbox[0], bbox[1])', '(bbox[2] - bbox[0])', '(bbox[3] - bbox[1])'], {'fill': '(False)', 'edgecolor': '"""red"""', 'linewidth': '(3.5)'}), "((bbox[0], bbox[1]), bbox[2] - bbox[0], bbox[3] - bbox[1],\n fill=False, edgecolor='red', linewidth=3.5)\n", (1359, 1465), True, 'import matplotlib.pyplot as plt\n'), ((3483, 3504), 'fast_rcnn.nms_wrapper.nms', 'nms', (['dets', 'NMS_THRESH'], {}), '(dets, NMS_THRESH)\n', (3486, 3504), False, 'from fast_rcnn.nms_wrapper import nms\n'), ((3978, 4000), 'quad.sort_points.sort_points', 'sort_points', (['cls_boxes'], {}), '(cls_boxes)\n', (3989, 4000), False, 'from quad.sort_points import sort_points\n'), ((4124, 4151), 'fast_rcnn.nms_wrapper.nms', 'nms', (['cls_dets', 'cfg.TEST.NMS'], {}), '(cls_dets, cfg.TEST.NMS)\n', (4127, 4151), False, 'from fast_rcnn.nms_wrapper import nms\n'), ((4234, 4360), 'numpy.savetxt', 'np.savetxt', (["('/home/wov/fots/crpn/data/res/' + 'res_' + image_name[:-4] + '.txt')", 'cls_dets[:, :8]'], {'fmt': '"""%d"""', 'delimiter': '""", """'}), "('/home/wov/fots/crpn/data/res/' + 'res_' + image_name[:-4] +\n '.txt', cls_dets[:, :8], 
fmt='%d', delimiter=', ')\n", (4244, 4360), True, 'import numpy as np\n'), ((3793, 3836), 'numpy.where', 'np.where', (['(scores[:, cls_ind] >= CONF_THRESH)'], {}), '(scores[:, cls_ind] >= CONF_THRESH)\n', (3801, 3836), True, 'import numpy as np\n'), ((3365, 3414), 'numpy.hstack', 'np.hstack', (['(cls_boxes, cls_scores[:, np.newaxis])'], {}), '((cls_boxes, cls_scores[:, np.newaxis]))\n', (3374, 3414), True, 'import numpy as np\n'), ((4024, 4073), 'numpy.hstack', 'np.hstack', (['(cls_boxes, cls_scores[:, np.newaxis])'], {}), '((cls_boxes, cls_scores[:, np.newaxis]))\n', (4033, 4073), True, 'import numpy as np\n')] |
import tkinter as tk
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.figure import Figure
import matplotlib.patches as patches
import numpy as np
# PLOT SHAPE --------------------------------------------------------------------------------------------------------------------------------------------------------
def plot(parent, shape, coordinate_on, dimension_lines_on, transformed_coordinate_on, thickness_on, colors, a = 1.6, b = 0.8, d = 0.8):
    """Draw the selected cross-section *shape* on a fresh matplotlib canvas embedded in *parent*.

    parent : tkinter container widget; gains/reuses .canvas, .ax and .plotted attributes
    shape  : "Rectangle", "Ellipse", "Circle", "Isosceles_triangle",
             "Right_triangle" or None (draw an empty canvas)
    coordinate_on, dimension_lines_on, transformed_coordinate_on : overlay toggles
    thickness_on : also draw an inset outline suggesting a thin-walled section
    colors : dict with at least "secondary_color", "draw_main" and "text_color"
    a, b   : requested width/height (clamped to a 3:1 ratio by set_dimensions)
    d      : circle diameter parameter (currently ignored: the circle branch
             hard-codes d = 2, matching the fixed circle dimension lines)
    """
    # Tear down any previously drawn canvas so repeated calls do not stack widgets.
    if parent.plotted:
        parent.canvas._tkcanvas.destroy()
    circ = False
    fig = Figure()
    parent.canvas = FigureCanvasTkAgg(fig, master = parent)
    parent.canvas.get_tk_widget().pack()
    parent.canvas._tkcanvas.pack(side="top", fill="both", expand=1, padx = (10,20), pady = 20)
    parent.plotted = True
    parent.ax = fig.add_subplot(111)
    parent.ax.set_aspect("equal")
    fig.patch.set_facecolor(colors["secondary_color"])
    parent.ax.xaxis.set_visible(False)
    parent.ax.yaxis.set_visible(False)
    parent.ax.set_frame_on(False)
    if shape == "Rectangle":
        x, y, proportional = set_dimensions(a, b)
        rect_x = [-x/2, -x/2, x/2, x/2, -x/2]
        rect_y = [y/2, -y/2, -y/2, y/2, y/2]
        rect_x_th = [-x/2+0.1, -x/2+0.1, x/2-0.1, x/2-0.1, -x/2+0.1]
        rect_y_th = [y/2-0.1, -y/2+0.1, -y/2+0.1, y/2-0.1, y/2-0.1]
        parent.ax.plot(rect_x, rect_y, colors["draw_main"], lw=2)
        parent.ax.fill(rect_x, rect_y, color=colors["draw_main"], alpha=0.9)
        if thickness_on:
            parent.ax.plot(rect_x_th, rect_y_th, colors["draw_main"], lw=2)
            parent.ax.fill(rect_x_th, rect_y_th, color=colors["secondary_color"])
        coordinate_displacement = 0
    elif shape == "Ellipse":
        x, y, proportional = set_dimensions(a, b)
        t = np.linspace(0, 2*np.pi, 100)
        ell_x = x/2*np.cos(t)
        ell_y = y/2*np.sin(t)
        ell_x_th = (x/2-0.1)*np.cos(t)
        ell_y_th = (y/2-0.1)*np.sin(t)
        parent.ax.plot(ell_x, ell_y, colors["draw_main"], lw=2)
        parent.ax.fill(ell_x, ell_y, color=colors["draw_main"], alpha=0.9)
        if thickness_on:
            parent.ax.plot(ell_x_th, ell_y_th, colors["draw_main"], lw=2)
            parent.ax.fill(ell_x_th, ell_y_th, color=colors["secondary_color"])
        coordinate_displacement = 0
    elif shape == "Circle":
        t = np.linspace(0, 2*np.pi, 100)
        # NOTE(review): d is overwritten here; the circle is always drawn with
        # diameter 2, which the hard-coded circle dimension lines rely on.
        x = y = d = 2
        proportional = True
        circ_x = d/2*np.cos(t)
        circ_y = d/2*np.sin(t)
        circ_x_th = (d/2-0.1)*np.cos(t)
        circ_y_th = (d/2-0.1)*np.sin(t)
        circ = True
        parent.ax.plot(circ_x, circ_y, colors["draw_main"], lw=2)
        parent.ax.fill(circ_x, circ_y, color=colors["draw_main"], alpha=0.9)
        if thickness_on:
            parent.ax.plot(circ_x_th, circ_y_th, colors["draw_main"], lw=2)
            parent.ax.fill(circ_x_th, circ_y_th, color=colors["secondary_color"])
        coordinate_displacement = 0
    elif shape == "Isosceles_triangle":
        x, y, proportional = set_dimensions(a, b)
        tri_x = [-x/2, x/2, 0, -x/2]
        tri_y = [-y/3, -y/3, y/3*2, -y/3]
        tri_x_th = [-x/2+0.175, x/2-0.175, 0, -x/2+0.175]
        tri_y_th = [-y/3+0.075, -y/3+0.075, y/3*2-0.1, -y/3+0.075]
        parent.ax.plot(tri_x, tri_y, colors["draw_main"], lw=2)
        parent.ax.fill(tri_x, tri_y, color=colors["draw_main"], alpha=0.9)
        if thickness_on:
            parent.ax.plot(tri_x_th, tri_y_th, colors["draw_main"], lw=2)
            parent.ax.fill(tri_x_th, tri_y_th, color=colors["secondary_color"])
        # centroid of a triangle sits a third of the way up -> shift overlays
        coordinate_displacement = y/6
    elif shape == "Right_triangle":
        x, y, proportional = set_dimensions(a, b)
        tri_x = [-x/2, x/2, -x/2, -x/2]
        tri_y = [-y/3, -y/3, y/3*2, -y/3]
        tri_x_th = [-x/2+0.1, x/2-0.4, -x/2+0.1, -x/2+0.1]
        tri_y_th = [-y/3+0.1, -y/3+0.1, y/3*2-0.175, -y/3+0.1]
        parent.ax.plot(tri_x, tri_y, colors["draw_main"], lw=2)
        parent.ax.fill(tri_x, tri_y, color=colors["draw_main"], alpha=0.9)
        if thickness_on:
            parent.ax.plot(tri_x_th, tri_y_th, colors["draw_main"], lw=2)
            parent.ax.fill(tri_x_th, tri_y_th, color=colors["secondary_color"])
        coordinate_displacement = y/6
    # Overlays require a drawn shape: guard them so shape=None no longer raises
    # UnboundLocalError on x/y/coordinate_displacement.
    if shape is not None:
        if coordinate_on:
            coordinate_system(x, y, parent.ax, coordinate_displacement, colors)
        if dimension_lines_on:
            dimension_lines(x, y, parent.ax, r"$a$", r"$b$", coordinate_displacement, colors, circ)
        if transformed_coordinate_on:
            transformed_coordinate_system(x, y, parent.ax, 15, colors)
            transformation_dimensions(x, y, parent.ax, colors)
        if not proportional:
            # Warn the user that the drawing was clamped and is no longer to scale.
            parent.ax.text(-x, -y, "NEM arányos!!!", verticalalignment='center', size='large', color = colors["text_color"])
    parent.canvas.draw()
# USEFUL FUNCTIONS --------------------------------------------------------------------------------------------------------------------------------------------------------
def set_dimensions(a, b):
    """Clamp the drawable width/height to an aspect ratio between 1:3 and 3:1.

    Returns (x, y, proportional); *proportional* is False when the requested
    a/b ratio had to be clamped, so the drawing is no longer to scale.
    """
    ratio = a / b
    if ratio > 3:
        return 3, 1, False
    if ratio < 0.33:
        return 1, 3, False
    return a, b, True
def dimension_lines(x, y, ax, t1, t2, e, colors, circ = False):
    """Draw dimension (measurement) lines and labels around the shape on *ax*.

    x, y   : drawn width/height of the shape
    t1, t2 : label strings for the horizontal and vertical dimension
    e      : vertical offset of the shape centroid (non-zero for triangles)
    circ   : True draws a single diameter line instead of width/height lines
             (positions are hard-coded for the diameter-2 circle)
    """
    transparency = 1
    color = colors['draw_tertiary']
    # arrow-head size scales with the larger drawn dimension
    hw = 0.015*max(x,y)
    hl = 2*hw
    if circ == False:
        # extension lines from the shape edges out to the arrow positions
        line1_x = [-x/2-max(x,y)/4, 0]
        line1_y = [y/2+e, y/2+e]
        line2_x = [-x/2-max(x,y)/4, 0]
        line2_y = [-y/2+e, -y/2+e]
        line3_x = [-x/2, -x/2]
        line3_y = [-y/2-max(x,y)/4+e, -2*e]
        line4_x = [x/2, x/2]
        line4_y = [-y/2-max(x,y)/4+e, -2*e]
        # NOTE(review): the next two arrow calls are identical — the second
        # looks like an accidental duplicate (drawn exactly on top of the first).
        ax.arrow(line1_x[0]+x/32, line2_y[0], 0, y, head_width=hw, head_length=hl, fc=color, ec=color,length_includes_head = True)
        ax.arrow(line1_x[0]+x/32, line2_y[0], 0, y, head_width=hw, head_length=hl, fc=color, ec=color,length_includes_head = True)
        ax.arrow(line1_x[0]+x/32, line1_y[0], 0, -y, head_width=hw, head_length=hl, fc=color, ec=color,length_includes_head = True)
        ax.arrow(line3_x[0], line3_y[0]+x/32, x, 0, head_width=hw, head_length=hl, fc=color, ec=color,length_includes_head = True)
        ax.arrow(line4_x[0], line3_y[0]+x/32, -x, 0, head_width=hw, head_length=hl, fc=color, ec=color,length_includes_head = True)
        ax.plot(line1_x, line1_y, color,zorder=0)
        ax.plot(line2_x, line2_y, color,zorder=0)
        ax.plot(line3_x, line3_y, color,zorder=0)
        ax.plot(line4_x, line4_y, color,zorder=0)
        # dimension label below the shape (t1) and left of the shape (t2)
        ax.text(
            0, -y/2-max(x,y)/16*5+e,
            t1,
            horizontalalignment='center',
            verticalalignment='center',
            size='large',
            color = color,
            alpha=transparency)
        ax.text(
            -x/2-max(x,y)/16*5, e,
            t2,
            horizontalalignment='center',
            verticalalignment='center',
            size='large',
            color = color,
            alpha=transparency)
    elif circ == True:
        # diagonal diameter line with an arrow head at each end
        line1_x = [-1, 1]
        line1_y = [1.732, -1.732]
        ax.plot(line1_x, line1_y, color,zorder=3)
        ax.arrow(line1_x[0], line1_y[0], 0.5, -0.866, head_width=hw, head_length=hl, fc=color, ec=color,length_includes_head = True)
        ax.arrow(line1_x[1], line1_y[1], -0.5, 0.866, head_width=hw, head_length=hl, fc=color, ec=color,length_includes_head = True)
        ax.text(
            1.1, -1.4,
            "Ød",
            horizontalalignment='center',
            verticalalignment='center',
            size='large',
            color = color,
            alpha=transparency)
        # NOTE(review): this second label reuses t2 with x*y-based spacing —
        # looks like a leftover from the rectangular branch; confirm it is wanted.
        ax.text(
            -x/2-x*y/16*5, e,
            t2,
            horizontalalignment='center',
            verticalalignment='center',
            size='large',
            color = color,
            alpha=transparency)
def coordinate_system(x, y, ax, e, colors):
    """Draw the centroid x/y coordinate axes (arrows plus labels) on *ax*.

    x, y   : drawn width/height of the shape
    e      : vertical centroid offset (non-zero for triangle shapes)
    colors : color dictionary; the axes use the 'draw_secondary' entry

    Note: a large commented-out variant of this body (special-casing
    "Right_triangle") was removed as dead code.
    """
    color = colors['draw_secondary']
    transparency = 1
    # Arrow-head size scales with the larger drawn dimension.
    hw = 0.015*max(x,y)
    hl = 2*hw
    # x axis: horizontal arrow through the centroid
    ax.arrow(
        -x/2-max(x,y)/8, 0, x+max(x,y)/3, 0,
        head_width=hw,
        head_length=hl,
        fc=color, ec=color,
        length_includes_head = True,
        alpha=transparency,
        zorder=3)
    # y axis: vertical arrow, shifted by the centroid offset e
    ax.arrow(
        0, -y/2-max(x,y)/8+e, 0, y+max(x,y)/3,
        head_width=hw,
        head_length=hl,
        fc=color, ec=color,
        length_includes_head = True,
        alpha=transparency,
        zorder=3)
    ax.text(
        x/2+max(x,y)/5, -max(x,y)/20,
        r"$x$",
        horizontalalignment='center',
        verticalalignment='center',
        size='large',
        color = color,
        alpha=transparency)
    ax.text(
        -max(x,y)/20, y/2+max(x,y)/5+e,
        r"$y$",
        horizontalalignment='center',
        verticalalignment='center',
        size='large',
        color = color,
        alpha=transparency)
def transformed_coordinate_system(x, y, ax, phi, colors):
    """Draw the rotated (x1, y1) axis pair, tilted by *phi* degrees, passing
    through the shifted origin at (x/5, y/5)."""
    color = colors['draw_tertiary']
    head_w = 0.015 * max(x, y)
    head_l = 2 * head_w
    rad = phi / 180 * np.pi
    cos_p = np.cos(rad)
    sin_p = np.sin(rad)
    # x1 axis: segment of length 3x/2 centred on the shifted origin
    x1_start = (-x * 3 / 4) * cos_p + x / 5
    y1_start = -x * 3 / 4 * sin_p + y / 5
    dx1 = (x * 3 / 2) * cos_p
    dy1 = x * 3 / 2 * sin_p
    # y1 axis: perpendicular segment of length 3y/2
    x2_start = y * 3 / 4 * sin_p + x / 5
    y2_start = -y * 3 / 4 * cos_p + y / 5
    dx2 = (-y * 3 / 2) * sin_p
    dy2 = y * 3 / 2 * cos_p
    for sx, sy, dx, dy in ((x1_start, y1_start, dx1, dy1),
                           (x2_start, y2_start, dx2, dy2)):
        ax.arrow(sx, sy, dx, dy,
                 head_width=head_w, head_length=head_l, fc=color, ec=color,
                 length_includes_head=True, zorder=3)
    # labels just past the arrow tips
    ax.text(x1_start + dx1 + x / 20, y1_start + dy1 + y / 20, r"$x_1$",
            horizontalalignment='center', color=color,
            verticalalignment='center', size='large')
    ax.text(x2_start + dx2 + x / 20, y2_start + dy2 + y / 20, r"$y_1$",
            horizontalalignment='center', color=color,
            verticalalignment='center', size='large')
def transformation_dimensions(x, y, ax, colors):
    """Annotate the translation (u, v) and rotation (phi) between the original
    and transformed coordinate systems on *ax*.

    x, y   : drawn width/height of the shape; all offsets are fractions of these
    colors : color dictionary; annotations use 'draw_tertiary'
    """
    color = colors['draw_tertiary']
    transparency = 1 #0.7
    # arrow-head size scales with the larger drawn dimension
    hw = 0.015*max(x,y)
    hl = 2*hw
    # vertical displacement "v": reference line plus a double-headed arrow
    y_disp_x = [x/5, x]
    y_disp_y = [y/5, y/5]
    ax.plot(y_disp_x, y_disp_y, color, lw=1, zorder=5, alpha=transparency)
    ax.arrow(x/2+x/8, 0, 0, y/5,
        head_width=hw, head_length=hl, fc=color, ec=color,length_includes_head = True, alpha=transparency)
    ax.arrow(x/2+x/8, y/5, 0, -y/5,
        head_width=hw, head_length=hl, fc=color, ec=color,length_includes_head = True, alpha=transparency)
    ax.text(x/2+x/6, y/8, r"$v$", horizontalalignment='center', color = color,
        verticalalignment='center', alpha=transparency)
    # horizontal displacement "u": reference line plus a double-headed arrow
    x_disp_x = [x/5, x/5]
    x_disp_y = [y/5, -y/5]
    ax.plot(x_disp_x, x_disp_y, color, lw=1, zorder=5, alpha=transparency)
    ax.arrow(0, -y/8, x/5, 0,
        head_width=hw, head_length=hl, fc=color, ec=color,length_includes_head = True, alpha=transparency)
    ax.arrow(x/5, -y/8, -x/5, 0,
        head_width=hw, head_length=hl, fc=color, ec=color,length_includes_head = True, alpha=transparency)
    ax.text(x/8, -y/12, r"$u$", horizontalalignment='center', color = color,
        verticalalignment='center', alpha=transparency)
    # rotation angle "phi": curved arrow between the two x axes
    style = "Simple, tail_width=0.2, head_width=4, head_length=8"
    kw = dict(arrowstyle=style, color=color)
    a3 = patches.FancyArrowPatch((x/2+x/3, y/5), (x/2+x/5+x/20, y/5+x*3/20),
        connectionstyle="arc3,rad=.2", **kw, alpha=transparency)
    ax.add_patch(a3)
    ax.text(x/2+x/4+x/8, y/4+y/12, r"$\varphi$", horizontalalignment='center', color = color,
        verticalalignment='center', alpha=transparency)
def sign_evaluation(alpha, angle_unit, beta = False):
    """Resolve arrow-orientation signs for a principal axis.

    alpha      : principal-axis angle, in degrees when angle_unit == "deg",
                 otherwise treated as radians
    angle_unit : "deg" or anything else (radians)
    beta       : True for the second (perpendicular) principal axis, which
                 is rotated an extra 90 degrees before quadrant lookup

    Returns (signx, signy, alpha) where alpha already includes the unit
    conversion and the optional 90-degree shift.
    """
    if angle_unit == "deg":
        alpha = alpha / 180 * np.pi
        if beta == True:
            alpha = alpha + np.pi / 2
        print('deg')
    else:
        if beta == True:
            alpha = alpha + np.pi / 2
        print('rad')
    half_pi = np.pi / 2
    # Quadrant table: (signs for the first axis, signs when beta is True).
    if 0 <= alpha < half_pi:
        normal, flipped = (1, 1), (-1, 1)
    elif half_pi <= alpha < np.pi:
        normal, flipped = (-1, 1), (1, 1)
    elif np.pi <= alpha < 3 * half_pi:
        normal, flipped = (-1, -1), (-1, 1)
    elif 3 * half_pi <= alpha < 2 * np.pi:
        normal, flipped = (1, -1), (-1, -1)
    else:
        # out-of-range angle (negative or >= 2*pi): fall back to quadrant I
        normal, flipped = (1, 1), (-1, 1)
    signx, signy = flipped if beta == True else normal
    return signx, signy, alpha
def plot_principal_axes(parent, colors, ax, alpha, angle_unit, transformed_coordinate_on,shape, a = 1.6, b = 0.8, d = 0.8):
    """Draw (or redraw) the two principal-axis arrows I1/I2 on *ax*.

    parent : widget holding the previous arrow/text artists as attributes
    alpha  : principal-axis angle (degrees or radians per *angle_unit*)
    transformed_coordinate_on : when True, nothing is drawn (only the old
             artists are removed) — the transformed axes occupy the plot
    shape  : one of the shape names handled below; NOTE(review): any other
             value (e.g. "Right_triangle") leaves x/y/x_offset/y_offset
             undefined and raises NameError further down — confirm intended.
    a, b, d : shape dimensions, as in plot()
    """
    principal_x = True
    a_init, b_init, proportional = set_dimensions(a, b)
    # Remove artists from a previous call; on the first call the attributes
    # do not exist yet, hence the (overly broad) bare except.
    try:
        parent.principal_axis1.remove()
        parent.principal_axis2.remove()
        parent.principal_axis1_text.remove()
        parent.principal_axis2_text.remove()
    except:
        None
    if transformed_coordinate_on == False:
        color = colors['draw_principal']
        # evaluate orientation signs of the principal axis
        signx1, signy1, beta1 = sign_evaluation(alpha, angle_unit)
        signx2, signy2, beta2 = sign_evaluation(alpha, angle_unit, True)
        # Bounding half-extents (x, y) inside which the arrows are clipped,
        # plus label offsets per shape.
        if shape == "Rectangle":
            x = (a_init+max(a_init,b_init)/4)/2
            y = (b_init+max(a_init,b_init)/4)/2
            x_offset = 0
            y_offset = 0
        elif shape == "Circle":
            x = 2*d*3/4
            y = 2*d*3/4
            x_offset = 0
            y_offset = 0
        elif shape == "Ellipse":
            x = (a_init+max(a_init,b_init)/4)/2
            y = (b_init+max(a_init,b_init)/4)/2
            x_offset = 0
            y_offset = 0
        elif shape == "Isosceles_triangle":
            x = (a_init+max(a_init,b_init)/4)/2
            y = (b_init+max(a_init,b_init)/4)/2
            # which axis carries the offset depends on the dominant dimension
            if x>y:
                principal_x = True
            else:
                principal_x = False
            x_offset = 0
            y_offset = b_init/5
        hw = 0.03*max(x,y)
        hl = 2*hw
        arrow_length = (x**2+y**2)**0.5
        # first principal axis
        x_val1 = arrow_length*np.cos(beta1)
        y_val1 = arrow_length*np.sin(beta1)
        sign_x1 = np.sign(x_val1)
        sign_y1 = np.sign(y_val1)
        # clip the arrow half-length to the bounding box, preserving the angle
        if abs(x_val1) >= x:
            x_val1 = sign_x1*x
            y_val1 = x_val1*np.tan(beta1)
        elif abs(y_val1) >= y:
            y_val1 = sign_y1*y
            x_val1 = y_val1/np.tan(beta1)
        # arrow runs from (-x_val1, -y_val1) to (+x_val1, +y_val1), oriented by the signs
        ar1_x1 = -signx1*x_val1
        ar1_y1 = -signy1*y_val1
        ar1_x2 = signx1*x_val1
        ar1_y2 = signy1*y_val1
        ar1_dx = ar1_x2-ar1_x1
        ar1_dy = ar1_y2-ar1_y1
        # second principal axis
        x_val2 = arrow_length*np.cos(beta2)
        y_val2 = arrow_length*np.sin(beta2)
        sign_x2 = np.sign(x_val2)
        sign_y2 = np.sign(y_val2)
        if abs(x_val2) >= x:
            x_val2 = sign_x2*x
            y_val2 = x_val2*np.tan(beta2)
        elif abs(y_val2) >= y:
            y_val2 = sign_y2*y
            x_val2 = y_val2/np.tan(beta2)
        ar2_x1 = -signx2*x_val2
        ar2_y1 = -signy2*y_val2
        ar2_x2 = signx2*x_val2
        ar2_y2 = signy2*y_val2
        ar2_dx = ar2_x2-ar2_x1
        ar2_dy = ar2_y2-ar2_y1
        # The two branches differ only in which axis receives x_offset/y_offset.
        if principal_x == False:
            parent.principal_axis1 = ax.arrow(ar1_x1+x_offset, ar1_y1, ar1_dx, ar1_dy,
                head_width=hw, head_length=hl, fc=color, ec=color,length_includes_head = True, zorder=5)
            parent.principal_axis2 = ax.arrow(ar2_x1, ar2_y1+y_offset, ar2_dx, ar2_dy,
                head_width=hw, head_length=hl, fc=color, ec=color,length_includes_head = True, zorder=5)
            parent.principal_axis1_text = ax.text(ar1_dx/2+0.06*max(ar1_dx,ar1_dy)+x_offset, ar1_dy/2+0.06*max(ar1_dx,ar1_dy), r"$I_1$", horizontalalignment='center', color = color,
                verticalalignment='center')
            parent.principal_axis2_text = ax.text(ar2_dx/2+0.06*max(ar1_dx,ar1_dy), ar2_dy/2+0.06*max(ar1_dx,ar1_dy)+y_offset, r"$I_2$", horizontalalignment='center', color = color,
                verticalalignment='center')
        else:
            parent.principal_axis1 = ax.arrow(ar1_x1, ar1_y1+y_offset, ar1_dx, ar1_dy,
                head_width=hw, head_length=hl, fc=color, ec=color,length_includes_head = True, zorder=5)
            parent.principal_axis2 = ax.arrow(ar2_x1+x_offset, ar2_y1, ar2_dx, ar2_dy,
                head_width=hw, head_length=hl, fc=color, ec=color,length_includes_head = True, zorder=5)
            parent.principal_axis1_text = ax.text(ar1_dx/2+0.06*max(ar1_dx,ar1_dy), ar1_dy/2+0.06*max(ar1_dx,ar1_dy)+y_offset, r"$I_1$", horizontalalignment='center', color = color,
                verticalalignment='center')
            parent.principal_axis2_text = ax.text(ar2_dx/2+0.06*max(ar1_dx,ar1_dy)+x_offset, ar2_dy/2+0.06*max(ar1_dx,ar1_dy), r"$I_2$", horizontalalignment='center', color = color,
                verticalalignment='center')
    parent.canvas.draw()
| [
"numpy.tan",
"matplotlib.figure.Figure",
"matplotlib.patches.FancyArrowPatch",
"numpy.linspace",
"numpy.cos",
"numpy.sign",
"numpy.sin",
"matplotlib.backends.backend_tkagg.FigureCanvasTkAgg"
] | [((582, 590), 'matplotlib.figure.Figure', 'Figure', ([], {}), '()\n', (588, 590), False, 'from matplotlib.figure import Figure\n'), ((611, 648), 'matplotlib.backends.backend_tkagg.FigureCanvasTkAgg', 'FigureCanvasTkAgg', (['fig'], {'master': 'parent'}), '(fig, master=parent)\n', (628, 648), False, 'from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\n'), ((12705, 12859), 'matplotlib.patches.FancyArrowPatch', 'patches.FancyArrowPatch', (['(x / 2 + x / 3, y / 5)', '(x / 2 + x / 5 + x / 20, y / 5 + x * 3 / 20)'], {'connectionstyle': '"""arc3,rad=.2"""', 'alpha': 'transparency'}), "((x / 2 + x / 3, y / 5), (x / 2 + x / 5 + x / 20, y /\n 5 + x * 3 / 20), connectionstyle='arc3,rad=.2', **kw, alpha=transparency)\n", (12728, 12859), True, 'import matplotlib.patches as patches\n'), ((10425, 10436), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (10431, 10436), True, 'import numpy as np\n'), ((10456, 10467), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (10462, 10467), True, 'import numpy as np\n'), ((10559, 10570), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (10565, 10570), True, 'import numpy as np\n'), ((10590, 10601), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (10596, 10601), True, 'import numpy as np\n'), ((15805, 15820), 'numpy.sign', 'np.sign', (['x_val1'], {}), '(x_val1)\n', (15812, 15820), True, 'import numpy as np\n'), ((15839, 15854), 'numpy.sign', 'np.sign', (['y_val1'], {}), '(y_val1)\n', (15846, 15854), True, 'import numpy as np\n'), ((16388, 16403), 'numpy.sign', 'np.sign', (['x_val2'], {}), '(x_val2)\n', (16395, 16403), True, 'import numpy as np\n'), ((16422, 16437), 'numpy.sign', 'np.sign', (['y_val2'], {}), '(y_val2)\n', (16429, 16437), True, 'import numpy as np\n'), ((1826, 1856), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(100)'], {}), '(0, 2 * np.pi, 100)\n', (1837, 1856), True, 'import numpy as np\n'), ((10353, 10364), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (10359, 10364), True, 'import numpy as 
np\n'), ((10388, 10399), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (10394, 10399), True, 'import numpy as np\n'), ((10486, 10497), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (10492, 10497), True, 'import numpy as np\n'), ((10521, 10532), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (10527, 10532), True, 'import numpy as np\n'), ((15729, 15742), 'numpy.cos', 'np.cos', (['beta1'], {}), '(beta1)\n', (15735, 15742), True, 'import numpy as np\n'), ((15773, 15786), 'numpy.sin', 'np.sin', (['beta1'], {}), '(beta1)\n', (15779, 15786), True, 'import numpy as np\n'), ((16312, 16325), 'numpy.cos', 'np.cos', (['beta2'], {}), '(beta2)\n', (16318, 16325), True, 'import numpy as np\n'), ((16356, 16369), 'numpy.sin', 'np.sin', (['beta2'], {}), '(beta2)\n', (16362, 16369), True, 'import numpy as np\n'), ((1875, 1884), 'numpy.cos', 'np.cos', (['t'], {}), '(t)\n', (1881, 1884), True, 'import numpy as np\n'), ((1905, 1914), 'numpy.sin', 'np.sin', (['t'], {}), '(t)\n', (1911, 1914), True, 'import numpy as np\n'), ((1945, 1954), 'numpy.cos', 'np.cos', (['t'], {}), '(t)\n', (1951, 1954), True, 'import numpy as np\n'), ((1984, 1993), 'numpy.sin', 'np.sin', (['t'], {}), '(t)\n', (1990, 1993), True, 'import numpy as np\n'), ((2393, 2423), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(100)'], {}), '(0, 2 * np.pi, 100)\n', (2404, 2423), True, 'import numpy as np\n'), ((15943, 15956), 'numpy.tan', 'np.tan', (['beta1'], {}), '(beta1)\n', (15949, 15956), True, 'import numpy as np\n'), ((16526, 16539), 'numpy.tan', 'np.tan', (['beta2'], {}), '(beta2)\n', (16532, 16539), True, 'import numpy as np\n'), ((2493, 2502), 'numpy.cos', 'np.cos', (['t'], {}), '(t)\n', (2499, 2502), True, 'import numpy as np\n'), ((2524, 2533), 'numpy.sin', 'np.sin', (['t'], {}), '(t)\n', (2530, 2533), True, 'import numpy as np\n'), ((2565, 2574), 'numpy.cos', 'np.cos', (['t'], {}), '(t)\n', (2571, 2574), True, 'import numpy as np\n'), ((2605, 2614), 'numpy.sin', 'np.sin', (['t'], {}), 
'(t)\n', (2611, 2614), True, 'import numpy as np\n'), ((16047, 16060), 'numpy.tan', 'np.tan', (['beta1'], {}), '(beta1)\n', (16053, 16060), True, 'import numpy as np\n'), ((16630, 16643), 'numpy.tan', 'np.tan', (['beta2'], {}), '(beta2)\n', (16636, 16643), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import copy
import datetime
import json
import os
import queue
import threading
import time
from freezegun import freeze_time
from mantarray_desktop_app import BARCODE_INVALID_UUID
from mantarray_desktop_app import BARCODE_POLL_PERIOD
from mantarray_desktop_app import BARCODE_UNREADABLE_UUID
from mantarray_desktop_app import BARCODE_VALID_UUID
from mantarray_desktop_app import BUFFERING_STATE
from mantarray_desktop_app import CALIBRATED_STATE
from mantarray_desktop_app import CALIBRATION_NEEDED_STATE
from mantarray_desktop_app import create_magnetometer_config_dict
from mantarray_desktop_app import DEFAULT_MAGNETOMETER_CONFIG
from mantarray_desktop_app import DEFAULT_SAMPLING_PERIOD
from mantarray_desktop_app import get_redacted_string
from mantarray_desktop_app import IncorrectMagnetometerConfigFromInstrumentError
from mantarray_desktop_app import IncorrectSamplingPeriodFromInstrumentError
from mantarray_desktop_app import INSTRUMENT_INITIALIZING_STATE
from mantarray_desktop_app import LIVE_VIEW_ACTIVE_STATE
from mantarray_desktop_app import MantarrayMcSimulator
from mantarray_desktop_app import MantarrayProcessesMonitor
from mantarray_desktop_app import ok_comm
from mantarray_desktop_app import OUTGOING_DATA_BUFFER_SIZE
from mantarray_desktop_app import process_manager
from mantarray_desktop_app import process_monitor
from mantarray_desktop_app import RECORDING_STATE
from mantarray_desktop_app import RunningFIFOSimulator
from mantarray_desktop_app import SERIAL_COMM_SENSOR_AXIS_LOOKUP_TABLE
from mantarray_desktop_app import SERVER_INITIALIZING_STATE
from mantarray_desktop_app import SERVER_READY_STATE
from mantarray_desktop_app import ServerManager
from mantarray_desktop_app import STOP_MANAGED_ACQUISITION_COMMUNICATION
from mantarray_desktop_app.constants import GENERIC_24_WELL_DEFINITION
from mantarray_desktop_app.server import queue_command_to_instrument_comm
import numpy as np
import pytest
from stdlib_utils import invoke_process_run_and_check_errors
from stdlib_utils import TestingQueue
from xem_wrapper import FrontPanelSimulator
from ..fixtures import fixture_patch_print
from ..fixtures import fixture_test_process_manager_creator
from ..fixtures import get_mutable_copy_of_START_MANAGED_ACQUISITION_COMMUNICATION
from ..fixtures import QUEUE_CHECK_TIMEOUT_SECONDS
from ..fixtures_mc_simulator import create_random_stim_info
from ..fixtures_ok_comm import fixture_patch_connection_to_board
from ..fixtures_process_monitor import fixture_test_monitor
from ..helpers import confirm_queue_is_eventually_empty
from ..helpers import confirm_queue_is_eventually_of_size
from ..helpers import is_queue_eventually_empty
from ..helpers import is_queue_eventually_not_empty
from ..helpers import put_object_into_queue_and_raise_error_if_eventually_still_empty
# Fixture modules registered for this test module (consumed by the project's
# test harness via the imported fixture_* names).
__fixtures__ = [
    fixture_test_process_manager_creator,
    fixture_test_monitor,
    fixture_patch_connection_to_board,
    fixture_patch_print,
]
def test_MantarrayProcessesMonitor__init__calls_super(mocker, test_process_manager_creator):
    """Constructing a monitor must invoke threading.Thread.__init__ exactly once."""
    process_manager = test_process_manager_creator()
    spied_thread_init = mocker.spy(threading.Thread, "__init__")
    MantarrayProcessesMonitor({}, process_manager, queue.Queue(), threading.Lock())
    assert spied_thread_init.call_count == 1
@pytest.mark.slow
@pytest.mark.timeout(12)
def test_MantarrayProcessesMonitor__soft_stop_calls_manager_soft_stop_and_join(
    test_monitor, test_process_manager_creator, mocker
):
    """Soft-stopping a running monitor thread delegates to the process
    manager's soft_stop_and_join_processes exactly once."""
    test_process_manager = test_process_manager_creator()
    monitor_thread, *_ = test_monitor(test_process_manager)
    # mock to avoid issues with test hanging
    mocker.patch.object(ServerManager, "shutdown_server", autospec=True)
    spied_stop = mocker.spy(test_process_manager, "soft_stop_and_join_processes")
    test_process_manager.start_processes()
    monitor_thread.start()
    time.sleep(
        0.5
    )  # Eli (12/10/20): give time for the ProcessMonitor to consume the start up messages from the queues of the subprocesses before attempting to join them
    monitor_thread.soft_stop()
    monitor_thread.join()
    assert spied_stop.call_count == 1
class MantarrayProcessesMonitorThatRaisesError(MantarrayProcessesMonitor):
    """Test double whose run-loop body always raises, used to exercise the
    monitor's error-handling and shutdown paths."""

    def _commands_for_each_run_iteration(self):
        # deliberate failure injected into every monitor iteration
        raise NotImplementedError("Process Monitor Exception")
def test_MantarrayProcessesMonitor__populates_error_queue_when_error_raised(
    test_process_manager_creator, patch_print
):
    """An exception raised inside a monitor iteration propagates out of
    invoke_process_run_and_check_errors with its original message."""
    test_process_manager = test_process_manager_creator()
    error_queue = TestingQueue()
    test_pm = MantarrayProcessesMonitorThatRaisesError(
        {}, test_process_manager, error_queue, threading.Lock()
    )
    with pytest.raises(NotImplementedError, match="Process Monitor Exception"):
        invoke_process_run_and_check_errors(test_pm)
def test_MantarrayProcessesMonitor__logs_errors_raised_in_own_thread_correctly(
    test_process_manager_creator, mocker, patch_print
):
    """An error raised by the monitor's own thread is logged at error level
    with the formatted stack trace and the error instance from the queue."""
    expected_stack_trace = "expected stack trace"
    mocked_logger = mocker.patch.object(process_monitor.logger, "error", autospec=True)
    # force a deterministic stack-trace string for the log-message assertion
    mocker.patch.object(
        process_monitor, "get_formatted_stack_trace", autospec=True, return_value=expected_stack_trace
    )
    test_process_manager = test_process_manager_creator()
    error_queue = TestingQueue()
    test_pm = MantarrayProcessesMonitorThatRaisesError(
        {}, test_process_manager, error_queue, threading.Lock()
    )
    test_pm.run(num_iterations=1)
    # the raised error is placed on the error queue before being logged
    expected_error = error_queue.get(timeout=QUEUE_CHECK_TIMEOUT_SECONDS)
    expected_msg = f"Error raised by Process Monitor\n{expected_stack_trace}\n{expected_error}"
    mocked_logger.assert_called_once_with(expected_msg)
def test_MantarrayProcessesMonitor__when_error_raised_in_own_thread__hard_stops_and_joins_subprocesses_if_running_and_shutsdown_server(
    test_process_manager_creator, mocker, patch_print
):
    """With subprocess start-up complete, a monitor-thread error hard-stops
    the subprocesses (without the server) and then shuts down the server."""
    test_process_manager = test_process_manager_creator()
    # report that subprocesses are fully started so the hard-stop path is taken
    mocker.patch.object(
        test_process_manager, "are_subprocess_start_ups_complete", autospec=True, return_value=True
    )
    mocked_hard_stop_processes = mocker.patch.object(
        test_process_manager, "hard_stop_and_join_processes", autospec=True
    )
    mocked_shutdown_server = mocker.patch.object(test_process_manager, "shutdown_server", autospec=True)
    error_queue = TestingQueue()
    test_pm = MantarrayProcessesMonitorThatRaisesError(
        {}, test_process_manager, error_queue, threading.Lock()
    )
    test_pm.run(num_iterations=1)
    # server shutdown is handled separately, so hard stop must exclude it
    mocked_hard_stop_processes.assert_called_once_with(shutdown_server=False)
    mocked_shutdown_server.assert_called_once()
def test_MantarrayProcessesMonitor__when_error_raised_in_own_thread__shutsdown_server_but_not_subprocesses(
    test_process_manager_creator, mocker, patch_print
):
    """With subprocess start-up NOT complete, a monitor-thread error shuts
    down the server only — the subprocesses are not hard-stopped."""
    test_process_manager = test_process_manager_creator()
    # report that subprocesses are still starting up
    mocker.patch.object(
        test_process_manager, "are_subprocess_start_ups_complete", autospec=True, return_value=False
    )
    mocked_hard_stop_processes = mocker.patch.object(
        test_process_manager, "hard_stop_and_join_processes", autospec=True
    )
    mocked_shutdown_server = mocker.patch.object(test_process_manager, "shutdown_server", autospec=True)
    error_queue = TestingQueue()
    test_pm = MantarrayProcessesMonitorThatRaisesError(
        {}, test_process_manager, error_queue, threading.Lock()
    )
    test_pm.run(num_iterations=1)
    mocked_hard_stop_processes.assert_not_called()
    mocked_shutdown_server.assert_called_once()
def test_MantarrayProcessesMonitor__logs_messages_from_instrument_comm(
    mocker, test_process_manager_creator, test_monitor
):
    """A message on the instrument-comm-to-main queue is consumed by one
    monitor iteration and logged verbatim at info level."""
    test_process_manager = test_process_manager_creator(use_testing_queues=True)
    monitor_thread, *_ = test_monitor(test_process_manager)
    mocked_logger = mocker.patch.object(process_monitor.logger, "info", autospec=True)
    instrument_comm_to_main = (
        test_process_manager.queue_container().get_communication_queue_from_instrument_comm_to_main(0)
    )
    expected_comm = {
        "communication_type": "debug_console",
        "command": "get_device_id",
        "response": "my_cool_id",
    }
    instrument_comm_to_main.put_nowait(expected_comm)
    assert is_queue_eventually_not_empty(instrument_comm_to_main) is True
    invoke_process_run_and_check_errors(monitor_thread)
    # the monitor must drain the queue in a single iteration
    assert is_queue_eventually_empty(instrument_comm_to_main) is True
    mocked_logger.assert_called_once_with(f"Communication from the Instrument Controller: {expected_comm}")
def test_MantarrayProcessesMonitor__logs_messages_from_file_writer(
    mocker, test_process_manager_creator, test_monitor
):
    """A message on the file-writer-to-main queue is consumed by one monitor
    iteration and logged verbatim at info level."""
    process_manager = test_process_manager_creator(use_testing_queues=True)
    monitor_thread, *_ = test_monitor(process_manager)
    spied_info = mocker.patch.object(process_monitor.logger, "info", autospec=True)
    comm_queue = process_manager.queue_container().get_communication_queue_from_file_writer_to_main()
    test_comm = {
        "communication_type": "command_receipt",
        "command": "stop_recording",
        "timepoint_to_stop_recording_at": 223,
    }
    comm_queue.put_nowait(test_comm)
    assert is_queue_eventually_not_empty(comm_queue) is True
    invoke_process_run_and_check_errors(monitor_thread)
    assert is_queue_eventually_empty(comm_queue) is True
    spied_info.assert_called_once_with(f"Communication from the File Writer: {test_comm}")
def test_MantarrayProcessesMonitor__logs_messages_from_data_analyzer(
    mocker, test_process_manager_creator, test_monitor
):
    """A message on the data-analyzer-to-main queue is consumed by one
    monitor iteration and logged verbatim at info level."""
    test_process_manager = test_process_manager_creator(use_testing_queues=True)
    monitor_thread, *_ = test_monitor(test_process_manager)
    mocked_logger = mocker.patch.object(process_monitor.logger, "info", autospec=True)
    data_analyzer_to_main = (
        test_process_manager.queue_container().get_communication_queue_from_data_analyzer_to_main()
    )
    expected_comm = {
        "communication_type": "finalized_data",
        "well_index": 0,
        "data": np.zeros((2, 10)),
    }
    data_analyzer_to_main.put_nowait(expected_comm)
    assert is_queue_eventually_not_empty(data_analyzer_to_main) is True
    invoke_process_run_and_check_errors(monitor_thread)
    assert is_queue_eventually_empty(data_analyzer_to_main) is True
    mocked_logger.assert_called_once_with(f"Communication from the Data Analyzer: {expected_comm}")
def test_MantarrayProcessesMonitor__pulls_outgoing_data_from_data_analyzer_and_makes_it_available_to_server(
    mocker, test_process_manager_creator, test_monitor
):
    """While live view is active, DA output is forwarded to the server data queue."""
    test_process_manager = test_process_manager_creator(use_testing_queues=True)
    monitor_under_test, shared_values_dict, *_ = test_monitor(test_process_manager)
    shared_values_dict["system_status"] = LIVE_VIEW_ACTIVE_STATE
    queue_container = test_process_manager.queue_container()
    da_data_out_queue = queue_container.get_data_analyzer_board_queues()[0][1]
    pm_data_out_queue = queue_container.get_data_queue_to_server()
    # Dummy payload — the monitor passes it through opaquely, so any JSON string works.
    expected_json_data = json.dumps({"well": 0, "data": [1, 2, 3, 4, 5]})
    da_data_out_queue.put_nowait(expected_json_data)
    confirm_queue_is_eventually_of_size(
        da_data_out_queue, 1, sleep_after_confirm_seconds=QUEUE_CHECK_TIMEOUT_SECONDS
    )
    invoke_process_run_and_check_errors(monitor_under_test)
    # Item should have moved from the DA output queue to the server queue unchanged.
    confirm_queue_is_eventually_empty(da_data_out_queue)
    confirm_queue_is_eventually_of_size(pm_data_out_queue, 1)
    assert pm_data_out_queue.get(timeout=QUEUE_CHECK_TIMEOUT_SECONDS) == expected_json_data
def test_MantarrayProcessesMonitor__logs_errors_from_instrument_comm_process(
    mocker, test_process_manager_creator, test_monitor
):
    """An error from the instrument comm subprocess is logged at error level."""
    test_process_manager = test_process_manager_creator(use_testing_queues=True)
    monitor_under_test, *_ = test_monitor(test_process_manager)
    mocked_logger = mocker.patch.object(process_monitor.logger, "error", autospec=True)
    # Stub out teardown so the monitor doesn't actually join subprocesses.
    mocker.patch.object(test_process_manager, "hard_stop_and_join_processes", autospec=True)
    error_queue = (
        test_process_manager.queue_container().get_instrument_communication_error_queue()
    )
    expected_error = ValueError("something wrong")
    expected_stack_trace = "my stack trace"
    expected_message = f"Error raised by subprocess {test_process_manager.get_instrument_process()}\n{expected_stack_trace}\n{expected_error}"
    error_queue.put_nowait((expected_error, expected_stack_trace))
    assert is_queue_eventually_not_empty(error_queue) is True
    invoke_process_run_and_check_errors(monitor_under_test)
    assert is_queue_eventually_empty(error_queue) is True
    mocked_logger.assert_any_call(expected_message)
def test_MantarrayProcessesMonitor__logs_errors_from_file_writer(
    mocker, test_process_manager_creator, test_monitor
):
    """An error from the file writer subprocess is logged at error level."""
    test_process_manager = test_process_manager_creator(use_testing_queues=True)
    monitor_under_test, *_ = test_monitor(test_process_manager)
    mocked_logger = mocker.patch.object(process_monitor.logger, "error", autospec=True)
    # Stub out teardown so the monitor doesn't actually join subprocesses.
    mocker.patch.object(test_process_manager, "hard_stop_and_join_processes", autospec=True)
    error_queue = test_process_manager.queue_container().get_file_writer_error_queue()
    expected_error = ValueError("something wrong when writing file")
    expected_stack_trace = "my stack trace from writing a file"
    expected_message = f"Error raised by subprocess {test_process_manager.get_file_writer_process()}\n{expected_stack_trace}\n{expected_error}"
    error_queue.put_nowait((expected_error, expected_stack_trace))
    assert is_queue_eventually_not_empty(error_queue) is True
    invoke_process_run_and_check_errors(monitor_under_test)
    assert is_queue_eventually_empty(error_queue) is True
    mocked_logger.assert_any_call(expected_message)
def test_MantarrayProcessesMonitor__logs_errors_from_data_analyzer(
    mocker, test_process_manager_creator, test_monitor
):
    """An error from the data analyzer subprocess is logged at error level."""
    test_process_manager = test_process_manager_creator(use_testing_queues=True)
    monitor_under_test, *_ = test_monitor(test_process_manager)
    mocked_logger = mocker.patch.object(process_monitor.logger, "error", autospec=True)
    # Stub out teardown so the monitor doesn't actually join subprocesses.
    mocker.patch.object(test_process_manager, "hard_stop_and_join_processes", autospec=True)
    error_queue = test_process_manager.queue_container().get_data_analyzer_error_queue()
    expected_error = ValueError("something wrong when analyzing data")
    expected_stack_trace = "my stack trace from analyzing some data"
    expected_message = f"Error raised by subprocess {test_process_manager.get_data_analyzer_process()}\n{expected_stack_trace}\n{expected_error}"
    put_object_into_queue_and_raise_error_if_eventually_still_empty(
        (expected_error, expected_stack_trace), error_queue
    )
    invoke_process_run_and_check_errors(monitor_under_test)
    assert is_queue_eventually_empty(error_queue) is True
    mocked_logger.assert_any_call(expected_message)
def test_MantarrayProcessesMonitor__hard_stops_and_joins_processes_and_logs_queue_items_when_error_is_raised_in_ok_comm_subprocess(
    mocker, test_process_manager_creator, test_monitor
):
    """When a subprocess error is queued, the monitor joins every subprocess,
    shuts down the server manager, and logs whatever items remained in the
    inter-process queues at teardown time.
    """
    # Sentinel items planted in each queue; all must show up in the log output.
    expected_ok_comm_item = "ok_comm_queue_item"
    expected_file_writer_item = "file_writer_queue_item"
    expected_da_item = "data_analyzer_queue_item"
    expected_server_item = "server_manager_queue_item"
    test_process_manager = test_process_manager_creator(use_testing_queues=True)
    monitor_thread, *_ = test_monitor(test_process_manager)
    mocked_logger = mocker.patch.object(process_monitor.logger, "error", autospec=True)
    okc_process = test_process_manager.get_instrument_process()
    fw_process = test_process_manager.get_file_writer_process()
    da_process = test_process_manager.get_data_analyzer_process()
    server_manager = test_process_manager.get_server_manager()
    # Mock join/shutdown so the teardown path can be asserted without real process work.
    mocked_okc_join = mocker.patch.object(okc_process, "join", autospec=True)
    mocked_fw_join = mocker.patch.object(fw_process, "join", autospec=True)
    mocked_da_join = mocker.patch.object(da_process, "join", autospec=True)
    mocked_shutdown_server = mocker.patch.object(server_manager, "shutdown_server", autospec=True)
    # NOTE(review): despite the variable name, this fetches the *data analyzer*
    # error queue; presumably any subprocess error queue triggers the same
    # teardown path — confirm whether get_instrument_communication_error_queue()
    # was intended here given the test name mentions the ok_comm subprocess.
    ok_comm_error_queue = test_process_manager.queue_container().get_data_analyzer_error_queue()
    put_object_into_queue_and_raise_error_if_eventually_still_empty(
        ("error", "stack_trace"), ok_comm_error_queue
    )
    instrument_to_main = test_process_manager.queue_container().get_communication_to_instrument_comm_queue(0)
    put_object_into_queue_and_raise_error_if_eventually_still_empty(expected_ok_comm_item, instrument_to_main)
    file_writer_to_main = (
        test_process_manager.queue_container().get_communication_queue_from_main_to_file_writer()
    )
    put_object_into_queue_and_raise_error_if_eventually_still_empty(
        expected_file_writer_item, file_writer_to_main
    )
    data_analyzer_to_main = (
        test_process_manager.queue_container().get_communication_queue_from_main_to_data_analyzer()
    )
    put_object_into_queue_and_raise_error_if_eventually_still_empty(expected_da_item, data_analyzer_to_main)
    server_to_main = test_process_manager.queue_container().get_communication_queue_from_server_to_main()
    put_object_into_queue_and_raise_error_if_eventually_still_empty(expected_server_item, server_to_main)
    invoke_process_run_and_check_errors(monitor_thread)
    mocked_okc_join.assert_called_once()
    mocked_fw_join.assert_called_once()
    mocked_da_join.assert_called_once()
    mocked_shutdown_server.assert_called_once()
    # Second error-level log call carries the remaining-queue-items dump.
    actual = mocked_logger.call_args_list[1][0][0]
    assert "Remaining items in process queues: {" in actual
    assert expected_ok_comm_item in actual
    assert expected_file_writer_item in actual
    assert expected_da_item in actual
    assert expected_server_item in actual
@freeze_time(datetime.datetime(year=2020, month=2, day=27, hour=12, minute=14, second=22, microsecond=336597))
def test_MantarrayProcessesMonitor__updates_timestamp_in_shared_values_dict_after_receiving_communication_from_start_acquisition(
    test_monitor, test_process_manager_creator
):
    """Starting managed acquisition records the acquisition-start timestamp.

    freeze_time pins 'now' so the timestamp the monitor stores can be compared
    against an exact datetime value.
    """
    test_process_manager = test_process_manager_creator(use_testing_queues=True)
    monitor_thread, shared_values_dict, *_ = test_monitor(test_process_manager)
    queue_command_to_instrument_comm(get_mutable_copy_of_START_MANAGED_ACQUISITION_COMMUNICATION())
    comm_to_instrument_comm = (
        test_process_manager.queue_container().get_communication_to_instrument_comm_queue(0)
    )
    assert is_queue_eventually_not_empty(comm_to_instrument_comm) is True
    ok_comm_process = test_process_manager.get_instrument_process()
    simulator = FrontPanelSimulator({})
    simulator.initialize_board()
    ok_comm_process.set_board_connection(0, simulator)
    # Run the instrument process to handle the start command, then the monitor
    # to process the response message it produced.
    invoke_process_run_and_check_errors(ok_comm_process)
    invoke_process_run_and_check_errors(monitor_thread)
    assert shared_values_dict["utc_timestamps_of_beginning_of_data_acquisition"][0] == datetime.datetime(
        year=2020, month=2, day=27, hour=12, minute=14, second=22, microsecond=336597
    )
def test_MantarrayProcessesMonitor__correctly_sets_system_status_to_live_view_active_only_when_initial_required_number_of_data_dumps_become_available_from_data_analyzer(
    test_monitor, test_process_manager_creator
):
    """Status flips BUFFERING -> LIVE_VIEW_ACTIVE only on the second data dump,
    and a dump arriving while RECORDING leaves the status untouched.
    """
    test_process_manager = test_process_manager_creator(use_testing_queues=True)
    monitor_thread, shared_values_dict, *_ = test_monitor(test_process_manager)
    data_analyzer_process = test_process_manager.get_data_analyzer_process()
    da_to_main_queue = (
        test_process_manager.queue_container().get_communication_queue_from_data_analyzer_to_main()
    )
    dummy_data = {
        "well5": [1, 2, 3],
        "earliest_timepoint": 1,
        "latest_timepoint": 3,
    }
    shared_values_dict["system_status"] = BUFFERING_STATE
    # First dump: still buffering.
    data_analyzer_process._dump_data_into_queue(dummy_data)  # pylint:disable=protected-access
    assert is_queue_eventually_not_empty(da_to_main_queue) is True
    invoke_process_run_and_check_errors(monitor_thread)
    assert shared_values_dict["system_status"] == BUFFERING_STATE
    # Second dump: buffering requirement met, live view becomes active.
    data_analyzer_process._dump_data_into_queue(dummy_data)  # pylint:disable=protected-access
    assert is_queue_eventually_not_empty(da_to_main_queue) is True
    invoke_process_run_and_check_errors(monitor_thread)
    assert shared_values_dict["system_status"] == LIVE_VIEW_ACTIVE_STATE
    # A dump during recording must not change the status.
    shared_values_dict["system_status"] = RECORDING_STATE
    data_analyzer_process._dump_data_into_queue(dummy_data)  # pylint:disable=protected-access
    assert is_queue_eventually_not_empty(da_to_main_queue) is True
    invoke_process_run_and_check_errors(monitor_thread)
    assert shared_values_dict["system_status"] == RECORDING_STATE
def test_MantarrayProcessesMonitor__sets_system_status_to_server_ready_after_subprocesses_finish_start_up(
    test_monitor, test_process_manager_creator, mocker
):
    """Status becomes SERVER_READY only once every subprocess reports start-up done.

    The side_effect lists are consumed one entry per monitor iteration:
    first pass the data analyzer is not ready, second pass all are ready.
    """
    test_process_manager = test_process_manager_creator(use_testing_queues=True)
    monitor_thread, shared_values_dict, *_ = test_monitor(test_process_manager)
    okc_process = test_process_manager.get_instrument_process()
    fw_process = test_process_manager.get_file_writer_process()
    da_process = test_process_manager.get_data_analyzer_process()
    mocked_okc_started = mocker.patch.object(okc_process, "is_start_up_complete", side_effect=[True, True])
    mocked_fw_started = mocker.patch.object(fw_process, "is_start_up_complete", side_effect=[True, True])
    mocked_da_started = mocker.patch.object(da_process, "is_start_up_complete", side_effect=[False, True])
    invoke_process_run_and_check_errors(monitor_thread)
    assert mocked_okc_started.call_count == 1
    assert mocked_fw_started.call_count == 1
    assert mocked_da_started.call_count == 1
    # Data analyzer not ready yet, so status stays in initializing.
    assert shared_values_dict["system_status"] == SERVER_INITIALIZING_STATE
    invoke_process_run_and_check_errors(monitor_thread)
    assert mocked_okc_started.call_count == 2
    assert mocked_fw_started.call_count == 2
    assert mocked_da_started.call_count == 2
    assert shared_values_dict["system_status"] == SERVER_READY_STATE
def test_MantarrayProcessesMonitor__does_not_check_start_up_status_after_subprocesses_finish_start_up(
    test_monitor, test_process_manager_creator, mocker
):
    """Once the system is past server initialization, the monitor stops polling
    each subprocess's is_start_up_complete entirely.
    """
    test_process_manager = test_process_manager_creator(use_testing_queues=True)
    monitor_thread, shared_values_dict, *_ = test_monitor(test_process_manager)
    # Any post-start-up status works; CALIBRATION_NEEDED is used here.
    expected_system_status = CALIBRATION_NEEDED_STATE
    shared_values_dict["system_status"] = expected_system_status
    okc_process = test_process_manager.get_instrument_process()
    fw_process = test_process_manager.get_file_writer_process()
    da_process = test_process_manager.get_data_analyzer_process()
    spied_okc_started = mocker.spy(okc_process, "is_start_up_complete")
    spied_fw_started = mocker.spy(fw_process, "is_start_up_complete")
    spied_da_started = mocker.spy(da_process, "is_start_up_complete")
    invoke_process_run_and_check_errors(monitor_thread)
    # No polling at all, and the status is left unchanged.
    assert spied_okc_started.call_count == 0
    assert spied_fw_started.call_count == 0
    assert spied_da_started.call_count == 0
    assert shared_values_dict["system_status"] == expected_system_status
def test_MantarrayProcessesMonitor__sets_in_simulation_mode_to_false_when_connected_to_real_board(
    patch_connection_to_board, test_monitor, test_process_manager_creator
):
    """A real board connection should clear the simulation flag in shared state."""
    test_process_manager = test_process_manager_creator(use_testing_queues=True)
    monitor_under_test, shared_values_dict, *_ = test_monitor(test_process_manager)
    instrument_process = test_process_manager.get_instrument_process()
    queue_container = test_process_manager.queue_container()
    comm_to_main = queue_container.get_communication_queue_from_instrument_comm_to_main(0)
    instrument_process.create_connections_to_all_available_boards()
    assert is_queue_eventually_not_empty(comm_to_main) is True
    invoke_process_run_and_check_errors(monitor_under_test)
    assert shared_values_dict["in_simulation_mode"] is False
def test_MantarrayProcessesMonitor__sets_in_simulation_mode_to_true_when_connected_to_simulator(
    test_monitor, test_process_manager_creator
):
    """Connecting to a simulated board should set the simulation flag."""
    test_process_manager = test_process_manager_creator(use_testing_queues=True)
    monitor_under_test, shared_values_dict, *_ = test_monitor(test_process_manager)
    instrument_process = test_process_manager.get_instrument_process()
    comm_to_main = (
        test_process_manager.queue_container().get_communication_queue_from_instrument_comm_to_main(0)
    )
    instrument_process.create_connections_to_all_available_boards()
    assert is_queue_eventually_not_empty(comm_to_main) is True
    invoke_process_run_and_check_errors(monitor_under_test)
    assert shared_values_dict["in_simulation_mode"] is True
def test_MantarrayProcessesMonitor__sets_system_status_to_needs_calibration_after_start_up_script_completes(
    test_monitor, test_process_manager_creator, mocker
):
    """Completing the xem start-up script moves status to CALIBRATION_NEEDED."""
    # Point the script loader at a short test script so the run is deterministic.
    mocked_path_str = os.path.join("tests", "test_xem_scripts", "xem_test_start_up.txt")
    mocker.patch.object(ok_comm, "resource_path", autospec=True, return_value=mocked_path_str)
    test_process_manager = test_process_manager_creator(use_testing_queues=True)
    monitor_thread, shared_values_dict, *_ = test_monitor(test_process_manager)
    ok_comm_process = test_process_manager.get_instrument_process()
    simulator = FrontPanelSimulator({})
    simulator.initialize_board()
    ok_comm_process.set_board_connection(0, simulator)
    from_instrument_comm_queue = (
        test_process_manager.queue_container().get_communication_queue_from_instrument_comm_to_main(0)
    )
    to_instrument_comm_queue = (
        test_process_manager.queue_container().get_communication_to_instrument_comm_queue(0)
    )
    to_instrument_comm_queue.put_nowait({"communication_type": "xem_scripts", "script_type": "start_up"})
    assert is_queue_eventually_not_empty(to_instrument_comm_queue) is True
    invoke_process_run_and_check_errors(ok_comm_process)
    assert is_queue_eventually_not_empty(from_instrument_comm_queue) is True
    # Tanner (6/2/20): number of iterations should be 3 here because xem_scripts sends 3 messages to main, and the third one will contain the system status update
    invoke_process_run_and_check_errors(monitor_thread, num_iterations=3)
    assert shared_values_dict["system_status"] == CALIBRATION_NEEDED_STATE
def test_MantarrayProcessesMonitor__sets_system_status_to_calibrated_after_calibration_script_completes(
    test_monitor, test_process_manager_creator, mocker
):
    """Completing the calibration xem script moves status to CALIBRATED."""
    # Point the script loader at a short test script so the run is deterministic.
    mocked_path_str = os.path.join("tests", "test_xem_scripts", "xem_test_start_calibration.txt")
    mocker.patch.object(ok_comm, "resource_path", autospec=True, return_value=mocked_path_str)
    test_process_manager = test_process_manager_creator(use_testing_queues=True)
    monitor_thread, shared_values_dict, *_ = test_monitor(test_process_manager)
    ok_comm_process = test_process_manager.get_instrument_process()
    simulator = RunningFIFOSimulator()
    simulator.initialize_board()
    ok_comm_process.set_board_connection(0, simulator)
    # NOTE(review): this re-fetch of the instrument process is redundant with
    # the lookup above — harmless, but could be removed.
    ok_comm_process = test_process_manager.get_instrument_process()
    from_instrument_comm_queue = (
        test_process_manager.queue_container().get_communication_queue_from_instrument_comm_to_main(0)
    )
    to_instrument_comm_queue = (
        test_process_manager.queue_container().get_communication_to_instrument_comm_queue(0)
    )
    to_instrument_comm_queue.put_nowait(
        {"communication_type": "xem_scripts", "script_type": "start_calibration"}
    )
    assert is_queue_eventually_not_empty(to_instrument_comm_queue) is True
    invoke_process_run_and_check_errors(ok_comm_process)
    assert is_queue_eventually_not_empty(from_instrument_comm_queue) is True
    # Tanner (6/29/20): number of iterations should be 51 here because xem_scripts sends 51 total messages, the last one containing the system status update
    invoke_process_run_and_check_errors(monitor_thread, num_iterations=51)
    assert shared_values_dict["system_status"] == CALIBRATED_STATE
def test_MantarrayProcessesMonitor__sets_system_status_to_calibrated_after_managed_acquisition_stops__and_resets_data_dump_buffer_size(
    test_monitor, test_process_manager_creator, mocker
):
    """Stopping managed acquisition returns the status to CALIBRATED and zeroes
    the monitor's internal data-dump buffer counter.
    """
    test_process_manager = test_process_manager_creator(use_testing_queues=True)
    monitor_thread, shared_values_dict, *_ = test_monitor(test_process_manager)
    # Pre-fill the private counter so the reset-to-zero behavior is observable.
    monitor_thread._data_dump_buffer_size = OUTGOING_DATA_BUFFER_SIZE  # pylint:disable=protected-access
    ok_comm_process = test_process_manager.get_instrument_process()
    from_instrument_comm_queue = (
        test_process_manager.queue_container().get_communication_queue_from_instrument_comm_to_main(0)
    )
    to_instrument_comm_queue = (
        test_process_manager.queue_container().get_communication_to_instrument_comm_queue(0)
    )
    simulator = FrontPanelSimulator({})
    simulator.initialize_board()
    simulator.start_acquisition()
    ok_comm_process.set_board_connection(0, simulator)
    to_instrument_comm_queue.put_nowait(STOP_MANAGED_ACQUISITION_COMMUNICATION)
    assert is_queue_eventually_not_empty(to_instrument_comm_queue) is True
    invoke_process_run_and_check_errors(ok_comm_process)
    assert is_queue_eventually_not_empty(from_instrument_comm_queue) is True
    invoke_process_run_and_check_errors(monitor_thread)
    assert shared_values_dict["system_status"] == CALIBRATED_STATE
    assert monitor_thread._data_dump_buffer_size == 0  # pylint:disable=protected-access
def test_MantarrayProcessesMonitor__stores_device_information_after_connection__in_beta_1_mode(
    test_monitor, test_process_manager_creator
):
    """Board-connection metadata lands in the shared values dict (beta 1)."""
    test_process_manager = test_process_manager_creator(use_testing_queues=True)
    monitor_under_test, shared_values_dict, *_ = test_monitor(test_process_manager)
    instrument_process = test_process_manager.get_instrument_process()
    comm_to_main = (
        test_process_manager.queue_container().get_communication_queue_from_instrument_comm_to_main(0)
    )
    instrument_process.create_connections_to_all_available_boards()
    assert is_queue_eventually_not_empty(comm_to_main) is True
    invoke_process_run_and_check_errors(monitor_under_test)
    # Values should match the simulator's defaults for board 0.
    board_idx = 0
    assert shared_values_dict["xem_serial_number"][board_idx] == RunningFIFOSimulator.default_xem_serial_number
    assert (
        shared_values_dict["mantarray_serial_number"][board_idx]
        == RunningFIFOSimulator.default_mantarray_serial_number
    )
    assert (
        shared_values_dict["mantarray_nickname"][board_idx] == RunningFIFOSimulator.default_mantarray_nickname
    )
def test_MantarrayProcessesMonitor__sets_in_simulation_mode_after_connection__in_beta_2_mode(
    test_monitor, test_process_manager_creator
):
    """In beta 2, connecting to the simulator sets the simulation flag."""
    test_process_manager = test_process_manager_creator(beta_2_mode=True, use_testing_queues=True)
    monitor_under_test, shared_values_dict, *_ = test_monitor(test_process_manager)
    instrument_process = test_process_manager.get_instrument_process()
    comm_to_main = (
        test_process_manager.queue_container().get_communication_queue_from_instrument_comm_to_main(0)
    )
    instrument_process.create_connections_to_all_available_boards()
    assert is_queue_eventually_not_empty(comm_to_main) is True
    invoke_process_run_and_check_errors(monitor_under_test)
    assert shared_values_dict["in_simulation_mode"] is True
def test_MantarrayProcessesMonitor__stores_device_information_from_metadata_comm__and_updates_system_status(
    test_monitor, test_process_manager_creator
):
    """A get_metadata response populates device info in shared state and moves
    the system status from INSTRUMENT_INITIALIZING to CALIBRATION_NEEDED.
    """
    test_process_manager = test_process_manager_creator(beta_2_mode=True, use_testing_queues=True)
    monitor_thread, shared_values_dict, *_ = test_monitor(test_process_manager)
    shared_values_dict["system_status"] = INSTRUMENT_INITIALIZING_STATE
    board_idx = 0
    instrument_comm_to_main_queue = (
        test_process_manager.queue_container().get_communication_queue_from_instrument_comm_to_main(board_idx)
    )
    metadata_comm_dict = {
        "communication_type": "metadata_comm",
        "command": "get_metadata",
        "metadata": MantarrayMcSimulator.default_metadata_values,
        "board_index": board_idx,
    }
    instrument_comm_to_main_queue.put_nowait(metadata_comm_dict)
    assert is_queue_eventually_not_empty(instrument_comm_to_main_queue) is True
    invoke_process_run_and_check_errors(
        monitor_thread,
        num_iterations=2,  # one cycle to retrieve metadata, one cycle to update system_status
    )
    assert shared_values_dict["system_status"] == CALIBRATION_NEEDED_STATE
    # Stored values should mirror the simulator's default metadata.
    assert (
        shared_values_dict["main_firmware_version"][board_idx]
        == MantarrayMcSimulator.default_firmware_version
    )
    assert (
        shared_values_dict["mantarray_serial_number"][board_idx]
        == MantarrayMcSimulator.default_mantarray_serial_number
    )
    assert (
        shared_values_dict["mantarray_nickname"][board_idx] == MantarrayMcSimulator.default_mantarray_nickname
    )
    assert (
        shared_values_dict["instrument_metadata"][board_idx] == MantarrayMcSimulator.default_metadata_values
    )
def test_MantarrayProcessesMonitor__calls_boot_up_only_once_after_subprocesses_start_if_boot_up_after_processes_start_is_True(
    test_process_manager_creator, mocker
):
    """boot_up_instrument fires on the first monitor iteration only, never again."""
    test_process_manager = test_process_manager_creator(use_testing_queues=True)
    mocked_boot_up = mocker.patch.object(test_process_manager, "boot_up_instrument")
    shared_values_dict = {
        "system_status": SERVER_READY_STATE,
        "beta_2_mode": False,
    }
    monitor = MantarrayProcessesMonitor(
        shared_values_dict,
        test_process_manager,
        TestingQueue(),
        threading.Lock(),
        boot_up_after_processes_start=True,
    )
    # A second iteration must not trigger another boot-up call.
    for _ in range(2):
        invoke_process_run_and_check_errors(monitor)
        assert mocked_boot_up.call_count == 1
def test_MantarrayProcessesMonitor__doesnt_call_boot_up_after_subprocesses_start_if_boot_up_after_processes_start_is_False(
    test_process_manager_creator, mocker
):
    """With the boot-up flag off, the monitor never boots the instrument."""
    test_process_manager = test_process_manager_creator(use_testing_queues=True)
    mocked_boot_up = mocker.patch.object(test_process_manager, "boot_up_instrument")
    shared_values_dict = {
        "system_status": SERVER_READY_STATE,
        "beta_2_mode": False,
    }
    monitor = MantarrayProcessesMonitor(
        shared_values_dict,
        test_process_manager,
        TestingQueue(),
        threading.Lock(),
        boot_up_after_processes_start=False,
    )
    invoke_process_run_and_check_errors(monitor)
    assert mocked_boot_up.call_count == 0
def test_MantarrayProcessesMonitor__calls_boot_up_instrument_with_load_firmware_file_False_if_given_in_init__when_boot_up_after_processes_start_is_True(
    test_process_manager_creator, mocker
):
    """The load_firmware_file=False init flag is forwarded to boot_up_instrument."""
    test_process_manager = test_process_manager_creator(use_testing_queues=True)
    mocked_boot_up = mocker.patch.object(test_process_manager, "boot_up_instrument")
    shared_values_dict = {
        "system_status": SERVER_READY_STATE,
        "beta_2_mode": False,
    }
    monitor = MantarrayProcessesMonitor(
        shared_values_dict,
        test_process_manager,
        TestingQueue(),
        threading.Lock(),
        boot_up_after_processes_start=True,
        load_firmware_file=False,
    )
    invoke_process_run_and_check_errors(monitor)
    assert mocked_boot_up.call_count == 1
    # The kwarg must be passed through exactly as configured.
    assert mocked_boot_up.call_args[1]["load_firmware_file"] is False
def test_MantarrayProcessesMonitor__stores_firmware_versions_during_instrument_boot_up(
    test_monitor, test_process_manager_creator, mocker
):
    """During boot-up the monitor records main and sleep firmware versions."""
    test_process_manager = test_process_manager_creator(use_testing_queues=True)
    monitor_thread, shared_values_dict, *_ = test_monitor(test_process_manager)
    # Tanner (12/28/20): RunningFIFOSimulator ignores the name of bit file given, so we can mock this out so it will pass in Cloud9
    mocker.patch.object(process_manager, "get_latest_firmware", autospec=True, return_value=None)
    okc_process = test_process_manager.get_instrument_process()
    to_instrument_comm_queue = (
        test_process_manager.queue_container().get_communication_to_instrument_comm_queue(0)
    )
    from_instrument_comm_queue = (
        test_process_manager.queue_container().get_communication_queue_from_instrument_comm_to_main(0)
    )
    simulator = RunningFIFOSimulator()
    okc_process.set_board_connection(0, simulator)
    test_process_manager.boot_up_instrument()
    assert is_queue_eventually_not_empty(to_instrument_comm_queue) is True
    invoke_process_run_and_check_errors(okc_process)
    assert is_queue_eventually_not_empty(from_instrument_comm_queue) is True
    invoke_process_run_and_check_errors(monitor_thread)
    assert shared_values_dict["main_firmware_version"][0] == RunningFIFOSimulator.default_firmware_version
    assert shared_values_dict["sleep_firmware_version"][0] == "0.0.0"
def test_MantarrayProcessesMonitor__scrubs_username_from_bit_file_name_in_get_status_log_message(
    test_monitor, test_process_manager_creator, mocker
):
    """The username path segment in a get_status bit-file path is masked with
    asterisks (same length as the original name) before logging.
    """
    test_process_manager = test_process_manager_creator(use_testing_queues=True)
    monitor_thread, *_ = test_monitor(test_process_manager)
    spied_info = mocker.spy(process_monitor.logger, "info")
    from_instrument_comm_queue = (
        test_process_manager.queue_container().get_communication_queue_from_instrument_comm_to_main(0)
    )
    test_communication = {
        "communication_type": "debug_console",
        "command": "get_status",
        "response": {
            "is_spi_running": False,
            "is_board_initialized": True,
            "bit_file_name": r"Users\username\AppData\main.bit",
        },
    }
    # Deep copy so the monitor's in-place scrubbing can't mutate the fixture dict.
    put_object_into_queue_and_raise_error_if_eventually_still_empty(
        copy.deepcopy(test_communication), from_instrument_comm_queue
    )
    invoke_process_run_and_check_errors(monitor_thread)
    expected_scrubbed_path = r"Users\********\AppData\main.bit"
    assert expected_scrubbed_path in spied_info.call_args[0][0]
def test_MantarrayProcessesMonitor__scrubs_username_from_bit_file_name_in_boot_up_instrument_log_message(
    test_monitor, test_process_manager_creator, mocker
):
    """The username path segment in a boot-up bit-file path is masked with
    asterisks (same length as the original name) before logging.
    """
    test_process_manager = test_process_manager_creator(use_testing_queues=True)
    monitor_thread, *_ = test_monitor(test_process_manager)
    spied_info = mocker.spy(process_monitor.logger, "info")
    from_instrument_comm_queue = (
        test_process_manager.queue_container().get_communication_queue_from_instrument_comm_to_main(0)
    )
    test_communication = {
        "communication_type": "boot_up_instrument",
        "command": "initialize_board",
        "suppress_error": False,
        "allow_board_reinitialization": False,
        "board_index": 0,
        "main_firmware_version": "1.1.1",
        "sleep_firmware_version": "0.0.0",
        "bit_file_name": r"Users\username1\AppData\main.bit",
    }
    # Deep copy so the monitor's in-place scrubbing can't mutate the fixture dict.
    put_object_into_queue_and_raise_error_if_eventually_still_empty(
        copy.deepcopy(test_communication), from_instrument_comm_queue
    )
    invoke_process_run_and_check_errors(monitor_thread)
    expected_scrubbed_path = r"Users\*********\AppData\main.bit"
    assert expected_scrubbed_path in spied_info.call_args[0][0]
def test_MantarrayProcessesMonitor__scrubs_username_from_finalized_recording_files_in_log_message(
    test_monitor, test_process_manager_creator, mocker
):
    """The username segment in a finalized-recording file path is masked with
    asterisks (same length as the original name) before logging.
    """
    test_process_manager = test_process_manager_creator(use_testing_queues=True)
    monitor_thread, *_ = test_monitor(test_process_manager)
    spied_info = mocker.spy(process_monitor.logger, "info")
    from_file_writer_queue = (
        test_process_manager.queue_container().get_communication_queue_from_file_writer_to_main()
    )
    test_communication = {
        "communication_type": "file_finalized",
        "file_path": r"Users\Curi Customer\AppData\Roaming\MantarrayController\recordings\recorded_file.h5",
    }
    # Deep copy so the monitor's in-place scrubbing can't mutate the fixture dict.
    put_object_into_queue_and_raise_error_if_eventually_still_empty(
        copy.deepcopy(test_communication), from_file_writer_queue
    )
    invoke_process_run_and_check_errors(monitor_thread)
    expected_scrubbed_path = (
        r"Users\*************\AppData\Roaming\MantarrayController\recordings\recorded_file.h5"
    )
    assert expected_scrubbed_path in spied_info.call_args[0][0]
def test_MantarrayProcessesMonitor__sends_two_barcode_poll_commands_to_OKComm_at_correct_time_intervals(
    test_monitor, test_process_manager_creator, mocker
):
    """The monitor issues a start_scan command each time BARCODE_POLL_PERIOD
    elapses since the last barcode clear.

    The two mocked side_effect sequences are consumed one entry per monitor
    iteration, so statement order in this test is load-bearing.
    """
    test_process_manager = test_process_manager_creator(use_testing_queues=True)
    monitor_thread, *_ = test_monitor(test_process_manager)
    to_instrument_comm_queue = (
        test_process_manager.queue_container().get_communication_to_instrument_comm_queue(0)
    )
    expected_time_1 = 0
    expected_time_2 = 15
    mocker.patch.object(
        process_monitor,
        "_get_barcode_clear_time",
        autospec=True,
        side_effect=[expected_time_1, expected_time_2, None],
    )
    # Alternate just-below / at the poll period so only every other iteration scans.
    mocked_get_dur = mocker.patch.object(
        process_monitor,
        "_get_dur_since_last_barcode_clear",
        autospec=True,
        side_effect=[
            BARCODE_POLL_PERIOD - 1,
            BARCODE_POLL_PERIOD,
            BARCODE_POLL_PERIOD - 1,
            BARCODE_POLL_PERIOD,
        ],
    )
    expected_comm = {
        "communication_type": "barcode_comm",
        "command": "start_scan",
    }
    # Iteration 1: period not yet elapsed, no command.
    invoke_process_run_and_check_errors(monitor_thread)
    confirm_queue_is_eventually_empty(to_instrument_comm_queue)
    # Iteration 2: period elapsed, first start_scan sent.
    invoke_process_run_and_check_errors(monitor_thread)
    confirm_queue_is_eventually_of_size(to_instrument_comm_queue, 1)
    actual = to_instrument_comm_queue.get(timeout=QUEUE_CHECK_TIMEOUT_SECONDS)
    assert actual == expected_comm
    # Iterations 3-4: same pattern against the second clear time.
    invoke_process_run_and_check_errors(monitor_thread)
    confirm_queue_is_eventually_empty(to_instrument_comm_queue)
    invoke_process_run_and_check_errors(monitor_thread)
    confirm_queue_is_eventually_of_size(to_instrument_comm_queue, 1)
    actual = to_instrument_comm_queue.get(timeout=QUEUE_CHECK_TIMEOUT_SECONDS)
    assert actual == expected_comm
    # Duration checks must have been made against the matching clear times.
    assert mocked_get_dur.call_args_list[0][0][0] == expected_time_1
    assert mocked_get_dur.call_args_list[1][0][0] == expected_time_1
    assert mocked_get_dur.call_args_list[2][0][0] == expected_time_2
    assert mocked_get_dur.call_args_list[3][0][0] == expected_time_2
@pytest.mark.parametrize(
    "expected_barcode,test_valid,expected_status,test_description",
    [
        ("MA200190000", True, BARCODE_VALID_UUID, "stores new valid barcode"),
        ("M$200190000", False, BARCODE_INVALID_UUID, "stores new invalid barcode"),
        ("", None, BARCODE_UNREADABLE_UUID, "stores no barcode"),
    ],
)
def test_MantarrayProcessesMonitor__stores_barcode_sent_from_instrument_comm__and_no_previously_stored_barcode(
    expected_barcode,
    test_valid,
    expected_status,
    test_description,
    test_monitor,
    test_process_manager_creator,
):
    """A barcode_comm message is stored with the matching status UUID when no
    barcode was previously recorded for the board.
    """
    test_process_manager = test_process_manager_creator(use_testing_queues=True)
    monitor_thread, shared_values_dict, *_ = test_monitor(test_process_manager)
    expected_board_idx = 0
    from_instrument_comm_queue = (
        test_process_manager.queue_container().get_communication_queue_from_instrument_comm_to_main(
            expected_board_idx
        )
    )
    barcode_comm = {
        "communication_type": "barcode_comm",
        "barcode": expected_barcode,
        "board_idx": expected_board_idx,
    }
    # The empty-barcode ("unreadable") case omits the "valid" key entirely.
    if test_valid is not None:
        barcode_comm["valid"] = test_valid
    if test_valid is False:
        # specifically want test_valid to be False here, not None since invalid barcodes have trailing `\x00`
        barcode_comm["barcode"] += chr(0)
    put_object_into_queue_and_raise_error_if_eventually_still_empty(barcode_comm, from_instrument_comm_queue)
    invoke_process_run_and_check_errors(monitor_thread)
    # Stored entry: barcode (trailing NUL stripped), status UUID, frontend flag set.
    assert shared_values_dict["barcodes"][expected_board_idx] == {
        "plate_barcode": expected_barcode,
        "barcode_status": expected_status,
        "frontend_needs_barcode_update": True,
    }
@pytest.mark.parametrize(
    "expected_barcode,test_valid,expected_status,test_description",
    [
        ("MA200190000", True, BARCODE_VALID_UUID, "updates to new valid barcode"),
        ("M$200190000", False, BARCODE_INVALID_UUID, "updates to new invalid barcode"),
        ("", None, BARCODE_UNREADABLE_UUID, "updates to no barcode"),
    ],
)
def test_MantarrayProcessesMonitor__updates_to_new_barcode_sent_from_instrument_comm(
    expected_barcode,
    test_valid,
    expected_status,
    test_description,
    test_monitor,
    test_process_manager_creator,
):
    """A barcode_comm message replaces a previously stored (different) barcode
    entry with the new barcode and its matching status UUID.
    """
    test_process_manager = test_process_manager_creator(use_testing_queues=True)
    monitor_thread, shared_values_dict, *_ = test_monitor(test_process_manager)
    expected_board_idx = 0
    # Pre-populate a stale entry so the update path (not the initial-store path) runs.
    shared_values_dict["barcodes"] = {
        expected_board_idx: {
            "plate_barcode": "old barcode",
            "barcode_status": None,
            "frontend_needs_barcode_update": None,
        }
    }
    from_instrument_comm_queue = (
        test_process_manager.queue_container().get_communication_queue_from_instrument_comm_to_main(
            expected_board_idx
        )
    )
    barcode_comm = {
        "communication_type": "barcode_comm",
        "barcode": expected_barcode,
        "board_idx": expected_board_idx,
    }
    # The empty-barcode ("unreadable") case omits the "valid" key entirely.
    if test_valid is not None:
        barcode_comm["valid"] = test_valid
    if test_valid is False:
        # specifically want test_valid to be False here, not None since invalid barcodes have trailing `\x00`
        barcode_comm["barcode"] += chr(0)
    put_object_into_queue_and_raise_error_if_eventually_still_empty(barcode_comm, from_instrument_comm_queue)
    invoke_process_run_and_check_errors(monitor_thread)
    assert shared_values_dict["barcodes"][expected_board_idx] == {
        "plate_barcode": expected_barcode,
        "barcode_status": expected_status,
        "frontend_needs_barcode_update": True,
    }
@pytest.mark.parametrize(
    "expected_barcode,test_valid,test_update,test_description",
    [
        ("MA200190000", True, False, "does not update to current valid barcode"),
        ("M$200190000", False, True, "does not update to current invalid barcode"),
        ("", None, False, "does not update to current empty barcode"),
    ],
)
def test_MantarrayProcessesMonitor__does_not_update_any_values_if_new_barcode_matches_current_barcode(
    expected_barcode,
    test_valid,
    test_update,
    test_description,
    test_monitor,
    test_process_manager_creator,
):
    """A barcode identical to the stored one must leave the stored entry untouched."""
    process_manager = test_process_manager_creator(use_testing_queues=True)
    monitor_thread, shared_values_dict, *_ = test_monitor(process_manager)

    board_idx = 0
    stored_entry = {
        "plate_barcode": expected_barcode,
        "barcode_status": None,
        "frontend_needs_barcode_update": test_update,
    }
    shared_values_dict["barcodes"] = {board_idx: stored_entry}
    comm_queue = process_manager.queue_container().get_communication_queue_from_instrument_comm_to_main(
        board_idx
    )

    barcode_comm = {
        "communication_type": "barcode_comm",
        "barcode": expected_barcode,
        "board_idx": board_idx,
    }
    if test_valid is not None:
        barcode_comm["valid"] = test_valid
        if test_valid is False:
            # invalid barcodes arrive from the instrument with a trailing null char appended
            barcode_comm["barcode"] += chr(0)

    put_object_into_queue_and_raise_error_if_eventually_still_empty(barcode_comm, comm_queue)
    invoke_process_run_and_check_errors(monitor_thread)

    assert shared_values_dict["barcodes"][board_idx] == stored_entry
def test_MantarrayProcessesMonitor__trims_barcode_string_before_storing_in_shared_values_dict(
    test_monitor, test_process_manager_creator
):
    """Trailing null padding on an incoming barcode is stripped before storage."""
    process_manager = test_process_manager_creator(use_testing_queues=True)
    monitor_thread, shared_values_dict, *_ = test_monitor(process_manager)

    board_idx = 0
    comm_queue = process_manager.queue_container().get_communication_queue_from_instrument_comm_to_main(
        board_idx
    )

    expected_barcode = "M020090048"
    padded_barcode_comm = {
        "communication_type": "barcode_comm",
        "barcode": expected_barcode + chr(0) * 2,  # two null padding chars to be trimmed
        "board_idx": board_idx,
    }
    put_object_into_queue_and_raise_error_if_eventually_still_empty(padded_barcode_comm, comm_queue)
    invoke_process_run_and_check_errors(monitor_thread)

    assert shared_values_dict["barcodes"][board_idx]["plate_barcode"] == expected_barcode
def test_MantarrayProcessesMonitor__redacts_mantarray_nickname_from_logged_mantarray_naming_ok_comm_messages(
    mocker, test_process_manager_creator, test_monitor
):
    """The nickname inside a logged mantarray_naming message must be redacted."""
    process_manager = test_process_manager_creator(use_testing_queues=True)
    monitor_thread, *_ = test_monitor(process_manager)

    mocked_logger = mocker.patch.object(process_monitor.logger, "info", autospec=True)
    comm_queue = process_manager.queue_container().get_communication_queue_from_instrument_comm_to_main(0)

    test_nickname = "MyMantarray"
    naming_comm = {
        "communication_type": "mantarray_naming",
        "command": "set_mantarray_nickname",
        "mantarray_nickname": test_nickname,
    }
    put_object_into_queue_and_raise_error_if_eventually_still_empty(naming_comm, comm_queue)
    invoke_process_run_and_check_errors(monitor_thread)
    confirm_queue_is_eventually_empty(comm_queue)

    # the logged copy must match the original except for the redacted nickname
    expected_comm = copy.deepcopy(naming_comm)
    expected_comm["mantarray_nickname"] = get_redacted_string(len(test_nickname))
    mocked_logger.assert_called_once_with(f"Communication from the Instrument Controller: {expected_comm}")
def test_MantarrayProcessesMonitor__redacts_mantarray_nickname_from_logged_board_connection_status_change_ok_comm_messages(
    mocker, test_process_manager_creator, test_monitor
):
    """The nickname inside a logged board_connection_status_change message must be redacted."""
    process_manager = test_process_manager_creator(use_testing_queues=True)
    monitor_thread, *_ = test_monitor(process_manager)

    mocked_logger = mocker.patch.object(process_monitor.logger, "info", autospec=True)
    comm_queue = process_manager.queue_container().get_communication_queue_from_instrument_comm_to_main(0)

    test_nickname = "MyOtherMantarray"
    connection_comm = {
        "communication_type": "board_connection_status_change",
        "board_index": 0,
        "is_connected": True,
        "mantarray_serial_number": RunningFIFOSimulator.default_mantarray_serial_number,
        "xem_serial_number": RunningFIFOSimulator.default_xem_serial_number,
        "mantarray_nickname": test_nickname,
    }
    put_object_into_queue_and_raise_error_if_eventually_still_empty(connection_comm, comm_queue)
    invoke_process_run_and_check_errors(monitor_thread)
    confirm_queue_is_eventually_empty(comm_queue)

    # the logged copy must match the original except for the redacted nickname
    expected_comm = copy.deepcopy(connection_comm)
    expected_comm["mantarray_nickname"] = get_redacted_string(len(test_nickname))
    mocked_logger.assert_called_once_with(f"Communication from the Instrument Controller: {expected_comm}")
def test_MantarrayProcessesMonitor__raises_error_if_config_dict_in_start_data_stream_command_response_from_instrument_does_not_match_expected_value(
    test_process_manager_creator, test_monitor, patch_print
):
    """A magnetometer config in the start-stream response that differs from the stored one must raise."""
    process_manager = test_process_manager_creator(use_testing_queues=True)
    monitor_thread, shared_values_dict, *_ = test_monitor(process_manager)
    ic_to_main_queue = process_manager.queue_container().get_communication_queue_from_instrument_comm_to_main(
        0
    )

    test_num_wells = 24
    expected_config_dict = create_magnetometer_config_dict(test_num_wells)
    expected_sampling_period = 15000
    shared_values_dict["magnetometer_config_dict"] = {
        "magnetometer_config": copy.deepcopy(expected_config_dict),
        "sampling_period": expected_sampling_period,
    }
    shared_values_dict["beta_2_mode"] = True
    # flip one Boolean channel entry AFTER the deepcopy above, so the config reported by the
    # instrument is guaranteed to differ from the stored one regardless of the default values
    expected_config_dict[1][SERIAL_COMM_SENSOR_AXIS_LOOKUP_TABLE["A"]["X"]] ^= True

    put_object_into_queue_and_raise_error_if_eventually_still_empty(
        {
            "communication_type": "acquisition_manager",
            "command": "start_managed_acquisition",
            "magnetometer_config": expected_config_dict,
            "sampling_period": expected_sampling_period,
            "timestamp": None,
        },
        ic_to_main_queue,
    )
    with pytest.raises(IncorrectMagnetometerConfigFromInstrumentError, match=str(expected_config_dict)):
        invoke_process_run_and_check_errors(monitor_thread)
def test_MantarrayProcessesMonitor__raises_error_if_sampling_period_in_start_data_stream_command_response_from_instrument_does_not_match_expected_value(
    test_process_manager_creator, test_monitor, patch_print
):
    """A sampling period in the start-stream response that differs from the stored one must raise."""
    process_manager = test_process_manager_creator(use_testing_queues=True)
    monitor_thread, shared_values_dict, *_ = test_monitor(process_manager)
    ic_to_main_queue = process_manager.queue_container().get_communication_queue_from_instrument_comm_to_main(
        0
    )

    test_num_wells = 24
    expected_config_dict = create_magnetometer_config_dict(test_num_wells)
    expected_sampling_period = 16000
    shared_values_dict["magnetometer_config_dict"] = {
        "magnetometer_config": copy.deepcopy(expected_config_dict),
        "sampling_period": expected_sampling_period,
    }
    shared_values_dict["beta_2_mode"] = True

    # any value other than the stored period should trigger the error
    bad_sampling_period = expected_sampling_period - 1
    put_object_into_queue_and_raise_error_if_eventually_still_empty(
        {
            "communication_type": "acquisition_manager",
            "command": "start_managed_acquisition",
            "magnetometer_config": expected_config_dict,
            "sampling_period": bad_sampling_period,
            "timestamp": None,
        },
        ic_to_main_queue,
    )
    with pytest.raises(IncorrectSamplingPeriodFromInstrumentError, match=str(bad_sampling_period)):
        invoke_process_run_and_check_errors(monitor_thread)
def test_MantarrayProcessesMonitor__drains_data_analyzer_data_out_queue_after_receiving_stop_managed_acquisition_command_receipt(
    test_process_manager_creator,
    test_monitor,
):
    """Leftover items in the DA data-out queue are drained once the stop-acquisition receipt arrives."""
    process_manager = test_process_manager_creator(use_testing_queues=True)
    monitor_thread, shared_values_dict, *_ = test_monitor(process_manager)
    shared_values_dict["system_status"] = CALIBRATED_STATE

    # leave one item in the data-out queue so the drain is observable
    da_data_out_queue = process_manager.queue_container().get_data_analyzer_board_queues()[0][1]
    put_object_into_queue_and_raise_error_if_eventually_still_empty("test_item", da_data_out_queue)

    da_to_main_queue = process_manager.queue_container().get_communication_queue_from_data_analyzer_to_main()
    put_object_into_queue_and_raise_error_if_eventually_still_empty(
        STOP_MANAGED_ACQUISITION_COMMUNICATION, da_to_main_queue
    )
    invoke_process_run_and_check_errors(monitor_thread)

    confirm_queue_is_eventually_empty(da_data_out_queue)
def test_MantarrayProcessesMonitor__updates_magnetometer_config_after_receiving_default_config_message_from_mc_comm(
    test_process_manager_creator,
    test_monitor,
):
    """A default magnetometer config from mc_comm is stored and forwarded only to the data analyzer."""
    process_manager = test_process_manager_creator(use_testing_queues=True)
    monitor_thread, shared_values_dict, *_ = test_monitor(process_manager)

    expected_magnetometer_config_dict = {
        "magnetometer_config": copy.deepcopy(DEFAULT_MAGNETOMETER_CONFIG),
        "sampling_period": DEFAULT_SAMPLING_PERIOD,
    }
    instrument_comm_to_main = (
        process_manager.queue_container().get_communication_queue_from_instrument_comm_to_main(0)
    )
    main_to_da = process_manager.queue_container().get_communication_queue_from_main_to_data_analyzer()
    main_to_ic = process_manager.queue_container().get_communication_to_instrument_comm_queue(0)

    put_object_into_queue_and_raise_error_if_eventually_still_empty(
        {
            "communication_type": "default_magnetometer_config",
            "command": "change_magnetometer_config",
            "magnetometer_config_dict": expected_magnetometer_config_dict,
        },
        instrument_comm_to_main,
    )
    invoke_process_run_and_check_errors(monitor_thread)

    # update stored in shared values
    assert shared_values_dict["magnetometer_config_dict"] == expected_magnetometer_config_dict
    # update forwarded to the data analyzer
    confirm_queue_is_eventually_of_size(main_to_da, 1)
    comm_to_da = main_to_da.get(timeout=QUEUE_CHECK_TIMEOUT_SECONDS)
    assert comm_to_da == {
        "communication_type": "acquisition_manager",
        "command": "change_magnetometer_config",
        **expected_magnetometer_config_dict,
    }
    # update not echoed back to instrument comm
    confirm_queue_is_eventually_empty(main_to_ic)
def test_MantarrayProcessesMonitor__sets_timestamp_and_stim_running_statuses_in_shared_values_dict_after_receiving_start_stimulation_command_response_from_instrument_comm(
    test_monitor, test_process_manager_creator
):
    """A start_stimulation response stores the start timestamp and per-well running flags."""
    process_manager = test_process_manager_creator(use_testing_queues=True)
    monitor_thread, shared_values_dict, *_ = test_monitor(process_manager)

    test_stim_info = create_random_stim_info()
    shared_values_dict["utc_timestamps_of_beginning_of_stimulation"] = [None]
    shared_values_dict["stimulation_info"] = test_stim_info
    instrument_comm_to_main = (
        process_manager.queue_container().get_communication_queue_from_instrument_comm_to_main(0)
    )

    expected_timestamp = datetime.datetime(
        year=2021, month=10, day=19, hour=10, minute=23, second=40, microsecond=123456
    )
    put_object_into_queue_and_raise_error_if_eventually_still_empty(
        {
            "communication_type": "stimulation",
            "command": "start_stimulation",
            "timestamp": expected_timestamp,
        },
        instrument_comm_to_main,
    )
    invoke_process_run_and_check_errors(monitor_thread)

    assert shared_values_dict["utc_timestamps_of_beginning_of_stimulation"][0] == expected_timestamp
    # a well should be flagged as running iff its protocol assignment is truthy
    expected_running_statuses = []
    for well_idx in range(24):
        well_name = GENERIC_24_WELL_DEFINITION.get_well_name_from_well_index(well_idx)
        expected_running_statuses.append(bool(test_stim_info["protocol_assignments"][well_name]))
    assert shared_values_dict["stimulation_running"] == expected_running_statuses
def test_MantarrayProcessesMonitor__clears_timestamp_and_updates_stim_running_statuses_shared_values_dict_after_receiving_stop_stimulation_command_response_from_instrument_comm(
    test_monitor, test_process_manager_creator
):
    """A stop_stimulation response clears the start timestamp and all running flags."""
    process_manager = test_process_manager_creator(use_testing_queues=True)
    monitor_thread, shared_values_dict, *_ = test_monitor(process_manager)

    test_stim_info = create_random_stim_info()
    shared_values_dict["stimulation_info"] = test_stim_info
    # seed running flags as if stimulation had been started on the assigned wells
    initial_running_statuses = []
    for well_idx in range(24):
        well_name = GENERIC_24_WELL_DEFINITION.get_well_name_from_well_index(well_idx)
        initial_running_statuses.append(bool(test_stim_info["protocol_assignments"][well_name]))
    shared_values_dict["stimulation_running"] = initial_running_statuses
    shared_values_dict["utc_timestamps_of_beginning_of_stimulation"] = [
        datetime.datetime(year=2021, month=10, day=19, hour=10, minute=31, second=21, microsecond=123456)
    ]
    instrument_comm_to_main = (
        process_manager.queue_container().get_communication_queue_from_instrument_comm_to_main(0)
    )

    stop_response = {"communication_type": "stimulation", "command": "stop_stimulation"}
    put_object_into_queue_and_raise_error_if_eventually_still_empty(stop_response, instrument_comm_to_main)
    invoke_process_run_and_check_errors(monitor_thread)

    assert shared_values_dict["utc_timestamps_of_beginning_of_stimulation"][0] is None
    assert shared_values_dict["stimulation_running"] == [False] * 24
def test_MantarrayProcessesMonitor__updates_stimulation_running_list_and_stimulation_start_time_timestamp_when_status_update_message_from_instrument_comm(
    test_monitor, test_process_manager_creator
):
    """Status-update messages clear finished wells; the timestamp is cleared only when none remain."""
    process_manager = test_process_manager_creator(use_testing_queues=True)
    monitor_thread, shared_values_dict, *_ = test_monitor(process_manager)

    test_wells_running = {0, 5, 10, 15}
    shared_values_dict["stimulation_running"] = [idx in test_wells_running for idx in range(24)]
    test_timestamp = datetime.datetime(
        year=2021, month=10, day=19, hour=12, minute=8, second=5, microsecond=123456
    )
    shared_values_dict["utc_timestamps_of_beginning_of_stimulation"] = [test_timestamp]
    instrument_comm_to_main = (
        process_manager.queue_container().get_communication_queue_from_instrument_comm_to_main(0)
    )

    # stop the first set of wells
    test_wells_to_stop_1 = {0, 10}
    put_object_into_queue_and_raise_error_if_eventually_still_empty(
        {
            "communication_type": "stimulation",
            "command": "status_update",
            "wells_done_stimulating": list(test_wells_to_stop_1),
        },
        instrument_comm_to_main,
    )
    invoke_process_run_and_check_errors(monitor_thread)
    # statuses updated correctly; start timestamp untouched while some wells still run
    still_running_wells = test_wells_running - test_wells_to_stop_1
    assert shared_values_dict["stimulation_running"] == [
        idx in still_running_wells for idx in range(24)
    ]
    assert shared_values_dict["utc_timestamps_of_beginning_of_stimulation"] == [test_timestamp]

    # stop the remaining wells
    put_object_into_queue_and_raise_error_if_eventually_still_empty(
        {
            "communication_type": "stimulation",
            "command": "status_update",
            "wells_done_stimulating": list(still_running_wells),
        },
        instrument_comm_to_main,
    )
    invoke_process_run_and_check_errors(monitor_thread)
    assert shared_values_dict["stimulation_running"] == [False] * 24
    assert shared_values_dict["utc_timestamps_of_beginning_of_stimulation"] == [None]
| [
"datetime.datetime",
"mantarray_desktop_app.MantarrayProcessesMonitor",
"xem_wrapper.FrontPanelSimulator",
"mantarray_desktop_app.create_magnetometer_config_dict",
"stdlib_utils.TestingQueue",
"threading.Lock",
"json.dumps",
"os.path.join",
"time.sleep",
"pytest.mark.parametrize",
"numpy.zeros",... | [((3377, 3400), 'pytest.mark.timeout', 'pytest.mark.timeout', (['(12)'], {}), '(12)\n', (3396, 3400), False, 'import pytest\n'), ((43284, 43601), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""expected_barcode,test_valid,expected_status,test_description"""', "[('MA200190000', True, BARCODE_VALID_UUID, 'stores new valid barcode'), (\n 'M$200190000', False, BARCODE_INVALID_UUID,\n 'stores new invalid barcode'), ('', None, BARCODE_UNREADABLE_UUID,\n 'stores no barcode')]"], {}), "(\n 'expected_barcode,test_valid,expected_status,test_description', [(\n 'MA200190000', True, BARCODE_VALID_UUID, 'stores new valid barcode'), (\n 'M$200190000', False, BARCODE_INVALID_UUID,\n 'stores new invalid barcode'), ('', None, BARCODE_UNREADABLE_UUID,\n 'stores no barcode')])\n", (43307, 43601), False, 'import pytest\n'), ((45022, 45351), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""expected_barcode,test_valid,expected_status,test_description"""', "[('MA200190000', True, BARCODE_VALID_UUID, 'updates to new valid barcode'),\n ('M$200190000', False, BARCODE_INVALID_UUID,\n 'updates to new invalid barcode'), ('', None, BARCODE_UNREADABLE_UUID,\n 'updates to no barcode')]"], {}), "(\n 'expected_barcode,test_valid,expected_status,test_description', [(\n 'MA200190000', True, BARCODE_VALID_UUID, 'updates to new valid barcode'\n ), ('M$200190000', False, BARCODE_INVALID_UUID,\n 'updates to new invalid barcode'), ('', None, BARCODE_UNREADABLE_UUID,\n 'updates to no barcode')])\n", (45045, 45351), False, 'import pytest\n'), ((46964, 47284), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""expected_barcode,test_valid,test_update,test_description"""', "[('MA200190000', True, False, 'does not update to current valid barcode'),\n ('M$200190000', False, True,\n 'does not update to current invalid barcode'), ('', None, False,\n 'does not update to current empty barcode')]"], {}), "(\n 
'expected_barcode,test_valid,test_update,test_description', [(\n 'MA200190000', True, False, 'does not update to current valid barcode'),\n ('M$200190000', False, True,\n 'does not update to current invalid barcode'), ('', None, False,\n 'does not update to current empty barcode')])\n", (46987, 47284), False, 'import pytest\n'), ((3145, 3158), 'queue.Queue', 'queue.Queue', ([], {}), '()\n', (3156, 3158), False, 'import queue\n'), ((3934, 3949), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (3944, 3949), False, 'import time\n'), ((4605, 4619), 'stdlib_utils.TestingQueue', 'TestingQueue', ([], {}), '()\n', (4617, 4619), False, 'from stdlib_utils import TestingQueue\n'), ((5369, 5383), 'stdlib_utils.TestingQueue', 'TestingQueue', ([], {}), '()\n', (5381, 5383), False, 'from stdlib_utils import TestingQueue\n'), ((6417, 6431), 'stdlib_utils.TestingQueue', 'TestingQueue', ([], {}), '()\n', (6429, 6431), False, 'from stdlib_utils import TestingQueue\n'), ((7337, 7351), 'stdlib_utils.TestingQueue', 'TestingQueue', ([], {}), '()\n', (7349, 7351), False, 'from stdlib_utils import TestingQueue\n'), ((8392, 8443), 'stdlib_utils.invoke_process_run_and_check_errors', 'invoke_process_run_and_check_errors', (['monitor_thread'], {}), '(monitor_thread)\n', (8427, 8443), False, 'from stdlib_utils import invoke_process_run_and_check_errors\n'), ((9397, 9448), 'stdlib_utils.invoke_process_run_and_check_errors', 'invoke_process_run_and_check_errors', (['monitor_thread'], {}), '(monitor_thread)\n', (9432, 9448), False, 'from stdlib_utils import invoke_process_run_and_check_errors\n'), ((10373, 10424), 'stdlib_utils.invoke_process_run_and_check_errors', 'invoke_process_run_and_check_errors', (['monitor_thread'], {}), '(monitor_thread)\n', (10408, 10424), False, 'from stdlib_utils import invoke_process_run_and_check_errors\n'), ((11316, 11364), 'json.dumps', 'json.dumps', (["{'well': 0, 'data': [1, 2, 3, 4, 5]}"], {}), "({'well': 0, 'data': [1, 2, 3, 4, 5]})\n", (11326, 11364), 
False, 'import json\n'), ((11556, 11607), 'stdlib_utils.invoke_process_run_and_check_errors', 'invoke_process_run_and_check_errors', (['monitor_thread'], {}), '(monitor_thread)\n', (11591, 11607), False, 'from stdlib_utils import invoke_process_run_and_check_errors\n'), ((12836, 12887), 'stdlib_utils.invoke_process_run_and_check_errors', 'invoke_process_run_and_check_errors', (['monitor_thread'], {}), '(monitor_thread)\n', (12871, 12887), False, 'from stdlib_utils import invoke_process_run_and_check_errors\n'), ((13997, 14048), 'stdlib_utils.invoke_process_run_and_check_errors', 'invoke_process_run_and_check_errors', (['monitor_thread'], {}), '(monitor_thread)\n', (14032, 14048), False, 'from stdlib_utils import invoke_process_run_and_check_errors\n'), ((15165, 15216), 'stdlib_utils.invoke_process_run_and_check_errors', 'invoke_process_run_and_check_errors', (['monitor_thread'], {}), '(monitor_thread)\n', (15200, 15216), False, 'from stdlib_utils import invoke_process_run_and_check_errors\n'), ((17734, 17785), 'stdlib_utils.invoke_process_run_and_check_errors', 'invoke_process_run_and_check_errors', (['monitor_thread'], {}), '(monitor_thread)\n', (17769, 17785), False, 'from stdlib_utils import invoke_process_run_and_check_errors\n'), ((19081, 19104), 'xem_wrapper.FrontPanelSimulator', 'FrontPanelSimulator', (['{}'], {}), '({})\n', (19100, 19104), False, 'from xem_wrapper import FrontPanelSimulator\n'), ((19199, 19251), 'stdlib_utils.invoke_process_run_and_check_errors', 'invoke_process_run_and_check_errors', (['ok_comm_process'], {}), '(ok_comm_process)\n', (19234, 19251), False, 'from stdlib_utils import invoke_process_run_and_check_errors\n'), ((19256, 19307), 'stdlib_utils.invoke_process_run_and_check_errors', 'invoke_process_run_and_check_errors', (['monitor_thread'], {}), '(monitor_thread)\n', (19291, 19307), False, 'from stdlib_utils import invoke_process_run_and_check_errors\n'), ((18253, 18353), 'datetime.datetime', 'datetime.datetime', ([], {'year': 
'(2020)', 'month': '(2)', 'day': '(27)', 'hour': '(12)', 'minute': '(14)', 'second': '(22)', 'microsecond': '(336597)'}), '(year=2020, month=2, day=27, hour=12, minute=14, second=22,\n microsecond=336597)\n', (18270, 18353), False, 'import datetime\n'), ((20441, 20492), 'stdlib_utils.invoke_process_run_and_check_errors', 'invoke_process_run_and_check_errors', (['monitor_thread'], {}), '(monitor_thread)\n', (20476, 20492), False, 'from stdlib_utils import invoke_process_run_and_check_errors\n'), ((20726, 20777), 'stdlib_utils.invoke_process_run_and_check_errors', 'invoke_process_run_and_check_errors', (['monitor_thread'], {}), '(monitor_thread)\n', (20761, 20777), False, 'from stdlib_utils import invoke_process_run_and_check_errors\n'), ((21076, 21127), 'stdlib_utils.invoke_process_run_and_check_errors', 'invoke_process_run_and_check_errors', (['monitor_thread'], {}), '(monitor_thread)\n', (21111, 21127), False, 'from stdlib_utils import invoke_process_run_and_check_errors\n'), ((22044, 22095), 'stdlib_utils.invoke_process_run_and_check_errors', 'invoke_process_run_and_check_errors', (['monitor_thread'], {}), '(monitor_thread)\n', (22079, 22095), False, 'from stdlib_utils import invoke_process_run_and_check_errors\n'), ((22313, 22364), 'stdlib_utils.invoke_process_run_and_check_errors', 'invoke_process_run_and_check_errors', (['monitor_thread'], {}), '(monitor_thread)\n', (22348, 22364), False, 'from stdlib_utils import invoke_process_run_and_check_errors\n'), ((23427, 23478), 'stdlib_utils.invoke_process_run_and_check_errors', 'invoke_process_run_and_check_errors', (['monitor_thread'], {}), '(monitor_thread)\n', (23462, 23478), False, 'from stdlib_utils import invoke_process_run_and_check_errors\n'), ((24401, 24452), 'stdlib_utils.invoke_process_run_and_check_errors', 'invoke_process_run_and_check_errors', (['monitor_thread'], {}), '(monitor_thread)\n', (24436, 24452), False, 'from stdlib_utils import invoke_process_run_and_check_errors\n'), ((25192, 25243), 
'stdlib_utils.invoke_process_run_and_check_errors', 'invoke_process_run_and_check_errors', (['monitor_thread'], {}), '(monitor_thread)\n', (25227, 25243), False, 'from stdlib_utils import invoke_process_run_and_check_errors\n'), ((25496, 25562), 'os.path.join', 'os.path.join', (['"""tests"""', '"""test_xem_scripts"""', '"""xem_test_start_up.txt"""'], {}), "('tests', 'test_xem_scripts', 'xem_test_start_up.txt')\n", (25508, 25562), False, 'import os\n'), ((25905, 25928), 'xem_wrapper.FrontPanelSimulator', 'FrontPanelSimulator', (['{}'], {}), '({})\n', (25924, 25928), False, 'from xem_wrapper import FrontPanelSimulator\n'), ((26479, 26531), 'stdlib_utils.invoke_process_run_and_check_errors', 'invoke_process_run_and_check_errors', (['ok_comm_process'], {}), '(ok_comm_process)\n', (26514, 26531), False, 'from stdlib_utils import invoke_process_run_and_check_errors\n'), ((26777, 26846), 'stdlib_utils.invoke_process_run_and_check_errors', 'invoke_process_run_and_check_errors', (['monitor_thread'], {'num_iterations': '(3)'}), '(monitor_thread, num_iterations=3)\n', (26812, 26846), False, 'from stdlib_utils import invoke_process_run_and_check_errors\n'), ((27110, 27185), 'os.path.join', 'os.path.join', (['"""tests"""', '"""test_xem_scripts"""', '"""xem_test_start_calibration.txt"""'], {}), "('tests', 'test_xem_scripts', 'xem_test_start_calibration.txt')\n", (27122, 27185), False, 'import os\n'), ((27527, 27549), 'mantarray_desktop_app.RunningFIFOSimulator', 'RunningFIFOSimulator', ([], {}), '()\n', (27547, 27549), False, 'from mantarray_desktop_app import RunningFIFOSimulator\n'), ((28191, 28243), 'stdlib_utils.invoke_process_run_and_check_errors', 'invoke_process_run_and_check_errors', (['ok_comm_process'], {}), '(ok_comm_process)\n', (28226, 28243), False, 'from stdlib_utils import invoke_process_run_and_check_errors\n'), ((28483, 28553), 'stdlib_utils.invoke_process_run_and_check_errors', 'invoke_process_run_and_check_errors', (['monitor_thread'], {'num_iterations': 
'(51)'}), '(monitor_thread, num_iterations=51)\n', (28518, 28553), False, 'from stdlib_utils import invoke_process_run_and_check_errors\n'), ((29446, 29469), 'xem_wrapper.FrontPanelSimulator', 'FrontPanelSimulator', (['{}'], {}), '({})\n', (29465, 29469), False, 'from xem_wrapper import FrontPanelSimulator\n'), ((29752, 29804), 'stdlib_utils.invoke_process_run_and_check_errors', 'invoke_process_run_and_check_errors', (['ok_comm_process'], {}), '(ok_comm_process)\n', (29787, 29804), False, 'from stdlib_utils import invoke_process_run_and_check_errors\n'), ((29887, 29938), 'stdlib_utils.invoke_process_run_and_check_errors', 'invoke_process_run_and_check_errors', (['monitor_thread'], {}), '(monitor_thread)\n', (29922, 29938), False, 'from stdlib_utils import invoke_process_run_and_check_errors\n'), ((30770, 30821), 'stdlib_utils.invoke_process_run_and_check_errors', 'invoke_process_run_and_check_errors', (['monitor_thread'], {}), '(monitor_thread)\n', (30805, 30821), False, 'from stdlib_utils import invoke_process_run_and_check_errors\n'), ((31864, 31915), 'stdlib_utils.invoke_process_run_and_check_errors', 'invoke_process_run_and_check_errors', (['monitor_thread'], {}), '(monitor_thread)\n', (31899, 31915), False, 'from stdlib_utils import invoke_process_run_and_check_errors\n'), ((32929, 32998), 'stdlib_utils.invoke_process_run_and_check_errors', 'invoke_process_run_and_check_errors', (['monitor_thread'], {'num_iterations': '(2)'}), '(monitor_thread, num_iterations=2)\n', (32964, 32998), False, 'from stdlib_utils import invoke_process_run_and_check_errors\n'), ((34178, 34192), 'stdlib_utils.TestingQueue', 'TestingQueue', ([], {}), '()\n', (34190, 34192), False, 'from stdlib_utils import TestingQueue\n'), ((34208, 34224), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (34222, 34224), False, 'import threading\n'), ((34239, 34369), 'mantarray_desktop_app.MantarrayProcessesMonitor', 'MantarrayProcessesMonitor', (['shared_values_dict', 'test_process_manager', 
'error_queue', 'the_lock'], {'boot_up_after_processes_start': '(True)'}), '(shared_values_dict, test_process_manager,\n error_queue, the_lock, boot_up_after_processes_start=True)\n', (34264, 34369), False, 'from mantarray_desktop_app import MantarrayProcessesMonitor\n'), ((34418, 34462), 'stdlib_utils.invoke_process_run_and_check_errors', 'invoke_process_run_and_check_errors', (['monitor'], {}), '(monitor)\n', (34453, 34462), False, 'from stdlib_utils import invoke_process_run_and_check_errors\n'), ((34509, 34553), 'stdlib_utils.invoke_process_run_and_check_errors', 'invoke_process_run_and_check_errors', (['monitor'], {}), '(monitor)\n', (34544, 34553), False, 'from stdlib_utils import invoke_process_run_and_check_errors\n'), ((35059, 35073), 'stdlib_utils.TestingQueue', 'TestingQueue', ([], {}), '()\n', (35071, 35073), False, 'from stdlib_utils import TestingQueue\n'), ((35089, 35105), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (35103, 35105), False, 'import threading\n'), ((35120, 35251), 'mantarray_desktop_app.MantarrayProcessesMonitor', 'MantarrayProcessesMonitor', (['shared_values_dict', 'test_process_manager', 'error_queue', 'the_lock'], {'boot_up_after_processes_start': '(False)'}), '(shared_values_dict, test_process_manager,\n error_queue, the_lock, boot_up_after_processes_start=False)\n', (35145, 35251), False, 'from mantarray_desktop_app import MantarrayProcessesMonitor\n'), ((35300, 35344), 'stdlib_utils.invoke_process_run_and_check_errors', 'invoke_process_run_and_check_errors', (['monitor'], {}), '(monitor)\n', (35335, 35344), False, 'from stdlib_utils import invoke_process_run_and_check_errors\n'), ((35879, 35893), 'stdlib_utils.TestingQueue', 'TestingQueue', ([], {}), '()\n', (35891, 35893), False, 'from stdlib_utils import TestingQueue\n'), ((35909, 35925), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (35923, 35925), False, 'import threading\n'), ((35940, 36100), 'mantarray_desktop_app.MantarrayProcessesMonitor', 
'MantarrayProcessesMonitor', (['shared_values_dict', 'test_process_manager', 'error_queue', 'the_lock'], {'boot_up_after_processes_start': '(True)', 'load_firmware_file': '(False)'}), '(shared_values_dict, test_process_manager,\n error_queue, the_lock, boot_up_after_processes_start=True,\n load_firmware_file=False)\n', (35965, 36100), False, 'from mantarray_desktop_app import MantarrayProcessesMonitor\n'), ((36153, 36197), 'stdlib_utils.invoke_process_run_and_check_errors', 'invoke_process_run_and_check_errors', (['monitor'], {}), '(monitor)\n', (36188, 36197), False, 'from stdlib_utils import invoke_process_run_and_check_errors\n'), ((37208, 37230), 'mantarray_desktop_app.RunningFIFOSimulator', 'RunningFIFOSimulator', ([], {}), '()\n', (37228, 37230), False, 'from mantarray_desktop_app import RunningFIFOSimulator\n'), ((37407, 37455), 'stdlib_utils.invoke_process_run_and_check_errors', 'invoke_process_run_and_check_errors', (['okc_process'], {}), '(okc_process)\n', (37442, 37455), False, 'from stdlib_utils import invoke_process_run_and_check_errors\n'), ((37538, 37589), 'stdlib_utils.invoke_process_run_and_check_errors', 'invoke_process_run_and_check_errors', (['monitor_thread'], {}), '(monitor_thread)\n', (37573, 37589), False, 'from stdlib_utils import invoke_process_run_and_check_errors\n'), ((38712, 38763), 'stdlib_utils.invoke_process_run_and_check_errors', 'invoke_process_run_and_check_errors', (['monitor_thread'], {}), '(monitor_thread)\n', (38747, 38763), False, 'from stdlib_utils import invoke_process_run_and_check_errors\n'), ((39933, 39984), 'stdlib_utils.invoke_process_run_and_check_errors', 'invoke_process_run_and_check_errors', (['monitor_thread'], {}), '(monitor_thread)\n', (39968, 39984), False, 'from stdlib_utils import invoke_process_run_and_check_errors\n'), ((40948, 40999), 'stdlib_utils.invoke_process_run_and_check_errors', 'invoke_process_run_and_check_errors', (['monitor_thread'], {}), '(monitor_thread)\n', (40983, 40999), False, 'from 
stdlib_utils import invoke_process_run_and_check_errors\n'), ((42289, 42340), 'stdlib_utils.invoke_process_run_and_check_errors', 'invoke_process_run_and_check_errors', (['monitor_thread'], {}), '(monitor_thread)\n', (42324, 42340), False, 'from stdlib_utils import invoke_process_run_and_check_errors\n'), ((42409, 42460), 'stdlib_utils.invoke_process_run_and_check_errors', 'invoke_process_run_and_check_errors', (['monitor_thread'], {}), '(monitor_thread)\n', (42444, 42460), False, 'from stdlib_utils import invoke_process_run_and_check_errors\n'), ((42649, 42700), 'stdlib_utils.invoke_process_run_and_check_errors', 'invoke_process_run_and_check_errors', (['monitor_thread'], {}), '(monitor_thread)\n', (42684, 42700), False, 'from stdlib_utils import invoke_process_run_and_check_errors\n'), ((42769, 42820), 'stdlib_utils.invoke_process_run_and_check_errors', 'invoke_process_run_and_check_errors', (['monitor_thread'], {}), '(monitor_thread)\n', (42804, 42820), False, 'from stdlib_utils import invoke_process_run_and_check_errors\n'), ((44760, 44811), 'stdlib_utils.invoke_process_run_and_check_errors', 'invoke_process_run_and_check_errors', (['monitor_thread'], {}), '(monitor_thread)\n', (44795, 44811), False, 'from stdlib_utils import invoke_process_run_and_check_errors\n'), ((46702, 46753), 'stdlib_utils.invoke_process_run_and_check_errors', 'invoke_process_run_and_check_errors', (['monitor_thread'], {}), '(monitor_thread)\n', (46737, 46753), False, 'from stdlib_utils import invoke_process_run_and_check_errors\n'), ((48663, 48714), 'stdlib_utils.invoke_process_run_and_check_errors', 'invoke_process_run_and_check_errors', (['monitor_thread'], {}), '(monitor_thread)\n', (48698, 48714), False, 'from stdlib_utils import invoke_process_run_and_check_errors\n'), ((49629, 49680), 'stdlib_utils.invoke_process_run_and_check_errors', 'invoke_process_run_and_check_errors', (['monitor_thread'], {}), '(monitor_thread)\n', (49664, 49680), False, 'from stdlib_utils import 
invoke_process_run_and_check_errors\n'), ((50629, 50680), 'stdlib_utils.invoke_process_run_and_check_errors', 'invoke_process_run_and_check_errors', (['monitor_thread'], {}), '(monitor_thread)\n', (50664, 50680), False, 'from stdlib_utils import invoke_process_run_and_check_errors\n'), ((50765, 50789), 'copy.deepcopy', 'copy.deepcopy', (['test_comm'], {}), '(test_comm)\n', (50778, 50789), False, 'import copy\n'), ((52038, 52089), 'stdlib_utils.invoke_process_run_and_check_errors', 'invoke_process_run_and_check_errors', (['monitor_thread'], {}), '(monitor_thread)\n', (52073, 52089), False, 'from stdlib_utils import invoke_process_run_and_check_errors\n'), ((52174, 52198), 'copy.deepcopy', 'copy.deepcopy', (['test_comm'], {}), '(test_comm)\n', (52187, 52198), False, 'import copy\n'), ((52954, 53001), 'mantarray_desktop_app.create_magnetometer_config_dict', 'create_magnetometer_config_dict', (['test_num_wells'], {}), '(test_num_wells)\n', (52985, 53001), False, 'from mantarray_desktop_app import create_magnetometer_config_dict\n'), ((54596, 54643), 'mantarray_desktop_app.create_magnetometer_config_dict', 'create_magnetometer_config_dict', (['test_num_wells'], {}), '(test_num_wells)\n', (54627, 54643), False, 'from mantarray_desktop_app import create_magnetometer_config_dict\n'), ((56392, 56443), 'stdlib_utils.invoke_process_run_and_check_errors', 'invoke_process_run_and_check_errors', (['monitor_thread'], {}), '(monitor_thread)\n', (56427, 56443), False, 'from stdlib_utils import invoke_process_run_and_check_errors\n'), ((57713, 57764), 'stdlib_utils.invoke_process_run_and_check_errors', 'invoke_process_run_and_check_errors', (['monitor_thread'], {}), '(monitor_thread)\n', (57748, 57764), False, 'from stdlib_utils import invoke_process_run_and_check_errors\n'), ((59159, 59261), 'datetime.datetime', 'datetime.datetime', ([], {'year': '(2021)', 'month': '(10)', 'day': '(19)', 'hour': '(10)', 'minute': '(23)', 'second': '(40)', 'microsecond': '(123456)'}), '(year=2021, 
month=10, day=19, hour=10, minute=23, second=\n 40, microsecond=123456)\n', (59176, 59261), False, 'import datetime\n'), ((59544, 59595), 'stdlib_utils.invoke_process_run_and_check_errors', 'invoke_process_run_and_check_errors', (['monitor_thread'], {}), '(monitor_thread)\n', (59579, 59595), False, 'from stdlib_utils import invoke_process_run_and_check_errors\n'), ((61267, 61318), 'stdlib_utils.invoke_process_run_and_check_errors', 'invoke_process_run_and_check_errors', (['monitor_thread'], {}), '(monitor_thread)\n', (61302, 61318), False, 'from stdlib_utils import invoke_process_run_and_check_errors\n'), ((62012, 62111), 'datetime.datetime', 'datetime.datetime', ([], {'year': '(2021)', 'month': '(10)', 'day': '(19)', 'hour': '(12)', 'minute': '(8)', 'second': '(5)', 'microsecond': '(123456)'}), '(year=2021, month=10, day=19, hour=12, minute=8, second=5,\n microsecond=123456)\n', (62029, 62111), False, 'import datetime\n'), ((62705, 62756), 'stdlib_utils.invoke_process_run_and_check_errors', 'invoke_process_run_and_check_errors', (['monitor_thread'], {}), '(monitor_thread)\n', (62740, 62756), False, 'from stdlib_utils import invoke_process_run_and_check_errors\n'), ((63493, 63544), 'stdlib_utils.invoke_process_run_and_check_errors', 'invoke_process_run_and_check_errors', (['monitor_thread'], {}), '(monitor_thread)\n', (63528, 63544), False, 'from stdlib_utils import invoke_process_run_and_check_errors\n'), ((3293, 3309), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (3307, 3309), False, 'import threading\n'), ((4723, 4739), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (4737, 4739), False, 'import threading\n'), ((4756, 4825), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {'match': '"""Process Monitor Exception"""'}), "(NotImplementedError, match='Process Monitor Exception')\n", (4769, 4825), False, 'import pytest\n'), ((4835, 4879), 'stdlib_utils.invoke_process_run_and_check_errors', 'invoke_process_run_and_check_errors', 
(['test_pm'], {}), '(test_pm)\n', (4870, 4879), False, 'from stdlib_utils import invoke_process_run_and_check_errors\n'), ((5487, 5503), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (5501, 5503), False, 'import threading\n'), ((6535, 6551), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (6549, 6551), False, 'import threading\n'), ((7455, 7471), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (7469, 7471), False, 'import threading\n'), ((10220, 10237), 'numpy.zeros', 'np.zeros', (['(2, 10)'], {}), '((2, 10))\n', (10228, 10237), True, 'import numpy as np\n'), ((19396, 19496), 'datetime.datetime', 'datetime.datetime', ([], {'year': '(2020)', 'month': '(2)', 'day': '(27)', 'hour': '(12)', 'minute': '(14)', 'second': '(22)', 'microsecond': '(336597)'}), '(year=2020, month=2, day=27, hour=12, minute=14, second=22,\n microsecond=336597)\n', (19413, 19496), False, 'import datetime\n'), ((38639, 38672), 'copy.deepcopy', 'copy.deepcopy', (['test_communication'], {}), '(test_communication)\n', (38652, 38672), False, 'import copy\n'), ((39860, 39893), 'copy.deepcopy', 'copy.deepcopy', (['test_communication'], {}), '(test_communication)\n', (39873, 39893), False, 'import copy\n'), ((40879, 40912), 'copy.deepcopy', 'copy.deepcopy', (['test_communication'], {}), '(test_communication)\n', (40892, 40912), False, 'import copy\n'), ((53125, 53160), 'copy.deepcopy', 'copy.deepcopy', (['expected_config_dict'], {}), '(expected_config_dict)\n', (53138, 53160), False, 'import copy\n'), ((53975, 54026), 'stdlib_utils.invoke_process_run_and_check_errors', 'invoke_process_run_and_check_errors', (['monitor_thread'], {}), '(monitor_thread)\n', (54010, 54026), False, 'from stdlib_utils import invoke_process_run_and_check_errors\n'), ((54767, 54802), 'copy.deepcopy', 'copy.deepcopy', (['expected_config_dict'], {}), '(expected_config_dict)\n', (54780, 54802), False, 'import copy\n'), ((55443, 55494), 'stdlib_utils.invoke_process_run_and_check_errors', 
'invoke_process_run_and_check_errors', (['monitor_thread'], {}), '(monitor_thread)\n', (55478, 55494), False, 'from stdlib_utils import invoke_process_run_and_check_errors\n'), ((56910, 56952), 'copy.deepcopy', 'copy.deepcopy', (['DEFAULT_MAGNETOMETER_CONFIG'], {}), '(DEFAULT_MAGNETOMETER_CONFIG)\n', (56923, 56952), False, 'import copy\n'), ((60812, 60914), 'datetime.datetime', 'datetime.datetime', ([], {'year': '(2021)', 'month': '(10)', 'day': '(19)', 'hour': '(10)', 'minute': '(31)', 'second': '(21)', 'microsecond': '(123456)'}), '(year=2021, month=10, day=19, hour=10, minute=31, second=\n 21, microsecond=123456)\n', (60829, 60914), False, 'import datetime\n'), ((60600, 60666), 'mantarray_desktop_app.constants.GENERIC_24_WELL_DEFINITION.get_well_name_from_well_index', 'GENERIC_24_WELL_DEFINITION.get_well_name_from_well_index', (['well_idx'], {}), '(well_idx)\n', (60656, 60666), False, 'from mantarray_desktop_app.constants import GENERIC_24_WELL_DEFINITION\n'), ((59837, 59903), 'mantarray_desktop_app.constants.GENERIC_24_WELL_DEFINITION.get_well_name_from_well_index', 'GENERIC_24_WELL_DEFINITION.get_well_name_from_well_index', (['well_idx'], {}), '(well_idx)\n', (59893, 59903), False, 'from mantarray_desktop_app.constants import GENERIC_24_WELL_DEFINITION\n')] |
"""
.. module:: mflike
:Synopsis: Definition of simplistic likelihood for Simons Observatory
:Authors: <NAME>, <NAME>, <NAME>,
<NAME>, <NAME>, <NAME>.
"""
import os
from typing import Optional
import numpy as np
# from cobaya.conventions import _packages_path
_packages_path = 'packages_path'
from cobaya.likelihoods._base_classes import _InstallableLikelihood
from cobaya.log import LoggedError
from cobaya.tools import are_different_params_lists
from cobaya.theory import HelperTheory
from .gaussian import GaussianData, GaussianLikelihood
from .ps import PSLikelihood
class MFLike(GaussianLikelihood, _InstallableLikelihood):
    """Multi-frequency TT/TE/EE power-spectrum likelihood built on SACC data files."""
    _url = "https://portal.nersc.gov/cfs/sobs/users/MFLike_data"
    _release = "v0.6"
    # Used by _InstallableLikelihood to download/unpack the data tarball
    install_options = {"download_url": "{}/{}.tar.gz".format(_url, _release)}
    # attributes set from .yaml
    input_file: Optional[str]  # SACC file with the data vector (read in prepare_data)
    cov_Bbl_file: Optional[str]  # optional separate SACC file with covariance/windows
    data: dict  # holds the 'spectra' entries: experiments, frequencies, cuts
    defaults: dict  # default 'polarizations'/'scales'/'symmetrize' cuts
    foregrounds: dict  # foreground model configuration passed to get_foreground_model
def initialize(self):
self.log.info("Initialising.")
# Set path to data
if (not getattr(self, "path", None)) and (not getattr(self, _packages_path, None)):
raise LoggedError(
self.log,
"No path given to MFLike data. "
"Set the likelihood property "
"'path' or the common property '%s'.",
_packages_path,
)
# If no path specified, use the modules path
data_file_path = os.path.normpath(
getattr(self, "path", None) or os.path.join(self.packages_path, "data")
)
self.data_folder = os.path.join(data_file_path, self.data_folder)
if not os.path.exists(self.data_folder):
if not getattr(self, "path", None):
self.install(path=self.packages_path)
else:
raise LoggedError(
self.log,
"The 'data_folder' directory does not exist. " "Check the given path [%s].",
self.data_folder,
)
# Read data
self.prepare_data()
# State requisites to the theory code
self.requested_cls = ["tt", "te", "ee"]
self.expected_params = ["a_tSZ", "a_kSZ", "a_p", "beta_p",
"a_c", "beta_c", "n_CIBC", "a_s", "T_d"]
# def get_helper_theories(self):
# """
# Foreground model is a helper theory
# """
# self._foreground = Foreground(
# self, "foreground", dict(stop_at_error=self.stop_at_error), timing=self.timer
# )
# # setattr(self._camb_transfers, _requires, self._transfer_requires)
# return {"foreground": self._foreground}
def initialize_with_params(self):
# Check that the parameters are the right ones
differences = are_different_params_lists(
self.input_params, self.expected_params,
name_A="given", name_B="expected")
if differences:
raise LoggedError(
self.log, "Configuration error in parameters: %r.",
differences)
def get_requirements(self):
reqs = dict(Cl={k: max(c, 9000) for k, c in self.lcuts.items()})
# reqs["foreground_model"] = {}
return reqs
def _get_theory(self, **params_values):
cl = self.provider.get_Cl(ell_factor=True)
return self._get_power_spectra(cl, **params_values)
def prepare_data(self, verbose=False):
import sacc
data = self.data
# Read data
input_fname = os.path.join(self.data_folder, self.input_file)
s = sacc.Sacc.load_fits(input_fname)
# Read extra file containing covariance and windows if needed.
cbbl_extra = False
s_b = s
if self.cov_Bbl_file:
if self.cov_Bbl_file != self.input_file:
cov_Bbl_fname = os.path.join(self.data_folder,
self.cov_Bbl_file)
s_b = sacc.Sacc.load_fits(cov_Bbl_fname)
cbbl_extra = True
try:
default_cuts = self.defaults
except AttributeError:
raise KeyError('You must provide a list of default cuts')
# Translation betwen TEB and sacc C_ell types
pol_dict = {'T': '0',
'E': 'e',
'B': 'b'}
ppol_dict = {'TT': 'tt',
'EE': 'ee',
'TE': 'te',
'ET': 'te',
'BB': 'bb',
'EB': 'eb',
'BE': 'eb',
'TB': 'tb',
'BT': 'tb',
'BB': 'bb'}
def xp_nu(xp, nu):
return xp + '_' + str(nu)
def get_cl_meta(spec):
# For each of the entries of the `spectra` section of the
# yaml file, extract the relevant information: experiments,
# frequencies, polarization combinations, scale cuts and
# whether TE should be symmetrized.
# Experiments/frequencies
exp_1, exp_2 = spec['experiments']
freq_1, freq_2 = spec['frequencies']
# Read off polarization channel combinations
pols = spectrum.get('polarizations',
default_cuts['polarizations']).copy()
# Read off scale cuts
scls = spectrum.get('scales',
default_cuts['scales']).copy()
# For the same two channels, do not include ET and TE, only TE
if (exp_1 == exp_2) and (freq_1 == freq_2):
if 'ET' in pols:
pols.remove('ET')
if 'TE' not in pols:
pols.append('TE')
scls['TE'] = scls['ET']
symm = False
else:
# Symmetrization
if ('TE' in pols) and ('ET' in pols):
symm = spectrum.get('symmetrize',
default_cuts['symmetrize'])
else:
symm = False
return exp_1, exp_2, freq_1, freq_2, pols, scls, symm
def get_sacc_names(pol, exp_1, exp_2, freq_1, freq_2):
# Translate the polarization combination, experiment
# and frequency names of a given entry in the `spectra`
# part of the input yaml file into the names expected
# in the SACC files.
p1, p2 = pol
tname_1 = xp_nu(exp_1, freq_1)
tname_2 = xp_nu(exp_2, freq_2)
if p1 in ['E', 'B']:
tname_1 += '_s2'
else:
tname_1 += '_s0'
if p2 in ['E', 'B']:
tname_2 += '_s2'
else:
tname_2 += '_s0'
if p2 == 'T':
dtype = 'cl_' + pol_dict[p2] + pol_dict[p1]
else:
dtype = 'cl_' + pol_dict[p1] + pol_dict[p2]
return tname_1, tname_2, dtype
# First we trim the SACC file so it only contains
# the parts of the data we care about.
# Indices to be kept
indices = []
indices_b = []
# Length of the final data vector
len_compressed = 0
for spectrum in data['spectra']:
(exp_1, exp_2, freq_1, freq_2,
pols, scls, symm) = get_cl_meta(spectrum)
for pol in pols:
tname_1, tname_2, dtype = get_sacc_names(pol, exp_1, exp_2,
freq_1, freq_2)
lmin, lmax = scls[pol]
ind = s.indices(dtype, # Power spectrum type
(tname_1, tname_2), # Channel combinations
ell__gt=lmin, ell__lt=lmax) # Scale cuts
indices += list(ind)
# Note that data in the cov_Bbl file may be in different order.
if cbbl_extra:
ind_b = s_b.indices(dtype,
(tname_1, tname_2),
ell__gt=lmin, ell__lt=lmax)
indices_b += list(ind_b)
if symm and pol == 'ET':
pass
else:
len_compressed += ind.size
if verbose:
print(tname_1, tname_2, dtype, ind.shape, lmin, lmax)
# Get rid of all the unselected power spectra.
# Sacc takes care of performing the same cuts in the
# covariance matrix, window functions etc.
s.keep_indices(np.array(indices))
if cbbl_extra:
s_b.keep_indices(np.array(indices_b))
# Now create metadata for each spectrum
self.spec_meta = []
len_full = s.mean.size
# These are the matrices we'll use to compress the data if
# `symmetrize` is true.
# Note that a lot of the complication in this function is caused by the
# symmetrization option, for which SACC doesn't have native support.
mat_compress = np.zeros([len_compressed, len_full])
mat_compress_b = np.zeros([len_compressed, len_full])
bands = {}
self.lcuts = {k: c[1] for k, c in default_cuts['scales'].items()}
index_sofar = 0
self.l_bpws = None
for spectrum in data['spectra']:
(exp_1, exp_2, freq_1, freq_2,
pols, scls, symm) = get_cl_meta(spectrum)
bands[xp_nu(exp_1, freq_1)] = freq_1
bands[xp_nu(exp_2, freq_2)] = freq_2
for k in scls.keys():
self.lcuts[k] = max(self.lcuts[k], scls[k][1])
for pol in pols:
tname_1, tname_2, dtype = get_sacc_names(pol, exp_1, exp_2,
freq_1, freq_2)
# The only reason why we need indices is the symmetrization.
# Otherwise all of this could have been done in the previous
# loop over data['spectra'].
ls, cls, ind = s.get_ell_cl(dtype, tname_1, tname_2, return_ind=True)
if cbbl_extra:
ind_b = s_b.indices(dtype,
(tname_1, tname_2))
ws = s_b.get_bandpower_windows(ind_b)
else:
ws = s.get_bandpower_windows(ind)
if self.l_bpws is None:
# The assumption here is that bandpower windows
# will all be sampled at the same ells.
self.l_bpws = ws.values
# Symmetrize if needed.
if (pol in ['TE', 'ET']) and symm:
pol2 = pol[::-1]
pols.remove(pol2)
tname_1, tname_2, dtype = get_sacc_names(pol2,
exp_1, exp_2,
freq_1, freq_2)
ind2 = s.indices(dtype,
(tname_1, tname_2))
cls2 = s.get_ell_cl(dtype, tname_1, tname_2)[1]
cls = 0.5 * (cls + cls2)
for i, (j1, j2) in enumerate(zip(ind, ind2)):
mat_compress[index_sofar + i, j1] = 0.5
mat_compress[index_sofar + i, j2] = 0.5
if cbbl_extra:
ind2_b = s_b.indices(dtype,
(tname_1, tname_2))
for i, (j1, j2) in enumerate(zip(ind_b, ind2_b)):
mat_compress_b[index_sofar + i, j1] = 0.5
mat_compress_b[index_sofar + i, j2] = 0.5
else:
for i, j1 in enumerate(ind):
mat_compress[index_sofar + i, j1] = 1
if cbbl_extra:
for i, j1 in enumerate(ind_b):
mat_compress_b[index_sofar + i, j1] = 1
# The fields marked with # below aren't really used, but
# we store them just in case.
self.spec_meta.append({'ids': (index_sofar +
np.arange(cls.size,
dtype=int)),
'pol': ppol_dict[pol],
't1': xp_nu(exp_1, freq_1), #
't2': xp_nu(exp_2, freq_2), #
'nu1': freq_1,
'nu2': freq_2,
'leff': ls, #
'cl_data': cls, #
'bpw': ws})
index_sofar += cls.size
if not cbbl_extra:
mat_compress_b = mat_compress
# Put data and covariance in the right order.
self.data_vec = np.dot(mat_compress, s.mean)
self.cov = np.dot(mat_compress_b,
s_b.covariance.covmat.dot(mat_compress_b.T))
self.inv_cov = np.linalg.inv(self.cov)
self.logp_const = np.log(2 * np.pi) * (-len(self.data_vec) / 2)
self.logp_const -= 0.5 * np.linalg.slogdet(self.cov)[1]
# TODO: we should actually be using bandpass integration
self.bands = sorted(bands)
self.freqs = np.array([bands[b] for b in self.bands])
# Put lcuts in a format that is recognisable by CAMB.
self.lcuts = {k.lower(): c for k, c in self.lcuts.items()}
if 'et' in self.lcuts:
del self.lcuts['et']
ell_vec = np.zeros_like(self.data_vec)
for m in self.spec_meta:
i = m["ids"]
ell_vec[i] = m["leff"]
self.ell_vec = ell_vec
self.data = GaussianData("mflike", self.ell_vec, self.data_vec, self.cov)
def loglike(self, cl, **params_values):
ps_vec = self._get_power_spectra(cl, **params_values)
delta = self.data_vec - ps_vec
logp = -0.5 * (delta @ self.inv_cov @ delta)
logp += self.logp_const
self.log.debug(
"Log-likelihood value computed "
"= {} (Χ² = {})".format(logp, -2 * (logp - self.logp_const)))
return logp
def _get_power_spectra(self, cl, **params_values):
# Get Cl's from the theory code
Dls = {s: cl[s][self.l_bpws] for s, _ in self.lcuts.items()}
# Get new foreground model given its nuisance parameters
fg_model = self._get_foreground_model(
{k: params_values[k] for k in self.expected_params})
ps_vec = np.zeros_like(self.data_vec)
for m in self.spec_meta:
p = m['pol']
i = m['ids']
w = m['bpw'].weight.T
clt = np.dot(w, Dls[p] + fg_model[p, 'all', m['nu1'], m['nu2']])
ps_vec[i] = clt
return ps_vec
def _get_foreground_model(self, fg_params):
return get_foreground_model(fg_params=fg_params,
fg_model=self.foregrounds,
frequencies=self.freqs,
ell=self.l_bpws,
requested_cls=self.requested_cls)
# Standalone function to return the foregroung model
# given the nuisance parameters
def get_foreground_model(fg_params, fg_model,
                         frequencies, ell,
                         requested_cls=["tt", "te", "ee"]):
    """Compute all foreground cross-spectra for the given nuisance parameters.

    Returns a dict keyed by ``(spectrum, component, freq1, freq2)``; the
    special component ``"all"`` is the sum over the configured components.
    """
    normalisation = fg_model["normalisation"]
    nu_0 = normalisation["nu_0"]
    ell_0 = normalisation["ell_0"]
    from fgspectra import cross as fgc
    from fgspectra import frequency as fgf
    from fgspectra import power as fgp
    # We don't seem to be using this
    # cirrus = fgc.FactorizedCrossSpectrum(fgf.PowerLaw(), fgp.PowerLaw())
    # Component templates: frequency SED x multipole power spectrum
    ksz = fgc.FactorizedCrossSpectrum(fgf.ConstantSED(), fgp.kSZ_bat())
    cibp = fgc.FactorizedCrossSpectrum(fgf.ModifiedBlackBody(), fgp.PowerLaw())
    radio = fgc.FactorizedCrossSpectrum(fgf.PowerLaw(), fgp.PowerLaw())
    tsz = fgc.FactorizedCrossSpectrum(fgf.ThermalSZ(), fgp.tSZ_150_bat())
    cibc = fgc.FactorizedCrossSpectrum(fgf.CIB(), fgp.PowerLaw())
    # Make sure to pass a numpy array to fgspectra
    frequencies = np.asarray(frequencies)
    model = {}
    model["tt", "kSZ"] = fg_params["a_kSZ"] * ksz(
        {"nu": frequencies},
        {"ell": ell, "ell_0": ell_0})
    model["tt", "cibp"] = fg_params["a_p"] * cibp(
        {"nu": frequencies, "nu_0": nu_0,
         "temp": fg_params["T_d"], "beta": fg_params["beta_p"]},
        {"ell": ell, "ell_0": ell_0, "alpha": 2})
    model["tt", "radio"] = fg_params["a_s"] * radio(
        {"nu": frequencies, "nu_0": nu_0, "beta": -0.5 - 2},
        {"ell": ell, "ell_0": ell_0, "alpha": 2})
    model["tt", "tSZ"] = fg_params["a_tSZ"] * tsz(
        {"nu": frequencies, "nu_0": nu_0},
        {"ell": ell, "ell_0": ell_0})
    model["tt", "cibc"] = fg_params["a_c"] * cibc(
        {"nu": frequencies, "nu_0": nu_0,
         "temp": fg_params["T_d"], "beta": fg_params["beta_c"]},
        {"ell": ell, "ell_0": ell_0, "alpha": 2 - fg_params["n_CIBC"]})
    components = fg_model["components"]
    fg_dict = {}
    for idx1, f1 in enumerate(frequencies):
        for idx2, f2 in enumerate(frequencies):
            for spec in requested_cls:
                total = np.zeros(len(ell))
                for comp in components[spec]:
                    contribution = model[spec, comp][idx1, idx2]
                    fg_dict[spec, comp, f1, f2] = contribution
                    total = total + contribution
                fg_dict[spec, "all", f1, f2] = total
    return fg_dict
| [
"numpy.log",
"numpy.array",
"fgspectra.frequency.ModifiedBlackBody",
"fgspectra.power.tSZ_150_bat",
"numpy.arange",
"os.path.exists",
"fgspectra.frequency.ConstantSED",
"cobaya.tools.are_different_params_lists",
"numpy.dot",
"fgspectra.frequency.PowerLaw",
"fgspectra.frequency.ThermalSZ",
"sac... | [((1619, 1665), 'os.path.join', 'os.path.join', (['data_file_path', 'self.data_folder'], {}), '(data_file_path, self.data_folder)\n', (1631, 1665), False, 'import os\n'), ((2840, 2947), 'cobaya.tools.are_different_params_lists', 'are_different_params_lists', (['self.input_params', 'self.expected_params'], {'name_A': '"""given"""', 'name_B': '"""expected"""'}), "(self.input_params, self.expected_params, name_A=\n 'given', name_B='expected')\n", (2866, 2947), False, 'from cobaya.tools import are_different_params_lists\n'), ((3573, 3620), 'os.path.join', 'os.path.join', (['self.data_folder', 'self.input_file'], {}), '(self.data_folder, self.input_file)\n', (3585, 3620), False, 'import os\n'), ((3633, 3665), 'sacc.Sacc.load_fits', 'sacc.Sacc.load_fits', (['input_fname'], {}), '(input_fname)\n', (3652, 3665), False, 'import sacc\n'), ((9174, 9210), 'numpy.zeros', 'np.zeros', (['[len_compressed, len_full]'], {}), '([len_compressed, len_full])\n', (9182, 9210), True, 'import numpy as np\n'), ((9236, 9272), 'numpy.zeros', 'np.zeros', (['[len_compressed, len_full]'], {}), '([len_compressed, len_full])\n', (9244, 9272), True, 'import numpy as np\n'), ((13129, 13157), 'numpy.dot', 'np.dot', (['mat_compress', 's.mean'], {}), '(mat_compress, s.mean)\n', (13135, 13157), True, 'import numpy as np\n'), ((13294, 13317), 'numpy.linalg.inv', 'np.linalg.inv', (['self.cov'], {}), '(self.cov)\n', (13307, 13317), True, 'import numpy as np\n'), ((13576, 13616), 'numpy.array', 'np.array', (['[bands[b] for b in self.bands]'], {}), '([bands[b] for b in self.bands])\n', (13584, 13616), True, 'import numpy as np\n'), ((13830, 13858), 'numpy.zeros_like', 'np.zeros_like', (['self.data_vec'], {}), '(self.data_vec)\n', (13843, 13858), True, 'import numpy as np\n'), ((14821, 14849), 'numpy.zeros_like', 'np.zeros_like', (['self.data_vec'], {}), '(self.data_vec)\n', (14834, 14849), True, 'import numpy as np\n'), ((16078, 16095), 'fgspectra.frequency.ConstantSED', 'fgf.ConstantSED', ([], 
{}), '()\n', (16093, 16095), True, 'from fgspectra import frequency as fgf\n'), ((16097, 16110), 'fgspectra.power.kSZ_bat', 'fgp.kSZ_bat', ([], {}), '()\n', (16108, 16110), True, 'from fgspectra import power as fgp\n'), ((16151, 16174), 'fgspectra.frequency.ModifiedBlackBody', 'fgf.ModifiedBlackBody', ([], {}), '()\n', (16172, 16174), True, 'from fgspectra import frequency as fgf\n'), ((16176, 16190), 'fgspectra.power.PowerLaw', 'fgp.PowerLaw', ([], {}), '()\n', (16188, 16190), True, 'from fgspectra import power as fgp\n'), ((16232, 16246), 'fgspectra.frequency.PowerLaw', 'fgf.PowerLaw', ([], {}), '()\n', (16244, 16246), True, 'from fgspectra import frequency as fgf\n'), ((16248, 16262), 'fgspectra.power.PowerLaw', 'fgp.PowerLaw', ([], {}), '()\n', (16260, 16262), True, 'from fgspectra import power as fgp\n'), ((16302, 16317), 'fgspectra.frequency.ThermalSZ', 'fgf.ThermalSZ', ([], {}), '()\n', (16315, 16317), True, 'from fgspectra import frequency as fgf\n'), ((16319, 16336), 'fgspectra.power.tSZ_150_bat', 'fgp.tSZ_150_bat', ([], {}), '()\n', (16334, 16336), True, 'from fgspectra import power as fgp\n'), ((16377, 16386), 'fgspectra.frequency.CIB', 'fgf.CIB', ([], {}), '()\n', (16384, 16386), True, 'from fgspectra import frequency as fgf\n'), ((16388, 16402), 'fgspectra.power.PowerLaw', 'fgp.PowerLaw', ([], {}), '()\n', (16400, 16402), True, 'from fgspectra import power as fgp\n'), ((16526, 16547), 'numpy.array', 'np.array', (['frequencies'], {}), '(frequencies)\n', (16534, 16547), True, 'import numpy as np\n'), ((1165, 1308), 'cobaya.log.LoggedError', 'LoggedError', (['self.log', '"""No path given to MFLike data. Set the likelihood property \'path\' or the common property \'%s\'."""', '_packages_path'], {}), '(self.log,\n "No path given to MFLike data. 
Set the likelihood property \'path\' or the common property \'%s\'."\n , _packages_path)\n', (1176, 1308), False, 'from cobaya.log import LoggedError\n'), ((1681, 1713), 'os.path.exists', 'os.path.exists', (['self.data_folder'], {}), '(self.data_folder)\n', (1695, 1713), False, 'import os\n'), ((3010, 3086), 'cobaya.log.LoggedError', 'LoggedError', (['self.log', '"""Configuration error in parameters: %r."""', 'differences'], {}), "(self.log, 'Configuration error in parameters: %r.', differences)\n", (3021, 3086), False, 'from cobaya.log import LoggedError\n'), ((8695, 8712), 'numpy.array', 'np.array', (['indices'], {}), '(indices)\n', (8703, 8712), True, 'import numpy as np\n'), ((13344, 13361), 'numpy.log', 'np.log', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (13350, 13361), True, 'import numpy as np\n'), ((14985, 15043), 'numpy.dot', 'np.dot', (['w', "(Dls[p] + fg_model[p, 'all', m['nu1'], m['nu2']])"], {}), "(w, Dls[p] + fg_model[p, 'all', m['nu1'], m['nu2']])\n", (14991, 15043), True, 'import numpy as np\n'), ((1540, 1580), 'os.path.join', 'os.path.join', (['self.packages_path', '"""data"""'], {}), "(self.packages_path, 'data')\n", (1552, 1580), False, 'import os\n'), ((1857, 1978), 'cobaya.log.LoggedError', 'LoggedError', (['self.log', '"""The \'data_folder\' directory does not exist. Check the given path [%s]."""', 'self.data_folder'], {}), '(self.log,\n "The \'data_folder\' directory does not exist. 
Check the given path [%s].",\n self.data_folder)\n', (1868, 1978), False, 'from cobaya.log import LoggedError\n'), ((3896, 3945), 'os.path.join', 'os.path.join', (['self.data_folder', 'self.cov_Bbl_file'], {}), '(self.data_folder, self.cov_Bbl_file)\n', (3908, 3945), False, 'import os\n'), ((4013, 4047), 'sacc.Sacc.load_fits', 'sacc.Sacc.load_fits', (['cov_Bbl_fname'], {}), '(cov_Bbl_fname)\n', (4032, 4047), False, 'import sacc\n'), ((8766, 8785), 'numpy.array', 'np.array', (['indices_b'], {}), '(indices_b)\n', (8774, 8785), True, 'import numpy as np\n'), ((13423, 13450), 'numpy.linalg.slogdet', 'np.linalg.slogdet', (['self.cov'], {}), '(self.cov)\n', (13440, 13450), True, 'import numpy as np\n'), ((12379, 12409), 'numpy.arange', 'np.arange', (['cls.size'], {'dtype': 'int'}), '(cls.size, dtype=int)\n', (12388, 12409), True, 'import numpy as np\n')] |
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# License: Simplified BSD
import copy
import os.path as op
import numpy as np
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_allclose, assert_equal)
import pytest
import matplotlib.pyplot as plt
from mne.channels import (make_eeg_layout, make_grid_layout, read_layout,
find_layout, HEAD_SIZE_DEFAULT)
from mne.channels.layout import (_box_size, _find_topomap_coords,
generate_2d_layout)
from mne.utils import run_tests_if_main
from mne import pick_types, pick_info
from mne.io import read_raw_kit, _empty_info, read_info
from mne.io.constants import FIFF
from mne.utils import _TempDir
# Locations of the test data shipped with the mne.io test suite
io_dir = op.join(op.dirname(__file__), '..', '..', 'io')
fif_fname = op.join(io_dir, 'tests', 'data', 'test_raw.fif')  # FIF raw file
lout_path = op.join(io_dir, 'tests', 'data')  # directory holding .lout layouts
bti_dir = op.join(io_dir, 'bti', 'tests', 'data')
fname_ctf_raw = op.join(io_dir, 'tests', 'data', 'test_ctf_comp_raw.fif')
fname_kit_157 = op.join(io_dir, 'kit', 'tests', 'data', 'test.sqd')
fname_kit_umd = op.join(io_dir, 'kit', 'tests', 'data', 'test_umd-raw.sqd')
def _get_test_info():
    """Make a minimal test info with two ICA channels and one EOG channel."""
    test_info = _empty_info(1000)
    loc = np.array([0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1.],
                   dtype=np.float32)
    # Fix: the first two channel dicts used the key 'coord_Frame' (capital F)
    # while the third used 'coord_frame'; unified on the lower-case spelling
    # used by the third entry so all channels carry the same key.
    test_info['chs'] = [
        {'cal': 1, 'ch_name': 'ICA 001', 'coil_type': 0, 'coord_frame': 0,
         'kind': 502, 'loc': loc.copy(), 'logno': 1, 'range': 1.0, 'scanno': 1,
         'unit': -1, 'unit_mul': 0},
        {'cal': 1, 'ch_name': 'ICA 002', 'coil_type': 0, 'coord_frame': 0,
         'kind': 502, 'loc': loc.copy(), 'logno': 2, 'range': 1.0, 'scanno': 2,
         'unit': -1, 'unit_mul': 0},
        {'cal': 0.002142000012099743, 'ch_name': 'EOG 061', 'coil_type': 1,
         'coord_frame': 0, 'kind': 202, 'loc': loc.copy(), 'logno': 61,
         'range': 1.0, 'scanno': 376, 'unit': 107, 'unit_mul': 0}]
    test_info._update_redundant()
    test_info._check_consistency()
    return test_info
def test_io_layout_lout():
    """Round-trip a Vectorview layout through a .lout file."""
    tempdir = _TempDir()
    fname = op.join(tempdir, 'foobar.lout')
    original = read_layout('Vectorview-all', scale=False)
    original.save(fname)
    reloaded = read_layout(fname, path='./', scale=False)
    assert_array_almost_equal(original.pos, reloaded.pos, decimal=2)
    assert original.names == reloaded.names
    print(original)  # smoke test for __repr__
def test_io_layout_lay():
    """Round-trip a CTF151 layout through a .lay file."""
    tempdir = _TempDir()
    fname = op.join(tempdir, 'foobar.lay')
    original = read_layout('CTF151', scale=False)
    original.save(fname)
    reloaded = read_layout(fname, path='./', scale=False)
    assert_array_almost_equal(original.pos, reloaded.pos, decimal=2)
    assert original.names == reloaded.names
def test_find_topomap_coords():
    """Test mapping of coordinates in 3D space to 2D."""
    info = read_info(fif_fname)
    picks = pick_types(info, meg=False, eeg=True, eog=False, stim=False)
    # Remove extra digitization point, so EEG digitization points match up
    # with the EEG channels
    del info['dig'][85]
    # Use channel locations
    kwargs = dict(ignore_overlap=False, to_sphere=True,
                  sphere=HEAD_SIZE_DEFAULT)
    l0 = _find_topomap_coords(info, picks, **kwargs)
    # Remove electrode position information, use digitization points from now
    # on.
    for ch in info['chs']:
        ch['loc'].fill(np.nan)
    # Digitization-based coordinates should closely match the channel-based ones
    l1 = _find_topomap_coords(info, picks, **kwargs)
    assert_allclose(l1, l0, atol=1e-3)
    # Points on the z=0 plane should keep their (x, y) after projection
    for z_pt in ((HEAD_SIZE_DEFAULT, 0., 0.),
                 (0., HEAD_SIZE_DEFAULT, 0.)):
        info['dig'][-1]['r'] = z_pt
        l1 = _find_topomap_coords(info, picks, **kwargs)
        assert_allclose(l1[-1], z_pt[:2], err_msg='Z=0 point moved', atol=1e-6)
    # Test plotting mag topomap without channel locations: it should fail
    mag_picks = pick_types(info, meg='mag')
    with pytest.raises(ValueError, match='Cannot determine location'):
        _find_topomap_coords(info, mag_picks, **kwargs)
    # Test function with too many EEG digitization points: it should fail
    info['dig'].append({'r': [1, 2, 3], 'kind': FIFF.FIFFV_POINT_EEG})
    with pytest.raises(ValueError, match='Number of EEG digitization points'):
        _find_topomap_coords(info, picks, **kwargs)
    # Test function with too little EEG digitization points: it should fail
    info['dig'] = info['dig'][:-2]
    with pytest.raises(ValueError, match='Number of EEG digitization points'):
        _find_topomap_coords(info, picks, **kwargs)
    # Electrode positions must be unique
    info['dig'].append(info['dig'][-1])
    with pytest.raises(ValueError, match='overlapping positions'):
        _find_topomap_coords(info, picks, **kwargs)
    # Test function without EEG digitization points: it should fail
    info['dig'] = [d for d in info['dig'] if d['kind'] != FIFF.FIFFV_POINT_EEG]
    with pytest.raises(RuntimeError, match='Did not find any digitization'):
        _find_topomap_coords(info, picks, **kwargs)
    # Test function without any digitization points, it should fail
    info['dig'] = None
    with pytest.raises(RuntimeError, match='No digitization points found'):
        _find_topomap_coords(info, picks, **kwargs)
    # An empty dig list should fail the same way as None
    info['dig'] = []
    with pytest.raises(RuntimeError, match='No digitization points found'):
        _find_topomap_coords(info, picks, **kwargs)
def test_make_eeg_layout():
    """Test creation of EEG layout.

    Round-trips a layout built from the sample ``info`` through
    ``save``/``read_layout`` and checks that invalid geometry
    parameters are rejected.
    """
    out_dir = _TempDir()
    base_name = 'foo'
    reference = read_layout(kind='test_raw', path=lout_path)
    info = read_info(fif_fname)
    # Mark one channel bad; make_eeg_layout(exclude=[]) must keep it anyway.
    info['bads'].append(info['ch_names'][360])
    layout = make_eeg_layout(info, exclude=[])
    # One layout entry per EEG channel ('EE...' prefix in this recording).
    n_eeg = sum(ch.startswith('EE') for ch in info['ch_names'])
    assert_array_equal(len(layout.names), n_eeg)
    # Save and reload: kind, positions and names must survive the round trip.
    layout.save(op.join(out_dir, base_name + '.lout'))
    reread = read_layout(kind=base_name, path=out_dir, scale=False)
    assert_array_equal(reread.kind, base_name)
    assert_allclose(layout.pos, reread.pos, atol=0.1)
    assert_array_equal(reference.names, reread.names)
    # Out-of-range geometry parameters must raise ValueError.
    for param, bad_value in [('radius', -0.1), ('radius', 0.6),
                             ('width', -0.1), ('width', 1.1),
                             ('height', -0.1), ('height', 1.1)]:
        pytest.raises(ValueError, make_eeg_layout, info, **{param: bad_value})
def test_make_grid_layout():
    """Test creation of grid layout.

    Checks the save/read round trip against a reference layout and the
    placement produced when the number of columns is fixed.
    """
    out_dir = _TempDir()
    base_name = 'bar'
    reference = read_layout(kind='test_ica', path=lout_path)
    layout = make_grid_layout(_get_test_info())
    layout.save(op.join(out_dir, base_name + '.lout'))
    reread = read_layout(kind=base_name, path=out_dir)
    # The reloaded layout must match both the saved one and the reference.
    assert_array_equal(reread.kind, base_name)
    assert_array_equal(reference.pos, reread.pos)
    assert_array_equal(reference.names, reread.names)
    # With n_col=2 the first two boxes share a row: same y, different x,
    # identical box sizes.
    layout = make_grid_layout(_get_test_info(), n_col=2)
    first, second = layout.pos[0], layout.pos[1]
    assert first[1] == second[1]
    assert first[0] != second[0]
    assert_array_equal(first[3:], second[3:])
def test_find_layout():
    """Test finding layout.

    Exercises ``find_layout`` over Vectorview (all/grad/mag), EEG, CTF,
    BTi/4D and KIT recordings, both with ``ch_type`` given explicitly and
    inferred from the channels present in ``info``.
    """
    # Unknown ch_type is rejected up front.
    pytest.raises(ValueError, find_layout, _get_test_info(), ch_type='meep')
    sample_info = read_info(fif_fname)
    grads = pick_types(sample_info, meg='grad')
    sample_info2 = pick_info(sample_info, grads)   # gradiometers only
    mags = pick_types(sample_info, meg='mag')
    sample_info3 = pick_info(sample_info, mags)    # magnetometers only
    # mock new convention: channel names without the space ('MEG0113'
    # instead of 'MEG 0113')
    sample_info4 = copy.deepcopy(sample_info)
    for ii, name in enumerate(sample_info4['ch_names']):
        new = name.replace(' ', '')
        sample_info4['chs'][ii]['ch_name'] = new
    eegs = pick_types(sample_info, meg=False, eeg=True)
    sample_info5 = pick_info(sample_info, eegs)    # EEG only
    # Full Vectorview info -> combined layout, old-style names (with space).
    lout = find_layout(sample_info, ch_type=None)
    assert lout.kind == 'Vectorview-all'
    assert all(' ' in k for k in lout.names)
    lout = find_layout(sample_info2, ch_type='meg')
    assert_equal(lout.kind, 'Vectorview-all')
    # test new vector-view: new-style names must come back without spaces.
    lout = find_layout(sample_info4, ch_type=None)
    assert_equal(lout.kind, 'Vectorview-all')
    assert all(' ' not in k for k in lout.names)
    # Grad layout: explicit ch_type, inferred from grad-only info, or both.
    lout = find_layout(sample_info, ch_type='grad')
    assert_equal(lout.kind, 'Vectorview-grad')
    lout = find_layout(sample_info2)
    assert_equal(lout.kind, 'Vectorview-grad')
    lout = find_layout(sample_info2, ch_type='grad')
    assert_equal(lout.kind, 'Vectorview-grad')
    lout = find_layout(sample_info2, ch_type='meg')
    assert_equal(lout.kind, 'Vectorview-all')
    # Mag layout: same three access patterns.
    lout = find_layout(sample_info, ch_type='mag')
    assert_equal(lout.kind, 'Vectorview-mag')
    lout = find_layout(sample_info3)
    assert_equal(lout.kind, 'Vectorview-mag')
    lout = find_layout(sample_info3, ch_type='mag')
    assert_equal(lout.kind, 'Vectorview-mag')
    lout = find_layout(sample_info3, ch_type='meg')
    assert_equal(lout.kind, 'Vectorview-all')
    # EEG layout: explicit ch_type or inferred from EEG-only info.
    lout = find_layout(sample_info, ch_type='eeg')
    assert_equal(lout.kind, 'EEG')
    lout = find_layout(sample_info5)
    assert_equal(lout.kind, 'EEG')
    lout = find_layout(sample_info5, ch_type='eeg')
    assert_equal(lout.kind, 'EEG')
    # no common layout, 'meg' option not supported
    lout = find_layout(read_info(fname_ctf_raw))
    assert_equal(lout.kind, 'CTF-275')
    fname_bti_raw = op.join(bti_dir, 'exported4D_linux_raw.fif')
    lout = find_layout(read_info(fname_bti_raw))
    assert_equal(lout.kind, 'magnesWH3600')
    raw_kit = read_raw_kit(fname_kit_157)
    lout = find_layout(raw_kit.info)
    assert_equal(lout.kind, 'KIT-157')
    # Bad channels must not change which layout is selected.
    raw_kit.info['bads'] = ['MEG 13', 'MEG 14', 'MEG 15', 'MEG 16']
    lout = find_layout(raw_kit.info)
    assert_equal(lout.kind, 'KIT-157')
    # fallback for missing IDs: unknown kit_system_id yields a custom layout.
    raw_kit.info['kit_system_id'] = 35
    lout = find_layout(raw_kit.info)
    assert lout.kind == 'custom'
    raw_umd = read_raw_kit(fname_kit_umd)
    lout = find_layout(raw_umd.info)
    assert_equal(lout.kind, 'KIT-UMD-3')
    # Test plotting
    lout.plot()
    lout.plot(picks=np.arange(10))
    plt.close('all')
def test_box_size():
    """Test calculation of box sizes.

    ``_box_size`` returns the (width, height) of the largest
    non-overlapping box that can be drawn around each of the given 2D
    points; either dimension may be fixed by the caller.
    """
    # No points. Box size should be 1,1.
    assert_allclose(_box_size([]), (1.0, 1.0))
    # Create one point. Box size should be 1,1.
    point = [(0, 0)]
    assert_allclose(_box_size(point), (1.0, 1.0))
    # Create two points. Box size should be 0.5,1.
    points = [(0.25, 0.5), (0.75, 0.5)]
    assert_allclose(_box_size(points), (0.5, 1.0))
    # Create three points. Box size should be (0.5, 0.5).
    points = [(0.25, 0.25), (0.75, 0.25), (0.5, 0.75)]
    assert_allclose(_box_size(points), (0.5, 0.5))
    # Create a grid of points. Box size should be (0.1, 0.1).
    x, y = np.meshgrid(np.linspace(-0.5, 0.5, 11), np.linspace(-0.5, 0.5, 11))
    x, y = x.ravel(), y.ravel()
    assert_allclose(_box_size(np.c_[x, y]), (0.1, 0.1))
    # Create a random set of points. This should never break the function.
    rng = np.random.RandomState(42)
    points = rng.rand(100, 2)
    width, height = _box_size(points)
    assert width is not None
    assert height is not None
    # Test specifying an existing width.
    points = [(0.25, 0.25), (0.75, 0.25), (0.5, 0.75)]
    assert_allclose(_box_size(points, width=0.4), (0.4, 0.5))
    # Test specifying an existing width that has influence on the calculated
    # height.
    points = [(0.25, 0.25), (0.75, 0.25), (0.5, 0.75)]
    assert_allclose(_box_size(points, width=0.2), (0.2, 1.0))
    # Test specifying an existing height.
    points = [(0.25, 0.25), (0.75, 0.25), (0.5, 0.75)]
    assert_allclose(_box_size(points, height=0.4), (0.5, 0.4))
    # Test specifying an existing height that has influence on the calculated
    # width.
    points = [(0.25, 0.25), (0.75, 0.45), (0.5, 0.75)]
    assert_allclose(_box_size(points, height=0.1), (1.0, 0.1))
    # Test specifying both width and height. The function should simply return
    # these.
    points = [(0.25, 0.25), (0.75, 0.45), (0.5, 0.75)]
    assert_array_equal(_box_size(points, width=0.1, height=0.1), (0.1, 0.1))
    # Test specifying a width that will cause unfixable horizontal overlap and
    # essentially breaks the function (height will be 0).
    points = [(0.25, 0.25), (0.75, 0.25), (0.5, 0.75)]
    assert_array_equal(_box_size(points, width=1), (1, 0))
    # Test adding some padding.
    # Create three points. Box size should be a little less than (0.5, 0.5).
    points = [(0.25, 0.25), (0.75, 0.25), (0.5, 0.75)]
    assert_allclose(_box_size(points, padding=0.1), (0.9 * 0.5, 0.9 * 0.5))
def test_generate_2d_layout():
    """Test creation of a layout from 2d points.

    Builds a layout from a square grid of (x, y) points and checks
    position normalization, box sizes, and the scaling applied when a
    background image is supplied.
    """
    snobg = 10   # grid side length (number of points per side)
    sbg = 15     # background-image side length in pixels
    side = range(snobg)
    bg_image = np.random.RandomState(42).randn(sbg, sbg)
    w, h = [.2, .5]
    # Generate fake data
    xy = np.array([(i, j) for i in side for j in side])
    lt = generate_2d_layout(xy, w=w, h=h)
    # Correct points ordering / minmaxing: positions are min-max scaled
    # into [0, 1], and ratios between points are preserved.
    comp_1, comp_2 = [(5, 0), (7, 0)]
    assert lt.pos[:, :2].max() == 1
    assert lt.pos[:, :2].min() == 0
    with np.errstate(invalid='ignore'):  # divide by zero
        assert_allclose(xy[comp_2] / float(xy[comp_1]),
                        lt.pos[comp_2] / float(lt.pos[comp_1]))
    # Requested box width/height are stored in columns 2 and 3.
    assert_allclose(lt.pos[0, [2, 3]], [w, h])
    # Correct number elements
    assert lt.pos.shape[1] == 4
    assert len(lt.box) == 4
    # Make sure background image normalizing is correct
    lt_bg = generate_2d_layout(xy, bg_image=bg_image)
    assert_allclose(lt_bg.pos[:, :2].max(), xy.max() / float(sbg))
# Legacy MNE hook: run this module's tests when executed as a script.
run_tests_if_main()
| [
"mne.channels.find_layout",
"numpy.testing.assert_equal",
"mne.pick_info",
"numpy.array",
"mne.utils.run_tests_if_main",
"copy.deepcopy",
"mne.channels.make_eeg_layout",
"mne.channels.read_layout",
"numpy.random.RandomState",
"numpy.arange",
"numpy.testing.assert_array_almost_equal",
"mne.io.r... | [((893, 941), 'os.path.join', 'op.join', (['io_dir', '"""tests"""', '"""data"""', '"""test_raw.fif"""'], {}), "(io_dir, 'tests', 'data', 'test_raw.fif')\n", (900, 941), True, 'import os.path as op\n'), ((954, 986), 'os.path.join', 'op.join', (['io_dir', '"""tests"""', '"""data"""'], {}), "(io_dir, 'tests', 'data')\n", (961, 986), True, 'import os.path as op\n'), ((997, 1036), 'os.path.join', 'op.join', (['io_dir', '"""bti"""', '"""tests"""', '"""data"""'], {}), "(io_dir, 'bti', 'tests', 'data')\n", (1004, 1036), True, 'import os.path as op\n'), ((1053, 1110), 'os.path.join', 'op.join', (['io_dir', '"""tests"""', '"""data"""', '"""test_ctf_comp_raw.fif"""'], {}), "(io_dir, 'tests', 'data', 'test_ctf_comp_raw.fif')\n", (1060, 1110), True, 'import os.path as op\n'), ((1127, 1178), 'os.path.join', 'op.join', (['io_dir', '"""kit"""', '"""tests"""', '"""data"""', '"""test.sqd"""'], {}), "(io_dir, 'kit', 'tests', 'data', 'test.sqd')\n", (1134, 1178), True, 'import os.path as op\n'), ((1195, 1254), 'os.path.join', 'op.join', (['io_dir', '"""kit"""', '"""tests"""', '"""data"""', '"""test_umd-raw.sqd"""'], {}), "(io_dir, 'kit', 'tests', 'data', 'test_umd-raw.sqd')\n", (1202, 1254), True, 'import os.path as op\n'), ((14271, 14290), 'mne.utils.run_tests_if_main', 'run_tests_if_main', ([], {}), '()\n', (14288, 14290), False, 'from mne.utils import run_tests_if_main\n'), ((841, 861), 'os.path.dirname', 'op.dirname', (['__file__'], {}), '(__file__)\n', (851, 861), True, 'import os.path as op\n'), ((1321, 1338), 'mne.io._empty_info', '_empty_info', (['(1000)'], {}), '(1000)\n', (1332, 1338), False, 'from mne.io import read_raw_kit, _empty_info, read_info\n'), ((1349, 1441), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]'], {'dtype': 'np.float32'}), '([0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0],\n dtype=np.float32)\n', (1357, 1441), True, 'import numpy as np\n'), ((2238, 2248), 'mne.utils._TempDir', 
'_TempDir', ([], {}), '()\n', (2246, 2248), False, 'from mne.utils import _TempDir\n'), ((2262, 2304), 'mne.channels.read_layout', 'read_layout', (['"""Vectorview-all"""'], {'scale': '(False)'}), "('Vectorview-all', scale=False)\n", (2273, 2304), False, 'from mne.channels import make_eeg_layout, make_grid_layout, read_layout, find_layout, HEAD_SIZE_DEFAULT\n'), ((2475, 2540), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['layout.pos', 'layout_read.pos'], {'decimal': '(2)'}), '(layout.pos, layout_read.pos, decimal=2)\n', (2500, 2540), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal, assert_allclose, assert_equal\n'), ((2694, 2704), 'mne.utils._TempDir', '_TempDir', ([], {}), '()\n', (2702, 2704), False, 'from mne.utils import _TempDir\n'), ((2718, 2752), 'mne.channels.read_layout', 'read_layout', (['"""CTF151"""'], {'scale': '(False)'}), "('CTF151', scale=False)\n", (2729, 2752), False, 'from mne.channels import make_eeg_layout, make_grid_layout, read_layout, find_layout, HEAD_SIZE_DEFAULT\n'), ((2921, 2986), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['layout.pos', 'layout_read.pos'], {'decimal': '(2)'}), '(layout.pos, layout_read.pos, decimal=2)\n', (2946, 2986), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal, assert_allclose, assert_equal\n'), ((3134, 3154), 'mne.io.read_info', 'read_info', (['fif_fname'], {}), '(fif_fname)\n', (3143, 3154), False, 'from mne.io import read_raw_kit, _empty_info, read_info\n'), ((3167, 3227), 'mne.pick_types', 'pick_types', (['info'], {'meg': '(False)', 'eeg': '(True)', 'eog': '(False)', 'stim': '(False)'}), '(info, meg=False, eeg=True, eog=False, stim=False)\n', (3177, 3227), False, 'from mne import pick_types, pick_info\n'), ((3494, 3537), 'mne.channels.layout._find_topomap_coords', '_find_topomap_coords', (['info', 'picks'], {}), '(info, picks, **kwargs)\n', (3514, 3537), False, 'from mne.channels.layout 
import _box_size, _find_topomap_coords, generate_2d_layout\n'), ((3695, 3738), 'mne.channels.layout._find_topomap_coords', '_find_topomap_coords', (['info', 'picks'], {}), '(info, picks, **kwargs)\n', (3715, 3738), False, 'from mne.channels.layout import _box_size, _find_topomap_coords, generate_2d_layout\n'), ((3743, 3778), 'numpy.testing.assert_allclose', 'assert_allclose', (['l1', 'l0'], {'atol': '(0.001)'}), '(l1, l0, atol=0.001)\n', (3758, 3778), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal, assert_allclose, assert_equal\n'), ((4136, 4163), 'mne.pick_types', 'pick_types', (['info'], {'meg': '"""mag"""'}), "(info, meg='mag')\n", (4146, 4163), False, 'from mne import pick_types, pick_info\n'), ((5742, 5752), 'mne.utils._TempDir', '_TempDir', ([], {}), '()\n', (5750, 5752), False, 'from mne.utils import _TempDir\n'), ((5817, 5860), 'mne.channels.read_layout', 'read_layout', ([], {'kind': 'lout_name', 'path': 'lout_path'}), '(kind=lout_name, path=lout_path)\n', (5828, 5860), False, 'from mne.channels import make_eeg_layout, make_grid_layout, read_layout, find_layout, HEAD_SIZE_DEFAULT\n'), ((5872, 5892), 'mne.io.read_info', 'read_info', (['fif_fname'], {}), '(fif_fname)\n', (5881, 5892), False, 'from mne.io import read_raw_kit, _empty_info, read_info\n'), ((5953, 5986), 'mne.channels.make_eeg_layout', 'make_eeg_layout', (['info'], {'exclude': '[]'}), '(info, exclude=[])\n', (5968, 5986), False, 'from mne.channels import make_eeg_layout, make_grid_layout, read_layout, find_layout, HEAD_SIZE_DEFAULT\n'), ((6206, 6259), 'mne.channels.read_layout', 'read_layout', ([], {'kind': 'tmp_name', 'path': 'tempdir', 'scale': '(False)'}), '(kind=tmp_name, path=tempdir, scale=False)\n', (6217, 6259), False, 'from mne.channels import make_eeg_layout, make_grid_layout, read_layout, find_layout, HEAD_SIZE_DEFAULT\n'), ((6264, 6307), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['lout_new.kind', 'tmp_name'], {}), '(lout_new.kind, 
tmp_name)\n', (6282, 6307), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal, assert_allclose, assert_equal\n'), ((6312, 6363), 'numpy.testing.assert_allclose', 'assert_allclose', (['layout.pos', 'lout_new.pos'], {'atol': '(0.1)'}), '(layout.pos, lout_new.pos, atol=0.1)\n', (6327, 6363), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal, assert_allclose, assert_equal\n'), ((6368, 6419), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['lout_orig.names', 'lout_new.names'], {}), '(lout_orig.names, lout_new.names)\n', (6386, 6419), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal, assert_allclose, assert_equal\n'), ((6453, 6514), 'pytest.raises', 'pytest.raises', (['ValueError', 'make_eeg_layout', 'info'], {'radius': '(-0.1)'}), '(ValueError, make_eeg_layout, info, radius=-0.1)\n', (6466, 6514), False, 'import pytest\n'), ((6519, 6579), 'pytest.raises', 'pytest.raises', (['ValueError', 'make_eeg_layout', 'info'], {'radius': '(0.6)'}), '(ValueError, make_eeg_layout, info, radius=0.6)\n', (6532, 6579), False, 'import pytest\n'), ((6584, 6644), 'pytest.raises', 'pytest.raises', (['ValueError', 'make_eeg_layout', 'info'], {'width': '(-0.1)'}), '(ValueError, make_eeg_layout, info, width=-0.1)\n', (6597, 6644), False, 'import pytest\n'), ((6649, 6708), 'pytest.raises', 'pytest.raises', (['ValueError', 'make_eeg_layout', 'info'], {'width': '(1.1)'}), '(ValueError, make_eeg_layout, info, width=1.1)\n', (6662, 6708), False, 'import pytest\n'), ((6713, 6774), 'pytest.raises', 'pytest.raises', (['ValueError', 'make_eeg_layout', 'info'], {'height': '(-0.1)'}), '(ValueError, make_eeg_layout, info, height=-0.1)\n', (6726, 6774), False, 'import pytest\n'), ((6779, 6839), 'pytest.raises', 'pytest.raises', (['ValueError', 'make_eeg_layout', 'info'], {'height': '(1.1)'}), '(ValueError, make_eeg_layout, info, height=1.1)\n', (6792, 6839), False, 'import pytest\n'), ((6925, 6935), 
'mne.utils._TempDir', '_TempDir', ([], {}), '()\n', (6933, 6935), False, 'from mne.utils import _TempDir\n'), ((7000, 7043), 'mne.channels.read_layout', 'read_layout', ([], {'kind': 'lout_name', 'path': 'lout_path'}), '(kind=lout_name, path=lout_path)\n', (7011, 7043), False, 'from mne.channels import make_eeg_layout, make_grid_layout, read_layout, find_layout, HEAD_SIZE_DEFAULT\n'), ((7161, 7201), 'mne.channels.read_layout', 'read_layout', ([], {'kind': 'tmp_name', 'path': 'tempdir'}), '(kind=tmp_name, path=tempdir)\n', (7172, 7201), False, 'from mne.channels import make_eeg_layout, make_grid_layout, read_layout, find_layout, HEAD_SIZE_DEFAULT\n'), ((7206, 7249), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['lout_new.kind', 'tmp_name'], {}), '(lout_new.kind, tmp_name)\n', (7224, 7249), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal, assert_allclose, assert_equal\n'), ((7254, 7301), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['lout_orig.pos', 'lout_new.pos'], {}), '(lout_orig.pos, lout_new.pos)\n', (7272, 7301), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal, assert_allclose, assert_equal\n'), ((7306, 7357), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['lout_orig.names', 'lout_new.names'], {}), '(lout_orig.names, lout_new.names)\n', (7324, 7357), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal, assert_allclose, assert_equal\n'), ((7699, 7755), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['layout.pos[0, 3:]', 'layout.pos[1, 3:]'], {}), '(layout.pos[0, 3:], layout.pos[1, 3:])\n', (7717, 7755), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal, assert_allclose, assert_equal\n'), ((7909, 7929), 'mne.io.read_info', 'read_info', (['fif_fname'], {}), '(fif_fname)\n', (7918, 7929), False, 'from mne.io import read_raw_kit, _empty_info, read_info\n'), ((7942, 7977), 'mne.pick_types', 
'pick_types', (['sample_info'], {'meg': '"""grad"""'}), "(sample_info, meg='grad')\n", (7952, 7977), False, 'from mne import pick_types, pick_info\n'), ((7997, 8026), 'mne.pick_info', 'pick_info', (['sample_info', 'grads'], {}), '(sample_info, grads)\n', (8006, 8026), False, 'from mne import pick_types, pick_info\n'), ((8039, 8073), 'mne.pick_types', 'pick_types', (['sample_info'], {'meg': '"""mag"""'}), "(sample_info, meg='mag')\n", (8049, 8073), False, 'from mne import pick_types, pick_info\n'), ((8093, 8121), 'mne.pick_info', 'pick_info', (['sample_info', 'mags'], {}), '(sample_info, mags)\n', (8102, 8121), False, 'from mne import pick_types, pick_info\n'), ((8168, 8194), 'copy.deepcopy', 'copy.deepcopy', (['sample_info'], {}), '(sample_info)\n', (8181, 8194), False, 'import copy\n'), ((8349, 8393), 'mne.pick_types', 'pick_types', (['sample_info'], {'meg': '(False)', 'eeg': '(True)'}), '(sample_info, meg=False, eeg=True)\n', (8359, 8393), False, 'from mne import pick_types, pick_info\n'), ((8413, 8441), 'mne.pick_info', 'pick_info', (['sample_info', 'eegs'], {}), '(sample_info, eegs)\n', (8422, 8441), False, 'from mne import pick_types, pick_info\n'), ((8454, 8492), 'mne.channels.find_layout', 'find_layout', (['sample_info'], {'ch_type': 'None'}), '(sample_info, ch_type=None)\n', (8465, 8492), False, 'from mne.channels import make_eeg_layout, make_grid_layout, read_layout, find_layout, HEAD_SIZE_DEFAULT\n'), ((8591, 8631), 'mne.channels.find_layout', 'find_layout', (['sample_info2'], {'ch_type': '"""meg"""'}), "(sample_info2, ch_type='meg')\n", (8602, 8631), False, 'from mne.channels import make_eeg_layout, make_grid_layout, read_layout, find_layout, HEAD_SIZE_DEFAULT\n'), ((8636, 8677), 'numpy.testing.assert_equal', 'assert_equal', (['lout.kind', '"""Vectorview-all"""'], {}), "(lout.kind, 'Vectorview-all')\n", (8648, 8677), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal, assert_allclose, assert_equal\n'), ((8717, 8756), 
'mne.channels.find_layout', 'find_layout', (['sample_info4'], {'ch_type': 'None'}), '(sample_info4, ch_type=None)\n', (8728, 8756), False, 'from mne.channels import make_eeg_layout, make_grid_layout, read_layout, find_layout, HEAD_SIZE_DEFAULT\n'), ((8761, 8802), 'numpy.testing.assert_equal', 'assert_equal', (['lout.kind', '"""Vectorview-all"""'], {}), "(lout.kind, 'Vectorview-all')\n", (8773, 8802), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal, assert_allclose, assert_equal\n'), ((8864, 8904), 'mne.channels.find_layout', 'find_layout', (['sample_info'], {'ch_type': '"""grad"""'}), "(sample_info, ch_type='grad')\n", (8875, 8904), False, 'from mne.channels import make_eeg_layout, make_grid_layout, read_layout, find_layout, HEAD_SIZE_DEFAULT\n'), ((8909, 8951), 'numpy.testing.assert_equal', 'assert_equal', (['lout.kind', '"""Vectorview-grad"""'], {}), "(lout.kind, 'Vectorview-grad')\n", (8921, 8951), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal, assert_allclose, assert_equal\n'), ((8963, 8988), 'mne.channels.find_layout', 'find_layout', (['sample_info2'], {}), '(sample_info2)\n', (8974, 8988), False, 'from mne.channels import make_eeg_layout, make_grid_layout, read_layout, find_layout, HEAD_SIZE_DEFAULT\n'), ((8993, 9035), 'numpy.testing.assert_equal', 'assert_equal', (['lout.kind', '"""Vectorview-grad"""'], {}), "(lout.kind, 'Vectorview-grad')\n", (9005, 9035), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal, assert_allclose, assert_equal\n'), ((9047, 9088), 'mne.channels.find_layout', 'find_layout', (['sample_info2'], {'ch_type': '"""grad"""'}), "(sample_info2, ch_type='grad')\n", (9058, 9088), False, 'from mne.channels import make_eeg_layout, make_grid_layout, read_layout, find_layout, HEAD_SIZE_DEFAULT\n'), ((9093, 9135), 'numpy.testing.assert_equal', 'assert_equal', (['lout.kind', '"""Vectorview-grad"""'], {}), "(lout.kind, 'Vectorview-grad')\n", (9105, 9135), 
False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal, assert_allclose, assert_equal\n'), ((9147, 9187), 'mne.channels.find_layout', 'find_layout', (['sample_info2'], {'ch_type': '"""meg"""'}), "(sample_info2, ch_type='meg')\n", (9158, 9187), False, 'from mne.channels import make_eeg_layout, make_grid_layout, read_layout, find_layout, HEAD_SIZE_DEFAULT\n'), ((9192, 9233), 'numpy.testing.assert_equal', 'assert_equal', (['lout.kind', '"""Vectorview-all"""'], {}), "(lout.kind, 'Vectorview-all')\n", (9204, 9233), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal, assert_allclose, assert_equal\n'), ((9246, 9285), 'mne.channels.find_layout', 'find_layout', (['sample_info'], {'ch_type': '"""mag"""'}), "(sample_info, ch_type='mag')\n", (9257, 9285), False, 'from mne.channels import make_eeg_layout, make_grid_layout, read_layout, find_layout, HEAD_SIZE_DEFAULT\n'), ((9290, 9331), 'numpy.testing.assert_equal', 'assert_equal', (['lout.kind', '"""Vectorview-mag"""'], {}), "(lout.kind, 'Vectorview-mag')\n", (9302, 9331), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal, assert_allclose, assert_equal\n'), ((9343, 9368), 'mne.channels.find_layout', 'find_layout', (['sample_info3'], {}), '(sample_info3)\n', (9354, 9368), False, 'from mne.channels import make_eeg_layout, make_grid_layout, read_layout, find_layout, HEAD_SIZE_DEFAULT\n'), ((9373, 9414), 'numpy.testing.assert_equal', 'assert_equal', (['lout.kind', '"""Vectorview-mag"""'], {}), "(lout.kind, 'Vectorview-mag')\n", (9385, 9414), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal, assert_allclose, assert_equal\n'), ((9426, 9466), 'mne.channels.find_layout', 'find_layout', (['sample_info3'], {'ch_type': '"""mag"""'}), "(sample_info3, ch_type='mag')\n", (9437, 9466), False, 'from mne.channels import make_eeg_layout, make_grid_layout, read_layout, find_layout, HEAD_SIZE_DEFAULT\n'), ((9471, 9512), 
'numpy.testing.assert_equal', 'assert_equal', (['lout.kind', '"""Vectorview-mag"""'], {}), "(lout.kind, 'Vectorview-mag')\n", (9483, 9512), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal, assert_allclose, assert_equal\n'), ((9524, 9564), 'mne.channels.find_layout', 'find_layout', (['sample_info3'], {'ch_type': '"""meg"""'}), "(sample_info3, ch_type='meg')\n", (9535, 9564), False, 'from mne.channels import make_eeg_layout, make_grid_layout, read_layout, find_layout, HEAD_SIZE_DEFAULT\n'), ((9569, 9610), 'numpy.testing.assert_equal', 'assert_equal', (['lout.kind', '"""Vectorview-all"""'], {}), "(lout.kind, 'Vectorview-all')\n", (9581, 9610), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal, assert_allclose, assert_equal\n'), ((9623, 9662), 'mne.channels.find_layout', 'find_layout', (['sample_info'], {'ch_type': '"""eeg"""'}), "(sample_info, ch_type='eeg')\n", (9634, 9662), False, 'from mne.channels import make_eeg_layout, make_grid_layout, read_layout, find_layout, HEAD_SIZE_DEFAULT\n'), ((9667, 9697), 'numpy.testing.assert_equal', 'assert_equal', (['lout.kind', '"""EEG"""'], {}), "(lout.kind, 'EEG')\n", (9679, 9697), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal, assert_allclose, assert_equal\n'), ((9709, 9734), 'mne.channels.find_layout', 'find_layout', (['sample_info5'], {}), '(sample_info5)\n', (9720, 9734), False, 'from mne.channels import make_eeg_layout, make_grid_layout, read_layout, find_layout, HEAD_SIZE_DEFAULT\n'), ((9739, 9769), 'numpy.testing.assert_equal', 'assert_equal', (['lout.kind', '"""EEG"""'], {}), "(lout.kind, 'EEG')\n", (9751, 9769), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal, assert_allclose, assert_equal\n'), ((9781, 9821), 'mne.channels.find_layout', 'find_layout', (['sample_info5'], {'ch_type': '"""eeg"""'}), "(sample_info5, ch_type='eeg')\n", (9792, 9821), False, 'from mne.channels import make_eeg_layout, 
make_grid_layout, read_layout, find_layout, HEAD_SIZE_DEFAULT\n'), ((9826, 9856), 'numpy.testing.assert_equal', 'assert_equal', (['lout.kind', '"""EEG"""'], {}), "(lout.kind, 'EEG')\n", (9838, 9856), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal, assert_allclose, assert_equal\n'), ((9962, 9996), 'numpy.testing.assert_equal', 'assert_equal', (['lout.kind', '"""CTF-275"""'], {}), "(lout.kind, 'CTF-275')\n", (9974, 9996), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal, assert_allclose, assert_equal\n'), ((10018, 10062), 'os.path.join', 'op.join', (['bti_dir', '"""exported4D_linux_raw.fif"""'], {}), "(bti_dir, 'exported4D_linux_raw.fif')\n", (10025, 10062), True, 'import os.path as op\n'), ((10116, 10155), 'numpy.testing.assert_equal', 'assert_equal', (['lout.kind', '"""magnesWH3600"""'], {}), "(lout.kind, 'magnesWH3600')\n", (10128, 10155), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal, assert_allclose, assert_equal\n'), ((10171, 10198), 'mne.io.read_raw_kit', 'read_raw_kit', (['fname_kit_157'], {}), '(fname_kit_157)\n', (10183, 10198), False, 'from mne.io import read_raw_kit, _empty_info, read_info\n'), ((10210, 10235), 'mne.channels.find_layout', 'find_layout', (['raw_kit.info'], {}), '(raw_kit.info)\n', (10221, 10235), False, 'from mne.channels import make_eeg_layout, make_grid_layout, read_layout, find_layout, HEAD_SIZE_DEFAULT\n'), ((10240, 10274), 'numpy.testing.assert_equal', 'assert_equal', (['lout.kind', '"""KIT-157"""'], {}), "(lout.kind, 'KIT-157')\n", (10252, 10274), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal, assert_allclose, assert_equal\n'), ((10359, 10384), 'mne.channels.find_layout', 'find_layout', (['raw_kit.info'], {}), '(raw_kit.info)\n', (10370, 10384), False, 'from mne.channels import make_eeg_layout, make_grid_layout, read_layout, find_layout, HEAD_SIZE_DEFAULT\n'), ((10389, 10423), 
'numpy.testing.assert_equal', 'assert_equal', (['lout.kind', '"""KIT-157"""'], {}), "(lout.kind, 'KIT-157')\n", (10401, 10423), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal, assert_allclose, assert_equal\n'), ((10505, 10530), 'mne.channels.find_layout', 'find_layout', (['raw_kit.info'], {}), '(raw_kit.info)\n', (10516, 10530), False, 'from mne.channels import make_eeg_layout, make_grid_layout, read_layout, find_layout, HEAD_SIZE_DEFAULT\n'), ((10579, 10606), 'mne.io.read_raw_kit', 'read_raw_kit', (['fname_kit_umd'], {}), '(fname_kit_umd)\n', (10591, 10606), False, 'from mne.io import read_raw_kit, _empty_info, read_info\n'), ((10618, 10643), 'mne.channels.find_layout', 'find_layout', (['raw_umd.info'], {}), '(raw_umd.info)\n', (10629, 10643), False, 'from mne.channels import make_eeg_layout, make_grid_layout, read_layout, find_layout, HEAD_SIZE_DEFAULT\n'), ((10648, 10684), 'numpy.testing.assert_equal', 'assert_equal', (['lout.kind', '"""KIT-UMD-3"""'], {}), "(lout.kind, 'KIT-UMD-3')\n", (10660, 10684), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal, assert_allclose, assert_equal\n'), ((10761, 10777), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (10770, 10777), True, 'import matplotlib.pyplot as plt\n'), ((11674, 11699), 'numpy.random.RandomState', 'np.random.RandomState', (['(42)'], {}), '(42)\n', (11695, 11699), True, 'import numpy as np\n'), ((11750, 11767), 'mne.channels.layout._box_size', '_box_size', (['points'], {}), '(points)\n', (11759, 11767), False, 'from mne.channels.layout import _box_size, _find_topomap_coords, generate_2d_layout\n'), ((13533, 13579), 'numpy.array', 'np.array', (['[(i, j) for i in side for j in side]'], {}), '([(i, j) for i in side for j in side])\n', (13541, 13579), True, 'import numpy as np\n'), ((13589, 13621), 'mne.channels.layout.generate_2d_layout', 'generate_2d_layout', (['xy'], {'w': 'w', 'h': 'h'}), '(xy, w=w, h=h)\n', (13607, 
13621), False, 'from mne.channels.layout import _box_size, _find_topomap_coords, generate_2d_layout\n'), ((13957, 13999), 'numpy.testing.assert_allclose', 'assert_allclose', (['lt.pos[0, [2, 3]]', '[w, h]'], {}), '(lt.pos[0, [2, 3]], [w, h])\n', (13972, 13999), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal, assert_allclose, assert_equal\n'), ((14160, 14201), 'mne.channels.layout.generate_2d_layout', 'generate_2d_layout', (['xy'], {'bg_image': 'bg_image'}), '(xy, bg_image=bg_image)\n', (14178, 14201), False, 'from mne.channels.layout import _box_size, _find_topomap_coords, generate_2d_layout\n'), ((2321, 2352), 'os.path.join', 'op.join', (['tempdir', '"""foobar.lout"""'], {}), "(tempdir, 'foobar.lout')\n", (2328, 2352), True, 'import os.path as op\n'), ((2384, 2415), 'os.path.join', 'op.join', (['tempdir', '"""foobar.lout"""'], {}), "(tempdir, 'foobar.lout')\n", (2391, 2415), True, 'import os.path as op\n'), ((2769, 2799), 'os.path.join', 'op.join', (['tempdir', '"""foobar.lay"""'], {}), "(tempdir, 'foobar.lay')\n", (2776, 2799), True, 'import os.path as op\n'), ((2831, 2861), 'os.path.join', 'op.join', (['tempdir', '"""foobar.lay"""'], {}), "(tempdir, 'foobar.lay')\n", (2838, 2861), True, 'import os.path as op\n'), ((3921, 3964), 'mne.channels.layout._find_topomap_coords', '_find_topomap_coords', (['info', 'picks'], {}), '(info, picks, **kwargs)\n', (3941, 3964), False, 'from mne.channels.layout import _box_size, _find_topomap_coords, generate_2d_layout\n'), ((3973, 4045), 'numpy.testing.assert_allclose', 'assert_allclose', (['l1[-1]', 'z_pt[:2]'], {'err_msg': '"""Z=0 point moved"""', 'atol': '(1e-06)'}), "(l1[-1], z_pt[:2], err_msg='Z=0 point moved', atol=1e-06)\n", (3988, 4045), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal, assert_allclose, assert_equal\n'), ((4173, 4233), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""Cannot determine location"""'}), "(ValueError, 
match='Cannot determine location')\n", (4186, 4233), False, 'import pytest\n'), ((4243, 4290), 'mne.channels.layout._find_topomap_coords', '_find_topomap_coords', (['info', 'mag_picks'], {}), '(info, mag_picks, **kwargs)\n', (4263, 4290), False, 'from mne.channels.layout import _box_size, _find_topomap_coords, generate_2d_layout\n'), ((4446, 4514), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""Number of EEG digitization points"""'}), "(ValueError, match='Number of EEG digitization points')\n", (4459, 4514), False, 'import pytest\n'), ((4524, 4567), 'mne.channels.layout._find_topomap_coords', '_find_topomap_coords', (['info', 'picks'], {}), '(info, picks, **kwargs)\n', (4544, 4567), False, 'from mne.channels.layout import _box_size, _find_topomap_coords, generate_2d_layout\n'), ((4689, 4757), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""Number of EEG digitization points"""'}), "(ValueError, match='Number of EEG digitization points')\n", (4702, 4757), False, 'import pytest\n'), ((4767, 4810), 'mne.channels.layout._find_topomap_coords', '_find_topomap_coords', (['info', 'picks'], {}), '(info, picks, **kwargs)\n', (4787, 4810), False, 'from mne.channels.layout import _box_size, _find_topomap_coords, generate_2d_layout\n'), ((4902, 4958), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""overlapping positions"""'}), "(ValueError, match='overlapping positions')\n", (4915, 4958), False, 'import pytest\n'), ((4968, 5011), 'mne.channels.layout._find_topomap_coords', '_find_topomap_coords', (['info', 'picks'], {}), '(info, picks, **kwargs)\n', (4988, 5011), False, 'from mne.channels.layout import _box_size, _find_topomap_coords, generate_2d_layout\n'), ((5170, 5236), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {'match': '"""Did not find any digitization"""'}), "(RuntimeError, match='Did not find any digitization')\n", (5183, 5236), False, 'import pytest\n'), ((5246, 5289), 
'mne.channels.layout._find_topomap_coords', '_find_topomap_coords', (['info', 'picks'], {}), '(info, picks, **kwargs)\n', (5266, 5289), False, 'from mne.channels.layout import _box_size, _find_topomap_coords, generate_2d_layout\n'), ((5391, 5456), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {'match': '"""No digitization points found"""'}), "(RuntimeError, match='No digitization points found')\n", (5404, 5456), False, 'import pytest\n'), ((5466, 5509), 'mne.channels.layout._find_topomap_coords', '_find_topomap_coords', (['info', 'picks'], {}), '(info, picks, **kwargs)\n', (5486, 5509), False, 'from mne.channels.layout import _box_size, _find_topomap_coords, generate_2d_layout\n'), ((5540, 5605), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {'match': '"""No digitization points found"""'}), "(RuntimeError, match='No digitization points found')\n", (5553, 5605), False, 'import pytest\n'), ((5615, 5658), 'mne.channels.layout._find_topomap_coords', '_find_topomap_coords', (['info', 'picks'], {}), '(info, picks, **kwargs)\n', (5635, 5658), False, 'from mne.channels.layout import _box_size, _find_topomap_coords, generate_2d_layout\n'), ((6153, 6189), 'os.path.join', 'op.join', (['tempdir', "(tmp_name + '.lout')"], {}), "(tempdir, tmp_name + '.lout')\n", (6160, 6189), True, 'import os.path as op\n'), ((7108, 7144), 'os.path.join', 'op.join', (['tempdir', "(tmp_name + '.lout')"], {}), "(tempdir, tmp_name + '.lout')\n", (7115, 7144), True, 'import os.path as op\n'), ((9932, 9956), 'mne.io.read_info', 'read_info', (['fname_ctf_raw'], {}), '(fname_ctf_raw)\n', (9941, 9956), False, 'from mne.io import read_raw_kit, _empty_info, read_info\n'), ((10086, 10110), 'mne.io.read_info', 'read_info', (['fname_bti_raw'], {}), '(fname_bti_raw)\n', (10095, 10110), False, 'from mne.io import read_raw_kit, _empty_info, read_info\n'), ((10903, 10916), 'mne.channels.layout._box_size', '_box_size', (['[]'], {}), '([])\n', (10912, 10916), False, 'from mne.channels.layout import 
_box_size, _find_topomap_coords, generate_2d_layout\n'), ((11020, 11036), 'mne.channels.layout._box_size', '_box_size', (['point'], {}), '(point)\n', (11029, 11036), False, 'from mne.channels.layout import _box_size, _find_topomap_coords, generate_2d_layout\n'), ((11162, 11179), 'mne.channels.layout._box_size', '_box_size', (['points'], {}), '(points)\n', (11171, 11179), False, 'from mne.channels.layout import _box_size, _find_topomap_coords, generate_2d_layout\n'), ((11327, 11344), 'mne.channels.layout._box_size', '_box_size', (['points'], {}), '(points)\n', (11336, 11344), False, 'from mne.channels.layout import _box_size, _find_topomap_coords, generate_2d_layout\n'), ((11444, 11470), 'numpy.linspace', 'np.linspace', (['(-0.5)', '(0.5)', '(11)'], {}), '(-0.5, 0.5, 11)\n', (11455, 11470), True, 'import numpy as np\n'), ((11472, 11498), 'numpy.linspace', 'np.linspace', (['(-0.5)', '(0.5)', '(11)'], {}), '(-0.5, 0.5, 11)\n', (11483, 11498), True, 'import numpy as np\n'), ((11552, 11574), 'mne.channels.layout._box_size', '_box_size', (['np.c_[x, y]'], {}), '(np.c_[x, y])\n', (11561, 11574), False, 'from mne.channels.layout import _box_size, _find_topomap_coords, generate_2d_layout\n'), ((11944, 11972), 'mne.channels.layout._box_size', '_box_size', (['points'], {'width': '(0.4)'}), '(points, width=0.4)\n', (11953, 11972), False, 'from mne.channels.layout import _box_size, _find_topomap_coords, generate_2d_layout\n'), ((12153, 12181), 'mne.channels.layout._box_size', '_box_size', (['points'], {'width': '(0.2)'}), '(points, width=0.2)\n', (12162, 12181), False, 'from mne.channels.layout import _box_size, _find_topomap_coords, generate_2d_layout\n'), ((12313, 12342), 'mne.channels.layout._box_size', '_box_size', (['points'], {'height': '(0.4)'}), '(points, height=0.4)\n', (12322, 12342), False, 'from mne.channels.layout import _box_size, _find_topomap_coords, generate_2d_layout\n'), ((12523, 12552), 'mne.channels.layout._box_size', '_box_size', (['points'], {'height': 
'(0.1)'}), '(points, height=0.1)\n', (12532, 12552), False, 'from mne.channels.layout import _box_size, _find_topomap_coords, generate_2d_layout\n'), ((12737, 12777), 'mne.channels.layout._box_size', '_box_size', (['points'], {'width': '(0.1)', 'height': '(0.1)'}), '(points, width=0.1, height=0.1)\n', (12746, 12777), False, 'from mne.channels.layout import _box_size, _find_topomap_coords, generate_2d_layout\n'), ((13007, 13033), 'mne.channels.layout._box_size', '_box_size', (['points'], {'width': '(1)'}), '(points, width=1)\n', (13016, 13033), False, 'from mne.channels.layout import _box_size, _find_topomap_coords, generate_2d_layout\n'), ((13228, 13258), 'mne.channels.layout._box_size', '_box_size', (['points'], {'padding': '(0.1)'}), '(points, padding=0.1)\n', (13237, 13258), False, 'from mne.channels.layout import _box_size, _find_topomap_coords, generate_2d_layout\n'), ((13784, 13813), 'numpy.errstate', 'np.errstate', ([], {'invalid': '"""ignore"""'}), "(invalid='ignore')\n", (13795, 13813), True, 'import numpy as np\n'), ((10742, 10755), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (10751, 10755), True, 'import numpy as np\n'), ((13436, 13461), 'numpy.random.RandomState', 'np.random.RandomState', (['(42)'], {}), '(42)\n', (13457, 13461), True, 'import numpy as np\n')] |
import unittest
from scipy import stats
import numpy as np
import copy
from functools import partial
from pyapprox.approximate import approximate, adaptive_approximate, \
cross_validate_pce_degree, compute_l2_error, \
cross_validate_approximation, LinearLeastSquaresCV
from pyapprox.benchmarks.benchmarks import setup_benchmark
import pyapprox as pya
class TestApproximate(unittest.TestCase):
    """Regression tests for ``pyapprox.approximate`` and
    ``pyapprox.adaptive_approximate``.

    Each test builds a surrogate (sparse grid, polynomial chaos,
    Gaussian process, or neural network) for a known function and checks
    the L2 error or cross-validation scores against hard-coded
    tolerances.  The tolerances assume the fixed RNG seed set in
    ``setUp``, so the order of random draws inside each test must not
    change.
    """
    def setUp(self):
        # Fix the seed so randomly generated training data and the
        # hard-coded error tolerances below are reproducible.
        np.random.seed(1)
    def test_approximate_sparse_grid_default_options(self):
        """Adaptive sparse grid with default options recovers Ishigami."""
        nvars = 3
        benchmark = setup_benchmark("ishigami", a=7, b=0.1)
        univariate_variables = [stats.uniform(0, 1)]*nvars
        approx = adaptive_approximate(
            benchmark.fun, univariate_variables, "sparse_grid").approx
        nsamples = 100
        error = compute_l2_error(
            approx, benchmark.fun, approx.variable_transformation.variable,
            nsamples)
        assert error < 1e-12
    def test_approximate_sparse_grid_discrete(self):
        """Sparse grid on discrete (binomial) variables; checks nestedness."""
        def fun(samples):
            return np.cos(samples.sum(axis=0)/20)[:, None]
        nvars = 2
        univariate_variables = [stats.binom(20, 0.5)]*nvars
        approx = adaptive_approximate(
            fun, univariate_variables, "sparse_grid").approx
        nsamples = 100
        error = compute_l2_error(
            approx, fun, approx.variable_transformation.variable,
            nsamples)
        assert error < 1e-12
        # check leja samples are nested. Sparse grid uses christoffel
        # leja sequence that does not change preconditioner every time
        # lu pivot is performed, but we can still enforce nestedness
        # by specifying initial points. This test makes sure this is done
        # correctly
        for ll in range(1, len(approx.samples_1d[0])):
            n = approx.samples_1d[0][ll-1].shape[0]
            assert np.allclose(approx.samples_1d[0][ll][:n],
                               approx.samples_1d[0][ll-1])
    def test_approximate_sparse_grid_user_options(self):
        """Sparse grid with user-supplied quadrature rule, refinement
        indicator and per-iteration error callback."""
        benchmark = setup_benchmark("ishigami", a=7, b=0.1)
        # NOTE(review): benchmark is indexed like a dict here but accessed
        # via attributes elsewhere — presumably it supports both; confirm.
        univariate_variables = benchmark["variable"].all_variables()
        errors = []
        def callback(approx):
            # record the L2 error after each adaptive refinement step
            nsamples = 1000
            error = compute_l2_error(
                approx, benchmark.fun, approx.variable_transformation.variable,
                nsamples)
            errors.append(error)
        univariate_quad_rule_info = [
            pya.clenshaw_curtis_in_polynomial_order,
            pya.clenshaw_curtis_rule_growth, None, None]
        # ishigami has same value at first 3 points in clenshaw curtis rule
        # and so adaptivity will not work so use different rule
        # growth_rule=partial(pya.constant_increment_growth_rule,4)
        # univariate_quad_rule_info = [
        #     pya.get_univariate_leja_quadrature_rule(
        #         univariate_variables[0],growth_rule),growth_rule]
        refinement_indicator = partial(
            pya.variance_refinement_indicator, convex_param=0.5)
        options = {"univariate_quad_rule_info": univariate_quad_rule_info,
                   "max_nsamples": 300, "tol": 0,
                   "callback": callback, "verbose": 0,
                   "refinement_indicator": refinement_indicator}
        adaptive_approximate(
            benchmark.fun, univariate_variables, "sparse_grid", options).approx
        # print(np.min(errors))
        assert np.min(errors) < 1e-3
    def test_approximate_polynomial_chaos_leja(self):
        """Adaptive PCE built with Leja sequence sampling."""
        nvars = 3
        benchmark = setup_benchmark("ishigami", a=7, b=0.1)
        # we can use different univariate variables than specified by
        # benchmark. In this case we use the same but set them up here
        # to demonstrate this functionality
        univariate_variables = [stats.uniform(0, 1)]*nvars
        approx = adaptive_approximate(
            benchmark.fun, univariate_variables,
            method="polynomial_chaos",
            options={"method": "leja",
                     "options": {"max_nsamples": 100}}).approx
        nsamples = 100
        error = compute_l2_error(
            approx, benchmark.fun, approx.variable_transformation.variable,
            nsamples)
        assert error < 1e-12
    def test_approximate_polynomial_chaos_induced(self):
        """Adaptive PCE with induced (probability) sampling."""
        nvars = 3
        benchmark = setup_benchmark("ishigami", a=7, b=0.1)
        # we can use different univariate variables than specified by
        # benchmark. In this case we use the same but set them up here
        # to demonstrate this functionality
        univariate_variables = [stats.uniform(0, 1)]*nvars
        # approx = adaptive_approximate(
        #     benchmark.fun, univariate_variables,
        #     method="polynomial_chaos",
        #     options={"method": "induced",
        #              "options": {"max_nsamples": 200,
        #                          "induced_sampling": True,
        #                          "cond_tol": 1e8}}).approx
        # nsamples = 100
        # error = compute_l2_error(
        #     approx, benchmark.fun, approx.variable_transformation.variable,
        #     nsamples)
        # print(error)
        # assert error < 1e-5
        # probability sampling
        approx = adaptive_approximate(
            benchmark.fun, univariate_variables,
            method="polynomial_chaos",
            options={"method": "induced",
                     "options": {"max_nsamples": 100,
                                 "induced_sampling": False,
                                 "cond_tol": 1e4,
                                 "max_level_1d": 4, "verbose": 3}}).approx
        nsamples = 100
        error = compute_l2_error(
            approx, benchmark.fun, approx.variable_transformation.variable,
            nsamples)
        print(error)
        assert error < 1e-5
    def test_approximate_polynomial_chaos_custom_poly_type(self):
        """Check that an explicit ``poly_types`` option overrides the basis
        that would otherwise be inferred from the variable transformation."""
        benchmark = setup_benchmark("ishigami", a=7, b=0.1)
        nvars = benchmark.variable.num_vars()
        # this test purposefully selects the wrong variable to make sure
        # poly_type override is activated
        univariate_variables = [stats.beta(5, 5, -np.pi, 2*np.pi)]*nvars
        variable = pya.IndependentMultivariateRandomVariable(
            univariate_variables)
        var_trans = pya.AffineRandomVariableTransformation(variable)
        # specify correct basis so it is not chosen from var_trans.variable
        poly_opts = {"var_trans": var_trans}
        # but rather from another variable which will invoke Legendre polys
        basis_opts = pya.define_poly_options_from_variable(
            pya.IndependentMultivariateRandomVariable([stats.uniform()]*nvars))
        poly_opts["poly_types"] = basis_opts
        options = {"poly_opts": poly_opts, "variable": variable,
                   "options": {"max_num_step_increases": 1}}
        ntrain_samples = 400
        train_samples = np.random.uniform(
            -np.pi, np.pi, (nvars, ntrain_samples))
        train_vals = benchmark.fun(train_samples)
        approx = approximate(
            train_samples, train_vals,
            method="polynomial_chaos", options=options).approx
        nsamples = 100
        error = compute_l2_error(
            approx, benchmark.fun, approx.var_trans.variable,
            nsamples, rel=True)
        # print(error)
        assert error < 1e-4
        assert np.allclose(approx.mean(), benchmark.mean, atol=error)
    def help_cross_validate_pce_degree(self, solver_type, solver_options):
        """Helper: build a degree-3 PCE with known coefficients and verify
        cross validation selects degree 2 for the first QoI (whose higher
        degree coefficients are zeroed) and degree 3 for the second.

        Parameters
        ----------
        solver_type : str
            Linear solver name passed to the PCE fit (e.g. "lstsq").
        solver_options : dict
            Options forwarded to the linear solver.
        """
        print(solver_type, solver_options)
        num_vars = 2
        univariate_variables = [stats.uniform(-1, 2)]*num_vars
        variable = pya.IndependentMultivariateRandomVariable(
            univariate_variables)
        var_trans = pya.AffineRandomVariableTransformation(variable)
        poly = pya.PolynomialChaosExpansion()
        poly_opts = pya.define_poly_options_from_variable_transformation(
            var_trans)
        poly.configure(poly_opts)
        degree = 3
        poly.set_indices(pya.compute_hyperbolic_indices(num_vars, degree, 1.0))
        # factor of 2 does not pass test but 2.2 does
        num_samples = int(poly.num_terms()*2.2)
        coef = np.random.normal(0, 1, (poly.indices.shape[1], 2))
        # zero all coefficients beyond total degree 2 for the first QoI
        coef[pya.nchoosek(num_vars+2, 2):, 0] = 0
        # for first qoi make degree 2 the best degree
        poly.set_coefficients(coef)
        train_samples = pya.generate_independent_random_samples(
            variable, num_samples)
        train_vals = poly(train_samples)
        true_poly = poly
        poly = approximate(
            train_samples, train_vals, "polynomial_chaos",
            {"basis_type": "hyperbolic_cross", "variable": variable,
             "options": {"verbose": 3, "solver_type": solver_type,
                         "min_degree": 1, "max_degree": degree+1,
                         "linear_solver_options": solver_options}}).approx
        num_validation_samples = 10
        validation_samples = pya.generate_independent_random_samples(
            variable, num_validation_samples)
        assert np.allclose(
            poly(validation_samples), true_poly(validation_samples))
        poly = copy.deepcopy(true_poly)
        approx_res = cross_validate_pce_degree(
            poly, train_samples, train_vals, 1, degree+1,
            solver_type=solver_type, linear_solver_options=solver_options)
        assert np.allclose(approx_res.degrees, [2, 3])
    def test_cross_validate_pce_degree(self):
        """Run the degree cross-validation helper for several solvers."""
        # lasso and omp do not pass this test so recommend not using them
        solver_type_list = ["lstsq", "lstsq", "lasso"]  # , "omp"]#, "lars"]
        solver_options_list = [
            {"alphas": [1e-14], "cv":22}, {"cv": 10},
            {"max_iter": 20, "cv": 21}]
        for solver_type, solver_options in zip(
                solver_type_list, solver_options_list):
            self.help_cross_validate_pce_degree(solver_type, solver_options)
    def test_pce_basis_expansion(self):
        """Expanding-basis PCE recovers a sparse polynomial exactly."""
        num_vars = 2
        univariate_variables = [stats.uniform(-1, 2)]*num_vars
        variable = pya.IndependentMultivariateRandomVariable(
            univariate_variables)
        var_trans = pya.AffineRandomVariableTransformation(variable)
        poly = pya.PolynomialChaosExpansion()
        poly_opts = pya.define_poly_options_from_variable_transformation(
            var_trans)
        poly.configure(poly_opts)
        degree, hcross_strength = 7, 0.4
        poly.set_indices(
            pya.compute_hyperbolic_indices(num_vars, degree, hcross_strength))
        num_samples = poly.num_terms()*2
        degrees = poly.indices.sum(axis=0)
        # decay coefficient magnitude with total degree
        coef = np.random.normal(
            0, 1, (poly.indices.shape[1], 2))/(degrees[:, np.newaxis]+1)**2
        # set some coefficients to zero to make sure that different qoi
        # are treated correctly.
        II = np.random.permutation(coef.shape[0])[:coef.shape[0]//2]
        coef[II, 0] = 0
        II = np.random.permutation(coef.shape[0])[:coef.shape[0]//2]
        coef[II, 1] = 0
        poly.set_coefficients(coef)
        train_samples = pya.generate_independent_random_samples(
            variable, num_samples)
        train_vals = poly(train_samples)
        true_poly = poly
        poly = approximate(
            train_samples, train_vals, "polynomial_chaos",
            {"basis_type": "expanding_basis", "variable": variable,
             "options": {"max_num_expansion_steps_iter": 1, "verbose": 3,
                         "max_num_terms": 1000,
                         "max_num_step_increases": 2,
                         "max_num_init_terms": 33}}).approx
        num_validation_samples = 100
        validation_samples = pya.generate_independent_random_samples(
            variable, num_validation_samples)
        # NOTE(review): validation samples are immediately replaced by the
        # training samples, but the error below still normalizes by
        # num_validation_samples — looks intentional only as a diagnostic.
        validation_samples = train_samples
        error = np.linalg.norm(poly(validation_samples)-true_poly(
            validation_samples))/np.sqrt(num_validation_samples)
        assert np.allclose(
            poly(validation_samples), true_poly(validation_samples),
            atol=1e-8), error
    def test_approximate_gaussian_process(self):
        """GP regression recovers a random function drawn from the same
        Matern kernel (nu=inf, i.e. squared-exponential limit)."""
        from sklearn.gaussian_process.kernels import Matern
        num_vars = 1
        univariate_variables = [stats.uniform(-1, 2)]*num_vars
        variable = pya.IndependentMultivariateRandomVariable(
            univariate_variables)
        num_samples = 100
        train_samples = pya.generate_independent_random_samples(
            variable, num_samples)
        # Generate random function
        nu = np.inf  # 2.5
        kernel = Matern(0.5, nu=nu)
        X = np.linspace(-1, 1, 1000)[np.newaxis, :]
        alpha = np.random.normal(0, 1, X.shape[1])
        train_vals = kernel(train_samples.T, X.T).dot(alpha)[:, np.newaxis]
        gp = approximate(
            train_samples, train_vals, "gaussian_process",
            {"nu": nu, "noise_level": 1e-8}).approx
        error = np.linalg.norm(gp(X)[:, 0]-kernel(X.T, X.T).dot(alpha)) /\
            np.sqrt(X.shape[1])
        assert error < 1e-5
        # import matplotlib.pyplot as plt
        # plt.plot(X[0,:],kernel(X.T,X.T).dot(alpha),"r--",zorder=100)
        # vals,std = gp(X,return_std=True)
        # plt.plot(X[0,:],vals[:,0],c="b")
        # plt.fill_between(
        #     X[0,:],vals[:,0]-2*std,vals[:,0]+2*std,color="b",alpha=0.5)
        # plt.plot(train_samples[0,:], train_vals[:,0],"ro")
        # plt.show()
    def test_adaptive_approximate_gaussian_process(self):
        """Adaptive GP driver converges on a random kernel-draw target;
        the callback tracks relative error after each refinement."""
        from sklearn.gaussian_process.kernels import Matern
        num_vars = 1
        univariate_variables = [stats.uniform(-1, 2)]*num_vars
        # Generate random function
        nu = np.inf  # 2.5
        kernel = Matern(0.1, nu=nu)
        X = np.linspace(-1, 1, 1000)[np.newaxis, :]
        alpha = np.random.normal(0, 1, X.shape[1])
        def fun(x):
            return kernel(x.T, X.T).dot(alpha)[:, np.newaxis]
            # return np.cos(2*np.pi*x.sum(axis=0)/num_vars)[:, np.newaxis]
        errors = []
        validation_samples = np.random.uniform(-1, 1, (num_vars, 100))
        validation_values = fun(validation_samples)
        def callback(gp):
            # relative L2 validation error at the current training-set size
            gp_vals = gp(validation_samples)
            assert gp_vals.shape == validation_values.shape
            error = np.linalg.norm(gp_vals-validation_values)/np.linalg.norm(
                validation_values)
            print(error, gp.y_train_.shape[0])
            errors.append(error)
        adaptive_approximate(
            fun, univariate_variables, "gaussian_process",
            {"nu": nu, "noise_level": None, "normalize_y": True,
             "alpha": 1e-10,
             "ncandidate_samples": 1e3, "callback": callback}).approx
        assert errors[-1] < 1e-8
    def test_adaptive_approximate_gaussian_process_normalize_inputs(self):
        """Adaptive GP with input normalization and a tensor-product pdf
        weight function for candidate-sample selection."""
        from sklearn.gaussian_process.kernels import Matern
        num_vars = 1
        univariate_variables = [stats.beta(5, 10, 0, 2)]*num_vars
        # Generate random function
        nu = np.inf  # 2.5
        kernel = Matern(0.1, nu=nu)
        X = np.linspace(-1, 1, 1000)[np.newaxis, :]
        alpha = np.random.normal(0, 1, X.shape[1])
        def fun(x):
            return kernel(x.T, X.T).dot(alpha)[:, np.newaxis]
            # return np.cos(2*np.pi*x.sum(axis=0)/num_vars)[:, np.newaxis]
        errors = []
        validation_samples = pya.generate_independent_random_samples(
            pya.IndependentMultivariateRandomVariable(univariate_variables),
            100)
        validation_values = fun(validation_samples)
        def callback(gp):
            # relative L2 validation error at the current training-set size
            gp_vals = gp(validation_samples)
            assert gp_vals.shape == validation_values.shape
            error = np.linalg.norm(gp_vals-validation_values)/np.linalg.norm(
                validation_values)
            print(error, gp.y_train_.shape[0])
            errors.append(error)
        weight_function = partial(
            pya.tensor_product_pdf,
            univariate_pdfs=[v.pdf for v in univariate_variables])
        gp = adaptive_approximate(
            fun, univariate_variables, "gaussian_process",
            {"nu": nu, "noise_level": None, "normalize_y": True,
             "alpha": 1e-10, "normalize_inputs": True,
             "weight_function": weight_function,
             "ncandidate_samples": 1e3, "callback": callback}).approx
        # import matplotlib.pyplot as plt
        # plt.plot(gp.X_train_.T[0, :], 0*gp.X_train_.T[0, :], "s")
        # plt.plot(gp.get_training_samples()[0, :], 0*gp.get_training_samples()[0, :], "x")
        # plt.plot(gp.sampler.candidate_samples[0, :], 0*gp.sampler.candidate_samples[0, :], "^")
        # plt.plot(validation_samples[0, :], validation_values[:, 0], "o")
        # var = univariate_variables[0]
        # lb, ub = var.interval(1)
        # xx = np.linspace(lb, ub, 101)
        # plt.plot(xx, var.pdf(xx), "r-")
        # plt.show()
        print(errors[-1])
        assert errors[-1] < 1e-7
    def test_approximate_fixed_pce(self):
        """cross_validate_approximation with a fixed index set must agree
        with LinearLeastSquaresCV run directly on the basis matrix."""
        num_vars = 2
        univariate_variables = [stats.uniform(-1, 2)]*num_vars
        variable = pya.IndependentMultivariateRandomVariable(
            univariate_variables)
        var_trans = pya.AffineRandomVariableTransformation(variable)
        poly = pya.PolynomialChaosExpansion()
        poly_opts = pya.define_poly_options_from_variable_transformation(
            var_trans)
        poly.configure(poly_opts)
        degree, hcross_strength = 7, 0.4
        poly.set_indices(
            pya.compute_hyperbolic_indices(num_vars, degree, hcross_strength))
        num_samples = poly.num_terms()*2
        degrees = poly.indices.sum(axis=0)
        # decay coefficient magnitude with total degree
        coef = np.random.normal(
            0, 1, (poly.indices.shape[1], 2))/(degrees[:, np.newaxis]+1)**2
        # set some coefficients to zero to make sure that different qoi
        # are treated correctly.
        II = np.random.permutation(coef.shape[0])[:coef.shape[0]//2]
        coef[II, 0] = 0
        II = np.random.permutation(coef.shape[0])[:coef.shape[0]//2]
        coef[II, 1] = 0
        poly.set_coefficients(coef)
        train_samples = pya.generate_independent_random_samples(
            variable, num_samples)
        train_vals = poly(train_samples)
        indices = pya.compute_hyperbolic_indices(num_vars, 1, 1)
        nfolds = 10
        method = "polynomial_chaos"
        options = {"basis_type": "fixed", "variable": variable,
                   "options": {"linear_solver_options": {},
                               "indices": indices, "solver_type": "lstsq"}}
        approx_list, residues_list, cv_score = \
            cross_validate_approximation(
                train_samples, train_vals, options, nfolds, method,
                random_folds=False)
        solver = LinearLeastSquaresCV(cv=nfolds, random_folds=False)
        poly.set_indices(indices)
        basis_matrix = poly.basis_matrix(train_samples)
        solver.fit(basis_matrix, train_vals[:, 0:1])
        assert np.allclose(solver.cv_score_, cv_score[0])
        solver.fit(basis_matrix, train_vals[:, 1:2])
        assert np.allclose(solver.cv_score_, cv_score[1])
    def test_cross_validate_approximation_after_regularization_selection(self):
        """
        This test is useful as it shows how to use cross_validate_approximation
        to produce a list of approximations on each cross validation fold
        once regularization parameters have been chosen.
        These can be used to show variance in predictions of values,
        sensitivity indices, etc.
        Ideally this could be avoided if sklearn stored the coefficients
        and alphas for each fold and then we can just find the coefficients
        that correspond to the first time the path drops below the best_alpha
        """
        num_vars = 2
        univariate_variables = [stats.uniform(-1, 2)]*num_vars
        variable = pya.IndependentMultivariateRandomVariable(
            univariate_variables)
        var_trans = pya.AffineRandomVariableTransformation(variable)
        poly = pya.PolynomialChaosExpansion()
        poly_opts = pya.define_poly_options_from_variable_transformation(
            var_trans)
        poly.configure(poly_opts)
        degree, hcross_strength = 7, 0.4
        poly.set_indices(
            pya.compute_hyperbolic_indices(num_vars, degree, hcross_strength))
        num_samples = poly.num_terms()*2
        degrees = poly.indices.sum(axis=0)
        # decay coefficient magnitude with total degree
        coef = np.random.normal(
            0, 1, (poly.indices.shape[1], 2))/(degrees[:, np.newaxis]+1)**2
        # set some coefficients to zero to make sure that different qoi
        # are treated correctly.
        II = np.random.permutation(coef.shape[0])[:coef.shape[0]//2]
        coef[II, 0] = 0
        II = np.random.permutation(coef.shape[0])[:coef.shape[0]//2]
        coef[II, 1] = 0
        poly.set_coefficients(coef)
        train_samples = pya.generate_independent_random_samples(
            variable, num_samples)
        train_vals = poly(train_samples)
        # true_poly = poly
        result = approximate(
            train_samples, train_vals, "polynomial_chaos",
            {"basis_type": "expanding_basis", "variable": variable})
        # Even with the same folds, iterative methods such as Lars, LarsLasso
        # and OMP will not have cv_score from approximate and cross validate
        # approximation exactly the same because iterative methods interpolate
        # residuals to compute cross validation scores
        nfolds = 10
        linear_solver_options = [
            {"alpha": result.reg_params[0]}, {"alpha": result.reg_params[1]}]
        # keep only the basis terms with nonzero coefficients for each QoI
        indices = [result.approx.indices[:, np.where(np.absolute(c) > 0)[0]]
                   for c in result.approx.coefficients.T]
        options = {"basis_type": "fixed", "variable": variable,
                   "options": {"linear_solver_options": linear_solver_options,
                               "indices": indices}}
        approx_list, residues_list, cv_score = \
            cross_validate_approximation(
                train_samples, train_vals, options, nfolds, "polynomial_chaos",
                random_folds="sklearn")
        assert (np.all(cv_score < 6e-14) and np.all(result.scores < 4e-13))
    def test_approximate_neural_network(self):
        """Single-hidden-layer network fit to Ishigami; only prints the
        L2 error (no assertion on accuracy)."""
        np.random.seed(2)
        benchmark = setup_benchmark("ishigami", a=7, b=0.1)
        nvars = benchmark.variable.num_vars()
        nqoi = 1
        maxiter = 30000
        print(benchmark.variable)
        # var_trans = pya.AffineRandomVariableTransformation(
        #     [stats.uniform(-2, 4)]*nvars)
        var_trans = pya.AffineRandomVariableTransformation(benchmark.variable)
        network_opts = {"activation_func": "sigmoid",
                        "layers": [nvars, 75, nqoi],
                        "loss_func": "squared_loss",
                        "var_trans": var_trans, "lag_mult": 0}
        optimizer_opts = {"method": "L-BFGS-B",
                          "options": {"maxiter": maxiter, "iprint": -1,
                                      "gtol": 1e-6}}
        opts = {"network_opts": network_opts, "verbosity": 3,
                "optimizer_opts": optimizer_opts}
        ntrain_samples = 500
        train_samples = pya.generate_independent_random_samples(
            var_trans.variable, ntrain_samples)
        # cluster training samples towards the domain boundary via cos of
        # uniform angles, then map back to the user space
        train_samples = var_trans.map_from_canonical_space(
            np.cos(np.random.uniform(0, np.pi, (nvars, ntrain_samples))))
        train_vals = benchmark.fun(train_samples)
        opts = {"network_opts": network_opts, "verbosity": 3,
                "optimizer_opts": optimizer_opts, "x0": 1}
        approx = approximate(
            train_samples, train_vals, "neural_network", opts).approx
        nsamples = 100
        error = compute_l2_error(
            approx, benchmark.fun, var_trans.variable,
            nsamples)
        print(error)
if __name__ == "__main__":
    # Assemble and run the suite explicitly so the runner verbosity
    # can be controlled (rather than calling unittest.main()).
    loader = unittest.TestLoader()
    approximate_test_suite = loader.loadTestsFromTestCase(TestApproximate)
    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(approximate_test_suite)
| [
"pyapprox.approximate.adaptive_approximate",
"numpy.sqrt",
"pyapprox.benchmarks.benchmarks.setup_benchmark",
"pyapprox.AffineRandomVariableTransformation",
"pyapprox.IndependentMultivariateRandomVariable",
"scipy.stats.beta",
"copy.deepcopy",
"numpy.linalg.norm",
"pyapprox.generate_independent_rando... | [((433, 450), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (447, 450), True, 'import numpy as np\n'), ((550, 589), 'pyapprox.benchmarks.benchmarks.setup_benchmark', 'setup_benchmark', (['"""ishigami"""'], {'a': '(7)', 'b': '(0.1)'}), "('ishigami', a=7, b=0.1)\n", (565, 589), False, 'from pyapprox.benchmarks.benchmarks import setup_benchmark\n'), ((798, 893), 'pyapprox.approximate.compute_l2_error', 'compute_l2_error', (['approx', 'benchmark.fun', 'approx.variable_transformation.variable', 'nsamples'], {}), '(approx, benchmark.fun, approx.variable_transformation.\n variable, nsamples)\n', (814, 893), False, 'from pyapprox.approximate import approximate, adaptive_approximate, cross_validate_pce_degree, compute_l2_error, cross_validate_approximation, LinearLeastSquaresCV\n'), ((1299, 1384), 'pyapprox.approximate.compute_l2_error', 'compute_l2_error', (['approx', 'fun', 'approx.variable_transformation.variable', 'nsamples'], {}), '(approx, fun, approx.variable_transformation.variable, nsamples\n )\n', (1315, 1384), False, 'from pyapprox.approximate import approximate, adaptive_approximate, cross_validate_pce_degree, compute_l2_error, cross_validate_approximation, LinearLeastSquaresCV\n'), ((2043, 2082), 'pyapprox.benchmarks.benchmarks.setup_benchmark', 'setup_benchmark', (['"""ishigami"""'], {'a': '(7)', 'b': '(0.1)'}), "('ishigami', a=7, b=0.1)\n", (2058, 2082), False, 'from pyapprox.benchmarks.benchmarks import setup_benchmark\n'), ((2956, 3016), 'functools.partial', 'partial', (['pya.variance_refinement_indicator'], {'convex_param': '(0.5)'}), '(pya.variance_refinement_indicator, convex_param=0.5)\n', (2963, 3016), False, 'from functools import partial\n'), ((3547, 3586), 'pyapprox.benchmarks.benchmarks.setup_benchmark', 'setup_benchmark', (['"""ishigami"""'], {'a': '(7)', 'b': '(0.1)'}), "('ishigami', a=7, b=0.1)\n", (3562, 3586), False, 'from pyapprox.benchmarks.benchmarks import setup_benchmark\n'), 
((4100, 4195), 'pyapprox.approximate.compute_l2_error', 'compute_l2_error', (['approx', 'benchmark.fun', 'approx.variable_transformation.variable', 'nsamples'], {}), '(approx, benchmark.fun, approx.variable_transformation.\n variable, nsamples)\n', (4116, 4195), False, 'from pyapprox.approximate import approximate, adaptive_approximate, cross_validate_pce_degree, compute_l2_error, cross_validate_approximation, LinearLeastSquaresCV\n'), ((4341, 4380), 'pyapprox.benchmarks.benchmarks.setup_benchmark', 'setup_benchmark', (['"""ishigami"""'], {'a': '(7)', 'b': '(0.1)'}), "('ishigami', a=7, b=0.1)\n", (4356, 4380), False, 'from pyapprox.benchmarks.benchmarks import setup_benchmark\n'), ((5677, 5772), 'pyapprox.approximate.compute_l2_error', 'compute_l2_error', (['approx', 'benchmark.fun', 'approx.variable_transformation.variable', 'nsamples'], {}), '(approx, benchmark.fun, approx.variable_transformation.\n variable, nsamples)\n', (5693, 5772), False, 'from pyapprox.approximate import approximate, adaptive_approximate, cross_validate_pce_degree, compute_l2_error, cross_validate_approximation, LinearLeastSquaresCV\n'), ((5929, 5968), 'pyapprox.benchmarks.benchmarks.setup_benchmark', 'setup_benchmark', (['"""ishigami"""'], {'a': '(7)', 'b': '(0.1)'}), "('ishigami', a=7, b=0.1)\n", (5944, 5968), False, 'from pyapprox.benchmarks.benchmarks import setup_benchmark\n'), ((6216, 6279), 'pyapprox.IndependentMultivariateRandomVariable', 'pya.IndependentMultivariateRandomVariable', (['univariate_variables'], {}), '(univariate_variables)\n', (6257, 6279), True, 'import pyapprox as pya\n'), ((6313, 6361), 'pyapprox.AffineRandomVariableTransformation', 'pya.AffineRandomVariableTransformation', (['variable'], {}), '(variable)\n', (6351, 6361), True, 'import pyapprox as pya\n'), ((6923, 6980), 'numpy.random.uniform', 'np.random.uniform', (['(-np.pi)', 'np.pi', '(nvars, ntrain_samples)'], {}), '(-np.pi, np.pi, (nvars, ntrain_samples))\n', (6940, 6980), True, 'import numpy as np\n'), 
((7215, 7305), 'pyapprox.approximate.compute_l2_error', 'compute_l2_error', (['approx', 'benchmark.fun', 'approx.var_trans.variable', 'nsamples'], {'rel': '(True)'}), '(approx, benchmark.fun, approx.var_trans.variable, nsamples,\n rel=True)\n', (7231, 7305), False, 'from pyapprox.approximate import approximate, adaptive_approximate, cross_validate_pce_degree, compute_l2_error, cross_validate_approximation, LinearLeastSquaresCV\n'), ((7670, 7733), 'pyapprox.IndependentMultivariateRandomVariable', 'pya.IndependentMultivariateRandomVariable', (['univariate_variables'], {}), '(univariate_variables)\n', (7711, 7733), True, 'import pyapprox as pya\n'), ((7767, 7815), 'pyapprox.AffineRandomVariableTransformation', 'pya.AffineRandomVariableTransformation', (['variable'], {}), '(variable)\n', (7805, 7815), True, 'import pyapprox as pya\n'), ((7831, 7861), 'pyapprox.PolynomialChaosExpansion', 'pya.PolynomialChaosExpansion', ([], {}), '()\n', (7859, 7861), True, 'import pyapprox as pya\n'), ((7882, 7945), 'pyapprox.define_poly_options_from_variable_transformation', 'pya.define_poly_options_from_variable_transformation', (['var_trans'], {}), '(var_trans)\n', (7934, 7945), True, 'import pyapprox as pya\n'), ((8210, 8260), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(poly.indices.shape[1], 2)'], {}), '(0, 1, (poly.indices.shape[1], 2))\n', (8226, 8260), True, 'import numpy as np\n'), ((8426, 8488), 'pyapprox.generate_independent_random_samples', 'pya.generate_independent_random_samples', (['variable', 'num_samples'], {}), '(variable, num_samples)\n', (8465, 8488), True, 'import pyapprox as pya\n'), ((8999, 9072), 'pyapprox.generate_independent_random_samples', 'pya.generate_independent_random_samples', (['variable', 'num_validation_samples'], {}), '(variable, num_validation_samples)\n', (9038, 9072), True, 'import pyapprox as pya\n'), ((9199, 9223), 'copy.deepcopy', 'copy.deepcopy', (['true_poly'], {}), '(true_poly)\n', (9212, 9223), False, 'import copy\n'), 
((9245, 9385), 'pyapprox.approximate.cross_validate_pce_degree', 'cross_validate_pce_degree', (['poly', 'train_samples', 'train_vals', '(1)', '(degree + 1)'], {'solver_type': 'solver_type', 'linear_solver_options': 'solver_options'}), '(poly, train_samples, train_vals, 1, degree + 1,\n solver_type=solver_type, linear_solver_options=solver_options)\n', (9270, 9385), False, 'from pyapprox.approximate import approximate, adaptive_approximate, cross_validate_pce_degree, compute_l2_error, cross_validate_approximation, LinearLeastSquaresCV\n'), ((9420, 9459), 'numpy.allclose', 'np.allclose', (['approx_res.degrees', '[2, 3]'], {}), '(approx_res.degrees, [2, 3])\n', (9431, 9459), True, 'import numpy as np\n'), ((10109, 10172), 'pyapprox.IndependentMultivariateRandomVariable', 'pya.IndependentMultivariateRandomVariable', (['univariate_variables'], {}), '(univariate_variables)\n', (10150, 10172), True, 'import pyapprox as pya\n'), ((10206, 10254), 'pyapprox.AffineRandomVariableTransformation', 'pya.AffineRandomVariableTransformation', (['variable'], {}), '(variable)\n', (10244, 10254), True, 'import pyapprox as pya\n'), ((10270, 10300), 'pyapprox.PolynomialChaosExpansion', 'pya.PolynomialChaosExpansion', ([], {}), '()\n', (10298, 10300), True, 'import pyapprox as pya\n'), ((10321, 10384), 'pyapprox.define_poly_options_from_variable_transformation', 'pya.define_poly_options_from_variable_transformation', (['var_trans'], {}), '(var_trans)\n', (10373, 10384), True, 'import pyapprox as pya\n'), ((11123, 11185), 'pyapprox.generate_independent_random_samples', 'pya.generate_independent_random_samples', (['variable', 'num_samples'], {}), '(variable, num_samples)\n', (11162, 11185), True, 'import pyapprox as pya\n'), ((11724, 11797), 'pyapprox.generate_independent_random_samples', 'pya.generate_independent_random_samples', (['variable', 'num_validation_samples'], {}), '(variable, num_validation_samples)\n', (11763, 11797), True, 'import pyapprox as pya\n'), ((12326, 12389), 
'pyapprox.IndependentMultivariateRandomVariable', 'pya.IndependentMultivariateRandomVariable', (['univariate_variables'], {}), '(univariate_variables)\n', (12367, 12389), True, 'import pyapprox as pya\n'), ((12453, 12515), 'pyapprox.generate_independent_random_samples', 'pya.generate_independent_random_samples', (['variable', 'num_samples'], {}), '(variable, num_samples)\n', (12492, 12515), True, 'import pyapprox as pya\n'), ((12609, 12627), 'sklearn.gaussian_process.kernels.Matern', 'Matern', (['(0.5)'], {'nu': 'nu'}), '(0.5, nu=nu)\n', (12615, 12627), False, 'from sklearn.gaussian_process.kernels import Matern\n'), ((12696, 12730), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', 'X.shape[1]'], {}), '(0, 1, X.shape[1])\n', (12712, 12730), True, 'import numpy as np\n'), ((13748, 13766), 'sklearn.gaussian_process.kernels.Matern', 'Matern', (['(0.1)'], {'nu': 'nu'}), '(0.1, nu=nu)\n', (13754, 13766), False, 'from sklearn.gaussian_process.kernels import Matern\n'), ((13835, 13869), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', 'X.shape[1]'], {}), '(0, 1, X.shape[1])\n', (13851, 13869), True, 'import numpy as np\n'), ((14078, 14119), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)', '(num_vars, 100)'], {}), '(-1, 1, (num_vars, 100))\n', (14095, 14119), True, 'import numpy as np\n'), ((15087, 15105), 'sklearn.gaussian_process.kernels.Matern', 'Matern', (['(0.1)'], {'nu': 'nu'}), '(0.1, nu=nu)\n', (15093, 15105), False, 'from sklearn.gaussian_process.kernels import Matern\n'), ((15174, 15208), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', 'X.shape[1]'], {}), '(0, 1, X.shape[1])\n', (15190, 15208), True, 'import numpy as np\n'), ((15956, 16046), 'functools.partial', 'partial', (['pya.tensor_product_pdf'], {'univariate_pdfs': '[v.pdf for v in univariate_variables]'}), '(pya.tensor_product_pdf, univariate_pdfs=[v.pdf for v in\n univariate_variables])\n', (15963, 16046), False, 'from functools import partial\n'), ((17162, 
17225), 'pyapprox.IndependentMultivariateRandomVariable', 'pya.IndependentMultivariateRandomVariable', (['univariate_variables'], {}), '(univariate_variables)\n', (17203, 17225), True, 'import pyapprox as pya\n'), ((17259, 17307), 'pyapprox.AffineRandomVariableTransformation', 'pya.AffineRandomVariableTransformation', (['variable'], {}), '(variable)\n', (17297, 17307), True, 'import pyapprox as pya\n'), ((17323, 17353), 'pyapprox.PolynomialChaosExpansion', 'pya.PolynomialChaosExpansion', ([], {}), '()\n', (17351, 17353), True, 'import pyapprox as pya\n'), ((17374, 17437), 'pyapprox.define_poly_options_from_variable_transformation', 'pya.define_poly_options_from_variable_transformation', (['var_trans'], {}), '(var_trans)\n', (17426, 17437), True, 'import pyapprox as pya\n'), ((18176, 18238), 'pyapprox.generate_independent_random_samples', 'pya.generate_independent_random_samples', (['variable', 'num_samples'], {}), '(variable, num_samples)\n', (18215, 18238), True, 'import pyapprox as pya\n'), ((18312, 18358), 'pyapprox.compute_hyperbolic_indices', 'pya.compute_hyperbolic_indices', (['num_vars', '(1)', '(1)'], {}), '(num_vars, 1, 1)\n', (18342, 18358), True, 'import pyapprox as pya\n'), ((18676, 18780), 'pyapprox.approximate.cross_validate_approximation', 'cross_validate_approximation', (['train_samples', 'train_vals', 'options', 'nfolds', 'method'], {'random_folds': '(False)'}), '(train_samples, train_vals, options, nfolds,\n method, random_folds=False)\n', (18704, 18780), False, 'from pyapprox.approximate import approximate, adaptive_approximate, cross_validate_pce_degree, compute_l2_error, cross_validate_approximation, LinearLeastSquaresCV\n'), ((18828, 18879), 'pyapprox.approximate.LinearLeastSquaresCV', 'LinearLeastSquaresCV', ([], {'cv': 'nfolds', 'random_folds': '(False)'}), '(cv=nfolds, random_folds=False)\n', (18848, 18879), False, 'from pyapprox.approximate import approximate, adaptive_approximate, cross_validate_pce_degree, compute_l2_error, 
cross_validate_approximation, LinearLeastSquaresCV\n'), ((19038, 19080), 'numpy.allclose', 'np.allclose', (['solver.cv_score_', 'cv_score[0]'], {}), '(solver.cv_score_, cv_score[0])\n', (19049, 19080), True, 'import numpy as np\n'), ((19150, 19192), 'numpy.allclose', 'np.allclose', (['solver.cv_score_', 'cv_score[1]'], {}), '(solver.cv_score_, cv_score[1])\n', (19161, 19192), True, 'import numpy as np\n'), ((19943, 20006), 'pyapprox.IndependentMultivariateRandomVariable', 'pya.IndependentMultivariateRandomVariable', (['univariate_variables'], {}), '(univariate_variables)\n', (19984, 20006), True, 'import pyapprox as pya\n'), ((20040, 20088), 'pyapprox.AffineRandomVariableTransformation', 'pya.AffineRandomVariableTransformation', (['variable'], {}), '(variable)\n', (20078, 20088), True, 'import pyapprox as pya\n'), ((20104, 20134), 'pyapprox.PolynomialChaosExpansion', 'pya.PolynomialChaosExpansion', ([], {}), '()\n', (20132, 20134), True, 'import pyapprox as pya\n'), ((20155, 20218), 'pyapprox.define_poly_options_from_variable_transformation', 'pya.define_poly_options_from_variable_transformation', (['var_trans'], {}), '(var_trans)\n', (20207, 20218), True, 'import pyapprox as pya\n'), ((20957, 21019), 'pyapprox.generate_independent_random_samples', 'pya.generate_independent_random_samples', (['variable', 'num_samples'], {}), '(variable, num_samples)\n', (20996, 21019), True, 'import pyapprox as pya\n'), ((21119, 21238), 'pyapprox.approximate.approximate', 'approximate', (['train_samples', 'train_vals', '"""polynomial_chaos"""', "{'basis_type': 'expanding_basis', 'variable': variable}"], {}), "(train_samples, train_vals, 'polynomial_chaos', {'basis_type':\n 'expanding_basis', 'variable': variable})\n", (21130, 21238), False, 'from pyapprox.approximate import approximate, adaptive_approximate, cross_validate_pce_degree, compute_l2_error, cross_validate_approximation, LinearLeastSquaresCV\n'), ((22073, 22193), 'pyapprox.approximate.cross_validate_approximation', 
'cross_validate_approximation', (['train_samples', 'train_vals', 'options', 'nfolds', '"""polynomial_chaos"""'], {'random_folds': '"""sklearn"""'}), "(train_samples, train_vals, options, nfolds,\n 'polynomial_chaos', random_folds='sklearn')\n", (22101, 22193), False, 'from pyapprox.approximate import approximate, adaptive_approximate, cross_validate_pce_degree, compute_l2_error, cross_validate_approximation, LinearLeastSquaresCV\n'), ((22356, 22373), 'numpy.random.seed', 'np.random.seed', (['(2)'], {}), '(2)\n', (22370, 22373), True, 'import numpy as np\n'), ((22394, 22433), 'pyapprox.benchmarks.benchmarks.setup_benchmark', 'setup_benchmark', (['"""ishigami"""'], {'a': '(7)', 'b': '(0.1)'}), "('ishigami', a=7, b=0.1)\n", (22409, 22433), False, 'from pyapprox.benchmarks.benchmarks import setup_benchmark\n'), ((22682, 22740), 'pyapprox.AffineRandomVariableTransformation', 'pya.AffineRandomVariableTransformation', (['benchmark.variable'], {}), '(benchmark.variable)\n', (22720, 22740), True, 'import pyapprox as pya\n'), ((23302, 23377), 'pyapprox.generate_independent_random_samples', 'pya.generate_independent_random_samples', (['var_trans.variable', 'ntrain_samples'], {}), '(var_trans.variable, ntrain_samples)\n', (23341, 23377), True, 'import pyapprox as pya\n'), ((23836, 23905), 'pyapprox.approximate.compute_l2_error', 'compute_l2_error', (['approx', 'benchmark.fun', 'var_trans.variable', 'nsamples'], {}), '(approx, benchmark.fun, var_trans.variable, nsamples)\n', (23852, 23905), False, 'from pyapprox.approximate import approximate, adaptive_approximate, cross_validate_pce_degree, compute_l2_error, cross_validate_approximation, LinearLeastSquaresCV\n'), ((666, 738), 'pyapprox.approximate.adaptive_approximate', 'adaptive_approximate', (['benchmark.fun', 'univariate_variables', '"""sparse_grid"""'], {}), "(benchmark.fun, univariate_variables, 'sparse_grid')\n", (686, 738), False, 'from pyapprox.approximate import approximate, adaptive_approximate, 
cross_validate_pce_degree, compute_l2_error, cross_validate_approximation, LinearLeastSquaresCV\n'), ((1177, 1239), 'pyapprox.approximate.adaptive_approximate', 'adaptive_approximate', (['fun', 'univariate_variables', '"""sparse_grid"""'], {}), "(fun, univariate_variables, 'sparse_grid')\n", (1197, 1239), False, 'from pyapprox.approximate import approximate, adaptive_approximate, cross_validate_pce_degree, compute_l2_error, cross_validate_approximation, LinearLeastSquaresCV\n'), ((1864, 1935), 'numpy.allclose', 'np.allclose', (['approx.samples_1d[0][ll][:n]', 'approx.samples_1d[0][ll - 1]'], {}), '(approx.samples_1d[0][ll][:n], approx.samples_1d[0][ll - 1])\n', (1875, 1935), True, 'import numpy as np\n'), ((2251, 2346), 'pyapprox.approximate.compute_l2_error', 'compute_l2_error', (['approx', 'benchmark.fun', 'approx.variable_transformation.variable', 'nsamples'], {}), '(approx, benchmark.fun, approx.variable_transformation.\n variable, nsamples)\n', (2267, 2346), False, 'from pyapprox.approximate import approximate, adaptive_approximate, cross_validate_pce_degree, compute_l2_error, cross_validate_approximation, LinearLeastSquaresCV\n'), ((3283, 3368), 'pyapprox.approximate.adaptive_approximate', 'adaptive_approximate', (['benchmark.fun', 'univariate_variables', '"""sparse_grid"""', 'options'], {}), "(benchmark.fun, univariate_variables, 'sparse_grid',\n options)\n", (3303, 3368), False, 'from pyapprox.approximate import approximate, adaptive_approximate, cross_validate_pce_degree, compute_l2_error, cross_validate_approximation, LinearLeastSquaresCV\n'), ((3432, 3446), 'numpy.min', 'np.min', (['errors'], {}), '(errors)\n', (3438, 3446), True, 'import numpy as np\n'), ((3849, 4005), 'pyapprox.approximate.adaptive_approximate', 'adaptive_approximate', (['benchmark.fun', 'univariate_variables'], {'method': '"""polynomial_chaos"""', 'options': "{'method': 'leja', 'options': {'max_nsamples': 100}}"}), "(benchmark.fun, univariate_variables, method=\n 'polynomial_chaos', 
options={'method': 'leja', 'options': {\n 'max_nsamples': 100}})\n", (3869, 4005), False, 'from pyapprox.approximate import approximate, adaptive_approximate, cross_validate_pce_degree, compute_l2_error, cross_validate_approximation, LinearLeastSquaresCV\n'), ((5247, 5491), 'pyapprox.approximate.adaptive_approximate', 'adaptive_approximate', (['benchmark.fun', 'univariate_variables'], {'method': '"""polynomial_chaos"""', 'options': "{'method': 'induced', 'options': {'max_nsamples': 100, 'induced_sampling': \n False, 'cond_tol': 10000.0, 'max_level_1d': 4, 'verbose': 3}}"}), "(benchmark.fun, univariate_variables, method=\n 'polynomial_chaos', options={'method': 'induced', 'options': {\n 'max_nsamples': 100, 'induced_sampling': False, 'cond_tol': 10000.0,\n 'max_level_1d': 4, 'verbose': 3}})\n", (5267, 5491), False, 'from pyapprox.approximate import approximate, adaptive_approximate, cross_validate_pce_degree, compute_l2_error, cross_validate_approximation, LinearLeastSquaresCV\n'), ((7061, 7148), 'pyapprox.approximate.approximate', 'approximate', (['train_samples', 'train_vals'], {'method': '"""polynomial_chaos"""', 'options': 'options'}), "(train_samples, train_vals, method='polynomial_chaos', options=\n options)\n", (7072, 7148), False, 'from pyapprox.approximate import approximate, adaptive_approximate, cross_validate_pce_degree, compute_l2_error, cross_validate_approximation, LinearLeastSquaresCV\n'), ((8038, 8091), 'pyapprox.compute_hyperbolic_indices', 'pya.compute_hyperbolic_indices', (['num_vars', 'degree', '(1.0)'], {}), '(num_vars, degree, 1.0)\n', (8068, 8091), True, 'import pyapprox as pya\n'), ((8584, 8851), 'pyapprox.approximate.approximate', 'approximate', (['train_samples', 'train_vals', '"""polynomial_chaos"""', "{'basis_type': 'hyperbolic_cross', 'variable': variable, 'options': {\n 'verbose': 3, 'solver_type': solver_type, 'min_degree': 1, 'max_degree':\n degree + 1, 'linear_solver_options': solver_options}}"], {}), "(train_samples, train_vals, 
'polynomial_chaos', {'basis_type':\n 'hyperbolic_cross', 'variable': variable, 'options': {'verbose': 3,\n 'solver_type': solver_type, 'min_degree': 1, 'max_degree': degree + 1,\n 'linear_solver_options': solver_options}})\n", (8595, 8851), False, 'from pyapprox.approximate import approximate, adaptive_approximate, cross_validate_pce_degree, compute_l2_error, cross_validate_approximation, LinearLeastSquaresCV\n'), ((10512, 10577), 'pyapprox.compute_hyperbolic_indices', 'pya.compute_hyperbolic_indices', (['num_vars', 'degree', 'hcross_strength'], {}), '(num_vars, degree, hcross_strength)\n', (10542, 10577), True, 'import pyapprox as pya\n'), ((10678, 10728), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(poly.indices.shape[1], 2)'], {}), '(0, 1, (poly.indices.shape[1], 2))\n', (10694, 10728), True, 'import numpy as np\n'), ((10890, 10926), 'numpy.random.permutation', 'np.random.permutation', (['coef.shape[0]'], {}), '(coef.shape[0])\n', (10911, 10926), True, 'import numpy as np\n'), ((10983, 11019), 'numpy.random.permutation', 'np.random.permutation', (['coef.shape[0]'], {}), '(coef.shape[0])\n', (11004, 11019), True, 'import numpy as np\n'), ((11281, 11549), 'pyapprox.approximate.approximate', 'approximate', (['train_samples', 'train_vals', '"""polynomial_chaos"""', "{'basis_type': 'expanding_basis', 'variable': variable, 'options': {\n 'max_num_expansion_steps_iter': 1, 'verbose': 3, 'max_num_terms': 1000,\n 'max_num_step_increases': 2, 'max_num_init_terms': 33}}"], {}), "(train_samples, train_vals, 'polynomial_chaos', {'basis_type':\n 'expanding_basis', 'variable': variable, 'options': {\n 'max_num_expansion_steps_iter': 1, 'verbose': 3, 'max_num_terms': 1000,\n 'max_num_step_increases': 2, 'max_num_init_terms': 33}})\n", (11292, 11549), False, 'from pyapprox.approximate import approximate, adaptive_approximate, cross_validate_pce_degree, compute_l2_error, cross_validate_approximation, LinearLeastSquaresCV\n'), ((11954, 11985), 'numpy.sqrt', 
'np.sqrt', (['num_validation_samples'], {}), '(num_validation_samples)\n', (11961, 11985), True, 'import numpy as np\n'), ((12640, 12664), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', '(1000)'], {}), '(-1, 1, 1000)\n', (12651, 12664), True, 'import numpy as np\n'), ((12821, 12917), 'pyapprox.approximate.approximate', 'approximate', (['train_samples', 'train_vals', '"""gaussian_process"""', "{'nu': nu, 'noise_level': 1e-08}"], {}), "(train_samples, train_vals, 'gaussian_process', {'nu': nu,\n 'noise_level': 1e-08})\n", (12832, 12917), False, 'from pyapprox.approximate import approximate, adaptive_approximate, cross_validate_pce_degree, compute_l2_error, cross_validate_approximation, LinearLeastSquaresCV\n'), ((13033, 13052), 'numpy.sqrt', 'np.sqrt', (['X.shape[1]'], {}), '(X.shape[1])\n', (13040, 13052), True, 'import numpy as np\n'), ((13779, 13803), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', '(1000)'], {}), '(-1, 1, 1000)\n', (13790, 13803), True, 'import numpy as np\n'), ((14506, 14703), 'pyapprox.approximate.adaptive_approximate', 'adaptive_approximate', (['fun', 'univariate_variables', '"""gaussian_process"""', "{'nu': nu, 'noise_level': None, 'normalize_y': True, 'alpha': 1e-10,\n 'ncandidate_samples': 1000.0, 'callback': callback}"], {}), "(fun, univariate_variables, 'gaussian_process', {'nu':\n nu, 'noise_level': None, 'normalize_y': True, 'alpha': 1e-10,\n 'ncandidate_samples': 1000.0, 'callback': callback})\n", (14526, 14703), False, 'from pyapprox.approximate import approximate, adaptive_approximate, cross_validate_pce_degree, compute_l2_error, cross_validate_approximation, LinearLeastSquaresCV\n'), ((15118, 15142), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', '(1000)'], {}), '(-1, 1, 1000)\n', (15129, 15142), True, 'import numpy as np\n'), ((15470, 15533), 'pyapprox.IndependentMultivariateRandomVariable', 'pya.IndependentMultivariateRandomVariable', (['univariate_variables'], {}), '(univariate_variables)\n', (15511, 15533), True, 'import 
pyapprox as pya\n'), ((16082, 16345), 'pyapprox.approximate.adaptive_approximate', 'adaptive_approximate', (['fun', 'univariate_variables', '"""gaussian_process"""', "{'nu': nu, 'noise_level': None, 'normalize_y': True, 'alpha': 1e-10,\n 'normalize_inputs': True, 'weight_function': weight_function,\n 'ncandidate_samples': 1000.0, 'callback': callback}"], {}), "(fun, univariate_variables, 'gaussian_process', {'nu':\n nu, 'noise_level': None, 'normalize_y': True, 'alpha': 1e-10,\n 'normalize_inputs': True, 'weight_function': weight_function,\n 'ncandidate_samples': 1000.0, 'callback': callback})\n", (16102, 16345), False, 'from pyapprox.approximate import approximate, adaptive_approximate, cross_validate_pce_degree, compute_l2_error, cross_validate_approximation, LinearLeastSquaresCV\n'), ((17565, 17630), 'pyapprox.compute_hyperbolic_indices', 'pya.compute_hyperbolic_indices', (['num_vars', 'degree', 'hcross_strength'], {}), '(num_vars, degree, hcross_strength)\n', (17595, 17630), True, 'import pyapprox as pya\n'), ((17731, 17781), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(poly.indices.shape[1], 2)'], {}), '(0, 1, (poly.indices.shape[1], 2))\n', (17747, 17781), True, 'import numpy as np\n'), ((17943, 17979), 'numpy.random.permutation', 'np.random.permutation', (['coef.shape[0]'], {}), '(coef.shape[0])\n', (17964, 17979), True, 'import numpy as np\n'), ((18036, 18072), 'numpy.random.permutation', 'np.random.permutation', (['coef.shape[0]'], {}), '(coef.shape[0])\n', (18057, 18072), True, 'import numpy as np\n'), ((20346, 20411), 'pyapprox.compute_hyperbolic_indices', 'pya.compute_hyperbolic_indices', (['num_vars', 'degree', 'hcross_strength'], {}), '(num_vars, degree, hcross_strength)\n', (20376, 20411), True, 'import pyapprox as pya\n'), ((20512, 20562), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(poly.indices.shape[1], 2)'], {}), '(0, 1, (poly.indices.shape[1], 2))\n', (20528, 20562), True, 'import numpy as np\n'), ((20724, 
20760), 'numpy.random.permutation', 'np.random.permutation', (['coef.shape[0]'], {}), '(coef.shape[0])\n', (20745, 20760), True, 'import numpy as np\n'), ((20817, 20853), 'numpy.random.permutation', 'np.random.permutation', (['coef.shape[0]'], {}), '(coef.shape[0])\n', (20838, 20853), True, 'import numpy as np\n'), ((22240, 22264), 'numpy.all', 'np.all', (['(cv_score < 6e-14)'], {}), '(cv_score < 6e-14)\n', (22246, 22264), True, 'import numpy as np\n'), ((22269, 22298), 'numpy.all', 'np.all', (['(result.scores < 4e-13)'], {}), '(result.scores < 4e-13)\n', (22275, 22298), True, 'import numpy as np\n'), ((23714, 23776), 'pyapprox.approximate.approximate', 'approximate', (['train_samples', 'train_vals', '"""neural_network"""', 'opts'], {}), "(train_samples, train_vals, 'neural_network', opts)\n", (23725, 23776), False, 'from pyapprox.approximate import approximate, adaptive_approximate, cross_validate_pce_degree, compute_l2_error, cross_validate_approximation, LinearLeastSquaresCV\n'), ((24010, 24031), 'unittest.TestLoader', 'unittest.TestLoader', ([], {}), '()\n', (24029, 24031), False, 'import unittest\n'), ((24084, 24120), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {'verbosity': '(2)'}), '(verbosity=2)\n', (24107, 24120), False, 'import unittest\n'), ((622, 641), 'scipy.stats.uniform', 'stats.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (635, 641), False, 'from scipy import stats\n'), ((1132, 1152), 'scipy.stats.binom', 'stats.binom', (['(20)', '(0.5)'], {}), '(20, 0.5)\n', (1143, 1152), False, 'from scipy import stats\n'), ((3805, 3824), 'scipy.stats.uniform', 'stats.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (3818, 3824), False, 'from scipy import stats\n'), ((4599, 4618), 'scipy.stats.uniform', 'stats.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (4612, 4618), False, 'from scipy import stats\n'), ((6156, 6191), 'scipy.stats.beta', 'stats.beta', (['(5)', '(5)', '(-np.pi)', '(2 * np.pi)'], {}), '(5, 5, -np.pi, 2 * np.pi)\n', (6166, 6191), False, 
'from scipy import stats\n'), ((7620, 7640), 'scipy.stats.uniform', 'stats.uniform', (['(-1)', '(2)'], {}), '(-1, 2)\n', (7633, 7640), False, 'from scipy import stats\n'), ((10059, 10079), 'scipy.stats.uniform', 'stats.uniform', (['(-1)', '(2)'], {}), '(-1, 2)\n', (10072, 10079), False, 'from scipy import stats\n'), ((12276, 12296), 'scipy.stats.uniform', 'stats.uniform', (['(-1)', '(2)'], {}), '(-1, 2)\n', (12289, 12296), False, 'from scipy import stats\n'), ((13637, 13657), 'scipy.stats.uniform', 'stats.uniform', (['(-1)', '(2)'], {}), '(-1, 2)\n', (13650, 13657), False, 'from scipy import stats\n'), ((14324, 14367), 'numpy.linalg.norm', 'np.linalg.norm', (['(gp_vals - validation_values)'], {}), '(gp_vals - validation_values)\n', (14338, 14367), True, 'import numpy as np\n'), ((14366, 14399), 'numpy.linalg.norm', 'np.linalg.norm', (['validation_values'], {}), '(validation_values)\n', (14380, 14399), True, 'import numpy as np\n'), ((14973, 14996), 'scipy.stats.beta', 'stats.beta', (['(5)', '(10)', '(0)', '(2)'], {}), '(5, 10, 0, 2)\n', (14983, 14996), False, 'from scipy import stats\n'), ((15756, 15799), 'numpy.linalg.norm', 'np.linalg.norm', (['(gp_vals - validation_values)'], {}), '(gp_vals - validation_values)\n', (15770, 15799), True, 'import numpy as np\n'), ((15798, 15831), 'numpy.linalg.norm', 'np.linalg.norm', (['validation_values'], {}), '(validation_values)\n', (15812, 15831), True, 'import numpy as np\n'), ((17112, 17132), 'scipy.stats.uniform', 'stats.uniform', (['(-1)', '(2)'], {}), '(-1, 2)\n', (17125, 17132), False, 'from scipy import stats\n'), ((19893, 19913), 'scipy.stats.uniform', 'stats.uniform', (['(-1)', '(2)'], {}), '(-1, 2)\n', (19906, 19913), False, 'from scipy import stats\n'), ((23470, 23522), 'numpy.random.uniform', 'np.random.uniform', (['(0)', 'np.pi', '(nvars, ntrain_samples)'], {}), '(0, np.pi, (nvars, ntrain_samples))\n', (23487, 23522), True, 'import numpy as np\n'), ((8274, 8303), 'pyapprox.nchoosek', 'pya.nchoosek', (['(num_vars 
+ 2)', '(2)'], {}), '(num_vars + 2, 2)\n', (8286, 8303), True, 'import pyapprox as pya\n'), ((6674, 6689), 'scipy.stats.uniform', 'stats.uniform', ([], {}), '()\n', (6687, 6689), False, 'from scipy import stats\n'), ((21735, 21749), 'numpy.absolute', 'np.absolute', (['c'], {}), '(c)\n', (21746, 21749), True, 'import numpy as np\n')] |
"""
Tests for transformer objects.
"""
from __future__ import division
from __future__ import unicode_literals
from deepchem.molnet import load_delaney
from deepchem.trans.transformers import FeaturizationTransformer
__author__ = "<NAME>"
__copyright__ = "Copyright 2016, Stanford University"
__license__ = "MIT"
import os
import unittest
import numpy as np
import pandas as pd
import deepchem as dc
class TestTransformers(unittest.TestCase):
"""
Test top-level API for transformer objects.
"""
def setUp(self):
super(TestTransformers, self).setUp()
self.current_dir = os.path.dirname(os.path.abspath(__file__))
def test_y_log_transformer(self):
"""Tests logarithmic data transformer."""
solubility_dataset = dc.data.tests.load_solubility_data()
log_transformer = dc.trans.LogTransformer(
transform_y=True, dataset=solubility_dataset)
X, y, w, ids = (solubility_dataset.X, solubility_dataset.y,
solubility_dataset.w, solubility_dataset.ids)
solubility_dataset = log_transformer.transform(solubility_dataset)
X_t, y_t, w_t, ids_t = (solubility_dataset.X, solubility_dataset.y,
solubility_dataset.w, solubility_dataset.ids)
# Check ids are unchanged.
for id_elt, id_t_elt in zip(ids, ids_t):
assert id_elt == id_t_elt
# Check X is unchanged since this is a y transformer
np.testing.assert_allclose(X, X_t)
# Check w is unchanged since this is a y transformer
np.testing.assert_allclose(w, w_t)
# Check y is now a logarithmic version of itself
np.testing.assert_allclose(y_t, np.log(y + 1))
# Check that untransform does the right thing.
np.testing.assert_allclose(log_transformer.untransform(y_t), y)
def test_transform_unlabelled(self):
ul_dataset = dc.data.tests.load_unlabelled_data()
# transforming y should raise an exception
with self.assertRaises(ValueError) as context:
dc.trans.transformers.Transformer(transform_y=True).transform(ul_dataset)
# transforming w should raise an exception
with self.assertRaises(ValueError) as context:
dc.trans.transformers.Transformer(transform_w=True).transform(ul_dataset)
# transforming X should be okay
dc.trans.NormalizationTransformer(
transform_X=True, dataset=ul_dataset).transform(ul_dataset)
def test_X_log_transformer(self):
"""Tests logarithmic data transformer."""
solubility_dataset = dc.data.tests.load_solubility_data()
log_transformer = dc.trans.LogTransformer(
transform_X=True, dataset=solubility_dataset)
X, y, w, ids = (solubility_dataset.X, solubility_dataset.y,
solubility_dataset.w, solubility_dataset.ids)
solubility_dataset = log_transformer.transform(solubility_dataset)
X_t, y_t, w_t, ids_t = (solubility_dataset.X, solubility_dataset.y,
solubility_dataset.w, solubility_dataset.ids)
# Check ids are unchanged.
for id_elt, id_t_elt in zip(ids, ids_t):
assert id_elt == id_t_elt
# Check y is unchanged since this is a X transformer
np.testing.assert_allclose(y, y_t)
# Check w is unchanged since this is a y transformer
np.testing.assert_allclose(w, w_t)
# Check y is now a logarithmic version of itself
np.testing.assert_allclose(X_t, np.log(X + 1))
# Check that untransform does the right thing.
np.testing.assert_allclose(log_transformer.untransform(X_t), X)
def test_y_log_transformer_select(self):
"""Tests logarithmic data transformer with selection."""
multitask_dataset = dc.data.tests.load_feat_multitask_data()
dfe = pd.read_csv(
os.path.join(self.current_dir,
"../../models/tests/feat_multitask_example.csv"))
tid = []
tasklist = ["task0", "task3", "task4", "task5"]
first_task = "task0"
for task in tasklist:
tiid = dfe.columns.get_loc(task) - dfe.columns.get_loc(first_task)
tid = np.concatenate((tid, np.array([tiid])))
tasks = tid.astype(int)
log_transformer = dc.trans.LogTransformer(
transform_y=True, tasks=tasks, dataset=multitask_dataset)
X, y, w, ids = (multitask_dataset.X, multitask_dataset.y,
multitask_dataset.w, multitask_dataset.ids)
multitask_dataset = log_transformer.transform(multitask_dataset)
X_t, y_t, w_t, ids_t = (multitask_dataset.X, multitask_dataset.y,
multitask_dataset.w, multitask_dataset.ids)
# Check ids are unchanged.
for id_elt, id_t_elt in zip(ids, ids_t):
assert id_elt == id_t_elt
# Check X is unchanged since this is a y transformer
np.testing.assert_allclose(X, X_t)
# Check w is unchanged since this is a y transformer
np.testing.assert_allclose(w, w_t)
# Check y is now a logarithmic version of itself
np.testing.assert_allclose(y_t[:, tasks], np.log(y[:, tasks] + 1))
# Check that untransform does the right thing.
np.testing.assert_allclose(log_transformer.untransform(y_t), y)
def test_X_log_transformer_select(self):
# Tests logarithmic data transformer with selection.
multitask_dataset = dc.data.tests.load_feat_multitask_data()
dfe = pd.read_csv(
os.path.join(self.current_dir,
"../../models/tests/feat_multitask_example.csv"))
fid = []
featurelist = ["feat0", "feat1", "feat2", "feat3", "feat5"]
first_feature = "feat0"
for feature in featurelist:
fiid = dfe.columns.get_loc(feature) - dfe.columns.get_loc(first_feature)
fid = np.concatenate((fid, np.array([fiid])))
features = fid.astype(int)
log_transformer = dc.trans.LogTransformer(
transform_X=True, features=features, dataset=multitask_dataset)
X, y, w, ids = (multitask_dataset.X, multitask_dataset.y,
multitask_dataset.w, multitask_dataset.ids)
multitask_dataset = log_transformer.transform(multitask_dataset)
X_t, y_t, w_t, ids_t = (multitask_dataset.X, multitask_dataset.y,
multitask_dataset.w, multitask_dataset.ids)
# Check ids are unchanged.
for id_elt, id_t_elt in zip(ids, ids_t):
assert id_elt == id_t_elt
# Check y is unchanged since this is a X transformer
np.testing.assert_allclose(y, y_t)
# Check w is unchanged since this is a y transformer
np.testing.assert_allclose(w, w_t)
# Check y is now a logarithmic version of itself
np.testing.assert_allclose(X_t[:, features], np.log(X[:, features] + 1))
# Check that untransform does the right thing.
np.testing.assert_allclose(log_transformer.untransform(X_t), X)
def test_y_normalization_transformer(self):
"""Tests normalization transformer."""
solubility_dataset = dc.data.tests.load_solubility_data()
normalization_transformer = dc.trans.NormalizationTransformer(
transform_y=True, dataset=solubility_dataset)
X, y, w, ids = (solubility_dataset.X, solubility_dataset.y,
solubility_dataset.w, solubility_dataset.ids)
solubility_dataset = normalization_transformer.transform(solubility_dataset)
X_t, y_t, w_t, ids_t = (solubility_dataset.X, solubility_dataset.y,
solubility_dataset.w, solubility_dataset.ids)
# Check ids are unchanged.
for id_elt, id_t_elt in zip(ids, ids_t):
assert id_elt == id_t_elt
# Check X is unchanged since this is a y transformer
np.testing.assert_allclose(X, X_t)
# Check w is unchanged since this is a y transformer
np.testing.assert_allclose(w, w_t)
# Check that y_t has zero mean, unit std.
assert np.isclose(y_t.mean(), 0.)
assert np.isclose(y_t.std(), 1.)
# Check that untransform does the right thing.
np.testing.assert_allclose(normalization_transformer.untransform(y_t), y)
def test_X_normalization_transformer(self):
"""Tests normalization transformer."""
solubility_dataset = dc.data.tests.load_solubility_data()
normalization_transformer = dc.trans.NormalizationTransformer(
transform_X=True, dataset=solubility_dataset)
X, y, w, ids = (solubility_dataset.X, solubility_dataset.y,
solubility_dataset.w, solubility_dataset.ids)
solubility_dataset = normalization_transformer.transform(solubility_dataset)
X_t, y_t, w_t, ids_t = (solubility_dataset.X, solubility_dataset.y,
solubility_dataset.w, solubility_dataset.ids)
# Check ids are unchanged.
for id_elt, id_t_elt in zip(ids, ids_t):
assert id_elt == id_t_elt
# Check y is unchanged since this is a X transformer
np.testing.assert_allclose(y, y_t)
# Check w is unchanged since this is a y transformer
np.testing.assert_allclose(w, w_t)
# Check that X_t has zero mean, unit std.
# np.set_printoptions(threshold='nan')
mean = X_t.mean(axis=0)
assert np.amax(np.abs(mean - np.zeros_like(mean))) < 1e-7
orig_std_array = X.std(axis=0)
std_array = X_t.std(axis=0)
# Entries with zero std are not normalized
for orig_std, std in zip(orig_std_array, std_array):
if not np.isclose(orig_std, 0):
assert np.isclose(std, 1)
# TODO(rbharath): Untransform doesn't work properly for binary feature
# vectors. Need to figure out what's wrong here. (low priority)
## Check that untransform does the right thing.
# np.testing.assert_allclose(normalization_transformer.untransform(X_t), X)
def test_cdf_X_transformer(self):
"""Test CDF transformer on Gaussian normal dataset."""
target = np.array(np.transpose(np.linspace(0., 1., 1001)))
target = np.transpose(np.array(np.append([target], [target], axis=0)))
gaussian_dataset = dc.data.tests.load_gaussian_cdf_data()
bins = 1001
cdf_transformer = dc.trans.CDFTransformer(
transform_X=True, dataset=gaussian_dataset, bins=bins)
X, y, w, ids = (gaussian_dataset.X, gaussian_dataset.y, gaussian_dataset.w,
gaussian_dataset.ids)
gaussian_dataset = cdf_transformer.transform(gaussian_dataset, bins=bins)
X_t, y_t, w_t, ids_t = (gaussian_dataset.X, gaussian_dataset.y,
gaussian_dataset.w, gaussian_dataset.ids)
# Check ids are unchanged.
for id_elt, id_t_elt in zip(ids, ids_t):
assert id_elt == id_t_elt
# Check y is unchanged since this is an X transformer
np.testing.assert_allclose(y, y_t)
# Check w is unchanged since this is an X transformer
np.testing.assert_allclose(w, w_t)
# Check X is now holding the proper values when sorted.
sorted = np.sort(X_t, axis=0)
np.testing.assert_allclose(sorted, target)
def test_cdf_y_transformer(self):
# Test CDF transformer on Gaussian normal dataset.
target = np.array(np.transpose(np.linspace(0., 1., 1001)))
target = np.transpose(np.array(np.append([target], [target], axis=0)))
gaussian_dataset = dc.data.tests.load_gaussian_cdf_data()
bins = 1001
cdf_transformer = dc.trans.CDFTransformer(
transform_y=True, dataset=gaussian_dataset, bins=bins)
X, y, w, ids = (gaussian_dataset.X, gaussian_dataset.y, gaussian_dataset.w,
gaussian_dataset.ids)
gaussian_dataset = cdf_transformer.transform(gaussian_dataset, bins=bins)
X_t, y_t, w_t, ids_t = (gaussian_dataset.X, gaussian_dataset.y,
gaussian_dataset.w, gaussian_dataset.ids)
# Check ids are unchanged.
for id_elt, id_t_elt in zip(ids, ids_t):
assert id_elt == id_t_elt
# Check X is unchanged since this is an y transformer
np.testing.assert_allclose(X, X_t)
# Check w is unchanged since this is an y transformer
np.testing.assert_allclose(w, w_t)
# Check y is now holding the proper values when sorted.
sorted = np.sort(y_t, axis=0)
np.testing.assert_allclose(sorted, target)
# Check that untransform does the right thing.
np.testing.assert_allclose(cdf_transformer.untransform(y_t), y)
def test_clipping_X_transformer(self):
"""Test clipping transformer on X of singletask dataset."""
n_samples = 10
n_features = 3
n_tasks = 1
ids = np.arange(n_samples)
X = np.ones((n_samples, n_features))
target = 5. * X
X *= 6.
y = np.zeros((n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
transformer = dc.trans.ClippingTransformer(transform_X=True, x_max=5.)
clipped_dataset = transformer.transform(dataset)
X_t, y_t, w_t, ids_t = (clipped_dataset.X, clipped_dataset.y,
clipped_dataset.w, clipped_dataset.ids)
# Check ids are unchanged.
for id_elt, id_t_elt in zip(ids, ids_t):
assert id_elt == id_t_elt
# Check y is unchanged since this is an X transformer
np.testing.assert_allclose(y, y_t)
# Check w is unchanged since this is an X transformer
np.testing.assert_allclose(w, w_t)
# Check X is now holding the proper values when sorted.
np.testing.assert_allclose(X_t, target)
def test_clipping_y_transformer(self):
"""Test clipping transformer on y of singletask dataset."""
n_samples = 10
n_features = 3
n_tasks = 1
ids = np.arange(n_samples)
X = np.zeros((n_samples, n_features))
y = np.ones((n_samples, n_tasks))
target = 5. * y
y *= 6.
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
transformer = dc.trans.ClippingTransformer(transform_y=True, y_max=5.)
clipped_dataset = transformer.transform(dataset)
X_t, y_t, w_t, ids_t = (clipped_dataset.X, clipped_dataset.y,
clipped_dataset.w, clipped_dataset.ids)
# Check ids are unchanged.
for id_elt, id_t_elt in zip(ids, ids_t):
assert id_elt == id_t_elt
# Check X is unchanged since this is a y transformer
np.testing.assert_allclose(X, X_t)
# Check w is unchanged since this is a y transformer
np.testing.assert_allclose(w, w_t)
# Check y is now holding the proper values when sorted.
np.testing.assert_allclose(y_t, target)
def test_power_X_transformer(self):
"""Test Power transformer on Gaussian normal dataset."""
gaussian_dataset = dc.data.tests.load_gaussian_cdf_data()
powers = [1, 2, 0.5]
power_transformer = dc.trans.PowerTransformer(
transform_X=True, powers=powers)
X, y, w, ids = (gaussian_dataset.X, gaussian_dataset.y, gaussian_dataset.w,
gaussian_dataset.ids)
gaussian_dataset2 = power_transformer.transform(gaussian_dataset)
X_t, y_t, w_t, ids_t = (gaussian_dataset2.X, gaussian_dataset2.y,
gaussian_dataset2.w, gaussian_dataset2.ids)
# Check ids are unchanged.
for id_elt, id_t_elt in zip(ids, ids_t):
assert id_elt == id_t_elt
# Check y is unchanged since this is an X transformer
np.testing.assert_allclose(y, y_t)
# Check w is unchanged since this is an X transformer
np.testing.assert_allclose(w, w_t)
# Check X is now holding the proper values in each column.
np.testing.assert_allclose(X_t.shape[1], len(powers) * X.shape[1])
np.testing.assert_allclose(X, X_t[:, :2])
np.testing.assert_allclose(np.power(X, 2), X_t[:, 2:4])
np.testing.assert_allclose(np.power(X, 0.5), X_t[:, 4:])
  def test_power_y_transformer(self):
    """Test Power transformer on y of Gaussian normal dataset.

    With transform_y=True, one block of columns per requested power is
    appended to y, and untransform must recover the original y.
    """
    gaussian_dataset = dc.data.tests.load_gaussian_cdf_data()
    powers = [1, 2, 0.5]
    power_transformer = dc.trans.PowerTransformer(
        transform_y=True, powers=powers)
    X, y, w, ids = (gaussian_dataset.X, gaussian_dataset.y, gaussian_dataset.w,
                    gaussian_dataset.ids)
    gaussian_dataset2 = power_transformer.transform(gaussian_dataset)
    X_t, y_t, w_t, ids_t = (gaussian_dataset2.X, gaussian_dataset2.y,
                            gaussian_dataset2.w, gaussian_dataset2.ids)
    # Check ids are unchanged.
    for id_elt, id_t_elt in zip(ids, ids_t):
      assert id_elt == id_t_elt
    # Check X is unchanged since this is a y transformer
    np.testing.assert_allclose(X, X_t)
    # Check w is unchanged since this is a y transformer
    np.testing.assert_allclose(w, w_t)
    # Check y is now holding the proper values in each column.
    np.testing.assert_allclose(y_t.shape[1], len(powers) * y.shape[1])
    np.testing.assert_allclose(y, y_t[:, :2])
    np.testing.assert_allclose(np.power(y, 2), y_t[:, 2:4])
    np.testing.assert_allclose(np.power(y, 0.5), y_t[:, 4:])
    # Check that untransform does the right thing.
    np.testing.assert_allclose(power_transformer.untransform(y_t), y)
  def test_singletask_balancing_transformer(self):
    """Test balancing transformer on single-task dataset.

    A w transformer must leave X, y and ids untouched while reweighting w
    so the two classes carry equal total weight in each task.
    """
    classification_dataset = dc.data.tests.load_classification_data()
    balancing_transformer = dc.trans.BalancingTransformer(
        transform_w=True, dataset=classification_dataset)
    X, y, w, ids = (classification_dataset.X, classification_dataset.y,
                    classification_dataset.w, classification_dataset.ids)
    classification_dataset = balancing_transformer.transform(
        classification_dataset)
    X_t, y_t, w_t, ids_t = (classification_dataset.X, classification_dataset.y,
                            classification_dataset.w,
                            classification_dataset.ids)
    # Check ids are unchanged.
    for id_elt, id_t_elt in zip(ids, ids_t):
      assert id_elt == id_t_elt
    # Check X is unchanged since this is a w transformer
    np.testing.assert_allclose(X, X_t)
    # Check y is unchanged since this is a w transformer
    np.testing.assert_allclose(y, y_t)
    for ind, task in enumerate(classification_dataset.get_task_names()):
      y_task = y_t[:, ind]
      w_task = w_t[:, ind]
      w_orig_task = w[:, ind]
      # Assert that entries with zero weight retain zero weight
      np.testing.assert_allclose(w_task[w_orig_task == 0],
                                 np.zeros_like(w_task[w_orig_task == 0]))
      # Check that sum of 0s equals sum of 1s in transformed for each task
      assert np.isclose(
          np.sum(w_task[y_task == 0]), np.sum(w_task[y_task == 1]))
def test_multitask_balancing_transformer(self):
"""Test balancing transformer on multitask dataset."""
multitask_dataset = dc.data.tests.load_multitask_data()
balancing_transformer = dc.trans.BalancingTransformer(
transform_w=True, dataset=multitask_dataset)
X, y, w, ids = (multitask_dataset.X, multitask_dataset.y,
multitask_dataset.w, multitask_dataset.ids)
multitask_dataset = balancing_transformer.transform(multitask_dataset)
X_t, y_t, w_t, ids_t = (multitask_dataset.X, multitask_dataset.y,
multitask_dataset.w, multitask_dataset.ids)
# Check ids are unchanged.
for id_elt, id_t_elt in zip(ids, ids_t):
assert id_elt == id_t_elt
# Check X is unchanged since this is a w transformer
np.testing.assert_allclose(X, X_t)
# Check y is unchanged since this is a w transformer
np.testing.assert_allclose(y, y_t)
for ind, task in enumerate(multitask_dataset.get_task_names()):
y_task = y_t[:, ind]
w_task = w_t[:, ind]
w_orig_task = w[:, ind]
# Assert that entries with zero weight retain zero weight
np.testing.assert_allclose(w_task[w_orig_task == 0],
np.zeros_like(w_task[w_orig_task == 0]))
# Check that sum of 0s equals sum of 1s in transformed for each task
assert np.isclose(
np.sum(w_task[y_task == 0]), np.sum(w_task[y_task == 1]))
def test_coulomb_fit_transformer(self):
"""Test coulomb fit transformer on singletask dataset."""
n_samples = 10
n_features = 3
n_tasks = 1
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features, n_features)
y = np.zeros((n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
fit_transformer = dc.trans.CoulombFitTransformer(dataset)
X_t = fit_transformer.X_transform(dataset.X)
assert len(X_t.shape) == 2
  def test_IRV_transformer(self):
    """Test IRVTransformer on random binary fingerprint datasets."""
    n_features = 128
    n_samples = 20
    test_samples = 5
    n_tasks = 2
    # Random binary feature matrices stand in for molecular fingerprints.
    X = np.random.randint(2, size=(n_samples, n_features))
    y = np.zeros((n_samples, n_tasks))
    w = np.ones((n_samples, n_tasks))
    dataset = dc.data.NumpyDataset(X, y, w, ids=None)
    X_test = np.random.randint(2, size=(test_samples, n_features))
    y_test = np.zeros((test_samples, n_tasks))
    w_test = np.ones((test_samples, n_tasks))
    test_dataset = dc.data.NumpyDataset(X_test, y_test, w_test, ids=None)
    # Jaccard similarity of the first test sample against every training
    # sample: |intersection| / |union| for binary vectors.
    sims = np.sum(
        X_test[0, :] * X, axis=1, dtype=float) / np.sum(
            np.sign(X_test[0, :] + X), axis=1, dtype=float)
    sims = sorted(sims, reverse=True)
    IRV_transformer = dc.trans.IRVTransformer(10, n_tasks, dataset)
    test_dataset_trans = IRV_transformer.transform(test_dataset)
    dataset_trans = IRV_transformer.transform(dataset)
    # 10 similarities + 10 labels per task -> 20 * n_tasks output features.
    assert test_dataset_trans.X.shape == (test_samples, 20 * n_tasks)
    # First 10 entries are the top-10 similarities in descending order.
    assert np.allclose(test_dataset_trans.X[0, :10], sims[:10])
    # Next 10 entries are the neighbors' labels, all zero here.
    assert np.allclose(test_dataset_trans.X[0, 10:20], [0] * 10)
    # Transforming the training set must exclude self-similarity of 1.
    assert not np.isclose(dataset_trans.X[0, 0], 1.)
def test_featurization_transformer(self):
fp_size = 2048
tasks, all_dataset, transformers = load_delaney('Raw')
train = all_dataset[0]
transformer = FeaturizationTransformer(
transform_X=True,
dataset=train,
featurizer=dc.feat.CircularFingerprint(size=fp_size))
new_train = transformer.transform(train)
self.assertEqual(new_train.y.shape, train.y.shape)
self.assertEqual(new_train.X.shape[-1], fp_size)
| [
"deepchem.data.tests.load_unlabelled_data",
"deepchem.data.tests.load_multitask_data",
"numpy.random.rand",
"numpy.log",
"numpy.array",
"deepchem.data.NumpyDataset",
"deepchem.data.tests.load_classification_data",
"deepchem.trans.IRVTransformer",
"numpy.arange",
"numpy.testing.assert_allclose",
... | [((742, 778), 'deepchem.data.tests.load_solubility_data', 'dc.data.tests.load_solubility_data', ([], {}), '()\n', (776, 778), True, 'import deepchem as dc\n'), ((801, 870), 'deepchem.trans.LogTransformer', 'dc.trans.LogTransformer', ([], {'transform_y': '(True)', 'dataset': 'solubility_dataset'}), '(transform_y=True, dataset=solubility_dataset)\n', (824, 870), True, 'import deepchem as dc\n'), ((1397, 1431), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['X', 'X_t'], {}), '(X, X_t)\n', (1423, 1431), True, 'import numpy as np\n'), ((1493, 1527), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['w', 'w_t'], {}), '(w, w_t)\n', (1519, 1527), True, 'import numpy as np\n'), ((1809, 1845), 'deepchem.data.tests.load_unlabelled_data', 'dc.data.tests.load_unlabelled_data', ([], {}), '()\n', (1843, 1845), True, 'import deepchem as dc\n'), ((2455, 2491), 'deepchem.data.tests.load_solubility_data', 'dc.data.tests.load_solubility_data', ([], {}), '()\n', (2489, 2491), True, 'import deepchem as dc\n'), ((2514, 2583), 'deepchem.trans.LogTransformer', 'dc.trans.LogTransformer', ([], {'transform_X': '(True)', 'dataset': 'solubility_dataset'}), '(transform_X=True, dataset=solubility_dataset)\n', (2537, 2583), True, 'import deepchem as dc\n'), ((3110, 3144), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['y', 'y_t'], {}), '(y, y_t)\n', (3136, 3144), True, 'import numpy as np\n'), ((3206, 3240), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['w', 'w_t'], {}), '(w, w_t)\n', (3232, 3240), True, 'import numpy as np\n'), ((3594, 3634), 'deepchem.data.tests.load_feat_multitask_data', 'dc.data.tests.load_feat_multitask_data', ([], {}), '()\n', (3632, 3634), True, 'import deepchem as dc\n'), ((4059, 4145), 'deepchem.trans.LogTransformer', 'dc.trans.LogTransformer', ([], {'transform_y': '(True)', 'tasks': 'tasks', 'dataset': 'multitask_dataset'}), '(transform_y=True, tasks=tasks, dataset=\n multitask_dataset)\n', 
(4082, 4145), True, 'import deepchem as dc\n'), ((4657, 4691), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['X', 'X_t'], {}), '(X, X_t)\n', (4683, 4691), True, 'import numpy as np\n'), ((4753, 4787), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['w', 'w_t'], {}), '(w, w_t)\n', (4779, 4787), True, 'import numpy as np\n'), ((5157, 5197), 'deepchem.data.tests.load_feat_multitask_data', 'dc.data.tests.load_feat_multitask_data', ([], {}), '()\n', (5195, 5197), True, 'import deepchem as dc\n'), ((5652, 5744), 'deepchem.trans.LogTransformer', 'dc.trans.LogTransformer', ([], {'transform_X': '(True)', 'features': 'features', 'dataset': 'multitask_dataset'}), '(transform_X=True, features=features, dataset=\n multitask_dataset)\n', (5675, 5744), True, 'import deepchem as dc\n'), ((6256, 6290), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['y', 'y_t'], {}), '(y, y_t)\n', (6282, 6290), True, 'import numpy as np\n'), ((6352, 6386), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['w', 'w_t'], {}), '(w, w_t)\n', (6378, 6386), True, 'import numpy as np\n'), ((6752, 6788), 'deepchem.data.tests.load_solubility_data', 'dc.data.tests.load_solubility_data', ([], {}), '()\n', (6786, 6788), True, 'import deepchem as dc\n'), ((6821, 6900), 'deepchem.trans.NormalizationTransformer', 'dc.trans.NormalizationTransformer', ([], {'transform_y': '(True)', 'dataset': 'solubility_dataset'}), '(transform_y=True, dataset=solubility_dataset)\n', (6854, 6900), True, 'import deepchem as dc\n'), ((7436, 7470), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['X', 'X_t'], {}), '(X, X_t)\n', (7462, 7470), True, 'import numpy as np\n'), ((7532, 7566), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['w', 'w_t'], {}), '(w, w_t)\n', (7558, 7566), True, 'import numpy as np\n'), ((7933, 7969), 'deepchem.data.tests.load_solubility_data', 'dc.data.tests.load_solubility_data', ([], {}), '()\n', (7967, 7969), 
True, 'import deepchem as dc\n'), ((8002, 8081), 'deepchem.trans.NormalizationTransformer', 'dc.trans.NormalizationTransformer', ([], {'transform_X': '(True)', 'dataset': 'solubility_dataset'}), '(transform_X=True, dataset=solubility_dataset)\n', (8035, 8081), True, 'import deepchem as dc\n'), ((8617, 8651), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['y', 'y_t'], {}), '(y, y_t)\n', (8643, 8651), True, 'import numpy as np\n'), ((8713, 8747), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['w', 'w_t'], {}), '(w, w_t)\n', (8739, 8747), True, 'import numpy as np\n'), ((9703, 9741), 'deepchem.data.tests.load_gaussian_cdf_data', 'dc.data.tests.load_gaussian_cdf_data', ([], {}), '()\n', (9739, 9741), True, 'import deepchem as dc\n'), ((9780, 9858), 'deepchem.trans.CDFTransformer', 'dc.trans.CDFTransformer', ([], {'transform_X': '(True)', 'dataset': 'gaussian_dataset', 'bins': 'bins'}), '(transform_X=True, dataset=gaussian_dataset, bins=bins)\n', (9803, 9858), True, 'import deepchem as dc\n'), ((10377, 10411), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['y', 'y_t'], {}), '(y, y_t)\n', (10403, 10411), True, 'import numpy as np\n'), ((10474, 10508), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['w', 'w_t'], {}), '(w, w_t)\n', (10500, 10508), True, 'import numpy as np\n'), ((10582, 10602), 'numpy.sort', 'np.sort', (['X_t'], {'axis': '(0)'}), '(X_t, axis=0)\n', (10589, 10602), True, 'import numpy as np\n'), ((10607, 10649), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['sorted', 'target'], {}), '(sorted, target)\n', (10633, 10649), True, 'import numpy as np\n'), ((10903, 10941), 'deepchem.data.tests.load_gaussian_cdf_data', 'dc.data.tests.load_gaussian_cdf_data', ([], {}), '()\n', (10939, 10941), True, 'import deepchem as dc\n'), ((10980, 11058), 'deepchem.trans.CDFTransformer', 'dc.trans.CDFTransformer', ([], {'transform_y': '(True)', 'dataset': 'gaussian_dataset', 'bins': 
'bins'}), '(transform_y=True, dataset=gaussian_dataset, bins=bins)\n', (11003, 11058), True, 'import deepchem as dc\n'), ((11577, 11611), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['X', 'X_t'], {}), '(X, X_t)\n', (11603, 11611), True, 'import numpy as np\n'), ((11674, 11708), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['w', 'w_t'], {}), '(w, w_t)\n', (11700, 11708), True, 'import numpy as np\n'), ((11782, 11802), 'numpy.sort', 'np.sort', (['y_t'], {'axis': '(0)'}), '(y_t, axis=0)\n', (11789, 11802), True, 'import numpy as np\n'), ((11807, 11849), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['sorted', 'target'], {}), '(sorted, target)\n', (11833, 11849), True, 'import numpy as np\n'), ((12140, 12160), 'numpy.arange', 'np.arange', (['n_samples'], {}), '(n_samples)\n', (12149, 12160), True, 'import numpy as np\n'), ((12169, 12201), 'numpy.ones', 'np.ones', (['(n_samples, n_features)'], {}), '((n_samples, n_features))\n', (12176, 12201), True, 'import numpy as np\n'), ((12242, 12272), 'numpy.zeros', 'np.zeros', (['(n_samples, n_tasks)'], {}), '((n_samples, n_tasks))\n', (12250, 12272), True, 'import numpy as np\n'), ((12281, 12310), 'numpy.ones', 'np.ones', (['(n_samples, n_tasks)'], {}), '((n_samples, n_tasks))\n', (12288, 12310), True, 'import numpy as np\n'), ((12325, 12359), 'deepchem.data.NumpyDataset', 'dc.data.NumpyDataset', (['X', 'y', 'w', 'ids'], {}), '(X, y, w, ids)\n', (12345, 12359), True, 'import deepchem as dc\n'), ((12378, 12435), 'deepchem.trans.ClippingTransformer', 'dc.trans.ClippingTransformer', ([], {'transform_X': '(True)', 'x_max': '(5.0)'}), '(transform_X=True, x_max=5.0)\n', (12406, 12435), True, 'import deepchem as dc\n'), ((12792, 12826), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['y', 'y_t'], {}), '(y, y_t)\n', (12818, 12826), True, 'import numpy as np\n'), ((12889, 12923), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['w', 'w_t'], {}), 
'(w, w_t)\n', (12915, 12923), True, 'import numpy as np\n'), ((12988, 13027), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['X_t', 'target'], {}), '(X_t, target)\n', (13014, 13027), True, 'import numpy as np\n'), ((13198, 13218), 'numpy.arange', 'np.arange', (['n_samples'], {}), '(n_samples)\n', (13207, 13218), True, 'import numpy as np\n'), ((13227, 13260), 'numpy.zeros', 'np.zeros', (['(n_samples, n_features)'], {}), '((n_samples, n_features))\n', (13235, 13260), True, 'import numpy as np\n'), ((13269, 13298), 'numpy.ones', 'np.ones', (['(n_samples, n_tasks)'], {}), '((n_samples, n_tasks))\n', (13276, 13298), True, 'import numpy as np\n'), ((13339, 13368), 'numpy.ones', 'np.ones', (['(n_samples, n_tasks)'], {}), '((n_samples, n_tasks))\n', (13346, 13368), True, 'import numpy as np\n'), ((13383, 13417), 'deepchem.data.NumpyDataset', 'dc.data.NumpyDataset', (['X', 'y', 'w', 'ids'], {}), '(X, y, w, ids)\n', (13403, 13417), True, 'import deepchem as dc\n'), ((13436, 13493), 'deepchem.trans.ClippingTransformer', 'dc.trans.ClippingTransformer', ([], {'transform_y': '(True)', 'y_max': '(5.0)'}), '(transform_y=True, y_max=5.0)\n', (13464, 13493), True, 'import deepchem as dc\n'), ((13849, 13883), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['X', 'X_t'], {}), '(X, X_t)\n', (13875, 13883), True, 'import numpy as np\n'), ((13945, 13979), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['w', 'w_t'], {}), '(w, w_t)\n', (13971, 13979), True, 'import numpy as np\n'), ((14044, 14083), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['y_t', 'target'], {}), '(y_t, target)\n', (14070, 14083), True, 'import numpy as np\n'), ((14207, 14245), 'deepchem.data.tests.load_gaussian_cdf_data', 'dc.data.tests.load_gaussian_cdf_data', ([], {}), '()\n', (14243, 14245), True, 'import deepchem as dc\n'), ((14295, 14353), 'deepchem.trans.PowerTransformer', 'dc.trans.PowerTransformer', ([], {'transform_X': '(True)', 'powers': 
'powers'}), '(transform_X=True, powers=powers)\n', (14320, 14353), True, 'import deepchem as dc\n'), ((14868, 14902), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['y', 'y_t'], {}), '(y, y_t)\n', (14894, 14902), True, 'import numpy as np\n'), ((14965, 14999), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['w', 'w_t'], {}), '(w, w_t)\n', (14991, 14999), True, 'import numpy as np\n'), ((15138, 15179), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['X', 'X_t[:, :2]'], {}), '(X, X_t[:, :2])\n', (15164, 15179), True, 'import numpy as np\n'), ((15424, 15462), 'deepchem.data.tests.load_gaussian_cdf_data', 'dc.data.tests.load_gaussian_cdf_data', ([], {}), '()\n', (15460, 15462), True, 'import deepchem as dc\n'), ((15512, 15570), 'deepchem.trans.PowerTransformer', 'dc.trans.PowerTransformer', ([], {'transform_y': '(True)', 'powers': 'powers'}), '(transform_y=True, powers=powers)\n', (15537, 15570), True, 'import deepchem as dc\n'), ((16085, 16119), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['X', 'X_t'], {}), '(X, X_t)\n', (16111, 16119), True, 'import numpy as np\n'), ((16182, 16216), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['w', 'w_t'], {}), '(w, w_t)\n', (16208, 16216), True, 'import numpy as np\n'), ((16355, 16396), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['y', 'y_t[:, :2]'], {}), '(y, y_t[:, :2])\n', (16381, 16396), True, 'import numpy as np\n'), ((16783, 16823), 'deepchem.data.tests.load_classification_data', 'dc.data.tests.load_classification_data', ([], {}), '()\n', (16821, 16823), True, 'import deepchem as dc\n'), ((16852, 16931), 'deepchem.trans.BalancingTransformer', 'dc.trans.BalancingTransformer', ([], {'transform_w': '(True)', 'dataset': 'classification_dataset'}), '(transform_w=True, dataset=classification_dataset)\n', (16881, 16931), True, 'import deepchem as dc\n'), ((17540, 17574), 'numpy.testing.assert_allclose', 
'np.testing.assert_allclose', (['X', 'X_t'], {}), '(X, X_t)\n', (17566, 17574), True, 'import numpy as np\n'), ((17636, 17670), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['y', 'y_t'], {}), '(y, y_t)\n', (17662, 17670), True, 'import numpy as np\n'), ((18327, 18362), 'deepchem.data.tests.load_multitask_data', 'dc.data.tests.load_multitask_data', ([], {}), '()\n', (18360, 18362), True, 'import deepchem as dc\n'), ((18391, 18465), 'deepchem.trans.BalancingTransformer', 'dc.trans.BalancingTransformer', ([], {'transform_w': '(True)', 'dataset': 'multitask_dataset'}), '(transform_w=True, dataset=multitask_dataset)\n', (18420, 18465), True, 'import deepchem as dc\n'), ((18987, 19021), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['X', 'X_t'], {}), '(X, X_t)\n', (19013, 19021), True, 'import numpy as np\n'), ((19083, 19117), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['y', 'y_t'], {}), '(y, y_t)\n', (19109, 19117), True, 'import numpy as np\n'), ((19804, 19824), 'numpy.arange', 'np.arange', (['n_samples'], {}), '(n_samples)\n', (19813, 19824), True, 'import numpy as np\n'), ((19833, 19882), 'numpy.random.rand', 'np.random.rand', (['n_samples', 'n_features', 'n_features'], {}), '(n_samples, n_features, n_features)\n', (19847, 19882), True, 'import numpy as np\n'), ((19891, 19921), 'numpy.zeros', 'np.zeros', (['(n_samples, n_tasks)'], {}), '((n_samples, n_tasks))\n', (19899, 19921), True, 'import numpy as np\n'), ((19930, 19959), 'numpy.ones', 'np.ones', (['(n_samples, n_tasks)'], {}), '((n_samples, n_tasks))\n', (19937, 19959), True, 'import numpy as np\n'), ((19974, 20008), 'deepchem.data.NumpyDataset', 'dc.data.NumpyDataset', (['X', 'y', 'w', 'ids'], {}), '(X, y, w, ids)\n', (19994, 20008), True, 'import deepchem as dc\n'), ((20031, 20070), 'deepchem.trans.CoulombFitTransformer', 'dc.trans.CoulombFitTransformer', (['dataset'], {}), '(dataset)\n', (20061, 20070), True, 'import deepchem as dc\n'), ((20271, 
20321), 'numpy.random.randint', 'np.random.randint', (['(2)'], {'size': '(n_samples, n_features)'}), '(2, size=(n_samples, n_features))\n', (20288, 20321), True, 'import numpy as np\n'), ((20330, 20360), 'numpy.zeros', 'np.zeros', (['(n_samples, n_tasks)'], {}), '((n_samples, n_tasks))\n', (20338, 20360), True, 'import numpy as np\n'), ((20369, 20398), 'numpy.ones', 'np.ones', (['(n_samples, n_tasks)'], {}), '((n_samples, n_tasks))\n', (20376, 20398), True, 'import numpy as np\n'), ((20413, 20452), 'deepchem.data.NumpyDataset', 'dc.data.NumpyDataset', (['X', 'y', 'w'], {'ids': 'None'}), '(X, y, w, ids=None)\n', (20433, 20452), True, 'import deepchem as dc\n'), ((20466, 20519), 'numpy.random.randint', 'np.random.randint', (['(2)'], {'size': '(test_samples, n_features)'}), '(2, size=(test_samples, n_features))\n', (20483, 20519), True, 'import numpy as np\n'), ((20533, 20566), 'numpy.zeros', 'np.zeros', (['(test_samples, n_tasks)'], {}), '((test_samples, n_tasks))\n', (20541, 20566), True, 'import numpy as np\n'), ((20580, 20612), 'numpy.ones', 'np.ones', (['(test_samples, n_tasks)'], {}), '((test_samples, n_tasks))\n', (20587, 20612), True, 'import numpy as np\n'), ((20632, 20686), 'deepchem.data.NumpyDataset', 'dc.data.NumpyDataset', (['X_test', 'y_test', 'w_test'], {'ids': 'None'}), '(X_test, y_test, w_test, ids=None)\n', (20652, 20686), True, 'import deepchem as dc\n'), ((20883, 20928), 'deepchem.trans.IRVTransformer', 'dc.trans.IRVTransformer', (['(10)', 'n_tasks', 'dataset'], {}), '(10, n_tasks, dataset)\n', (20906, 20928), True, 'import deepchem as dc\n'), ((21130, 21182), 'numpy.allclose', 'np.allclose', (['test_dataset_trans.X[0, :10]', 'sims[:10]'], {}), '(test_dataset_trans.X[0, :10], sims[:10])\n', (21141, 21182), True, 'import numpy as np\n'), ((21194, 21247), 'numpy.allclose', 'np.allclose', (['test_dataset_trans.X[0, 10:20]', '([0] * 10)'], {}), '(test_dataset_trans.X[0, 10:20], [0] * 10)\n', (21205, 21247), True, 'import numpy as np\n'), ((21404, 
21423), 'deepchem.molnet.load_delaney', 'load_delaney', (['"""Raw"""'], {}), "('Raw')\n", (21416, 21423), False, 'from deepchem.molnet import load_delaney\n'), ((607, 632), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (622, 632), False, 'import os\n'), ((1617, 1630), 'numpy.log', 'np.log', (['(y + 1)'], {}), '(y + 1)\n', (1623, 1630), True, 'import numpy as np\n'), ((3330, 3343), 'numpy.log', 'np.log', (['(X + 1)'], {}), '(X + 1)\n', (3336, 3343), True, 'import numpy as np\n'), ((3666, 3745), 'os.path.join', 'os.path.join', (['self.current_dir', '"""../../models/tests/feat_multitask_example.csv"""'], {}), "(self.current_dir, '../../models/tests/feat_multitask_example.csv')\n", (3678, 3745), False, 'import os\n'), ((4887, 4910), 'numpy.log', 'np.log', (['(y[:, tasks] + 1)'], {}), '(y[:, tasks] + 1)\n', (4893, 4910), True, 'import numpy as np\n'), ((5229, 5308), 'os.path.join', 'os.path.join', (['self.current_dir', '"""../../models/tests/feat_multitask_example.csv"""'], {}), "(self.current_dir, '../../models/tests/feat_multitask_example.csv')\n", (5241, 5308), False, 'import os\n'), ((6489, 6515), 'numpy.log', 'np.log', (['(X[:, features] + 1)'], {}), '(X[:, features] + 1)\n', (6495, 6515), True, 'import numpy as np\n'), ((15211, 15225), 'numpy.power', 'np.power', (['X', '(2)'], {}), '(X, 2)\n', (15219, 15225), True, 'import numpy as np\n'), ((15271, 15287), 'numpy.power', 'np.power', (['X', '(0.5)'], {}), '(X, 0.5)\n', (15279, 15287), True, 'import numpy as np\n'), ((16428, 16442), 'numpy.power', 'np.power', (['y', '(2)'], {}), '(y, 2)\n', (16436, 16442), True, 'import numpy as np\n'), ((16488, 16504), 'numpy.power', 'np.power', (['y', '(0.5)'], {}), '(y, 0.5)\n', (16496, 16504), True, 'import numpy as np\n'), ((20698, 20743), 'numpy.sum', 'np.sum', (['(X_test[0, :] * X)'], {'axis': '(1)', 'dtype': 'float'}), '(X_test[0, :] * X, axis=1, dtype=float)\n', (20704, 20743), True, 'import numpy as np\n'), ((21263, 21301), 'numpy.isclose', 
'np.isclose', (['dataset_trans.X[0, 0]', '(1.0)'], {}), '(dataset_trans.X[0, 0], 1.0)\n', (21273, 21301), True, 'import numpy as np\n'), ((2244, 2315), 'deepchem.trans.NormalizationTransformer', 'dc.trans.NormalizationTransformer', ([], {'transform_X': '(True)', 'dataset': 'ul_dataset'}), '(transform_X=True, dataset=ul_dataset)\n', (2277, 2315), True, 'import deepchem as dc\n'), ((9111, 9134), 'numpy.isclose', 'np.isclose', (['orig_std', '(0)'], {}), '(orig_std, 0)\n', (9121, 9134), True, 'import numpy as np\n'), ((9151, 9169), 'numpy.isclose', 'np.isclose', (['std', '(1)'], {}), '(std, 1)\n', (9161, 9169), True, 'import numpy as np\n'), ((9577, 9604), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)', '(1001)'], {}), '(0.0, 1.0, 1001)\n', (9588, 9604), True, 'import numpy as np\n'), ((9640, 9677), 'numpy.append', 'np.append', (['[target]', '[target]'], {'axis': '(0)'}), '([target], [target], axis=0)\n', (9649, 9677), True, 'import numpy as np\n'), ((10777, 10804), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)', '(1001)'], {}), '(0.0, 1.0, 1001)\n', (10788, 10804), True, 'import numpy as np\n'), ((10840, 10877), 'numpy.append', 'np.append', (['[target]', '[target]'], {'axis': '(0)'}), '([target], [target], axis=0)\n', (10849, 10877), True, 'import numpy as np\n'), ((17984, 18023), 'numpy.zeros_like', 'np.zeros_like', (['w_task[w_orig_task == 0]'], {}), '(w_task[w_orig_task == 0])\n', (17997, 18023), True, 'import numpy as np\n'), ((18135, 18162), 'numpy.sum', 'np.sum', (['w_task[y_task == 0]'], {}), '(w_task[y_task == 0])\n', (18141, 18162), True, 'import numpy as np\n'), ((18164, 18191), 'numpy.sum', 'np.sum', (['w_task[y_task == 1]'], {}), '(w_task[y_task == 1])\n', (18170, 18191), True, 'import numpy as np\n'), ((19426, 19465), 'numpy.zeros_like', 'np.zeros_like', (['w_task[w_orig_task == 0]'], {}), '(w_task[w_orig_task == 0])\n', (19439, 19465), True, 'import numpy as np\n'), ((19577, 19604), 'numpy.sum', 'np.sum', (['w_task[y_task == 0]'], {}), 
'(w_task[y_task == 0])\n', (19583, 19604), True, 'import numpy as np\n'), ((19606, 19633), 'numpy.sum', 'np.sum', (['w_task[y_task == 1]'], {}), '(w_task[y_task == 1])\n', (19612, 19633), True, 'import numpy as np\n'), ((20775, 20800), 'numpy.sign', 'np.sign', (['(X_test[0, :] + X)'], {}), '(X_test[0, :] + X)\n', (20782, 20800), True, 'import numpy as np\n'), ((21563, 21604), 'deepchem.feat.CircularFingerprint', 'dc.feat.CircularFingerprint', ([], {'size': 'fp_size'}), '(size=fp_size)\n', (21590, 21604), True, 'import deepchem as dc\n'), ((1950, 2001), 'deepchem.trans.transformers.Transformer', 'dc.trans.transformers.Transformer', ([], {'transform_y': '(True)'}), '(transform_y=True)\n', (1983, 2001), True, 'import deepchem as dc\n'), ((2129, 2180), 'deepchem.trans.transformers.Transformer', 'dc.trans.transformers.Transformer', ([], {'transform_w': '(True)'}), '(transform_w=True)\n', (2162, 2180), True, 'import deepchem as dc\n'), ((3990, 4006), 'numpy.array', 'np.array', (['[tiid]'], {}), '([tiid])\n', (3998, 4006), True, 'import numpy as np\n'), ((5580, 5596), 'numpy.array', 'np.array', (['[fiid]'], {}), '([fiid])\n', (5588, 5596), True, 'import numpy as np\n'), ((8898, 8917), 'numpy.zeros_like', 'np.zeros_like', (['mean'], {}), '(mean)\n', (8911, 8917), True, 'import numpy as np\n')] |
#
# Copyright (c) 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from ie_serving.models.local_model import LocalModel
from ie_serving.models.model_version_status import ModelVersionStatus
from ie_serving.server.rest_service import create_rest_api
from tensorflow_serving.apis import prediction_service_pb2, \
get_model_status_pb2, model_service_pb2
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import get_model_metadata_pb2
from ie_serving.server.service import PredictionServiceServicer, \
ModelServiceServicer
from ie_serving.models.ir_engine import IrEngine
from tensorflow.contrib.util import make_tensor_proto
from falcon import testing
import grpc_testing
import numpy as np
import pytest
from config import DEFAULT_INPUT_KEY, DEFAULT_OUTPUT_KEY
# gRPC service descriptors under test, looked up by service name in the
# generated TF-Serving protobuf modules; used to build in-process test servers.
PREDICT_SERVICE = prediction_service_pb2. \
    DESCRIPTOR.services_by_name['PredictionService']
MODEL_SERVICE = model_service_pb2. \
    DESCRIPTOR.services_by_name['ModelService']
class Layer:
    """Minimal stand-in for a network layer descriptor.

    Carries only the three attributes the serving code reads:
    precision (e.g. 'FP32'), shape (list of ints) and layout (e.g. 'NCHW').
    """

    def __init__(self, precision, shape, layout):
        self.precision = precision
        self.shape = shape
        self.layout = layout
@pytest.fixture
def get_fake_model():
    """Build a LocalModel named "test" backed by three fake engine versions.

    All three versions share one IrEngine whose network/plugin internals are
    None; inputs and outputs each expose a single FP32 [1, 1, 1] layer under
    the default keys.
    """
    batch_size = None
    inputs = {DEFAULT_INPUT_KEY: Layer('FP32', [1, 1, 1], 'NCHW')}
    outputs = {DEFAULT_OUTPUT_KEY: Layer('FP32', [1, 1, 1], 'NCHW')}
    engine = IrEngine(model_bin='model1.bin', model_xml='model1.xml',
                      mapping_config='mapping_config.json', exec_net=None,
                      inputs=inputs, outputs=outputs, net=None, plugin=None,
                      batch_size=batch_size)
    available_versions = [1, 2, 3]
    engines = {version: engine for version in available_versions}
    model_name = "test"
    statuses = {version: ModelVersionStatus(model_name, version)
                for version in available_versions}
    return LocalModel(model_name=model_name,
                      model_directory='fake_path/model/',
                      available_versions=available_versions,
                      engines=engines,
                      batch_size=batch_size,
                      version_policy_filter=lambda versions: versions[:],
                      versions_statuses=statuses)
@pytest.fixture
def get_fake_ir_engine():
    """Build a bare IrEngine whose network/plugin internals are all None.

    Exposes one FP32 [1, 1, 1] NCHW layer as input and one as output, keyed
    by the default input/output names.
    """
    inputs = {DEFAULT_INPUT_KEY: Layer('FP32', [1, 1, 1], 'NCHW')}
    outputs = {DEFAULT_OUTPUT_KEY: Layer('FP32', [1, 1, 1], 'NCHW')}
    return IrEngine(model_bin='model1.bin', model_xml='model1.xml',
                    mapping_config='mapping_config.json', exec_net=None,
                    inputs=inputs, outputs=outputs, net=None, plugin=None,
                    batch_size=None)
@pytest.fixture
def get_grpc_service_for_predict(get_fake_model):
    """In-process gRPC test server exposing PredictionService over the fake model."""
    servicer = PredictionServiceServicer(models={'test': get_fake_model})
    return grpc_testing.server_from_dictionary(
        {PREDICT_SERVICE: servicer}, grpc_testing.strict_real_time())
@pytest.fixture
def get_grpc_service_for_model_status(get_fake_model):
    """In-process gRPC test server exposing ModelService over the fake model."""
    servicer = ModelServiceServicer(models={'test': get_fake_model})
    return grpc_testing.server_from_dictionary(
        {MODEL_SERVICE: servicer}, grpc_testing.strict_real_time())
def get_fake_request(model_name, data_shape, input_blob, version=None):
    """Build a PredictRequest whose input tensor is all ones of data_shape.

    The version field is only set when a version is explicitly given.
    """
    request = predict_pb2.PredictRequest()
    request.model_spec.name = model_name
    if version is not None:
        request.model_spec.version.value = version
    ones = np.ones(shape=data_shape)
    tensor = make_tensor_proto(ones, shape=ones.shape)
    request.inputs[input_blob].CopyFrom(tensor)
    return request
def get_fake_model_metadata_request(model_name, metadata_field, version=None):
    """Build a GetModelMetadataRequest asking for a single metadata field.

    The version field is only set when a version is explicitly given.
    """
    metadata_request = get_model_metadata_pb2.GetModelMetadataRequest()
    metadata_request.model_spec.name = model_name
    if version is not None:
        metadata_request.model_spec.version.value = version
    metadata_request.metadata_field.append(metadata_field)
    return metadata_request
def get_fake_model_status_request(model_name, version=None):
    """Build a GetModelStatusRequest, optionally pinned to one version."""
    status_request = get_model_status_pb2.GetModelStatusRequest()
    status_request.model_spec.name = model_name
    if version is not None:
        status_request.model_spec.version.value = version
    return status_request
@pytest.fixture()
def client(get_fake_model):
    """Falcon test client wrapping the REST API built over the fake model."""
    return testing.TestClient(create_rest_api(models={"test": get_fake_model}))
| [
"falcon.testing.TestClient",
"ie_serving.models.local_model.LocalModel",
"grpc_testing.server_from_dictionary",
"ie_serving.server.service.ModelServiceServicer",
"numpy.ones",
"tensorflow_serving.apis.get_model_metadata_pb2.GetModelMetadataRequest",
"tensorflow_serving.apis.predict_pb2.PredictRequest",
... | [((5606, 5622), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (5620, 5622), False, 'import pytest\n'), ((2137, 2326), 'ie_serving.models.ir_engine.IrEngine', 'IrEngine', ([], {'model_bin': 'model_bin', 'model_xml': 'model_xml', 'mapping_config': 'mapping_config', 'exec_net': 'exec_net', 'inputs': 'inputs', 'outputs': 'outputs', 'net': 'net', 'plugin': 'plugin', 'batch_size': 'batch_size'}), '(model_bin=model_bin, model_xml=model_xml, mapping_config=\n mapping_config, exec_net=exec_net, inputs=inputs, outputs=outputs, net=\n net, plugin=plugin, batch_size=batch_size)\n', (2145, 2326), False, 'from ie_serving.models.ir_engine import IrEngine\n'), ((2663, 2908), 'ie_serving.models.local_model.LocalModel', 'LocalModel', ([], {'model_name': 'model_name', 'model_directory': '"""fake_path/model/"""', 'available_versions': '[1, 2, 3]', 'engines': 'new_engines', 'batch_size': 'batch_size', 'version_policy_filter': '(lambda versions: versions[:])', 'versions_statuses': 'versions_statuses'}), "(model_name=model_name, model_directory='fake_path/model/',\n available_versions=[1, 2, 3], engines=new_engines, batch_size=\n batch_size, version_policy_filter=lambda versions: versions[:],\n versions_statuses=versions_statuses)\n", (2673, 2908), False, 'from ie_serving.models.local_model import LocalModel\n'), ((3497, 3686), 'ie_serving.models.ir_engine.IrEngine', 'IrEngine', ([], {'model_bin': 'model_bin', 'model_xml': 'model_xml', 'mapping_config': 'mapping_config', 'exec_net': 'exec_net', 'inputs': 'inputs', 'outputs': 'outputs', 'net': 'net', 'plugin': 'plugin', 'batch_size': 'batch_size'}), '(model_bin=model_bin, model_xml=model_xml, mapping_config=\n mapping_config, exec_net=exec_net, inputs=inputs, outputs=outputs, net=\n net, plugin=plugin, batch_size=batch_size)\n', (3505, 3686), False, 'from ie_serving.models.ir_engine import IrEngine\n'), ((3857, 3888), 'grpc_testing.strict_real_time', 'grpc_testing.strict_real_time', ([], {}), '()\n', (3886, 3888), False, 
'import grpc_testing\n'), ((3905, 3963), 'ie_serving.server.service.PredictionServiceServicer', 'PredictionServiceServicer', ([], {'models': "{'test': get_fake_model}"}), "(models={'test': get_fake_model})\n", (3930, 3963), False, 'from ie_serving.server.service import PredictionServiceServicer, ModelServiceServicer\n'), ((4065, 4138), 'grpc_testing.server_from_dictionary', 'grpc_testing.server_from_dictionary', (['descriptors_to_servicers', '_real_time'], {}), '(descriptors_to_servicers, _real_time)\n', (4100, 4138), False, 'import grpc_testing\n'), ((4276, 4307), 'grpc_testing.strict_real_time', 'grpc_testing.strict_real_time', ([], {}), '()\n', (4305, 4307), False, 'import grpc_testing\n'), ((4324, 4377), 'ie_serving.server.service.ModelServiceServicer', 'ModelServiceServicer', ([], {'models': "{'test': get_fake_model}"}), "(models={'test': get_fake_model})\n", (4344, 4377), False, 'from ie_serving.server.service import PredictionServiceServicer, ModelServiceServicer\n'), ((4477, 4550), 'grpc_testing.server_from_dictionary', 'grpc_testing.server_from_dictionary', (['descriptors_to_servicers', '_real_time'], {}), '(descriptors_to_servicers, _real_time)\n', (4512, 4550), False, 'import grpc_testing\n'), ((4685, 4713), 'tensorflow_serving.apis.predict_pb2.PredictRequest', 'predict_pb2.PredictRequest', ([], {}), '()\n', (4711, 4713), False, 'from tensorflow_serving.apis import predict_pb2\n'), ((4849, 4874), 'numpy.ones', 'np.ones', ([], {'shape': 'data_shape'}), '(shape=data_shape)\n', (4856, 4874), True, 'import numpy as np\n'), ((5088, 5136), 'tensorflow_serving.apis.get_model_metadata_pb2.GetModelMetadataRequest', 'get_model_metadata_pb2.GetModelMetadataRequest', ([], {}), '()\n', (5134, 5136), False, 'from tensorflow_serving.apis import get_model_metadata_pb2\n'), ((5412, 5456), 'tensorflow_serving.apis.get_model_status_pb2.GetModelStatusRequest', 'get_model_status_pb2.GetModelStatusRequest', ([], {}), '()\n', (5454, 5456), False, 'from tensorflow_serving.apis 
import prediction_service_pb2, get_model_status_pb2, model_service_pb2\n'), ((5668, 5716), 'ie_serving.server.rest_service.create_rest_api', 'create_rest_api', ([], {'models': "{'test': get_fake_model}"}), "(models={'test': get_fake_model})\n", (5683, 5716), False, 'from ie_serving.server.rest_service import create_rest_api\n'), ((5729, 5757), 'falcon.testing.TestClient', 'testing.TestClient', (['rest_api'], {}), '(rest_api)\n', (5747, 5757), False, 'from falcon import testing\n'), ((2606, 2645), 'ie_serving.models.model_version_status.ModelVersionStatus', 'ModelVersionStatus', (['model_name', 'version'], {}), '(model_name, version)\n', (2624, 2645), False, 'from ie_serving.models.model_version_status import ModelVersionStatus\n'), ((4926, 4967), 'tensorflow.contrib.util.make_tensor_proto', 'make_tensor_proto', (['data'], {'shape': 'data.shape'}), '(data, shape=data.shape)\n', (4943, 4967), False, 'from tensorflow.contrib.util import make_tensor_proto\n')] |
from functools import partial
from multiprocessing.dummy import Pool
from scipy.io import wavfile
import argparse
from glob import glob
from parse import parse
import os, sys
import numpy as np
def gen_noise(n, type, sigma, noise_folder="../../data/NoiseDB/NoiseX_16kHz"):
    """Generate ``n`` samples of noise of a given type, scaled to std ``sigma``.

    Parameters
    ----------
    n : int
        Number of noise samples to return.
    type : str
        Noise type; only "white" is currently supported (reads
        ``<type>_16kHz.wav`` from ``noise_folder``).
    sigma : float
        Target standard deviation of the returned noise.
    noise_folder : str, optional
        Directory containing the NoiseX-16kHz wav files.

    Returns
    -------
    numpy.ndarray
        Noise segment of length ``n``, normalised to std ``sigma``.

    Raises
    ------
    ValueError
        If the noise type is unknown, or the noise file has fewer than ``n``
        samples. (Previously an unknown type returned the int 0 and crashed
        on ``0 .std()``, and a too-short file printed a warning then crashed
        inside ``np.random.randint`` with a confusing message.)
    """
    if type != "white":
        raise ValueError("Unknown {} noise".format(type))
    noise_filename = os.path.join(noise_folder, "{}_16kHz.wav".format(type))
    _, loaded_noise = wavfile.read(noise_filename)
    if n >= loaded_noise.shape[0]:
        raise ValueError("Noise file: {} is too short.".format(noise_filename))
    # Pick a random n-sample window inside the noise recording.
    istart = np.random.randint(loaded_noise.shape[0] - n)
    raw_noise = loaded_noise[istart:istart + n]
    # Normalise to unit std, then scale to the requested power.
    return raw_noise / raw_noise.std() * sigma
def new_filename(file, ntype, snr):
    """Derive the output wav name by appending noise type and SNR (in dB)."""
    suffix = ".WAV.{}.{}dB".format(ntype, snr)
    return file.replace(".WAV", suffix)
def corrupt_data(s, ntype, snr):
    """Add noise of type ``ntype`` to signal ``s`` at the requested SNR (dB).

    The returned array keeps the dtype of the input signal.
    """
    # SNR(dB) = 20*log10(s_std / n_std)  =>  n_std = s_std * 10^(-snr/20)
    noise_std = np.std(s) * 10 ** (-snr / 20)
    noise = gen_noise(s.shape[0], ntype, noise_std)
    return (s + noise).astype(s.dtype)
def corrupt_wav(file, ntype=None, snr=None):
    """Read a wav, add noise at the given SNR, and write the corrupted copy.

    The output path is the input path with ".{ntype}.{snr}dB" appended.
    Returns 0 on success (multiprocessing map-friendly).
    """
    rate, clean = wavfile.read(file)
    noisy = corrupt_data(clean, ntype, snr)
    wavfile.write(new_filename(file, ntype, snr), rate, noisy)
    return 0
if __name__ == "__main__":
    # Command line: -timit is the TIMIT corpus root, -opt encodes the noise
    # spec as "<dataset>.<noise type>.<snr>dB", -j the worker-pool size
    # (defaults to the machine's CPU count).
    parser = argparse.ArgumentParser(description="Add a particular noise type to WAV files.")
    parser.add_argument('-timit', metavar="<Timit location>", type=str)
    parser.add_argument('-opt', metavar="<Signal to Noise Ratio (dB)>", type=str)
    parser.add_argument('-j', metavar="<Number of jobs (default: numcpu)>",
                        type=int, default=os.cpu_count())
    args = parser.parse_args()
    try:
        # parse() returns None when -opt does not match the pattern, which
        # makes the tuple unpacking below raise TypeError.
        dset, ntype, snr = parse("{}.{}.{:d}dB", args.opt)
    except TypeError as e:
        print("No noise to be added with option: {}.\nExit.".format(args.opt),file=sys.stderr)
        sys.exit(0)
    if dset == "test":
        # Corrupt every WAV of the TEST split, spread over args.j threads.
        wavs = glob(os.path.join(args.timit, "TEST", "**" , "*.WAV"), recursive=True)
        f = partial(corrupt_wav, ntype=ntype, snr=snr)
        with Pool(args.j) as pool:
            pool.map(f, wavs)
    sys.exit(0)
sys.exit(0) | [
"parse.parse",
"argparse.ArgumentParser",
"numpy.std",
"os.path.join",
"numpy.random.randint",
"scipy.io.wavfile.read",
"functools.partial",
"os.cpu_count",
"sys.exit",
"multiprocessing.dummy.Pool"
] | [((1244, 1253), 'numpy.std', 'np.std', (['s'], {}), '(s)\n', (1250, 1253), True, 'import numpy as np\n'), ((1510, 1528), 'scipy.io.wavfile.read', 'wavfile.read', (['file'], {}), '(file)\n', (1522, 1528), False, 'from scipy.io import wavfile\n'), ((1681, 1766), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Add a particular noise type to WAV files."""'}), "(description='Add a particular noise type to WAV files.'\n )\n", (1704, 1766), False, 'import argparse\n'), ((2528, 2539), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (2536, 2539), False, 'import os, sys\n'), ((460, 488), 'scipy.io.wavfile.read', 'wavfile.read', (['noise_filename'], {}), '(noise_filename)\n', (472, 488), False, 'from scipy.io import wavfile\n'), ((734, 778), 'numpy.random.randint', 'np.random.randint', (['(loaded_noise.shape[0] - n)'], {}), '(loaded_noise.shape[0] - n)\n', (751, 778), True, 'import numpy as np\n'), ((2119, 2150), 'parse.parse', 'parse', (['"""{}.{}.{:d}dB"""', 'args.opt'], {}), "('{}.{}.{:d}dB', args.opt)\n", (2124, 2150), False, 'from parse import parse\n'), ((2415, 2457), 'functools.partial', 'partial', (['corrupt_wav'], {'ntype': 'ntype', 'snr': 'snr'}), '(corrupt_wav, ntype=ntype, snr=snr)\n', (2422, 2457), False, 'from functools import partial\n'), ((2035, 2049), 'os.cpu_count', 'os.cpu_count', ([], {}), '()\n', (2047, 2049), False, 'import os, sys\n'), ((2281, 2292), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (2289, 2292), False, 'import os, sys\n'), ((2337, 2384), 'os.path.join', 'os.path.join', (['args.timit', '"""TEST"""', '"""**"""', '"""*.WAV"""'], {}), "(args.timit, 'TEST', '**', '*.WAV')\n", (2349, 2384), False, 'import os, sys\n'), ((2471, 2483), 'multiprocessing.dummy.Pool', 'Pool', (['args.j'], {}), '(args.j)\n', (2475, 2483), False, 'from multiprocessing.dummy import Pool\n')] |
#!/usr/bin/env python
"""Code from the paper "A signature-based machine
learning model for bipolar disorder and borderline
personality disorder".
Given a participant of the study, trains the model
using all other participants from the cohort
in order to test the model with this participant
then. This provides three non-negative numbers
(p_1, p_2, p_3) with p_1 + p_2 + p_3 = 1, where p_i
indicates the number of 20-observations buckets that
was classified as belonging to the clinical group i.
This is done for all participants of the cohort,
in order to plot a heat map on a triangle then.
"""
import argparse
import datetime
import numpy as np
import scipy
from sklearn.ensemble import RandomForestRegressor
import seaborn as sns
from esig import tosig
import matplotlib.pyplot as plt
import math
import os
import pickle
import random
import shutil
from tqdm import tqdm
import psychiatry
from logger import Logger
# Authorship metadata for the code released with the paper.
__author__ = "<NAME>"
__credits__ = ["<NAME>", "<NAME>", "<NAME>",
               "<NAME>", "<NAME>"]
__version__ = "1.0.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
class Model:
    def __init__(self, reg):
        """The signature-based machine learning model introduced
        in the original paper.
        Parameters
        ----------
        reg : RandomForestRegressor
            The trained random forest regressor.
        """
        self.reg=reg
    def test(self, path, order=2, is_sig=False):
        """Tests the model against a particular participant.
        Parameters
        ----------
        path : str
            Path of the pickle file containing the streams
            of data from the participant.
        order : int, optional
            Order of the signature.
            Default is 2.
        is_sig : bool, optional
            Whether the test set files contain signatures.
            Default is false, in which case conversion to signatures will be carried out here.
        Returns
        -------
        list
            3-dimensional vector indicating how often the participant
            has buckets that were classified in each clinical group.
        """
        # We load the pickle file of the participant
        file = open(path,'rb')
        collection = pickle.load(file)
        file.close()
        # Each clinical group is assigned a point
        # on the plane, which was found using cross-validation.
        threshold = np.array([[1, 0], # Borderline participants
                   [0, 1], # Healthy participants
                   [-1/np.sqrt(2), -1/np.sqrt(2)]]) # Bipolar participants
        # We construct the inputs and outputs to test the model
        x=[]
        y=[]
        for X in collection:
            # The input is the signature of the normalised path
            if is_sig:
                # If using synthetic data, the input is already a signature
                x.append(X.data)
            else:
                # If using the original data, we convert the normalised path into the signature here
                x.append(tosig.stream2sig(np.array(X.data), order))
            # The expected output is the plane point of the participant's
            # clinical group. NOTE(review): y is built but never read below;
            # only the predictions are needed to fill the histogram.
            y.append(threshold[X.diagnosis])
        # We find the predictions corresponding to the computed inputs
        predicted = self.reg.predict(x)
        # We find which group the predictions belong to, and
        # store how often the participant belongs to each group
        vector = np.zeros(3)
        for i in range(len(x)):
            # Snap each predicted plane point to the nearest group point and
            # count one bucket for that group.
            threshold2 = [tuple(l) for l in threshold.tolist()]
            vector[threshold2.index(tuple(_findMin(predicted[i], threshold)))] += 1
        # Turn counts into proportions over all buckets of this participant.
        vector /= float(len(x))
        return vector
def _findMin(p, A):
"""Given a point p and a list of points A, returns the point
in A closest to p.
Parameters
----------
p : array
Point on the plane.
A : list
List of points on the plane.
Returns
-------
tuple
Point in A closest to p in the Euclidean metric.
"""
m=(-1, (0,0))
for p0 in A:
dist = np.linalg.norm(p0-np.array(p))
if m[0]==-1 or m[0]>dist:
m = (dist, p0)
return tuple(m[1])
def getCategory(id):
    """Finds the clinical group a given participant belongs to.

    Parameters
    ----------
    id : int
        ID of the participant (also the folder name under ``data/``).

    Returns
    -------
    str
        One of "borderline", "healthy" or "bipolar", taken from the
        diagnosis of the first bucket in the participant's test file.
    """
    with open("data/" + str(id) + "/os.obj", 'rb') as file:
        collection = pickle.load(file)
    diagnosis_names = ["borderline", "healthy", "bipolar"]
    return diagnosis_names[collection[0].diagnosis]
def train(path, order=2, is_sig=False):
    """Trains the model, as specified in the original paper.

    Parameters
    ----------
    path : str
        Path of the pickled training set.
    order : int, optional
        Truncation order of the signature (default 2).
    is_sig : bool, optional
        True if the stored streams are already signatures; otherwise the
        signature is computed here from each normalised path.

    Returns
    -------
    Model
        Model wrapping a fitted random forest regressor.
    """
    with open(path, 'rb') as fh:
        participants = pickle.load(fh)
    # Target points on the plane, one per clinical group (borderline,
    # healthy, bipolar); the positions were found using cross-validation.
    group_points = np.array([[1, 0],
                             [0, 1],
                             [-1 / np.sqrt(2), -1 / np.sqrt(2)]])
    # Inputs: one signature per participant's stream.
    if is_sig:
        features = [p.data for p in participants]
    else:
        features = [tosig.stream2sig(np.array(p.data), order)
                    for p in participants]
    # Outputs: the plane point of each participant's clinical group.
    targets = [group_points[p.diagnosis] for p in participants]
    forest = RandomForestRegressor(n_estimators=100)
    forest.fit(features, targets)
    return Model(forest)
def plotDensityMap(scores, plot_name):
    """Plots, given a set of scores, the density map on a triangle.

    Each score is a 3-dimensional barycentric vector (proportions of buckets
    classified as borderline / healthy / bipolar); it is projected onto an
    equilateral triangle and a KDE of all projected points is drawn, then
    saved as "<plot_name>-<timestamp>.png". Logs the outcome via the module
    level ``logger``.

    Parameters
    ----------
    scores : list
        List of scores, where each score is a 3-dimensional list.
    plot_name : string
        Base name given to the saved plot.
    """
    # Vertices of the equilateral triangle used for the barycentric projection.
    TRIANGLE = np.array([[math.cos(math.pi*0.5), math.sin(math.pi*0.5)],
                         [math.cos(math.pi*1.166), math.sin(math.pi*1.166)],
                         [math.cos(math.pi*1.833), math.sin(math.pi*1.833)]])
    pointsX = [score.dot(TRIANGLE)[0] for score in scores]
    pointsY = [score.dot(TRIANGLE)[1] for score in scores]
    # Draw the triangle outline and the three medians from the centre.
    vertices = [np.array(v).dot(TRIANGLE)
                for v in ([1, 0, 0], [0, 1, 0], [0, 0, 1])]
    for i in range(3):
        p1 = vertices[i]
        p2 = vertices[(i + 1) % 3]
        c = 0.5 * (p1 + p2)
        plt.plot([p1[0], p2[0]], [p1[1], p2[1]], color='k', linestyle='-', linewidth=2)
        plt.plot([0, c[0]], [0, c[1]], color='k', linestyle='-', linewidth=1)
    ax = plt.gca()
    ax.set_xlim([-1.2, 1.32])
    ax.set_ylim([-0.7, 1.3])
    ax.text(0.8, -0.6, 'Bipolar')
    ax.text(-1.1, -0.6, 'Healthy')
    ax.text(-0.15, 1.05, 'Borderline')
    # NOTE(review): a 2-D histogram (np.histogram2d with the `normed` argument,
    # removed in NumPy >= 1.24) plus a scipy bisection search for a 75%-mass
    # contour level used to be computed here, but none of its results were ever
    # read — the contours below come entirely from seaborn's KDE — so that dead
    # code was removed.
    sns.kdeplot(np.array(pointsX), np.array(pointsY), shade=True, ax=ax)
    sns.kdeplot(np.array(pointsX), np.array(pointsY), n_levels=3, ax=ax, cmap="Reds")
    file_name = plot_name + datetime.datetime.now().strftime("-%Y-%m-%d-%H:%M") + ".png"
    plt.savefig(file_name)
    plt.clf()
    if os.path.isfile(file_name):
        logger.log("Saved plot as {}".format(file_name))
    else:
        logger.log("Could not save file {}".format(file_name))
def export(l, i, data_prepared=False, is_test=False):
    """Saves as a pickle file the training or testing sets.

    Parameters
    ----------
    l : list
        Participants (or prepared buckets) to export. A single participant
        (or ``is_test=True`` with prepared data) produces the out-of-sample
        file ``os.obj``; anything else produces the training file ``ts.obj``.
    i : int
        A random ID used as the folder name under ``data/``.
    data_prepared : bool, optional
        True if ``l`` already contains ready-made buckets that can be
        dumped as-is (e.g. synthetic signatures).
    is_test : bool, optional
        Together with ``data_prepared``, marks the set as out-of-sample.
    """
    size = 20  # observations per bucket of each stream
    folder = "data/" + str(i)
    if not os.path.exists(folder):
        os.makedirs(folder)
    # A single participant (or a prepared test set) is the out-of-sample set.
    is_os_set = len(l) == 1 or (data_prepared and is_test)
    setType = "os" if is_os_set else "ts"
    if data_prepared:
        dataset = l
    else:
        # Cut every participant's stream into normalised size-long buckets,
        # keeping the observation right after each bucket as its target.
        dataset = []
        for participant in l:
            for v in range(0, len(participant.data) - size, size):
                bucket = psychiatry.Participant(participant.data[v:v + size],
                                                participant.idNumber,
                                                participant.diagnosis,
                                                participant.data[v + size])
                dataset.append(psychiatry.normalise(bucket))
    # Export the dataset.
    with open(folder + "/" + setType + ".obj", "wb") as filehandler:
        pickle.dump(dataset, filehandler)
def get_folders(a_dir):
    """Finds all folders in a directory.

    Parameters
    ----------
    a_dir : str
        Directory path.

    Returns
    -------
    list of str
        Names of all sub-directories of ``a_dir``, in sorted order.
    """
    entries = sorted(os.listdir(a_dir))
    return [entry for entry in entries
            if os.path.isdir(os.path.join(a_dir, entry))]
def load_and_export_cohort():
    """Load the mood-score cohort and export one train/test split per participant.

    Every participant with more than five 20-observation buckets of data is
    held out in turn: their data becomes the test set and the rest of the
    cohort the training set, both written under a random folder ID.
    """
    print("Loading cohort...")
    cohort = psychiatry.loadParticipants("../data")
    bucket_size = 20  # observations per bucket of each stream
    # Only consider participants that provided at least 5 buckets of data.
    eligible = [p for p in cohort if len(p.data) > 5 * bucket_size]
    print("Exporting participants...")
    for held_out in tqdm(eligible):
        # Train on everyone except the held-out participant.
        remaining = [p for p in cohort if p != held_out]
        # The held-out participant must not leak into the training set.
        assert held_out not in remaining
        # Save both sets under the same random folder ID.
        folder_id = random.randint(0, 1e8)
        export([held_out], folder_id)
        export(remaining, folder_id)
def load_and_export_synthetic_cohort(cohort):
    """ Loads cohort synthetic data and exports two files for each 'participant' into a folder with a random ID.
    One file contains the test data, i.e. the synthetic signatures from that 'participant', and the second contains the
    training data, i.e. the synthetic signatures from all other 'participants'. The synthetic data does not correspond
    to specific participants, so here we choose to define a 'participant' as a group of synthetic signatures, with the
    group size set by the variable buckets_per_participant.
    Parameters
    ----------
    cohort: int
        ID of the synthetic cohort to be analysed
    """
    # Load all synthetic signatures and diagnoses. NOTE(review): despite the
    # ".pickle" extension these are read as comma-separated text files.
    signatures = np.genfromtxt(os.path.join("synthetic-data",
                                  "cohort_" + str(cohort) + "_sigs.pickle"), delimiter=',')
    diagnoses = np.genfromtxt(os.path.join("synthetic-data",
                                 "cohort_" + str(cohort) + "_diagnosis.pickle"), delimiter=',')
    # We don't have distinguishable participants in the synthetic dataset, so we'll consider groups of seven signatures
    # as having come from each participant for now
    buckets_per_participant = 7
    # Work out how many buckets of data we have for each diagnosis, and how many "patients" we can generate
    diag_ids, diag_counts = np.unique(diagnoses, return_counts=True)
    bucket_counts = [int(c) for c in diag_counts]
    patient_counts = [int(c/buckets_per_participant) for c in bucket_counts]
    # Create list of participants. We take all signatures associated with a particular diagnosis,
    # then construct participants by taking seven of these signatures at a time (a couple remain unused)
    participants = []
    for d_ind, d in enumerate(diag_ids):
        signatures_d = signatures[diagnoses == d]
        print("Diagnosis {}: {} buckets of data available to create {} patients".format(d, bucket_counts[d_ind],
                                                                                        patient_counts[d_ind]))
        single_participant = []
        for s_ind, s in enumerate(signatures_d):
            # NOTE(review): p_id is fractional for most s_ind (true division),
            # yet it is compared with the integer loop id below via ==/!= —
            # presumably it is meant to be an integer participant id; verify.
            p_id = sum(diag_counts[:d_ind]/buckets_per_participant) + s_ind/buckets_per_participant - 1
            if len(single_participant) < buckets_per_participant:
                single_participant.append(psychiatry.Participant(s, p_id, int(d+1), None))
            else:
                # Group full: commit it and start the next one with this bucket.
                participants.extend(single_participant)
                single_participant = [psychiatry.Participant(s, p_id, int(d+1), None)]
        print("{} buckets of data were not used".format(len(single_participant)))
    # Check that we put the correct number of unique buckets into "Participant" form
    assert(len(participants) == sum(patient_counts)*buckets_per_participant)
    # Export train and test sets for each participant
    for id in range(0, sum(patient_counts)-1, 1):
        random_id = random.randint(0, 1e8)
        test_participant = [p for p in participants if p.idNumber == id]
        train_participants = [p for p in participants if p.idNumber != id]
        export(train_participants, random_id, data_prepared=True, is_test=False)
        export(test_participant, random_id, data_prepared=True, is_test=True)
if __name__ == "__main__":
    """Plots a triangle and the density map of the proportion
    of periods of time participants with a specific diagnosis
    spend in each clinical category.
    """
    """
    Step 0
    Set up command line argument parsers
    --seed (optional): Sets the random seed; default is the original value used in this script.
    --synth (optional): If not specified at all, mood score data is loaded.
                        If --synth is given without a value, cohort 772192 (synthetic signatures) is loaded.
                        If a value for --synth is given, the specified cohort of synthetic signatures is loaded.
    """
    parser = argparse.ArgumentParser(
        description="Plot the time participants spend in each clinical category (healthy, borderline and bipolar)")
    parser.add_argument("--seed", type=int, default=1,
                        help="seed for the random number generators (int, default=1)")
    parser.add_argument("--synth", nargs="?", type=int, const=772192,
                        help="ID of cohort of synthetic mood score signatures, if they are to be used (int, default=772192 if --synth \
                        alone is provided, or None (i.e. load original mood score data) if not)")
    args = parser.parse_args()
    # Module-level logger, also used from plotDensityMap.
    logger = Logger("heat_map")
    # Set the random seeds and report
    random.seed(args.seed)
    np.random.seed(args.seed)
    logger.log("Random seed has been set to {}\n".format(args.seed))
    # Clean up the data folder (only delete files + folders produced by earlier runs of heat_map.py)
    if os.path.exists("data/"):
        folders = get_folders("data/")
        for folder in folders:
            contents = os.listdir(os.path.join("data/", folder))
            # Only numeric folders holding both exported sets are ours to delete.
            if folder.isdigit() and "ts.obj" in contents and "os.obj" in contents:
                shutil.rmtree(os.path.join("data/", folder))
    """
    Step 1
    Load the cohort. Then, take each participant
    and create a testing set (using the participant)
    and a training set (using the rest of the
    cohort) and save it as a file.
    """
    if args.synth is None:
        use_synth_sig = False
        logger.log("Preparing to load mood score data...")
        load_and_export_cohort()
        logger.log("Loaded and exported cohort\n")
    else:
        use_synth_sig = True
        logger.log("Preparing to load synthetic signatures from cohort {}...".format(args.synth))
        load_and_export_synthetic_cohort(args.synth)
        logger.log("Loaded and exported synthetic cohort {}\n".format(args.synth))
    """
    Step 2
    For each participant in the clinical group we are interested in,
    test the model with data from this participant. The model is
    trained using the remaining participants in the cohort.
    """
    folders = get_folders("data/")
    scores = []
    logger.log("Calculating points...")
    for folder in tqdm(folders):
        # Train the model
        model=train("data/"+folder+"/ts.obj", is_sig=use_synth_sig)
        # Test the model
        score=model.test("data/"+folder+"/os.obj", is_sig=use_synth_sig)
        # Save the score
        scores.append((folder, score))
    """
    Step 3
    Assign each score to the corresponding clinical group
    """
    trianglePoints={
        "bipolar": [],
        "healthy": [],
        "borderline": []
    }
    logger.log("Assigning scores...")
    for id, score in scores:
        category = getCategory(id)
        trianglePoints[category].append(score)
    """
    Step 4
    Plot the triangle and the density map.
    """
    logger.log("Generating plots...")
    plotDensityMap(trianglePoints["bipolar"], "bipolar-heatmap")
    plotDensityMap(trianglePoints["healthy"], "healthy-heatmap")
    plotDensityMap(trianglePoints["borderline"], "borderline-heatmap")
| [
"numpy.sqrt",
"math.cos",
"numpy.array",
"psychiatry.loadParticipants",
"os.path.exists",
"sklearn.ensemble.RandomForestRegressor",
"os.listdir",
"argparse.ArgumentParser",
"numpy.where",
"matplotlib.pyplot.plot",
"numpy.random.seed",
"numpy.histogram2d",
"random.randint",
"matplotlib.pypl... | [((4940, 4957), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (4951, 4957), False, 'import pickle\n'), ((5659, 5676), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (5670, 5676), False, 'import pickle\n'), ((6898, 6937), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {'n_estimators': '(100)'}), '(n_estimators=100)\n', (6919, 6937), False, 'from sklearn.ensemble import RandomForestRegressor\n'), ((8221, 8230), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (8228, 8230), True, 'import matplotlib.pyplot as plt\n'), ((8491, 8545), 'numpy.histogram2d', 'np.histogram2d', (['pointsX', 'pointsY'], {'bins': '(40)', 'normed': '(True)'}), '(pointsX, pointsY, bins=40, normed=True)\n', (8505, 8545), True, 'import numpy as np\n'), ((8853, 8867), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (8861, 8867), True, 'import numpy as np\n'), ((9128, 9150), 'matplotlib.pyplot.savefig', 'plt.savefig', (['file_name'], {}), '(file_name)\n', (9139, 9150), True, 'import matplotlib.pyplot as plt\n'), ((9156, 9165), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (9163, 9165), True, 'import matplotlib.pyplot as plt\n'), ((9174, 9199), 'os.path.isfile', 'os.path.isfile', (['file_name'], {}), '(file_name)\n', (9188, 9199), False, 'import os\n'), ((10911, 10944), 'pickle.dump', 'pickle.dump', (['dataset', 'filehandler'], {}), '(dataset, filehandler)\n', (10922, 10944), False, 'import pickle\n'), ((11796, 11834), 'psychiatry.loadParticipants', 'psychiatry.loadParticipants', (['"""../data"""'], {}), "('../data')\n", (11823, 11834), False, 'import psychiatry\n'), ((12150, 12174), 'tqdm.tqdm', 'tqdm', (['valid_participants'], {}), '(valid_participants)\n', (12154, 12174), False, 'from tqdm import tqdm\n'), ((14177, 14217), 'numpy.unique', 'np.unique', (['diagnoses'], {'return_counts': '(True)'}), '(diagnoses, return_counts=True)\n', (14186, 14217), True, 'import numpy as np\n'), ((16823, 16964), 
'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Plot the time participants spend in each clinical category (healthy, borderline and bipolar)"""'}), "(description=\n 'Plot the time participants spend in each clinical category (healthy, borderline and bipolar)'\n )\n", (16846, 16964), False, 'import argparse\n'), ((17422, 17440), 'logger.Logger', 'Logger', (['"""heat_map"""'], {}), "('heat_map')\n", (17428, 17440), False, 'from logger import Logger\n'), ((17487, 17509), 'random.seed', 'random.seed', (['args.seed'], {}), '(args.seed)\n', (17498, 17509), False, 'import random\n'), ((17515, 17540), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (17529, 17540), True, 'import numpy as np\n'), ((17724, 17747), 'os.path.exists', 'os.path.exists', (['"""data/"""'], {}), "('data/')\n", (17738, 17747), False, 'import os\n'), ((19097, 19110), 'tqdm.tqdm', 'tqdm', (['folders'], {}), '(folders)\n', (19101, 19110), False, 'from tqdm import tqdm\n'), ((2350, 2367), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (2361, 2367), False, 'import pickle\n'), ((3745, 3756), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (3753, 3756), True, 'import numpy as np\n'), ((8046, 8125), 'matplotlib.pyplot.plot', 'plt.plot', (['[p1[0], p2[0]]', '[p1[1], p2[1]]'], {'color': '"""k"""', 'linestyle': '"""-"""', 'linewidth': '(2)'}), "([p1[0], p2[0]], [p1[1], p2[1]], color='k', linestyle='-', linewidth=2)\n", (8054, 8125), True, 'import matplotlib.pyplot as plt\n'), ((8135, 8204), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, c[0]]', '[0, c[1]]'], {'color': '"""k"""', 'linestyle': '"""-"""', 'linewidth': '(1)'}), "([0, c[0]], [0, c[1]], color='k', linestyle='-', linewidth=1)\n", (8143, 8204), True, 'import matplotlib.pyplot as plt\n'), ((8655, 8674), 'numpy.where', 'np.where', (['(H > limit)'], {}), '(H > limit)\n', (8663, 8674), True, 'import numpy as np\n'), ((8887, 8904), 'numpy.array', 'np.array', (['pointsX'], {}), 
'(pointsX)\n', (8895, 8904), True, 'import numpy as np\n'), ((8906, 8923), 'numpy.array', 'np.array', (['pointsY'], {}), '(pointsY)\n', (8914, 8923), True, 'import numpy as np\n'), ((8961, 8978), 'numpy.array', 'np.array', (['pointsX'], {}), '(pointsX)\n', (8969, 8978), True, 'import numpy as np\n'), ((8980, 8997), 'numpy.array', 'np.array', (['pointsY'], {}), '(pointsY)\n', (8988, 8997), True, 'import numpy as np\n'), ((12623, 12653), 'random.randint', 'random.randint', (['(0)', '(100000000.0)'], {}), '(0, 100000000.0)\n', (12637, 12653), False, 'import random\n'), ((15794, 15824), 'random.randint', 'random.randint', (['(0)', '(100000000.0)'], {}), '(0, 100000000.0)\n', (15808, 15824), False, 'import random\n'), ((4442, 4453), 'numpy.array', 'np.array', (['p'], {}), '(p)\n', (4450, 4453), True, 'import numpy as np\n'), ((7352, 7375), 'math.cos', 'math.cos', (['(math.pi * 0.5)'], {}), '(math.pi * 0.5)\n', (7360, 7375), False, 'import math\n'), ((7375, 7398), 'math.sin', 'math.sin', (['(math.pi * 0.5)'], {}), '(math.pi * 0.5)\n', (7383, 7398), False, 'import math\n'), ((7425, 7450), 'math.cos', 'math.cos', (['(math.pi * 1.166)'], {}), '(math.pi * 1.166)\n', (7433, 7450), False, 'import math\n'), ((7450, 7475), 'math.sin', 'math.sin', (['(math.pi * 1.166)'], {}), '(math.pi * 1.166)\n', (7458, 7475), False, 'import math\n'), ((7502, 7527), 'math.cos', 'math.cos', (['(math.pi * 1.833)'], {}), '(math.pi * 1.833)\n', (7510, 7527), False, 'import math\n'), ((7527, 7552), 'math.sin', 'math.sin', (['(math.pi * 1.833)'], {}), '(math.pi * 1.833)\n', (7535, 7552), False, 'import math\n'), ((7720, 7739), 'numpy.array', 'np.array', (['[1, 0, 0]'], {}), '([1, 0, 0])\n', (7728, 7739), True, 'import numpy as np\n'), ((7774, 7793), 'numpy.array', 'np.array', (['[0, 1, 0]'], {}), '([0, 1, 0])\n', (7782, 7793), True, 'import numpy as np\n'), ((7828, 7847), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (7836, 7847), True, 'import numpy as np\n'), ((10499, 10628), 
'psychiatry.Participant', 'psychiatry.Participant', (['participant.data[v:v + size]', 'participant.idNumber', 'participant.diagnosis', 'participant.data[v + size]'], {}), '(participant.data[v:v + size], participant.idNumber,\n participant.diagnosis, participant.data[v + size])\n', (10521, 10628), False, 'import psychiatry\n'), ((11256, 11273), 'os.listdir', 'os.listdir', (['a_dir'], {}), '(a_dir)\n', (11266, 11273), False, 'import os\n'), ((11305, 11330), 'os.path.join', 'os.path.join', (['a_dir', 'name'], {}), '(a_dir, name)\n', (11317, 11330), False, 'import os\n'), ((17856, 17885), 'os.path.join', 'os.path.join', (['"""data/"""', 'folder'], {}), "('data/', folder)\n", (17868, 17885), False, 'import os\n'), ((5993, 6003), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (6000, 6003), True, 'import numpy as np\n'), ((6008, 6018), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (6015, 6018), True, 'import numpy as np\n'), ((6595, 6621), 'numpy.array', 'np.array', (['participant.data'], {}), '(participant.data)\n', (6603, 6621), True, 'import numpy as np\n'), ((9062, 9085), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (9083, 9085), False, 'import datetime\n'), ((10788, 10811), 'psychiatry.normalise', 'psychiatry.normalise', (['p'], {}), '(p)\n', (10808, 10811), False, 'import psychiatry\n'), ((18002, 18031), 'os.path.join', 'os.path.join', (['"""data/"""', 'folder'], {}), "('data/', folder)\n", (18014, 18031), False, 'import os\n'), ((2721, 2731), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (2728, 2731), True, 'import numpy as np\n'), ((2736, 2746), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (2743, 2746), True, 'import numpy as np\n'), ((3297, 3313), 'numpy.array', 'np.array', (['X.data'], {}), '(X.data)\n', (3305, 3313), True, 'import numpy as np\n')] |
"""
Plot Example to Learn Interface
=============================================================
Example to learn and gain experience
"""
import numpy as np
import matplotlib.pyplot as plt
# Sample sin(x) on one full period.
x = np.linspace(0, 2 * np.pi, 100)
y = np.sin(x)

plt.plot(x, y)
plt.xlabel('$x$')
# Raw string: '\s' is not a valid escape sequence, so the non-raw
# '$\sin(x)$' literal triggers a DeprecationWarning/SyntaxWarning on
# modern Python. The raw string produces the identical label text.
plt.ylabel(r'$\sin(x)$')
# To avoid matplotlib text output
plt.show()
| [
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.linspace",
"numpy.sin",
"matplotlib.pyplot.show"
] | [((197, 227), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(100)'], {}), '(0, 2 * np.pi, 100)\n', (208, 227), True, 'import numpy as np\n'), ((232, 241), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (238, 241), True, 'import numpy as np\n'), ((243, 257), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {}), '(x, y)\n', (251, 257), True, 'import matplotlib.pyplot as plt\n'), ((258, 275), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$x$"""'], {}), "('$x$')\n", (268, 275), True, 'import matplotlib.pyplot as plt\n'), ((276, 300), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\sin(x)$"""'], {}), "('$\\\\sin(x)$')\n", (286, 300), True, 'import matplotlib.pyplot as plt\n'), ((334, 344), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (342, 344), True, 'import matplotlib.pyplot as plt\n')] |
import numpy as np
import tensorflow as tf
from tensorflow import keras as K
from tqdm import tqdm
def visualize_saliency(
    model: K.Model,
    data_wrt: np.ndarray,
    n_samples: int = 1,
    epsilon: float = 1e-18,
    normalize: bool = True,
    stddev_spread: float = 0.15,
):
    """Visualize the saliency wrt to an input tensor.

    Can visualize either the backprop gradients or use SmoothGrad to inject
    noise and visualize a sample of gradients.

    Parameters
    ----------
    model : Keras.Model
        The trained Keras model.
    data_wrt : np.ndarray
        The data to calculate the gradients wrt.
    n_samples : int
        The number of samples to take when generating the saliency.
        Values > 1 enable SmoothGrad averaging.
    epsilon : float
        A small offset to prevent divide by zero when normalizing the gradients.
    normalize : bool
        Flag to specify whether to normalize the gradients in the range [0, 1].
    stddev_spread : float
        The standard deviation of the Gaussian noise used when sampling the
        gradients, expressed as a fraction of the input's value range.

    Returns
    -------
    normalized_gradient : np.array
        The normalized gradients/saliency map (raw magnitudes when
        ``normalize`` is False).
    logits : tf.Tensor
        The output logits of the model (from the last sampled pass when
        SmoothGrad is used).

    Notes
    -----
    Deep Inside Convolutional Networks: Visualising Image Classification
    Models and Saliency Maps -- Simonyan, Vedaldi and Zisserman.
    """
    # Noise scale is proportional to the dynamic range of the input (SmoothGrad).
    stddev = stddev_spread * (np.max(data_wrt) - np.min(data_wrt))
    tensor_wrt = tf.Variable(data_wrt, dtype=tf.float32)
    # SmoothGrad: average squared gradients over several noisy copies.
    if n_samples > 1:
        smooth_gradient = []
        for _ in tqdm(range(n_samples)):
            noise = tf.random.normal(data_wrt.shape, mean=0, stddev=stddev)
            tensor_plus_noise = tensor_wrt + noise
            with tf.GradientTape() as tape:
                # BUG FIX: `tensor_plus_noise` is a plain Tensor (Variable +
                # noise), which the tape does NOT watch automatically. It must
                # be watched *before* the forward pass; previously watch() was
                # called after `model(...)`, so tape.gradient returned None.
                tape.watch(tensor_plus_noise)
                logits = model(tensor_plus_noise, training=False)
            gradient = tape.gradient(logits, tensor_plus_noise)
            # Squared-gradient variant of SmoothGrad.
            absolute_gradient = tf.square(gradient).numpy()
            smooth_gradient.append(absolute_gradient)
        absolute_gradient = (
            np.sum(np.stack(smooth_gradient, axis=0), axis=0) / n_samples
        )
    # gradients only
    else:
        # tf.Variable inputs are watched automatically by the tape.
        with tf.GradientTape() as tape:
            logits = model(tensor_wrt, training=False)
        # get the gradient and magnitude of the gradient
        gradient = tape.gradient(logits, tensor_wrt)
        absolute_gradient = np.abs(gradient.numpy())
    # if we're not normalizing, return the raw gradient magnitude
    if not normalize:
        return absolute_gradient, logits

    def _normalize_gradients(g):
        # epsilon guards against divide-by-zero for a constant gradient map
        return (g - np.min(g)) / (np.max(g) - np.min(g) + epsilon)

    # now normalize the gradients
    normalized_gradient = _normalize_gradients(absolute_gradient)
    return normalized_gradient, logits
| [
"tensorflow.random.normal",
"tensorflow.Variable",
"numpy.max",
"tensorflow.GradientTape",
"numpy.stack",
"numpy.min",
"tensorflow.square"
] | [((1468, 1507), 'tensorflow.Variable', 'tf.Variable', (['data_wrt'], {'dtype': 'tf.float32'}), '(data_wrt, dtype=tf.float32)\n', (1479, 1507), True, 'import tensorflow as tf\n'), ((1413, 1429), 'numpy.max', 'np.max', (['data_wrt'], {}), '(data_wrt)\n', (1419, 1429), True, 'import numpy as np\n'), ((1432, 1448), 'numpy.min', 'np.min', (['data_wrt'], {}), '(data_wrt)\n', (1438, 1448), True, 'import numpy as np\n'), ((1644, 1699), 'tensorflow.random.normal', 'tf.random.normal', (['data_wrt.shape'], {'mean': '(0)', 'stddev': 'stddev'}), '(data_wrt.shape, mean=0, stddev=stddev)\n', (1660, 1699), True, 'import tensorflow as tf\n'), ((2317, 2334), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (2332, 2334), True, 'import tensorflow as tf\n'), ((1769, 1786), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (1784, 1786), True, 'import tensorflow as tf\n'), ((2207, 2240), 'numpy.stack', 'np.stack', (['smooth_gradient'], {'axis': '(0)'}), '(smooth_gradient, axis=0)\n', (2215, 2240), True, 'import numpy as np\n'), ((2755, 2764), 'numpy.min', 'np.min', (['g'], {}), '(g)\n', (2761, 2764), True, 'import numpy as np\n'), ((2769, 2778), 'numpy.max', 'np.max', (['g'], {}), '(g)\n', (2775, 2778), True, 'import numpy as np\n'), ((2781, 2790), 'numpy.min', 'np.min', (['g'], {}), '(g)\n', (2787, 2790), True, 'import numpy as np\n'), ((2075, 2094), 'tensorflow.square', 'tf.square', (['gradient'], {}), '(gradient)\n', (2084, 2094), True, 'import tensorflow as tf\n')] |
import numpy as np
import warnings
#import collections
import urllib
import json
from pydiablo.logger import logger
from pydiablo.error import PydiabloError
from pydiablo.d2data import D2Data
def eias(ias):
    """Convert item IAS to effective IAS using the engine's diminishing-returns curve."""
    scaled = 120. * ias
    return np.floor(scaled / (120 + ias)).astype(int)
# AnimLength: frames per direction in AnimData.txt
# AnimSpeed: AnimData.txt
# AnimRate: Slowing effects
def anim_duration(AnimLength, AnimSpeed, AnimRate, SIAS, WSM, IASItem, WIAS=0, rollback=100):
    """Return the frame duration of one animation (final frame excluded).

    AnimLength/AnimSpeed come from AnimData.txt, AnimRate covers slowing
    effects, SIAS/WSM/IASItem/WIAS are the usual speed modifiers, and
    rollback rescales the frame count.
    """
    effective_ias = eias(IASItem + WIAS)
    # total speed-up is capped at 175 by the engine
    speed_up = min(AnimRate + SIAS + effective_ias - WSM, 175)
    base_frames = np.ceil(AnimLength * rollback / 100.)
    # the animation counter advances by this amount per game frame
    counter_step = np.floor(AnimSpeed * speed_up / 100.)
    total = np.ceil(base_frames * 256 / counter_step) - 1
    return total.astype('int')
def anim_duration_seq(AnimLength, AnimSpeed, AnimRate, SIAS, WSM, IASItem, WIAS=0, rollback=100):
    """Return the frame duration of one sequence animation (no final-frame subtraction)."""
    effective_ias = eias(IASItem + WIAS)
    # total speed-up is capped at 175 by the engine
    speed_up = min(AnimRate + SIAS + effective_ias - WSM, 175)
    base_frames = np.ceil(AnimLength * rollback / 100.)
    # AnimSpeed is always 256 for attacks (this is also from AnimData.txt)
    counter_step = np.floor(AnimSpeed * speed_up / 100.)
    return np.ceil(base_frames * 256 / counter_step).astype('int')
def anim_speed(anim_duration, aidelay=0):
    """Return attacks per second: 25 fps divided by the mean frame duration."""
    mean_duration = 1.0 * (sum(anim_duration) + aidelay) / len(anim_duration)
    return 25.0 / mean_duration
def breakpoints(anim_duration_function, wtype, AnimRate, SIAS, WSM, WIAS=0, **kwargs):
    """Return the IAS values at which the total animation duration changes.

    Candidate IAS values are the minimum on-item IAS needed to reach each
    effective-IAS step; anim_duration_function is called once per candidate.
    """
    eias_steps = np.array(range(120))
    # minimum item IAS required to reach each effective-IAS step
    required_ias = np.ceil(eias_steps * 120. / (120 - eias_steps)).astype('int') - WIAS
    candidates = [ias for ias in required_ias if ias >= 0]
    bps = []
    previous_total = 0
    for ias in candidates:
        durations = anim_duration_function(wtype, AnimRate, SIAS, WSM, ias, WIAS=WIAS, **kwargs)
        total = sum(durations)
        if total != previous_total:
            bps.append(ias)
        previous_total = total
    return bps
# print np.diff(np.floor(120.*(ias)/(120+(ias))))
# take the raw rowdata from the animdata file for a particular animation and return the position of the action flag
def action_flag_position(animdata):
    """Return the index (within the frame data) of the first set action flag.

    The first two entries of the raw AnimData.txt row are skipped; they are
    not per-frame flags.
    """
    frame_flags = animdata[2:]
    nonzero_indices = np.nonzero(frame_flags)[0]
    return nonzero_indices[0]
#get_breakpoints(0,0,0,0,0)
def write_bp_table(iostream, anim_duration_function, wtype, AnimRate, SIAS, WSM, WIAS=0, **kwargs):
    """Write a tab-separated IAS breakpoint table to iostream.

    A key/value preamble (class, animation, weapon, modifiers) is written
    first, then one row per breakpoint with per-attack frame counts, the
    average duration, and attacks per second.

    anim_duration_function is expected to be a bound classmethod (its
    __self__ supplies the class name and the class-level aidelay).
    """
    # preamble: one "key: value" line per configuration parameter
    iostream.write('Class: ' + anim_duration_function.__self__.__name__ + '\n')
    iostream.write('Animation: ' + anim_duration_function.__name__.split('_')[0] + '\n')
    iostream.write('Weapon: ' + wtype + '\n')
    iostream.write('AnimRate: ' + str(AnimRate) + '\n')
    iostream.write('SIAS: ' + str(SIAS) + '\n')
    iostream.write('WSM: ' + str(WSM) + '\n')
    iostream.write('WIAS: ' + str(WIAS) + '\n')
    # NOTE(review): metadata is built here but never used below.
    metadata = {'anim_name': anim_duration_function.__name__,
                'wtype': wtype,
                'anim_rate': AnimRate,
                'sias': SIAS,
                'wsm': WSM}
    header = ['ias']
    header += ['eias']
    first_run = True
    bps = breakpoints(anim_duration_function, wtype, AnimRate, SIAS, WSM, WIAS=WIAS, **kwargs)
    aidelay = anim_duration_function.__self__.aidelay
    for bp in bps:
        #for anim_duration_function in anim_duration_functions:
        # per-attack frame durations at this breakpoint
        mlist = anim_duration_function(wtype, AnimRate, SIAS, WSM, bp, WIAS=WIAS, **kwargs)
        if first_run:
            # header columns are only known once the first row is computed
            header += ['atk{:d}'.format(x) for x,y in enumerate(mlist)]
            if aidelay:
                header += ['aidelay']
            header += ['avg']
        mstr = [str(x) for x in mlist]
        if aidelay:
            mstr.append(str(aidelay))
        # average frame duration including the AI delay
        mstr.append('{:.2f}'.format(1.0*(sum(mlist)+aidelay)/len(mlist)))
        mstr.append('{:.2f}'.format(anim_speed(mlist, aidelay=aidelay)))
        # prepend the IAS value and the net effective IAS for this row
        mstr.insert(0, str(bp))
        mstr.insert(1, str(eias(bp+WIAS)-WSM+SIAS))
        if first_run:
            header += ['aps']
            # header row is emitted just before the first data row
            iostream.write('\t'.join(header) + '\n')
        iostream.write('\t'.join(mstr) + '\n')
        #mstr = '\t'.join([d for d in mline])
        #if mstr != mstr_prev:
        #    return_str += '\t'.join([str(ias), mstr])
        #    return_str += '\n'
        #mstr_prev = mstr
        first_run = False
    #mdtype = ['float64']*len(header)
    #np_tbl = np.rec.array(tbl, dtype=zip(header, mdtype))
#def print_table(tbl, metadata):
# for row in tbl:
# print row
class AnimData(object):
    """Parses an AnimData.txt-style table into a dict keyed by CofName."""

    def __init__(self, filename):
        path = D2Data.DATA_PATH + filename
        # first column holds the animation key (CofName); read it by name
        keys = np.genfromtxt(path, delimiter='\t', names=True, dtype=None, usecols=0, encoding=None)['CofName']
        # remaining columns are numeric; drop the key column from each row
        rows = np.genfromtxt(path, delimiter='\t', skip_header=1, dtype='int', encoding=None)
        values = [row[1:] for row in rows]
        self.animdata_dict = dict(zip(keys, values))

    def get_data(self, key):
        """Return the numeric row for the given CofName key."""
        return self.animdata_dict[key]
class Stat:
    """Static helpers for translating d2s parser item attributes into game stats.

    Class attributes hold parsed game data tables:
    itemstatcostid -- ItemStatCost.txt keyed by numeric stat ID
    properties -- Properties.txt keyed by property code
    """
    #itemstatcost = D2Data('data/global/excel/ItemStatCost.txt', 'Stat')
    itemstatcostid = D2Data('data/global/excel/ItemStatCost.txt', 'ID', usecols=[0,1]);
    properties = D2Data('data/global/excel/Properties.txt', 'code', usecols=range(30))

    @classmethod
    def attribute_to_stats(cls, attr):
        """Convert one d2s 'attribute' dict into a list of ItemStatCost-style stats.

        attr has an 'id' (numeric stat id) and 'values' (list of ints).
        Returns a list of {'stat': <name>, 'values': [...]} dicts.
        """
        # first handle some special cases where the d2s parser
        # combined some stats into ranges.
        if attr['id'] in [17, 48, 50, 52, 54, 57]:
            stats = []
            # consecutive stat ids hold the individual range components
            for i, value in enumerate(attr['values']):
                stat = cls.itemstatcostid.get_data(attr['id']+i, 'Stat')
                stats.append({'stat': stat, 'values': [value]})
            return stats
        # next deal with the properties giving charges.
        if attr['id'] in range(204,214):
            # override the stat reference to point to a new dict that we will
            # modify so that charges fits in better to our scheme. We recombine
            # the current and maximum charges into one number.
            new_attr = {}
            new_attr['id'] = attr['id']
            try:
                # MSB is maximum charges, LSB is current charges
                new_attr['values'] = [attr['values'][0], attr['values'][1],
                                      attr['values'][2] + 2**8*attr['values'][3]]
            except IndexError as e:
                logger.error("Unexpected values field in item charges attribute. JSON dump: {}".format(attr))
                raise
            attr = new_attr
        # next handle the general case.
        stat = cls.itemstatcostid.get_data(attr['id'], 'Stat')
        return [{'stat': stat, 'values': attr['values']}]

    @classmethod
    def add_attributes_to_map(cls, attr_iterator, stat_map):
        """Add attributes from the item to the stat map.

        Positional arguments:
        attr_iterator -- an iterator for the item attributes.
        stat_map -- add stats to this map

        First, some terminology. Nokka's d2s parser gives 'attributes' for the items.
        These 'attributes' are a little different than the 'stats' in ItemStatCost.txt.
        When referring to the stat as it exists in the JSON from the d2s parser, I will
        use the term 'attribute'. When referring to a stat consistent with ItemStatCost.txt,
        I will use the term 'stat'.

        attr_iterator must yield a map with an id and values field and can
        be created with the generator methods in the Item class. These maps are
        expected to follow the format of nokka's d2s parser. When converting from
        attribute to stat, we change a few things, notably with combined stat ranges
        (min-max dmg) and with charges.

        The stat_map will contain all item stats, keyed by stat id (ItemStatCost.txt).
        In the case of a simple stat (one value), the value for the stat id
        will be a list of all instance values of that stat. In the case of a complex
        stat, the value for the stat id will be another map, keyed by parameter.

        simple attribute:
        > stat_map[141] # deadly strike
        [20]

        complex attribute:
        > stat_map[204][62][30] # level 30 (30) hydra (62) charges (204 is the stat id)
        [2570]

        The game stores the current and max charges as one 16 bit value. In this case,
        there are 10 current charges (LSB) and 10 max (MSB): 2570 = 0x0A0A.
        """
        for attr in attr_iterator:
            for stat in cls.attribute_to_stats(attr):
                # walk/extend the nested dicts: one level per leading value,
                # in reverse order, with the last value appended at the leaf
                mdict = stat_map
                mkey = stat['stat']
                for value in attr['values'][:-1][::-1]:
                    if mkey not in mdict:
                        mdict[mkey] = {}
                    mdict = mdict[mkey]
                    mkey = value
                if mkey not in mdict:
                    mdict[mkey] = []
                mdict[mkey].append(attr['values'][-1])

    @staticmethod
    def create_stat(func, stat, set_, val, param, min_, max_, rand=False):
        """Return a newly created stat as a dict with 'stat_id' and 'values' fields.
        The values are ordered consistenly with the item stat order in the d2s file.
        """
        # NOTE(review): the `param` argument is currently unused in this body.
        if rand:
            logger.error("Random generation of stats not yet supported.")
        # The funciton mapping below was reverse engineered from vanilla game data
        # and by comparing to the item stat order in the d2s file (easy to see in nokka's parser).
        # It may not be completely accurate. Surely there are some differences between
        # the otherwise identical functions.
        # TODO: func3 is same as 1, but it should reuse the func1 rolls.
        if func in [1, 3, 8]:
            #stat_id = cls.itemstatcost.get_data(stat, 'ID')
            return {'stat': stat, 'values': [(min_+max_)//2]}
        if func==21:
            #stat_id = cls.itemstatcost.get_data(stat, 'ID')
            return {'stat': stat, 'values': [val, (min_+max_)//2]}
        else:
            return {}

    @classmethod
    def property_functions(cls, prop):
        """Yield a map containing 'set', 'val', 'func', and 'stat' fields for each stat associated with the property."""
        for i in range(1,8): # 7 maximum stats per property
            stat = cls.properties.get_data(prop, 'stat{}'.format(i))
            #stat_id = cls.itemstatcost.get_data(stat, 'ID')
            set_ = cls.properties.get_data(prop, 'set{}'.format(i))
            val = cls.properties.get_data(prop, 'val{}'.format(i))
            func = cls.properties.get_data(prop, 'func{}'.format(i))
            # bool dtype means the column is empty; -1 means no more stats
            if func.dtype == 'bool' or func == -1:
                return # no additional stats to yield
            yield {'stat': stat, 'set_': set_, 'val': val, 'func': func}
class Item(object):
    """Wrapper around one item dict produced by nokka's d2s parser."""

    # constants for interpreting json character data
    ITEM_LOCATION_STORED = 0
    ITEM_LOCATION_EQUIPPED = 1
    ITEM_LOCATION_BELT = 2
    ITEM_ALT_POSITION_INVENTORY = 1
    ITEM_ALT_POSITION_CUBE = 4
    ITEM_ALT_POSITION_STASH = 5
    ITEM_EQUIPPED_ID_RIGHT_HAND = 4
    ITEM_EQUIPPED_ID_LEFT_HAND = 5
    ITEM_TYPE_ID_WEAPON = 3
    ITEM_QUALITY_SET = 5

    # this is a bit ugly, but we need to know if any item is a charm to determine
    # if we should use the item or not
    CHARM_ITEM_TYPES = ['cm1', 'cm2', 'cm3']

    def __init__(self, itemdata):
        self.item = itemdata

    @classmethod
    def create_item(cls, itemdata):
        """Factory: return a SetItem for set-quality item data, else a plain Item."""
        if itemdata.get('quality') == cls.ITEM_QUALITY_SET:
            return SetItem(itemdata)
        return cls(itemdata)

    def is_equipped(self):
        """True if the item is currently equipped."""
        return self.item['location_id'] == self.ITEM_LOCATION_EQUIPPED

    def is_weapon(self):
        """True if the item's type id marks it as a weapon."""
        return self.item['type_id'] == self.ITEM_TYPE_ID_WEAPON

    def is_right_hand_weapon(self):
        """True if the item is a weapon equipped in the right hand (above glove slot)."""
        return (self.is_equipped()
                and self.item['equipped_id'] == self.ITEM_EQUIPPED_ID_RIGHT_HAND
                and self.is_weapon())

    def is_left_hand_weapon(self):
        """True if the item is a weapon equipped in the left hand (above boots)."""
        return (self.is_equipped()
                and self.item['equipped_id'] == self.ITEM_EQUIPPED_ID_LEFT_HAND
                and self.is_weapon())

    def in_inventory(self):
        """True if the item sits in the player's main inventory grid."""
        return (self.item['location_id'] == self.ITEM_LOCATION_STORED
                and self.item['alt_position_id'] == self.ITEM_ALT_POSITION_INVENTORY)

    def is_charm(self):
        """True if the item's type code is one of the charm types."""
        return self.item['type'] in self.CHARM_ITEM_TYPES

    def use_item(self):
        """True when the item contributes stats: equipped gear, or a charm in inventory."""
        return self.is_equipped() or (self.in_inventory() and self.is_charm())

    def get_socketed_items(self):
        """Return Item wrappers for anything socketed into this item (possibly empty)."""
        socketed = self.item.get('socketed_items')
        return [] if socketed is None else [Item(data) for data in socketed]

    def attributes(self, attr_name):
        """Yield each attribute dict stored under attr_name, if present."""
        attrs = self.item.get(attr_name)
        if attrs is not None:
            yield from attrs

    def non_set_attributes(self):
        """Yield all non-set attributes: own magic, runeword, and socketed magic."""
        yield from self.attributes('magic_attributes')
        yield from self.attributes('runeword_attributes')
        for socketed in self.get_socketed_items():
            yield from socketed.attributes('magic_attributes')

    def sets_key(self):
        """Key for lookups in Sets.txt; None unless this is a set item."""
        return None

    def set_attributes(self, num_items):
        """Empty iterator; SetItem overrides this with real set bonuses."""
        return
        yield
class Set():
    """Represents one full item set (one row of Sets.txt)."""
    # Sets.txt table keyed by its 'index' column
    sets = D2Data('data/global/excel/Sets.txt', 'index')

    def __init__(self, set_id):
        # set_id is the 'index' key into Sets.txt (and the 'set' column of SetItems.txt)
        self.set_id = set_id
        logger.debug("{} is a {} piece set.".format(self.set_name(), self.num_items()))

    def num_items(self):
        # number of SetItems.txt rows whose 'set' column matches this set
        return np.sum(SetItem.setitems.data['set'] == self.set_id)

    def sets_data(self, col):
        """Get data from a column of Sets.txt."""
        return self.sets.get_data(self.set_id, col)

    def set_name(self):
        """Return the name of the set."""
        return self.sets.get_data(self.set_id, 'name')

    def _attributes(self, prefix, suffix, start, stop):
        """Iterate the set bonus columns named <prefix>{Code,Param,Min,Max}<i><c>.

        NOTE(review): the stats built here via Stat.create_stat are only
        logged, never returned or applied anywhere in this method.
        """
        # partial set bonuses first
        for i in range(start, stop):
            for c in suffix:
                propstr = '{}Code{}{}'.format(prefix,i,c)
                parstr = '{}Param{}{}'.format(prefix,i,c)
                minstr = '{}Min{}{}'.format(prefix,i,c)
                maxstr = '{}Max{}{}'.format(prefix,i,c)
                prop = self.sets_data(propstr)
                par = self.sets_data(parstr)
                min_ = self.sets_data(minstr)
                max_ = self.sets_data(maxstr)
                # bool check is because an empty column has bool datatype
                # TODO: Figure out a better way to deal with this before it's all over the place
                if prop.dtype != 'bool' and prop != '':
                    logger.debug("Found property {} to include from {} set.".format(prop, self.set_name())
                                 + " This property calls funcions {}.".format(list(Stat.property_functions(prop)))
                                 + " Arguments to property function: param={} min={} max={}".format(par, min_, max_))
                    for property_function in Stat.property_functions(prop):
                        stat = Stat.create_stat(**property_function, param=par, min_=min_, max_=max_)
                        logger.debug("Created stat {}.".format(stat))

    def attributes(self, num_items):
        # partial ('P') bonuses for 2..num_items equipped pieces
        self._attributes('P', ['a','b'], 2, num_items+1)
        # full ('F') bonuses only when the whole set is worn
        if num_items == self.num_items():
            self._attributes('F', [''], 1, 9)
class SetItem(Item):
    """An Item that belongs to an item set; adds SetItems.txt/Sets.txt lookups."""
    # map the set_id from d2s parser to the index used by SetItems.txt
    setitems2 = D2Data('data2/SetItems2.txt', 'set_id')
    setitems = D2Data('data/global/excel/SetItems.txt', 'index')

    def __init__(self, itemdata):
        Item.__init__(self, itemdata)
        try:
            # translate the parser's set_id into the SetItems.txt index key
            self.set_index = self.setitems2.get_data(itemdata['set_id'], 'index')
            #self.set = Set(self.sets_key())
            logger.debug("Creating {} set item {}.".format(self.sets_key(), self.set_index))
        except KeyError as e:
            logger.error("Set item by quality has no set_id. JSON dump: {}".format(itemdata))
            raise

    def setitems_key(self):
        """Return the key for lookups in the SetItems.txt file."""
        return self.set_index

    def sets_key(self):
        """Return the key for lookups in the Sets.txt file."""
        return self.setitems.get_data(self.set_index, 'set')

    def setitems_data(self, col):
        """Get data from a column of SetItems.txt."""
        return self.setitems.get_data(self.set_index, col)

    def all_set_attributes(self):
        """Return an iterator for lists of set item attributes, active or not.

        Set items have attributes organized as a list of lists. The inner lists
        contain the actual attributes. The outer list is for groups of attributes.
        These attributes are grouped because of the way set bonuses are applied.
        The first group is applied with x many items, second group with y many, etc."""
        for attr_list in self.attributes('set_attributes'):
            yield attr_list

    def set_attributes(self, num_items):
        """Return an iterator for active set attributes."""
        # first figure out if bonuses on this item depend on total items equipped or specific items equipped
        # (Civerb's shield is the only one in the latter category)
        if self.setitems_data('add_func') == 1:
            # add the stats based on which other specific items are present
            logger.error("Sets items with bonuses dependent on specific set items (e.g. Civerb's shield) are not"
                         " yet supported. Bonuses will not be applied on {}".format(self.setitems_key()))
        elif self.setitems_data('add_func') == 2:
            # add the stats based on total number of unique items present
            # first grab the set attributes iterator for the item. This is intentionally
            # only initialized once, and not again in the inner loop. It should advance each
            # time we match the exepcted stats from ItemStatCost with the attributes in the list.
            set_attr_iter = self.all_set_attributes()
            try:
                for i in range(1, num_items):
                    stat_ids = []
                    for c in ['a','b']:
                        propstr = 'aprop{}{}'.format(i,c)
                        #parstr = 'apar{}{}'.format(i,c)
                        #minstr = 'amin{}{}'.format(i,c)
                        #maxstr = 'amax{}{}'.format(i,c)
                        prop = self.setitems_data(propstr)
                        #par = item.setitems_data(parstr)
                        #min_ = item.setitems_data(minstr)
                        #max_ = item.setitems_data(maxstr)
                        # bool check is because an empty column has bool datatype
                        # TODO: Figure out a better way to deal with this before it's all over the place
                        if prop.dtype != 'bool' and prop != '':
                            logger.debug("Found property {} to include on {}.".format(prop, self.setitems_key())
                                         + " This property adds stats {}.".format([x['stat'] for x in list(Stat.property_functions(prop))]))
                            stat_ids += list(Stat.property_functions(prop))
                    # we need to find the attribute(s) in the d2s parser that matches the stat ids we look
                    # up from the property to add. We could attempt to look up the stat values themselves
                    # in the txt files, but this isn't the right way to do it. Some stat bonuses on items
                    # are actually variable (see Civerb's shield), so we should respect the values in the
                    # d2s file.
                    if len(stat_ids) > 0:
                        # above condition means there is a bonus we should apply, now we need to match it
                        # to the d2s attributes
                        for attr_list in set_attr_iter:
                            tmp_map = {}
                            Stat.add_attributes_to_map(iter(attr_list), tmp_map)
                            if set(tmp_map.keys()) == set([x['stat'] for x in stat_ids]):
                                logger.debug("Attributes {} active on {}.".format(attr_list, self.setitems_key()))
                                for attr in attr_list:
                                    yield attr
                                break
                        else:
                            # for/else: iterator exhausted without a match.
                            # NOTE(review): if set_attr_iter was already empty,
                            # attr_list is unbound here and this would raise
                            # NameError instead of PydiabloError — verify.
                            raise PydiabloError("Attributes {} did not match expected stat ids {} on {}.".format(attr_list,
                                                                                                                 stat_ids, self.setitems_key()))
            except PydiabloError as e:
                logger.error("Problem matching the set bonuses from d2s to those expected"
                             " by SetItems.txt ({}). Don't trust set bonuses on this item.".format(str(e)))
                return
# if the value is 0 (empty), do nothing.
class Character(object):
animdata = AnimData('data2/animdata.txt')
#item_stat_cost = D2Data('data/global/excel/ItemStatCost.txt', 'Stat')
# see http://www.mannm.org/d2library/faqtoids/animspeed.html#startframes
startframes = {'HTH': 0,
'BOW': 0,
'1HS': 0,
'1HT': 0,
'STF': 0,
'2HS': 0,
'2HT': 0,
'XBW': 0}
aidelay = 0
#def __init__(self):
# self.weapon = None #HandToHand()
# self.equipment = []
#def equip_weapon(self, weapon):
# self.weapon = weapon
#def equip(self, equipable):
# self.equipment.append(equipable)
def __init__(self, chardata):
self.chardata = chardata
self.character = chardata['character']
self.d2s = self.character['d2s']
self.header = self.d2s['header']
self.attributes = self.d2s['attributes']
self.skills = self.d2s['skills']
self.items = [Item.create_item(itemdata) for itemdata in self.d2s['items']]
self.corpse_items = self.d2s['corpse_items']
self.merc_items = self.d2s['merc_items']
self.build_set_map()
self.build_stat_maps()
def name(self):
return self.header['name']
def level(self):
return self.header['level']
def get_active_items(self):
"""Return a list of the active items, i.e., those that are equipped or charms."""
active_items = []
for item in self.items:
if item.use_item():
active_items.append(item)
return active_items
def get_primary_weapon(self):
"""Return the primary weapon."""
left_hand_weapon = None
for item in self.get_active_items():
if item.is_right_hand_weapon():
# if the item is in the right hand, then we don't need to look anymore,
# it is the primary weapon
return item
elif item.is_left_hand_weapon():
left_hand_weapon = item
# if there was no right hand weapon found, then we return the left hand weapon,
# which will be None if there was no left hand weapon
return left_hand_weapon
def get_secondary_weapon(self):
"""Return the secondary weapon."""
right_hand_weapon = None
left_hand_weapon = None
for item in self.get_active_items():
if item.is_right_hand_weapon():
right_hand_weapon = item
elif item.is_left_hand_weapon():
left_hand_weapon = item
if right_hand_weapon is not None and left_hand_weapon is not None:
# as soon as we find two weapons, we can return the one in the left hand
return left_hand_weapon
# get here if we did not find two weapons, in which case there is no secondary
return None
def get_active_non_weapons(self):
"""Return a list of non-weapon active items."""
items = []
for item in self.get_active_items():
if not item.is_right_hand_weapon() and not item.is_left_hand_weapon():
items.append(item)
return items
def num_set_items(self, item):
"""Return total number of active set items for the set item 'item'."""
if item.sets_key() is None: return 0
n = len(set([item_.setitems_key() for item_ in self.set_map[item.sets_key()]]))
logger.debug("Processing {} on {} with bonuses from {} items from the {} set.".format(item.setitems_key(), self.name(), n, item.sets_key()))
return n
def build_stat_maps(self):
"""Construct the stat maps that will be used to perform O(1) lookup per stat."""
self.primary_weapon_stats = {}
self.secondary_weapon_stats = {}
self.off_weapon_stats = {}
for item in [self.get_primary_weapon()]:
if item is not None:
Stat.add_attributes_to_map(item.non_set_attributes(), self.primary_weapon_stats)
Stat.add_attributes_to_map(item.set_attributes(self.num_set_items(item)), self.primary_weapon_stats)
for item in [self.get_secondary_weapon()]:
if item is not None:
Stat.add_attributes_to_map(item.non_set_attributes(), self.secondary_weapon_stats)
Stat.add_attributes_to_map(item.set_attributes(self.num_set_items(item)), self.secondary_weapon_stats)
for item in self.get_active_non_weapons():
if item is not None:
Stat.add_attributes_to_map(item.non_set_attributes(), self.off_weapon_stats)
Stat.add_attributes_to_map(item.set_attributes(self.num_set_items(item)), self.off_weapon_stats)
def build_set_map(self):
"""Build a map of the character's set items, keyed by index from Sets.txt.
Each element of the dict is a list of set items.
"""
self.set_map = {}
for item in self.get_active_items():
if item.sets_key() is not None:
if item.sets_key() not in self.set_map:
self.set_map[item.sets_key()] = []
self.set_map[item.sets_key()].append(item)
# TODO: Best way to do this is probably to build a map of stat ids (itemstatcost.txt) to a list of values.
# We can do this once in the constructor, then we don't have to search through all the items every time.
#def deadly_strike(self):
# """Return character's total effective deadly strike as a percentage."""
# deadly_strike = 0
# for item in self.get_active_items():
# if 'magic_attributes' not in item: continue
# for stat in item['magic_attributes']:
# if stat['id'] == 141:
# deadly_strike += stat['values'][0]
# elif stat['id'] == 250:
# deadly_strike += stat['values'][0]*self.level()//8
# return deadly_strike
@classmethod
def get_char_animdata(cls, AnimName, wtype):
animkey = cls.ctype + AnimName + wtype
return cls.animdata.get_data(animkey)
@classmethod
def anim_duration(cls, AnimName, wtype, AnimRate, SIAS, WSM, IASItem, WIAS=0, rollback=100, first=False):
animdata = cls.get_char_animdata(AnimName, wtype)
if first: startframes = cls.startframes[wtype]
else: startframes = 0
return anim_duration(animdata[0]-startframes, animdata[1], AnimRate, SIAS, WSM, IASItem, WIAS=WIAS, rollback=rollback)
# the length of the animation used for serial attacks, like zeal or fury
@classmethod
def base_foreswing_frames(cls, animdata, wtype, first=False):
af = action_flag_position(animdata)
if first:
af -= cls.startframes[wtype]
return af
@classmethod
def foreswing_duration(cls, AnimName, wtype, AnimRate, SIAS, WSM, IASItem, WIAS=0, first=False, rollback=100):
animdata = cls.get_char_animdata(AnimName, wtype)
#print animdata
af = cls.base_foreswing_frames(animdata, wtype, first)
return anim_duration_seq(af, animdata[1], AnimRate, SIAS, WSM, IASItem, WIAS=WIAS, rollback=rollback)
@classmethod
def avg_attack_duration(cls, wtype, AnimRate, SIAS, WSM, IASItem, WIAS=0):
return sum(cls.attack_duration(wtype, AnimRate, SIAS, WSM, IASItem, WIAS=WIAS))/2.0
#def avg_attack_duration_as_equipped(self, AnimRate, SIAS, IASItem):
# wtype = self.weapon.wtype
# wias = self.weapon.mp.ias
# wsm = self.weapon.wsm
# return self.avg_attack_duration(wtype, AnimRate, SIAS, wsm, IASItem, WIAS=wias)
#@classmethod
#def avg_action_frame(cls, wtype, AnimRate, SIAS, WSM, IASItem):
# return (cls.action_frame('A1', wtype, AnimRate, SIAS, WSM, IASItem)\
# +cls.action_frame('A2', wtype, AnimRate, SIAS, WSM, IASItem))/2.0
@classmethod
def attack_duration(cls, wtype, AnimRate, SIAS, WSM, IASItem, WIAS=0):
a1_dur = cls.anim_duration('A1', wtype, AnimRate, SIAS, WSM, IASItem, WIAS=WIAS, first=True)
try:
a2_dur = cls.anim_duration('A2', wtype, AnimRate, SIAS, WSM, IASItem, WIAS=WIAS, first=True)
except KeyError as e:
return [a1_dur]
return [a1_dur, a2_dur]
@classmethod
def zeal_duration(cls, wtype, AnimRate, SIAS, WSM, IASItem, WIAS=0):
return [cls.foreswing_duration('A1', wtype, AnimRate, SIAS, WSM, IASItem, WIAS=WIAS, first=True)] +\
[cls.foreswing_duration('A1', wtype, AnimRate, SIAS, WSM, IASItem, WIAS=WIAS)]*3 +\
[cls.anim_duration('A1', wtype, AnimRate, SIAS, WSM, IASItem, WIAS=WIAS)]
class Paladin(Character):
    """Paladin; animation-data token 'PA'."""
    ctype = 'PA'
class Druid(Character):
    """Druid; animation-data token 'DZ'."""
    ctype = 'DZ'
class Barbarian(Character):
    """Barbarian; animation-data token 'BA'."""
    ctype = 'BA'
class Assassin(Character):
    """Assassin; animation-data token 'AI'."""
    ctype = 'AI'
class Necromancer(Character):
    """Necromancer; animation-data token 'NE'."""
    ctype = 'NE'
class Transform(object):
    """Mixin for shapeshifted animation timing.

    Transform animations are looked up under the form's own token (cls.ttype)
    and always use the HTH entries; the underlying character animation only
    contributes its speed via modified_anim_speed().
    """
    @classmethod
    def get_xform_animdata(cls, AnimName):
        """Animation data for the shapeshifted form (forms only have HTH entries)."""
        return cls.animdata.get_data(cls.ttype + AnimName + 'HTH')

    @classmethod
    def modified_anim_speed(cls, AnimName, wtype, WSM, WIAS):
        """Effective animation speed of the form, derived from the human animation."""
        neutral_frames = cls.get_xform_animdata('NU')[0]
        if wtype == '2HS':
            wtype = '1HS' # TODO: double check this
        chardata = cls.get_char_animdata(AnimName, wtype)
        char_frames = chardata[0]
        char_speed = chardata[1]
        delay = np.floor(256.*char_frames / np.floor((100.+WIAS-WSM) * char_speed/100.))
        return int(np.floor(256.*neutral_frames / delay))

    # TODO: figure out what to do with first argument here
    @classmethod
    def anim_duration(cls, AnimName, wtype, AnimRate, SIAS, WSM, IASItem, WIAS=0, first=False):
        """Total duration of a shapeshifted animation, in frames."""
        animdata = cls.get_xform_animdata(AnimName)
        speed = cls.modified_anim_speed(AnimName, wtype, WSM, WIAS)
        return anim_duration(animdata[0], speed, AnimRate, SIAS, WSM, IASItem, WIAS=WIAS)

    @classmethod
    def foreswing_duration(cls, AnimName, wtype, AnimRate, SIAS, WSM, IASItem, WIAS=0, first=False):
        """Duration up to the action frame of a shapeshifted attack."""
        animdata = cls.get_xform_animdata(AnimName)
        foreswing = cls.base_foreswing_frames(animdata, wtype, first)
        speed = cls.modified_anim_speed(AnimName, wtype, WSM, WIAS)
        return anim_duration_seq(foreswing, speed, AnimRate, SIAS, WSM, IASItem, WIAS=WIAS)
class Werewolf(Transform):
    """Werewolf form; transform animation token '40'."""
    ttype = '40'
    @classmethod
    def fury_duration(cls, wtype, AnimRate, SIAS, WSM, IASItem, WIAS=0):
        """Fury uses the same five-swing frame pattern as zeal."""
        return cls.zeal_duration(wtype, AnimRate, SIAS, WSM, IASItem, WIAS=WIAS)
class Werebear(Transform):
    """Werebear form; transform animation token 'TG'."""
    ttype = 'TG'
class WolfDruid(Werewolf, Druid):
    """Druid shapeshifted into werewolf form (combines both mixins)."""
    pass
class WolfBarbarian(Werewolf, Barbarian):
    """Barbarian shapeshifted into werewolf form (combines both mixins)."""
    pass
class BearDruid(Werebear, Druid):
    """Druid shapeshifted into werebear form (combines both mixins)."""
    pass
class Sorceress(Character):
    """Sorceress; animation-data token 'SO'."""
    ctype = 'SO'
    # Start-up frames skipped on the first attack of a sequence, per weapon type.
    startframes = {'HTH': 1,
                   'BOW': 0,
                   '1HS': 2,
                   '1HT': 2,
                   'STF': 2,
                   '2HS': 2,
                   '2HT': 2, # see comment below on amazon start frames
                   'XBW': 0}
class Amazon(Character):
    """Amazon; animation-data token 'AM'. Adds strafe and fend timings."""
    ctype = 'AM'
    # Start-up frames skipped on the first attack of a sequence, per weapon type.
    startframes = {'HTH': 1,
                   'BOW': 0,
                   '1HS': 2,
                   '1HT': 2,
                   'STF': 2,
                   '2HS': 2,
                   '2HT': 2, # d2 factoids says this is 0, but results only agree with german calc if this is 2
                   'XBW': 0}
    # strafe is not quite matching the german calc or the amazon basin tables for crossbows (which dont agree themselves).
    # Main problem is that crossbows seem to have unequal length follow up frames. I don't care enough to figure this
    # out for now. Bow breakpoints seem fine.
    # Problems for 0 WSM XBW:
    # * Missing 14 and 68 IAS BPs due to unequal length follow up frames (both German and AB show this)
    # * Differ with AB at 30 IAS for one follow up frame (German does not have this BP)
    # * Differ in length of last attack at 32 IAS BP. German and AB both agree this is 11 frames, but then AB
    #   says the next BP is 12 frames (??). This calc returns 12 frames.
    # * Differ in one follow up frame at 75 IAS BP where German and AB agree
    # * Differ in last attack length at 152 IAS (German does not have this BP)
    @classmethod
    def strafe_duration(cls, wtype, AnimRate, SIAS, WSM, IASItem, WIAS=0):
        """Frame lengths of the ten strafe attacks.

        First swing has a floor of 5 frames, the eight follow-ups a floor of 3,
        and the last attack (full animation, rollback 78) a floor of 7.
        """
        if wtype=='XBW':
            warnings.warn("Crossbow strafe not completely accurate. See {} documentation.".format(cls.strafe_duration.__name__))
        return [max(cls.foreswing_duration('A1', wtype, AnimRate, SIAS, WSM, IASItem, WIAS=WIAS, first=True),5)] +\
               [max(cls.foreswing_duration('A1', wtype, AnimRate, SIAS, WSM, IASItem, WIAS=WIAS, rollback=50),3)]*8 +\
               [max(cls.anim_duration('A1', wtype, AnimRate, SIAS, WSM, IASItem, WIAS=WIAS, rollback=78),7)]
        #[anim_duration(16, 256, AnimRate, SIAS, WSM, IASItem, WIAS=WIAS, rollback=100)]
    @classmethod
    def fend_duration(cls, wtype, AnimRate, SIAS, WSM, IASItem, WIAS=0, ntargets=5):
        """Frame lengths of the fend attack sequence against *ntargets* targets."""
        # rollback values below were chosen to match german calculator. not all cases were tested though
        first = cls.foreswing_duration('A1', wtype, AnimRate, SIAS, WSM, IASItem, WIAS=WIAS, first=True)
        follow = cls.foreswing_duration('A1', wtype, AnimRate, SIAS, WSM, IASItem, WIAS=WIAS, rollback=60)
        last = cls.anim_duration('A1', wtype, AnimRate, SIAS, WSM, IASItem, WIAS=WIAS, rollback=75)
        if ntargets==1:
            # Single target: one swing that combines foreswing and full-animation tail.
            return [first+last-follow]
        else:
            return [first] + [follow]*max(0,ntargets-2) + [last]
        #return [cls.foreswing_duration('A1', wtype, AnimRate, SIAS, WSM, IASItem, WIAS=WIAS, first=True)] +\
        #       [cls.foreswing_duration('A1', wtype, AnimRate, SIAS, WSM, IASItem, WIAS=WIAS, rollback=60)]*8 +\
        #       [cls.anim_duration('A1', wtype, AnimRate, SIAS, WSM, IASItem, WIAS=WIAS, rollback=75)]
class Act1Merc(Character):
    """Act 1 mercenary; animation-data token 'RG'."""
    ctype = 'RG'
    aidelay = 2
class Act2Merc(Character):
    """Act 2 mercenary; animation-data token 'GU'.

    Only an HTH animation is defined for him, so the weapon-type argument is
    ignored by both attack and jab timing.
    """
    ctype = 'GU'
    JAB_DURATION = 14 # seems to be the right magic number
    aidelay = 2
    @classmethod
    def attack_duration(cls, wtype, AnimRate, SIAS, WSM, ias, WIAS=0):
        """Normal attack length; wtype is ignored (always uses the HTH animation)."""
        return [cls.anim_duration('A1', 'HTH', AnimRate, SIAS, WSM, ias, WIAS=WIAS)]
    @classmethod
    def jab_duration(cls, wtype, AnimRate, SIAS, WSM, ias, WIAS=0):
        """Jab length; wtype is ignored (only an HTH animation is defined)."""
        jab = anim_duration_seq(cls.JAB_DURATION, 256, AnimRate, SIAS, WSM, ias, WIAS=WIAS)
        return [jab, 0]
class Act3Merc(Character):
    """Act 3 mercenary; animation-data token 'IW'."""
    ctype = 'IW'
class Act5Merc(Character):
    """Act 5 mercenary; animation-data token '0A'."""
    ctype = '0A'
# constants related to accessing slash data
SLASH_URL = "https://armory.slashdiablo.net/retrieving/v1/character?name={}"
# Maps the class name found in the save header to the Character subclass to build.
SLASH_CLASS_MAP = {'Sorceress': Sorceress,
                   'Amazon': Amazon,
                   'Druid': Druid,
                   'Barbarian': Barbarian,
                   'Assassin': Assassin,
                   'Necromancer': Necromancer,
                   'Paladin': Paladin}
def chardata_from_slash(char_name):
    """Fetch character data for *char_name* from the slashdiablo armory.

    Returns the parsed JSON payload.

    Raises
    ------
    RuntimeError
        If the armory responds with an HTTP error (down or unknown character).
    """
    try:
        # Use the response as a context manager so the connection is closed
        # even if read() fails (the original leaked the HTTP response).
        with urllib.request.urlopen(SLASH_URL.format(char_name)) as response:
            contents = response.read()
    except urllib.error.HTTPError as e:
        raise RuntimeError("Could not find character {}. Armory down or missing character.".format(char_name)) from e
    return json.loads(contents)
def create_from_json(chardata):
    """Instantiate the matching Character subclass from armory JSON data."""
    try:
        header = chardata['character']['d2s']['header']
        charclass = header['class']
        char_name = header['name']
    except KeyError as e:
        logger.error("Problem accessing character data. JSON dump: {}".format(chardata))
        raise RuntimeError("Bad character data. Top level keys: {}".format(chardata.keys())) from e
    logger.debug("{} is a {}".format(char_name, charclass))
    return SLASH_CLASS_MAP[charclass](chardata)
def create_from_slash(char_name):
    """Fetch *char_name* from the slash armory and build the matching Character."""
    return create_from_json(chardata_from_slash(char_name))
| [
"json.loads",
"numpy.ceil",
"pydiablo.d2data.D2Data",
"numpy.floor",
"numpy.sum",
"numpy.nonzero",
"pydiablo.logger.logger.error",
"numpy.genfromtxt"
] | [((4926, 4992), 'pydiablo.d2data.D2Data', 'D2Data', (['"""data/global/excel/ItemStatCost.txt"""', '"""ID"""'], {'usecols': '[0, 1]'}), "('data/global/excel/ItemStatCost.txt', 'ID', usecols=[0, 1])\n", (4932, 4992), False, 'from pydiablo.d2data import D2Data\n'), ((14202, 14247), 'pydiablo.d2data.D2Data', 'D2Data', (['"""data/global/excel/Sets.txt"""', '"""index"""'], {}), "('data/global/excel/Sets.txt', 'index')\n", (14208, 14247), False, 'from pydiablo.d2data import D2Data\n'), ((16417, 16456), 'pydiablo.d2data.D2Data', 'D2Data', (['"""data2/SetItems2.txt"""', '"""set_id"""'], {}), "('data2/SetItems2.txt', 'set_id')\n", (16423, 16456), False, 'from pydiablo.d2data import D2Data\n'), ((16472, 16521), 'pydiablo.d2data.D2Data', 'D2Data', (['"""data/global/excel/SetItems.txt"""', '"""index"""'], {}), "('data/global/excel/SetItems.txt', 'index')\n", (16478, 16521), False, 'from pydiablo.d2data import D2Data\n'), ((37562, 37582), 'json.loads', 'json.loads', (['contents'], {}), '(contents)\n', (37572, 37582), False, 'import json\n'), ((4491, 4596), 'numpy.genfromtxt', 'np.genfromtxt', (['(D2Data.DATA_PATH + filename)'], {'delimiter': '"""\t"""', 'skip_header': '(1)', 'dtype': '"""int"""', 'encoding': 'None'}), "(D2Data.DATA_PATH + filename, delimiter='\\t', skip_header=1,\n dtype='int', encoding=None)\n", (4504, 4596), True, 'import numpy as np\n'), ((14439, 14490), 'numpy.sum', 'np.sum', (["(SetItem.setitems.data['set'] == self.set_id)"], {}), "(SetItem.setitems.data['set'] == self.set_id)\n", (14445, 14490), True, 'import numpy as np\n'), ((219, 254), 'numpy.floor', 'np.floor', (['(120.0 * ias / (120 + ias))'], {}), '(120.0 * ias / (120 + ias))\n', (227, 254), True, 'import numpy as np\n'), ((2065, 2086), 'numpy.nonzero', 'np.nonzero', (['framedata'], {}), '(framedata)\n', (2075, 2086), True, 'import numpy as np\n'), ((4352, 4464), 'numpy.genfromtxt', 'np.genfromtxt', (['(D2Data.DATA_PATH + filename)'], {'delimiter': '"""\t"""', 'names': '(True)', 'dtype': 'None', 
'usecols': '(0)', 'encoding': 'None'}), "(D2Data.DATA_PATH + filename, delimiter='\\t', names=True,\n dtype=None, usecols=0, encoding=None)\n", (4365, 4464), True, 'import numpy as np\n'), ((9065, 9126), 'pydiablo.logger.logger.error', 'logger.error', (['"""Random generation of stats not yet supported."""'], {}), "('Random generation of stats not yet supported.')\n", (9077, 9126), False, 'from pydiablo.logger import logger\n'), ((31452, 31492), 'numpy.floor', 'np.floor', (['(256.0 * frames_neutral / delay)'], {}), '(256.0 * frames_neutral / delay)\n', (31460, 31492), True, 'import numpy as np\n'), ((1423, 1461), 'numpy.ceil', 'np.ceil', (['(meias * 120.0 / (120 - meias))'], {}), '(meias * 120.0 / (120 - meias))\n', (1430, 1461), True, 'import numpy as np\n'), ((31388, 31439), 'numpy.floor', 'np.floor', (['((100.0 + WIAS - WSM) * char_speed / 100.0)'], {}), '((100.0 + WIAS - WSM) * char_speed / 100.0)\n', (31396, 31439), True, 'import numpy as np\n'), ((1087, 1131), 'numpy.floor', 'np.floor', (['(AnimSpeed * speed_increase / 100.0)'], {}), '(AnimSpeed * speed_increase / 100.0)\n', (1095, 1131), True, 'import numpy as np\n'), ((694, 738), 'numpy.floor', 'np.floor', (['(AnimSpeed * speed_increase / 100.0)'], {}), '(AnimSpeed * speed_increase / 100.0)\n', (702, 738), True, 'import numpy as np\n'), ((1044, 1082), 'numpy.ceil', 'np.ceil', (['(AnimLength * rollback / 100.0)'], {}), '(AnimLength * rollback / 100.0)\n', (1051, 1082), True, 'import numpy as np\n'), ((651, 689), 'numpy.ceil', 'np.ceil', (['(AnimLength * rollback / 100.0)'], {}), '(AnimLength * rollback / 100.0)\n', (658, 689), True, 'import numpy as np\n')] |
from __future__ import print_function
import sys
import os
import regreg.api as rr
import numpy as np
from selection.reduced_optimization.generative_model import generate_data, generate_data_random
from selection.reduced_optimization.initial_soln import instance
from selection.tests.instance import logistic_instance, gaussian_instance
def selection_nonrandomized(X, y, sigma=None, method="theoretical"):
    """Run (ridge-smoothed) Lasso selection on the pair (X, y).

    Parameters
    ----------
    X : ndarray of shape (n, p)
        Design matrix.
    y : ndarray of shape (n,)
        Response vector.
    sigma : float, optional
        Noise level used to scale the penalty; defaults to 1.
    method : str
        Only "theoretical" is supported for choosing the penalty level.

    Returns
    -------
    tuple (lam, epsilon, active, betaE, cube, initial_soln), or None when no
    variables are selected.

    Raises
    ------
    ValueError
        If *method* is not "theoretical". (Previously any other value fell
        through and crashed later with a NameError on ``lam``.)
    """
    n, p = X.shape
    loss = rr.glm.gaussian(X,y)
    epsilon = 1. / np.sqrt(n)
    lam_frac = 1.
    if sigma is None:
        sigma = 1.
    if method == "theoretical":
        # Theoretical lambda: expected maximum correlation of X with pure noise.
        lam = 1. * sigma * lam_frac * np.mean(np.fabs(np.dot(X.T, np.random.standard_normal((n, 10000)))).max(0))
    else:
        raise ValueError("unsupported method: {!r}".format(method))
    W = np.ones(p)*lam
    penalty = rr.group_lasso(np.arange(p), weights = dict(zip(np.arange(p), W)), lagrange=1.)
    # initial solution
    problem = rr.simple_problem(loss, penalty)
    random_term = rr.identity_quadratic(epsilon, 0, 0, 0)
    solve_args = {'tol': 1.e-10, 'min_its': 100, 'max_its': 500}
    initial_soln = problem.solve(random_term, **solve_args)
    active = (initial_soln != 0)
    if np.sum(active) == 0:
        # Nothing selected -- nothing to report.
        return None
    initial_grad = loss.smooth_objective(initial_soln, mode='grad')
    betaE = initial_soln[active]
    # KKT subgradient of the penalized objective at the solution.
    subgradient = -(initial_grad+epsilon*initial_soln)
    cube = subgradient[~active]/lam
    return lam, epsilon, active, betaE, cube, initial_soln
def lasso_selection(X,
                    y,
                    beta,
                    sigma):
    """Run Lasso selection on (X, y) and score naive (unadjusted) posterior
    intervals for the selected coefficients against the true signal.

    Parameters
    ----------
    X : ndarray of shape (n, p)
        Design matrix.
    y : ndarray of shape (n,)
        Response vector.
    beta : ndarray of shape (p,)
        True coefficient vector, used only to evaluate coverage and risk.
    sigma : float
        Noise standard deviation.

    Returns
    -------
    ndarray stacking [naive coverage, mean interval length, Bayes risk]
    averaged over the selected variables, or None when the Lasso selects
    nothing.
    """
    n,p = X.shape
    sel = selection_nonrandomized(X, y)
    if sel is not None:
        lam, epsilon, active, betaE, cube, initial_soln = sel
        lagrange = lam * np.ones(p)
        active_sign = np.sign(betaE)
        nactive = active.sum()
        print("number of selected variables by Lasso", nactive)
        print("initial soln", betaE)
        prior_variance = 1000.
        noise_variance = sigma**2
        # Projection onto the column space of the selected design X[:, active].
        projection_active = X[:, active].dot(np.linalg.inv(X[:, active].T.dot(X[:, active])))
        # Gaussian conjugate posterior pieces under the (flat-ish) normal prior.
        M_1 = prior_variance * (X.dot(X.T)) + noise_variance * np.identity(n)
        M_2 = prior_variance * ((X.dot(X.T)).dot(projection_active))
        M_3 = prior_variance * (projection_active.T.dot(X.dot(X.T)).dot(projection_active))
        post_mean = M_2.T.dot(np.linalg.inv(M_1)).dot(y)
        print("observed data", post_mean)
        post_var = M_3 - M_2.T.dot(np.linalg.inv(M_1)).dot(M_2)
        # 90% (+/-1.65 sd) credible intervals, ignoring the selection event.
        unadjusted_intervals = np.vstack([post_mean - 1.65 * (np.sqrt(post_var.diagonal())),
                                           post_mean + 1.65 * (np.sqrt(post_var.diagonal()))])
        print("unadjusted intervals", unadjusted_intervals)
        coverage_unad = np.zeros(nactive)
        unad_length = np.zeros(nactive)
        # Target: projection of the true mean onto the selected model.
        true_val = projection_active.T.dot(X.dot(beta))
        print("true value", true_val)
        for l in range(nactive):
            if (unadjusted_intervals[0, l] <= true_val[l]) and (true_val[l] <= unadjusted_intervals[1, l]):
                coverage_unad[l] += 1
            unad_length[l] = unadjusted_intervals[1, l] - unadjusted_intervals[0, l]
        naive_cov = coverage_unad.sum() / nactive
        unad_len = unad_length.sum() / nactive
        bayes_risk_unad = np.power(post_mean - true_val, 2.).sum() / nactive
        return np.vstack([naive_cov, unad_len, bayes_risk_unad])
    else:
        return None
if __name__ == "__main__":
    ### set parameters
    n = 200
    p = 1000
    ### GENERATE X
    niter = 50
    unad_cov = 0.
    unad_len = 0.
    unad_risk = 0.
    for i in range(niter):
        # Seed 0 every iteration: the design matrix X is identical across
        # iterations; only the response noise varies (reseeded below).
        np.random.seed(0)
        sample = instance(n=n, p=p, s=0, sigma=1., rho=0, snr=7.)
        ### GENERATE Y BASED ON SEED
        np.random.seed(i) # ensures different y
        #X, y, beta, nonzero, sigma = gaussian_instance()
        X, y, beta, nonzero, sigma = sample.generate_response()
        ### RUN LASSO AND TEST
        lasso = lasso_selection(X,
                                y,
                                beta,
                                sigma)
        if lasso is not None:
            # Accumulate coverage / length / risk only over informative runs.
            unad_cov += lasso[0,0]
            unad_len += lasso[1, 0]
            unad_risk += lasso[2,0]
            print("\n")
            print("cov", unad_cov)
            print("risk", unad_risk)
            print("iteration completed", i)
            print("\n")
    print("unadjusted coverage, lengths and risk", unad_cov/niter, unad_len/niter, unad_risk/niter)
| [
"regreg.api.simple_problem",
"numpy.identity",
"numpy.random.standard_normal",
"numpy.sqrt",
"numpy.ones",
"regreg.api.identity_quadratic",
"numpy.power",
"numpy.sum",
"regreg.api.glm.gaussian",
"numpy.zeros",
"numpy.linalg.inv",
"numpy.sign",
"numpy.vstack",
"numpy.random.seed",
"select... | [((439, 460), 'regreg.api.glm.gaussian', 'rr.glm.gaussian', (['X', 'y'], {}), '(X, y)\n', (454, 460), True, 'import regreg.api as rr\n'), ((852, 884), 'regreg.api.simple_problem', 'rr.simple_problem', (['loss', 'penalty'], {}), '(loss, penalty)\n', (869, 884), True, 'import regreg.api as rr\n'), ((903, 942), 'regreg.api.identity_quadratic', 'rr.identity_quadratic', (['epsilon', '(0)', '(0)', '(0)'], {}), '(epsilon, 0, 0, 0)\n', (924, 942), True, 'import regreg.api as rr\n'), ((479, 489), 'numpy.sqrt', 'np.sqrt', (['n'], {}), '(n)\n', (486, 489), True, 'import numpy as np\n'), ((704, 714), 'numpy.ones', 'np.ones', (['p'], {}), '(p)\n', (711, 714), True, 'import numpy as np\n'), ((748, 760), 'numpy.arange', 'np.arange', (['p'], {}), '(p)\n', (757, 760), True, 'import numpy as np\n'), ((1109, 1123), 'numpy.sum', 'np.sum', (['active'], {}), '(active)\n', (1115, 1123), True, 'import numpy as np\n'), ((1707, 1721), 'numpy.sign', 'np.sign', (['betaE'], {}), '(betaE)\n', (1714, 1721), True, 'import numpy as np\n'), ((2692, 2709), 'numpy.zeros', 'np.zeros', (['nactive'], {}), '(nactive)\n', (2700, 2709), True, 'import numpy as np\n'), ((2732, 2749), 'numpy.zeros', 'np.zeros', (['nactive'], {}), '(nactive)\n', (2740, 2749), True, 'import numpy as np\n'), ((3301, 3350), 'numpy.vstack', 'np.vstack', (['[naive_cov, unad_len, bayes_risk_unad]'], {}), '([naive_cov, unad_len, bayes_risk_unad])\n', (3310, 3350), True, 'import numpy as np\n'), ((3588, 3605), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (3602, 3605), True, 'import numpy as np\n'), ((3625, 3675), 'selection.reduced_optimization.initial_soln.instance', 'instance', ([], {'n': 'n', 'p': 'p', 's': '(0)', 'sigma': '(1.0)', 'rho': '(0)', 'snr': '(7.0)'}), '(n=n, p=p, s=0, sigma=1.0, rho=0, snr=7.0)\n', (3633, 3675), False, 'from selection.reduced_optimization.initial_soln import instance\n'), ((3722, 3739), 'numpy.random.seed', 'np.random.seed', (['i'], {}), '(i)\n', (3736, 3739), True, 
'import numpy as np\n'), ((1674, 1684), 'numpy.ones', 'np.ones', (['p'], {}), '(p)\n', (1681, 1684), True, 'import numpy as np\n'), ((2078, 2092), 'numpy.identity', 'np.identity', (['n'], {}), '(n)\n', (2089, 2092), True, 'import numpy as np\n'), ((781, 793), 'numpy.arange', 'np.arange', (['p'], {}), '(p)\n', (790, 793), True, 'import numpy as np\n'), ((2284, 2302), 'numpy.linalg.inv', 'np.linalg.inv', (['M_1'], {}), '(M_1)\n', (2297, 2302), True, 'import numpy as np\n'), ((3234, 3269), 'numpy.power', 'np.power', (['(post_mean - true_val)', '(2.0)'], {}), '(post_mean - true_val, 2.0)\n', (3242, 3269), True, 'import numpy as np\n'), ((2390, 2408), 'numpy.linalg.inv', 'np.linalg.inv', (['M_1'], {}), '(M_1)\n', (2403, 2408), True, 'import numpy as np\n'), ((647, 684), 'numpy.random.standard_normal', 'np.random.standard_normal', (['(n, 10000)'], {}), '((n, 10000))\n', (672, 684), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
#---------------------Import coordinate file-------------------------#
f_x = 'simple_bulk/img/subdataset1_geometry/x.txt'
f_l = 'simple_bulk/img/subdataset1_geometry/l.txt'
x = np.loadtxt(f_x, dtype = int)  # per-row band centers, one row per column geometry
l = np.loadtxt(f_l, dtype = int)  # per-row band half-lengths, same shape as x
#-------------------Column Parameters---------------------------------#
L = 40 # length of column
w = 5 # width of column
def img_gen(L,w,x,l,ii):
    """Rasterize one column geometry into a boolean image of shape (L, w).

    Parameters
    ----------
    L, w : int
        Image height (rows) and width (columns).
    x, l : 2-D integer arrays
        Row ``ii`` holds, for each image row, the solid band's center (x)
        and half-width (l).
    ii : int
        Index of the column geometry to rasterize.

    Returns
    -------
    ndarray of bool, shape (L, w)
    """
    # (The original redundantly reassigned x = x and l = l here.)
    img = np.zeros((L,w), dtype = bool)
    # Top and bottom rows are always fully solid.
    img[0,0:w] = 1
    img[-1,0:w] = 1
    # Interior rows: solid band centered at x[ii][jj], half-width l[ii][jj].
    for jj in range(1,L-1):
        img[jj,x[ii][jj]-l[ii][jj]:x[ii][jj]+l[ii][jj]] = 1
    return img
# Rasterize every column geometry row into one image. NOTE: x.shape is a
# tuple, so the original range(0, x.shape) raised TypeError -- iterate the
# row count x.shape[0] instead.
# img output, save as array of images if want to convert to graph
img = np.asarray([img_gen(L,w,x,l,ii) for ii in range(x.shape[0])])
np.save('subdataset1/img/img.npy',img)
| [
"numpy.loadtxt",
"numpy.asarray",
"numpy.zeros",
"numpy.save"
] | [((237, 263), 'numpy.loadtxt', 'np.loadtxt', (['f_x'], {'dtype': 'int'}), '(f_x, dtype=int)\n', (247, 263), True, 'import numpy as np\n'), ((272, 298), 'numpy.loadtxt', 'np.loadtxt', (['f_l'], {'dtype': 'int'}), '(f_l, dtype=int)\n', (282, 298), True, 'import numpy as np\n'), ((1007, 1022), 'numpy.asarray', 'np.asarray', (['img'], {}), '(img)\n', (1017, 1022), True, 'import numpy as np\n'), ((1026, 1065), 'numpy.save', 'np.save', (['"""subdataset1/img/img.npy"""', 'img'], {}), "('subdataset1/img/img.npy', img)\n", (1033, 1065), True, 'import numpy as np\n'), ((679, 707), 'numpy.zeros', 'np.zeros', (['(L, w)'], {'dtype': 'bool'}), '((L, w), dtype=bool)\n', (687, 707), True, 'import numpy as np\n')] |
# https://deeplearningcourses.com/c/deep-reinforcement-learning-in-python
# https://www.udemy.com/deep-reinforcement-learning-in-python
from __future__ import print_function, division
from builtins import range
# Note: you may need to update your version of future
# sudo pip install -U future
import gym
import os
import sys
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from gym import wrappers
from datetime import datetime
from q_learning import plot_running_avg, FeatureTransformer, plot_cost_to_go
# so you can test different architectures
class HiddenLayer:
  """One dense layer: forward(X) = f(matmul(X, W) [+ b])."""
  def __init__(self, M1, M2, f=tf.nn.tanh, use_bias=True, zeros=False):
    # Output layers that should start by predicting 0 get all-zero weights;
    # everything else gets a scaled random-normal init.
    if zeros:
      W = np.zeros((M1, M2), dtype=np.float32)
    else:
      scale = np.sqrt(2. / M1, dtype=np.float32)
      W = tf.random.normal(shape=(M1, M2)) * scale
    self.W = tf.Variable(W)
    self.use_bias = use_bias
    if use_bias:
      self.b = tf.Variable(np.zeros(M2).astype(np.float32))
    self.f = f
  def forward(self, X):
    """Apply the layer to a batch of inputs X."""
    Z = tf.matmul(X, self.W)
    if self.use_bias:
      Z = Z + self.b
    return self.f(Z)
# approximates pi(a | s)
class PolicyModel:
  """Gaussian policy network for a 1-D continuous action, pi(a | s).

  Builds a TF1-style static graph; the action is sampled from
  Normal(mean(s), stdv(s)) and clipped to [-1, 1]. Call set_session()
  before partial_fit()/predict().

  NOTE(review): tf.contrib was removed in TF2; this code requires a TF1
  runtime (or tfp as a replacement for tf.contrib.distributions).
  """
  def __init__(self, D, ft, hidden_layer_sizes=[]):
    """D: input dimension after the feature transform; ft: feature transformer.

    NOTE(review): the mutable default for hidden_layer_sizes is shared across
    calls; harmless here because it is only iterated, never mutated.
    """
    self.ft = ft
    ##### hidden layers #####
    M1 = D
    self.hidden_layers = []
    for M2 in hidden_layer_sizes:
      layer = HiddenLayer(M1, M2)
      self.hidden_layers.append(layer)
      M1 = M2
    # final layer mean
    self.mean_layer = HiddenLayer(M1, 1, lambda x: x, use_bias=False, zeros=True)
    # final layer variance
    self.stdv_layer = HiddenLayer(M1, 1, tf.nn.softplus, use_bias=False, zeros=False)
    # inputs and targets
    self.X = tf.compat.v1.placeholder(tf.float32, shape=(None, D), name='X')
    self.actions = tf.compat.v1.placeholder(tf.float32, shape=(None,), name='actions')
    self.advantages = tf.compat.v1.placeholder(tf.float32, shape=(None,), name='advantages')
    # get final hidden layer
    Z = self.X
    for layer in self.hidden_layers:
      Z = layer.forward(Z)
    # calculate output and cost
    mean = self.mean_layer.forward(Z)
    stdv = self.stdv_layer.forward(Z) + 1e-5 # smoothing
    # make them 1-D
    mean = tf.reshape(mean, [-1])
    stdv = tf.reshape(stdv, [-1])
    norm = tf.contrib.distributions.Normal(mean, stdv)
    self.predict_op = tf.clip_by_value(norm.sample(), -1, 1)
    log_probs = norm.log_prob(self.actions)
    # Policy-gradient loss with an entropy bonus (0.1) for exploration.
    cost = -tf.reduce_sum(input_tensor=self.advantages * log_probs + 0.1*norm.entropy())
    self.train_op = tf.compat.v1.train.AdamOptimizer(1e-3).minimize(cost)
  def set_session(self, session):
    """Attach the TF session used by partial_fit/predict."""
    self.session = session
  def partial_fit(self, X, actions, advantages):
    """One policy-gradient step on a batch of (state, action, advantage)."""
    X = np.atleast_2d(X)
    X = self.ft.transform(X)
    actions = np.atleast_1d(actions)
    advantages = np.atleast_1d(advantages)
    self.session.run(
      self.train_op,
      feed_dict={
        self.X: X,
        self.actions: actions,
        self.advantages: advantages,
      }
    )
  def predict(self, X):
    """Sample clipped actions for raw states X (feature transform applied)."""
    X = np.atleast_2d(X)
    X = self.ft.transform(X)
    return self.session.run(self.predict_op, feed_dict={self.X: X})
  def sample_action(self, X):
    """Sample a single action for a single state."""
    p = self.predict(X)[0]
    return p
# approximates V(s)
class ValueModel:
  """Feedforward approximator for the state value V(s), trained by regression.

  Builds a TF1-style static graph; call set_session() before training.
  """
  def __init__(self, D, ft, hidden_layer_sizes=[]):
    """D: input dimension after the feature transform; ft: feature transformer.

    NOTE(review): the mutable default for hidden_layer_sizes is shared across
    calls; harmless here because it is only iterated, never mutated.
    """
    self.ft = ft
    self.costs = []
    # create the graph
    self.layers = []
    M1 = D
    for M2 in hidden_layer_sizes:
      layer = HiddenLayer(M1, M2)
      self.layers.append(layer)
      M1 = M2
    # final layer
    layer = HiddenLayer(M1, 1, lambda x: x)
    self.layers.append(layer)
    # inputs and targets
    self.X = tf.compat.v1.placeholder(tf.float32, shape=(None, D), name='X')
    self.Y = tf.compat.v1.placeholder(tf.float32, shape=(None,), name='Y')
    # calculate output and cost
    Z = self.X
    for layer in self.layers:
      Z = layer.forward(Z)
    Y_hat = tf.reshape(Z, [-1]) # the output
    self.predict_op = Y_hat
    # Squared-error regression loss against the TD targets.
    cost = tf.reduce_sum(input_tensor=tf.square(self.Y - Y_hat))
    self.cost = cost
    self.train_op = tf.compat.v1.train.AdamOptimizer(1e-1).minimize(cost)
  def set_session(self, session):
    """Attach the TF session used by partial_fit/predict."""
    self.session = session
  def partial_fit(self, X, Y):
    """One gradient step toward targets Y; records the post-step cost."""
    X = np.atleast_2d(X)
    X = self.ft.transform(X)
    Y = np.atleast_1d(Y)
    self.session.run(self.train_op, feed_dict={self.X: X, self.Y: Y})
    cost = self.session.run(self.cost, feed_dict={self.X: X, self.Y: Y})
    self.costs.append(cost)
  def predict(self, X):
    """Return V(s) estimates for raw states X (feature transform applied)."""
    X = np.atleast_2d(X)
    X = self.ft.transform(X)
    return self.session.run(self.predict_op, feed_dict={self.X: X})
def play_one_td(env, pmodel, vmodel, gamma):
  """Play one episode, doing a TD(0) actor-critic update after every step.

  Returns (total_reward, num_steps). The loop is capped at 2000 steps so a
  policy that never terminates cannot spin forever (the env's own 200-step
  limit tends to cut episodes too early).
  """
  observation = env.reset()
  totalreward = 0
  iters = 0
  done = False
  while not done and iters < 2000:
    action = pmodel.sample_action(observation)
    prev_observation = observation
    observation, reward, done, info = env.step([action])
    totalreward += reward
    # One-step TD target and advantage for the update.
    G = reward + gamma * vmodel.predict(observation)
    advantage = G - vmodel.predict(prev_observation)
    pmodel.partial_fit(prev_observation, action, advantage)
    vmodel.partial_fit(prev_observation, G)
    iters += 1
  return totalreward, iters
def main():
  """Train the continuous-action actor-critic on MountainCarContinuous-v0.

  Runs 50 episodes, printing per-episode rewards, then plots the reward
  curve, its running average, and the learned cost-to-go surface.
  """
  env = gym.make('MountainCarContinuous-v0')
  ft = FeatureTransformer(env, n_components=100)
  D = ft.dimensions
  pmodel = PolicyModel(D, ft, [])
  vmodel = ValueModel(D, ft, [])
  init = tf.compat.v1.global_variables_initializer()
  session = tf.compat.v1.InteractiveSession()
  session.run(init)
  pmodel.set_session(session)
  vmodel.set_session(session)
  gamma = 0.95
  # Record video/stats when the script is launched with a 'monitor' argument.
  if 'monitor' in sys.argv:
    filename = os.path.basename(__file__).split('.')[0]
    monitor_dir = './' + filename + '_' + str(datetime.now())
    env = wrappers.Monitor(env, monitor_dir)
  N = 50
  totalrewards = np.empty(N)
  costs = np.empty(N)  # NOTE(review): allocated but never filled/used
  for n in range(N):
    totalreward, num_steps = play_one_td(env, pmodel, vmodel, gamma)
    totalrewards[n] = totalreward
    if n % 1 == 0:
      print("episode:", n, "total reward: %.1f" % totalreward, "num steps: %d" % num_steps, "avg reward (last 100): %.1f" % totalrewards[max(0, n-100):(n+1)].mean())
  print("avg reward for last 100 episodes:", totalrewards[-100:].mean())
  plt.plot(totalrewards)
  plt.title("Rewards")
  plt.show()
  plot_running_avg(totalrewards)
  plot_cost_to_go(env, vmodel)
# Run the training demo when executed as a script.
if __name__ == '__main__':
  main()
| [
"numpy.sqrt",
"tensorflow.contrib.distributions.Normal",
"tensorflow.compat.v1.train.AdamOptimizer",
"builtins.range",
"gym.wrappers.Monitor",
"gym.make",
"tensorflow.compat.v1.global_variables_initializer",
"tensorflow.compat.v1.placeholder",
"q_learning.FeatureTransformer",
"numpy.atleast_2d",
... | [((5357, 5393), 'gym.make', 'gym.make', (['"""MountainCarContinuous-v0"""'], {}), "('MountainCarContinuous-v0')\n", (5365, 5393), False, 'import gym\n'), ((5401, 5442), 'q_learning.FeatureTransformer', 'FeatureTransformer', (['env'], {'n_components': '(100)'}), '(env, n_components=100)\n', (5419, 5442), False, 'from q_learning import plot_running_avg, FeatureTransformer, plot_cost_to_go\n'), ((5539, 5582), 'tensorflow.compat.v1.global_variables_initializer', 'tf.compat.v1.global_variables_initializer', ([], {}), '()\n', (5580, 5582), True, 'import tensorflow as tf\n'), ((5595, 5628), 'tensorflow.compat.v1.InteractiveSession', 'tf.compat.v1.InteractiveSession', ([], {}), '()\n', (5626, 5628), True, 'import tensorflow as tf\n'), ((5943, 5954), 'numpy.empty', 'np.empty', (['N'], {}), '(N)\n', (5951, 5954), True, 'import numpy as np\n'), ((5965, 5976), 'numpy.empty', 'np.empty', (['N'], {}), '(N)\n', (5973, 5976), True, 'import numpy as np\n'), ((5988, 5996), 'builtins.range', 'range', (['N'], {}), '(N)\n', (5993, 5996), False, 'from builtins import range\n'), ((6363, 6385), 'matplotlib.pyplot.plot', 'plt.plot', (['totalrewards'], {}), '(totalrewards)\n', (6371, 6385), True, 'import matplotlib.pyplot as plt\n'), ((6388, 6408), 'matplotlib.pyplot.title', 'plt.title', (['"""Rewards"""'], {}), "('Rewards')\n", (6397, 6408), True, 'import matplotlib.pyplot as plt\n'), ((6411, 6421), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6419, 6421), True, 'import matplotlib.pyplot as plt\n'), ((6425, 6455), 'q_learning.plot_running_avg', 'plot_running_avg', (['totalrewards'], {}), '(totalrewards)\n', (6441, 6455), False, 'from q_learning import plot_running_avg, FeatureTransformer, plot_cost_to_go\n'), ((6458, 6486), 'q_learning.plot_cost_to_go', 'plot_cost_to_go', (['env', 'vmodel'], {}), '(env, vmodel)\n', (6473, 6486), False, 'from q_learning import plot_running_avg, FeatureTransformer, plot_cost_to_go\n'), ((833, 847), 'tensorflow.Variable', 'tf.Variable', 
(['W'], {}), '(W)\n', (844, 847), True, 'import tensorflow as tf\n'), ((1685, 1748), 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', (['tf.float32'], {'shape': '(None, D)', 'name': '"""X"""'}), "(tf.float32, shape=(None, D), name='X')\n", (1709, 1748), True, 'import tensorflow as tf\n'), ((1768, 1835), 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', (['tf.float32'], {'shape': '(None,)', 'name': '"""actions"""'}), "(tf.float32, shape=(None,), name='actions')\n", (1792, 1835), True, 'import tensorflow as tf\n'), ((1858, 1928), 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', (['tf.float32'], {'shape': '(None,)', 'name': '"""advantages"""'}), "(tf.float32, shape=(None,), name='advantages')\n", (1882, 1928), True, 'import tensorflow as tf\n'), ((2198, 2220), 'tensorflow.reshape', 'tf.reshape', (['mean', '[-1]'], {}), '(mean, [-1])\n', (2208, 2220), True, 'import tensorflow as tf\n'), ((2232, 2254), 'tensorflow.reshape', 'tf.reshape', (['stdv', '[-1]'], {}), '(stdv, [-1])\n', (2242, 2254), True, 'import tensorflow as tf\n'), ((2268, 2311), 'tensorflow.contrib.distributions.Normal', 'tf.contrib.distributions.Normal', (['mean', 'stdv'], {}), '(mean, stdv)\n', (2299, 2311), True, 'import tensorflow as tf\n'), ((2701, 2717), 'numpy.atleast_2d', 'np.atleast_2d', (['X'], {}), '(X)\n', (2714, 2717), True, 'import numpy as np\n'), ((2766, 2788), 'numpy.atleast_1d', 'np.atleast_1d', (['actions'], {}), '(actions)\n', (2779, 2788), True, 'import numpy as np\n'), ((2806, 2831), 'numpy.atleast_1d', 'np.atleast_1d', (['advantages'], {}), '(advantages)\n', (2819, 2831), True, 'import numpy as np\n'), ((3027, 3043), 'numpy.atleast_2d', 'np.atleast_2d', (['X'], {}), '(X)\n', (3040, 3043), True, 'import numpy as np\n'), ((3643, 3706), 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', (['tf.float32'], {'shape': '(None, D)', 'name': '"""X"""'}), "(tf.float32, shape=(None, D), name='X')\n", (3667, 3706), True, 'import tensorflow 
as tf\n'), ((3720, 3781), 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', (['tf.float32'], {'shape': '(None,)', 'name': '"""Y"""'}), "(tf.float32, shape=(None,), name='Y')\n", (3744, 3781), True, 'import tensorflow as tf\n'), ((3899, 3918), 'tensorflow.reshape', 'tf.reshape', (['Z', '[-1]'], {}), '(Z, [-1])\n', (3909, 3918), True, 'import tensorflow as tf\n'), ((4223, 4239), 'numpy.atleast_2d', 'np.atleast_2d', (['X'], {}), '(X)\n', (4236, 4239), True, 'import numpy as np\n'), ((4277, 4293), 'numpy.atleast_1d', 'np.atleast_1d', (['Y'], {}), '(Y)\n', (4290, 4293), True, 'import numpy as np\n'), ((4498, 4514), 'numpy.atleast_2d', 'np.atleast_2d', (['X'], {}), '(X)\n', (4511, 4514), True, 'import numpy as np\n'), ((5881, 5915), 'gym.wrappers.Monitor', 'wrappers.Monitor', (['env', 'monitor_dir'], {}), '(env, monitor_dir)\n', (5897, 5915), False, 'from gym import wrappers\n'), ((693, 729), 'numpy.zeros', 'np.zeros', (['(M1, M2)'], {'dtype': 'np.float32'}), '((M1, M2), dtype=np.float32)\n', (701, 729), True, 'import numpy as np\n'), ((1078, 1098), 'tensorflow.matmul', 'tf.matmul', (['X', 'self.W'], {}), '(X, self.W)\n', (1087, 1098), True, 'import tensorflow as tf\n'), ((750, 782), 'tensorflow.random.normal', 'tf.random.normal', ([], {'shape': '(M1, M2)'}), '(shape=(M1, M2))\n', (766, 782), True, 'import tensorflow as tf\n'), ((785, 820), 'numpy.sqrt', 'np.sqrt', (['(2.0 / M1)'], {'dtype': 'np.float32'}), '(2.0 / M1, dtype=np.float32)\n', (792, 820), True, 'import numpy as np\n'), ((1028, 1048), 'tensorflow.matmul', 'tf.matmul', (['X', 'self.W'], {}), '(X, self.W)\n', (1037, 1048), True, 'import tensorflow as tf\n'), ((2527, 2566), 'tensorflow.compat.v1.train.AdamOptimizer', 'tf.compat.v1.train.AdamOptimizer', (['(0.001)'], {}), '(0.001)\n', (2559, 2566), True, 'import tensorflow as tf\n'), ((3999, 4024), 'tensorflow.square', 'tf.square', (['(self.Y - Y_hat)'], {}), '(self.Y - Y_hat)\n', (4008, 4024), True, 'import tensorflow as tf\n'), ((4067, 4104), 
'tensorflow.compat.v1.train.AdamOptimizer', 'tf.compat.v1.train.AdamOptimizer', (['(0.1)'], {}), '(0.1)\n', (4099, 4104), True, 'import tensorflow as tf\n'), ((5855, 5869), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (5867, 5869), False, 'from datetime import datetime\n'), ((5768, 5794), 'os.path.basename', 'os.path.basename', (['__file__'], {}), '(__file__)\n', (5784, 5794), False, 'import os\n'), ((922, 934), 'numpy.zeros', 'np.zeros', (['M2'], {}), '(M2)\n', (930, 934), True, 'import numpy as np\n')] |
"""
PipelineLoader accepting a DataFrame as input.
"""
from functools import partial
from interface import implements
from numpy import (
ix_,
zeros,
)
from pandas import (
DataFrame,
DatetimeIndex,
Index,
Int64Index,
)
from zipline.lib.adjusted_array import AdjustedArray
from zipline.lib.adjustment import make_adjustment_from_labels
from zipline.utils.numpy_utils import as_column
from .base import PipelineLoader
# Required column order for the `adjustments` DataFrame accepted by
# DataFrameLoader; rows are destructured positionally in this order inside
# DataFrameLoader.format_adjustments.
ADJUSTMENT_COLUMNS = Index([
    'sid',
    'value',
    'kind',
    'start_date',
    'end_date',
    'apply_date',
])
class DataFrameLoader(implements(PipelineLoader)):
    """
    A PipelineLoader that reads its input from DataFrames.
    Mostly useful for testing, but can also be used for real work if your data
    fits in memory.
    Parameters
    ----------
    column : zipline.pipeline.data.BoundColumn
        The column whose data is loadable by this loader.
    baseline : pandas.DataFrame
        A DataFrame with index of type DatetimeIndex and columns of type
        Int64Index. Dates should be labelled with the first date on which a
        value would be **available** to an algorithm. This means that OHLCV
        data should generally be shifted back by a trading day before being
        supplied to this class.
    adjustments : pandas.DataFrame, default=None
        A DataFrame with the following columns:
            sid : int
            value : any
            kind : int (zipline.pipeline.loaders.frame.ADJUSTMENT_TYPES)
            start_date : datetime64 (can be NaT)
            end_date : datetime64 (must be set)
            apply_date : datetime64 (must be set)
        The default of None is interpreted as "no adjustments to the baseline".
    """
    def __init__(self, column, baseline, adjustments=None):
        self.column = column
        # Keep raw values coerced to the column's dtype; the row/column labels
        # are stored separately for indexer lookups in load_adjusted_array.
        self.baseline = baseline.values.astype(self.column.dtype)
        self.dates = baseline.index
        self.assets = baseline.columns
        if adjustments is None:
            # No adjustments supplied: use an empty frame with the expected
            # columns so the attribute extraction below still works.
            adjustments = DataFrame(
                index=DatetimeIndex([]),
                columns=ADJUSTMENT_COLUMNS,
            )
        else:
            # Ensure that columns are in the correct order.
            adjustments = adjustments.reindex(ADJUSTMENT_COLUMNS, axis=1)
            # Sort so format_adjustments can group consecutive rows that share
            # an apply_date.
            adjustments.sort_values(['apply_date', 'sid'], inplace=True)
        self.adjustments = adjustments
        # Pre-extract typed columns used for fast boolean masking in
        # format_adjustments.
        self.adjustment_apply_dates = DatetimeIndex(adjustments.apply_date)
        self.adjustment_end_dates = DatetimeIndex(adjustments.end_date)
        self.adjustment_sids = Int64Index(adjustments.sid)
    def format_adjustments(self, dates, assets):
        """
        Build a dict of Adjustment objects in the format expected by
        AdjustedArray.
        Returns a dict of the form:
        {
            # Integer index into `dates` for the date on which we should
            # apply the list of adjustments.
            1 : [
                Float64Multiply(first_row=2, last_row=4, col=3, value=0.5),
                Float64Overwrite(first_row=3, last_row=5, col=1, value=2.0),
                ...
            ],
            ...
        }
        """
        make_adjustment = partial(make_adjustment_from_labels, dates, assets)
        min_date, max_date = dates[[0, -1]]
        # TODO: Consider porting this to Cython.
        if len(self.adjustments) == 0:
            return {}
        # Mask for adjustments whose apply_dates are in the requested window of
        # dates.
        date_bounds = self.adjustment_apply_dates.slice_indexer(
            min_date,
            max_date,
        )
        dates_filter = zeros(len(self.adjustments), dtype='bool')
        dates_filter[date_bounds] = True
        # Ignore adjustments whose apply_date is in range, but whose end_date
        # is out of range.
        dates_filter &= (self.adjustment_end_dates >= min_date)
        # Mask for adjustments whose sids are in the requested assets.
        sids_filter = self.adjustment_sids.isin(assets.values)
        adjustments_to_use = self.adjustments.loc[
            dates_filter & sids_filter
        ].set_index('apply_date')
        # For each apply_date on which we have an adjustment, compute
        # the integer index of that adjustment's apply_date in `dates`.
        # Then build a list of Adjustment objects for that apply_date.
        # This logic relies on the sorting applied in __init__.
        out = {}
        previous_apply_date = object()
        for row in adjustments_to_use.itertuples():
            # This expansion depends on the ordering of the DataFrame columns,
            # defined above (ADJUSTMENT_COLUMNS, minus apply_date which is the
            # index here).
            apply_date, sid, value, kind, start_date, end_date = row
            if apply_date != previous_apply_date:
                # Get the next apply date if no exact match.
                row_loc = dates.get_loc(apply_date, method='bfill')
                current_date_adjustments = out[row_loc] = []
                previous_apply_date = apply_date
            # Look up the appropriate Adjustment constructor based on the value
            # of `kind`.
            current_date_adjustments.append(
                make_adjustment(start_date, end_date, sid, kind, value)
            )
        return out
    def load_adjusted_array(self, domain, columns, dates, sids, mask):
        """
        Load data from our stored baseline.
        """
        if len(columns) != 1:
            raise ValueError(
                "Can't load multiple columns with DataFrameLoader"
            )
        column = columns[0]
        self._validate_input_column(column)
        # Translate requested labels to integer positions; -1 marks a miss.
        date_indexer = self.dates.get_indexer(dates)
        assets_indexer = self.assets.get_indexer(sids)
        # Boolean arrays with True on matched entries
        good_dates = (date_indexer != -1)
        good_assets = (assets_indexer != -1)
        data = self.baseline[ix_(date_indexer, assets_indexer)]
        mask = (good_assets & as_column(good_dates)) & mask
        # Mask out requested columns/rows that didn't match.
        data[~mask] = column.missing_value
        return {
            column: AdjustedArray(
                # Pull out requested columns/rows from our baseline data.
                data=data,
                adjustments=self.format_adjustments(dates, sids),
                missing_value=column.missing_value,
            ),
        }
    def _validate_input_column(self, column):
        """Make sure a passed column is our column.
        """
        if column != self.column and column.unspecialize() != self.column:
            raise ValueError("Can't load unknown column %s" % column)
| [
"pandas.DatetimeIndex",
"zipline.utils.numpy_utils.as_column",
"numpy.ix_",
"pandas.Index",
"functools.partial",
"interface.implements",
"pandas.Int64Index"
] | [((464, 535), 'pandas.Index', 'Index', (["['sid', 'value', 'kind', 'start_date', 'end_date', 'apply_date']"], {}), "(['sid', 'value', 'kind', 'start_date', 'end_date', 'apply_date'])\n", (469, 535), False, 'from pandas import DataFrame, DatetimeIndex, Index, Int64Index\n'), ((587, 613), 'interface.implements', 'implements', (['PipelineLoader'], {}), '(PipelineLoader)\n', (597, 613), False, 'from interface import implements\n'), ((2438, 2475), 'pandas.DatetimeIndex', 'DatetimeIndex', (['adjustments.apply_date'], {}), '(adjustments.apply_date)\n', (2451, 2475), False, 'from pandas import DataFrame, DatetimeIndex, Index, Int64Index\n'), ((2512, 2547), 'pandas.DatetimeIndex', 'DatetimeIndex', (['adjustments.end_date'], {}), '(adjustments.end_date)\n', (2525, 2547), False, 'from pandas import DataFrame, DatetimeIndex, Index, Int64Index\n'), ((2579, 2606), 'pandas.Int64Index', 'Int64Index', (['adjustments.sid'], {}), '(adjustments.sid)\n', (2589, 2606), False, 'from pandas import DataFrame, DatetimeIndex, Index, Int64Index\n'), ((3196, 3247), 'functools.partial', 'partial', (['make_adjustment_from_labels', 'dates', 'assets'], {}), '(make_adjustment_from_labels, dates, assets)\n', (3203, 3247), False, 'from functools import partial\n'), ((5907, 5940), 'numpy.ix_', 'ix_', (['date_indexer', 'assets_indexer'], {}), '(date_indexer, assets_indexer)\n', (5910, 5940), False, 'from numpy import ix_, zeros\n'), ((5972, 5993), 'zipline.utils.numpy_utils.as_column', 'as_column', (['good_dates'], {}), '(good_dates)\n', (5981, 5993), False, 'from zipline.utils.numpy_utils import as_column\n'), ((2062, 2079), 'pandas.DatetimeIndex', 'DatetimeIndex', (['[]'], {}), '([])\n', (2075, 2079), False, 'from pandas import DataFrame, DatetimeIndex, Index, Int64Index\n')] |
# -*- encoding:utf-8 -*-
# Copyright (c) Alibaba, Inc. and its affiliates.
import logging
import time
import numpy as np
import tensorflow as tf
from easy_rec.python.input.input import Input
from easy_rec.python.utils import odps_util
from easy_rec.python.utils.tf_utils import get_tf_type
try:
import common_io
except Exception:
common_io = None
try:
from datahub import DataHub
from datahub.exceptions import DatahubException
from datahub.models import RecordType
from datahub.models import CursorType
except Exception:
logging.warning(
'DataHub is not installed. You can install it by: pip install pydatahub')
DataHub = None
class DataHubInput(Input):
  """Input that streams tuple records from an Aliyun DataHub topic.

  Could run at local or on data science (PAI). Records are read shard by
  shard and yielded as fixed-size batches through a tf.data generator.
  """

  def __init__(self,
               data_config,
               feature_config,
               datahub_config,
               task_index=0,
               task_num=1,
               check_mode=False):
    super(DataHubInput, self).__init__(data_config, feature_config, '',
                                       task_index, task_num, check_mode)
    if DataHub is None:
      # The install hint must be part of the message itself: the original
      # passed it as a second positional argument, which logging treats as a
      # %-format value and silently drops because the message has no
      # placeholder.
      logging.error(
          'please install datahub: pip install pydatahub; Python 3.6 recommended'
      )
    try:
      self._datahub_config = datahub_config
      self._datahub = DataHub(self._datahub_config.akId,
                              self._datahub_config.akSecret,
                              self._datahub_config.region)
      self._num_epoch = 0
    except Exception as ex:
      # Construction stays best-effort (as before): a missing or malformed
      # datahub_config is logged and the instance is left partially built.
      logging.info('exception in init datahub: %s', ex)

  def _parse_record(self, *fields):
    """Map a flat tuple of column tensors to {input_name: tensor}."""
    fields = list(fields)
    inputs = {self._input_fields[x]: fields[x] for x in self._effective_fids}
    for x in self._label_fids:
      inputs[self._input_fields[x]] = fields[x]
    return inputs

  def _datahub_generator(self):
    """Yield column-major numpy batches read from all topic shards."""
    logging.info('start epoch[%d]' % self._num_epoch)
    self._num_epoch += 1
    odps_util.check_input_field_and_types(self._data_config)
    # Per-column default values, used when a record field is empty/Null.
    record_defaults = [
        self.get_type_defaults(x, v)
        for x, v in zip(self._input_field_types, self._input_field_defaults)
    ]
    # Template batch pre-filled with defaults; copied for every yielded batch.
    batch_defaults = [
        np.array([x] * self._data_config.batch_size) for x in record_defaults
    ]
    try:
      self._datahub.wait_shards_ready(self._datahub_config.project,
                                      self._datahub_config.topic)
      topic_result = self._datahub.get_topic(self._datahub_config.project,
                                             self._datahub_config.topic)
      if topic_result.record_type != RecordType.TUPLE:
        logging.error('topic type illegal !')
      record_schema = topic_result.record_schema
      shard_result = self._datahub.list_shard(self._datahub_config.project,
                                              self._datahub_config.topic)
      shards = shard_result.shards
      for shard in shards:
        shard_id = shard._shard_id
        # Always start reading from the oldest record of the shard.
        cursor_result = self._datahub.get_cursor(self._datahub_config.project,
                                                 self._datahub_config.topic,
                                                 shard_id, CursorType.OLDEST)
        cursor = cursor_result.cursor
        limit = self._data_config.batch_size
        while True:
          get_result = self._datahub.get_tuple_records(
              self._datahub_config.project, self._datahub_config.topic,
              shard_id, record_schema, cursor, limit)
          batch_data_np = [x.copy() for x in batch_defaults]
          for row_id, record in enumerate(get_result.records):
            for col_id in range(len(record_defaults)):
              if record.values[col_id] not in ['', 'Null', None]:
                batch_data_np[col_id][row_id] = record.values[col_id]
          yield tuple(batch_data_np)
          if get_result.record_count == 0:
            # No new records on this shard yet; back off before polling again.
            time.sleep(1)
          cursor = get_result.next_cursor
    except DatahubException as e:
      logging.error(e)

  def _build(self, mode, params):
    """Build a tf.data.Dataset that streams batches from DataHub.

    Args:
      mode: a tf.estimator.ModeKeys value.
      params: unused, kept for the Input interface.

    Returns:
      A dataset of (features, labels) for train/eval, or features only for
      predict.
    """
    # get input type
    list_type = [get_tf_type(x) for x in self._input_field_types]
    list_type = tuple(list_type)
    list_shapes = [tf.TensorShape([None]) for x in range(0, len(list_type))]
    list_shapes = tuple(list_shapes)
    # read datahub
    dataset = tf.data.Dataset.from_generator(
        self._datahub_generator,
        output_types=list_type,
        output_shapes=list_shapes)
    if mode == tf.estimator.ModeKeys.TRAIN:
      dataset = dataset.shuffle(
          self._data_config.shuffle_buffer_size,
          seed=2020,
          reshuffle_each_iteration=True)
      dataset = dataset.repeat(self.num_epochs)
    else:
      dataset = dataset.repeat(1)
    dataset = dataset.map(
        self._parse_record,
        num_parallel_calls=self._data_config.num_parallel_calls)
    # preprocess is necessary to transform data
    # so that they could be feed into FeatureColumns
    dataset = dataset.map(
        map_func=self._preprocess,
        num_parallel_calls=self._data_config.num_parallel_calls)
    dataset = dataset.prefetch(buffer_size=self._prefetch_size)
    if mode != tf.estimator.ModeKeys.PREDICT:
      dataset = dataset.map(lambda x:
                            (self._get_features(x), self._get_labels(x)))
    else:
      dataset = dataset.map(lambda x: (self._get_features(x)))
    return dataset
| [
"datahub.DataHub",
"easy_rec.python.utils.tf_utils.get_tf_type",
"logging.warning",
"tensorflow.data.Dataset.from_generator",
"logging.info",
"time.sleep",
"numpy.array",
"easy_rec.python.utils.odps_util.check_input_field_and_types",
"logging.error",
"tensorflow.TensorShape"
] | [((539, 633), 'logging.warning', 'logging.warning', (['"""DataHub is not installed. You can install it by: pip install pydatahub"""'], {}), "(\n 'DataHub is not installed. You can install it by: pip install pydatahub')\n", (554, 633), False, 'import logging\n'), ((1924, 1973), 'logging.info', 'logging.info', (["('start epoch[%d]' % self._num_epoch)"], {}), "('start epoch[%d]' % self._num_epoch)\n", (1936, 1973), False, 'import logging\n'), ((2003, 2059), 'easy_rec.python.utils.odps_util.check_input_field_and_types', 'odps_util.check_input_field_and_types', (['self._data_config'], {}), '(self._data_config)\n', (2040, 2059), False, 'from easy_rec.python.utils import odps_util\n'), ((4340, 4451), 'tensorflow.data.Dataset.from_generator', 'tf.data.Dataset.from_generator', (['self._datahub_generator'], {'output_types': 'list_type', 'output_shapes': 'list_shapes'}), '(self._datahub_generator, output_types=\n list_type, output_shapes=list_shapes)\n', (4370, 4451), True, 'import tensorflow as tf\n'), ((1133, 1227), 'logging.error', 'logging.error', (['"""please install datahub: """', '"""pip install pydatahub ;Python 3.6 recommended"""'], {}), "('please install datahub: ',\n 'pip install pydatahub ;Python 3.6 recommended')\n", (1146, 1227), False, 'import logging\n'), ((1371, 1470), 'datahub.DataHub', 'DataHub', (['self._datahub_config.akId', 'self._datahub_config.akSecret', 'self._datahub_config.region'], {}), '(self._datahub_config.akId, self._datahub_config.akSecret, self.\n _datahub_config.region)\n', (1378, 1470), False, 'from datahub import DataHub\n'), ((2235, 2279), 'numpy.array', 'np.array', (['([x] * self._data_config.batch_size)'], {}), '([x] * self._data_config.batch_size)\n', (2243, 2279), True, 'import numpy as np\n'), ((4111, 4125), 'easy_rec.python.utils.tf_utils.get_tf_type', 'get_tf_type', (['x'], {}), '(x)\n', (4122, 4125), False, 'from easy_rec.python.utils.tf_utils import get_tf_type\n'), ((4212, 4234), 'tensorflow.TensorShape', 'tf.TensorShape', 
(['[None]'], {}), '([None])\n', (4226, 4234), True, 'import tensorflow as tf\n'), ((2665, 2702), 'logging.error', 'logging.error', (['"""topic type illegal !"""'], {}), "('topic type illegal !')\n", (2678, 2702), False, 'import logging\n'), ((4021, 4037), 'logging.error', 'logging.error', (['e'], {}), '(e)\n', (4034, 4037), False, 'import logging\n'), ((3925, 3938), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (3935, 3938), False, 'import time\n')] |
# This should work for all COCO/ LVIS style annotation files.
# Annotations are stored per instance. We need to construct a list of numpy arrays per image.
################################################################################
## Import packages. ##
################################################################################
import numpy as np
import json
import pickle
import ipdb
import mmcv
################################################################################
## Define things. ##
################################################################################
# GT_FILE = "/scratch/cluster/ishann/data/lvis/annotations/lvis_val_100.json"
# PROP_FILE = "/scratch/cluster/ishann/data/lvis/proposals/lvis_val_100_props_gt.pkl"
# Input annotation json (COCO/LVIS style) and output pickle of per-image
# ground-truth proposal arrays.
GT_FILE = "/scratch/cluster/ishann/data/lvis/annotations/lvis_v0.5_train.json"
PROP_FILE = "/scratch/cluster/ishann/data/lvis/proposals/lvis_v0.5_train_props_gt.pkl"
# Placeholder (x1, y1, x2, y2) box for images that have no annotations.
EMPTY_BOX = [1.0, 1.0, 2.0, 2.0]
################################################################################
## Utility for converting boxes from COCO/ LVIS format to RPN format. ##
################################################################################
def _coco_box_to_bbox(box):
"""
Get cv2 compatible bbox from COCO compatible bbox.
"""
bbox = np.array([box[0], box[1], box[0] + box[2], box[1] + box[3]],
dtype=np.int32)
return bbox
################################################################################
## Load data, initialize structures, etc. ##
################################################################################
print("\nFetching annotations.")
with open(GT_FILE, "rb") as file:
gt = json.load(file)
anns = gt['annotations']
print("\nFetched annotations.")
img_ids = [gtimg['id'] for gtimg in gt['images']]
img_file_names = [gtimg['file_name'] for gtimg in gt['images']]
proposals = [np.empty((0, 4), dtype=np.float32) for _ in img_ids]
# ipdb.set_trace()
prog_bar = mmcv.ProgressBar(len(anns))
# Loop over all per instance annotations
for idx, ann in enumerate(anns):
# use ann['image_id'] to figure out which index it lies at in in img_ids
img_idx = img_ids.index(ann['image_id'])
bbox = _coco_box_to_bbox(ann['bbox'])
bbox = np.expand_dims(np.asarray(bbox), axis=0)
curr_proposals = proposals[img_idx]
upd_proposals = np.concatenate((curr_proposals, bbox), axis=0)
proposals[img_idx] = upd_proposals
prog_bar.update()
# Add dummy box each image that does not have any ground truth proposals.
count = 0
for idx, p in enumerate(proposals):
if p.shape[0]==0:
# print(idx)
count += 1
bbox = np.expand_dims(np.asarray(EMPTY_BOX), axis=0)
curr_proposals = proposals[idx]
upd_proposals = np.concatenate((curr_proposals, bbox), axis=0)
proposals[idx] = upd_proposals
print("\nDummy boxes added to {} images.".format(count))
# Convert proposals to float32 arrays. Ready to publish.
proposals = [np.float32(props) for props in proposals]
# Publish.
with open(PROP_FILE, 'wb') as handle:
pickle.dump(proposals, handle, protocol=pickle.HIGHEST_PROTOCOL)
print("File written to: {}".format(PROP_FILE))
| [
"pickle.dump",
"numpy.asarray",
"numpy.array",
"numpy.empty",
"numpy.concatenate",
"json.load",
"numpy.float32"
] | [((1437, 1513), 'numpy.array', 'np.array', (['[box[0], box[1], box[0] + box[2], box[1] + box[3]]'], {'dtype': 'np.int32'}), '([box[0], box[1], box[0] + box[2], box[1] + box[3]], dtype=np.int32)\n', (1445, 1513), True, 'import numpy as np\n'), ((1873, 1888), 'json.load', 'json.load', (['file'], {}), '(file)\n', (1882, 1888), False, 'import json\n'), ((2078, 2112), 'numpy.empty', 'np.empty', (['(0, 4)'], {'dtype': 'np.float32'}), '((0, 4), dtype=np.float32)\n', (2086, 2112), True, 'import numpy as np\n'), ((2545, 2591), 'numpy.concatenate', 'np.concatenate', (['(curr_proposals, bbox)'], {'axis': '(0)'}), '((curr_proposals, bbox), axis=0)\n', (2559, 2591), True, 'import numpy as np\n'), ((3179, 3196), 'numpy.float32', 'np.float32', (['props'], {}), '(props)\n', (3189, 3196), True, 'import numpy as np\n'), ((3275, 3339), 'pickle.dump', 'pickle.dump', (['proposals', 'handle'], {'protocol': 'pickle.HIGHEST_PROTOCOL'}), '(proposals, handle, protocol=pickle.HIGHEST_PROTOCOL)\n', (3286, 3339), False, 'import pickle\n'), ((2457, 2473), 'numpy.asarray', 'np.asarray', (['bbox'], {}), '(bbox)\n', (2467, 2473), True, 'import numpy as np\n'), ((2964, 3010), 'numpy.concatenate', 'np.concatenate', (['(curr_proposals, bbox)'], {'axis': '(0)'}), '((curr_proposals, bbox), axis=0)\n', (2978, 3010), True, 'import numpy as np\n'), ((2869, 2890), 'numpy.asarray', 'np.asarray', (['EMPTY_BOX'], {}), '(EMPTY_BOX)\n', (2879, 2890), True, 'import numpy as np\n')] |
# Python codes for multimodal classification models, this file will be updated later
from nltk.tokenize import sent_tokenize
import os
os.environ['TF_XLA_FLAGS'] = '--tf_xla_enable_xla_devices'
import pandas as pd
import bert
import numpy as np
from scipy import stats
print("\n**************************************************************************************************")
print("loading acoustic and visual features")
# this part is for loading features from audio and video modalities
print("Done!")
print("\n**************************************************************************************************")
print("loading textual data...")
print("create tokenizer...")
def createTokenizer():
    """Build a BERT FullTokenizer from the bundled uncased vocab file."""
    base_dir = os.path.dirname(os.path.realpath(__file__))
    vocab_path = os.path.join(
        base_dir, "models", "uncased_L-12_H-768_A-12", "vocab.txt")
    return bert.bert_tokenization.FullTokenizer(vocab_path, do_lower_case=True)
tokenizer = createTokenizer()
# Each row of data.csv holds one utterance (possibly several sentences).
data_list = pd.read_csv('./csv_data/data/data.csv', delimiter='\t', header=None).values.tolist()
utterance_tokens = []
pair_tokens = []
# Tokenize every utterance: sentences are separated by '[SEP]' and each
# utterance is terminated with an '[EOT]' marker.
for sentences in data_list:
    sentence_list = sent_tokenize(sentences[0])
    sentences_tokens = []
    for sentence in sentence_list:
        words = tokenizer.tokenize(sentence)
        words.append('[SEP]')
        sentences_tokens += words
    sentences_tokens += ['[EOT]']
    utterance_tokens.append(sentences_tokens)
# Build consecutive utterance pairs: '[CLS]' + current + next. The [:-1]
# drops the final '[EOT]' of the second utterance.
for token_index in range(len(utterance_tokens)-1):
    current_token = utterance_tokens[token_index]
    next_token = utterance_tokens[token_index+1]
    turn_token = ['[CLS]'] + current_token + next_token
    turn_token = turn_token[:-1]
    pair_tokens.append(turn_token)
print("Done!")
print("\n**************************************************************************************************")
print("loading impasse labels...")
label_list = pd.read_csv('./csv_data/label/label.csv', delimiter='\t', header=None).values.tolist()
print("The length of the label list is: ", len(label_list))
print("Done!")
print("\n**************************************************************************************************")
print("create bert layer...")
import tensorflow as tf
from tensorflow.keras.preprocessing.sequence import pad_sequences
from sklearn.model_selection import train_test_split
from imblearn.under_sampling import RandomUnderSampler
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import classification_report
MAX_SEQ_LEN = 40
def createBertLayer(max_seq_length):
    """Create the global `bert_layer` from the bundled pre-trained config
    and build its weights by wrapping it in a throwaway Sequential model.
    """
    global bert_layer
    base_dir = os.path.dirname(os.path.realpath(__file__))
    bert_dir = os.path.join(base_dir, "models", "uncased_L-12_H-768_A-12")
    params = bert.params_from_pretrained_ckpt(bert_dir)
    bert_layer = bert.BertModelLayer.from_params(params, name="bert")
    wrapper = tf.keras.Sequential([
        tf.keras.layers.Input(shape=(max_seq_length,), dtype='int32', name='input_ids'),
        bert_layer,
    ])
    wrapper.build(input_shape=(None, max_seq_length))
    # use this to use pre-trained BERT weights; otherwise the model will
    # train the BERT model from the scratch
    bert_layer.apply_adapter_freeze()
createBertLayer(MAX_SEQ_LEN)
def loadBertCheckpoint():
    """Load the bundled pre-trained checkpoint weights into `bert_layer`."""
    base_dir = os.path.dirname(os.path.realpath(__file__))
    ckpt_path = os.path.join(
        base_dir, "models", "uncased_L-12_H-768_A-12", "bert_model.ckpt")
    bert.load_stock_weights(bert_layer, ckpt_path)
loadBertCheckpoint()
print("done!")
print("\n**************************************************************************************************")
print("create model...")
use_language_model = False
use_acoustic_model = False
use_visual_model = False
use_language_acoustic_model = False
use_language_visual_model = False
use_acoustic_visual_model = True
use_triple_joint_model = False
Average_Time_Interval = 7
MAX_ACOUSTIC_LEN = Average_Time_Interval*128 # for audios we have 128 dimensional vector for each second, we set the average time is 7 seconds
MAX_VISUAL_LEN = Average_Time_Interval*82 # for video we have 82 dimensional vector for each second, was 76 before
def createModel():
    """Build the global `model` according to the use_*_model flags.

    Every branch ends in a single sigmoid unit trained with binary
    cross-entropy and Adam(lr=1e-5); the branches differ only in which
    modality inputs feed the shared MLP head.
    """
    global model
    if use_language_model:
        print("Use language model!")
        model = tf.keras.Sequential([
            tf.keras.layers.Input(shape=(MAX_SEQ_LEN,), dtype='int32', name='input_ids'),
            bert_layer,
            tf.keras.layers.BatchNormalization(momentum=0.99),
            # Keep only BERT's output for the [CLS] token.
            tf.keras.layers.Lambda(lambda x: x[:, 0, :]),
            tf.keras.layers.Dropout(0.4),
            tf.keras.layers.Dense(768, activation=tf.nn.leaky_relu),
            tf.keras.layers.Dropout(0.4),
            tf.keras.layers.Dense(1, activation=tf.nn.sigmoid)
        ])
        model.build(input_shape=(None, MAX_SEQ_LEN))
        model.compile(loss='binary_crossentropy', optimizer=tf.optimizers.Adam(lr=0.00001), metrics=['accuracy'])
        print(model.summary())
    elif use_acoustic_model:
        print("Use acoustic model!")
        model = tf.keras.Sequential([
            # Fixed: acoustic features are real-valued vectors, so the input
            # must be float (the Keras default). This was dtype='int32' with
            # name='input_ids' (copy-pasted from the text branch), which would
            # truncate the features; the joint branches below already use the
            # float default.
            tf.keras.layers.Input(shape=(MAX_ACOUSTIC_LEN,)),
            tf.keras.layers.Dense(768, activation=tf.nn.leaky_relu),
            tf.keras.layers.Dropout(0.4),
            tf.keras.layers.Dense(128, activation=tf.nn.leaky_relu),
            tf.keras.layers.Dropout(0.4),
            tf.keras.layers.Dense(1, activation=tf.nn.sigmoid)
        ])
        model.build(input_shape=(None, MAX_ACOUSTIC_LEN))
        model.compile(loss='binary_crossentropy', optimizer=tf.optimizers.Adam(lr=0.00001), metrics=['accuracy'])
        print(model.summary())
    elif use_visual_model:
        print("Use visual model!")
        model = tf.keras.Sequential([
            # Fixed: float input for real-valued visual features (was
            # dtype='int32', copy-pasted from the text branch).
            tf.keras.layers.Input(shape=(MAX_VISUAL_LEN,)),
            tf.keras.layers.Dense(768, activation=tf.nn.leaky_relu),
            tf.keras.layers.Dropout(0.4),
            tf.keras.layers.Dense(128, activation=tf.nn.leaky_relu),
            tf.keras.layers.Dropout(0.4),
            tf.keras.layers.Dense(1, activation=tf.nn.sigmoid)
        ])
        model.build(input_shape=(None, MAX_VISUAL_LEN))
        model.compile(loss='binary_crossentropy', optimizer=tf.optimizers.Adam(lr=0.00001), metrics=['accuracy'])
        print(model.summary())
    elif use_language_acoustic_model:
        print("Use language and acoustic model!")
        inputA = tf.keras.layers.Input(shape=(MAX_SEQ_LEN,), dtype='int32', name='input_ids')  # textual input
        inputB = tf.keras.layers.Input(shape=(MAX_ACOUSTIC_LEN,))  # acoustic input
        A = bert_layer(inputA)
        A = tf.keras.layers.BatchNormalization(momentum=0.99)(A)
        # We are only interested in BERT's output for the [CLS] token, so
        # select that slice of the cube and discard everything else.
        A = tf.keras.layers.Lambda(lambda x: x[:, 0, :])(A)
        A = tf.keras.Model(inputs=inputA, outputs=A)
        Combined = tf.keras.layers.concatenate([A.output, inputB])
        Combined = tf.keras.layers.Dense(768, activation=tf.nn.leaky_relu)(Combined)
        Combined = tf.keras.layers.Dropout(0.4)(Combined)
        Combined = tf.keras.layers.Dense(128, activation=tf.nn.leaky_relu)(Combined)
        Combined = tf.keras.layers.Dropout(0.4)(Combined)
        Combined = tf.keras.layers.Dense(1, activation=tf.nn.sigmoid)(Combined)
        model = tf.keras.Model(inputs=[A.input, inputB], outputs=Combined)
        model.compile(loss='binary_crossentropy', optimizer=tf.optimizers.Adam(lr=0.00001), metrics=['accuracy'])
        print(model.summary())
    elif use_language_visual_model:
        print("Use language and visual model!")
        inputA = tf.keras.layers.Input(shape=(MAX_SEQ_LEN,), dtype='int32', name='input_ids')  # textual input
        inputC = tf.keras.layers.Input(shape=(MAX_VISUAL_LEN,))  # visual input
        A = bert_layer(inputA)
        A = tf.keras.layers.BatchNormalization(momentum=0.99)(A)
        # Keep only BERT's output for the [CLS] token.
        A = tf.keras.layers.Lambda(lambda x: x[:, 0, :])(A)
        A = tf.keras.Model(inputs=inputA, outputs=A)
        Combined = tf.keras.layers.concatenate([A.output, inputC])
        Combined = tf.keras.layers.Dense(768, activation=tf.nn.leaky_relu)(Combined)
        Combined = tf.keras.layers.Dropout(0.4)(Combined)
        Combined = tf.keras.layers.Dense(128, activation=tf.nn.leaky_relu)(Combined)
        Combined = tf.keras.layers.Dropout(0.4)(Combined)
        Combined = tf.keras.layers.Dense(1, activation=tf.nn.sigmoid)(Combined)
        model = tf.keras.Model(inputs=[A.input, inputC], outputs=Combined)
        model.compile(loss='binary_crossentropy', optimizer=tf.optimizers.Adam(lr=0.00001), metrics=['accuracy'])
        print(model.summary())
    elif use_acoustic_visual_model:
        # Announce the branch like every other branch does (was missing).
        print("Use acoustic and visual model!")
        inputB = tf.keras.layers.Input(shape=(MAX_ACOUSTIC_LEN,))  # acoustic input
        inputC = tf.keras.layers.Input(shape=(MAX_VISUAL_LEN,))  # visual input
        Combined = tf.keras.layers.concatenate([inputB, inputC])
        Combined = tf.keras.layers.Dense(768, activation=tf.nn.leaky_relu)(Combined)
        Combined = tf.keras.layers.Dropout(0.4)(Combined)
        Combined = tf.keras.layers.Dense(128, activation=tf.nn.leaky_relu)(Combined)
        Combined = tf.keras.layers.Dropout(0.4)(Combined)
        Combined = tf.keras.layers.Dense(1, activation=tf.nn.sigmoid)(Combined)
        model = tf.keras.Model(inputs=[inputB, inputC], outputs=Combined)
        model.compile(loss='binary_crossentropy', optimizer=tf.optimizers.Adam(lr=0.00001), metrics=['accuracy'])
        print(model.summary())
    elif use_triple_joint_model:
        print("Use language, acoustic and visual triple joint model!")
        inputA = tf.keras.layers.Input(shape=(MAX_SEQ_LEN,), dtype='int32', name='input_ids')  # textual input
        inputB = tf.keras.layers.Input(shape=(MAX_ACOUSTIC_LEN,))  # acoustic input
        inputC = tf.keras.layers.Input(shape=(MAX_VISUAL_LEN,))  # visual input
        A = bert_layer(inputA)
        A = tf.keras.layers.BatchNormalization(momentum=0.99)(A)
        # Keep only BERT's output for the [CLS] token.
        A = tf.keras.layers.Lambda(lambda x: x[:, 0, :])(A)
        A = tf.keras.Model(inputs=inputA, outputs=A)
        Combined = tf.keras.layers.concatenate([A.output, inputB, inputC])
        Combined = tf.keras.layers.Dense(768, activation=tf.nn.leaky_relu)(Combined)
        Combined = tf.keras.layers.Dropout(0.4)(Combined)
        Combined = tf.keras.layers.Dense(128, activation=tf.nn.leaky_relu)(Combined)
        Combined = tf.keras.layers.Dropout(0.4)(Combined)
        Combined = tf.keras.layers.Dense(1, activation=tf.nn.sigmoid)(Combined)
        model = tf.keras.Model(inputs=[A.input, inputB, inputC], outputs=Combined)
        model.compile(loss='binary_crossentropy', optimizer=tf.optimizers.Adam(lr=0.00001), metrics=['accuracy'])
        print(model.summary())
    else:
        print("ERROR! Please select a model type to build!")
createModel()
print("done!")
print("\n**************************************************************************************************")
print("preparing training and testing data...")
tokens_ids = [tokenizer.convert_tokens_to_ids(token) for token in pair_tokens]
textual_token_ids = pad_sequences(tokens_ids, maxlen=MAX_SEQ_LEN, dtype="long", truncating="post", padding="post")
text_train, text_vali, audio_train, audio_vali, video_train, video_vali, label_train, label_vali = train_test_split(textual_token_ids, acoustic_feature_lists, visual_feature_lists, label_list, test_size=0.2)
text_train = np.array(text_train)
text_vali = np.array(text_vali)
audio_train = np.array(audio_train)
audio_vali = np.array(audio_vali)
video_train = np.array(video_train)
video_vali = np.array(video_vali)
label_train = np.array(label_train)
label_vali = np.array(label_vali)
print("done!")
print("\n**************************************************************************************************")
print("start training...")
if use_language_model:
print("Use language model!")
history = model.fit(
text_train,
label_train,
batch_size=16,
epochs=10,
validation_data=(text_vali, label_vali),
verbose=1
)
elif use_acoustic_model:
print("Use acoustic model!")
history = model.fit(
audio_train,
label_train,
batch_size=16,
epochs=10,
validation_data=(audio_vali, label_vali),
verbose=1
)
elif use_visual_model:
print("Use visual model!")
history = model.fit(
video_train,
label_train,
batch_size=16,
epochs=10,
validation_data=(video_vali, label_vali),
verbose=1
)
elif use_language_acoustic_model:
print("Use language and acoustic model!")
history = model.fit(
[text_train, audio_train],
label_train,
batch_size=16,
epochs=10,
validation_data=([text_vali, audio_vali], label_vali),
verbose=1
)
elif use_acoustic_visual_model:
print("Use language and acoustic model!")
history = model.fit(
[audio_train, video_train],
label_train,
batch_size=16,
epochs=10,
validation_data=([audio_vali, video_vali], label_vali),
verbose=1
)
elif use_language_visual_model:
print("Use language and visual model!")
history = model.fit(
[text_train, video_train],
label_train,
batch_size=16,
epochs=10,
validation_data=([text_vali, video_vali], label_vali),
verbose=1
)
elif use_triple_joint_model:
print("Use language, acoustic and visual triple joint model!")
history = model.fit(
[text_train, audio_train, video_train],
label_train,
batch_size=16,
epochs=30,
validation_data=([text_vali, audio_vali, video_vali], label_vali),
verbose=1
)
else:
print("ERROR! Please select a model type to train!")
print("Done!")
print("\n**************************************************************************************************")
print("Curves plotting...")
# List every quantity that was recorded in the training history.
print(history.history.keys())
# Draw one train/validation figure per tracked quantity.
for curve in ("accuracy", "loss"):
    plt.plot(history.history[curve])
    plt.plot(history.history["val_" + curve])
    plt.title("model " + curve)
    plt.ylabel(curve)
    plt.xlabel("epoch")
    plt.legend(["train", "test"], loc="upper left")
    plt.show()
print("\n**************************************************************************************************")
print("confusion matrix on training set...")
# Predict with the same input modalities that were used for training.
if use_language_model:
    y_pred = model.predict(text_train, verbose=0)
elif use_acoustic_model:
    y_pred = model.predict(audio_train, verbose=0)
elif use_visual_model:
    y_pred = model.predict(video_train, verbose=0)
elif use_language_acoustic_model:
    y_pred = model.predict([text_train, audio_train], verbose=0)
elif use_language_visual_model:
    y_pred = model.predict([text_train, video_train], verbose=0)
elif use_triple_joint_model:
    y_pred = model.predict([text_train, audio_train, video_train], verbose=0)
elif use_acoustic_visual_model:
    y_pred = model.predict([audio_train, video_train], verbose=0)
else:
    print("ERROR! Please select a model type to predict!")
# Binarise the sigmoid scores.  The original loop skipped scores of exactly
# 0.5 (only printing an error message), which left y_pred_list shorter than
# label_train and made classification_report fail with a length mismatch;
# using >= 0.5 keeps the lists aligned by assigning the boundary score to
# the positive class.
y_pred_list = [int(score[0] >= 0.5) for score in y_pred]
print(classification_report(label_train, y_pred_list))
print("\n**************************************************************************************************")
print("confusion matrix on test set...")
# Predict on the validation split with the same input modalities as training.
if use_language_model:
    y_pred = model.predict(text_vali, verbose=0)
elif use_acoustic_model:
    y_pred = model.predict(audio_vali, verbose=0)
elif use_visual_model:
    y_pred = model.predict(video_vali, verbose=0)
elif use_language_acoustic_model:
    y_pred = model.predict([text_vali, audio_vali], verbose=0)
elif use_language_visual_model:
    y_pred = model.predict([text_vali, video_vali], verbose=0)
elif use_triple_joint_model:
    y_pred = model.predict([text_vali, audio_vali, video_vali], verbose=0)
elif use_acoustic_visual_model:
    y_pred = model.predict([audio_vali, video_vali], verbose=0)
else:
    print("ERROR! Please select a model type to predict!")
# Binarise the sigmoid scores.  The original loop skipped scores of exactly
# 0.5 (only printing an error message), which left y_pred_list shorter than
# label_vali and made classification_report fail with a length mismatch;
# using >= 0.5 keeps the lists aligned by assigning the boundary score to
# the positive class.
y_pred_list = [int(score[0] >= 0.5) for score in y_pred]
print(classification_report(label_vali, y_pred_list))
"pandas.read_csv",
"tensorflow.keras.preprocessing.sequence.pad_sequences",
"matplotlib.pyplot.ylabel",
"sklearn.metrics.classification_report",
"tensorflow.keras.layers.BatchNormalization",
"numpy.array",
"tensorflow.keras.layers.Dense",
"nltk.tokenize.sent_tokenize",
"tensorflow.keras.layers.Input... | [((11682, 11781), 'tensorflow.keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['tokens_ids'], {'maxlen': 'MAX_SEQ_LEN', 'dtype': '"""long"""', 'truncating': '"""post"""', 'padding': '"""post"""'}), "(tokens_ids, maxlen=MAX_SEQ_LEN, dtype='long', truncating=\n 'post', padding='post')\n", (11695, 11781), False, 'from tensorflow.keras.preprocessing.sequence import pad_sequences\n'), ((11877, 11989), 'sklearn.model_selection.train_test_split', 'train_test_split', (['textual_token_ids', 'acoustic_feature_lists', 'visual_feature_lists', 'label_list'], {'test_size': '(0.2)'}), '(textual_token_ids, acoustic_feature_lists,\n visual_feature_lists, label_list, test_size=0.2)\n', (11893, 11989), False, 'from sklearn.model_selection import train_test_split\n'), ((12000, 12020), 'numpy.array', 'np.array', (['text_train'], {}), '(text_train)\n', (12008, 12020), True, 'import numpy as np\n'), ((12033, 12052), 'numpy.array', 'np.array', (['text_vali'], {}), '(text_vali)\n', (12041, 12052), True, 'import numpy as np\n'), ((12067, 12088), 'numpy.array', 'np.array', (['audio_train'], {}), '(audio_train)\n', (12075, 12088), True, 'import numpy as np\n'), ((12102, 12122), 'numpy.array', 'np.array', (['audio_vali'], {}), '(audio_vali)\n', (12110, 12122), True, 'import numpy as np\n'), ((12137, 12158), 'numpy.array', 'np.array', (['video_train'], {}), '(video_train)\n', (12145, 12158), True, 'import numpy as np\n'), ((12172, 12192), 'numpy.array', 'np.array', (['video_vali'], {}), '(video_vali)\n', (12180, 12192), True, 'import numpy as np\n'), ((12207, 12228), 'numpy.array', 'np.array', (['label_train'], {}), '(label_train)\n', (12215, 12228), True, 'import numpy as np\n'), ((12242, 12262), 'numpy.array', 'np.array', (['label_vali'], {}), '(label_vali)\n', (12250, 12262), True, 'import numpy as np\n'), ((14645, 14682), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['accuracy']"], {}), "(history.history['accuracy'])\n", (14653, 
14682), True, 'import matplotlib.pyplot as plt\n'), ((14683, 14724), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['val_accuracy']"], {}), "(history.history['val_accuracy'])\n", (14691, 14724), True, 'import matplotlib.pyplot as plt\n'), ((14725, 14752), 'matplotlib.pyplot.title', 'plt.title', (['"""model accuracy"""'], {}), "('model accuracy')\n", (14734, 14752), True, 'import matplotlib.pyplot as plt\n'), ((14753, 14775), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""accuracy"""'], {}), "('accuracy')\n", (14763, 14775), True, 'import matplotlib.pyplot as plt\n'), ((14776, 14795), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (14786, 14795), True, 'import matplotlib.pyplot as plt\n'), ((14796, 14843), 'matplotlib.pyplot.legend', 'plt.legend', (["['train', 'test']"], {'loc': '"""upper left"""'}), "(['train', 'test'], loc='upper left')\n", (14806, 14843), True, 'import matplotlib.pyplot as plt\n'), ((14844, 14854), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (14852, 14854), True, 'import matplotlib.pyplot as plt\n'), ((14884, 14917), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['loss']"], {}), "(history.history['loss'])\n", (14892, 14917), True, 'import matplotlib.pyplot as plt\n'), ((14918, 14955), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['val_loss']"], {}), "(history.history['val_loss'])\n", (14926, 14955), True, 'import matplotlib.pyplot as plt\n'), ((14956, 14979), 'matplotlib.pyplot.title', 'plt.title', (['"""model loss"""'], {}), "('model loss')\n", (14965, 14979), True, 'import matplotlib.pyplot as plt\n'), ((14980, 14998), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""loss"""'], {}), "('loss')\n", (14990, 14998), True, 'import matplotlib.pyplot as plt\n'), ((14999, 15018), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (15009, 15018), True, 'import matplotlib.pyplot as plt\n'), ((15019, 15066), 'matplotlib.pyplot.legend', 'plt.legend', 
(["['train', 'test']"], {'loc': '"""upper left"""'}), "(['train', 'test'], loc='upper left')\n", (15029, 15066), True, 'import matplotlib.pyplot as plt\n'), ((15067, 15077), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (15075, 15077), True, 'import matplotlib.pyplot as plt\n'), ((787, 848), 'os.path.join', 'os.path.join', (['currentDir', '"""models"""', '"""uncased_L-12_H-768_A-12"""'], {}), "(currentDir, 'models', 'uncased_L-12_H-768_A-12')\n", (799, 848), False, 'import os\n'), ((866, 905), 'os.path.join', 'os.path.join', (['modelsFolder', '"""vocab.txt"""'], {}), "(modelsFolder, 'vocab.txt')\n", (878, 905), False, 'import os\n'), ((922, 990), 'bert.bert_tokenization.FullTokenizer', 'bert.bert_tokenization.FullTokenizer', (['vocab_file'], {'do_lower_case': '(True)'}), '(vocab_file, do_lower_case=True)\n', (958, 990), False, 'import bert\n'), ((1230, 1257), 'nltk.tokenize.sent_tokenize', 'sent_tokenize', (['sentences[0]'], {}), '(sentences[0])\n', (1243, 1257), False, 'from nltk.tokenize import sent_tokenize\n'), ((2715, 2776), 'os.path.join', 'os.path.join', (['currentDir', '"""models"""', '"""uncased_L-12_H-768_A-12"""'], {}), "(currentDir, 'models', 'uncased_L-12_H-768_A-12')\n", (2727, 2776), False, 'import os\n'), ((2795, 2836), 'bert.params_from_pretrained_ckpt', 'bert.params_from_pretrained_ckpt', (['bertDir'], {}), '(bertDir)\n', (2827, 2836), False, 'import bert\n'), ((2854, 2911), 'bert.BertModelLayer.from_params', 'bert.BertModelLayer.from_params', (['bert_params'], {'name': '"""bert"""'}), "(bert_params, name='bert')\n", (2885, 2911), False, 'import bert\n'), ((3437, 3498), 'os.path.join', 'os.path.join', (['currentDir', '"""models"""', '"""uncased_L-12_H-768_A-12"""'], {}), "(currentDir, 'models', 'uncased_L-12_H-768_A-12')\n", (3449, 3498), False, 'import os\n'), ((3520, 3560), 'os.path.join', 'os.path.join', (['bertDir', '"""bert_model.ckpt"""'], {}), "(bertDir, 'bert_model.ckpt')\n", (3532, 3560), False, 'import os\n'), ((3565, 3616), 
'bert.load_stock_weights', 'bert.load_stock_weights', (['bert_layer', 'checkpointName'], {}), '(bert_layer, checkpointName)\n', (3588, 3616), False, 'import bert\n'), ((16180, 16227), 'sklearn.metrics.classification_report', 'classification_report', (['label_train', 'y_pred_list'], {}), '(label_train, y_pred_list)\n', (16201, 16227), False, 'from sklearn.metrics import classification_report\n'), ((17314, 17360), 'sklearn.metrics.classification_report', 'classification_report', (['label_vali', 'y_pred_list'], {}), '(label_vali, y_pred_list)\n', (17335, 17360), False, 'from sklearn.metrics import classification_report\n'), ((740, 766), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (756, 766), False, 'import os\n'), ((2673, 2699), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (2689, 2699), False, 'import os\n'), ((3395, 3421), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (3411, 3421), False, 'import os\n'), ((1056, 1124), 'pandas.read_csv', 'pd.read_csv', (['"""./csv_data/data/data.csv"""'], {'delimiter': '"""\t"""', 'header': 'None'}), "('./csv_data/data/data.csv', delimiter='\\t', header=None)\n", (1067, 1124), True, 'import pandas as pd\n'), ((1959, 2029), 'pandas.read_csv', 'pd.read_csv', (['"""./csv_data/label/label.csv"""'], {'delimiter': '"""\t"""', 'header': 'None'}), "('./csv_data/label/label.csv', delimiter='\\t', header=None)\n", (1970, 2029), True, 'import pandas as pd\n'), ((2960, 3039), 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', ([], {'shape': '(max_seq_length,)', 'dtype': '"""int32"""', 'name': '"""input_ids"""'}), "(shape=(max_seq_length,), dtype='int32', name='input_ids')\n", (2981, 3039), True, 'import tensorflow as tf\n'), ((4441, 4517), 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', ([], {'shape': '(MAX_SEQ_LEN,)', 'dtype': '"""int32"""', 'name': '"""input_ids"""'}), "(shape=(MAX_SEQ_LEN,), dtype='int32', name='input_ids')\n", (4462, 
4517), True, 'import tensorflow as tf\n'), ((4555, 4604), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {'momentum': '(0.99)'}), '(momentum=0.99)\n', (4589, 4604), True, 'import tensorflow as tf\n'), ((4618, 4662), 'tensorflow.keras.layers.Lambda', 'tf.keras.layers.Lambda', (['(lambda x: x[:, 0, :])'], {}), '(lambda x: x[:, 0, :])\n', (4640, 4662), True, 'import tensorflow as tf\n'), ((4676, 4704), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['(0.4)'], {}), '(0.4)\n', (4699, 4704), True, 'import tensorflow as tf\n'), ((4718, 4773), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(768)'], {'activation': 'tf.nn.leaky_relu'}), '(768, activation=tf.nn.leaky_relu)\n', (4739, 4773), True, 'import tensorflow as tf\n'), ((4787, 4815), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['(0.4)'], {}), '(0.4)\n', (4810, 4815), True, 'import tensorflow as tf\n'), ((4829, 4879), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(1)'], {'activation': 'tf.nn.sigmoid'}), '(1, activation=tf.nn.sigmoid)\n', (4850, 4879), True, 'import tensorflow as tf\n'), ((5004, 5032), 'tensorflow.optimizers.Adam', 'tf.optimizers.Adam', ([], {'lr': '(1e-05)'}), '(lr=1e-05)\n', (5022, 5032), True, 'import tensorflow as tf\n'), ((5206, 5292), 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', ([], {'shape': '(MAX_ACOUSTIC_LEN,)', 'dtype': '"""int32"""', 'name': '"""input_ids"""'}), "(shape=(MAX_ACOUSTIC_LEN,), dtype='int32', name=\n 'input_ids')\n", (5227, 5292), True, 'import tensorflow as tf\n'), ((5301, 5356), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(768)'], {'activation': 'tf.nn.leaky_relu'}), '(768, activation=tf.nn.leaky_relu)\n', (5322, 5356), True, 'import tensorflow as tf\n'), ((5370, 5398), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['(0.4)'], {}), '(0.4)\n', (5393, 5398), True, 'import tensorflow as tf\n'), ((5412, 5467), 
'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(128)'], {'activation': 'tf.nn.leaky_relu'}), '(128, activation=tf.nn.leaky_relu)\n', (5433, 5467), True, 'import tensorflow as tf\n'), ((5481, 5509), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['(0.4)'], {}), '(0.4)\n', (5504, 5509), True, 'import tensorflow as tf\n'), ((5523, 5573), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(1)'], {'activation': 'tf.nn.sigmoid'}), '(1, activation=tf.nn.sigmoid)\n', (5544, 5573), True, 'import tensorflow as tf\n'), ((5703, 5731), 'tensorflow.optimizers.Adam', 'tf.optimizers.Adam', ([], {'lr': '(1e-05)'}), '(lr=1e-05)\n', (5721, 5731), True, 'import tensorflow as tf\n'), ((6585, 6661), 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', ([], {'shape': '(MAX_SEQ_LEN,)', 'dtype': '"""int32"""', 'name': '"""input_ids"""'}), "(shape=(MAX_SEQ_LEN,), dtype='int32', name='input_ids')\n", (6606, 6661), True, 'import tensorflow as tf\n'), ((6696, 6744), 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', ([], {'shape': '(MAX_ACOUSTIC_LEN,)'}), '(shape=(MAX_ACOUSTIC_LEN,))\n', (6717, 6744), True, 'import tensorflow as tf\n'), ((7081, 7121), 'tensorflow.keras.Model', 'tf.keras.Model', ([], {'inputs': 'inputA', 'outputs': 'A'}), '(inputs=inputA, outputs=A)\n', (7095, 7121), True, 'import tensorflow as tf\n'), ((7142, 7189), 'tensorflow.keras.layers.concatenate', 'tf.keras.layers.concatenate', (['[A.output, inputB]'], {}), '([A.output, inputB])\n', (7169, 7189), True, 'import tensorflow as tf\n'), ((7573, 7631), 'tensorflow.keras.Model', 'tf.keras.Model', ([], {'inputs': '[A.input, inputB]', 'outputs': 'Combined'}), '(inputs=[A.input, inputB], outputs=Combined)\n', (7587, 7631), True, 'import tensorflow as tf\n'), ((5901, 5980), 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', ([], {'shape': '(MAX_VISUAL_LEN,)', 'dtype': '"""int32"""', 'name': '"""input_ids"""'}), "(shape=(MAX_VISUAL_LEN,), dtype='int32', 
name='input_ids')\n", (5922, 5980), True, 'import tensorflow as tf\n'), ((5994, 6049), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(768)'], {'activation': 'tf.nn.leaky_relu'}), '(768, activation=tf.nn.leaky_relu)\n', (6015, 6049), True, 'import tensorflow as tf\n'), ((6063, 6091), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['(0.4)'], {}), '(0.4)\n', (6086, 6091), True, 'import tensorflow as tf\n'), ((6105, 6160), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(128)'], {'activation': 'tf.nn.leaky_relu'}), '(128, activation=tf.nn.leaky_relu)\n', (6126, 6160), True, 'import tensorflow as tf\n'), ((6174, 6202), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['(0.4)'], {}), '(0.4)\n', (6197, 6202), True, 'import tensorflow as tf\n'), ((6216, 6266), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(1)'], {'activation': 'tf.nn.sigmoid'}), '(1, activation=tf.nn.sigmoid)\n', (6237, 6266), True, 'import tensorflow as tf\n'), ((6394, 6422), 'tensorflow.optimizers.Adam', 'tf.optimizers.Adam', ([], {'lr': '(1e-05)'}), '(lr=1e-05)\n', (6412, 6422), True, 'import tensorflow as tf\n'), ((6807, 6856), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {'momentum': '(0.99)'}), '(momentum=0.99)\n', (6841, 6856), True, 'import tensorflow as tf\n'), ((6872, 6916), 'tensorflow.keras.layers.Lambda', 'tf.keras.layers.Lambda', (['(lambda x: x[:, 0, :])'], {}), '(lambda x: x[:, 0, :])\n', (6894, 6916), True, 'import tensorflow as tf\n'), ((7209, 7264), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(768)'], {'activation': 'tf.nn.leaky_relu'}), '(768, activation=tf.nn.leaky_relu)\n', (7230, 7264), True, 'import tensorflow as tf\n'), ((7294, 7322), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['(0.4)'], {}), '(0.4)\n', (7317, 7322), True, 'import tensorflow as tf\n'), ((7352, 7407), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', 
(['(128)'], {'activation': 'tf.nn.leaky_relu'}), '(128, activation=tf.nn.leaky_relu)\n', (7373, 7407), True, 'import tensorflow as tf\n'), ((7437, 7465), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['(0.4)'], {}), '(0.4)\n', (7460, 7465), True, 'import tensorflow as tf\n'), ((7495, 7545), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(1)'], {'activation': 'tf.nn.sigmoid'}), '(1, activation=tf.nn.sigmoid)\n', (7516, 7545), True, 'import tensorflow as tf\n'), ((7880, 7956), 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', ([], {'shape': '(MAX_SEQ_LEN,)', 'dtype': '"""int32"""', 'name': '"""input_ids"""'}), "(shape=(MAX_SEQ_LEN,), dtype='int32', name='input_ids')\n", (7901, 7956), True, 'import tensorflow as tf\n'), ((7991, 8037), 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', ([], {'shape': '(MAX_VISUAL_LEN,)'}), '(shape=(MAX_VISUAL_LEN,))\n', (8012, 8037), True, 'import tensorflow as tf\n'), ((8372, 8412), 'tensorflow.keras.Model', 'tf.keras.Model', ([], {'inputs': 'inputA', 'outputs': 'A'}), '(inputs=inputA, outputs=A)\n', (8386, 8412), True, 'import tensorflow as tf\n'), ((8433, 8480), 'tensorflow.keras.layers.concatenate', 'tf.keras.layers.concatenate', (['[A.output, inputC]'], {}), '([A.output, inputC])\n', (8460, 8480), True, 'import tensorflow as tf\n'), ((8864, 8922), 'tensorflow.keras.Model', 'tf.keras.Model', ([], {'inputs': '[A.input, inputC]', 'outputs': 'Combined'}), '(inputs=[A.input, inputC], outputs=Combined)\n', (8878, 8922), True, 'import tensorflow as tf\n'), ((7693, 7721), 'tensorflow.optimizers.Adam', 'tf.optimizers.Adam', ([], {'lr': '(1e-05)'}), '(lr=1e-05)\n', (7711, 7721), True, 'import tensorflow as tf\n'), ((8098, 8147), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {'momentum': '(0.99)'}), '(momentum=0.99)\n', (8132, 8147), True, 'import tensorflow as tf\n'), ((8163, 8207), 'tensorflow.keras.layers.Lambda', 'tf.keras.layers.Lambda', (['(lambda 
x: x[:, 0, :])'], {}), '(lambda x: x[:, 0, :])\n', (8185, 8207), True, 'import tensorflow as tf\n'), ((8500, 8555), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(768)'], {'activation': 'tf.nn.leaky_relu'}), '(768, activation=tf.nn.leaky_relu)\n', (8521, 8555), True, 'import tensorflow as tf\n'), ((8585, 8613), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['(0.4)'], {}), '(0.4)\n', (8608, 8613), True, 'import tensorflow as tf\n'), ((8643, 8698), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(128)'], {'activation': 'tf.nn.leaky_relu'}), '(128, activation=tf.nn.leaky_relu)\n', (8664, 8698), True, 'import tensorflow as tf\n'), ((8728, 8756), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['(0.4)'], {}), '(0.4)\n', (8751, 8756), True, 'import tensorflow as tf\n'), ((8786, 8836), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(1)'], {'activation': 'tf.nn.sigmoid'}), '(1, activation=tf.nn.sigmoid)\n', (8807, 8836), True, 'import tensorflow as tf\n'), ((9123, 9171), 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', ([], {'shape': '(MAX_ACOUSTIC_LEN,)'}), '(shape=(MAX_ACOUSTIC_LEN,))\n', (9144, 9171), True, 'import tensorflow as tf\n'), ((9207, 9253), 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', ([], {'shape': '(MAX_VISUAL_LEN,)'}), '(shape=(MAX_VISUAL_LEN,))\n', (9228, 9253), True, 'import tensorflow as tf\n'), ((9290, 9335), 'tensorflow.keras.layers.concatenate', 'tf.keras.layers.concatenate', (['[inputB, inputC]'], {}), '([inputB, inputC])\n', (9317, 9335), True, 'import tensorflow as tf\n'), ((9719, 9776), 'tensorflow.keras.Model', 'tf.keras.Model', ([], {'inputs': '[inputB, inputC]', 'outputs': 'Combined'}), '(inputs=[inputB, inputC], outputs=Combined)\n', (9733, 9776), True, 'import tensorflow as tf\n'), ((8984, 9012), 'tensorflow.optimizers.Adam', 'tf.optimizers.Adam', ([], {'lr': '(1e-05)'}), '(lr=1e-05)\n', (9002, 9012), True, 'import tensorflow as tf\n'), ((9355, 
9410), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(768)'], {'activation': 'tf.nn.leaky_relu'}), '(768, activation=tf.nn.leaky_relu)\n', (9376, 9410), True, 'import tensorflow as tf\n'), ((9440, 9468), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['(0.4)'], {}), '(0.4)\n', (9463, 9468), True, 'import tensorflow as tf\n'), ((9498, 9553), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(128)'], {'activation': 'tf.nn.leaky_relu'}), '(128, activation=tf.nn.leaky_relu)\n', (9519, 9553), True, 'import tensorflow as tf\n'), ((9583, 9611), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['(0.4)'], {}), '(0.4)\n', (9606, 9611), True, 'import tensorflow as tf\n'), ((9641, 9691), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(1)'], {'activation': 'tf.nn.sigmoid'}), '(1, activation=tf.nn.sigmoid)\n', (9662, 9691), True, 'import tensorflow as tf\n'), ((10045, 10121), 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', ([], {'shape': '(MAX_SEQ_LEN,)', 'dtype': '"""int32"""', 'name': '"""input_ids"""'}), "(shape=(MAX_SEQ_LEN,), dtype='int32', name='input_ids')\n", (10066, 10121), True, 'import tensorflow as tf\n'), ((10156, 10204), 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', ([], {'shape': '(MAX_ACOUSTIC_LEN,)'}), '(shape=(MAX_ACOUSTIC_LEN,))\n', (10177, 10204), True, 'import tensorflow as tf\n'), ((10240, 10286), 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', ([], {'shape': '(MAX_VISUAL_LEN,)'}), '(shape=(MAX_VISUAL_LEN,))\n', (10261, 10286), True, 'import tensorflow as tf\n'), ((10608, 10648), 'tensorflow.keras.Model', 'tf.keras.Model', ([], {'inputs': 'inputA', 'outputs': 'A'}), '(inputs=inputA, outputs=A)\n', (10622, 10648), True, 'import tensorflow as tf\n'), ((10669, 10724), 'tensorflow.keras.layers.concatenate', 'tf.keras.layers.concatenate', (['[A.output, inputB, inputC]'], {}), '([A.output, inputB, inputC])\n', (10696, 10724), True, 'import tensorflow as tf\n'), 
((11108, 11174), 'tensorflow.keras.Model', 'tf.keras.Model', ([], {'inputs': '[A.input, inputB, inputC]', 'outputs': 'Combined'}), '(inputs=[A.input, inputB, inputC], outputs=Combined)\n', (11122, 11174), True, 'import tensorflow as tf\n'), ((9838, 9866), 'tensorflow.optimizers.Adam', 'tf.optimizers.Adam', ([], {'lr': '(1e-05)'}), '(lr=1e-05)\n', (9856, 9866), True, 'import tensorflow as tf\n'), ((10347, 10396), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {'momentum': '(0.99)'}), '(momentum=0.99)\n', (10381, 10396), True, 'import tensorflow as tf\n'), ((10412, 10456), 'tensorflow.keras.layers.Lambda', 'tf.keras.layers.Lambda', (['(lambda x: x[:, 0, :])'], {}), '(lambda x: x[:, 0, :])\n', (10434, 10456), True, 'import tensorflow as tf\n'), ((10744, 10799), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(768)'], {'activation': 'tf.nn.leaky_relu'}), '(768, activation=tf.nn.leaky_relu)\n', (10765, 10799), True, 'import tensorflow as tf\n'), ((10829, 10857), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['(0.4)'], {}), '(0.4)\n', (10852, 10857), True, 'import tensorflow as tf\n'), ((10887, 10942), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(128)'], {'activation': 'tf.nn.leaky_relu'}), '(128, activation=tf.nn.leaky_relu)\n', (10908, 10942), True, 'import tensorflow as tf\n'), ((10972, 11000), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['(0.4)'], {}), '(0.4)\n', (10995, 11000), True, 'import tensorflow as tf\n'), ((11030, 11080), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(1)'], {'activation': 'tf.nn.sigmoid'}), '(1, activation=tf.nn.sigmoid)\n', (11051, 11080), True, 'import tensorflow as tf\n'), ((11236, 11264), 'tensorflow.optimizers.Adam', 'tf.optimizers.Adam', ([], {'lr': '(1e-05)'}), '(lr=1e-05)\n', (11254, 11264), True, 'import tensorflow as tf\n')] |
# This module tests data directly from the pangeo google cloud storage.
# Tests are meant to be more high level and also serve to document known problems (see skip statements).
import pytest
import xarray as xr
import numpy as np
from cmip6_preprocessing.tests.cloud_test_utils import (
full_specs,
xfail_wrapper,
all_models,
data,
diagnose_doubles,
)
from cmip6_preprocessing.preprocessing import combined_preprocessing
from cmip6_preprocessing.grids import combine_staggered_grid
# Skip the entire module when gcsfs (the Google Cloud Storage filesystem
# backend) is not installed, since every test here reads from the cloud.
pytest.importorskip("gcsfs")
print(f"\n\n\n\n$$$$$$$ All available models: {all_models()}$$$$$$$\n\n\n\n")
# manually combine all pytest parameters, so that I have very fine grained control over
# which combination of parameters is expected to fail.
########################### Most basic test #########################
# Known-bad (source_id, variable_id, experiment_id, grid_label) combinations
# for the raw-data test below; `xfail_wrapper` marks them as expected failures.
expected_failures = [
    ("AWI-ESM-1-1-LR", "thetao", "historical", "gn"),
    ("AWI-ESM-1-1-LR", "thetao", "ssp585", "gn"),
    ("AWI-CM-1-1-MR", "thetao", "historical", "gn"),
    ("AWI-CM-1-1-MR", "thetao", "ssp585", "gn"),
    # TODO: would be nice to have a "*" matching...
    ("CESM2-FV2", "thetao", "historical", "gn"),
    ("CESM2-FV2", "thetao", "ssp585", "gn"),
]
@pytest.mark.parametrize(
    "source_id,variable_id,experiment_id,grid_label",
    xfail_wrapper(full_specs(), expected_failures),
)
def test_check_dim_coord_values_wo_intake(
    source_id, variable_id, experiment_id, grid_label
):
    """Sanity-check dimensions and lon/lat coordinates of raw (non-intake) data."""
    ds, cat = data(source_id, variable_id, experiment_id, grid_label, False)
    if ds is None:
        pytest.skip(
            f"No data found for {source_id}|{variable_id}|{experiment_id}|{grid_label}"
        )
    # Only a subset of the dims is checked for duplicates / monotonicity.
    # TODO: add the bounds once they are cleaned up.
    for dim in ["x", "y", "lev", "time"]:
        if dim not in ds.dims:
            continue
        diagnose_doubles(ds[dim].load().data)
        assert len(ds[dim]) == len(np.unique(ds[dim]))
        # NaN / monotonicity tests do not make sense for decoded time values.
        if dim != "time":
            assert ~np.all(np.isnan(ds[dim]))
            assert np.all(ds[dim].diff(dim) >= 0)
    # Longitude convention: [0, 360]; latitude: [-90, 90].
    assert ds.lon.min().load() >= 0
    assert ds.lon.max().load() <= 360
    if "lon_bounds" in ds.variables:
        assert ds.lon_bounds.min().load() >= 0
        assert ds.lon_bounds.max().load() <= 360
    assert ds.lat.min().load() >= -90
    assert ds.lat.max().load() <= 90
    # lon and lat must be 2d (curvilinear) coordinate arrays.
    assert len(ds.lon.shape) == 2
    assert len(ds.lat.shape) == 2
# Known-bad combinations for the intake-preprocessed dim/coord test below;
# `xfail_wrapper` marks these parameter tuples as expected failures.
expected_failures = [
    ("AWI-ESM-1-1-LR", "thetao", "historical", "gn"),
    ("AWI-ESM-1-1-LR", "thetao", "ssp585", "gn"),
    ("AWI-CM-1-1-MR", "thetao", "historical", "gn"),
    ("AWI-CM-1-1-MR", "thetao", "ssp585", "gn"),
    # TODO: would be nice to have a "*" matching...
    ("CESM2-FV2", "thetao", "historical", "gn"),
    ("CESM2-FV2", "thetao", "ssp585", "gn"),
    (
        "IPSL-CM6A-LR",
        "thetao",
        "historical",
        "gn",
    ),  # IPSL has an issue with `lev` dims concatting
    ("IPSL-CM6A-LR", "o2", "historical", "gn"),
    ("NorESM2-MM", "thetao", "historical", "gn"),
    ("NorESM2-MM", "thetao", "historical", "gr"),
]
@pytest.mark.parametrize(
    "source_id,variable_id,experiment_id,grid_label",
    xfail_wrapper(full_specs(), expected_failures),
)
def test_check_dim_coord_values(source_id, variable_id, experiment_id, grid_label):
    """Sanity-check dimensions and lon/lat coordinates of intake-loaded data."""
    ds, cat = data(source_id, variable_id, experiment_id, grid_label, True)
    if ds is None:
        pytest.skip(
            f"No data found for {source_id}|{variable_id}|{experiment_id}|{grid_label}"
        )
    # Only a subset of the dims is checked for now.
    # TODO: add the bounds once they are cleaned up.
    dims_to_check = [d for d in ("x", "y", "lev", "time") if d in ds.dims]
    for dim_name in dims_to_check:
        diagnose_doubles(ds[dim_name].load().data)
        assert len(ds[dim_name]) == len(np.unique(ds[dim_name]))
        # NaN / monotonicity tests do not make sense for decoded time values.
        if dim_name != "time":
            assert ~np.all(np.isnan(ds[dim_name]))
            assert np.all(ds[dim_name].diff(dim_name) >= 0)
    # Longitude convention: [0, 360]; latitude: [-90, 90].
    assert ds.lon.min().load() >= 0
    assert ds.lon.max().load() <= 360
    if "lon_bounds" in ds.variables:
        assert ds.lon_bounds.min().load() >= 0
        assert ds.lon_bounds.max().load() <= 360
    assert ds.lat.min().load() >= -90
    assert ds.lat.max().load() <= 90
    # lon and lat must be 2d (curvilinear) coordinate arrays.
    assert len(ds.lon.shape) == 2
    assert len(ds.lat.shape) == 2
############################### Specific Bound Coords Test ###############################
# Known-bad combinations for the bounds/vertex test below; `xfail_wrapper`
# marks these parameter tuples as expected failures.
expected_failures = [
    ("AWI-ESM-1-1-LR", "thetao", "historical", "gn"),
    ("AWI-ESM-1-1-MR", "thetao", "historical", "gn"),
    ("AWI-ESM-1-1-MR", "thetao", "ssp585", "gn"),
    ("AWI-CM-1-1-MR", "thetao", "historical", "gn"),
    ("AWI-CM-1-1-MR", "thetao", "ssp585", "gn"),
    ("CESM2-FV2", "thetao", "historical", "gn"),
    ("FGOALS-f3-L", "thetao", "historical", "gn"),
    ("FGOALS-f3-L", "thetao", "ssp585", "gn"),
    ("FGOALS-g3", "thetao", "historical", "gn"),
    ("FGOALS-g3", "thetao", "ssp585", "gn"),
    ("NorESM2-MM", "thetao", "historical", "gn"),
    ("NorESM2-MM", "thetao", "historical", "gr"),
    ("IPSL-CM6A-LR", "thetao", "historical", "gn"),
    ("IPSL-CM6A-LR", "o2", "historical", "gn"),
]
@pytest.mark.parametrize(
    "source_id,variable_id,experiment_id,grid_label",
    xfail_wrapper(full_specs(), expected_failures),
)
def test_check_bounds_verticies(source_id, variable_id, experiment_id, grid_label):
    """Check that cell bound/vertex coordinates exist and are consistently ordered."""
    ds, cat = data(source_id, variable_id, experiment_id, grid_label, True)
    if ds is None:
        pytest.skip(
            f"No data found for {source_id}|{variable_id}|{experiment_id}|{grid_label}"
        )
    # The vertex dimension, when present, must be exactly [0, 1, 2, 3].
    if "vertex" in ds.dims:
        np.testing.assert_allclose(ds.vertex.data, np.arange(4))
    #### Check for existing bounds and verticies
    for co in ["lon_bounds", "lat_bounds", "lon_verticies", "lat_verticies"]:
        assert co in ds.coords
        # make sure that all other dims are eliminated from the bounds:
        # only x/y should remain besides bnds/vertex.
        assert (set(ds[co].dims) - set(["bnds", "vertex"])) == set(["x", "y"])
    #### Check the order of the vertex
    # I'll only check these south of the Arctic for now. Up there
    # things are still weird.
    test_ds = ds.sel(y=slice(-40, 40))
    # Differences between paired vertices: lon should mostly increase from
    # vertex 0->3 and 1->2, lat from vertex 0->1 and 3->2.
    vertex_lon_diff1 = test_ds.lon_verticies.isel(
        vertex=3
    ) - test_ds.lon_verticies.isel(vertex=0)
    vertex_lon_diff2 = test_ds.lon_verticies.isel(
        vertex=2
    ) - test_ds.lon_verticies.isel(vertex=1)
    vertex_lat_diff1 = test_ds.lat_verticies.isel(
        vertex=1
    ) - test_ds.lat_verticies.isel(vertex=0)
    vertex_lat_diff2 = test_ds.lat_verticies.isel(
        vertex=2
    ) - test_ds.lat_verticies.isel(vertex=3)
    # Tolerate up to 3 * n_y non-positive lon differences over the 2d field.
    for vertex_diff in [vertex_lon_diff1, vertex_lon_diff2]:
        assert (vertex_diff <= 0).sum() <= (3 * len(vertex_diff.y))
        # allowing for a few rows to be negative
    # Tolerate up to 5 * n_x non-positive lat differences over the 2d field.
    for vertex_diff in [vertex_lat_diff1, vertex_lat_diff2]:
        assert (vertex_diff <= 0).sum() <= (5 * len(vertex_diff.x))
        # allowing for a few rows to be negative
    # This is just to make sure that not the majority of values is negative or zero.
    # Same for the bounds:
    lon_diffs = test_ds.lon_bounds.diff("bnds")
    lat_diffs = test_ds.lat_bounds.diff("bnds")
    assert (lon_diffs <= 0).sum() <= (5 * len(lon_diffs.y))
    assert (lat_diffs <= 0).sum() <= (5 * len(lat_diffs.y))
################################# xgcm grid specific tests ########################################
# Known-bad combinations for the staggered-grid test below; `xfail_wrapper`
# marks these parameter tuples as expected failures.
expected_failures = [
    ("AWI-ESM-1-1-LR", "thetao", "historical", "gn"),
    ("AWI-ESM-1-1-MR", "thetao", "historical", "gn"),
    ("AWI-ESM-1-1-MR", "thetao", "ssp585", "gn"),
    ("AWI-CM-1-1-MR", "thetao", "historical", "gn"),
    ("AWI-CM-1-1-MR", "thetao", "ssp585", "gn"),
    ("CESM2-FV2", "thetao", "historical", "gn"),
    ("CMCC-CM2-SR5", "thetao", "historical", "gn"),
    ("CMCC-CM2-SR5", "thetao", "ssp585", "gn"),
    ("FGOALS-f3-L", "thetao", "historical", "gn"),
    ("FGOALS-f3-L", "thetao", "ssp585", "gn"),
    ("FGOALS-g3", "thetao", "historical", "gn"),
    ("FGOALS-g3", "thetao", "ssp585", "gn"),
    ("MPI-ESM-1-2-HAM", "thetao", "historical", "gn"),
    ("MPI-ESM-1-2-HAM", "o2", "historical", "gn"),
    ("NorESM2-MM", "thetao", "historical", "gn"),
    ("NorESM2-MM", "thetao", "historical", "gr"),
    ("IPSL-CM6A-LR", "thetao", "historical", "gn"),
    ("IPSL-CM6A-LR", "o2", "historical", "gn"),
]
@pytest.mark.parametrize(
    "source_id,variable_id,experiment_id,grid_label",
    xfail_wrapper(full_specs(), expected_failures),
)
def test_check_grid(source_id, variable_id, experiment_id, grid_label):
    """Rudimentary check that the staggered xgcm grid can be constructed."""
    ds, cat = data(source_id, variable_id, experiment_id, grid_label, True)
    if ds is None:
        pytest.skip(
            f"No data found for {source_id}|{variable_id}|{experiment_id}|{grid_label}"
        )
    staggered_grid, ds_staggered = combine_staggered_grid(ds, recalculate_metrics=True)
    print(ds_staggered)
    assert ds_staggered is not None
    # vertical bounds must carry the bounds dimension when a depth axis exists
    if "lev" in ds_staggered.dims:
        assert "bnds" in ds_staggered.lev_bounds.dims
    # every recalculated distance metric must be present as a coordinate
    expected_metrics = [
        f"d{ax.lower()}{suffix}"
        for ax in ("X", "Y")
        for suffix in ("_t", "_gx", "_gy", "_gxgy")
    ]
    for coord_name in expected_metrics:
        assert coord_name in list(ds_staggered.coords)
# TODO: Include actual test to combine variables
| [
"cmip6_preprocessing.tests.cloud_test_utils.full_specs",
"cmip6_preprocessing.tests.cloud_test_utils.all_models",
"numpy.unique",
"cmip6_preprocessing.grids.combine_staggered_grid",
"pytest.importorskip",
"cmip6_preprocessing.tests.cloud_test_utils.data",
"numpy.isnan",
"pytest.skip",
"numpy.arange"... | [((503, 531), 'pytest.importorskip', 'pytest.importorskip', (['"""gcsfs"""'], {}), "('gcsfs')\n", (522, 531), False, 'import pytest\n'), ((1622, 1684), 'cmip6_preprocessing.tests.cloud_test_utils.data', 'data', (['source_id', 'variable_id', 'experiment_id', 'grid_label', '(False)'], {}), '(source_id, variable_id, experiment_id, grid_label, False)\n', (1626, 1684), False, 'from cmip6_preprocessing.tests.cloud_test_utils import full_specs, xfail_wrapper, all_models, data, diagnose_doubles\n'), ((3826, 3887), 'cmip6_preprocessing.tests.cloud_test_utils.data', 'data', (['source_id', 'variable_id', 'experiment_id', 'grid_label', '(True)'], {}), '(source_id, variable_id, experiment_id, grid_label, True)\n', (3830, 3887), False, 'from cmip6_preprocessing.tests.cloud_test_utils import full_specs, xfail_wrapper, all_models, data, diagnose_doubles\n'), ((6015, 6076), 'cmip6_preprocessing.tests.cloud_test_utils.data', 'data', (['source_id', 'variable_id', 'experiment_id', 'grid_label', '(True)'], {}), '(source_id, variable_id, experiment_id, grid_label, True)\n', (6019, 6076), False, 'from cmip6_preprocessing.tests.cloud_test_utils import full_specs, xfail_wrapper, all_models, data, diagnose_doubles\n'), ((9190, 9251), 'cmip6_preprocessing.tests.cloud_test_utils.data', 'data', (['source_id', 'variable_id', 'experiment_id', 'grid_label', '(True)'], {}), '(source_id, variable_id, experiment_id, grid_label, True)\n', (9194, 9251), False, 'from cmip6_preprocessing.tests.cloud_test_utils import full_specs, xfail_wrapper, all_models, data, diagnose_doubles\n'), ((9494, 9546), 'cmip6_preprocessing.grids.combine_staggered_grid', 'combine_staggered_grid', (['ds'], {'recalculate_metrics': '(True)'}), '(ds, recalculate_metrics=True)\n', (9516, 9546), False, 'from cmip6_preprocessing.grids import combine_staggered_grid\n'), ((1713, 1811), 'pytest.skip', 'pytest.skip', (['f"""No data found for {source_id}|{variable_id}|{experiment_id}|{grid_label}"""'], {}), "(\n f'No 
data found for {source_id}|{variable_id}|{experiment_id}|{grid_label}'\n )\n", (1724, 1811), False, 'import pytest\n'), ((1304, 1316), 'cmip6_preprocessing.tests.cloud_test_utils.full_specs', 'full_specs', ([], {}), '()\n', (1314, 1316), False, 'from cmip6_preprocessing.tests.cloud_test_utils import full_specs, xfail_wrapper, all_models, data, diagnose_doubles\n'), ((3916, 4014), 'pytest.skip', 'pytest.skip', (['f"""No data found for {source_id}|{variable_id}|{experiment_id}|{grid_label}"""'], {}), "(\n f'No data found for {source_id}|{variable_id}|{experiment_id}|{grid_label}'\n )\n", (3927, 4014), False, 'import pytest\n'), ((3524, 3536), 'cmip6_preprocessing.tests.cloud_test_utils.full_specs', 'full_specs', ([], {}), '()\n', (3534, 3536), False, 'from cmip6_preprocessing.tests.cloud_test_utils import full_specs, xfail_wrapper, all_models, data, diagnose_doubles\n'), ((6105, 6203), 'pytest.skip', 'pytest.skip', (['f"""No data found for {source_id}|{variable_id}|{experiment_id}|{grid_label}"""'], {}), "(\n f'No data found for {source_id}|{variable_id}|{experiment_id}|{grid_label}'\n )\n", (6116, 6203), False, 'import pytest\n'), ((5880, 5892), 'cmip6_preprocessing.tests.cloud_test_utils.full_specs', 'full_specs', ([], {}), '()\n', (5890, 5892), False, 'from cmip6_preprocessing.tests.cloud_test_utils import full_specs, xfail_wrapper, all_models, data, diagnose_doubles\n'), ((9280, 9378), 'pytest.skip', 'pytest.skip', (['f"""No data found for {source_id}|{variable_id}|{experiment_id}|{grid_label}"""'], {}), "(\n f'No data found for {source_id}|{variable_id}|{experiment_id}|{grid_label}'\n )\n", (9291, 9378), False, 'import pytest\n'), ((9067, 9079), 'cmip6_preprocessing.tests.cloud_test_utils.full_specs', 'full_specs', ([], {}), '()\n', (9077, 9079), False, 'from cmip6_preprocessing.tests.cloud_test_utils import full_specs, xfail_wrapper, all_models, data, diagnose_doubles\n'), ((580, 592), 'cmip6_preprocessing.tests.cloud_test_utils.all_models', 'all_models', ([], 
{}), '()\n', (590, 592), False, 'from cmip6_preprocessing.tests.cloud_test_utils import full_specs, xfail_wrapper, all_models, data, diagnose_doubles\n'), ((6296, 6308), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (6305, 6308), True, 'import numpy as np\n'), ((2174, 2191), 'numpy.unique', 'np.unique', (['ds[di]'], {}), '(ds[di])\n', (2183, 2191), True, 'import numpy as np\n'), ((4377, 4394), 'numpy.unique', 'np.unique', (['ds[di]'], {}), '(ds[di])\n', (4386, 4394), True, 'import numpy as np\n'), ((2303, 2319), 'numpy.isnan', 'np.isnan', (['ds[di]'], {}), '(ds[di])\n', (2311, 2319), True, 'import numpy as np\n'), ((4506, 4522), 'numpy.isnan', 'np.isnan', (['ds[di]'], {}), '(ds[di])\n', (4514, 4522), True, 'import numpy as np\n')] |
__author__ = "<NAME>"
#encoding="utf-8"
import pickle
import numpy as np
import os
import cv2
import matplotlib.pyplot as plt
def unpickle(file):
    """Load and return one pickled object from the file at path *file*.

    :param file: path to the pickle file
    :return: the deserialized object
    """
    # encoding='bytes' keeps byte-string keys intact when reading pickles
    # written by Python 2 (it has no effect on Python-3 pickles).
    with open(file, 'rb') as fo:
        # NOTE: the original shadowed the builtin `dict` and re-imported
        # pickle locally; both removed (pickle is imported at module level).
        return pickle.load(fo, encoding='bytes')
# Load the image paths and the raw attractiveness scores.
paths_labels = unpickle("FACES_P_S")
images_path = paths_labels['images_path']
labels = paths_labels['scores_path']
# Score-rank pre-processing: snap each raw score to the nearest half-point rank.
labels = [round(label, 2) for label in labels]
index = [0, 1, 2, 3, 4, 5, 6, 7, 8]
score_rank = [1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0]
index_scores_dict = dict(list(zip(score_rank, index)))
redefined_labels = []
redefined_labels_index = []
for label in labels:
    # pick the rank value closest to the raw score
    a = []
    for i in range(len(score_rank)):
        a.append(abs(label - score_rank[i]))
    label = score_rank[np.argmin(a)]
    redefined_labels.append(label)
for i in redefined_labels:
    redefined_labels_index.append(index_scores_dict[i])
# Load all images, convert BGR -> RGB and resize to the Inception input size.
images_array = np.zeros((5500, 299, 299, 3), dtype=np.float32)
for i in range(5500):
    img = cv2.imread(images_path[i])
    im = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    im = cv2.resize(im, (299, 299))
    # BUG FIX: the original did `images_array[0, :, :, :] += im`, which
    # accumulated every image into slot 0 and left slots 1..5499 as zeros.
    images_array[i, :, :, :] = im
    print("finis {}/5500".format(i))
images_array = images_array / 255.0
# First 100 samples are held out for validation.
validation_images = images_array[:100]
validation_scores = redefined_labels_index[:100]
import tensorflow as tf
import os
import tensorflow.contrib.slim as slim
import tensorflow.contrib.slim.python.slim.nets.inception_v3 as inception_v3
# Folder where the fine-tuned checkpoints will be saved.
train_file="D:/BaiduYunDownload/python_exe/dataset/scut_faces/path/to/saved"
if not os.path.exists(train_file):
    os.makedirs(train_file)
# Pre-trained Inception-V3 checkpoint used as the starting point.
ckpt_file="D:/BaiduYunDownload/python_exe/models/convs/inception/inception_v3.ckpt"
# parameters used for training
learning_rate = 0.0001
steps = 300
batch = 32
classes = 8
# only the final fully-connected layers are trained
checkpoint_exclude_scopes='InceptionV3/Logits,InceptionV3/AuxLogits'
trainable_scopes='InceptionV3/Logits,InceptionV3/AuxLogits'
# load all the frozen parameters that must not be updated
def get_tuned_variables():
    """Return the pre-trained model variables to restore from the checkpoint.

    Every variable whose scope appears in ``checkpoint_exclude_scopes`` is
    skipped (those layers are re-trained from scratch).
    """
    excluded_scopes = {scope.strip() for scope in checkpoint_exclude_scopes.split(",")}
    return [
        var
        for var in slim.get_model_variables()
        if not any(var.op.name.startswith(scope) for scope in excluded_scopes)
    ]
#获得所有需要训练的var
def get_trainable_variables():
    """Return a flat list of the variables to train (those in ``trainable_scopes``)."""
    scopes = [scope.strip() for scope in trainable_scopes.split(",")]
    variables_to_train = []
    for scope in scopes:
        variables = tf.get_collection(
            tf.GraphKeys.TRAINABLE_VARIABLES, scope
        )
        # BUG FIX: tf.get_collection returns a list, so merge with extend();
        # the original used append(), producing a nested list-of-lists.
        variables_to_train.extend(variables)
    return variables_to_train
# load the data and operate on it
# shuffle the data
# state= np.random.get_state()
# np.random.shuffle(images_array)
# np.random.set_state(state)X
# np.random.shuffle(labels)
# define the network input and label placeholders
X=tf.placeholder(dtype=tf.float32,shape=[None,299,299,3],name="input_image")
scores=tf.placeholder(dtype=tf.float32,shape=[None],name="scores")
# build the Inception-V3 graph with the standard slim argument scope
with slim.arg_scope(inception_v3.inception_v3_arg_scope()):
    logits,_=inception_v3.inception_v3(
        X,num_classes=classes)
# get the trainable variables
trainable_variables=get_trainable_variables()
# use softmax cross-entropy
'''定义losses'''
# NOTE(review): the loss is built from the constant, full label list
# `redefined_labels_index`, not from the `scores` placeholder — the
# placeholder is never wired into the graph; confirm intent.
tf.losses.softmax_cross_entropy(tf.one_hot(redefined_labels_index,classes),
                                logits,weights=1.0)
'''定义优化'''
# RMSProp on the total registered loss
train_step = tf.train.RMSPropOptimizer(learning_rate).minimize(
    tf.losses.get_total_loss()
)
'''计算正确率'''
with tf.name_scope('evaluation'):
    # NOTE(review): this compares the batch predictions against the whole
    # label list regardless of batch size — verify shapes broadcast as intended.
    correct_prediction = tf.equal(tf.argmax(logits,1),redefined_labels_index
    )
    evaluation_step=tf.reduce_mean(tf.cast(
        correct_prediction,tf.float32
    ))
# define the function that restores the pre-trained parameters
load_fn = slim.assign_from_checkpoint_fn(
    ckpt_file,
    get_tuned_variables(),
    ignore_missing_vars=True
)
saver=tf.train.Saver()
with tf.Session() as sess:
    # initialize all variables, then overwrite the tuned ones from the checkpoint
    init_op=tf.global_variables_initializer()
    sess.run(init_op)
    print("loading tuned variables from {}".format(ckpt_file))
    load_fn(sess)
    # start training
    start = 0
    end = batch
    for i in range(steps):
        sess.run(train_step,feed_dict={
            X:images_array[start:end],
            scores:redefined_labels_index[start:end]
        })
        # output log
        if i%30==0 or i+1==steps:
            saver.save(sess,train_file,global_step=i)
            # NOTE(review): validation is sliced with the *training* cursor
            # [start:end], but the validation arrays only hold 100 samples —
            # for start >= 100 this evaluates an empty batch; confirm intent.
            validation_accuracy = sess.run(evaluation_step,feed_dict={
                X:validation_images[start:end],scores:validation_scores[start:end]
            })
            print("Step {},validation accuracy is {}%".format(i,validation_accuracy*100))
        # advance the batch window, wrapping at the dataset size (5500)
        start=end
        if start == 5500:
            start = 0
        end = start+batch
        if end>5500:
            end = 5500
# NOTE(review): tf.app.run() expects a main(); all work above already ran at
# import time, so this call is effectively a no-op entry point.
if __name__ == '__main__':
    tf.app.run()
| [
"tensorflow.contrib.slim.python.slim.nets.inception_v3.inception_v3_arg_scope",
"tensorflow.cast",
"tensorflow.app.run",
"os.path.exists",
"tensorflow.losses.get_total_loss",
"tensorflow.placeholder",
"tensorflow.Session",
"numpy.argmin",
"tensorflow.one_hot",
"pickle.load",
"cv2.cvtColor",
"c... | [((890, 937), 'numpy.zeros', 'np.zeros', (['(5500, 299, 299, 3)'], {'dtype': 'np.float32'}), '((5500, 299, 299, 3), dtype=np.float32)\n', (898, 937), True, 'import numpy as np\n'), ((2825, 2904), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.float32', 'shape': '[None, 299, 299, 3]', 'name': '"""input_image"""'}), "(dtype=tf.float32, shape=[None, 299, 299, 3], name='input_image')\n", (2839, 2904), True, 'import tensorflow as tf\n'), ((2907, 2968), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.float32', 'shape': '[None]', 'name': '"""scores"""'}), "(dtype=tf.float32, shape=[None], name='scores')\n", (2921, 2968), True, 'import tensorflow as tf\n'), ((3822, 3838), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (3836, 3838), True, 'import tensorflow as tf\n'), ((964, 990), 'cv2.imread', 'cv2.imread', (['images_path[i]'], {}), '(images_path[i])\n', (974, 990), False, 'import cv2\n'), ((998, 1034), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (1010, 1034), False, 'import cv2\n'), ((1043, 1069), 'cv2.resize', 'cv2.resize', (['im', '(299, 299)'], {}), '(im, (299, 299))\n', (1053, 1069), False, 'import cv2\n'), ((1490, 1516), 'os.path.exists', 'os.path.exists', (['train_file'], {}), '(train_file)\n', (1504, 1516), False, 'import os\n'), ((1522, 1545), 'os.makedirs', 'os.makedirs', (['train_file'], {}), '(train_file)\n', (1533, 1545), False, 'import os\n'), ((2027, 2053), 'tensorflow.contrib.slim.get_model_variables', 'slim.get_model_variables', ([], {}), '()\n', (2051, 2053), True, 'import tensorflow.contrib.slim as slim\n'), ((3041, 3090), 'tensorflow.contrib.slim.python.slim.nets.inception_v3.inception_v3', 'inception_v3.inception_v3', (['X'], {'num_classes': 'classes'}), '(X, num_classes=classes)\n', (3066, 3090), True, 'import tensorflow.contrib.slim.python.slim.nets.inception_v3 as inception_v3\n'), ((3236, 3279), 'tensorflow.one_hot', 'tf.one_hot', 
(['redefined_labels_index', 'classes'], {}), '(redefined_labels_index, classes)\n', (3246, 3279), True, 'import tensorflow as tf\n'), ((3411, 3437), 'tensorflow.losses.get_total_loss', 'tf.losses.get_total_loss', ([], {}), '()\n', (3435, 3437), True, 'import tensorflow as tf\n'), ((3458, 3485), 'tensorflow.name_scope', 'tf.name_scope', (['"""evaluation"""'], {}), "('evaluation')\n", (3471, 3485), True, 'import tensorflow as tf\n'), ((3845, 3857), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (3855, 3857), True, 'import tensorflow as tf\n'), ((3879, 3912), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (3910, 3912), True, 'import tensorflow as tf\n'), ((4766, 4778), 'tensorflow.app.run', 'tf.app.run', ([], {}), '()\n', (4776, 4778), True, 'import tensorflow as tf\n'), ((215, 248), 'pickle.load', 'pickle.load', (['fo'], {'encoding': '"""bytes"""'}), "(fo, encoding='bytes')\n", (226, 248), False, 'import pickle\n'), ((742, 754), 'numpy.argmin', 'np.argmin', (['a'], {}), '(a)\n', (751, 754), True, 'import numpy as np\n'), ((2503, 2561), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.TRAINABLE_VARIABLES', 'scope'], {}), '(tf.GraphKeys.TRAINABLE_VARIABLES, scope)\n', (2520, 2561), True, 'import tensorflow as tf\n'), ((2988, 3025), 'tensorflow.contrib.slim.python.slim.nets.inception_v3.inception_v3_arg_scope', 'inception_v3.inception_v3_arg_scope', ([], {}), '()\n', (3023, 3025), True, 'import tensorflow.contrib.slim.python.slim.nets.inception_v3 as inception_v3\n'), ((3356, 3396), 'tensorflow.train.RMSPropOptimizer', 'tf.train.RMSPropOptimizer', (['learning_rate'], {}), '(learning_rate)\n', (3381, 3396), True, 'import tensorflow as tf\n'), ((3521, 3541), 'tensorflow.argmax', 'tf.argmax', (['logits', '(1)'], {}), '(logits, 1)\n', (3530, 3541), True, 'import tensorflow as tf\n'), ((3635, 3674), 'tensorflow.cast', 'tf.cast', (['correct_prediction', 'tf.float32'], {}), '(correct_prediction, 
tf.float32)\n', (3642, 3674), True, 'import tensorflow as tf\n')] |
import GridCal.Engine as gc
from ortools.linear_solver import pywraplp
import numpy as np
import pandas as pd
from scipy.sparse.csc import csc_matrix
def lpDot(mat, arr):
    """
    CSC matrix-vector or CSC matrix-matrix dot product (A x b).

    Works with dense operands of object dtype (e.g. solver variables),
    which a plain scipy product cannot handle.

    :param mat: CSC sparse matrix (A)
    :param arr: dense vector or matrix of object type (b)
    :return: vector or matrix result of the product
    """
    n_rows, n_cols = mat.shape

    # the inner dimensions must agree
    assert (n_cols == arr.shape[0])

    # make sure we are operating on a CSC-format matrix
    csc = mat if mat.format == 'csc' else csc_matrix(mat)

    if len(arr.shape) == 1:
        # sparse matrix x dense vector
        res = np.zeros(n_rows, dtype=arr.dtype)
        for col in range(n_cols):
            first, last = csc.indptr[col], csc.indptr[col + 1]
            for row, val in zip(csc.indices[first:last], csc.data[first:last]):
                res[row] += val * arr[col]
    else:
        # sparse matrix x dense matrix, processed column by column
        n_out_cols = arr.shape[1]
        res = np.zeros((n_rows, n_out_cols), dtype=arr.dtype)
        for k in range(n_out_cols):
            for col in range(n_cols):
                first, last = csc.indptr[col], csc.indptr[col + 1]
                for row, val in zip(csc.indices[first:last], csc.data[first:last]):
                    res[row, k] += val * arr[col, k]
    return res
def lpExpand(mat, arr):
    """
    Expand (scatter) the entries of *arr* through the sparsity pattern of *mat*.

    For every stored entry (i, j) of the CSC matrix, the output row i is
    assigned the value of arr[j] (the stored matrix value itself is ignored).

    :param mat: CSC sparse matrix defining the expansion pattern
    :param arr: dense vector or matrix of object type
    :return: expanded vector or matrix
    """
    n_rows, n_cols = mat.shape

    # the inner dimensions must agree
    assert (n_cols == arr.shape[0])

    # make sure we are operating on a CSC-format matrix
    csc = mat if mat.format == 'csc' else csc_matrix(mat)

    if len(arr.shape) == 1:
        # vector expansion
        res = np.zeros(n_rows, dtype=arr.dtype)
        for col in range(n_cols):
            first, last = csc.indptr[col], csc.indptr[col + 1]
            for row in csc.indices[first:last]:
                res[row] = arr[col]
    else:
        # matrix expansion, processed column by column
        n_out_cols = arr.shape[1]
        res = np.zeros((n_rows, n_out_cols), dtype=arr.dtype)
        for k in range(n_out_cols):
            for col in range(n_cols):
                first, last = csc.indptr[col], csc.indptr[col + 1]
                for row in csc.indices[first:last]:
                    res[row, k] = arr[col, k]
    return res
def get_inter_areas_branches(nbr, F, T, buses_areas_1, buses_areas_2):
    """
    Get the inter-area branches.

    :param nbr: number of branches
    :param F: array of "from" bus indices per branch
    :param T: array of "to" bus indices per branch
    :param buses_areas_1: bus indices of area 1 (from)
    :param buses_areas_2: bus indices of area 2 (to)
    :return: List of (branch index, flow sense w.r.t the area exchange)
    """
    # Build membership sets once: O(1) lookups instead of scanning the
    # area arrays for every branch.
    set_1 = set(buses_areas_1)
    set_2 = set(buses_areas_2)
    # NOTE: the original annotated this as List[Tuple[int, float]] without
    # importing those names from typing; the broken annotation is removed.
    lst = list()
    for k in range(nbr):
        if F[k] in set_1 and T[k] in set_2:
            lst.append((k, 1.0))  # oriented area 1 -> area 2
        elif F[k] in set_2 and T[k] in set_1:
            lst.append((k, -1.0))  # oriented area 2 -> area 1
    return lst
def get_generators_connectivity(Cgen, buses_in_a1, buses_in_a2):
    """
    Classify the generators by the area of the bus they connect to.

    :param Cgen: bus-generator connectivity matrix in CSC format
    :param buses_in_a1: bus indices belonging to area 1
    :param buses_in_a2: bus indices belonging to area 2
    :return: (gens in area 1, gens in area 2, gens elsewhere), each a list
             of (bus idx, gen idx) tuples
    """
    assert isinstance(Cgen, csc_matrix)

    gens_in_a1 = list()
    gens_in_a2 = list()
    gens_out = list()
    for gen_idx in range(Cgen.shape[1]):  # one CSC column per generator
        first, last = Cgen.indptr[gen_idx], Cgen.indptr[gen_idx + 1]
        for bus_idx in Cgen.indices[first:last]:
            if bus_idx in buses_in_a1:
                gens_in_a1.append((bus_idx, gen_idx))
            elif bus_idx in buses_in_a2:
                gens_in_a2.append((bus_idx, gen_idx))
            else:
                gens_out.append((bus_idx, gen_idx))
    return gens_in_a1, gens_in_a2, gens_out
def compose_branches_df(num, solver_power_vars, overloads1, overloads2):
    """
    Build a per-branch results DataFrame.

    :param num: numerical circuit (provides branch names, ratings and Sbase)
    :param solver_power_vars: solver variables with the branch power flow (p.u.)
    :param overloads1: from-to overload slack variables
    :param overloads2: to-from overload slack variables
    :return: DataFrame with name, power (MW), loading and slack values
    """
    data = list()
    for k in range(num.nbr):
        val = solver_power_vars[k].solution_value() * num.Sbase
        row = [
            num.branch_data.branch_names[k],
            val,
            # BUG FIX: the original divided by the global `nc.Rates`,
            # ignoring the `num` circuit passed as parameter.
            val / num.Rates[k],
            overloads1[k].solution_value(),
            overloads2[k].solution_value()
        ]
        data.append(row)
    cols = ['Name', 'Power (MW)', 'Loading', 'SlackF', 'SlackT']
    return pd.DataFrame(data, columns=cols)
def compose_generation_df(num, generation, dgen_arr, Pgen_arr):
    """
    Build a per-generator results DataFrame (fixed generators are skipped).

    :param num: numerical circuit (provides Sbase)
    :param generation: generation solver variables (plain floats for the
                       generators that were fixed and not optimized)
    :param dgen_arr: generation-delta solver variables
    :param Pgen_arr: generation set points (p.u.)
    :return: DataFrame with bounds, solution, set point and delta in MW
    """
    data = list()
    for var, dgen, pgen in zip(generation, dgen_arr, Pgen_arr):
        if not isinstance(var, float):  # fixed generators are stored as floats
            # BUG FIX: the original scaled with the global `nc.Sbase`,
            # ignoring the `num` circuit passed as parameter.
            data.append([str(var),
                         '',
                         var.Lb() * num.Sbase,
                         var.solution_value() * num.Sbase,
                         pgen * num.Sbase,
                         dgen.solution_value() * num.Sbase,
                         var.Ub() * num.Sbase])
    cols = ['Name', 'Bus', 'LB', 'Power (MW)', 'Set (MW)', 'Delta (MW)', 'UB']
    return pd.DataFrame(data=data, columns=cols)
# ----------------------------------------------------------------------------------------------------------------------
# Net transfer capacity optimization program 2021
# ----------------------------------------------------------------------------------------------------------------------
# fname = '/home/santi/Documentos/Git/GitHub/GridCal/Grids_and_profiles/grids/PGOC_6bus(from .raw).gridcal'
# fname = '/home/santi/Documentos/Git/GitHub/GridCal/Grids_and_profiles/grids/Grid4Bus-OPF.gridcal'
# fname = '/home/santi/Documentos/Git/GitHub/GridCal/Grids_and_profiles/grids/IEEE 118 Bus - ntc_areas.gridcal'
fname = '/home/santi/Documentos/Git/GitHub/GridCal/Grids_and_profiles/grids/IEEE14 - ntc areas.gridcal'
# fname = '/home/santi/Documentos/Git/GitHub/GridCal/Grids_and_profiles/grids/PGOC_6bus_modNTC.gridcal'
# fname = r'C:\Users\penversa\Git\Github\GridCal\Grids_and_profiles\grids\IEEE 118 Bus - ntc_areas.gridcal'
# fname = r'C:\Users\penversa\Git\Github\GridCal\Grids_and_profiles\grids\IEEE14 - ntc areas.gridcal'
# fname = r'D:\ReeGit\github\GridCal\Grids_and_profiles\grids\PGOC_6bus_modNTC.gridcal'

# Open the grid file and compile the snapshot for optimal power flow.
grid = gc.FileOpen(fname).open()
nc = gc.compile_snapshot_opf_circuit(grid)
print('Problem loaded:')
print('\tNodes:', nc.nbus)
print('\tBranches:', nc.nbr)
# NOTE: the original had a stray no-op expression statement
# `gc.compile_snapshot_opf_circuit` here; it has been removed.
# compute information about areas --------------------------------------------------------------------------------------
area_from_idx = 1
area_to_idx = 0
areas = grid.get_bus_area_indices()

# get the area bus indices (mapped to the compiled bus ordering)
areas = areas[nc.original_bus_idx]
a1 = np.where(areas == area_from_idx)[0]
a2 = np.where(areas == area_to_idx)[0]

# get the inter-area branches and their sign
inter_area_branches = get_inter_areas_branches(nc.nbr, nc.branch_data.F, nc.branch_data.T, a1, a2)

# time index
t = 0

# declare the solver ---------------------------------------------------------------------------------------------------
solver = pywraplp.Solver.CreateSolver('CBC')
# create generation delta functions ------------------------------------------------------------------------------------
Cgen = nc.generator_data.C_bus_gen.tocsc()
gen_cost = nc.generator_data.generator_cost[:, t]
Pgen = nc.generator_data.generator_p[:, t] / nc.Sbase  # set points in p.u.
Pmax = nc.generator_data.generator_installed_p / nc.Sbase  # installed capacity in p.u.

# split the generators by area membership of their bus
gens1, gens2, gens_out = get_generators_connectivity(Cgen, a1, a2)
generation = np.zeros(nc.generator_data.ngen, dtype=object)
dgen1 = list()
dgen2 = list()
delta = list()
generation1 = list()
generation2 = list()
Pgen1 = list()
Pgen2 = list()

# area 1 generators may only move their output up (positive delta)
for bus_idx, gen_idx in gens1:
    name = 'Gen_up_bus{}'.format(bus_idx)
    generation[gen_idx] = solver.NumVar(0, Pmax[gen_idx], name)
    dg = solver.NumVar(0, Pmax[gen_idx] - Pgen[gen_idx], name + '_delta')
    solver.Add(dg == generation[gen_idx] - Pgen[gen_idx])
    dgen1.append(dg)
    delta.append(dg)
    generation1.append(generation[gen_idx])
    Pgen1.append(Pgen[gen_idx])

# area 2 generators may only move their output down (negative delta)
for bus_idx, gen_idx in gens2:
    name = 'Gen_down_bus{}'.format(bus_idx)
    generation[gen_idx] = solver.NumVar(0, Pmax[gen_idx], name)
    dg = solver.NumVar(-Pgen[gen_idx], 0, name + '_delta')
    solver.Add(dg == generation[gen_idx] - Pgen[gen_idx])
    dgen2.append(dg)
    delta.append(dg)
    generation2.append(generation[gen_idx])
    Pgen2.append(Pgen[gen_idx])

# set the generation in the non inter-area ones
for bus_idx, gen_idx in gens_out:
    generation[gen_idx] = Pgen[gen_idx]

# the up-shift in area 1 must balance the down-shift in area 2 (up to a slack)
area_balance_slack = solver.NumVar(0, 99999, 'Area_slack')
solver.Add(solver.Sum(dgen1) + solver.Sum(dgen2) == area_balance_slack, 'Area equality')
# create the angles ----------------------------------------------------------------------------------------------------
Cf = nc.Cf.tocsc()
Ct = nc.Ct.tocsc()
# one voltage-angle variable per bus, bounded roughly to [-2*pi, 2*pi]
angles = np.array([solver.NumVar(-6.28, 6.28, 'theta' + str(i)) for i in range(nc.nbus)])

# angles_f = lpExpand(Cf, angles)
# angles_t = lpExpand(Ct, angles)

# Set the slack angles = 0 ---------------------------------------------------------------------------------------------
for i in nc.vd:
    solver.Add(angles[i] == 0, "Slack_angle_zero")

# create the phase shift angles ----------------------------------------------------------------------------------------
tau = dict()
for i in range(nc.branch_data.nbr):
    if nc.branch_data.control_mode[i] == gc.TransformerControlType.Pt: # is a phase shifter
        tau[i] = solver.NumVar(nc.branch_data.theta_min[i], nc.branch_data.theta_max[i], 'tau' + str(i))

# define the power injection -------------------------------------------------------------------------------------------
gen_injections = lpExpand(Cgen, generation)
load_fixed_injections = nc.load_data.get_injections_per_bus()[:, t].real / nc.Sbase  # with sign already
Pinj = gen_injections + load_fixed_injections

# nodal balance --------------------------------------------------------------------------------------------------------
# power balance in the non slack nodes: eq.13
node_balance = lpDot(nc.Bbus, angles)
# slack variables that relax the nodal balance (penalized in the objective)
node_balance_slack_1 = [solver.NumVar(0, 99999, 'balance_slack1_' + str(i)) for i in range(nc.nbus)]
node_balance_slack_2 = [solver.NumVar(0, 99999, 'balance_slack2_' + str(i)) for i in range(nc.nbus)]

# equal the balance to the generation: eq.13,14 (equality)
i = 0
for balance, power in zip(node_balance, Pinj):
    solver.Add(balance == power + node_balance_slack_1[i] - node_balance_slack_2[i], "Node_power_balance_" + str(i))
    i += 1
# branch flow ----------------------------------------------------------------------------------------------------------
pftk = list()
rates = nc.Rates / nc.Sbase  # branch ratings in p.u.
overload1 = np.empty(nc.nbr, dtype=object)
overload2 = np.empty(nc.nbr, dtype=object)
for i in range(nc.nbr):
    _f = nc.branch_data.F[i]
    _t = nc.branch_data.T[i]
    # flow variable bounded by the branch rating
    pftk.append(solver.NumVar(-rates[i], rates[i], 'pftk_' + str(i)))

    # compute the branch susceptance
    bk = (1.0 / complex(nc.branch_data.R[i], nc.branch_data.X[i])).imag

    # NOTE(review): the DC flow is written as bk * (theta_t - theta_f);
    # the usual convention is theta_f - theta_t — confirm the sign choice.
    if i in tau.keys():
        # branch power from-to eq.15
        solver.Add(pftk[i] == bk * (angles[_t] - angles[_f] - tau[i]), 'phase_shifter_power_flow_' + str(i))
    else:
        # branch power from-to eq.15
        solver.Add(pftk[i] == bk * (angles[_t] - angles[_f]), 'branch_power_flow_' + str(i))

    # rating restriction in the sense from-to: eq.17
    overload1[i] = solver.NumVar(0, 9999, 'overload1_' + str(i))
    solver.Add(pftk[i] <= (rates[i] + overload1[i]), "ft_rating_" + str(i))

    # rating restriction in the sense to-from: eq.18
    overload2[i] = solver.NumVar(0, 9999, 'overload2_' + str(i))
    solver.Add((-rates[i] - overload2[i]) <= pftk[i], "tf_rating_" + str(i))
# objective function ---------------------------------------------------------------------------------------------------
# maximize the power from->to
flows_ft = np.zeros(len(inter_area_branches), dtype=object)
for i, (k, sign) in enumerate(inter_area_branches):
    flows_ft[i] = sign * pftk[k]

flow_from_a1_to_a2 = solver.Sum(flows_ft)

# include the cost of generation
# gen_cost_f = solver.Sum(gen_cost * delta)

node_balance_slack_f = solver.Sum(node_balance_slack_1) + solver.Sum(node_balance_slack_2)
overload_slack_f = solver.Sum(overload1) + solver.Sum(overload2)

# objective function
# NOTE(review): the inter-area flow term is commented out, so the objective
# currently maximizes the area-1 generation up-shift instead of the exchange.
solver.Minimize(
    # - 1.0 * flow_from_a1_to_a2
    - 1.0 * solver.Sum(dgen1)
    + 1.0 * area_balance_slack
    # + 1.0 * gen_cost_f
    + 1e0 * node_balance_slack_f
    + 1e0 * overload_slack_f
)

# Solve ----------------------------------------------------------------------------------------------------------------
status = solver.Solve()
# print results --------------------------------------------------------------------------------------------------------
if status == pywraplp.Solver.OPTIMAL:
    print('Solution:')
    print('Objective value =', solver.Objective().Value())

    print('\nPower flow:')
    print(compose_branches_df(nc, pftk, overload1, overload2))

    print('\nPower flow inter-area:')
    # NOTE(review): total_pw is accumulated but never printed; the reported
    # total uses flow_from_a1_to_a2 instead.
    total_pw = 0
    for k, sign in inter_area_branches:
        total_pw += sign * pftk[k].solution_value()
        print(nc.branch_data.branch_names[k], ':', pftk[k].solution_value() * nc.Sbase, 'MW')
    print('Total exchange:', flow_from_a1_to_a2.solution_value() * nc.Sbase, 'MW')

    print('\nGenerators:')
    print(compose_generation_df(nc, generation1, dgen1, Pgen1))
    print(compose_generation_df(nc, generation2, dgen2, Pgen2))

    print()
    print('node balance slack:', node_balance_slack_f.solution_value())
    print('Reference node:', nc.vd)
    for i, (var1, var2) in enumerate(zip(node_balance_slack_1, node_balance_slack_2)):
        print('node slack {0}'.format(i), var1.solution_value(), var2.solution_value())
    print('area balance slack:', area_balance_slack.solution_value())

else:
    print('The problem does not have an optimal solution.')
# [END print_solution]
# [START advanced]
print('\nAdvanced usage:')
print('Problem solved in %f milliseconds' % solver.wall_time())
print('Problem solved in %d iterations' % solver.iterations())
print()
| [
"numpy.where",
"scipy.sparse.csc.csc_matrix",
"GridCal.Engine.FileOpen",
"numpy.zeros",
"numpy.empty",
"ortools.linear_solver.pywraplp.Solver.CreateSolver",
"GridCal.Engine.compile_snapshot_opf_circuit",
"pandas.DataFrame"
] | [((6769, 6806), 'GridCal.Engine.compile_snapshot_opf_circuit', 'gc.compile_snapshot_opf_circuit', (['grid'], {}), '(grid)\n', (6800, 6806), True, 'import GridCal.Engine as gc\n'), ((7552, 7587), 'ortools.linear_solver.pywraplp.Solver.CreateSolver', 'pywraplp.Solver.CreateSolver', (['"""CBC"""'], {}), "('CBC')\n", (7580, 7587), False, 'from ortools.linear_solver import pywraplp\n'), ((7996, 8042), 'numpy.zeros', 'np.zeros', (['nc.generator_data.ngen'], {'dtype': 'object'}), '(nc.generator_data.ngen, dtype=object)\n', (8004, 8042), True, 'import numpy as np\n'), ((11216, 11246), 'numpy.empty', 'np.empty', (['nc.nbr'], {'dtype': 'object'}), '(nc.nbr, dtype=object)\n', (11224, 11246), True, 'import numpy as np\n'), ((11259, 11289), 'numpy.empty', 'np.empty', (['nc.nbr'], {'dtype': 'object'}), '(nc.nbr, dtype=object)\n', (11267, 11289), True, 'import numpy as np\n'), ((4927, 4959), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': 'cols'}), '(data, columns=cols)\n', (4939, 4959), True, 'import pandas as pd\n'), ((5571, 5608), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'data', 'columns': 'cols'}), '(data=data, columns=cols)\n', (5583, 5608), True, 'import pandas as pd\n'), ((7181, 7213), 'numpy.where', 'np.where', (['(areas == area_from_idx)'], {}), '(areas == area_from_idx)\n', (7189, 7213), True, 'import numpy as np\n'), ((7222, 7252), 'numpy.where', 'np.where', (['(areas == area_to_idx)'], {}), '(areas == area_to_idx)\n', (7230, 7252), True, 'import numpy as np\n'), ((683, 698), 'scipy.sparse.csc.csc_matrix', 'csc_matrix', (['mat'], {}), '(mat)\n', (693, 698), False, 'from scipy.sparse.csc import csc_matrix\n'), ((821, 854), 'numpy.zeros', 'np.zeros', (['n_rows'], {'dtype': 'arr.dtype'}), '(n_rows, dtype=arr.dtype)\n', (829, 854), True, 'import numpy as np\n'), ((1231, 1276), 'numpy.zeros', 'np.zeros', (['(n_rows, cols_vec)'], {'dtype': 'arr.dtype'}), '((n_rows, cols_vec), dtype=arr.dtype)\n', (1239, 1276), True, 'import numpy as np\n'), ((2193, 
2208), 'scipy.sparse.csc.csc_matrix', 'csc_matrix', (['mat'], {}), '(mat)\n', (2203, 2208), False, 'from scipy.sparse.csc import csc_matrix\n'), ((2331, 2364), 'numpy.zeros', 'np.zeros', (['n_rows'], {'dtype': 'arr.dtype'}), '(n_rows, dtype=arr.dtype)\n', (2339, 2364), True, 'import numpy as np\n'), ((2723, 2768), 'numpy.zeros', 'np.zeros', (['(n_rows, cols_vec)'], {'dtype': 'arr.dtype'}), '((n_rows, cols_vec), dtype=arr.dtype)\n', (2731, 2768), True, 'import numpy as np\n'), ((6738, 6756), 'GridCal.Engine.FileOpen', 'gc.FileOpen', (['fname'], {}), '(fname)\n', (6749, 6756), True, 'import GridCal.Engine as gc\n')] |
from __future__ import division
from keras.optimizers import RMSprop
from keras.callbacks import EarlyStopping, ModelCheckpoint, LearningRateScheduler
from keras.layers import Input
from keras.models import Model
#import matlab.engine
import os, cv2, sys
import numpy as np
from config import *
from utilities import preprocess_images, preprocess_maps, preprocess_fixmaps, postprocess_predictions
from models import sam_vgg, sam_resnet, schedule_vgg, schedule_resnet, kl_divergence, correlation_coefficient, nss
def generator(b_s, phase_gen='train'):
    """
    Infinite batch generator for training or validation.

    Yields ([images, gaussian priors], [maps, maps, fixation maps]) batches
    of size b_s, cycling over the sorted dataset.

    :param b_s: batch size
    :param phase_gen: 'train' or 'val'
    """
    if phase_gen == 'train':
        img_dir, map_dir, fix_dir = imgs_train_path, maps_train_path, fixs_train_path
    elif phase_gen == 'val':
        img_dir, map_dir, fix_dir = imgs_val_path, maps_val_path, fixs_val_path
    else:
        raise NotImplementedError

    images = sorted(img_dir + f for f in os.listdir(img_dir) if f.endswith(('.jpg', '.jpeg', '.png')))
    maps = sorted(map_dir + f for f in os.listdir(map_dir) if f.endswith(('.jpg', '.jpeg', '.png')))
    fixs = sorted(fix_dir + f for f in os.listdir(fix_dir) if f.endswith('.mat'))

    gaussian = np.zeros((b_s, nb_gaussian, shape_r_gt, shape_c_gt))

    counter = 0
    while True:
        Y = preprocess_maps(maps[counter:counter + b_s], shape_r_out, shape_c_out)
        Y_fix = preprocess_fixmaps(fixs[counter:counter + b_s], shape_r_out, shape_c_out)
        X = preprocess_images(images[counter:counter + b_s], shape_r, shape_c)
        yield [X, gaussian], [Y, Y, Y_fix]
        counter = (counter + b_s) % len(images)
def generator_test(b_s, imgs_test_path):
    """
    Endless batch generator over test images (no ground truth).

    :param b_s: batch size
    :param imgs_test_path: directory prefix containing the test images
    :yields: [preprocessed images, gaussian prior stack]
    """
    images = sorted(imgs_test_path + f for f in os.listdir(imgs_test_path)
                    if f.endswith(('.jpg', '.jpeg', '.png')))
    gaussian = np.zeros((b_s, nb_gaussian, shape_r_gt, shape_c_gt))
    counter = 0
    while True:
        batch = preprocess_images(images[counter:counter + b_s], shape_r, shape_c)
        yield [batch, gaussian]
        counter = (counter + b_s) % len(images)
if __name__ == '__main__':
    # usage: first CLI argument selects the phase ('train' or 'test')
    if len(sys.argv) == 1:
        raise NotImplementedError
    else:
        phase = sys.argv[1]
    # network inputs: the RGB image tensor and the gaussian prior maps
    x = Input((3, shape_r, shape_c))
    x_maps = Input((nb_gaussian, shape_r_gt, shape_c_gt))
    # `version` (from config) selects the backbone: 0 = VGG, 1 = ResNet
    if version == 0:
        m = Model(input=[x, x_maps], output=sam_vgg([x, x_maps]))
        print("Compiling SAM-VGG")
        # three model outputs, one loss each: KL divergence, CC, NSS
        m.compile(RMSprop(lr=1e-4), loss=[kl_divergence, correlation_coefficient, nss])
    elif version == 1:
        m = Model(input=[x, x_maps], output=sam_resnet([x, x_maps]))
        print("Compiling SAM-ResNet")
        m.compile(RMSprop(lr=1e-4), loss=[kl_divergence, correlation_coefficient, nss])
    else:
        raise NotImplementedError
    if phase == 'train':
        # the generators yield exactly b_s samples per step, so image counts
        # must divide evenly by the batch size
        if nb_imgs_train % b_s != 0 or nb_imgs_val % b_s != 0:
            print("The number of training and validation images should be a multiple of the batch size. Please change your batch size in config.py accordingly.")
            exit()
        if version == 0:
            print("Training SAM-VGG")
            m.fit_generator(generator(b_s=b_s), nb_imgs_train, nb_epoch=nb_epoch,
                            validation_data=generator(b_s=b_s, phase_gen='val'), nb_val_samples=nb_imgs_val,
                            callbacks=[EarlyStopping(patience=3),
                                       ModelCheckpoint('weights.sam-vgg.{epoch:02d}-{val_loss:.4f}.pkl', save_best_only=True),
                                       LearningRateScheduler(schedule=schedule_vgg)])
        elif version == 1:
            print("Training SAM-ResNet")
            m.fit_generator(generator(b_s=b_s), nb_imgs_train, nb_epoch=nb_epoch,
                            validation_data=generator(b_s=b_s, phase_gen='val'), nb_val_samples=nb_imgs_val,
                            callbacks=[EarlyStopping(patience=3),
                                       ModelCheckpoint('weights.sam-resnet.{epoch:02d}-{val_loss:.4f}.pkl', save_best_only=True),
                                       LearningRateScheduler(schedule=schedule_resnet)])
    elif phase == "test":
        # Output Folder Path
        output_folder = 'predictions/'
        # test phase needs a second CLI argument: the test image directory
        if len(sys.argv) < 2:
            raise SyntaxError
        imgs_test_path = sys.argv[2]
        file_names = [f for f in os.listdir(imgs_test_path) if f.endswith(('.jpg', '.jpeg', '.png'))]
        file_names.sort()
        nb_imgs_test = len(file_names)
        if nb_imgs_test % b_s != 0:
            print("The number of test images should be a multiple of the batch size. Please change your batch size in config.py accordingly.")
            exit()
        if version == 0:
            print("Loading SAM-VGG weights")
            m.load_weights('weights/sam-vgg_salicon_weights.pkl')
        #elif version == 1:
        #    print("Loading SAM-ResNet weights")
        #    m.load_weights('weights/sam-resnet_salicon_weights.pkl')
        print("Predicting saliency maps for " + imgs_test_path)
        # first model output is taken as the predicted saliency map
        predictions = m.predict_generator(generator_test(b_s=b_s, imgs_test_path=imgs_test_path), nb_imgs_test)[0]
        for pred, name in zip(predictions, file_names):
            original_image = cv2.imread(imgs_test_path + name, 0)
            # rescale the prediction back to the original image resolution
            res = postprocess_predictions(pred[0], original_image.shape[0], original_image.shape[1])
            cv2.imwrite(output_folder + '%s' % name, res.astype(int))
    else:
        raise NotImplementedError
| [
"keras.callbacks.LearningRateScheduler",
"os.listdir",
"utilities.preprocess_maps",
"keras.callbacks.ModelCheckpoint",
"models.sam_resnet",
"utilities.preprocess_images",
"models.sam_vgg",
"utilities.postprocess_predictions",
"utilities.preprocess_fixmaps",
"numpy.zeros",
"keras.layers.Input",
... | [((1366, 1418), 'numpy.zeros', 'np.zeros', (['(b_s, nb_gaussian, shape_r_gt, shape_c_gt)'], {}), '((b_s, nb_gaussian, shape_r_gt, shape_c_gt))\n', (1374, 1418), True, 'import numpy as np\n'), ((1967, 2019), 'numpy.zeros', 'np.zeros', (['(b_s, nb_gaussian, shape_r_gt, shape_c_gt)'], {}), '((b_s, nb_gaussian, shape_r_gt, shape_c_gt))\n', (1975, 2019), True, 'import numpy as np\n'), ((1464, 1534), 'utilities.preprocess_maps', 'preprocess_maps', (['maps[counter:counter + b_s]', 'shape_r_out', 'shape_c_out'], {}), '(maps[counter:counter + b_s], shape_r_out, shape_c_out)\n', (1479, 1534), False, 'from utilities import preprocess_images, preprocess_maps, preprocess_fixmaps, postprocess_predictions\n'), ((1549, 1622), 'utilities.preprocess_fixmaps', 'preprocess_fixmaps', (['fixs[counter:counter + b_s]', 'shape_r_out', 'shape_c_out'], {}), '(fixs[counter:counter + b_s], shape_r_out, shape_c_out)\n', (1567, 1622), False, 'from utilities import preprocess_images, preprocess_maps, preprocess_fixmaps, postprocess_predictions\n'), ((2333, 2361), 'keras.layers.Input', 'Input', (['(3, shape_r, shape_c)'], {}), '((3, shape_r, shape_c))\n', (2338, 2361), False, 'from keras.layers import Input\n'), ((2379, 2423), 'keras.layers.Input', 'Input', (['(nb_gaussian, shape_r_gt, shape_c_gt)'], {}), '((nb_gaussian, shape_r_gt, shape_c_gt))\n', (2384, 2423), False, 'from keras.layers import Input\n'), ((1864, 1890), 'os.listdir', 'os.listdir', (['imgs_test_path'], {}), '(imgs_test_path)\n', (1874, 1890), False, 'import os, cv2, sys\n'), ((629, 656), 'os.listdir', 'os.listdir', (['imgs_train_path'], {}), '(imgs_train_path)\n', (639, 656), False, 'import os, cv2, sys\n'), ((744, 771), 'os.listdir', 'os.listdir', (['maps_train_path'], {}), '(maps_train_path)\n', (754, 771), False, 'import os, cv2, sys\n'), ((859, 886), 'os.listdir', 'os.listdir', (['fixs_train_path'], {}), '(fixs_train_path)\n', (869, 886), False, 'import os, cv2, sys\n'), ((2068, 2134), 'utilities.preprocess_images', 
'preprocess_images', (['images[counter:counter + b_s]', 'shape_r', 'shape_c'], {}), '(images[counter:counter + b_s], shape_r, shape_c)\n', (2085, 2134), False, 'from utilities import preprocess_images, preprocess_maps, preprocess_fixmaps, postprocess_predictions\n'), ((2581, 2599), 'keras.optimizers.RMSprop', 'RMSprop', ([], {'lr': '(0.0001)'}), '(lr=0.0001)\n', (2588, 2599), False, 'from keras.optimizers import RMSprop\n'), ((984, 1009), 'os.listdir', 'os.listdir', (['imgs_val_path'], {}), '(imgs_val_path)\n', (994, 1009), False, 'import os, cv2, sys\n'), ((1095, 1120), 'os.listdir', 'os.listdir', (['maps_val_path'], {}), '(maps_val_path)\n', (1105, 1120), False, 'import os, cv2, sys\n'), ((1206, 1231), 'os.listdir', 'os.listdir', (['fixs_val_path'], {}), '(fixs_val_path)\n', (1216, 1231), False, 'import os, cv2, sys\n'), ((1638, 1704), 'utilities.preprocess_images', 'preprocess_images', (['images[counter:counter + b_s]', 'shape_r', 'shape_c'], {}), '(images[counter:counter + b_s], shape_r, shape_c)\n', (1655, 1704), False, 'from utilities import preprocess_images, preprocess_maps, preprocess_fixmaps, postprocess_predictions\n'), ((2498, 2518), 'models.sam_vgg', 'sam_vgg', (['[x, x_maps]'], {}), '([x, x_maps])\n', (2505, 2518), False, 'from models import sam_vgg, sam_resnet, schedule_vgg, schedule_resnet, kl_divergence, correlation_coefficient, nss\n'), ((2815, 2833), 'keras.optimizers.RMSprop', 'RMSprop', ([], {'lr': '(0.0001)'}), '(lr=0.0001)\n', (2822, 2833), False, 'from keras.optimizers import RMSprop\n'), ((5556, 5592), 'cv2.imread', 'cv2.imread', (['(imgs_test_path + name)', '(0)'], {}), '(imgs_test_path + name, 0)\n', (5566, 5592), False, 'import os, cv2, sys\n'), ((5615, 5702), 'utilities.postprocess_predictions', 'postprocess_predictions', (['pred[0]', 'original_image.shape[0]', 'original_image.shape[1]'], {}), '(pred[0], original_image.shape[0], original_image.\n shape[1])\n', (5638, 5702), False, 'from utilities import preprocess_images, 
preprocess_maps, preprocess_fixmaps, postprocess_predictions\n'), ((2726, 2749), 'models.sam_resnet', 'sam_resnet', (['[x, x_maps]'], {}), '([x, x_maps])\n', (2736, 2749), False, 'from models import sam_vgg, sam_resnet, schedule_vgg, schedule_resnet, kl_divergence, correlation_coefficient, nss\n'), ((4612, 4638), 'os.listdir', 'os.listdir', (['imgs_test_path'], {}), '(imgs_test_path)\n', (4622, 4638), False, 'import os, cv2, sys\n'), ((3537, 3562), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'patience': '(3)'}), '(patience=3)\n', (3550, 3562), False, 'from keras.callbacks import EarlyStopping, ModelCheckpoint, LearningRateScheduler\n'), ((3607, 3697), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['"""weights.sam-vgg.{epoch:02d}-{val_loss:.4f}.pkl"""'], {'save_best_only': '(True)'}), "('weights.sam-vgg.{epoch:02d}-{val_loss:.4f}.pkl',\n save_best_only=True)\n", (3622, 3697), False, 'from keras.callbacks import EarlyStopping, ModelCheckpoint, LearningRateScheduler\n'), ((3738, 3782), 'keras.callbacks.LearningRateScheduler', 'LearningRateScheduler', ([], {'schedule': 'schedule_vgg'}), '(schedule=schedule_vgg)\n', (3759, 3782), False, 'from keras.callbacks import EarlyStopping, ModelCheckpoint, LearningRateScheduler\n'), ((4103, 4128), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'patience': '(3)'}), '(patience=3)\n', (4116, 4128), False, 'from keras.callbacks import EarlyStopping, ModelCheckpoint, LearningRateScheduler\n'), ((4173, 4266), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['"""weights.sam-resnet.{epoch:02d}-{val_loss:.4f}.pkl"""'], {'save_best_only': '(True)'}), "('weights.sam-resnet.{epoch:02d}-{val_loss:.4f}.pkl',\n save_best_only=True)\n", (4188, 4266), False, 'from keras.callbacks import EarlyStopping, ModelCheckpoint, LearningRateScheduler\n'), ((4307, 4354), 'keras.callbacks.LearningRateScheduler', 'LearningRateScheduler', ([], {'schedule': 'schedule_resnet'}), '(schedule=schedule_resnet)\n', (4328, 4354), 
False, 'from keras.callbacks import EarlyStopping, ModelCheckpoint, LearningRateScheduler\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 24 21:38:07 2018
@author: nn
"""
import os
import cv2
import yaml
import numpy as np
import tensorflow as tf
from tqdm import tqdm
def make_tf_recoord(in_dir, out_file):
  """Serialize every image found in in_dir into a single TFRecord file.

  Each record stores the file's position in the directory listing ("index")
  and its raw pixel buffer ("image").  Note: os.listdir order is used as-is
  (unsorted).
  """
  names = os.listdir(in_dir)
  with tf.python_io.TFRecordWriter(out_file) as writer:
    for idx, name in enumerate(tqdm(names)):
      pixels = cv2.imread(os.path.join(in_dir, name))
      feature = {
          "index": tf.train.Feature(int64_list=tf.train.Int64List(value=[idx])),
          "image": tf.train.Feature(bytes_list=tf.train.BytesList(value=[pixels.tostring()]))
      }
      example = tf.train.Example(features=tf.train.Features(feature=feature))
      writer.write(example.SerializeToString())
def make_tf_record_imgs(imgs, labels, out_dir, offset=0, class_names=None,
                        param_filename="params.yml"):
  """Write images and labels to out_dir as a TFRecord plus a YAML param file.

  Each record carries "index" (position + offset), "label" and the raw
  "image" bytes.  A params YAML with n_data, num_classes, class_names and
  the per-image shape is written alongside.

  Args:
    imgs: indexable sequence of numpy image arrays (all the same shape).
    labels: integer label per image; its unique values define num_classes.
    out_dir: output directory, created if missing.
    offset: value added to each record's "index" field.
    class_names: optional names for the classes; when omitted/empty,
      'A', 'B', ... are generated.  (Default changed from the mutable
      literal [] to None -- same observable behavior, avoids the shared
      mutable default pitfall.)
    param_filename: name of the YAML file written into out_dir.
  """
  if class_names is None:
    class_names = []
  if not os.path.isdir(out_dir):
    os.makedirs(out_dir)
  out_file = os.path.join(out_dir, "record.tfrecord")
  with tf.python_io.TFRecordWriter(out_file) as writer:
    for f, img in enumerate(tqdm(imgs)):
      example = tf.train.Example(features=tf.train.Features(feature={
          "index": tf.train.Feature(int64_list=tf.train.Int64List(value=[f + offset])),
          "label": tf.train.Feature(int64_list=tf.train.Int64List(value=[labels[f]])),
          "image": tf.train.Feature(bytes_list=tf.train.BytesList(value=[imgs[f].tostring()]))
      }))
      writer.write(example.SerializeToString())
  n_data = len(labels)
  num_classes = len(np.unique(labels))
  shape = imgs[0].shape
  if len(class_names) == 0:
    # Fall back to generated single-letter names: 'A', 'B', ...
    class_names = [chr(ord("A") + i) for i in range(num_classes)]
  param_file = os.path.join(out_dir, param_filename)
  params = {
      "n_data": n_data,
      "num_classes": num_classes,
      "class_names": class_names,
      "shape": list(shape)
  }
  with open(param_file, "w") as hndl:
    yaml.dump(params, hndl)
if __name__ == "__main__":
# in_dir = "/home/naoki/Document/ml_data/mtg/cardlists/KLD"
# out_file = "/home/naoki/Document/ml_data/mtg/tfrecoords/KLD.tfrecoord"
# make_tf_recoord(in_dir, out_file)
img_dir = "../../data/cifar10"
if not os.path.isdir(img_dir):
os.makedirs(img_dir)
tr, te = tf.keras.datasets.cifar10.load_data()
train = tr[0].reshape([-1, 32, 32, 3])
test = te[0].reshape([-1, 32, 32, 3])
make_tf_record_imgs(train, tr[1], os.path.join(img_dir, "Train"))
make_tf_record_imgs(test, te[1], os.path.join(img_dir, "Test"))
| [
"os.listdir",
"numpy.unique",
"os.makedirs",
"yaml.dump",
"tqdm.tqdm",
"os.path.join",
"tensorflow.train.Int64List",
"tensorflow.keras.datasets.cifar10.load_data",
"os.path.isdir",
"tensorflow.python_io.TFRecordWriter"
] | [((259, 277), 'os.listdir', 'os.listdir', (['in_dir'], {}), '(in_dir)\n', (269, 277), False, 'import os\n'), ((984, 1024), 'os.path.join', 'os.path.join', (['out_dir', '"""record.tfrecord"""'], {}), "(out_dir, 'record.tfrecord')\n", (996, 1024), False, 'import os\n'), ((1774, 1811), 'os.path.join', 'os.path.join', (['out_dir', 'param_filename'], {}), '(out_dir, param_filename)\n', (1786, 1811), False, 'import os\n'), ((2321, 2358), 'tensorflow.keras.datasets.cifar10.load_data', 'tf.keras.datasets.cifar10.load_data', ([], {}), '()\n', (2356, 2358), True, 'import tensorflow as tf\n'), ((285, 322), 'tensorflow.python_io.TFRecordWriter', 'tf.python_io.TFRecordWriter', (['out_file'], {}), '(out_file)\n', (312, 322), True, 'import tensorflow as tf\n'), ((922, 944), 'os.path.isdir', 'os.path.isdir', (['out_dir'], {}), '(out_dir)\n', (935, 944), False, 'import os\n'), ((950, 970), 'os.makedirs', 'os.makedirs', (['out_dir'], {}), '(out_dir)\n', (961, 970), False, 'import os\n'), ((1032, 1069), 'tensorflow.python_io.TFRecordWriter', 'tf.python_io.TFRecordWriter', (['out_file'], {}), '(out_file)\n', (1059, 1069), True, 'import tensorflow as tf\n'), ((1623, 1640), 'numpy.unique', 'np.unique', (['labels'], {}), '(labels)\n', (1632, 1640), True, 'import numpy as np\n'), ((1989, 2012), 'yaml.dump', 'yaml.dump', (['params', 'hndl'], {}), '(params, hndl)\n', (1998, 2012), False, 'import yaml\n'), ((2261, 2283), 'os.path.isdir', 'os.path.isdir', (['img_dir'], {}), '(img_dir)\n', (2274, 2283), False, 'import os\n'), ((2289, 2309), 'os.makedirs', 'os.makedirs', (['img_dir'], {}), '(img_dir)\n', (2300, 2309), False, 'import os\n'), ((2476, 2506), 'os.path.join', 'os.path.join', (['img_dir', '"""Train"""'], {}), "(img_dir, 'Train')\n", (2488, 2506), False, 'import os\n'), ((2543, 2572), 'os.path.join', 'os.path.join', (['img_dir', '"""Test"""'], {}), "(img_dir, 'Test')\n", (2555, 2572), False, 'import os\n'), ((363, 374), 'tqdm.tqdm', 'tqdm', (['files'], {}), '(files)\n', (367, 
374), False, 'from tqdm import tqdm\n'), ((1109, 1119), 'tqdm.tqdm', 'tqdm', (['imgs'], {}), '(imgs)\n', (1113, 1119), False, 'from tqdm import tqdm\n'), ((400, 426), 'os.path.join', 'os.path.join', (['in_dir', 'file'], {}), '(in_dir, file)\n', (412, 426), False, 'import os\n'), ((559, 588), 'tensorflow.train.Int64List', 'tf.train.Int64List', ([], {'value': '[f]'}), '(value=[f])\n', (577, 588), True, 'import tensorflow as tf\n'), ((1253, 1291), 'tensorflow.train.Int64List', 'tf.train.Int64List', ([], {'value': '[f + offset]'}), '(value=[f + offset])\n', (1271, 1291), True, 'import tensorflow as tf\n'), ((1355, 1392), 'tensorflow.train.Int64List', 'tf.train.Int64List', ([], {'value': '[labels[f]]'}), '(value=[labels[f]])\n', (1373, 1392), True, 'import tensorflow as tf\n')] |
# -*- coding: utf-8 -*-
# Author: <NAME>
# Date: 2019-4-28
import numpy as np
import pickle
from pprint import pprint
def main(tag_gt, word2idx_tag, idx2word, ref):
    """
    Mark ground-truth tags in-place: for each sentence in ref[0] (word-index
    lists) belonging to video ref[1][i], set tag_gt[video, tag] = 1 for every
    word that is also a tag word in word2idx_tag.

    :param tag_gt: 2-D integer array (videos x tags), mutated in place
    :param word2idx_tag: tag word -> tag column index
    :param idx2word: corpus word index -> word string
    :param ref: (sentences, video indices) pair, aligned by position
    """
    sentences, vid_indices = ref[0], ref[1]
    for sent_pos, sentence in enumerate(sentences):
        vid = vid_indices[sent_pos]
        for token in sentence:
            word = idx2word[token]
            if word in word2idx_tag:
                tag_gt[vid, word2idx_tag[word]] = 1
if __name__ == "__main__":
tag_gt = np.zeros([1970, 300], np.int32)
with open('youtube_corpus.pkl', 'rb') as fo:
data = pickle.load(fo)
train, val, test = data[0], data[1], data[2]
word2idx, idx2word = data[3], data[4]
with open('tag_idx_word.pkl', 'rb') as fo:
idx2word_tag, word2idx_tag = pickle.load(fo)
main(tag_gt, word2idx_tag, idx2word, data[0])
main(tag_gt, word2idx_tag, idx2word, data[1])
main(tag_gt, word2idx_tag, idx2word, data[2])
np.save('msvd_tag_gt', tag_gt) | [
"numpy.zeros",
"pickle.load",
"numpy.save"
] | [((411, 442), 'numpy.zeros', 'np.zeros', (['[1970, 300]', 'np.int32'], {}), '([1970, 300], np.int32)\n', (419, 442), True, 'import numpy as np\n'), ((880, 910), 'numpy.save', 'np.save', (['"""msvd_tag_gt"""', 'tag_gt'], {}), "('msvd_tag_gt', tag_gt)\n", (887, 910), True, 'import numpy as np\n'), ((508, 523), 'pickle.load', 'pickle.load', (['fo'], {}), '(fo)\n', (519, 523), False, 'import pickle\n'), ((708, 723), 'pickle.load', 'pickle.load', (['fo'], {}), '(fo)\n', (719, 723), False, 'import pickle\n')] |
#!/usr/bin/env python
#
# Copyright (c) 2019 Opticks Team. All Rights Reserved.
#
# This file is part of Opticks
# (see https://bitbucket.org/simoncblyth/opticks).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
sphere.py : SphereReflect intersection, polarization calculation and spatial plot
=====================================================================================
"""
import os, sys, logging, numpy as np
log = logging.getLogger(__name__)
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from opticks.ana.base import opticks_main
from opticks.ana.nbase import vnorm
from opticks.ana.evt import Evt, costheta_, cross_, norm_
from opticks.ana.boundary import Boundary
deg = np.pi/180.
class SphereReflect(object):
    """
    Checks of "TO BR SA" photons reflected off a radius-100 sphere at the
    origin: re-derives the ray/sphere intersection, surface normals and the
    incident/outgoing/transverse/parallel polarization bases from recorded
    photon steps so they can be compared with what the simulation wrote.
    """
    def __init__(self, sel):
        """
        :param sel: event selection exposing rpost_(i) recorded step
                    positions (first 3 columns used) and rpol_(i) recorded
                    step polarizations

        Caches positions at steps 0, 1 and 2, the incident (p0->p1) and
        outgoing (p1->pl) difference vectors and the polarizations at
        steps 0 and 1.
        """
        p0 = sel.rpost_(0)[:,:3]
        p1 = sel.rpost_(1)[:,:3]
        p_in = p1 - p0

        pp = sel.rpost_(1)[:,:3]
        pl = sel.rpost_(2)[:,:3]
        p_out = pl - pp

        e0 = sel.rpol_(0)
        e1 = sel.rpol_(1)

        self.p0 = p0
        self.p1 = p1
        self.pl = pl
        self.p_in = p_in
        self.p_out = p_out
        self.e0 = e0
        self.e1 = e1

    def check_radius(self):
        """
        Histogram the radii of the recorded bounce positions p1.

        Asymmetric wider distribution than expected, from ~99 to 101.4 until
        fixed sphere positioning, when get expected symmetry
        ::

            In [50]: r.min()
            Out[50]: 99.96880356309731

            In [51]: r.max()
            Out[51]: 100.03083354506256

            In [53]: np.average(r)
            Out[53]: 100.00002525999686

        """
        #r = np.linalg.norm(self.p1, 2, 1)
        r = vnorm(self.p1)
        # BUGFIX: was "r,max()" (comma typo) which called builtin max() with
        # no arguments and raised TypeError; the intent was r.max()
        log.info("r min/max %s %s " % (r.min(), r.max()))
        plt.hist(r, bins=100)
        return r

    def intersection(self):
        """
        Analytic intersection of +X direction rays starting from self.p0
        with a radius-100 sphere centred at the origin.

        :return: (p1c, nrm, msk) -- intersection points on the near side,
                 outward unit normals at those points, and a boolean mask
                 of rays that actually hit (discriminant > 0)
        """
        origin = np.array([0,0,0])
        radius = 100.
        direction = np.array([1,0,0])

        ray_origin = self.p0
        ray_direction = np.tile(direction, len(ray_origin)).reshape(-1,3)
        center = np.tile(origin, len(ray_origin)).reshape(-1,3)

        O = ray_origin - center
        D = ray_direction

        b = np.sum( O*D, axis=1 )
        c = np.sum( O*O, axis=1 ) - radius*radius

        disc = b*b-c # hmm potential unhealthy subtraction of two large values
        #assert np.all(disc > 0)
        msk = disc > 0

        sdisc = np.sqrt(disc)
        root1 = -b -sdisc    # nearer of the two roots: first sphere crossing

        p1c = root1[:,None]*ray_direction + ray_origin
        nrm = (O + D*root1[:,None])/radius
        return p1c, nrm, msk

    def check_surface_normal(self):
        """
        Interactive sanity check that the computed normals are unit length
        for rays that hit::

            In [187]: nnrm = np.linalg.norm(nrm, 2, 1)

            In [188]: nnrm[sr.msk].min()
            Out[188]: 0.99999999999999667

            In [189]: nnrm[sr.msk].max()
            Out[189]: 1.0000000000000033

        """
        sr = self
        p1c, nrm, msk = sr.intersection()
        #nnrm = np.linalg.norm(nrm, 2, 1)
        nnrm = vnorm(nrm)   # inspected interactively, see docstring

    def check_intersection(self):
        """
        Compare analytic intersection points p1c against recorded p1 and
        set up the orthonormal bases used in the polarization checks::

            In [155]: sr.mdp1[:,0].min()
            Out[155]: -1.7650434697386426

            In [156]: sr.mdp1[:,0].max()
            Out[156]: 1.8097476126588909

            plt.hist(sr.dp1[sr.msk], bins=100)   # sharp zero spike

        """
        sr = self
        p1c, nrm, msk = sr.intersection()

        p1 = self.p1
        rperp = np.sqrt(np.sum( p1[:,1:3]*p1[:,1:3] , axis=1))   # tangential distance from the X axis

        dp1 = p1c - p1
        mdp1 = dp1[msk]

        self.p1c = p1c
        self.msk = msk
        self.dp1 = dp1
        self.mdp1 = mdp1
        self.rperp = rperp

        nrm = norm_(p1c)      # surface normal at intersection points
        #inc = np.tile( [1,0,0], len(nrm) ).reshape(-1,3)    # directions of squadron incident along +X

        idir = norm_(self.p_in)
        ndir = norm_(self.p_out)
        trans = np.cross(idir, nrm )   # direction perpendicular to plane of incidence, A_trans
        paral = norm_(np.cross( ndir, trans ))   # exit basis

        self.nrm = nrm
        self.idir = idir
        self.ndir = ndir
        self.trans = trans
        self.paral = paral

        #sr = self
        #plt.hist2d(sr.rperp[sr.msk], sr.mdp1[:,0], bins=100)   # largest deviations are tangential

    def check_polarization(self):
        """
        Interactive check only (no code).  Projecting the step-1
        polarization e1 onto the bases from check_intersection gave
        np.sum(e1*paral, axis=1) ~ +/-1 and np.sum(e1*trans, axis=1) ~ 0,
        i.e. for this sample the reflected polarization lies along the
        `paral` basis vector, in the plane of incidence.
        """
        pass

    def check_incident_sphere_pol(self):
        """
        Interactive check only (no code).  Comparing rpol_(0) against the
        radial direction norm_(xyz) (with z zeroed) showed the "P G4"
        sample polarized along the radial direction and the "S G4" sample
        polarized perpendicular to it in the XY plane -- see the session
        transcript preserved in the original source history.
        """
        pass

    def spatial(self):
        """
        2D histograms of start positions p0 and bounce positions p1 in the
        xy, xz and yz planes.

        Initial observation of asymmetry,

        *FIXED: THE SPHERE WAS OFFSET (-1,1): A HANGOVER TO AVOID LEAKY TRIANGLE CRACKS*

        after placing sphere at origin no asymmetry apparent
        """
        fig = plt.figure()

        x0 = self.p0[:,0]
        y0 = self.p0[:,1]
        z0 = self.p0[:,2]

        x1 = self.p1[:,0]
        y1 = self.p1[:,1]
        z1 = self.p1[:,2]

        nr = 2
        nc = 3
        nb = 100

        ax = fig.add_subplot(nr,nc,1)
        plt.hist2d(x0, y0, bins=nb)
        ax.set_xlabel("x0 y0")

        ax = fig.add_subplot(nr,nc,2)
        plt.hist2d(x0, z0, bins=nb)
        ax.set_xlabel("x0 z0")

        ax = fig.add_subplot(nr,nc,3)
        plt.hist2d(y0, z0, bins=nb)
        ax.set_xlabel("y0 z0")

        ax = fig.add_subplot(nr,nc,4)
        plt.hist2d(x1, y1, bins=nb)   # xy: not symmetric, seems -Y tangentials favored over +Y tangentials
        ax.set_xlabel("x1 y1")

        ax = fig.add_subplot(nr,nc,5)
        plt.hist2d(x1, z1, bins=nb)   # xz: only 0:-100 as only half illuminated
        ax.set_xlabel("x1 z1")

        ax = fig.add_subplot(nr,nc,6)
        plt.hist2d(y1, z1, bins=nb)   # yz: looks symmetric
        ax.set_xlabel("y1 z1")
if __name__ == '__main__':
    # standard opticks analysis arguments; defaults select the
    # rainbow/torch sample with tag "-5" ("S G4" below)
    args = opticks_main(tag="-5", det="rainbow", src="torch")
    plt.ion()
    plt.close()
    boundary = Boundary("Vacuum///MainH2OHale")
    # restrict to photons with history: torch emit, boundary reflect, surface absorb
    seqs = ["TO BR SA"]
    #evt = Evt(tag="-6", det="rainbow", seqs=seqs, label="P G4")
    tag = args.tag
    # tag encodes polarization (S/P) and simulator; negative tags label G4, positive Op
    if tag == "-6":
        label = "P G4"
    elif tag == "-5":
        label = "S G4"
    elif tag == "5":
        label = "S Op"
    elif tag == "6":
        label = "P Op"
    else:
        label = "label?"
    try:
        evt = Evt(tag=tag, det=args.det, src=args.src, seqs=seqs, label=label, args=args )
    except IOError as err:
        # missing event files are fatal; exit with the configured return code
        log.fatal(err)
        sys.exit(args.mrc)
    log.info("loaded %s " % repr(evt))
    sr = SphereReflect(evt)
    p1 = sr.p1
    # plot spatial distributions of start and bounce positions
    sr.spatial()
    #sr.check_intersection()
| [
"logging.getLogger",
"opticks.ana.boundary.Boundary",
"matplotlib.pyplot.hist",
"opticks.ana.base.opticks_main",
"numpy.sqrt",
"numpy.cross",
"matplotlib.pyplot.hist2d",
"opticks.ana.evt.Evt",
"opticks.ana.nbase.vnorm",
"matplotlib.pyplot.close",
"numpy.array",
"numpy.sum",
"opticks.ana.evt.... | [((940, 967), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (957, 967), False, 'import os, sys, logging, numpy as np\n'), ((10046, 10096), 'opticks.ana.base.opticks_main', 'opticks_main', ([], {'tag': '"""-5"""', 'det': '"""rainbow"""', 'src': '"""torch"""'}), "(tag='-5', det='rainbow', src='torch')\n", (10058, 10096), False, 'from opticks.ana.base import opticks_main\n'), ((10103, 10112), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (10110, 10112), True, 'import matplotlib.pyplot as plt\n'), ((10117, 10128), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (10126, 10128), True, 'import matplotlib.pyplot as plt\n'), ((10145, 10177), 'opticks.ana.boundary.Boundary', 'Boundary', (['"""Vacuum///MainH2OHale"""'], {}), "('Vacuum///MainH2OHale')\n", (10153, 10177), False, 'from opticks.ana.boundary import Boundary\n'), ((2176, 2190), 'opticks.ana.nbase.vnorm', 'vnorm', (['self.p1'], {}), '(self.p1)\n', (2181, 2190), False, 'from opticks.ana.nbase import vnorm\n'), ((2260, 2281), 'matplotlib.pyplot.hist', 'plt.hist', (['r'], {'bins': '(100)'}), '(r, bins=100)\n', (2268, 2281), True, 'import matplotlib.pyplot as plt\n'), ((2371, 2390), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (2379, 2390), True, 'import os, sys, logging, numpy as np\n'), ((2432, 2451), 'numpy.array', 'np.array', (['[1, 0, 0]'], {}), '([1, 0, 0])\n', (2440, 2451), True, 'import os, sys, logging, numpy as np\n'), ((2691, 2712), 'numpy.sum', 'np.sum', (['(O * D)'], {'axis': '(1)'}), '(O * D, axis=1)\n', (2697, 2712), True, 'import os, sys, logging, numpy as np\n'), ((2925, 2938), 'numpy.sqrt', 'np.sqrt', (['disc'], {}), '(disc)\n', (2932, 2938), True, 'import os, sys, logging, numpy as np\n'), ((3479, 3489), 'opticks.ana.nbase.vnorm', 'vnorm', (['nrm'], {}), '(nrm)\n', (3484, 3489), False, 'from opticks.ana.nbase import vnorm\n'), ((4120, 4130), 'opticks.ana.evt.norm_', 'norm_', (['p1c'], {}), '(p1c)\n', (4125, 
4130), False, 'from opticks.ana.evt import Evt, costheta_, cross_, norm_\n'), ((4327, 4343), 'opticks.ana.evt.norm_', 'norm_', (['self.p_in'], {}), '(self.p_in)\n', (4332, 4343), False, 'from opticks.ana.evt import Evt, costheta_, cross_, norm_\n'), ((4359, 4376), 'opticks.ana.evt.norm_', 'norm_', (['self.p_out'], {}), '(self.p_out)\n', (4364, 4376), False, 'from opticks.ana.evt import Evt, costheta_, cross_, norm_\n'), ((4394, 4413), 'numpy.cross', 'np.cross', (['idir', 'nrm'], {}), '(idir, nrm)\n', (4402, 4413), True, 'import os, sys, logging, numpy as np\n'), ((8988, 9000), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (8998, 9000), True, 'import matplotlib.pyplot as plt\n'), ((9257, 9284), 'matplotlib.pyplot.hist2d', 'plt.hist2d', (['x0', 'y0'], {'bins': 'nb'}), '(x0, y0, bins=nb)\n', (9267, 9284), True, 'import matplotlib.pyplot as plt\n'), ((9366, 9393), 'matplotlib.pyplot.hist2d', 'plt.hist2d', (['x0', 'z0'], {'bins': 'nb'}), '(x0, z0, bins=nb)\n', (9376, 9393), True, 'import matplotlib.pyplot as plt\n'), ((9476, 9503), 'matplotlib.pyplot.hist2d', 'plt.hist2d', (['y0', 'z0'], {'bins': 'nb'}), '(y0, z0, bins=nb)\n', (9486, 9503), True, 'import matplotlib.pyplot as plt\n'), ((9587, 9614), 'matplotlib.pyplot.hist2d', 'plt.hist2d', (['x1', 'y1'], {'bins': 'nb'}), '(x1, y1, bins=nb)\n', (9597, 9614), True, 'import matplotlib.pyplot as plt\n'), ((9767, 9794), 'matplotlib.pyplot.hist2d', 'plt.hist2d', (['x1', 'z1'], {'bins': 'nb'}), '(x1, z1, bins=nb)\n', (9777, 9794), True, 'import matplotlib.pyplot as plt\n'), ((9919, 9946), 'matplotlib.pyplot.hist2d', 'plt.hist2d', (['y1', 'z1'], {'bins': 'nb'}), '(y1, z1, bins=nb)\n', (9929, 9946), True, 'import matplotlib.pyplot as plt\n'), ((10526, 10601), 'opticks.ana.evt.Evt', 'Evt', ([], {'tag': 'tag', 'det': 'args.det', 'src': 'args.src', 'seqs': 'seqs', 'label': 'label', 'args': 'args'}), '(tag=tag, det=args.det, src=args.src, seqs=seqs, label=label, args=args)\n', (10529, 10601), False, 'from 
opticks.ana.evt import Evt, costheta_, cross_, norm_\n'), ((2725, 2746), 'numpy.sum', 'np.sum', (['(O * O)'], {'axis': '(1)'}), '(O * O, axis=1)\n', (2731, 2746), True, 'import os, sys, logging, numpy as np\n'), ((3897, 3936), 'numpy.sum', 'np.sum', (['(p1[:, 1:3] * p1[:, 1:3])'], {'axis': '(1)'}), '(p1[:, 1:3] * p1[:, 1:3], axis=1)\n', (3903, 3936), True, 'import os, sys, logging, numpy as np\n'), ((4519, 4540), 'numpy.cross', 'np.cross', (['ndir', 'trans'], {}), '(ndir, trans)\n', (4527, 4540), True, 'import os, sys, logging, numpy as np\n'), ((10661, 10679), 'sys.exit', 'sys.exit', (['args.mrc'], {}), '(args.mrc)\n', (10669, 10679), False, 'import os, sys, logging, numpy as np\n')] |
# Este arquivo contém todas as operações necessárias para as permutações
import numpy as np
import itertools
import random
def getAllBreakPoints(permutation):
    """
    Return the breakpoint positions of a permutation of 1..n: every position
    between two elements whose values differ by more than 1, plus position 0
    when the first element is not 1 and position n when the last is not n.
    Interior breakpoints come first, in ascending order, then 0, then n.
    """
    n = len(permutation)
    breakpoints = []
    for i in range(n - 1):
        # adjacency test inlined: consecutive values must differ by exactly 1
        if abs(permutation[i] - permutation[i + 1]) != 1:
            breakpoints.append(i + 1)
    if permutation[0] != 1:
        breakpoints.append(0)
    if permutation[-1] != n:
        breakpoints.append(n)
    return breakpoints
def getAllReversals(n):
    """Return all (i, j) index pairs with 0 <= i < j < n, in lexicographic order."""
    pairs = []
    for left in range(n):
        for right in range(left + 1, n):
            pairs.append((left, right))
    return pairs
def getAllPermutations(n):
    """Return every permutation of [1..n] as a list of lists, in itertools order."""
    values = list(range(1, n + 1))
    return [list(p) for p in itertools.permutations(values)]
def getAllSigmas(permutation, operation):
    """
    Apply every reversal produced by operation(len(permutation)) to the
    permutation and return the resulting permutations, in operation order.
    """
    sigmas = []
    for i, j in operation(len(permutation)):
        sigmas.append(applyReversal(permutation, i, j))
    return sigmas
def getAllScores(model):
    """
    For every permutation of size 3 and every reversal neighbour sigma,
    query the model on the concatenated (permutation, sigma) pair and
    collect (permutation, sigma, score) triples, one row per permutation.
    """
    scores = []
    for permutation in getAllPermutations(3):
        row = [(permutation, sigma,
                model.predict(np.array([join(permutation, sigma)]))[0][0])
               for sigma in getAllSigmas(permutation, getAllReversals)]
        scores.append(row)
    return scores
def getSigmasProtectionBreakpoint(permutation, operation):
    """
    Apply only the reversals (i, j) whose endpoints i and j+1 both fall on
    breakpoints of the permutation, returning the resulting permutations.

    The original recomputed getAllBreakPoints(permutation) twice per
    candidate reversal; the breakpoint set does not change inside the
    comprehension, so it is computed once and stored as a set for O(1)
    membership tests.  Output order and contents are unchanged.
    """
    breakpoints = set(getAllBreakPoints(permutation))
    return [applyReversal(permutation, rev[0], rev[1])
            for rev in operation(len(permutation))
            if rev[0] in breakpoints and (rev[1] + 1) in breakpoints]
def getNumberBreakPoints(permutation):
    """Count the breakpoints of the permutation."""
    breakpoints = getAllBreakPoints(permutation)
    return len(breakpoints)
def getIdentity(n):
    """Return the identity permutation [1, 2, ..., n]."""
    return list(range(1, n + 1))
def isIdentity(permutation):
    """Return True when the permutation equals the identity [1..n]."""
    return permutation == list(range(1, len(permutation) + 1))
def isAdjacent(x, y):
    """Return True when x and y are consecutive integers (differ by exactly 1)."""
    difference = x - y
    return difference == 1 or difference == -1
def join(permutation, sigma):
    """Concatenate a permutation and a sigma into a single sequence."""
    combined = permutation + sigma
    return combined
def randomState(n):
    """Return a uniformly shuffled permutation of [1..n]."""
    state = list(range(1, n + 1))
    np.random.shuffle(state)
    return state
def applyReversal(permutation, i, j):
    """
    Return a new permutation with the segment between positions i and j
    (inclusive) reversed; the two indices may be given in either order and
    the input is never mutated.
    """
    lo, hi = (i, j) if i <= j else (j, i)
    reversed_strip = permutation[lo:hi + 1][::-1]
    return permutation[:lo] + reversed_strip + permutation[hi + 1:]
def nextByPermutationMarkovDecisionProcess(choices, intention, temperature):
    """
    Pick the next state: keep the intended state with probability
    `temperature`, otherwise move to a uniformly chosen alternative from
    `choices` (which must contain `intention`).  When the intention is the
    only choice it is returned directly.
    """
    alternatives = list(choices)
    alternatives.remove(intention)
    if not alternatives:
        return intention
    candidates = np.array([intention, random.choice(alternatives)])
    pick = np.random.choice(a=[0, 1], size=1, replace=True,
                            p=[temperature, 1 - temperature])
    return candidates[pick].tolist()[0]
"numpy.random.choice",
"random.choice",
"numpy.random.shuffle"
] | [((1748, 1775), 'numpy.random.shuffle', 'np.random.shuffle', (['identity'], {}), '(identity)\n', (1765, 1775), True, 'import numpy as np\n'), ((2249, 2335), 'numpy.random.choice', 'np.random.choice', ([], {'a': '[0, 1]', 'size': '(1)', 'replace': '(True)', 'p': '[temperature, 1 - temperature]'}), '(a=[0, 1], size=1, replace=True, p=[temperature, 1 -\n temperature])\n', (2265, 2335), True, 'import numpy as np\n'), ((2212, 2234), 'random.choice', 'random.choice', (['choices'], {}), '(choices)\n', (2225, 2234), False, 'import random\n')] |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for export tools."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import tempfile
import numpy as np
import tensorflow as tf
from tensorflow.contrib import learn
from tensorflow.contrib.learn.python.learn.utils import export
from tensorflow.contrib.session_bundle import manifest_pb2
class ExportTest(tf.test.TestCase):
  """Tests for learn.monitors.ExportMonitor with session_bundle signatures."""

  def _get_default_signature(self, export_meta_filename):
    """Gets the default signature from the export.meta file."""
    with tf.Session():
      save = tf.train.import_meta_graph(export_meta_filename)
      meta_graph_def = save.export_meta_graph()
      collection_def = meta_graph_def.collection_def
      # The exporter stores its Signatures proto in this collection;
      # exactly one serialized Any entry is expected.
      signatures_any = collection_def['serving_signatures'].any_list.value
      self.assertEquals(len(signatures_any), 1)
      signatures = manifest_pb2.Signatures()
      signatures_any[0].Unpack(signatures)
      default_signature = signatures.default_signature
      return default_signature

  def testExportMonitor(self):
    """Exports every written checkpoint with a generic signature."""
    random.seed(42)
    x = np.random.rand(1000)
    y = 2 * x + 3
    cont_features = [tf.contrib.layers.real_valued_column('', dimension=1)]
    regressor = learn.LinearRegressor(feature_columns=cont_features)
    export_dir = tempfile.mkdtemp() + 'export/'
    # Export on every step, but keep only the two most recent exports.
    export_monitor = learn.monitors.ExportMonitor(
        every_n_steps=1, export_dir=export_dir, exports_to_keep=2,
        signature_fn=export.generic_signature_fn)
    regressor.fit(x, y, steps=10,
                  monitors=[export_monitor])
    self.assertTrue(tf.gfile.Exists(export_dir))
    # Only the written checkpoints are exported.
    self.assertTrue(tf.gfile.Exists(export_dir + '00000001/export'))
    self.assertTrue(tf.gfile.Exists(export_dir + '00000010/export'))
    # Validate the signature
    signature = self._get_default_signature(export_dir + '00000010/export.meta')
    self.assertTrue(signature.HasField('generic_signature'))

  def testExportMonitorRegressionSignature(self):
    """Exports with a custom regression signature_fn."""

    def _regression_signature(examples, unused_features, predictions):
      # Builds a regression signature and returns it as both the default
      # signature and the named-signature map, per the exporter contract.
      signatures = {}
      signatures['regression'] = (
          tf.contrib.session_bundle.exporter.regression_signature(examples,
                                                                  predictions))
      return signatures['regression'], signatures

    random.seed(42)
    x = np.random.rand(1000)
    y = 2 * x + 3
    cont_features = [tf.contrib.layers.real_valued_column('', dimension=1)]
    regressor = learn.LinearRegressor(feature_columns=cont_features)
    export_dir = tempfile.mkdtemp() + 'export/'
    export_monitor = learn.monitors.ExportMonitor(
        every_n_steps=1,
        export_dir=export_dir,
        exports_to_keep=1,
        signature_fn=_regression_signature)
    regressor.fit(x, y, steps=10, monitors=[export_monitor])
    self.assertTrue(tf.gfile.Exists(export_dir))
    # exports_to_keep=1, so the step-0 export must have been pruned.
    self.assertFalse(tf.gfile.Exists(export_dir + '00000000/export'))
    self.assertTrue(tf.gfile.Exists(export_dir + '00000010/export'))
    # Validate the signature
    signature = self._get_default_signature(export_dir + '00000010/export.meta')
    self.assertTrue(signature.HasField('regression_signature'))
if __name__ == '__main__':
  # Run all test cases in this module under the TensorFlow test runner.
  tf.test.main()
| [
"tensorflow.contrib.learn.LinearRegressor",
"tensorflow.gfile.Exists",
"numpy.random.rand",
"tensorflow.contrib.session_bundle.manifest_pb2.Signatures",
"tensorflow.Session",
"tensorflow.contrib.layers.real_valued_column",
"random.seed",
"tensorflow.test.main",
"tensorflow.contrib.learn.monitors.Exp... | [((3936, 3950), 'tensorflow.test.main', 'tf.test.main', ([], {}), '()\n', (3948, 3950), True, 'import tensorflow as tf\n'), ((1746, 1761), 'random.seed', 'random.seed', (['(42)'], {}), '(42)\n', (1757, 1761), False, 'import random\n'), ((1770, 1790), 'numpy.random.rand', 'np.random.rand', (['(1000)'], {}), '(1000)\n', (1784, 1790), True, 'import numpy as np\n'), ((1901, 1953), 'tensorflow.contrib.learn.LinearRegressor', 'learn.LinearRegressor', ([], {'feature_columns': 'cont_features'}), '(feature_columns=cont_features)\n', (1922, 1953), False, 'from tensorflow.contrib import learn\n'), ((2023, 2156), 'tensorflow.contrib.learn.monitors.ExportMonitor', 'learn.monitors.ExportMonitor', ([], {'every_n_steps': '(1)', 'export_dir': 'export_dir', 'exports_to_keep': '(2)', 'signature_fn': 'export.generic_signature_fn'}), '(every_n_steps=1, export_dir=export_dir,\n exports_to_keep=2, signature_fn=export.generic_signature_fn)\n', (2051, 2156), False, 'from tensorflow.contrib import learn\n'), ((3048, 3063), 'random.seed', 'random.seed', (['(42)'], {}), '(42)\n', (3059, 3063), False, 'import random\n'), ((3072, 3092), 'numpy.random.rand', 'np.random.rand', (['(1000)'], {}), '(1000)\n', (3086, 3092), True, 'import numpy as np\n'), ((3203, 3255), 'tensorflow.contrib.learn.LinearRegressor', 'learn.LinearRegressor', ([], {'feature_columns': 'cont_features'}), '(feature_columns=cont_features)\n', (3224, 3255), False, 'from tensorflow.contrib import learn\n'), ((3325, 3452), 'tensorflow.contrib.learn.monitors.ExportMonitor', 'learn.monitors.ExportMonitor', ([], {'every_n_steps': '(1)', 'export_dir': 'export_dir', 'exports_to_keep': '(1)', 'signature_fn': '_regression_signature'}), '(every_n_steps=1, export_dir=export_dir,\n exports_to_keep=1, signature_fn=_regression_signature)\n', (3353, 3452), False, 'from tensorflow.contrib import learn\n'), ((1235, 1247), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1245, 1247), True, 
'import tensorflow as tf\n'), ((1262, 1310), 'tensorflow.train.import_meta_graph', 'tf.train.import_meta_graph', (['export_meta_filename'], {}), '(export_meta_filename)\n', (1288, 1310), True, 'import tensorflow as tf\n'), ((1555, 1580), 'tensorflow.contrib.session_bundle.manifest_pb2.Signatures', 'manifest_pb2.Signatures', ([], {}), '()\n', (1578, 1580), False, 'from tensorflow.contrib.session_bundle import manifest_pb2\n'), ((1830, 1883), 'tensorflow.contrib.layers.real_valued_column', 'tf.contrib.layers.real_valued_column', (['""""""'], {'dimension': '(1)'}), "('', dimension=1)\n", (1866, 1883), True, 'import tensorflow as tf\n'), ((1971, 1989), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (1987, 1989), False, 'import tempfile\n'), ((2270, 2297), 'tensorflow.gfile.Exists', 'tf.gfile.Exists', (['export_dir'], {}), '(export_dir)\n', (2285, 2297), True, 'import tensorflow as tf\n'), ((2368, 2415), 'tensorflow.gfile.Exists', 'tf.gfile.Exists', (["(export_dir + '00000001/export')"], {}), "(export_dir + '00000001/export')\n", (2383, 2415), True, 'import tensorflow as tf\n'), ((2437, 2484), 'tensorflow.gfile.Exists', 'tf.gfile.Exists', (["(export_dir + '00000010/export')"], {}), "(export_dir + '00000010/export')\n", (2452, 2484), True, 'import tensorflow as tf\n'), ((2847, 2925), 'tensorflow.contrib.session_bundle.exporter.regression_signature', 'tf.contrib.session_bundle.exporter.regression_signature', (['examples', 'predictions'], {}), '(examples, predictions)\n', (2902, 2925), True, 'import tensorflow as tf\n'), ((3132, 3185), 'tensorflow.contrib.layers.real_valued_column', 'tf.contrib.layers.real_valued_column', (['""""""'], {'dimension': '(1)'}), "('', dimension=1)\n", (3168, 3185), True, 'import tensorflow as tf\n'), ((3273, 3291), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (3289, 3291), False, 'import tempfile\n'), ((3564, 3591), 'tensorflow.gfile.Exists', 'tf.gfile.Exists', (['export_dir'], {}), '(export_dir)\n', (3579, 3591), True, 
'import tensorflow as tf\n'), ((3614, 3661), 'tensorflow.gfile.Exists', 'tf.gfile.Exists', (["(export_dir + '00000000/export')"], {}), "(export_dir + '00000000/export')\n", (3629, 3661), True, 'import tensorflow as tf\n'), ((3683, 3730), 'tensorflow.gfile.Exists', 'tf.gfile.Exists', (["(export_dir + '00000010/export')"], {}), "(export_dir + '00000010/export')\n", (3698, 3730), True, 'import tensorflow as tf\n')] |
#!/usr/local/Cellar/python/3.7.4_1
# -*- coding: utf-8 -*-
# @File : metric.py
# @Author  : Jiang Xiaoshuai (姜小帅)
# @Motto   : Steady incremental progress is one of the key motivations for perseverance
# @Contact : <EMAIL>
import numpy as np
from sklearn.metrics import accuracy_score, f1_score
def flat_accuracy(logits, labels):
    """Accuracy of argmax predictions against the gold labels.

    Args:
        logits: model output scores; detached and moved to CPU, then
            argmax-ed over axis 1 (assumes a 2-D (batch, num_classes)
            layout -- TODO confirm against the caller).
        labels: gold class ids (tensor on any device).

    Returns:
        The fraction of correct predictions, as computed by
        sklearn.metrics.accuracy_score.
    """
    scores = logits.detach().cpu().numpy()
    gold = labels.cpu().numpy().flatten()
    predicted = np.argmax(scores, axis=1).flatten()
    return accuracy_score(gold, predicted)
def flat_f1(logits, labels):
    """F1 score of argmax predictions against the gold labels.

    Args:
        logits: model output scores; detached and moved to CPU, then
            argmax-ed over axis 1 (assumes a 2-D (batch, num_classes)
            layout -- TODO confirm against the caller).
        labels: gold class ids (tensor on any device).

    Returns:
        The F1 score from sklearn.metrics.f1_score.  NOTE(review): sklearn's
        default is average='binary', so labels are presumably {0, 1} --
        verify before using this for multi-class evaluation.
    """
    scores = logits.detach().cpu().numpy()
    gold = labels.cpu().numpy().flatten()
    predicted = np.argmax(scores, axis=1).flatten()
    return f1_score(gold, predicted)
| [
"numpy.argmax",
"sklearn.metrics.f1_score",
"sklearn.metrics.accuracy_score"
] | [((436, 474), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['labels_flat', 'pred_flat'], {}), '(labels_flat, pred_flat)\n', (450, 474), False, 'from sklearn.metrics import accuracy_score, f1_score\n'), ((682, 714), 'sklearn.metrics.f1_score', 'f1_score', (['labels_flat', 'pred_flat'], {}), '(labels_flat, pred_flat)\n', (690, 714), False, 'from sklearn.metrics import accuracy_score, f1_score\n'), ((354, 379), 'numpy.argmax', 'np.argmax', (['logits'], {'axis': '(1)'}), '(logits, axis=1)\n', (363, 379), True, 'import numpy as np\n'), ((599, 624), 'numpy.argmax', 'np.argmax', (['logits'], {'axis': '(1)'}), '(logits, axis=1)\n', (608, 624), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.