text
stringlengths 0
1.25M
| meta
stringlengths 47
1.89k
|
|---|---|
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 9 12:48:42 2017
@author: cdw2be
"""
import warnings
warnings.simplefilter('ignore', UserWarning)
import tkinter as tk
from tkinter import filedialog
from tkinter import ttk
from tkinter import font
from tkinter import messagebox
import mrimodel
import confocalmodel
import mesh
import numpy as np
from cardiachelpers import displayhelper
import math
import winreg, glob, os
warnings.simplefilter('default', UserWarning)
class modelGUI(tk.Frame):
    """Generates a GUI to control the Python-based cardiac modeling toolbox."""

    def __init__(self, master=None):
        """Build the GUI inside *master* and initialize workflow-state flags.

        Args:
            master: Parent Tk root window.
        """
        tk.Frame.__init__(self, master)
        self.master = master
        self.grid()
        self.identifyPostView()
        self.createWidgets()
        # Workflow-state flags; replaced with real objects as the user
        # generates models and meshes.
        self.scar_assign = False
        self.dense_assign = False
        self.mri_model = False
        self.mri_mesh = False
        # Pad the fixed grid rows uniformly (was eight copied calls).
        for row in range(8):
            master.rowconfigure(row, pad=5)
        # NOTE(review): columns 8-9 are padded while rows 0-7 are; the
        # asymmetry appears intentional (column layout) but worth confirming.
        master.columnconfigure(8, pad=5)
        master.columnconfigure(9, pad=5)
        # Register the window-close handler. The original passed
        # self.destroy() -- which destroyed the frame immediately during
        # __init__ and registered its None return value -- instead of the
        # bound method.
        master.protocol('WM_DELETE_WINDOW', self.destroy)
def identifyPostView(self):
    """Locate the PostView executable via the Windows registry.

    Sets self.postview_exe to the full path of the first ``P*.exe`` found
    in the registered install folder, or '' when PostView is not installed
    or the executable cannot be found.
    """
    try:
        postview_key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, "SOFTWARE\\University of Utah\\PostView")
        postview_folder = winreg.QueryValueEx(postview_key, "Location")[0]
        # Search the install folder directly instead of os.chdir'ing into
        # it, so the process working directory is left untouched. The
        # original also raised an uncaught IndexError when no P*.exe
        # matched; treat that case as "not installed" instead.
        matches = glob.glob(os.path.join(postview_folder, 'P*.exe'))
        self.postview_exe = matches[0] if matches else ''
    except OSError:
        # FileNotFoundError (missing registry key) is a subclass of
        # OSError; winreg raises other OSError variants on access failure.
        self.postview_exe = ''
def createWidgets(self):
    """Place widgets throughout GUI frame and assign functionality.

    Builds every control in the main window: file-selection entries with
    Browse buttons, MRI/confocal model generation buttons, mesh-density
    options, FEBio/PostView controls, and plot toggles. Widgets that are
    reconfigured later are stored on self; purely-layout widgets are
    created anonymously. Widgets are created without an explicit parent
    and therefore attach to the default Tk root.
    """
    # Establish tk variables (file paths typed or browsed by the user)
    sa_filename = tk.StringVar()
    la_filename = tk.StringVar()
    lge_filename = tk.StringVar()
    la_lge_filenames = tk.StringVar()
    dense_filenames = tk.StringVar()
    premade_mesh_filename = tk.StringVar()
    confocal_dir = tk.StringVar()
    # Plot-option checkbox states (0/1)
    self.scar_plot_bool = tk.IntVar(value=0)
    self.dense_plot_bool = tk.IntVar(value=0)
    self.nodes_plot_bool = tk.IntVar(value=0)
    # Import Settings
    # Place labels
    import_label = ttk.Label(text='Model Import Options and Settings')
    import_label.grid(row=0, column=0, columnspan=9)
    ttk.Label(text='Short-Axis File:').grid(row=1, sticky='W')
    ttk.Label(text='Long-Axis File:').grid(row=2, sticky='W')
    ttk.Label(text='SA LGE File:').grid(row=3, sticky='W')
    ttk.Label(text='LA LGE Files:').grid(row=4, sticky='W')
    ttk.Label(text='DENSE Files:').grid(row=5, sticky='W')
    ttk.Label(text='Premade Mesh File:').grid(row=6, sticky='W')
    ttk.Label(text='Confocal Directory:').grid(row=7, sticky='W')
    # Create entry objects bound to the StringVars above
    sa_file_entry = ttk.Entry(width=80, textvariable=sa_filename)
    la_file_entry = ttk.Entry(width=80, textvariable=la_filename)
    lge_file_entry = ttk.Entry(width=80, textvariable=lge_filename)
    la_lge_file_entry = ttk.Entry(width=80, textvariable=la_lge_filenames)
    dense_file_entry = ttk.Entry(width=80, textvariable=dense_filenames)
    confocal_dir_entry = ttk.Entry(width=80, textvariable=confocal_dir)
    premade_mesh_entry = ttk.Entry(width=80, textvariable=premade_mesh_filename)
    # Place entry object
    sa_file_entry.grid(row=1, column=1, columnspan=5)
    la_file_entry.grid(row=2, column=1, columnspan=5)
    lge_file_entry.grid(row=3, column=1, columnspan=5)
    la_lge_file_entry.grid(row=4, column=1, columnspan=5)
    dense_file_entry.grid(row=5, column=1, columnspan=5)
    premade_mesh_entry.grid(row=6, column=1, columnspan=5)
    confocal_dir_entry.grid(row=7, column=1, columnspan=5)
    # Place "Browse" buttons; the 'multi' string selects the dialog type
    # inside openFileBrowser ('True' = multi-file, 'Dir' = directory).
    ttk.Button(text='Browse', command= lambda: self.openFileBrowser(sa_file_entry)).grid(row=1, column=6)
    ttk.Button(text='Browse', command= lambda: self.openFileBrowser(la_file_entry)).grid(row=2, column=6)
    ttk.Button(text='Browse', command= lambda: self.openFileBrowser(lge_file_entry)).grid(row=3, column=6)
    ttk.Button(text='Browse', command= lambda: self.openFileBrowser(la_lge_file_entry, multi='True')).grid(row=4, column=6)
    ttk.Button(text='Browse', command= lambda: self.openFileBrowser(dense_file_entry, multi='True')).grid(row=5, column=6)
    ttk.Button(text='Browse', command= lambda: self.openFileBrowser(premade_mesh_entry)).grid(row=6, column=6)
    ttk.Button(text='Browse', command= lambda: self.openFileBrowser(confocal_dir_entry, multi='Dir')).grid(row=7, column=6)
    # Model Options
    # Place labels
    ttk.Label(text='Primary Cine Timepoint:').grid(row=1, column=7, sticky='W')
    # Create options comboboxes (enabled once a model exists)
    self.cine_timepoint_cbox = ttk.Combobox(state='disabled', width=5)
    self.cine_timepoint_cbox.bind('<<ComboboxSelected>>', lambda _ : self.cineTimeChanged())
    self.cine_timepoint_cbox.grid(row=1, column=8)
    # Buttons to generate models
    ttk.Button(text='Generate MRI Model', command= lambda: self.createMRIModel(sa_filename, la_filename, lge_filename, la_lge_filenames, dense_filenames)).grid(row=2, column=7, columnspan=2)
    # Confocal Model Options
    # Place labels
    # Create options entries
    self.confocal_slice_button = ttk.Menubutton(text='Select confocal slices', state='disabled')
    self.confocal_slice_menu = tk.Menu(self.confocal_slice_button, tearoff=False)
    self.confocal_slice_button.configure(menu=self.confocal_slice_menu)
    self.confocal_slice_button.grid(row=4, column=7, columnspan=2)
    # Buttons to generate models
    ttk.Button(text='Generate Confocal Model', command= lambda: self.createConfocalModel(confocal_dir_entry)).grid(row=3, column=7, columnspan=2)
    ttk.Button(text='Stitch Selected Slices', command= lambda: self.startStitching()).grid(row=5, column=7, columnspan=2)
    # Mesh Options / Creation
    # Place labels
    mesh_label = ttk.Label(text='Mesh Settings')
    mesh_label.grid(row=0, column=10, columnspan=2)
    ttk.Label(text='Number of Rings:').grid(row=1, column=10, sticky='W')
    ttk.Label(text='Elements per Ring:').grid(row=2, column=10, sticky='W')
    ttk.Label(text='Elements through Wall:').grid(row=3, column=10, sticky='W')
    ttk.Label(text='Mesh Type:').grid(row=4, column=10, sticky='W')
    ttk.Label(text='Select conn matrix:').grid(row=5, column=10, sticky='W')
    # Create mesh option entry boxes
    num_rings_entry = ttk.Entry(width=10)
    elem_per_ring_entry = ttk.Entry(width=10)
    elem_thru_wall_entry = ttk.Entry(width=10)
    mesh_type_cbox = ttk.Combobox(values=['4x2', '4x4', '4x8'], state='readonly', width=10)
    self.conn_mat_cbox = ttk.Combobox(state='disabled', values=['hex', 'pent'], width=10)
    # Place mesh option entry boxes
    num_rings_entry.grid(row=1, column=11, sticky='W')
    elem_per_ring_entry.grid(row=2, column=11, sticky='W')
    elem_thru_wall_entry.grid(row=3, column=11, sticky='W')
    mesh_type_cbox.grid(row=4, column=11, sticky='W')
    self.conn_mat_cbox.grid(row=5, column=11, sticky='W')
    # Mesh option entry boxes default text and input validation
    num_rings_entry.insert(0, '28')
    elem_per_ring_entry.insert(0, '48')
    elem_thru_wall_entry.insert(0, '5')
    # NOTE(review): all three validators are registered through
    # num_rings_entry; register() just wraps self.intValidate on the shared
    # interpreter, so this works, but registering on each entry would be
    # clearer.
    num_rings_entry.configure(validate='key', validatecommand=(num_rings_entry.register(self.intValidate), '%P'))
    elem_per_ring_entry.configure(validate='key', validatecommand=(num_rings_entry.register(self.intValidate), '%P'))
    elem_thru_wall_entry.configure(validate='key', validatecommand=(num_rings_entry.register(self.intValidate), '%P'))
    mesh_type_cbox.current(2)
    self.conn_mat_cbox.current(0)
    # Create mesh option buttons.
    # NOTE(review): 'enabled' is not a standard ttk state name
    # (cf. 'normal'/'disabled'); confirm it behaves as intended here and below.
    self.premadeMeshButton = ttk.Button(text='Use Premade Mesh', state='enabled', command= lambda: self.createMRIMesh(num_rings_entry, elem_per_ring_entry, elem_thru_wall_entry, premade_mesh_file=premade_mesh_filename))
    self.meshButton = ttk.Button(text='Generate Model First', state='disabled', command= lambda: self.createMRIMesh(num_rings_entry, elem_per_ring_entry, elem_thru_wall_entry, mesh_type_cbox=mesh_type_cbox))
    self.scar_fe_button = ttk.Button(text='Identify scar nodes', state='disabled', command= lambda: self.scarElem())
    self.dense_fe_button = ttk.Button(text='Assign element displacements', state='disabled', command= lambda: self.denseElem())
    self.scar_dense_button = ttk.Button(text='Get scar region DENSE average', state='disabled', command= lambda: self.scarDense())
    # Place mesh option buttons
    self.premadeMeshButton.grid(row=8, column=10, columnspan=2)
    self.meshButton.grid(row=10, column=10, columnspan=2)
    self.scar_fe_button.grid(row=11, column=10, columnspan=2)
    self.dense_fe_button.grid(row=12, column=10, columnspan=2)
    self.scar_dense_button.grid(row=13, column=10, columnspan=2)
    # FEBio File Creation
    # Place labels
    postview_label = ttk.Label(text='Postview Options')
    postview_label.grid(row=10, column=2, columnspan=7)
    ttk.Label(text='Postview filename:').grid(row=11, column=2, sticky='W')
    ttk.Label(text='Postview installation:').grid(row=12, column=2, sticky='W')
    # Create entry objects
    self.postview_file_entry = ttk.Entry()
    self.postview_exe_entry = ttk.Entry()
    self.postview_file_entry.grid(row=11, column=3, columnspan=5, sticky='WE')
    self.postview_exe_entry.grid(row=12, column=3, columnspan=5, sticky='WE')
    # Pre-fill with the installation found in identifyPostView (may be '')
    self.postview_exe_entry.insert(0, self.postview_exe)
    # Buttons to create and open files; PostView launch is only enabled
    # when an installation was auto-detected.
    self.feb_file_button = ttk.Button(text='Generate FEBio File', state='disabled', command= lambda: self.genFebFile())
    if self.postview_exe == '':
        self.postview_open_button = ttk.Button(text='Launch PostView', state='disabled', command= lambda: self.openPostview())
    else:
        self.postview_open_button = ttk.Button(text='Launch PostView', state='enabled', command= lambda: self.openPostview())
    self.feb_file_button.grid(row=13, column=3)
    self.postview_open_button.grid(row=13, column=4)
    # Create "Browse" button ('Feb' = save dialog, 'Exe' falls through to
    # the plain open dialog in openFileBrowser)
    ttk.Button(text='Browse', command= lambda: self.openFileBrowser(self.postview_file_entry, multi='Feb')).grid(row=11, column=8, sticky='W')
    ttk.Button(text='Browse', command= lambda: self.openFileBrowser(self.postview_exe_entry, multi='Exe')).grid(row=12, column=8, sticky='W')
    # Plot Options
    # Place labels
    ttk.Label(text='Plot nodes in mesh?').grid(row=10, column=0, sticky='W')
    ttk.Label(text='Plot Scar?').grid(row=11, column=0, sticky='W')
    ttk.Label(text='Plot DENSE?').grid(row=12, column=0, sticky='W')
    ttk.Label(text='DENSE Timepoint:').grid(row=13, column=0, sticky='W')
    # DENSE Timepoint combobox
    self.dense_timepoint_cbox = ttk.Combobox(state='disabled', width=5)
    self.dense_timepoint_cbox.grid(row=13, column=1, sticky='W')
    # Options checkboxes (enabled once the corresponding data exists)
    self.scar_cbutton = ttk.Checkbutton(variable = self.scar_plot_bool, state='disabled')
    self.dense_cbutton = ttk.Checkbutton(variable = self.dense_plot_bool, state='disabled')
    self.nodes_cbutton = ttk.Checkbutton(variable = self.nodes_plot_bool, state='disabled')
    self.scar_cbutton.grid(row=11, column=1, sticky='W')
    self.dense_cbutton.grid(row=12, column=1, sticky='W')
    self.nodes_cbutton.grid(row=10, column=1, sticky='W')
    # Buttons to plot MRI Models or Meshes
    self.plot_mri_button = ttk.Button(text='Plot MRI Model', command= lambda: self.plotMRIModel(), state='disabled')
    self.plot_mesh_button = ttk.Button(text='Plot MRI Mesh', command= lambda: self.plotMRIMesh(), state='disabled')
    self.plot_mri_button.grid(row=14, column=0, sticky='W')
    self.plot_mesh_button.grid(row=14, column=1, sticky='W')
    # Separators dividing the import, mesh, and postview panels
    ttk.Separator(orient='vertical').grid(column=9, row=0, rowspan=15, sticky='NS')
    ttk.Separator(orient='horizontal').grid(column=0, row=9, columnspan=9, sticky='EW')
    ttk.Separator(orient='vertical').grid(row=9, column=1, rowspan=6, sticky='NSE')
    # Set specific label fonts (underline the three section headers)
    f = font.Font(mesh_label, mesh_label.cget('font'))
    f.configure(underline=True)
    mesh_label.configure(font=f)
    import_label.configure(font=f)
    postview_label.configure(font=f)
def openFileBrowser(self, entry_box, multi='False'):
    """Open a file browser window and assign the file name to the passed entry box.

    Allows various options for type of file browser to be launched.

    Args:
        entry_box: Entry widget to receive the selected path(s).
        multi: Dialog selector string -- 'True' for multi-file open,
            'Dir' for directory, 'Feb' for a .feb save dialog; anything
            else opens a single-file dialog.

    Returns:
        The selected path(s), or a falsy value if the dialog was cancelled.
    """
    if multi == 'True':
        file_name = filedialog.askopenfilenames(title='Select Files')
    elif multi == 'Dir':
        file_name = filedialog.askdirectory(title='Select Folder')
    elif multi == 'Feb':
        file_name = filedialog.asksaveasfilename(title='Save File', filetypes=(('FEBio files','*.feb'), ('All files', '*')))
        # Only force the extension when the user actually chose a name;
        # cancelling returns '' and previously became the bogus '.feb'.
        if file_name and not (file_name.split('.')[-1] == 'feb'):
            file_name += '.feb'
    else:
        file_name = filedialog.askopenfilename(title='Select File')
    # Truthiness also covers the empty tuple askopenfilenames may return
    # on cancel, which the old `== ''` comparison missed.
    if file_name:
        entry_box.delete(0, 'end')
        entry_box.insert(0, file_name)
    return(file_name)
def createMRIModel(self, sa_filename, la_filename, lge_filename, la_lge_filenames, dense_filenames):
    """Function run to instantiate MRI model based on input files.

    Args:
        sa_filename, la_filename: StringVars with the required short-axis
            and long-axis cine files (a dialog is shown if either is empty).
        lge_filename: StringVar with the optional short-axis LGE file.
        la_lge_filenames, dense_filenames: StringVars that may hold a
            Tcl-style list of multiple selected files.

    Returns False after an error dialog when a required file is missing;
    otherwise stores the new model on self.mri_model and updates widget
    states to match the available data.
    """
    # Check that required files are present
    if sa_filename.get() == '' or la_filename.get() == '':
        if sa_filename.get() == '':
            messagebox.showinfo('File Error', 'Need Short-Axis file.')
        elif la_filename.get() == '':
            messagebox.showinfo('File Error', 'Need Long-Axis file.')
        return(False)
    # Parse DENSE filenames: multi-selections arrive as one Tcl list string
    if not(dense_filenames.get() == ''):
        dense_filenames_replaced = list(self.master.tk.splitlist(dense_filenames.get()))
    else:
        dense_filenames_replaced = dense_filenames.get()
    if not(la_lge_filenames.get() == ''):
        la_lge_filenames_replaced = list(self.master.tk.splitlist(la_lge_filenames.get()))
    else:
        la_lge_filenames_replaced = la_lge_filenames.get()
    # Instantiate MRI model object and import cine stack (at default timepoint)
    self.mri_model = mrimodel.MRIModel(sa_filename.get(), la_filename.get(), sa_scar_file=lge_filename.get(), la_scar_files=la_lge_filenames_replaced, dense_file=dense_filenames_replaced)
    self.mri_model.importCine(timepoint=0)
    # Import LGE, if included, and generate full alignment array
    if self.mri_model.scar:
        self.scar_cbutton.configure(state='normal')
        self.mri_model.importLGE()
        if not(la_lge_filenames_replaced == ''):
            self.mri_model.importScarLA()
        self.mri_model.convertDataProlate()
        # NOTE(review): return value is unused here; alignScar() appears to
        # be re-run on demand elsewhere (e.g. scarElem) -- confirm intended.
        interp_scar = self.mri_model.alignScar()
    else:
        # Only occurs if scar is removed from MRI model on later instantiation
        self.scar_cbutton.configure(state='disabled')
        self.scar_fe_button.configure(state='disabled')
    # Import DENSE, if included
    if self.mri_model.dense:
        self.dense_cbutton.configure(state='normal')
        self.mri_model.importDense()
    else:
        # Only occurs if DENSE is removed from MRI model on later instantiation
        self.dense_cbutton.configure(state='disabled')
        self.dense_timepoint_cbox.configure(values=[], state='disabled')
        self.dense_fe_button.configure(state='disabled')
    self.mri_model.convertDataProlate()
    if self.mri_model.dense:
        self.mri_model.alignDense(cine_timepoint=0)
        self.dense_timepoint_cbox.configure(values=list(range(len(self.mri_model.circumferential_strain))), state='readonly')
        self.dense_timepoint_cbox.current(0)
    # Update GUI elements
    self.cine_timepoint_cbox.configure(values=list(range(len(self.mri_model.cine_endo))), state='readonly')
    self.cine_timepoint_cbox.current(0)
    if not self.mri_mesh:
        self.meshButton.configure(state='normal', text='Generate MRI Mesh')
    else:
        # A mesh already exists: refresh its landmarks from the new model
        self.mri_mesh.assignInsertionPts(self.mri_model.cine_apex_pt, self.mri_model.cine_basal_pt, self.mri_model.cine_septal_pts)
        if self.mri_model.scar:
            self.scar_fe_button.configure(state='enabled')
        if self.mri_model.dense:
            self.dense_fe_button.configure(state='enabled')
def createMRIMesh(self, num_rings_entry, elem_per_ring_entry, elem_thru_wall_entry, mesh_type_cbox=False, premade_mesh_file=False):
    """Function to generate base-level mesh from MRI model object

    Called in one of two modes: with mesh_type_cbox set (fit a new mesh to
    the current MRI model at the selected cine timepoint) or with
    premade_mesh_file set (load a previously saved mesh from disk).

    Args:
        num_rings_entry, elem_per_ring_entry, elem_thru_wall_entry: Entry
            widgets carrying integer mesh-density settings.
        mesh_type_cbox: Combobox with the mesh type string, or False.
        premade_mesh_file: StringVar with a premade mesh path, or False.

    Returns False after a dialog when any density setting is blank.
    """
    # Pull variables from GUI entry fields
    if not (num_rings_entry.get() == '' or elem_per_ring_entry.get() == '' or elem_thru_wall_entry.get() == ''):
        # Safe int() conversions: the entries are key-validated by intValidate
        num_rings = int(num_rings_entry.get())
        elem_per_ring = int(elem_per_ring_entry.get())
        elem_in_wall = int(elem_thru_wall_entry.get())
    else:
        messagebox.showinfo('Mesh Settings', 'Mesh option left blank. Correct and try again.')
        return(False)
    if mesh_type_cbox:
        time_point = int(self.cine_timepoint_cbox.get())
        # Create base mesh
        self.mri_mesh = mesh.Mesh(num_rings, elem_per_ring, elem_in_wall)
        # Fit mesh to MRI model data
        #temp_view = displayhelper.visPlot3d(self.mri_model.cine_endo_rotate[time_point][:, :3])
        #displayhelper.visPlot3d(self.mri_model.cine_epi_rotate[time_point][:, :3], view=temp_view)
        self.mri_mesh.fitContours(self.mri_model.cine_endo_rotate[time_point][:, :3], self.mri_model.cine_epi_rotate[time_point][:, :3], self.mri_model.cine_apex_pt, self.mri_model.cine_basal_pt, self.mri_model.cine_septal_pts, mesh_type_cbox.get())
        self.mri_mesh.feMeshRender()
        self.mri_mesh.nodeNum(self.mri_mesh.meshCart[0], self.mri_mesh.meshCart[1], self.mri_mesh.meshCart[2])
        self.mri_mesh.getElemConMatrix()
        # Update GUI elements as needed
        self.plot_mri_button.configure(state='normal')
        self.plot_mesh_button.configure(state='normal')
        self.feb_file_button.configure(state='normal')
        self.nodes_cbutton.configure(state='normal')
        self.conn_mat_cbox.configure(state='readonly')
        # Scar/DENSE element assignment only makes sense when the model
        # actually carries that data
        if self.mri_model.scar:
            self.scar_fe_button.configure(state='normal')
        else:
            self.scar_fe_button.configure(state='disabled')
        if self.mri_model.dense:
            self.dense_fe_button.configure(state='normal')
        else:
            self.dense_fe_button.configure(state='disabled')
    elif premade_mesh_file:
        self.mri_mesh = mesh.Mesh(num_rings, elem_per_ring, elem_in_wall)
        import_success = self.mri_mesh.importPremadeMesh(premade_mesh_file.get())
        # Update GUI elements as needed
        if import_success:
            self.plot_mri_button.configure(state='normal')
            self.plot_mesh_button.configure(state='normal')
            self.feb_file_button.configure(state='normal')
            self.nodes_cbutton.configure(state='normal')
            self.meshButton.configure(state='disabled', text='Using Premade Mesh')
            # If a model already exists, attach its landmarks to the new mesh
            if self.mri_model:
                self.mri_mesh.assignInsertionPts(self.mri_model.cine_apex_pt, self.mri_model.cine_basal_pt, self.mri_model.cine_septal_pts)
                if self.mri_model.scar:
                    self.scar_fe_button.configure(state='normal')
                else:
                    self.scar_fe_button.configure(state='disabled')
                if self.mri_model.dense:
                    self.dense_fe_button.configure(state='normal')
                else:
                    self.dense_fe_button.configure(state='disabled')
        else:
            # Import failed: discard the placeholder mesh and warn the user
            self.mri_mesh = None
            messagebox.showwarning('No Mesh File', 'Mesh file not found.')
def createConfocalModel(self, confocal_dir_entry):
    """Instantiate a ConfocalModel from the directory named in the entry box.

    Adds one checkbutton per slice to the slice-selection menu and enables
    the selection menubutton.

    Args:
        confocal_dir_entry: Entry widget holding the confocal image directory.

    Returns:
        False (after an info dialog) when no directory has been chosen.
    """
    target_dir = confocal_dir_entry.get()
    # Guard clause: a directory must be selected before building the model.
    if not target_dir:
        messagebox.showinfo('No Directory', 'Please select a directory for confocal images.')
        return(False)
    self.confocal_model = confocalmodel.ConfocalModel(target_dir)
    # One IntVar per slice (default: selected) tracks stitching choices.
    self.confocal_slice_selections = {}
    for name in self.confocal_model.slice_names:
        selection = tk.IntVar(value=1)
        self.confocal_slice_selections[name] = selection
        self.confocal_slice_menu.add_checkbutton(label=name, variable=selection, onvalue=1, offvalue=0)
    # Enable GUI elements that require an instantiated confocalModel object.
    self.confocal_slice_button.configure(state='enabled')
def cineTimeChanged(self):
    """Respond to a change of the primary cine timepoint combobox.

    Re-imports the cine model at the newly selected timepoint and, when
    DENSE data is present, re-aligns it to that timepoint.

    Returns:
        False when the combobox value is not an integer (should not occur,
        since the combobox is populated from a range).
    """
    # Narrowed from a bare except: only a parse failure should bail out here.
    try:
        new_timepoint = int(self.cine_timepoint_cbox.get())
    except ValueError:
        return(False)
    # Import the cine model at the selected timepoint (updates landmarks)
    self.mri_model.importCine(timepoint = new_timepoint)
    # If necessary, align DENSE to new cine timepoint (most important aspect)
    if self.mri_model.dense:
        self.mri_model.alignDense(cine_timepoint = new_timepoint)
def plotMRIModel(self):
    """Plots MRI Model based on raw data (slice contours, scar traces, etc.)

    Renders the cine segmentation at the currently selected timepoint,
    then optionally overlays scar traces and DENSE points on the same
    axes, depending on the plot checkboxes.
    """
    # Pull timepoint from timepoint selection combobox
    time_point = int(self.cine_timepoint_cbox.get())
    # Plot overall cine segmentation data
    mri_axes = displayhelper.segmentRender(self.mri_model.cine_endo_rotate[time_point], self.mri_model.cine_epi_rotate[time_point], self.mri_model.cine_apex_pt, self.mri_model.cine_basal_pt, self.mri_model.cine_septal_pts, self.mri_mesh.origin, self.mri_mesh.transform)
    # If desired, plot scar data
    if self.scar_plot_bool.get() and self.mri_model.scar:
        mri_axes = displayhelper.displayScarTrace(self.mri_model.interp_scar_trace, self.mri_mesh.origin, self.mri_mesh.transform, ax=mri_axes)
        # Long-axis scar trace only exists when LA LGE files were imported
        if hasattr(self.mri_model, 'interp_scar_la_trace'):
            mri_axes = displayhelper.displayScarTrace(self.mri_model.interp_scar_la_trace, self.mri_mesh.origin, self.mri_mesh.transform, ax=mri_axes)
    # If desired, plot DENSE data
    if self.dense_plot_bool.get() and self.mri_model.dense:
        mri_axes = displayhelper.displayDensePts(self.mri_model.dense_aligned_pts, self.mri_model.dense_slice_shifted, self.mri_mesh.origin, self.mri_mesh.transform, self.mri_model.dense_aligned_displacement, dense_plot_quiver=1, timepoint=int(self.dense_timepoint_cbox.get()), ax=mri_axes)
def plotMRIMesh(self):
    """Render the fitted mesh surfaces, with optional node and scar overlays.

    Endocardial and epicardial surfaces share one set of axes; node and
    scar-node overlays are added according to the plot checkboxes.
    """
    # Plot surface contours of endocardium and epicardium on shared axes
    axes = displayhelper.surfaceRender(self.mri_mesh.endo_node_matrix, self.mri_mesh.focus)
    axes = displayhelper.surfaceRender(self.mri_mesh.epi_node_matrix, self.mri_mesh.focus, ax=axes)
    # Display node positions, if selected
    if self.nodes_plot_bool.get():
        axes = displayhelper.nodeRender(self.mri_mesh.nodes, ax=axes)
    # Display scar locations, if requested
    if self.scar_plot_bool.get():
        if self.mri_mesh.nodes_in_scar.size:
            axes = displayhelper.nodeRender(self.mri_mesh.nodes[self.mri_mesh.nodes_in_scar, :], ax=axes)
        else:
            # Scar box selected but scar nodes not yet identified
            messagebox.showinfo('Warning', 'Identify scar nodes before plotting to view.')
def scarElem(self):
    """Requests mesh to process which elements are in scar

    Re-derives prolate coordinates about the mesh focus, rotates the mesh
    nodes to match, re-aligns the scar, and interpolates it onto the mesh.
    Enables the combined scar/DENSE button once both assignments are done.
    """
    # Removed unused `time_point` local (it was read from the combobox but
    # never used, and raised ValueError when no model had been loaded yet).
    self.mri_model.convertDataProlate(self.mri_mesh.focus)
    self.mri_mesh.rotateNodesProlate()
    self.mri_model.alignScar()
    self.mri_mesh.interpScarData(self.mri_model.interp_scar, trans_smooth=1, depth_smooth=0.5)
    if not self.scar_assign:
        self.scar_assign = True
    if self.dense_assign and self.scar_assign:
        self.scar_dense_button.configure(state='normal')
def denseElem(self):
    """Requests mesh to assign DENSE information to all applicable elements

    Enables the combined scar/DENSE button once both assignments are done.
    """
    # Removed unused `time_point` local (read from the combobox, never used).
    self.mri_mesh.assignDenseElems(self.mri_model.dense_aligned_pts, self.mri_model.dense_slices, self.mri_model.dense_aligned_displacement, self.mri_model.radial_strain, self.mri_model.circumferential_strain)
    if not self.dense_assign:
        self.dense_assign = True
    if self.dense_assign and self.scar_assign:
        self.scar_dense_button.configure(state='normal')
def scarDense(self):
    """Function designed to calculate regional DENSE data based on model / mesh scar extent.

    Calculates DENSE data in the identified scar and non-scar (remote)
    regions across all timepoints and plots those values.
    """
    num_frames = len(self.mri_model.dense_aligned_displacement)
    # Per-timepoint averaged element data for scar and remote regions
    scar_avg = [self.mri_mesh.getElemData(self.mri_mesh.elems_in_scar, 'dense', timepoint=tp).tolist() for tp in range(num_frames)]
    remote_avg = [self.mri_mesh.getElemData(self.mri_mesh.elems_out_scar, 'dense', timepoint=tp).tolist() for tp in range(num_frames)]
    # Split per-frame pairs into radial (index 0) and circumferential (index 1) series
    scar_radial = [frame[0] for frame in scar_avg]
    scar_circ = [frame[1] for frame in scar_avg]
    remote_radial = [frame[0] for frame in remote_avg]
    remote_circ = [frame[1] for frame in remote_avg]
    displayhelper.plotListData([scar_radial, remote_radial], ['Scar', 'Remote'])
    displayhelper.plotListData([scar_circ, remote_circ], ['Scar', 'Remote'])
def genFebFile(self):
    """Write an FEBio (*.feb) file for the current mesh.

    Validates the filename entered in the PostView file entry, delegates
    generation to the mesh object, then enables the PostView launcher.

    Returns:
        False (after an info dialog) when the filename is missing or does
        not carry the .feb extension.
    """
    target_file = self.postview_file_entry.get()
    # Guard clauses: a name must be present and end in .feb
    if not target_file:
        messagebox.showinfo('Filename Error', 'File name is not indicated.')
        return(False)
    if target_file.split('.')[-1] != 'feb':
        messagebox.showinfo('Filename Error', 'File must be an FEBio file (*.feb).')
        return(False)
    # Generate FEBio file through Mesh function
    self.mri_mesh.generateFEFile(target_file, self.conn_mat_cbox.get())
    # Update GUI Elements
    self.postview_open_button.configure(state='normal')
def openPostview(self):
    """Launch a PostView instance pointed at the FEBio File

    Validates both the FEBio file and the PostView executable entries
    before delegating the launch to displayhelper.displayMeshPostview.

    Returns:
        False (after a warning dialog) on any validation failure.
    """
    # Pull FEBio file name and executable path from the entry boxes
    feb_file_name = self.postview_file_entry.get()
    feb_executable = self.postview_exe_entry.get()
    # Check that file is an accessible file. The context manager closes the
    # probe handle immediately (the original open() leaked it), and OSError
    # replaces the old bare except.
    try:
        with open(feb_file_name):
            pass
    except OSError:
        messagebox.showinfo('File Warning', 'FEBio File not found. Check file name and try again.')
        return(False)
    # Ensure that file is an FEBio file
    if not (feb_file_name.split('.')[-1] == 'feb'):
        messagebox.showinfo('File Warning', 'File selected is not an FEBio file. Check file name and try again.')
        return(False)
    # Probe the executable the same way, then request the PostView launch
    try:
        with open(feb_executable):
            pass
        displayhelper.displayMeshPostview(feb_file_name, feb_executable)
    except FileNotFoundError:
        messagebox.showinfo('Executable Warning', 'PostView installation not found. Please identify executable location.')
        return(False)
    except OSError:
        messagebox.showinfo('Executable Warning', 'Invalid file selected. Please identify correct executable location.')
        return(False)
def startStitching(self):
    """Stitch selected slices into a large, combined image and save.

    Collects the slice indices whose menu checkbuttons are ticked, looks
    up their subslices and channels, and opens the subslice-selection
    window. Always returns True.
    """
    # Indices of slices whose selection IntVar is set (dict order preserved)
    stitch_slices = [self.confocal_model.slice_names.index(name)
                     for name, selected in self.confocal_slice_selections.items()
                     if selected.get()]
    # Per-slice subslice and channel lists from the confocal model
    sub_slices = [self.confocal_model.getSubsliceList(cur_slice) for cur_slice in stitch_slices]
    channels = [self.confocal_model.getChannelList(cur_slice) for cur_slice in stitch_slices]
    self._createSubsliceWindow(stitch_slices, sub_slices, channels)
    return(True)
def intValidate(self, new_value):
"""Simple validation function to ensure an entry receives only int-able inputs or null
"""
# Accept empty entry box to allow clearing the box
if new_value == '':
return(True)
# Attempt integer conversion. If possible, accept new input
try:
int(new_value)
return(True)
except:
return(False)
def _createSubsliceWindow(self, slice_list, subslice_list, channel_list):
    """Create a window to select subslices and channels.

    Builds a scrollable Toplevel with one column per selected slice: frame
    checkbuttons on top, channel checkbuttons below, and action buttons at
    the bottom. Selections are stored in self.subslice_selections and
    self.channel_selections, keyed by "<slice name> Frame <n>" and
    "<slice name> <channel>" respectively.

    Args:
        slice_list: Indices of the slices chosen for stitching.
        subslice_list: Per-slice lists of subslice (frame) identifiers.
        channel_list: Per-slice lists of channel names.
    """
    # Calculate window height: a throwaway root is created just to query
    # the screen height, then destroyed.
    root = tk.Tk()
    screen_height = root.winfo_screenheight()
    root.destroy()
    self.slice_menu = tk.Toplevel(self.master)
    self.slice_menu.wm_title('Select Subslices and Channels')
    # Set up canvas to allow scrollable window: the widgets live in a frame
    # embedded in a canvas whose scrollregion tracks the frame size.
    slice_canvas = tk.Canvas(self.slice_menu, borderwidth=0)
    slice_frame = tk.Frame(slice_canvas)
    scroll_bar = tk.Scrollbar(self.slice_menu, orient="vertical", command=slice_canvas.yview)
    slice_canvas.configure(yscrollcommand=scroll_bar.set)
    scroll_bar.pack(side="right", fill="y")
    slice_canvas.pack(side="left", fill="both", expand=True)
    slice_canvas.create_window((4,4), window=slice_frame, anchor="nw")
    def onFrameConfigure(canvas):
        # Keep the scrollable region in sync with the embedded frame
        canvas.configure(scrollregion=canvas.bbox("all"))
    # Mouse-wheel scrolls the canvas; delta/40 scales the OS wheel delta
    slice_canvas.bind_all("<MouseWheel>", lambda event: slice_canvas.yview_scroll(int(-1*(event.delta/40)), "units"))
    slice_frame.bind("<Configure>", lambda event, canvas=slice_canvas: onFrameConfigure(slice_canvas))
    # One column per slice: header label plus a checkbutton per frame
    self.subslice_selections = {}
    for slice_num, slice_index in enumerate(slice_list):
        slice_frame.columnconfigure(slice_num, pad=10)
        ttk.Label(slice_frame, text=self.confocal_model.slice_names[slice_index]).grid(row=0, column=slice_num)
        for sub_num, sub_slice in enumerate(subslice_list[slice_num]):
            cur_string = self.confocal_model.slice_names[slice_index] + " Frame " + str(sub_slice)
            self.subslice_selections[cur_string] = tk.IntVar(value=1)
            ttk.Checkbutton(slice_frame, text="Frame " + str(sub_slice), variable=self.subslice_selections[cur_string]).grid(row=sub_num+1, column=slice_num)
    self.channel_selections = {}
    # grid_size() gives (columns, rows) used so far; channels start below
    farthest_column, lowest_row = slice_frame.grid_size()
    # Place Channel List
    for slice_num, slice_index in enumerate(slice_list):
        ttk.Label(slice_frame, text='Channels').grid(row=lowest_row, column=slice_num)
        for channel_num, channel in enumerate(channel_list[slice_num]):
            cur_string = self.confocal_model.slice_names[slice_index] + ' ' + channel
            self.channel_selections[cur_string] = tk.IntVar(value=1)
            ttk.Checkbutton(slice_frame, text=channel, variable=self.channel_selections[cur_string]).grid(row=lowest_row+channel_num+1, column=slice_num)
    # Recompute extents, then center the action buttons under the grid
    farthest_column, lowest_row = slice_frame.grid_size()
    ttk.Button(slice_frame, text='Generate Stitched Image', command= lambda: self._stitchSlices(slice_list)).grid(row=lowest_row, column=math.ceil(farthest_column/2)-1, columnspan=2-(farthest_column % 2))
    ttk.Button(slice_frame, text='Generate Stitch Files', command= lambda: self._generateStitchFiles(slice_list)).grid(row=lowest_row+1, column=math.ceil(farthest_column/2)-1, columnspan=2-(farthest_column % 2))
    # Update and resize the canvas to match the frame, capped at 3/4 screen height
    slice_frame.update()
    slice_canvas.update()
    frame_height = np.min([int(screen_height * 3 / 4), slice_frame.winfo_height()])
    slice_canvas.config(width = slice_frame.winfo_width(), height=frame_height)
def _stitchSlices(self, slice_list):
    """Actually iterate through and run the stitching process for each item selected by the user.

    NOTE(review): __getSubsChannels also returns the selected channels,
    but only the subslices are forwarded to generateStitchedImages --
    confirm whether the channel selections should be passed as well.
    """
    # Underscore-named local documents that the channel half is unused here
    subslice_list, _channel_list = self.__getSubsChannels(slice_list)
    self.confocal_model.generateStitchedImages(slice_list, subslice_list, compress_ratio=1)
def _generateStitchFiles(self, slice_list):
    """Request image-grid stitch files from the confocal model for the given slices."""
    self.confocal_model.generateImageGridFiles(slice_list)
def __getSubsChannels(self, slice_list):
"""Get the subslices and channels based on the selections made in the slice selection window.
"""
sub_slices = [None]*len(slice_list)
slice_chans = [None]*len(slice_list)
for slice_num, slice_index in enumerate(slice_list):
subslice_list = self.confocal_model.getSubsliceList(slice_index)
channel_list = self.confocal_model.getChannelList(slice_index)
subslice_selected = [False]*len(subslice_list)
channel_selected = [False]*len(channel_list)
for sub_num, sub_slice in enumerate(subslice_list):
subslice_string = self.confocal_model.slice_names[slice_index] + " Frame " + str(sub_slice)
subslice_selected[sub_num] = self.subslice_selections[subslice_string].get()
for channel_num, channel in enumerate(channel_list):
channel_string = self.confocal_model.slice_names[slice_index] + ' ' + channel
channel_selected[channel_num] = self.channel_selections[channel_string].get()
sub_slices[slice_num] = list(np.where(subslice_selected)[0])
slice_chans[slice_num] = list(np.where(channel_selected)[0])
return([sub_slices, slice_chans])
# Launch the GUI only when this file is run as a script, so importing the
# module (e.g. from tests or other tools) does not open a window.
if __name__ == '__main__':
	root = tk.Tk()
	gui = modelGUI(master=root)
	gui.master.title('Cardiac Modeling Toolbox')
	gui.mainloop()
|
{"hexsha": "36cb04b0feed3f75ae6f5645af200b7c8a891a3c", "size": 32470, "ext": "py", "lang": "Python", "max_stars_repo_path": "Python/modelGUI.py", "max_stars_repo_name": "cardiacbiomechanicsgroup/lvdatamap", "max_stars_repo_head_hexsha": "d9020f9baaf9a77f4c9b9138758663361369d48e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-07-18T19:25:10.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-26T07:44:16.000Z", "max_issues_repo_path": "Python/modelGUI.py", "max_issues_repo_name": "cardiacbiomechanicsgroup/lvdatamap", "max_issues_repo_head_hexsha": "d9020f9baaf9a77f4c9b9138758663361369d48e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Python/modelGUI.py", "max_forks_repo_name": "cardiacbiomechanicsgroup/lvdatamap", "max_forks_repo_head_hexsha": "d9020f9baaf9a77f4c9b9138758663361369d48e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2018-12-21T15:58:09.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-02T22:21:44.000Z", "avg_line_length": 49.1225416036, "max_line_length": 285, "alphanum_fraction": 0.756513705, "include": true, "reason": "import numpy", "num_tokens": 8699}
|
      subroutine dlocate(xx,n,is,ie,x,j)
C%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
C                                                                      %
C Copyright (C) 1996, The Board of Trustees of the Leland Stanford     %
C Junior University.  All rights reserved.                             %
C                                                                      %
C The programs in GSLIB are distributed in the hope that they will be  %
C useful, but WITHOUT ANY WARRANTY.  No author or distributor accepts  %
C responsibility to anyone for the consequences of using them or for   %
C whether they serve any particular purpose or work at all, unless he  %
C says so in writing.  Everyone is granted permission to copy, modify  %
C and redistribute the programs in GSLIB, but only under the condition %
C that this notice and the above copyright notice remain intact.       %
C                                                                      %
C%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
c-----------------------------------------------------------------------
c
c Given an array "xx" of length "n", and given a value "x", this routine
c returns a value "j" such that "x" is between xx(j) and xx(j+1).  xx
c must be monotonic, either increasing or decreasing.  j=is-1 or j=ie
c (j=0 or j=n for a full-array search) indicates that x is out of range.
c
c Modified to set the start and end points of the search by "is" and
c "ie" rather than always scanning the whole array.
c
c Bisection Concept From "Numerical Recipes", Press et. al. 1986 pp 90.
c-----------------------------------------------------------------------
      implicit real*8 (a-h,o-z)
      dimension xx(n)
c
c Initialize lower and upper limits:
c
      jl = is-1
      ju = ie
c
c If we are not done then compute a midpoint:
c
 10   if(ju-jl.gt.1) then
            jm = (ju+jl)/2
c
c Replace the lower or upper limit with the midpoint.  The .eqv. test
c handles both orderings: xx(ie).gt.xx(is) is true for an ascending
c array, so the bracket [jl,ju] always stays around x.
c
            if((xx(ie).gt.xx(is)).eqv.(x.gt.xx(jm))) then
                  jl = jm
            else
                  ju = jm
            endif
            go to 10
      endif
c
c Return with the array index:
c
      j = jl
      return
      end
|
{"hexsha": "e9f87db5d5735e3616015a23f697797c12e4a15f", "size": 2144, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "visim/visim_src/gslib/dlocate.f", "max_stars_repo_name": "fcecinati/QUICS_UOB", "max_stars_repo_head_hexsha": "88cc8534a304520f5b25f84f4516712befaf13b3", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-01-06T01:43:12.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-06T01:43:12.000Z", "max_issues_repo_path": "visim/visim_src/gslib/dlocate.f", "max_issues_repo_name": "fcecinati/QUICS_UOB", "max_issues_repo_head_hexsha": "88cc8534a304520f5b25f84f4516712befaf13b3", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "visim/visim_src/gslib/dlocate.f", "max_forks_repo_name": "fcecinati/QUICS_UOB", "max_forks_repo_head_hexsha": "88cc8534a304520f5b25f84f4516712befaf13b3", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-10-07T17:56:00.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-19T23:43:04.000Z", "avg_line_length": 38.9818181818, "max_line_length": 72, "alphanum_fraction": 0.4944029851, "num_tokens": 466}
|
import numpy as np
from .._model import Generalmodel
# import spartan2.ioutil as ioutil
from spartan.util.ioutil import saveDictListData, loadDictListData
class IAT(Generalmodel):
    """Inter-arrival time (IAT) model.

    Aggregates the inter-arrival times (differences between consecutive,
    sorted event timestamps) per user and provides counting, probability
    and lookup utilities over single IATs and consecutive IAT pairs.
    """

    # Class-level defaults are kept for backward compatibility with code
    # that reads them off the class; instances get fresh dicts in __init__.
    aggiat = {}         # key: user; value: iat list
    user_iatpair = {}   # key: user; value: (iat1, iat2) list
    iatpair_user = {}   # key: (iat1, iat2); value: list of users
    iatpaircount = {}   # key: (iat1, iat2); value: count
    iatcount = {}       # key: iat; value: count
    iatprob = {}        # key: iat; value: probability
    usrdict = {}        # key: usr; value: frequency

    def __init__(self, aggiat=None, user_iatpair=None, iatpair_user=None,
                 iatpaircount=None, iatcount=None):
        # BUG FIX: the previous signature used mutable default arguments
        # (``aggiat={}`` etc.), so every instance created with defaults
        # shared the same dictionaries.  None sentinels avoid that.
        self.aggiat = {} if aggiat is None else aggiat
        self.user_iatpair = {} if user_iatpair is None else user_iatpair
        self.iatpair_user = {} if iatpair_user is None else iatpair_user
        self.iatpaircount = {} if iatpaircount is None else iatpaircount
        self.iatcount = {} if iatcount is None else iatcount
        # Give these per-instance storage too (they were only class-level).
        self.iatprob = {}
        self.usrdict = {}

    def calaggiat(self, aggts):
        """Fill ``self.aggiat`` from ``aggts`` (key: user; value: timestamp list).

        Users with fewer than two timestamps are skipped (no IAT exists).
        Note: the timestamp lists in ``aggts`` are sorted in place.
        """
        for user, ts_list in aggts.items():
            if len(ts_list) < 2:
                continue
            ts_list.sort()
            self.aggiat[user] = np.diff(ts_list)

    def save_aggiat(self, outfile):
        """Persist ``self.aggiat`` to ``outfile``."""
        saveDictListData(self.aggiat, outfile)

    def load_aggiat(self, infile):
        """Load ``self.aggiat`` from ``infile`` with int keys and int values."""
        self.aggiat = loadDictListData(infile, ktype=int, vtype=int)

    def get_iatpair_user_dict(self):
        """Build ``self.iatpair_user``: consecutive IAT pair -> list of users."""
        for user, iats in self.aggiat.items():
            for i in range(len(iats) - 1):
                pair = (iats[i], iats[i + 1])
                self.iatpair_user.setdefault(pair, []).append(user)

    def get_user_iatpair_dict(self):
        """Build ``self.user_iatpair``: user -> list of consecutive IAT pairs."""
        for user, iats in self.aggiat.items():
            self.user_iatpair[user] = [
                (iats[i], iats[i + 1]) for i in range(len(iats) - 1)
            ]

    def getiatpairs(self):
        """Return all consecutive IAT pairs of all users as two lists (xs, ys)."""
        xs, ys = [], []
        for user, iats in self.aggiat.items():
            for i in range(len(iats) - 1):
                xs.append(iats[i])
                ys.append(iats[i + 1])
        return xs, ys

    def caliatcount(self):
        """Count each IAT value and derive its empirical probability."""
        for user, iats in self.aggiat.items():
            for iat in iats:
                if iat not in self.iatcount:
                    self.iatcount[iat] = 0
                self.iatcount[iat] += 1
        allcount = sum(self.iatcount.values())  # total number of IATs
        self.iatprob = {iat: cnt / allcount for iat, cnt in self.iatcount.items()}

    def caliatpaircount(self):
        """Count occurrences of each consecutive IAT pair."""
        for user, iats in self.aggiat.items():
            for i in range(len(iats) - 1):
                pair = (iats[i], iats[i + 1])
                # BUG FIX: the membership test previously checked
                # ``self.iatcount``, which (being empty/unrelated) reset the
                # pair count to 0 on every occurrence, so all counts were 1.
                if pair not in self.iatpaircount:
                    self.iatpaircount[pair] = 0
                self.iatpaircount[pair] += 1

    def find_iatpair_user(self, iatpairs):
        """Return the users that have at least one pair in ``iatpairs``."""
        usrset = set()
        for pair in iatpairs:
            if pair in self.iatpair_user:
                usrset.update(self.iatpair_user[pair])
        return list(usrset)

    def get_user_dict(self, iatpairs):
        '''Rank users by how many of their pairs appear in ``iatpairs``.

        Parameters:
        --------
        :param iatpairs: dict
            iat pair returned by find_peak_rect function in RectHistogram class

        After this call ``self.usrdict`` is a list of (user, frequency)
        tuples sorted by decreasing frequency.  Expected to be called once
        per instance, since the accumulator dict is replaced by the list.
        '''
        for pair in iatpairs:
            if pair in self.iatpair_user:
                for usr in self.iatpair_user[pair]:
                    if usr in self.usrdict:
                        self.usrdict[usr] += 1
                    else:
                        self.usrdict[usr] = 1
        self.usrdict = sorted(self.usrdict.items(), key=lambda item: item[1], reverse=True)

    def find_topk_user(self, k=-1):
        '''Find the Top-k users ranked by ``get_user_dict``.

        Parameters:
        --------
        :param k: int
            default: -1, meaning return all users; otherwise the top-k.
        '''
        # BUG FIX: ``self.usrdict[:k]`` with the default k=-1 silently
        # dropped the last (least frequent) user instead of returning all.
        ranked = self.usrdict if k == -1 else self.usrdict[:k]
        return [usrcountpair[0] for usrcountpair in ranked]

    def drawIatPdf(self, usrlist: list, outfig=None):
        '''Plot the IAT-pdf line for the IATs of the given users.

        Parameters:
        --------
        :param usrlist: list
            Top-k users returned by ``find_topk_user``
        :param outfig: str
            fig save path

        Requires ``caliatcount`` to have been called so that
        ``self.iatprob`` is populated.
        '''
        iatset = set()
        for usrid in usrlist:
            iatset.update(self.aggiat[usrid])
        iatlist = sorted(iatset)
        import matplotlib.pyplot as plt
        fig = plt.figure()
        ys = [self.iatprob[iat] for iat in iatlist]
        plt.plot(iatlist, ys, 'b')
        plt.xscale('log')
        plt.xlabel('IAT(seconds)')
        plt.ylabel('pdf')
        if outfig is not None:
            fig.savefig(outfig)
        return fig
|
{"hexsha": "fe7549963d564245b76a68bb4b6c19a713cfc33a", "size": 5023, "ext": "py", "lang": "Python", "max_stars_repo_path": "spartan/model/iat/iat.py", "max_stars_repo_name": "sunxiaobing1999/spartan2", "max_stars_repo_head_hexsha": "95e80fce52c7c9274e7424fb4d9c6511b128b4c4", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 50, "max_stars_repo_stars_event_min_datetime": "2020-08-24T15:21:37.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-09T09:18:17.000Z", "max_issues_repo_path": "spartan/model/iat/iat.py", "max_issues_repo_name": "sunxiaobing1999/spartan2", "max_issues_repo_head_hexsha": "95e80fce52c7c9274e7424fb4d9c6511b128b4c4", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 16, "max_issues_repo_issues_event_min_datetime": "2020-08-24T15:20:46.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-20T03:13:40.000Z", "max_forks_repo_path": "spartan/model/iat/iat.py", "max_forks_repo_name": "sunxiaobing1999/spartan2", "max_forks_repo_head_hexsha": "95e80fce52c7c9274e7424fb4d9c6511b128b4c4", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 15, "max_forks_repo_forks_event_min_datetime": "2020-08-24T15:13:16.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-26T15:25:05.000Z", "avg_line_length": 33.4866666667, "max_line_length": 113, "alphanum_fraction": 0.5472825005, "include": true, "reason": "import numpy", "num_tokens": 1327}
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
import paddle.fluid.core as core
from op_test import OpTest
from test_anchor_generator_op import anchor_generator_in_python
from test_generate_proposal_labels import _generate_groundtruth
from test_generate_proposal_labels import _bbox_overlaps, _box_to_delta
def rpn_target_assign(gt_anchor_iou, rpn_batch_size_per_im,
                      rpn_positive_overlap, rpn_negative_overlap, fg_fraction):
    """Numpy reference implementation of RPN target assignment.

    Labels each anchor as foreground (1), background (0) or ignored (-1)
    based on its overlap with the ground-truth boxes, subsampling both sets
    to respect ``rpn_batch_size_per_im`` and ``fg_fraction``.

    Returns (loc_index, score_index, labels, gt_inds): the foreground anchor
    indices, the sampled anchor indices, the per-anchor label column vector,
    and the matched ground-truth index for each foreground anchor.
    """
    # (num_anchors, num_gt) overlap matrix.
    overlaps = np.transpose(gt_anchor_iou)
    max_overlap_per_anchor = overlaps.max(axis=1)
    argmax_per_anchor = overlaps.argmax(axis=1)
    argmax_per_gt = overlaps.argmax(axis=0)
    max_overlap_per_gt = overlaps[argmax_per_gt, np.arange(overlaps.shape[1])]
    # Anchors that realize the best overlap of some ground-truth box.
    best_anchor_rows = np.where(overlaps == max_overlap_per_gt)[0]

    labels = np.full((overlaps.shape[0],), -1, dtype=np.int32)
    labels[best_anchor_rows] = 1
    labels[max_overlap_per_anchor >= rpn_positive_overlap] = 1

    # Randomly disable surplus foreground anchors.
    num_fg = int(fg_fraction * rpn_batch_size_per_im)
    fg_inds = np.where(labels == 1)[0]
    if len(fg_inds) > num_fg:
        disable_inds = np.random.choice(
            fg_inds, size=(len(fg_inds) - num_fg), replace=False)
        labels[disable_inds] = -1
        fg_inds = np.where(labels == 1)[0]

    # Background anchors: low overlap, subsampled down to num_bg.
    num_bg = rpn_batch_size_per_im - np.sum(labels == 1)
    bg_inds = np.where(max_overlap_per_anchor < rpn_negative_overlap)[0]
    labels[bg_inds] = 0
    if len(bg_inds) > num_bg:
        enable_inds = bg_inds[np.random.randint(len(bg_inds), size=num_bg)]
        labels[enable_inds] = 0
    bg_inds = np.where(labels == 0)[0]
    labels[bg_inds] = 0

    loc_index = fg_inds
    score_index = np.hstack((fg_inds, bg_inds))
    labels = np.expand_dims(labels, axis=1)
    gt_inds = argmax_per_anchor[fg_inds]
    return loc_index, score_index, labels, gt_inds
def get_anchor(n, c, h, w):
    """Generate flattened anchors for a random (n, c, h, w) feature map.

    Uses two anchor sizes (32, 64) and two aspect ratios (0.5, 1.0) on a
    16-pixel stride with a 0.5 offset, mirroring the attributes of the
    anchor_generator op reference implementation.
    """
    input_feat = np.random.random((n, c, h, w)).astype('float32')
    anchors, _ = anchor_generator_in_python(
        input_feat=input_feat,
        anchor_sizes=[32., 64.],
        aspect_ratios=[0.5, 1.0],
        variances=[1.0, 1.0, 1.0, 1.0],
        stride=[16.0, 16.0],
        offset=0.5)
    return anchors
def rpn_blob(anchor, gt_boxes, iou, lod, rpn_batch_size_per_im,
             rpn_positive_overlap, rpn_negative_overlap, fg_fraction):
    """Assemble batched RPN training targets.

    Slices the ground-truth boxes and IOU matrix per image (via ``lod``
    offsets), runs ``rpn_target_assign`` on each slice, and concatenates the
    per-image results, offsetting anchor indices by image number.

    Returns (loc_indexes, score_indexes, tgt_bboxes, tgt_labels).
    """
    anchor_num = anchor.shape[0]
    batch_size = len(lod) - 1
    loc_indexes = []
    score_indexes = []
    tmp_tgt_labels = []
    tgt_bboxes = []
    for i in range(batch_size):
        start, end = lod[i], lod[i + 1]
        loc_idx, score_idx, tgt_lbl, gt_inds = rpn_target_assign(
            iou[start:end, :], rpn_batch_size_per_im, rpn_positive_overlap,
            rpn_negative_overlap, fg_fraction)
        # Regression deltas from each foreground anchor to its matched box.
        fg_bboxes = gt_boxes[start:end, :][gt_inds]
        fg_anchors = anchor[loc_idx]
        box_deltas = _box_to_delta(fg_anchors, fg_bboxes, [1., 1., 1., 1.])
        if i == 0:
            loc_indexes = loc_idx
            score_indexes = score_idx
            tmp_tgt_labels = tgt_lbl
            tgt_bboxes = box_deltas
        else:
            # Anchor indices are global across the batch.
            offset = i * anchor_num
            loc_indexes = np.concatenate([loc_indexes, loc_idx + offset])
            score_indexes = np.concatenate([score_indexes, score_idx + offset])
            tmp_tgt_labels = np.concatenate([tmp_tgt_labels, tgt_lbl])
            tgt_bboxes = np.vstack([tgt_bboxes, box_deltas])
    tgt_labels = tmp_tgt_labels[score_indexes]
    return loc_indexes, score_indexes, tgt_bboxes, tgt_labels
class TestRpnTargetAssignOp(OpTest):
    """Unit test for the ``rpn_target_assign`` operator.

    Builds random anchors and ground-truth boxes, computes the expected
    targets with the numpy reference implementations above, and relies on
    ``OpTest.check_output`` to compare the operator's outputs against them.
    """
    def setUp(self):
        # Feature-map dimensions used to generate anchors.
        n, c, h, w = 2, 4, 14, 14
        anchor = get_anchor(n, c, h, w)
        gt_num = 10  # NOTE(review): unused -- kept as-is.
        anchor = anchor.reshape(-1, 4)
        anchor_num = anchor.shape[0]
        # Two 64x64 images, each with 3-4 random ground-truth boxes.
        im_shapes = [[64, 64], [64, 64]]
        gt_box, lod = _generate_groundtruth(im_shapes, 3, 4)
        bbox = np.vstack([v['boxes'] for v in gt_box])
        iou = _bbox_overlaps(bbox, anchor)
        anchor = anchor.astype('float32')
        bbox = bbox.astype('float32')
        iou = iou.astype('float32')
        # Reference targets; lod offsets [0, 4, 8] split the boxes per image.
        loc_index, score_index, tgt_bbox, tgt_lbl = rpn_blob(
            anchor, bbox, iou, [0, 4, 8], 25600, 0.95, 0.03, 0.25)
        self.op_type = "rpn_target_assign"
        self.inputs = {
            'Anchor': anchor,
            'GtBox': (bbox, [[4, 4]]),
            'DistMat': (iou, [[4, 4]]),
        }
        # Attributes mirror the values passed to rpn_blob above; fix_seed
        # keeps the operator's random subsampling deterministic.
        self.attrs = {
            'rpn_batch_size_per_im': 25600,
            'rpn_positive_overlap': 0.95,
            'rpn_negative_overlap': 0.03,
            'fg_fraction': 0.25,
            'fix_seed': True
        }
        self.outputs = {
            'LocationIndex': loc_index.astype('int32'),
            'ScoreIndex': score_index.astype('int32'),
            'TargetBBox': tgt_bbox.astype('float32'),
            'TargetLabel': tgt_lbl.astype('int64'),
        }
    def test_check_output(self):
        """Compare the operator output with the numpy reference targets."""
        self.check_output()
# Run the test suite when executed directly.
if __name__ == '__main__':
    unittest.main()
|
{"hexsha": "bd548009b3ada9512e4b5f7d7b61b67b0717a39b", "size": 5628, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/paddle/fluid/tests/unittests/test_rpn_target_assign_op.py", "max_stars_repo_name": "lijiancheng0614/Paddle", "max_stars_repo_head_hexsha": "f980b29e6259b8e51f4ee04260e3a84233f337df", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "python/paddle/fluid/tests/unittests/test_rpn_target_assign_op.py", "max_issues_repo_name": "lijiancheng0614/Paddle", "max_issues_repo_head_hexsha": "f980b29e6259b8e51f4ee04260e3a84233f337df", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "python/paddle/fluid/tests/unittests/test_rpn_target_assign_op.py", "max_forks_repo_name": "lijiancheng0614/Paddle", "max_forks_repo_head_hexsha": "f980b29e6259b8e51f4ee04260e3a84233f337df", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.1090909091, "max_line_length": 79, "alphanum_fraction": 0.6398365316, "include": true, "reason": "import numpy", "num_tokens": 1548}
|
# Build a vectorised river network for the Neretva basin, attach reach slope
# plus per-catchment land-cover and geology proportions, and save the result
# as a GeoPackage.
library(sf)
library(watershed)
library(raster)
library(data.table)
# Input elevation, delineated stream stack, land cover and geology layers.
dem = raster("data/dem.tif")
stream = stack("output/neretva.grd")
corine = st_read("data/neretva_lc.gpkg")
geo = st_read("data/neretva_geology.gpkg")
# Reproject geology to match the CORINE land-cover CRS.
geo = st_transform(geo, st_crs(corine))
# Pixel topology is needed for vectorising and for catchment delineation.
Tp = pixel_topology(stream)
neretva_rn = vectorise_stream(stream, Tp)
neretva_rn$slope = river_slope(neretva_rn, dem)
# Intersect the network with land cover and geology, weighted by drainage.
neretva_lc = w_intersect(neretva_rn, areas = corine,
	area_id = "code_18", drainage = stream$drainage)
# NOTE(review): area_id = "xx" looks like a placeholder column name --
# confirm it matches an attribute of the geology layer.
neretva_geo = w_intersect(neretva_rn, areas = geo,
	area_id = "xx", drainage = stream$drainage)
# Keep only the whole-catchment summaries.
neretva_lc = neretva_lc[method == "catchment"]
neretva_geo = neretva_geo[method == "catchment"]
# Tag the source layer before stacking the two tables.
neretva_lc[, layer := "lc"]
neretva_geo[, layer := "geo"]
neretva_stats = rbind(neretva_lc, neretva_geo)
neretva_stats$method = NULL
neretva_stats$area = NULL
# One row per reach/category with the proportion of the catchment covered.
neretva_stats = neretva_stats[, .(reach_id = reachID, category = paste(layer, category, sep = "_"), proportion = proportion)]
# Wide format: one column per lc_*/geo_* category, zero-filled.
neretva_stats_w = dcast(neretva_stats, reach_id ~ category, fill = 0, value.var = "proportion")
c_area_reach = catchment(stream, type = 'reach', Tp = Tp)
neretva_rn = merge(neretva_rn, neretva_stats_w, by = 'reach_id')
neretva_rn$catchment_area = c_area_reach
# Overwrite any previous output file.
st_write(neretva_rn, "output/neretva.gpkg", append = FALSE)
|
{"hexsha": "52e1b8e6bb289403a4dd5823016a71975e39a5c0", "size": 1310, "ext": "r", "lang": "R", "max_stars_repo_path": "r/stream_vector.r", "max_stars_repo_name": "flee-group/neretva_rn", "max_stars_repo_head_hexsha": "b960fb157ced5533bc39bbfbe7b516cbbbfe50a6", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "r/stream_vector.r", "max_issues_repo_name": "flee-group/neretva_rn", "max_issues_repo_head_hexsha": "b960fb157ced5533bc39bbfbe7b516cbbbfe50a6", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "r/stream_vector.r", "max_forks_repo_name": "flee-group/neretva_rn", "max_forks_repo_head_hexsha": "b960fb157ced5533bc39bbfbe7b516cbbbfe50a6", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.9512195122, "max_line_length": 125, "alphanum_fraction": 0.7358778626, "num_tokens": 424}
|
import numpy as np
import scipy.interpolate as interp
import astropy.units as u
from . import detector
from . import binary
def _clear_waveform(source):
    """Delete any cached PhenomD waveform attributes from ``source``.

    Forces the strain to be recalculated the next time it is accessed.
    """
    for attr in ("_phenomD_f", "_phenomD_h", "f", "h_f"):
        if hasattr(source, attr):
            delattr(source, attr)


def Get_SNR_Matrix(
    source, instrument, var_x, sample_rate_x, var_y, sample_rate_y, **kwargs
):
    """Calculates SNR Matrix

    Parameters
    ----------
    source: object
        Instance of a gravitational wave source class
    instrument: object
        Instance of a gravitational wave detector class
    var_x: str
        x-axis variable
    sample_rate_x: int
        Number of samples at which ``SNRMatrix`` is calculated corresponding to the x-axis variable
    var_y: str
        y-axis variable
    sample_rate_y: int
        Number of samples at which ``SNRMatrix`` is calculated corresponding to the y-axis variable
    inc: int, float, Quantity, Optional
        The inclination of the source in degrees
    integral_consts: int, float, Optional
        Used to adjust the SNR scaling in ``Calc_Chirp_SNR``
    method: str, {'SPA','PN'}
        Switches between methods of calculating the monochromatic strain based on the stationary phase approximation,
        or a rescaling of the source waveform in the low frequency regime (Post-Newtonian approximation)

    Returns
    -------
    sample_x: array
        samples at which SNRMatrix was calculated corresponding to the x-axis variable
    sample_y: array
        samples at which SNRMatrix was calculated corresponding to the y-axis variable
    SNRMatrix: array-like
        the ``sample_rate_y`` X ``sample_rate_x`` matrix at which the SNR was calculated

    Notes
    -----
    Uses the variable given and the data range to sample the space either logarithmically or linearly based on the
    selection of variables, then computes the SNR for each grid point.
    """
    inc = kwargs.get("inc")
    integral_consts = kwargs.get("integral_consts")
    method = kwargs.get("method", "SPA")

    source.instrument = instrument
    # Get samples for both variables
    [sample_x, sample_y, recalculate_strain, recalculate_noise] = Get_Samples(
        source, instrument, var_x, sample_rate_x, var_y, sample_rate_y
    )
    switch = False
    if recalculate_noise == "y":
        # Swap axes so the noise-varying parameter sits on the outer loop:
        # the calculation runs much faster when it doesn't recalculate the
        # noise every inner iteration.
        switch = True
        recalculate_noise = "x"
        original_sample_x = sample_x
        original_sample_y = sample_y
        original_var_x = var_x
        original_var_y = var_y
        var_x = original_var_y
        var_y = original_var_x
        sample_x = original_sample_y
        sample_y = original_sample_x

    sampleSize_x = len(sample_x)
    sampleSize_y = len(sample_y)
    SNRMatrix = np.zeros((sampleSize_y, sampleSize_x))

    for i in range(sampleSize_x):
        if recalculate_noise in ["x", "both"]:
            # Update instrument attribute (also updates dictionary)
            if isinstance(instrument, detector.GroundBased):
                # Ground-based noise parameters are nested dict keys encoded
                # as space-separated names.
                var_x_names = var_x.split()
                if len(var_x_names) == 2:
                    updated_dict_x = {var_x_names[0]: {var_x_names[1]: sample_x[i]}}
                elif len(var_x_names) == 3:
                    updated_dict_x = {
                        var_x_names[0]: {var_x_names[1]: {var_x_names[2]: sample_x[i]}}
                    }
                instrument.Set_Noise_Dict(updated_dict_x)
            else:
                setattr(instrument, var_x, sample_x[i])
            Recalculate_Noise(source, instrument)
        elif recalculate_noise in ["neither"]:
            if var_x == "chii":
                # "chii" changes both spins simultaneously
                setattr(source, "chi1", sample_x[i])
                setattr(source, "chi2", sample_x[i])
            else:
                # Update source attribute (also updates dictionary)
                setattr(source, var_x, sample_x[i])

        for j in range(sampleSize_y):
            if recalculate_noise in ["x", "neither"]:
                if var_y == "chii":
                    # "chii" changes both spins simultaneously
                    setattr(source, "chi1", sample_y[j])
                    setattr(source, "chi2", sample_y[j])
                else:
                    setattr(source, var_y, sample_y[j])
            elif recalculate_noise in ["both"]:
                # Update instrument attribute (also updates dictionary)
                if isinstance(instrument, detector.GroundBased):
                    var_y_names = var_y.split()
                    # BUG FIX: the y-axis samples were previously indexed with
                    # the outer loop index ``i`` instead of ``j``, so every
                    # row of the matrix reused the wrong y value.
                    if len(var_y_names) == 2:
                        updated_dict_y = {var_y_names[0]: {var_y_names[1]: sample_y[j]}}
                    elif len(var_y_names) == 3:
                        updated_dict_y = {
                            var_y_names[0]: {
                                var_y_names[1]: {var_y_names[2]: sample_y[j]}
                            }
                        }
                    instrument.Set_Noise_Dict(updated_dict_y)
                else:
                    setattr(instrument, var_y, sample_y[j])
                Recalculate_Noise(source, instrument)

            binary.Check_Freq_Evol(
                source, T_evol=None, T_evol_frame="observer", f_gw_frame="observer"
            )
            if source.ismono:
                # Monochromatic source
                if hasattr(source, "h_gw"):
                    del source.h_gw
                if method == "PN" and recalculate_strain:
                    # The PN strain is built from the waveform, so it must be
                    # regenerated at every grid point.
                    _clear_waveform(source)
                SNRMatrix[j, i] = Calc_Mono_SNR(
                    source, instrument, inc=inc, method=method
                )
            else:  # Chirping source
                if recalculate_strain:
                    _clear_waveform(source)
                SNRMatrix[j, i] = Calc_Chirp_SNR(
                    source, instrument, integral_consts=integral_consts
                )
    if switch:
        # Undo the axis swap so the returned samples match the caller's axes.
        return [original_sample_x, original_sample_y, SNRMatrix.T]
    else:
        return [sample_x, sample_y, SNRMatrix]
def _axis_bounds(var, source, instrument):
    """Look up the (min, max) range for ``var``.

    Checks the source's ``var_dict`` first, then the instrument's.
    Returns ``(var_min, var_max, from_instrument)`` with any astropy
    Quantity values unwrapped to plain floats.

    Raises ValueError if ``var`` belongs to neither object.
    """
    if var in source.var_dict.keys():
        var_dict = source.var_dict
        from_instrument = False
    elif var in instrument.var_dict.keys():
        var_dict = instrument.var_dict
        from_instrument = True
    else:
        raise ValueError(var + " is not a variable in the source or the instrument.")
    var_min = var_dict[var]["min"]
    var_max = var_dict[var]["max"]
    if isinstance(var_min, u.Quantity):
        var_min = var_min.value
        var_max = var_max.value
    return var_min, var_max, from_instrument


def _axis_samples(var, var_min, var_max, sample_rate, instrument, err_suffix):
    """Build the sample vector for one axis.

    ``n_p`` (pulsar number) is sampled in integer steps; any other variable
    is sampled linearly when its range spans fewer than two orders of
    magnitude (or touches zero/negative values), and logarithmically
    otherwise.

    Raises ValueError (message ``var + err_suffix``) when min/max is None.
    """
    if var_min is None or var_max is None:
        raise ValueError(var + err_suffix)
    if var == "n_p":
        instrument.var_dict[var]["sampled"] = True
        # Sample in integer steps; cap at roughly 10 samples across the range.
        sample_range = var_max - var_min
        if sample_range > 10:
            step = max(2, int(sample_range / 10))
            samples = np.arange(var_min, var_max, step)
            if var_max not in samples:
                samples = np.append(samples, var_max)
        else:
            samples = np.arange(var_min, var_max + 1)
        return samples
    # Order-of-magnitude cut between linear and log sampling.
    oom_cut = 2.0
    if var_max <= 0.0 or var_min <= 0.0:
        # Log sampling impossible with non-positive endpoints.
        return np.linspace(var_min, var_max, sample_rate)
    scale = np.log10(var_max) - np.log10(var_min)
    if scale >= oom_cut:
        return np.logspace(np.log10(var_min), np.log10(var_max), sample_rate)
    return np.linspace(var_min, var_max, sample_rate)


def Get_Samples(source, instrument, var_x, sample_rate_x, var_y, sample_rate_y):
    """Gets the x and y-axis samples

    Parameters
    ----------
    source: object
        Instance of a gravitational wave source class
    instrument: object
        Instance of a gravitational wave detector class
    var_x: str
        x-axis variable
    sample_rate_x: int
        Number of samples corresponding to the x-axis variable
    var_y: str
        y-axis variable
    sample_rate_y: int
        Number of samples corresponding to the y-axis variable

    Returns
    -------
    sample_x: array
        samples corresponding to the x-axis variable
    sample_y: array
        samples corresponding to the y-axis variable
    recalculate_strain: bool
        whether the waveform must be rebuilt at every grid point
    recalculate_noise: str, {'neither','x','y','both'}
        which axes require the instrument noise to be recalculated

    Notes
    -----
    Creates a sample space for each variable either linearly or in logspace
    depending on the span of its [min, max] range.
    """
    # "chii" is an alias used to vary both spins simultaneously; mapping it
    # to chi1 is arbitrary since Get_SNR_Matrix sets chi1 and chi2 together.
    if var_x == "chii":
        var_x = "chi1"
    elif var_y == "chii":
        var_y = "chi1"

    var_x_min, var_x_max, x_from_instrument = _axis_bounds(var_x, source, instrument)
    var_y_min, var_y_max, y_from_instrument = _axis_bounds(var_y, source, instrument)

    if x_from_instrument and y_from_instrument:
        recalculate_noise = "both"
    elif x_from_instrument:
        recalculate_noise = "x"
    elif y_from_instrument:
        recalculate_noise = "y"
    else:
        recalculate_noise = "neither"

    # q and the spins enter the waveform itself, so it must be recalculated
    # at every grid point.
    recalculate_strain = var_x in ["q", "chi1", "chi2"] or var_y in ["q", "chi1", "chi2"]

    sample_x = _axis_samples(
        var_x, var_x_min, var_x_max, sample_rate_x, instrument,
        " does not have an assigned min and/or max.",
    )
    sample_y = _axis_samples(
        var_y, var_y_min, var_y_max, sample_rate_y, instrument,
        " does not have an assigned min and/or max value.",
    )

    return sample_x, sample_y, recalculate_strain, recalculate_noise
def Recalculate_Noise(source, instrument):
    """Drop cached noise-curve attributes so they are lazily recomputed.

    Parameters
    ----------
    source: object
        Instance of a gravitational wave source class
    instrument: object
        Instance of a gravitational wave detector class

    Raises
    ------
    ValueError
        If the instrument's noise curve was loaded from a file (``I_type``
        or ``load_location`` present) and therefore cannot be varied.
    """
    if hasattr(instrument, "I_type") or hasattr(instrument, "load_location"):
        raise ValueError("Cannot vary a loaded instrument's parameters")
    if not isinstance(instrument, detector.GroundBased):
        # Non-ground-based detectors cache these arrays directly on the
        # instance; deleting them forces recalculation on next access.
        for cached_attr in ("P_n_f", "fT", "S_n_f", "h_n_f"):
            if hasattr(instrument, cached_attr):
                delattr(instrument, cached_attr)
    if isinstance(instrument, detector.PTA) and hasattr(
        instrument, "_sensitivitycurve"
    ):
        del instrument._sensitivitycurve
    if hasattr(source, "instrument"):
        source.instrument = instrument
def Calc_Mono_SNR(source, instrument, inc=None, method="SPA"):
    r"""Compute the SNR of a monochromatic source.

    Parameters
    ----------
    source: object
        Instance of a gravitational wave source class
    instrument: object
        Instance of a gravitational wave detector class
    inc: None, float, int, optional
        Inclination of the monochromatic source in radians.
    method: str, {'SPA','PN'}
        'SPA' uses the stationary phase approximation for the monochromatic
        strain; 'PN' rescales the source waveform in the low-frequency
        (Post-Newtonian) regime.

    Notes
    -----
    The SPA :math:`h_{0}` is a factor of :math:`\frac{\pi}{2}` larger than
    :math:`|\tilde{h}(f)|\sqrt{2\dot{f}}` from IMRPhenomD, so a
    :math:`2/\pi` factor is applied for 'SPA' to keep the transition between
    the monochromatic and chirping regimes smooth.
    """
    if not hasattr(source, "instrument"):
        source.instrument = instrument
    # Masses in the source class are source-frame; frequencies observer-frame.
    source.h_gw = binary.Get_Mono_Strain(
        source,
        inc=inc,
        freq=source.f_gw,
        f_gw_frame="observer",
        pn_frame="observer",
        out_frame="observer",
        method=method,
    )
    scale = 2 / np.pi if method == "SPA" else 1.0
    # Nearest instrument frequency bin to the source GW frequency.
    band_index = np.abs(instrument.fT - source.f_gw).argmin()
    if band_index == 0 or band_index >= len(instrument.fT) - 1:
        # f_gw sits at/off the edge of the instrument band: effectively no SNR.
        return 1e-30
    longest_T_obs = np.max(np.unique(instrument.T_obs.to("s")))
    return scale * source.h_gw * np.sqrt(longest_T_obs / instrument.S_n_f[band_index])
def Calc_Chirp_SNR(source, instrument, integral_consts=None):
    """Compute the SNR of an evolving (chirping) source.

    Parameters
    ----------
    source: object
        Instance of a gravitational wave source class
    instrument: object
        Instance of a gravitational wave detector class
    integral_consts: int, float, Optional
        Used to adjust the SNR scaling; defaults to 16/5 when not numeric.

    Returns
    -------
    float
        The chirp SNR, or ~0 (1e-30) when the source has already merged.

    Notes
    -----
    Interpolates the instrument noise onto the waveform frequencies, then
    integrates over the overlapping region (eqn 18 of Robson, Cornish, and
    Liu 2018 <https://arxiv.org/abs/1803.01944>). Noise values outside of
    the sensitivity curve are arbitrarily set to 1e30 so the SNR there is
    effectively 0.
    """
    # An earlier design integrated from f(t_init) to f(t_init - T_obs),
    # which only works when t_init is randomly sampled (it is not here).
    # indxfgw_start = np.abs(source.f-source.f_init).argmin()
    # indxfgw_end = np.abs(source.f-source.f_T_obs).argmin()
    if not hasattr(source, "instrument"):
        source.instrument = instrument
    if not hasattr(source, "f_T_obs"):
        binary.Check_Freq_Evol(source)
    # Integrate only from the observed frequency f(T_obs before merger)
    # up to merger.
    start_idx = np.abs(source.f - source.f_T_obs).argmin()
    if start_idx == 0:
        print(
            "Uh, you probably should set your source f_min to lower. "
            f"Your minimum calculated frequency is {source.f[0]} and f(T_obs) is {source.f_T_obs}"
        )
    end_idx = len(source.f)
    if start_idx >= end_idx - 1:
        # The SMBH has already merged; SNR ~0.
        return 1e-30

    freqs = source.f[start_idx:end_idx]
    strain = source.h_f[start_idx:end_idx]

    # Interpolate the strain noise spectral density onto the frequencies
    # the waveform spans; outside the signal band the (log) noise is set
    # to 30, i.e. S_n_f = 1e30.
    log_noise_interp = interp.interp1d(
        np.log10(instrument.fT.value),
        np.log10(instrument.S_n_f.value),
        kind="cubic",
        fill_value=30.0,
        bounds_error=False,
    )
    noise = 10 ** log_noise_interp(np.log10(freqs.value))

    if not isinstance(integral_consts, (int, float)):
        integral_consts = 16.0 / 5.0

    # SNR**2 integrand: |h(f)|^2 over the sky-averaged noise spectral density.
    integrand = strain ** 2 / noise
    if isinstance(integrand, u.Quantity) and isinstance(freqs, u.Quantity):
        snr_squared = integral_consts * np.trapz(integrand.value, freqs.value, axis=0)
    else:
        snr_squared = integral_consts * np.trapz(integrand, freqs, axis=0)
    return np.sqrt(snr_squared)
|
{"hexsha": "065616b19651efd757b1637dffa5afe796668b4b", "size": 19859, "ext": "py", "lang": "Python", "max_stars_repo_path": "gwent/snr.py", "max_stars_repo_name": "ark0015/GWDetectorDesignToolkit", "max_stars_repo_head_hexsha": "6ee2f7a633c973ea10b450257b1ad4dbd0323738", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 14, "max_stars_repo_stars_event_min_datetime": "2019-10-16T13:27:19.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-15T02:14:49.000Z", "max_issues_repo_path": "gwent/snr.py", "max_issues_repo_name": "ark0015/GWDetectorDesignToolkit", "max_issues_repo_head_hexsha": "6ee2f7a633c973ea10b450257b1ad4dbd0323738", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-09-29T21:21:40.000Z", "max_issues_repo_issues_event_max_datetime": "2019-09-29T21:21:40.000Z", "max_forks_repo_path": "gwent/snr.py", "max_forks_repo_name": "ark0015/gwent", "max_forks_repo_head_hexsha": "6ee2f7a633c973ea10b450257b1ad4dbd0323738", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2019-11-27T09:45:31.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-15T02:14:31.000Z", "avg_line_length": 40.2004048583, "max_line_length": 199, "alphanum_fraction": 0.6033032882, "include": true, "reason": "import numpy,import scipy,import astropy", "num_tokens": 4609}
|
import random
import numpy as np
import pytest
from pandas import DataFrame
from tests.utils import assert_dataframes_equals
from weaverbird.backends.pandas_executor.steps.statistics import execute_statistics
from weaverbird.pipeline.steps import StatisticsStep
@pytest.fixture
def sample_df():
    """Six labelled rows split into two groups of three, with integer values."""
    labels = [f'Label {i}' for i in range(1, 7)]
    groups = ['Group 1', 'Group 1', 'Group 1', 'Group 2', 'Group 2', 'Group 2']
    values = [13, 7, 20, 1, 10, 5]
    return DataFrame({'Label': labels, 'Group': groups, 'Value': values})
def test_statistics(sample_df: DataFrame):
    """Aggregate one column with no grouping: average, count, and a median quantile."""
    step = StatisticsStep(
        name='statistics',
        column='Value',
        groupbyColumns=[],
        statistics=['average', 'count'],
        quantiles=[{'label': 'median', 'nth': 1, 'order': 2}],
    )
    expected = DataFrame({'average': [9.33333], 'count': [6], 'median': [8.5]})
    assert_dataframes_equals(execute_statistics(step, sample_df), expected)
def test_statistics_with_groups(sample_df: DataFrame):
    """Same statistics as test_statistics, but computed per 'Group'."""
    step = StatisticsStep(
        name='statistics',
        column='Value',
        groupbyColumns=['Group'],
        statistics=['average', 'count'],
        quantiles=[{'label': 'median', 'nth': 1, 'order': 2}],
    )
    expected = DataFrame(
        {
            'Group': ['Group 1', 'Group 2'],
            'average': [13.33333, 5.33333],
            'count': [3, 3],
            'median': [13, 5],
        }
    )
    assert_dataframes_equals(execute_statistics(step, sample_df), expected)
def test_benchmark_statistics(benchmark):
    """Benchmark execute_statistics on a 1000-row frame with two random groups."""
    group_names = ['group_1', 'group_2']
    big_df = DataFrame(
        {
            'value': np.random.random(1000),
            'id': list(range(1000)),
            'group': [random.choice(group_names) for _ in range(1000)],
        }
    )
    step = StatisticsStep(
        name='statistics',
        column='value',
        groupbyColumns=['group'],
        statistics=['average', 'count'],
        quantiles=[{'label': 'median', 'nth': 1, 'order': 2}],
    )
    benchmark(execute_statistics, step, big_df)
|
{"hexsha": "2620532ac8e6c5ea45a98778f3ae3ccd5ad7b26c", "size": 2132, "ext": "py", "lang": "Python", "max_stars_repo_path": "server/tests/steps/test_statistics.py", "max_stars_repo_name": "JeremyJacquemont/weaverbird", "max_stars_repo_head_hexsha": "e04ab6f9c8381986ab71078e5199ece7a875e743", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 54, "max_stars_repo_stars_event_min_datetime": "2019-11-20T15:07:39.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-24T22:13:51.000Z", "max_issues_repo_path": "server/tests/steps/test_statistics.py", "max_issues_repo_name": "JeremyJacquemont/weaverbird", "max_issues_repo_head_hexsha": "e04ab6f9c8381986ab71078e5199ece7a875e743", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 786, "max_issues_repo_issues_event_min_datetime": "2019-10-20T11:48:37.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-23T08:58:18.000Z", "max_forks_repo_path": "server/tests/steps/test_statistics.py", "max_forks_repo_name": "JeremyJacquemont/weaverbird", "max_forks_repo_head_hexsha": "e04ab6f9c8381986ab71078e5199ece7a875e743", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 10, "max_forks_repo_forks_event_min_datetime": "2019-11-21T10:16:16.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-21T10:34:06.000Z", "avg_line_length": 28.0526315789, "max_line_length": 88, "alphanum_fraction": 0.5848968105, "include": true, "reason": "import numpy", "num_tokens": 540}
|
%
% IEEE Transactions on Microwave Theory and Techniques example
% Tibault Reveyrand - http://www.microwave.fr
%
% http://www.microwave.fr/LaTeX.html
% ---------------------------------------
% ================================================
% Please HIGHLIGHT the new inputs such like this :
% Text :
% \hl{comment}
% Aligned Eq.
% \begin{shaded}
% \end{shaded}
% ================================================
\documentclass[journal]{IEEEtran}
\usepackage{amsmath,amssymb,amsfonts}
\usepackage{tabularx}
\usepackage[utf8]{inputenc} % allow utf-8 input
\usepackage[T1]{fontenc} % use 8-bit T1 fonts
%\usepackage{hyperref} % hyperlinks
\usepackage{url} % simple URL typesetting
\usepackage{booktabs} % professional-quality tables
\usepackage{amsfonts} % blackboard math symbols
\usepackage{nicefrac} % compact symbols for 1/2, etc.
\usepackage{microtype} % microtypography
\usepackage{graphicx}
\usepackage{float}
\restylefloat{table}
%\usepackage[toc,page]{appendix}
\usepackage{multicol}
\usepackage{caption}
\usepackage{subcaption}
\usepackage{amsmath}
\usepackage{algorithm}
\usepackage{algpseudocode}
\usepackage{tikz}
\usetikzlibrary{trees}
\usepackage{listings}
\DeclareMathOperator*{\argmax}{arg\,max} % in your preamble
\DeclareMathOperator*{\argmin}{arg\,min} % in your preamble
\usepackage{textcomp}
%\usepackage[retainorgcmds]{IEEEtrantools}
%\usepackage{bibentry}
\usepackage{xcolor,soul,framed} %,caption
\usepackage[noadjust]{cite}
%\usepackage{biblatex}
%\bibliographystyle{plain}
%=== TITLE & AUTHORS ====================================================================
\begin{document}
\bstctlcite{IEEEexample:BSTcontrol}
\title{Multiagent Reinforcement Learning based Energy Beamforming Control}
\author{Zhongqiang Pang, Liping Bai \thanks{Nanjing University of Posts and Telecommunications, College of Automation and College of Artificial Intelligence, Nanjing, Jiangsu, 210000, China. Email: zqpang@njupt.edu.cn}}
% ====================================================================
\maketitle
% === ABSTRACT ====================================================================
% =================================================================================
\begin{abstract}
%\boldmath
Ultra low power devices make far-field wireless power transfer a viable option for energy delivery despite the exponential attenuation. Electromagnetic beams are constructed from the stations such
that wireless energy is directionally concentrated around the ultra low power devices. Energy beamforming faces different challenges compare to information beamforming due to the lack of feedback on channel state. Various methods have been proposed such as one-bit channel feedback to enhance energy beamforming capacity, yet it still has considerable computation overhead and need to be computed centrally. Valuable resources and time is wasted on transfering control information back and forth. In this paper, we propose a novel multiagent reinforcement learning(MARL) formulation for codebook based beamforming control. It takes advantage of the inherienntly distributed structure in a wirelessly powered network and lay the ground work for fully locally computed beam control algorithms. Source code can be found at \url{https://github.com/BaiLiping/WirelessPowerTransfer}.
\end{abstract}
% === KEYWORDS ====================================================================
% =================================================================================
\begin{IEEEkeywords}
Multiagent Reinforcement Learning,MARL, Wireless Power Transfer, Beamforming
\end{IEEEkeywords}
% For peer review papers, you can put extra information on the cover
% page as needed:
% \ifCLASSOPTIONpeerreview
% \begin{center} \bfseries EDICS Category: 3-BBND \end{center}
% \fi
%
% For peerreview papers, this IEEEtran command inserts a page break and
% creates the second title. It will be ignored for other modes.
\IEEEpeerreviewmaketitle
% ====================================================================
% ====================================================================
% ====================================================================
% === I. INTRODUCTION =============================================================
% =================================================================================
\section{Introduction}
\IEEEPARstart{W}{ireless} power transfer(WPT) can be divided into near-field WPT with inductive coupling, magnetic resonant coupling or capacitive coupling \cite{8357386} and far-field WPT with electromagnetic power beams. \cite{8246215} Compared to near-field WPT, the far-field option has considerable attenuation, yet the increased application of ultra low power devices such as RFID, low power sensor networks \cite{7578025}, together with various forms of joint wireless information and power transfer technology such as Simultaneous Wireless Information and Power Transfer (SWIPT) \cite{8214104}, Wirelessly Powered Communication Networks (WPCNs) \cite{7462480}, Wirelessly Powered Backscatter Communication (WPBC) \cite{7842391} has made far-field WPT an important tool for powering those devices. Current far-field WPT technology can effectively transfer tens of microwatts of RF power to wireless devices from a distance of more than 10 meters. \cite{7462480}
For far-field WPT to be as effective as it can be, directional RF phased array, a group of radiating elements whose phase and magnitude can be controlled to generate a directional beam pattern, \cite{665} is utilized to increase the directional gain of power transfer. Digital phased control has high fidelity and is mostly used for communication systems such as 5G Antenna. However, its energy and thermal cost make it prohibitively expensive for other applications. Analog phased control utilizes RF chain to systematically shift the phase discretely. In this paper, we focus on analog phased array.
There are two kinds of control algorithms for analog beamformer, one is adaptive beamforming, which can adjust according to various channel conditions, but it is expensive in terms of data collection and computational time. A less versatile control algorithm is switch-based control. There are set of predetermined codes for beamforming control. An exhaustive search is performed to find the "optimal code" for the given circumstances\cite{5262295} \cite{1237152}. The codebook exhaustive search algorithm or codebook based beam training process still has a large overhead, particularly for a multi-station scenario. In previous works reinforcement learning based solutions have been proposed where the multi-armed bandit framework \cite{8662770} or Q-learning framework \cite{Cui2019SecureWC} was used to render the process more effective.
In this paper, beamforming control is formulated as a multi-agent reinforcement learning problem. Rollout algorithm proposed by Dimitri P. Bertsekas \cite{Bertsekas2019MultiagentRA} is utilized to properly trade-off action space complexity and state-space complexity, hence reducing the learning time. This paper is arranged as the following. In section II, the system model described. In section III, the setup of multiagent reinforcement learning is introduced. In section IV, the problem of WPT is seen through the lense of multiagent reinforcement learning and the simulation result is presented.
% === II. Harmonically-Terminated Power Rectifier Analysis ========================
% =================================================================================
\section{Enegy Beamforming}
\subsection{Uniform Linear Array}
The theories of phased array were fully formulated during the WWII era where an array of radars was deployed to detect an accurate angle of arrival \cite{5237174}. Today, phased array hardware is widely available as a commercial product as shown in Figure \ref{fig:pivotal}. Together with various forms of Space Time Signal Processing(STSP), phased array and the beamforming technology has become the enabling components for future communication networks. There are different configurations of arrays, in this paper, we only consider one dimension uniform linear array.
\begin{figure}
\centering
\includegraphics[width=0.3\textwidth]{beamformer.png}
\caption{Pivotal 39GHz Beamformer}
\label{fig:pivotal}
\end{figure}
\subsection{Channel Model}
Suppose there are p propogation path from transmitter to receiver. the gain for each path is denoted by $\alpha_i$. The channel is modeled as a sum of each path. When Line of Sight(LoS) not available and the number of path is large, Rayleigh fading is used to model Non Line of Sight channel gain. In this paper, we only consider the environment with direct Line of Sight transmission and no reflection path.
\begin{equation}
h=\sum_{i=1}^p \alpha_i e^{-j2\pi \frac{d_i}{\lambda}}
\end{equation}
\begin{figure}
\centering
\includegraphics[width=0.3\textwidth]{channel.png}
\caption{Multipath Channel Model}
\label{fig:channel}
\end{figure}
\subsection{beamforming codebook $\mathcal{F}$}
Let Angle of Departure(AoD) be denoted as $\varphi$. Suppose the range of adjustment for the beamformer is $\zeta$ degrees and is discretized into N portions, each with the angle adjustment of $\frac{\zeta}{N}$ degree. For the $i^{th}$ code in the codebook with AoD of $\varphi_i$, the beamforming vector is computed as the following:
\begin{equation}
\textbf{f} := \textbf{a}(\varphi_i)= [1,e^{jdcos(\varphi_i)},...,e^{jd(M-1)cos(\varphi_i)}]^T
\end{equation}
\subsection{System Model}
The schematics of wirelessly powered communication network is shown in Figure \ref{fig:MIMO}. L energy transmitting node, each equipped with M radiating elements arranged as a uniform linear array, transmit power to K energy receivers scattered in an open field.
\begin{figure}
\centering
\includegraphics[width=0.3\textwidth]{6.png}
\caption{Wirelessly Powered Network}
\label{fig:MIMO}
\end{figure}
One challenge for energy beamforming is lack of channel information. Time Division Duplex is a common strategy to implement joint communication and energy transfer, where the wireless power transfer happens from time 0 to P and wireless information transfer(WIT) happens from time P to T. Therefore, no information can be sent before enough energy is stored during the power transfer phase. Ideally, pilots signals should be sent and decoded systematically for channel estimation. \cite{1597555}, yet this function is not available for energy beamforming because WIT and WPT functions are realized with separate circuits \cite{7462480}, where the latter does not provide decoding capacity. Previous works have proposed methods of channel estimation with only one-bit feedback \cite{6884811}, we would adopt this minimum feedback scheme in this paper.
Because the energy beamforming signal \textbf{x} does not carry any information, it is assumed to be independent sequences with zero mean and unit variance. \cite{6884811} Furthermore, because we consider the beamformer to be analog, $x_1$ to $x_M$ are the same signal $x \in \mathbb{C}$.The power in noise is significantly weaker than the energy signal, therefore it can be ignored for practical purposes.
Let $y_{j,p}$ denote the signal received on the $j^{th}$ receiver from the $p^{th}$ transmitter. $x_p$ be the signal transmitted from node p. $\textbf{f}_p$ be the beamforming code for node p. $h_{i,p}^j$ denote the line of signt channel connecting $j^{th}$ receiver to $i^{th}$ radiating element from $p^{th}$ transmitting node.
\[
y_{j,p}=
\begin{bmatrix}
h_{1,p}^j & h_{2,p}^j & ... & h_{M,p}^j\\
\end{bmatrix}
\begin{bmatrix}
f_{1,p}\\
.\\
.\\
f_{M,p}\\
\end{bmatrix}
x_p
\]
In this paper, we assume that the radiating elements from the same node share the same path gain $\alpha$. Let $\alpha_{j,p}$ denote the path gain for the line of sight channel connecting the $j^{th}$ receiver to the $p^{th}$ transmitter. Let $\varphi_{j,p}$ denote the Angle of Departure connecting the $j^{th}$ receiver to the $p^{th}$ transmitter. $\textbf{a}(\varphi_{j,p})^*=[1,e^{jdcos(\varphi_{j,p})},...,e^{jd(M-1)cos(\varphi_{j,p})}]$. Therefore:
\begin{equation}
y_{j,p}=\alpha_{j,p} \textbf{a}(\varphi_{j,p})^* \textbf{f}_p x_p
\end{equation}
The received signal on the $j^{th}$ receiver is a summation of the signals delivered from all the transmitters to this receiver.
\begin{equation}
y_j=\sum_{p=1}^L \alpha_{j,p} \textbf{a}(\varphi_{j,p})^* \textbf{f}_p x_p
\end{equation}
Let the wireless power transfer happen for duration P. The received energy on the $j^{th}$ receiver for duration P is:
\begin{equation}
e_j=\int_0^P |y_j(t)|^2 dt=\int_0^P |\sum_{p=1}^L \alpha_{j,p} \textbf{a}(\varphi_{j,p})^* \textbf{f}_p x_p(t)|^2 dt
\end{equation}
\subsection{Wireless Power Transfer}
The objective of energy beamforming control is to choose a beamforming code for each energy transmitting node such that the total received power is maximized while satisfying the minimum energy requirement of each energy receiver.
\begin{equation*}
\begin{aligned}
& \underset{ \textbf{f}_p, \forall p}{\text{maximize}}
&& \displaystyle\sum_{j \in \{1,2,...,L\}} e_j\\
& \text{subject to}
&& \textbf{f}_p \in \mathcal{F}, \forall p;\\
&&& e_j \geq e_{min}\\
\end{aligned}
\end{equation*}
\section{Reinforcement Learning}
\subsection{problem setup}
The impetus of reinforcement learning is that an agent can learn by interacting with the environment. In the intersection between control, optimization, and learning, the problem have different mathematical formulations. Here, we follow the problem setup proposed by Richard Sutton in his book Introduction to Reinforcement Learning. \cite{10.5555/551283}
Agent can observe the state at each step, denoted as $ S_{t} $, where t is the $t^{th}$ step taken. For our discussion, we focus only on the subset of problems where state s is fully observable by the agent. There are action choices for each state denoted as $ A_{t} $. A reward is given for each action taken at step t denoted as $ R_{t} $. The terminal step is denoted as t=T. For an episodic problem, T is a finite number, for a non-episodic problem, T=$\infty$
An episode of data is registered as an alternating sequence of state, action, and reward:
$$ S_{0}, A_{0}, R_{0}, S_{1}, A_{1}, R_{1}.......S_{T-1},A_{T-1},R_{T-1},S_{T},A_{T},R_{T} $$
Gain at step t is defined as the accumulative reward the agent can get from step t onward. A discounting factor $\gamma$ between 0 to 1 is introduced to incorporate the sense of time, much like how interest rate encodes time in financial systems:
\begin{equation}
G_{t} := R_{t}+\gamma R_{t+1}+\gamma ^2 R_{t+2}+...+\gamma^{T-t}R_{T}
\end{equation}
This can be written in its recursive form, known as Bellman Equation, which is the basis for an iteratively implemented backward induction algorithm:
\begin{equation}
G_{t}=R_{t}+\gamma G_{t+1}
\label{bellman}
\end{equation}
The transition matrix is introduced to encode the stochasticity in the environmental dynamics. The transition matrix $\mathcal{P}$ is defined as:
\begin{equation}
\mathcal{P}_{ss'}^a := Pr\{S_{t+1}=s'|S_{t}=s,A_{t}=a\}
\end{equation}
State/Action Function q(s,a) is defined as the expected gain starting from state s by taking action a:
\begin{align}
\begin{split}
q(s,a) :&= \mathbb{E}\{G_t|S_t=s,A_t=a\}\\
&=\mathbb{E} \{\sum_{k=0}^{T-t} \gamma ^k R_{t+k+1}| S_t=s,A_t=a\}\\
\end{split}
\end{align}
Policy is defined as:
\begin{equation}
\pi(s,a):=Pr(A=a|S=s)
\end{equation}
Optimal Policy is defined as:
\begin{equation}
\pi^*(s):=\argmax_a q(s,a)
\end{equation}
Value Function v(s) is defined as the expected gain starting from state s:
\begin{align}
\begin{split}
v(s) :&= \mathbb{E}\{G_t|S_t=s\}\\
&=\mathbb{E} \{\sum_{k=0}^{T-t} \gamma ^k R_{t+k+1}| S_t=s\}\\
&=\sum_{a \in \mathcal{A}} \pi(s,a) q(s,a)\\
\end{split}
\end{align}
\subsection{Without Approximation}
One obvious approach to learning is to statistically construct a model of the environment, which is called Model-Based Learning. The most primitive form of model-based learning is Bellman Equation based backward induction. Statistical tactics, such as maximum likelihood, Bayesian methods, etc., can be deployed to approximate the model with the least amount of sampling. However, since the environment is implicitly embedded in v(s) and q(s,a), the model building process can be circumvented entirely, hence Model-Free Learning. Depending on whether the iteration rules is policy dependent, model-free learning can be subdivided into on-policy learning and off-policy learning.
One hindrance to the implementation of the brute force backward induction is its memory requirement. A more effective approach is to update q value and v value after one episode, one step, or n steps. They are called Monte Carlo Method, Temporal Difference Method, and $\lambda(n)$ Method respectively.
For online learning, $\epsilon$-greedy Policy $\pi_{\epsilon}(s)$ is frequently deployed to balance exploration and exploitation, such that the environment can be encoded most efficiently. $\epsilon$ is initiated set to 1 and then asymptotically goes to 0 as the episode counts increases.
\begin{equation*}
\pi_{\epsilon}(s,a) = \begin{cases}
1-\epsilon+\frac{\epsilon}{|A|}& \displaystyle\argmax_{a} q_{\epsilon}(s,a)\\
\frac{\epsilon}{|A|}& \text{otherwise}\\
\end{cases}
\end{equation*}
\subsection{With Approximation}
When the problem gets complex, state S becomes a rather large vector and function approximation with neural networks can be utilized to facilitate learning. Reinforcement learning as a self-sustaining mathematical framework has been refined by Rich Sutton et al. since the 1980s. Only recently, the progress made with Deep Learning has been applied to the realm of Reinforcement Learning \cite{Mnih2013PlayingAW}, rendering the computation tenable with existing hardware.
Let the value function and state/action function be parameterized with $\textbf{w}: \hat{v}(s,\textbf{w}) \approx v(s)$ and $\hat{q}(s,a,\textbf{w}) \approx q(s,a) $
Let the $i^{th}$ iteration of parameter be denoted as $\textbf{$w_i$}$. The Loss Function $\mathcal{L}(\textbf{$w_i$})$ is defined as the following:
\begin{equation}
\mathcal{L}(\textbf{$w_i$}) := \mathbb{E}\{[v(s)-\hat{v}(s,\textbf{$w_i$})]^2\}
\label{v_loss}
\end{equation}
\begin{equation}
\mathcal{L}(\textbf{$w_i$}) := \mathbb{E}\{[q(s,a)-\hat{q}(s,a,\textbf{$w_i$})]^2\}
\label{q_loss}
\end{equation}
While the real value of v(s) and q(s,a) are not knowable, it can be approximated:
\begin{equation}
v(s) \approx \sum_{a \in A} R(s,a)+\gamma v(s',\textbf{w})
\label{v_approx}
\end{equation}
\begin{equation}
q(s,a) \approx r+\gamma \argmax_{a} q(s',a',\textbf{w})
\label{q_approx}
\end{equation}
The Gradient of weighing paramter \textbf{w} can be derived from \ref{v_loss} and \ref{q_loss} with the real values substituted by \ref{v_approx} and \ref{q_approx} respectively. By convention, constant is omitted. Parameter is updated following Gradient Descent:
\begin{equation}
\textbf{w}_{i}=\textbf{w}_{i-1}-\nabla_{w_{i-1}} \mathcal{L}(w_{i-1})
\end{equation}
\subsection{Policy Gradient Methods}
Policy $\pi(s)$ can be written as a function parameterized by $\theta$ with s as input and a smooth distribution overall all actions as output.By adjusting parameter $\theta$ we can adjust the distribution over action choices for different states. This style of learning is called policy gradient-based learning.
Let us register a path sequence taken by the agent as $\tau$ such that the sequence is denoted as \{$S_{\tau 0},A_{\tau 0}, R_{\tau 0}...S_{\tau T},A_{\tau T},R_{\tau T}$\}. the gain of sequence $\tau$ is defined as the gain of this entire sequence of state, action, reward:
\begin{equation}
G(\tau):=\displaystyle\sum_{t=0}^{T}\gamma^t R_t
\end{equation}
Denote $P(\tau,\theta)$ as the probability that path $\tau$ is travesed when the policy is parameterized by $\theta$. The Objective Function can be defined in various ways. Here we adopt the definition as the following:
\begin{equation}
U(\theta)=\sum_{\tau}P(\tau,\theta)G(\tau)
\end{equation}
The objective of the policy gradient method is to find the parameter $\theta$ to maximize the objective function.
The gradient of aforementioned utility function is:
\begin{equation}
\nabla_{\theta} U(\theta)= \nabla_{\theta}\sum_{\tau}P(\tau,\theta) G(\tau)
\end{equation}
A mathematical sleight of hand called Importance Sampling is deployed to convert this theoretical expression of gradient into something that is algorithmically feasible.
\begin{align}
\begin{split}
\nabla_{\theta} U(\theta) \approx \frac{1}{N}\displaystyle\sum_{\tau=1}^{N}\displaystyle\sum_{t=0}^{T-1} \nabla_{\theta}ln\pi_{\theta}(s,a)|_{\theta_{old}}[q^{\pi_{\theta_{old}}}(s,a)-b]\\
\end{split}
\end{align}
We can use stochastic gradient descent(SGD) method to update $\theta$:
\begin{equation}
\theta=\theta_{old}-\alpha \nabla_{\theta}ln\pi_{\theta}(s,a)|_{\theta_{old}}[q^{\pi_{\theta_{old}}}(s,a)-b]
\end{equation}
Actor-Critic Method takes advantage of both policy gradient and function approximation to build a bootstrap structure that lead up to fast convergence. state/action function for policy $\pi_{\theta}(s)$ is approximated by $q^{\pi_{\theta}}(s,a,\textbf{w})$.Baseline b is introduced into the bootstrap stracture to foster convergence. Different algorithms define baseline differently. In advantage Actor-Critic algorithm, baseline is defined as a value function based on $\pi_{\theta}$.Because the SGD updating process does not rely on the ordering of things, it is obvious that some of the aforementioned computations can be done asynchronously. Asynchronous Advantage Actor-Critic (A3C) is proven one of the most effective agents for renforcement learning, and is the one we will use in this paper.
\subsection{Multiagent Reinforcement Learning}
$A_t=\{A_t^1,A_t^2,...,A_t^M\}$ M is the number of agents. The action space is cartician product of action choices available to each agent. $A_t(s)=A_t^1(s) \times A_t^2(s) \times ... \times A_t^M(s)$, which grows exponentially as the number of agents grows.
The Rollout method proposed by Dimitri Bertsekas breakdown this collective decision into its sequential components, reducing the complexity of action space while increasing the complexity of state space. It is proven that the intermediate state rollout method yields the same result as does the regular method. \cite{Bertsekas2019MultiagentRA}
Without intermidiate state rollout, the sequences of data collected is:
...$S_t, A_t, R_t, S_{t+1}$...
The intermediate states rollout technique converting action space complexity into state-space complexity by introducing intermediate states, denoted as $S_t^k$ where k goes from 1 to M-1. The sequence of data is now:
...$S_t,A_t^1,R_t^1,S_t^1,A_t^2,R_t^2, S_t^2, ... , S_t^{M-1},A_t^M,R_t^M,S_{t+1}$...
where $S_t^k=(S_t^{k-1},A_t^k)$
suppose each agent has N choices. This formulation reduces the size action space from $N^M$ to $N \times M$.
\section{Beamforming as a Multiagent Reinforcement Learning problem}
\subsection{Environment}
The wirelessly powered communication network has L energy transmitting stations positioned at the corner of a 30m x 30m field. K energy receivers randomly scattered between 1m to 29m. 0.5s of energy transfer is followed with 0.5s of information transfer. Assume no energy leftover at each cycle, such that at the beginning of the next energy transfer interval, the remaining power at each energy receiver is 0.
\begin{small}
\begin{center}
\begin{tabular}{ c c }
\hline
Number of Energy Transmitting Nodes & L=4\\
\hline
&$TX_1$(0,0)\\
&$TX_2$(30,0)\\
{Positions of Trasmitting Node}&$TX_3$(30,30)\\
&$TX_4$(0,30)\\
\hline
Number of Radiation Elements per Trasmitting Node & M=64\\
\hline
Energy Carrier Frequency & 8M Hz\\
\hline
Field of Energy Receivers & 30m x 30m\\
\hline
Number of Energy Receivers & K \\
\hline
Energy Transfer Time & 0.5s \\
\hline
Information Transfer Time & 0.5s\\
\hline
Maximum Number of Steps & 100\\
\hline
\end{tabular}
\end{center}
\end{small}
Observation Space:\{$e_1,e_2,...e_K,c_1,c_2,c_3,c_4$\}
where $e_j$ is the energy received at the $j^{th}$ receiver, $c_i$ is the codebook choice for the $i^{th}$ energy emitting node.
Reward: If $e_j < e_{min}$, the reward is reduced by 50 points for each such receiver. If $e_{total}^{new} > e_{total}^{old}$, the reward is increased by 100 points. If $e_{total}^{new} < e_{total}^{old}$, the reward is reduced by 300 points.
\subsection{A3C agent}
\begin{small}
\begin{center}
\begin{tabular}{ c c }
\hline
Layers of Actor Network & 3\\
\hline
Layers of Critic Network & 3\\
\hline
Learning Rate for Actor & $\alpha_a$=0.1\\
\hline
Learning Rate for Critic & $\alpha_c$=0.1\\
\hline
Discount Rate & $\gamma$=0.9 \\
\hline
Action Function & Softmax \\
\hline
\end{tabular}
\end{center}
\end{small}
\subsection{Simulation Result}
\section{Conclusion}
In this paper, we demonstrated the possibility of formulating WPT as a multiagent reinforcement learning problem, which lays the groundwork for further study towards fully locally computed control algorithms for wirelessly powered communication networks. Instead of grouping the actions of all agents together, a multiagent rollout approach sees things sequentially: an action taken by one agent becomes part of the state of another. This framework reduces the dimension of the action space from exponential growth to multiplicative growth, and it can be applied to other problems. The most recent incarnation of beamforming technology is the passive reflective surface, or Intelligent Reflective Surface (IRS), where the reflective components number in the thousands. The multiagent approach proposed in this paper could be applied to IRS control as well, which should be a fruitful topic of future studies.
\bibliographystyle{IEEEtran}
\bibliography{Bibliography}
%\printbibliography
\end{document}
|
{"hexsha": "9aa35656a29b486f8624e20f33808ee570095950", "size": 26131, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "MTT_reveyrand.tex", "max_stars_repo_name": "BaiLiping/Paper4", "max_stars_repo_head_hexsha": "27044c80ecae9e8c03582237f08a8cd840ef8b26", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "MTT_reveyrand.tex", "max_issues_repo_name": "BaiLiping/Paper4", "max_issues_repo_head_hexsha": "27044c80ecae9e8c03582237f08a8cd840ef8b26", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "MTT_reveyrand.tex", "max_forks_repo_name": "BaiLiping/Paper4", "max_forks_repo_head_hexsha": "27044c80ecae9e8c03582237f08a8cd840ef8b26", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 57.4307692308, "max_line_length": 963, "alphanum_fraction": 0.7272205427, "num_tokens": 6770}
|
#ifndef IRODS_RING_BUFFER_HPP
#define IRODS_RING_BUFFER_HPP
#include <boost/circular_buffer.hpp>
#include "lock_and_wait_strategy.hpp"
#include <iterator>
namespace irods {
namespace experimental {
// ring buffer with protection for overwrites
//
// Thread-safe facade over boost::circular_buffer<T>.  Every operation runs
// through the injected lock_and_wait_strategy (lws_): the first lambda is
// the wait predicate, the second is the critical-section action executed
// once the predicate holds.  Readers block until enough items are present
// and writers block until at least one slot is free, so existing entries
// are never silently overwritten.
template <typename T>
class circular_buffer {
public:
// Construct with a fixed capacity and an optional waiting strategy
// (defaults to an untimed lock_and_wait).
explicit circular_buffer(
const size_t capacity,
std::unique_ptr<lock_and_wait_strategy> lws = std::make_unique<lock_and_wait>())
: cb_{capacity}
, lws_{std::move(lws)}
{
}
// Convenience constructor: waits give up after `timeout` (units defined
// by lock_and_wait_with_timeout -- confirm in that header).
explicit circular_buffer(
const size_t capacity,
int timeout)
: circular_buffer(capacity, std::make_unique<lock_and_wait_with_timeout>(timeout))
{
}
// Block until the buffer is non-empty, then copy the oldest element
// into `entry` and remove it.
void pop_front(T& entry)
{
(*lws_)([this] { return 0 < cb_.size(); },
[this, &entry] {
auto iter = cb_.begin();
entry = *iter;
cb_.pop_front();
} );
}
// erase n items from front of the queue
// Blocks until at least n items are present.
void pop_front(size_t n)
{
(*lws_)([this, n] { return n <= cb_.size(); },
[this, n] { cb_.erase_begin(n); } );
}
// peek item at offset from beginning without removing from queue
// Blocks until the buffer holds more than `offset` items.
void peek(size_t offset, T& entry)
{
(*lws_)([this, offset] { return offset < cb_.size(); },
[this, offset, &entry] {
auto iter = cb_.begin();
entry = *(iter + offset);
} );
}
// peek n items starting at offset (from beginning) into array without removing from buffer
// precondition: array is large enough to hold n items
// Blocks until offset + n items are available.
void peek(off_t offset, size_t n, T array[])
{
auto length = offset + n;
(*lws_)([this, length] { return length <= cb_.size(); },
[this, offset, n, &array] {
auto iter = cb_.begin() + offset;
std::copy(iter, iter + n, array);
} );
}
// Bulk producer: blocks until at least one slot is free, then copies as
// many elements of [begin, end) as fit in the remaining space.  Returns
// the number actually inserted, which may be fewer than
// std::distance(begin, end); callers are expected to retry with the
// remainder.
template <typename iter>
long push_back(iter begin, iter end)
{
// push what you can, return the number pushed
long insertion_count = 0;
(*lws_)([this] { return cb_.size() < cb_.capacity(); },
[this, begin, end, &insertion_count] {
auto distance = static_cast<unsigned long>(std::distance(begin, end));
auto empty_space = cb_.capacity() - cb_.size();
insertion_count = ( empty_space < distance ? empty_space : distance );
cb_.insert(cb_.end(), begin, begin + insertion_count );
} );
return insertion_count;
}
// Single-element producer: blocks until a slot is free, then appends.
void push_back(const T& entry)
{
(*lws_)([this] { return cb_.size() < cb_.capacity(); },
[this, &entry] { cb_.push_back(entry); } );
}
private:
boost::circular_buffer<T> cb_;
// Strategy providing mutual exclusion + condition waiting; never null
// after construction.
std::unique_ptr<lock_and_wait_strategy> lws_;
}; // class circular_buffer
} // namespace experimental
} // namespace irods
#endif
|
{"hexsha": "a525eca1cab483c2a120b9046e5d19477bd5fdb7", "size": 3602, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "s3/s3_transport/include/circular_buffer.hpp", "max_stars_repo_name": "alanking/irods_resource_plugin_s3", "max_stars_repo_head_hexsha": "492839f885f432d30fa904ac9d5f89369d248ece", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "s3/s3_transport/include/circular_buffer.hpp", "max_issues_repo_name": "alanking/irods_resource_plugin_s3", "max_issues_repo_head_hexsha": "492839f885f432d30fa904ac9d5f89369d248ece", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 14.0, "max_issues_repo_issues_event_min_datetime": "2018-12-17T21:57:33.000Z", "max_issues_repo_issues_event_max_datetime": "2019-07-05T14:39:44.000Z", "max_forks_repo_path": "s3/s3_transport/include/circular_buffer.hpp", "max_forks_repo_name": "alanking/irods_resource_plugin_s3", "max_forks_repo_head_hexsha": "492839f885f432d30fa904ac9d5f89369d248ece", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.9811320755, "max_line_length": 103, "alphanum_fraction": 0.4636313159, "num_tokens": 717}
|
#!/usr/bin/env python
"""
##############################################
Testing Package Reliability Growth Data Module
##############################################
"""
# -*- coding: utf-8 -*-
#
# rtk.testing.growth.Growth.py is part of The RTK Project
#
# All rights reserved.
# Copyright 2007 - 2017 Andrew Rowland andrew.rowland <AT> reliaqual <DOT> com
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER
# OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Import modules for localization support.
import gettext
import locale
# Import modules for mathematics.
from math import exp, log, sqrt
import numpy as np
from scipy.optimize import fsolve
from scipy.stats import chi2 # pylint: disable=E0611
# Import other RTK modules.
try:
import Configuration
import Utilities
import analyses.statistics.Bounds as Bounds
import analyses.statistics.growth.CrowAMSAA as CrowAMSAA
import analyses.statistics.growth.SPLAN as SPLAN
from testing.Testing import Model as Testing
from testing.Testing import Testing as dtcTesting
except ImportError: # pragma: no cover
import rtk.Configuration as Configuration
import rtk.Utilities as Utilities
import rtk.analyses.statistics.Bounds as Bounds
import rtk.analyses.statistics.growth.CrowAMSAA as CrowAMSAA
import rtk.analyses.statistics.growth.SPLAN as SPLAN
from rtk.testing.Testing import Model as Testing
from rtk.testing.Testing import Testing as dtcTesting
# Module authorship metadata.
__author__ = 'Andrew Rowland'
__email__ = 'andrew.rowland@reliaqual.com'
__organization__ = 'ReliaQual Associates, LLC'
__copyright__ = 'Copyright 2007 - 2015 Andrew "Weibullguy" Rowland'

# Apply the locale configured for the RTK application, falling back to the
# system default when the configured locale is unavailable.
try:
    locale.setlocale(locale.LC_ALL, Configuration.LOCALE)
except locale.Error:  # pragma: no cover
    locale.setlocale(locale.LC_ALL, '')

# Shorthand for the gettext translation function.
_ = gettext.gettext
def _gr(gr, mi, mf, ttt, t1):
"""
Function used to calculate the growth rate necessary to have the ideal
growth curve end at the final MTBF and to calculate the optimum growth rate
for a test phase.
"""
return (ttt / t1)**gr + (mf / mi) * (gr - 1.0)
def running_sum(values):
    """
    Generator that yields the running (cumulative) sum of a list of values.

    :param list values: the list of values to calculate the running sum.
    """
    _accumulated = 0
    for _value in values:
        _accumulated = _accumulated + _value
        yield _accumulated
class Model(Testing): # pylint: disable=R0902, R0904
"""
The Reliability Growth data model contains the attributes and methods for
planning and assessing a reliability growth test. The attributes of a
Reliability Growth model are:
:ivar dict dic_test_data: dictionary containing the test data for the
Growth data model. Key is an integer from 0 to
n records. Values are a list: [record_id,
failure date, left interval, right interval,
quantity]
:ivar list lst_p_growth_rate: list of planned growth rates per test phase.
:ivar list lst_p_ms: list of planned management strategies per test phase.
:ivar list lst_p_fef: list of planned fix effectiveness factors per test
phase.
:ivar list lst_p_prob: list of planned probabilities of observing a failure
per test phase.
:ivar list lst_p_mtbfi: list of planned initial MTBF per test phase.
:ivar list lst_p_mtbff: list of planned final MTBF per test phase.
:ivar list lst_p_mtbfa: list of planned average MTBF per test phase.
:ivar list lst_p_test_time: list of planned test times per test phase.
:ivar list lst_p_n_failures: list of planned number of failures per test
phase.
:ivar list lst_p_start_date: list of planned start dates per test phase.
:ivar list lst_p_end_date: list of planned end dates per test phase.
:ivar list lst_p_weeks: list of planned number of weeks per test phase.
:ivar list lst_p_n_test_units: list of planned number of test units per
test phase.
:ivar list lst_p_tpu: list of planned average test time per unit per test
phase.
:ivar list lst_p_tpupw: list of planned average test time per unit per week
per test phase.
:ivar list lst_o_growth_rate: list of observed growth rates per test phase.
:ivar list lst_o_ms: list of observed management strategies per test phase.
:ivar list lst_o_fef: list of observed fix effectiveness factors per test
phase.
:ivar list lst_o_mtbfi: list of observed initial MTBF per test phase.
:ivar list lst_o_mtbff: list of observed final MTBF per test phase.
:ivar list lst_o_mtbfa: list of observed average MTBF per test phase.
:ivar list lst_o_test_time: list of observed test times per test phase.
:ivar list alpha_hat: list of scale parameters estimated from the test data
[lower bound, point, upper bound].
:ivar list beta_hat: list of shape parameters estimated from the test data
[lower bound, point, upper bound].
:ivar list cum_mean: list of cumulative MTBF estimated from the test data
[lower bound, point, upper bound].
:ivar list instantaneous_mean: list of instantaneous MTBF estimated from
the test data
[lower bound, point, upper bound].
:ivar int rg_plan_model: the index in the list of reliability growth
planning models.
:ivar int rg_assess_model: the index in the list of reliability assessment
and projection models.
:ivar float alpha_hat: the point estimate of the scale parameter.
:ivar float beta_hat: the point estimate of the shape parameter.
:ivar float cum_mean: the point estimate of the cumulative MTBF.
:ivar float instantaneous_mean: the point estimate of the instantaneous
MTBF.
:ivar float se_scale: the estimated standard error of the scale parameter.
:ivar float se_shape: the estimated standard error of the shape parameter.
:ivar float se_cum_mean: the estimated standard error of the cumulative
MTBF.
:ivar float se_inst_mean: the estimated standard error of the instantaneous
MTBF.
:ivar float cramer_vonmises: the Cramer-von Mises test statistic.
:ivar float chi_square: the chi-square test statistic.
"""
def __init__(self, n_phases=1):
    """
    Method to initialize a Reliability Growth Test data model instance.

    :param int n_phases: the number of growth phases associated with the
                         Growth test.
    """
    super(Model, self).__init__()
    # Initialize private dict attributes.
    # Initialize private list attributes.
    # Initialize private scalar attributes.
    # Initialize public dict attributes.
    self.dic_test_data = {}
    # Initialize public list attributes.
    # The following lists are used for holding ideal growth data for each
    # test phase.
    self.lst_i_mtbfi = [0.0] * n_phases  # Initial phase MTBF.
    self.lst_i_mtbff = [0.0] * n_phases  # Final phase MTBF.
    self.lst_i_mtbfa = [0.0] * n_phases  # Average phase MTBF.
    self.lst_i_n_failures = [0] * n_phases  # Expected number of failures.
    # The following lists are used for holding planned growth data for
    # each test phase.
    self.lst_p_growth_rate = [0.0] * n_phases
    self.lst_p_ms = [0.0] * n_phases  # Planned management strategy.
    self.lst_p_fef = [0.0] * n_phases  # Planned fix effectiveness factor.
    self.lst_p_prob = [0.0] * n_phases  # Probability of seeing a failure.
    self.lst_p_mtbfi = [0.0] * n_phases  # Initial phase MTBF.
    self.lst_p_mtbff = [0.0] * n_phases  # Final phase MTBF.
    self.lst_p_mtbfa = [0.0] * n_phases  # Average phase MTBF.
    self.lst_p_test_time = [0.0] * n_phases  # Planned test time.
    self.lst_p_n_failures = [0] * n_phases  # Expected number of failures.
    self.lst_p_start_date = [0] * n_phases  # Phase start date.
    self.lst_p_end_date = [0] * n_phases  # Phase end date.
    self.lst_p_weeks = [0.0] * n_phases  # Weeks in the phase.
    self.lst_p_n_test_units = [0] * n_phases  # Units under test.
    self.lst_p_tpu = [0.0] * n_phases  # Test time per unit.
    self.lst_p_tpupw = [0.0] * n_phases  # Test time per unit per week.
    # The following lists are used for holding observed growth data for
    # each test phase.
    # Unlike the other observed lists, the growth rate is a single
    # [lower bound, point, upper bound] triple (see assess_growth_rate).
    self.lst_o_growth_rate = [0.0, 0.0, 0.0]
    self.lst_o_ms = [0.0] * n_phases
    self.lst_o_fef = [0.0] * n_phases
    self.lst_o_mtbfi = [0.0] * n_phases
    self.lst_o_mtbff = [0.0] * n_phases
    self.lst_o_mtbfa = [0.0] * n_phases
    self.lst_o_test_time = [0.0] * n_phases  # Actual test time.
    self.lst_o_n_failures = [0] * n_phases  # Observed number of failures.
    # Eight boolean flags; presumably one per editable planning field --
    # TODO(review): confirm the field-to-index mapping against the GUI.
    self.lst_fixed_values = [True, True, True, True, True, True, True,
                             True]
    # The following lists are used for holding model parameter estimates.
    # The format is [lower bound, point estimate, upper bound].
    self.alpha_hat = [0.0, 0.0, 0.0]
    self.beta_hat = [0.0, 0.0, 0.0]
    self.cum_mean = [[0.0, 0.0, 0.0]]
    self.instantaneous_mean = [[0.0, 0.0, 0.0]]
    self.growth_rate = [0.0, 0.0, 0.0]
    self.chi2_critical_value = [0.0, 0.0]  # [lower, upper] critical values.
    # Initialize public scalar attributes.
    self.rg_plan_model = 0
    self.rg_assess_model = 0
    self.tr = 0.0  # Program technical requirement MTBF.
    self.mtbfg = 0.0  # Program goal MTBF.
    self.mtbfgp = 0.0  # Growth potential MTBF.
    self.n_phases = n_phases
    self.ttt = 0.0  # Total time on test.
    self.avg_growth = 0.0  # Average growth rate across all test phases.
    self.avg_ms = 0.75  # Average management strategy across all test phases.
    self.avg_fef = 0.7  # Average fix effectiveness factor across all test phases.
    self.probability = 0.75  # Probability of observing a failure.
    self.ttff = 0.0  # Time to first fix.
    self.grouped = 0  # 0 = individual failure times, otherwise grouped.
    self.group_interval = 0.0
    self.se_scale = 0.0  # Standard error of the scale parameter.
    self.se_shape = 0.0  # Standard error of the shape parameter.
    self.se_cum_mean = 0.0
    self.se_inst_mean = 0.0
    self.cramer_vonmises = 0.0  # Cramer-von Mises test statistic.
    self.chi_square = 0.0  # Chi-square test statistic.
    self.cvm_critical_value = 0.0
def calculate_idealized_growth_curve(self, mtbf=True):
    """
    Method to calculate the values for the idealized growth curve.

    :keyword bool mtbf: indicates whether to calculate MTBF (default) or
                        failure intensity values.
    :return: _ideal
    :rtype: list of floats
    """
    # WARNING: Refactor calculate_idealized_growth_curve; current McCabe
    # Complexity metric=17.
    _ideal = []
    # The five planning inputs (first-phase average MTBF, goal MTBF, total
    # test time, first-phase test time, average growth rate) are mutually
    # derivable; fill in any that are missing (<= 0) from the others.
    # Verify the first phase average MTBF is greater than zero.  If not,
    # attempt to calculate the average MTBF.
    if self.lst_i_mtbfa[0] <= 0.0:
        _mtbfa = CrowAMSAA.calculate_initial_mtbf(self.avg_growth,
                                                  self.mtbfg, self.ttt,
                                                  self.lst_p_test_time[0])
        self.lst_i_mtbfa[0] = _mtbfa
    # Verify the program final (goal) MTBF is greater than zero.  If not,
    # attempt to calculate the final MTBF.
    if self.mtbfg <= 0.0:
        _mtbfg = CrowAMSAA.calculate_final_mtbf(self.avg_growth,
                                                self.lst_i_mtbfa[0],
                                                self.ttt,
                                                self.lst_p_test_time[0])
        self.mtbfg = _mtbfg
    # Verify the program total time on test is greater than zero.  If not,
    # attempt to calculate the total time on test.
    if self.ttt <= 0.0:
        self.ttt = CrowAMSAA.calculate_total_time(self.avg_growth,
                                                  self.lst_i_mtbfa[0],
                                                  self.mtbfg,
                                                  self.lst_p_test_time[0])
    # Verify the first phase test time is greater than zero.  If not,
    # attempt to calculate the first phase test time.
    if self.lst_p_test_time[0] <= 0.0:
        _time = CrowAMSAA.calculate_t1(self.avg_growth,
                                       self.lst_i_mtbfa[0],
                                       self.mtbfg, self.ttt)
        self.lst_p_test_time[0] = _time
    # Verify the program average growth rate is greater than zero.  If
    # not, attempt to calculate the program average growth rate.
    if self.avg_growth <= 0.0:
        _alpha = CrowAMSAA.calculate_growth_rate(self.lst_i_mtbfa[0],
                                                 self.mtbfg, self.ttt,
                                                 self.lst_p_test_time[0])
        self.avg_growth = _alpha
    # Build the idealized curve.  If the time is less than the time to
    # first fix, the idealized value is the initial MTBF.  If the time
    # is equal to the time to first fix, the idealized value is set to
    # numpy's not a number to force a jump in the plot.  If the time is
    # greater than the time to first failure, the idealized value is
    # calculated from the inputs read above.
    if(self.lst_i_mtbfa[0] > 0.0 and self.lst_p_test_time[0] > 0.0 and
       self.mtbfg > 0.0 and self.ttt > 0.0 and self.avg_growth > 0.0):
        for _time in range(int(self.ttt)):
            if _time < int(self.lst_p_test_time[0]):
                _ideal.append(self.lst_i_mtbfa[0])
            elif _time == int(self.lst_p_test_time[0]):
                _ideal.append(np.nan)
            else:
                _ideal.append((self.lst_i_mtbfa[0] *
                               (float(_time) /
                                self.lst_p_test_time[0])**self.avg_growth) /
                              (1.0 - self.avg_growth))
    # Convert to failure intensity if that has been called for.
    if not mtbf:
        _ideal = [1.0 / _mtbf for _mtbf in _ideal]
    # Calculate the initial MTBF, final MTBF, average MTBF, and
    # expected number of failures for each phase.
    _t1 = self.lst_p_test_time[0]
    _mtbfa = self.lst_i_mtbfa[0]
    self.lst_i_n_failures = [0.0] * self.n_phases
    for _index in range(self.n_phases):
        _time = sum(self.lst_p_test_time[:_index + 1])
        # One phase's final MTBF doubles as the next phase's initial MTBF.
        _mtbf = CrowAMSAA.calculate_final_mtbf(self.avg_growth,
                                               _mtbfa, _time, _t1)
        if _index < self.n_phases - 1:
            self.lst_i_mtbfi[_index + 1] = _mtbf
        if _index > 0:
            self.lst_i_mtbff[_index] = _mtbf
        _cum_fails = sum(self.lst_i_n_failures[:_index + 1])
        _n_failures = CrowAMSAA.calculate_n_failures(self.avg_growth,
                                                     _mtbfa, _time,
                                                     _t1, _cum_fails)
        self.lst_i_n_failures[_index] = _n_failures
    # Second pass: with the per-phase failure counts known, recompute the
    # average MTBF of each phase.
    for _index in range(self.n_phases):
        _time = self.lst_p_test_time[_index]
        _n_failures = self.lst_i_n_failures[_index]
        _mtbfi = self.lst_i_mtbfi[_index]
        _mtbff = self.lst_i_mtbff[_index]
        _mtbfa = CrowAMSAA.calculate_average_mtbf(_time, _n_failures,
                                                  _mtbfi, _mtbff)
        self.lst_i_mtbfa[_index] = _mtbfa
    return _ideal
def calculate_planned_growth_curve(self):
    """
    Method to calculate the necessary values for each reliability growth
    test phase.  These are the start and end points of the planned growth
    curve.

    :return: False if successful or True if an error is encountered.
    :rtype: bool
    """
    # The first phase's average MTBF and test time anchor the planned
    # curve for all later phases.
    _mtbf1 = self.lst_p_mtbfa[0]
    _t1 = self.lst_p_test_time[0]
    for i in range(self.n_phases):
        _alpha = self.lst_p_growth_rate[i]
        _mtbfa = self.lst_p_mtbfa[i]
        _mtbfi = self.lst_p_mtbfi[i]
        _mtbff = self.lst_p_mtbff[i]
        _time = self.lst_p_test_time[i]
        _cum_time = sum(self.lst_p_test_time[:i + 1])
        # Fill in any planned value that was not supplied (<= 0) from the
        # values that were.
        if _mtbff <= 0.0:
            _mtbff = CrowAMSAA.calculate_final_mtbf(_alpha, _mtbf1,
                                                    _cum_time, _t1)
            self.lst_p_mtbff[i] = _mtbff
        if _mtbfa <= 0.0:
            _mtbfa = CrowAMSAA.calculate_average_mtbf(0.0, 0, _mtbfi,
                                                      _mtbff)
            self.lst_p_mtbfa[i] = _mtbfa
        if _mtbfi <= 0.0:
            # The average is treated as the midpoint of initial and final
            # MTBF, so initial = 2 * average - final.
            _mtbfi = 2.0 * _mtbfa - _mtbff
            self.lst_p_mtbfi[i] = _mtbfi
        if _alpha <= 0.0:
            _alpha = CrowAMSAA.calculate_growth_rate(_mtbfi, _mtbff,
                                                     _time, _t1)
            self.lst_p_growth_rate[i] = _alpha
    return False
def create_planned_values(self, mtbf=True):
    """
    Method to create the planned growth curve values.  These are used for
    plotting the planned growth curve.  Each phase contributes a run of
    identical values (its average MTBF, or its reciprocal when plotting
    failure intensity) followed by a NaN that forces a visible break in
    the plotted line between phases.

    :keyword boolean mtbf: indicates whether to calculate MTBF or failure
                           rates.
    :return: _plan
    :rtype: list
    """
    _plan = []
    for _phase in range(self.n_phases):
        _elapsed = 0.0
        # Emit one point per whole unit of planned phase test time.
        while _elapsed < (self.lst_p_test_time[_phase] - 1.0):
            _plan.append(self.lst_p_mtbfa[_phase] if mtbf
                         else 1.0 / self.lst_p_mtbfa[_phase])
            _elapsed += 1.0
        # NaN separates this phase from the next on the plot.
        _plan.append(np.nan)  # pylint: disable=E1101
    return _plan
def assess_plan_feasibility(self):
    """
    Method to assess the feasibility of a test plan.  The assessment
    criteria come from MIL-HDBK-189C, section 5.1.5 and section 5.1.6.\n\n
    The criteria and acceptable ranges are:\n
    - Initial MTBF / Goal MTBF              0.15 - 0.47\n
    - Fix Effectiveness Factor              0.55 - 0.85\n
    - Goal MTBF / Growth Potential MTBF     0.60 - 0.80\n
    - Growth Rate                           0.23 - 0.64\n

    :return: _results
    :rtype: list
    """
    # _results = [MTBFi/MTBFg ratio, MTBFg/MTBFgp ratio, index of the
    # first phase with zero test units, index of the first phase with
    # zero weeks]; -1 means no problem phase was found.
    _results = [0.0, 0.0, -1, -1]
    # Initial MTBF to goal MTBF ratio is high enough.  Too low means
    # growth testing is probably being started too early.
    try:
        _results[0] = self.lst_p_mtbfi[0] / self.mtbfg
    except ZeroDivisionError:
        _results[0] = 0.0
    # Goal MTBF to growth potential MTBF ratio is high enough.  Too
    # high means there is a low probability of achieving the goal MTBF.
    # Too low means the system may be over designed.
    try:
        _results[1] = self.mtbfg / self.mtbfgp
    except ZeroDivisionError:
        _results[1] = 0.0
    # Calculate the test time per test unit and test time per test unit
    # per week.
    for _phase in range(self.n_phases):
        # Assess logistics of test plan.
        _weeks = (self.lst_p_end_date[_phase] -
                  self.lst_p_start_date[_phase]) / 7.0
        try:
            self.lst_p_tpu[_phase] = self.lst_p_test_time[_phase] / \
                self.lst_p_n_test_units[_phase]
        except ZeroDivisionError:
            _results[2] = _phase
            self.lst_p_tpu[_phase] = 0.0
        try:
            self.lst_p_tpupw[_phase] = self.lst_p_tpu[_phase] / _weeks
        except ZeroDivisionError:
            _results[3] = _phase
            self.lst_p_tpupw[_phase] = 0.0
        # Assess engineering effort and quality of test plan.  Any
        # planned value outside (0, 1] is recalculated from its SPLAN
        # relation.
        if self.lst_p_ms[_phase] <= 0.0 or self.lst_p_ms[_phase] > 1.0:
            _fef = self.lst_p_fef[_phase]
            _mtbfa = self.lst_p_mtbfa[_phase]
            _ms = SPLAN.calculate_management_strategy(_fef, _mtbfa,
                                                      self.mtbfgp)
            self.lst_p_ms[_phase] = _ms
        # FIX: the original condition was `fef > 0.0`, which together
        # with `fef <= 0.0` is always true and forced a SPLAN
        # recalculation on every call, clobbering user-supplied FEFs.
        # A FEF is a fraction, so valid values lie in (0, 1], mirroring
        # the management-strategy and probability checks.
        if self.lst_p_fef[_phase] <= 0.0 or self.lst_p_fef[_phase] > 1.0:
            _ms = self.lst_p_ms[_phase]
            _mtbfa = self.lst_p_mtbfa[_phase]
            _fef = SPLAN.calculate_fef(_ms, _mtbfa, self.mtbfgp)
            self.lst_p_fef[_phase] = _fef
        if self.lst_p_prob[_phase] <= 0.0 or self.lst_p_prob[_phase] > 1.0:
            _time = self.lst_p_test_time[_phase]
            _ms = self.lst_p_ms[_phase]
            _mtbfi = self.lst_p_mtbfi[_phase]
            _prob = SPLAN.calculate_probability(_time, _ms, _mtbfi)
            self.lst_p_prob[_phase] = _prob
    return _results
def estimate_crow_amsaa(self):
    """
    Method to estimate the parameters of the Crow-AMSAA reliability growth
    model.

    Point estimates are stored at index 1 of alpha_hat/beta_hat, with the
    Crow confidence bounds at indices 0 (lower) and 2 (upper).

    :return: False if successful or True if an error is encountered.
    :rtype: bool
    """
    # Record layout (see _request_test_data): index 3 = right interval
    # (cumulative test time), index 4 = failure quantity.
    _times = [x[3] for x in self.dic_test_data.values()]
    _failures = [x[4] for x in self.dic_test_data.values()]
    self.cum_time = _times[-1]
    self.cum_failures = sum(_failures)
    (self.alpha_hat[1],
     self.beta_hat[1]) = CrowAMSAA.calculate_crow_amsaa_parameters(
         _failures, _times, 0.0, self.grouped)
    # NOTE(review): self.confidence is not initialized in this class --
    # presumably provided by the base Testing model; confirm.
    (self.beta_hat[0],
     self.beta_hat[2]) = Bounds.calculate_crow_bounds(
         sum(_failures), _times[-1], self.alpha_hat[1],
         self.beta_hat[1], self.confidence, 1)
    (self.alpha_hat[0],
     self.alpha_hat[2]) = Bounds.calculate_crow_bounds(
         self.cum_failures, self.cum_time, self.alpha_hat[1],
         self.beta_hat[1], self.confidence, 2)
    return False
def calculate_crow_amsaa_mean(self):
    """
    Method to calculate the cumulative and instantaneous mean from the
    Crow-AMSAA reliability growth model.

    Rebuilds self.cum_mean and self.instantaneous_mean with one
    [lower bound, point, upper bound] triple per recorded failure time.

    :return: False if successful or True if an error is encountered.
    :rtype: bool
    """
    _times = [x[3] for x in self.dic_test_data.values()]
    _failures = [x[4] for x in self.dic_test_data.values()]
    _n_fail_times = len(_times)
    self.cum_mean = []
    self.instantaneous_mean = []
    for i in range(_n_fail_times):
        (_cum_mean,
         _instantaneous_mean) = CrowAMSAA.calculate_crow_amsaa_mean(
             _times[i], self.alpha_hat[1], self.beta_hat[1])
        # The Crow bounds are on the failure intensity, so the
        # reciprocals swap lower and upper for the MTBF.
        (_lower, _upper) = Bounds.calculate_crow_bounds(
            sum(_failures[:i + 1]), _times[i], self.alpha_hat[1],
            self.beta_hat[1], self.confidence, 3)
        _cum_mean_ll = 1.0 / _upper
        _cum_mean_ul = 1.0 / _lower
        # NOTE(review): the instantaneous-mean bounds use _times[-1]
        # (the final time) on every pass, so they are constant across
        # the loop while the point estimate varies with _times[i] --
        # confirm this is intended rather than _times[i].
        _i_mean_ll = 1.0 / (self.alpha_hat[2] * self.beta_hat[2] *
                            _times[-1]**(self.beta_hat[2] - 1.0))
        _i_mean_ul = 1.0 / (self.alpha_hat[0] * self.beta_hat[0] *
                            _times[-1]**(self.beta_hat[0] - 1.0))
        self.cum_mean.append([_cum_mean_ll, _cum_mean, _cum_mean_ul])
        self.instantaneous_mean.append([_i_mean_ll, _instantaneous_mean,
                                        _i_mean_ul])
    return False
def calculate_cramer_vonmises(self, t_star=0.0, type2=True):
    """
    Method to calculate the Cramer-von Mises test statistic from the
    observed reliability growth data.

    Test the hypothesis that the data fits the Crow-AMSAA model.

        Ho: the data fits the Crow-AMSAA model
        Ha: the data does not fit the Crow-AMSAA model

    Reject Ho if _CvM exceeds the critical value.

    :param float t_star: termination time for Type I tests.
    :param bool type2: whether or not the test is time terminated (Type I)
                       or failure terminated (Type II).
    :return: False if successful or True if an error is encountered.
    :rtype: bool
    """
    _times = [x[3] for x in self.dic_test_data.values()]
    _failures = [x[4] for x in self.dic_test_data.values()]
    self.cramer_vonmises = CrowAMSAA.calculate_cramer_vonmises(
        _failures, _times, self.beta_hat[1], t_star, type2)
    # The critical value depends only on the observed failure count and
    # the confidence level.  NOTE(review): self.cum_failures must have
    # been set (e.g. by estimate_crow_amsaa) before calling this method.
    self.cvm_critical_value = CrowAMSAA.cramer_vonmises_critical_value(
        self.cum_failures, self.confidence)
    return False
def calculate_chi_square(self):
    """
    Method to calculate the chi-square test statistic from the observed
    reliability growth data.

    Test the hypothesis that the data fits the Crow-AMSAA model.

        Ho: the data fits the Crow-AMSAA model
        Ha: the data does not fit the Crow-AMSAA model

    Reject Ho if _chi2 exceeds the critical values.

    :return: False if successful or True if an error is encountered.
    :rtype: bool
    """
    # Ensure the confidence level is a fraction.
    if self.confidence > 1.0:
        self.confidence = self.confidence / 100.0
    _times = [x[3] for x in self.dic_test_data.values()]
    _failures = [x[4] for x in self.dic_test_data.values()]
    self.cum_failures = sum(_failures)
    self.chi_square = CrowAMSAA.calculate_crow_amsaa_chi_square(
        _failures, _times, self.beta_hat[1], _times[-1], self.grouped)
    _alpha_half = (1.0 - self.confidence) / 2.0
    if self.grouped == 0:  # Individual failure times.
        # Degrees of freedom differ between time-truncated (2N) and
        # failure-truncated (2(N-1)) tests.
        # NOTE(review): self.test_termination_time is not initialized in
        # this class's __init__ -- presumably set on the base Testing
        # model; confirm.
        if self.test_termination_time > 0.0:  # Time truncated test.
            _df = 2.0 * self.cum_failures
        else:  # Failure truncated test.
            _df = 2.0 * (self.cum_failures - 1)
        _upper = _alpha_half
        _lower = self.confidence + _alpha_half
    else:  # Grouped failure times.
        _df = len(_failures) - 1
        _upper = self.confidence
        _lower = 1.0 - self.confidence
    self.chi2_critical_value[0] = chi2.ppf(_lower, _df)
    self.chi2_critical_value[1] = chi2.ppf(_upper, _df)
    return False
def assess_growth_rate(self):
    """
    Method to assess the actual growth rate occuring during a Growth Test.

    The observed growth rate is 1 - beta; the beta bounds are consumed in
    reverse order because a larger shape parameter implies a smaller
    growth rate (lower growth bound comes from upper beta bound).

    :return: False if successful or True if an error is encountered.
    :rtype: bool
    """
    for _index, _beta in enumerate(reversed(self.beta_hat)):
        self.lst_o_growth_rate[_index] = 1.0 - _beta
    return False
class Growth(dtcTesting):
"""
The Reliability Growth data controller provides an interface between the
Reliability Growth data model and an RTK view model. A single Growth
controller can manage one or more Growth data models. The attributes of a
Growth data controller are:
:ivar _dao: the Data Access Object to use when communicating with the RTK
Project database.
:ivar dicTests: Dictionary of the Growth data models managed. Key is the
Test ID; value is a pointer to the Growth data model
instance.
"""
def __init__(self):  # pylint: disable=E1002
    """
    Method to initialize a Growth data controller instance.
    """
    super(Growth, self).__init__()
    # Initialize private scalar attributes.
    # Data Access Object for the open RTK Project database; assigned in
    # request_tests().
    self._dao = None
    # ID of the most recently inserted rtk_tests row.
    self._last_id = None
def request_tests(self, dao, growth_test):
    """
    Reads the RTK Project database and loads all the Growth Tests
    associated with the selected Revision.  For each Growth Test returned:

    #. Retrieve the inputs from the RTK Project database.
    #. Create a Growth data model instance.
    #. Set the attributes of the data model instance from the returned
       results.
    #. Add the instance to the dictionary of Growth Tests being managed
       by this controller.

    :param rtk.DAO dao: the Data Access object to use for communicating
                        with the RTK Project database.
    :param tuple growth_test: the Growth test attributes from the RTK
                              Project database.
    :return: (_results, _error_code)
    :rtype: tuple
    """
    self._dao = dao
    self._last_id = self._dao.get_last_id('rtk_tests')[0]
    # Create an instance of a Growth data model, set it's attributes, and
    # add it to the dictionary of Growth tests controlled by this data
    # controller.
    # NOTE(review): growth_test[17] is used as the phase count and
    # growth_test[2] as the test ID -- confirm against the rtk_tests
    # column order.
    _test = Model(growth_test[17])
    _test.set_attributes(growth_test)
    self.dicTests[_test.test_id] = _test
    # Gather the Growth model phase attributes.
    _query = "SELECT * FROM rtk_growth_testing \
              WHERE fld_test_id={0:d}".format(growth_test[2])
    (_phases,
     _error_code, __) = self._dao.execute(_query, commit=False)
    # A failed query yields a non-sequence; treat it as zero phases.
    try:
        _n_phases = len(_phases)
    except TypeError:
        _n_phases = 0
    for j in range(_n_phases):
        _test.set_phase_attributes(_phases[j], j)
    # Load the test's failure records as well.
    self._request_test_data(_test.test_id)
    return(_phases, _error_code)
def _request_test_data(self, test_id):
    """
    Method to read the RTK Project database and retrieves all the test
    records associated with the selected Test.

    :param int test_id: the Growth Test ID to select data for.
    :return: (_results, _error_code)
    :rtype: tuple
    """
    _test = self.dicTests[test_id]
    # fld_source=1 selects reliability growth records; ordering by right
    # interval makes record i the i-th failure interval in time order.
    _query = "SELECT fld_record_id, fld_failure_date, \
                     fld_left_interval, fld_right_interval, \
                     fld_quantity \
              FROM rtk_survival_data \
              WHERE fld_dataset_id={0:d} \
              AND fld_source=1 \
              ORDER BY fld_right_interval".format(test_id)
    (_results, _error_code, __) = self._dao.execute(_query, commit=False)
    # Rebuild the in-memory record dictionary from scratch.
    _test.dic_test_data = {}
    # A failed query yields a non-sequence; treat it as zero records.
    try:
        _n_records = len(_results)
    except TypeError:
        _n_records = 0
    for i in range(_n_records):
        _test.dic_test_data[i] = [_results[i][0], _results[i][1],
                                  _results[i][2], _results[i][3],
                                  _results[i][4]]
    return(_results, _error_code)
def add_test(self, revision_id, assembly_id):
    """
    Adds a new Test to the RTK Project for the selected Revision.

    :param int revision_id: the Revision ID to add the new Test.
    :param int assembly_id: the Assembly ID to add the new Test.
    :return: (_test, _error_code); _test is None when the INSERT failed.
    :rtype: tuple
    """
    # FIX: _test was previously only bound inside the `if _results:`
    # branch, so a failed INSERT raised NameError at the return statement
    # instead of reporting the error code to the caller.
    _test = None
    _query = "INSERT INTO rtk_tests \
              (fld_revision_id, fld_assembly_id, fld_name, fld_test_type) \
              VALUES ({0:d}, {1:d}, 'Test Plan', 4)".format(revision_id,
                                                            assembly_id)
    (_results, _error_code, __) = self._dao.execute(_query, commit=True)
    # If the new test was added successfully to the RTK Project database:
    #   1. Retrieve the ID of the newly inserted test.
    #   2. Add a single growth phase to the growth testing table.
    #   3. Create a new Testing model instance.
    #   4. Set the attributes of the new Testing model instance.
    #   5. Add the new Testing model to the controller dictionary.
    if _results:
        self._last_id = self._dao.get_last_id('rtk_tests')[0]
        (_results, _error_code) = self.add_test_phase(self._last_id)
        _test = Model()
        _test.set_attributes((revision_id, assembly_id, self._last_id, '',
                              '', 4, '', 0.0, 0.0, 0.75, 0.0, 0.0))
        self.dicTests[_test.test_id] = _test
    return(_test, _error_code)
def add_test_phase(self, test_id, phase_id=0):
    """
    Adds a new test phase to the RTK Project for the selected Reliability
    Growth test.

    :param int test_id: the Test ID to add the new phase.
    :param int phase_id: the Phase ID of the new phase to add.
    :return: (_results, _error_code)
    :rtype: tuple
    """
    # NOTE(review): _last_id is refreshed from rtk_tests here but not
    # used below -- confirm this refresh is still needed.
    self._last_id = self._dao.get_last_id('rtk_tests')[0]
    _query = "INSERT INTO rtk_growth_testing \
              (fld_test_id, fld_phase_id) \
              VALUES ({0:d}, {1:d})".format(test_id, phase_id)
    (_results, _error_code, __) = self._dao.execute(_query, commit=True)
    return(_results, _error_code)
def add_test_record(self, test_id, date, time, n_failures,
                    additional=False):
    """
    Method to add a new record to the selected Reliability Growth test.

    :param int test_id: the ID of the test to add the record to.
    :param int date: the ordinal date of the failure(s).
    :param float time: the operating time at failure.
    :param int n_failures: the number of failures occurring at time.
    :keyword bool additional: indicates whether or not the time is
                              cumulative.
    :return: (_results, _error_code)
    :rtype: tuple
    """
    _test = self.dicTests[test_id]
    # Find the highest existing record ID and latest recorded time for
    # this test so the new record can be appended after them.
    _query = "SELECT MAX(fld_record_id), MAX(fld_right_interval) \
              FROM rtk_survival_data \
              WHERE fld_dataset_id={0:d} \
              AND fld_source=1".format(test_id)
    (_results, _error_code, __) = self._dao.execute(_query, commit=False)
    if _results[0][0] is None or _results[0][0] == '':
        _last_id = 0
    else:
        _last_id = _results[0][0] + 1
    if _results[0][1] is None or _results[0][1] == '':
        _last_time = 0.0
    else:
        _last_time = float(_results[0][1])
    # When `additional` is True the supplied time is an increment on top
    # of the last recorded time rather than a cumulative value.
    if additional:
        time = time + _last_time
    _query = "INSERT INTO rtk_survival_data \
              (fld_record_id, fld_dataset_id, fld_left_interval, \
               fld_right_interval, fld_quantity, fld_mode_type, \
               fld_failure_date, fld_source) \
              VALUES ({0:d}, {1:d}, {2:f}, {3:f}, {4:d}, {5:d}, \
                      {6:d}, 1)".format(_last_id, test_id, 0.0,
                                        time, n_failures, 0, date)
    (_results, _error_code, __) = self._dao.execute(_query, commit=True)
    # Mirror the new record in the in-memory dictionary; max() raises
    # ValueError when the dictionary is empty.
    try:
        _id = max(_test.dic_test_data.keys()) + 1
    except ValueError:
        _id = 0
    _test.dic_test_data[_id] = [_last_id, date, 0.0, time, n_failures]
    return(_results, _error_code)
def delete_test(self, test_id):
    """
    Deletes a Testing input from the RTK Project.

    :param int test_id: the Test ID to delete the phase from.
    :return: (_results, _error_code)
    :rtype: tuple
    """
    # Remove the child phase rows first so the parent test row is never
    # left with orphaned phases.
    _query = ("DELETE FROM rtk_growth_testing "
              "WHERE fld_test_id={0:d}").format(test_id)
    (_results, _error_code, __) = self._dao.execute(_query, commit=True)

    # Now remove the growth test record itself.
    _query = ("DELETE FROM rtk_tests "
              "WHERE fld_test_id={0:d}").format(test_id)
    (_results, _error_code, __) = self._dao.execute(_query, commit=True)

    # Finally drop the in-memory data model for this test.
    self.dicTests.pop(test_id)

    return(_results, _error_code)
def delete_test_phase(self, test_id, phase_id):
    """
    Deletes the selected test phase from the RTK Project database.

    :param int test_id: the Test ID to add the new phase.
    :param int phase_id: the Phase ID to delete from the test.
    :return: (_results, _error_code)
    :rtype: tuple
    """
    # Remove exactly one (test, phase) row from the growth-testing table.
    _query = ("DELETE FROM rtk_growth_testing "
              "WHERE fld_test_id={0:d} "
              "AND fld_phase_id={1:d}").format(test_id, phase_id)
    (_results, _error_code, __) = self._dao.execute(_query, commit=True)

    return(_results, _error_code)
def delete_test_record(self, record_id, dataset_id):
    """
    Method to delete a test record from the RTK Program database.

    :param int record_id: the ID of the record to delete.
    :param int dataset_id: the ID of the dataset to delete the record from.
    :return: (_results, _error_code)
    :rtype: tuple
    """
    # fld_source=1 restricts the delete to reliability-growth records.
    _query = ("DELETE FROM rtk_survival_data "
              "WHERE fld_record_id={0:d} "
              "AND fld_dataset_id={1:d} "
              "AND fld_source=1").format(record_id, dataset_id)
    (_results, _error_code, __) = self._dao.execute(_query, commit=True)

    return(_results, _error_code)
def request_calculate(self, test_id, mtbf=True):
    """
    Method to request the various calculate methods of the Reliability
    Growth test data model.

    :param int test_id: the ID of the test to calculate.
    :keyword bool mtbf: indicates whether to calculate MTBF or failure
                        intensity values.
    :return: (_ideal, _plan); _plan is None when the planned growth curve
             could not be calculated.
    :rtype: tuple
    """
    _test = self.dicTests[test_id]

    _ideal = _test.calculate_idealized_growth_curve()

    # BUG FIX: _plan was only bound inside the if-branch, so a failing
    # calculate_planned_growth_curve() raised UnboundLocalError at return.
    _plan = None
    if not _test.calculate_planned_growth_curve():
        _plan = _test.create_planned_values(mtbf)

    return(_ideal, _plan)
def request_assessment(self, test_id):
    """
    Method to request the various methods to assess actual test data for
    the Reliability Growth test data model.

    :param int test_id: the ID of the test to assess.
    :return: False if successful or True if an error is encountered
    :rtype: bool
    """
    _test = self.dicTests[test_id]
    # Only assess when at least one observed test record exists.
    if len(_test.dic_test_data.values()) > 0:
        _test.estimate_crow_amsaa()
        _test.calculate_crow_amsaa_mean()
        _test.assess_growth_rate()
        # Goodness-of-fit statistics for the fitted Crow-AMSAA model.
        _test.calculate_chi_square()
        _test.calculate_cramer_vonmises()

    return False
def save_test(self, test_id):
    """
    Method to save the Reliability Growth Test attributes to the RTK
    Project database.

    :param int test_id: the ID of the Test to save.
    :return: (_results, _error_code)
    :rtype: tuple
    """
    _test = self.dicTests[test_id]

    # Ensure confidence is stored as a fractional value.
    if _test.confidence > 1.0:
        _test.confidence = _test.confidence / 100.0

    # Persist the test-level attributes.  Placeholder {0} is the test ID
    # used by the WHERE clause; {1}-{41} map the model attributes in the
    # same order as the format() arguments below.
    _query = "UPDATE rtk_tests \
              SET fld_name='{1:s}', fld_description='{2:s}', \
                  fld_test_type={3:d}, fld_attachment='{4:s}', \
                  fld_cum_time={5:f}, fld_cum_failures={6:d}, \
                  fld_confidence={7:f}, fld_consumer_risk={8:f}, \
                  fld_producer_risk={9:f}, fld_plan_model={10:d}, \
                  fld_assess_model={11:d}, fld_tr={12:f}, fld_mg={13:f}, \
                  fld_mgp={14:f}, fld_num_phases={15:d}, fld_ttt={16:f}, \
                  fld_avg_growth={17:f}, fld_avg_ms={18:f}, \
                  fld_avg_fef={19:f}, fld_prob={20:f}, fld_ttff={21:f}, \
                  fld_grouped={22:d}, fld_group_interval={23:f}, \
                  fld_se_scale={24:f}, fld_se_shape={25:f}, \
                  fld_se_cum_mean={26:f}, fld_se_inst_mean={27:f}, \
                  fld_cramer_vonmises={28:f}, fld_chi_square={29:f}, \
                  fld_scale_ll={30:f}, fld_scale={31:f}, \
                  fld_scale_ul={32:f}, fld_shape_ll={33:f}, \
                  fld_shape={34:f}, fld_shape_ul={35:f}, \
                  fld_cum_mean_ll={36:f}, fld_cum_mean={37:f}, \
                  fld_cum_mean_ul={38:f}, fld_inst_mean_ll={39:f}, \
                  fld_inst_mean={40:f}, fld_inst_mean_ul={41:f} \
              WHERE fld_test_id={0:d}".format(
                  _test.test_id, _test.name, _test.description,
                  _test.test_type, _test.attachment, _test.cum_time,
                  _test.cum_failures, _test.confidence,
                  _test.consumer_risk, _test.producer_risk,
                  _test.rg_plan_model, _test.rg_assess_model, _test.tr,
                  _test.mtbfg, _test.mtbfgp, _test.n_phases, _test.ttt,
                  _test.avg_growth, _test.avg_ms, _test.avg_fef,
                  _test.probability, _test.ttff, _test.grouped,
                  _test.group_interval, _test.se_scale, _test.se_shape,
                  _test.se_cum_mean, _test.se_inst_mean,
                  _test.cramer_vonmises, _test.chi_square,
                  _test.alpha_hat[0], _test.alpha_hat[1],
                  _test.alpha_hat[2], _test.beta_hat[0],
                  _test.beta_hat[1], _test.beta_hat[2],
                  _test.cum_mean[-1][0], _test.cum_mean[-1][1],
                  _test.cum_mean[-1][2], _test.instantaneous_mean[-1][0],
                  _test.instantaneous_mean[-1][1],
                  _test.instantaneous_mean[-1][2])
    (_results, _error_code, __) = self._dao.execute(_query, commit=True)

    # Save the phase-specific information.  One UPDATE per phase, keyed by
    # (test id, phase index); "p"=planned, "o"=observed, "i"=ideal fields.
    for i in range(_test.n_phases):
        _query = "UPDATE rtk_growth_testing \
                  SET fld_p_growth_rate={2:f}, fld_p_ms={3:f}, \
                      fld_p_fef_avg={4:f}, fld_p_prob={5:f}, \
                      fld_p_mi={6:f}, fld_p_mf={7:f}, fld_p_ma={8:f}, \
                      fld_p_test_time={9:f}, fld_p_num_fails={10:d}, \
                      fld_p_start_date={11:d}, fld_p_end_date={12:d}, \
                      fld_p_weeks={13:f}, fld_p_test_units={14:d}, \
                      fld_p_tpu={15:f}, fld_p_tpupw={16:f}, \
                      fld_o_ms={17:f}, fld_o_fef_avg={18:f}, \
                      fld_o_mi={19:f}, fld_o_mf={20:f}, fld_o_ma={21:f}, \
                      fld_o_ttff={22:f}, fld_i_mi={23:f}, \
                      fld_i_mf={24:f}, fld_i_ma={25:f}, \
                      fld_i_num_fails={26:d} \
                  WHERE fld_test_id={0:d} \
                  AND fld_phase_id={1:d}".format(
                      _test.test_id, i, _test.lst_p_growth_rate[i],
                      _test.lst_p_ms[i], _test.lst_p_fef[i],
                      _test.lst_p_prob[i], _test.lst_p_mtbfi[i],
                      _test.lst_p_mtbff[i], _test.lst_p_mtbfa[i],
                      _test.lst_p_test_time[i],
                      int(_test.lst_p_n_failures[i]),
                      _test.lst_p_start_date[i], _test.lst_p_end_date[i],
                      _test.lst_p_weeks[i], _test.lst_p_n_test_units[i],
                      _test.lst_p_tpu[i], _test.lst_p_tpupw[i],
                      _test.lst_o_ms[i], _test.lst_o_fef[i],
                      _test.lst_o_mtbfi[i], _test.lst_o_mtbff[i],
                      _test.lst_o_mtbfa[i], _test.ttff,
                      _test.lst_i_mtbfi[i], _test.lst_i_mtbff[i],
                      _test.lst_i_mtbfa[i], int(_test.lst_i_n_failures[i]))
        (_results, _error_code, __) = self._dao.execute(_query,
                                                        commit=True)

    return(_results, _error_code)
def save_test_data(self, test_id):
    """
    Method to save the test data.

    :param int test_id: the ID of the Test to save.
    :return: (_results, _error_code)
    :rtype: tuple
    """
    # Defaults returned when the test has no records to update.
    _results = False
    _error_code = 0

    _test = self.dicTests[test_id]

    # Save the actual test data.  Each dic_test_data entry holds
    # [record_id, failure_date, left_interval, right_interval, quantity]
    # (see add_test_record); fld_source=1 scopes updates to test records.
    for _key in _test.dic_test_data.keys():
        _query = "UPDATE rtk_survival_data \
                  SET fld_failure_date={2:d}, fld_left_interval={3:f}, \
                      fld_right_interval={4:f}, fld_quantity={5:d} \
                  WHERE fld_dataset_id={0:d} \
                  AND fld_record_id={1:d} \
                  AND fld_source=1".format(
                      _test.test_id, _test.dic_test_data[_key][0],
                      _test.dic_test_data[_key][1],
                      _test.dic_test_data[_key][2],
                      _test.dic_test_data[_key][3],
                      _test.dic_test_data[_key][4])
        (_results, _error_code, __) = self._dao.execute(_query,
                                                        commit=True)

    return(_results, _error_code)
def save_all_tests(self):
    """
    Method to save all Testing data models managed by the controller.

    :return: False if successful or True if an error is encountered.
    :rtype: bool
    """
    # Persist both the attributes and the observed records of every test.
    for _model in self.dicTests.values():
        (_results, _error_code) = self.save_test(_model.test_id)
        (_results, _error_code) = self.save_test_data(_model.test_id)

    return False
|
{"hexsha": "bdf422b82f8a0fd9c6a705e227b84bc07a8c1d30", "size": 48132, "ext": "py", "lang": "Python", "max_stars_repo_path": "rtk/testing/growth/Growth.py", "max_stars_repo_name": "rakhimov/rtk", "max_stars_repo_head_hexsha": "adc35e218ccfdcf3a6e3082f6a1a1d308ed4ff63", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "rtk/testing/growth/Growth.py", "max_issues_repo_name": "rakhimov/rtk", "max_issues_repo_head_hexsha": "adc35e218ccfdcf3a6e3082f6a1a1d308ed4ff63", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "rtk/testing/growth/Growth.py", "max_forks_repo_name": "rakhimov/rtk", "max_forks_repo_head_hexsha": "adc35e218ccfdcf3a6e3082f6a1a1d308ed4ff63", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-04-03T04:14:42.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-22T05:30:35.000Z", "avg_line_length": 42.295254833, "max_line_length": 102, "alphanum_fraction": 0.5732153245, "include": true, "reason": "import numpy,from scipy", "num_tokens": 11476}
|
[STATEMENT]
lemma vector_inf_closed:
"vector x \<Longrightarrow> vector y \<Longrightarrow> vector (x \<sqinter> y)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>vector x; vector y\<rbrakk> \<Longrightarrow> vector (x \<sqinter> y)
[PROOF STEP]
by (simp add: vector_inf_comp)
|
{"llama_tokens": 105, "file": "Stone_Relation_Algebras_Relation_Algebras", "length": 1}
|
// Boost.Polygon library voronoi_structures_test.cpp file
// Copyright Andrii Sydorchuk 2010-2012.
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// See http://www.boost.org for updates, documentation, and revision history.
#include <boost/core/lightweight_test.hpp>
#include <boost/polygon/detail/voronoi_structures.hpp>
#include <boost/polygon/voronoi_geometry_type.hpp>
#include <functional>
#include <vector>
using namespace boost::polygon::detail;
using namespace boost::polygon;

// Convenience aliases for the Voronoi detail structures under test.
typedef point_2d<int> point_type;
typedef site_event<int> site_type;
typedef circle_event<int> circle_type;
// Queue ordered by std::greater, i.e. the smallest element is on top.
typedef ordered_queue<int, std::greater<int> > ordered_queue_type;
typedef beach_line_node_key<int> node_key_type;
typedef beach_line_node_data<int, int> node_data_type;
void point_2d_test1()
{
point_type p(1, 2);
BOOST_TEST_EQ(1, p.x());
BOOST_TEST_EQ(2, p.y());
p.x(3);
BOOST_TEST_EQ(3, p.x());
p.y(4);
BOOST_TEST_EQ(4, p.y());
}
// Point site: both endpoints collapse to the same coordinate pair.
void site_event_test1()
{
  site_type s(1, 2);
  s.sorted_index(1);
  s.initial_index(2);
  s.source_category(SOURCE_CATEGORY_SEGMENT_START_POINT);
  // A point site reports identical start/end coordinates.
  BOOST_TEST_EQ(1, s.x0());
  BOOST_TEST_EQ(1, s.x1());
  BOOST_TEST_EQ(2, s.y0());
  BOOST_TEST_EQ(2, s.y1());
  BOOST_TEST(s.is_point());
  BOOST_TEST(!s.is_segment());
  BOOST_TEST(!s.is_inverse());
  BOOST_TEST_EQ(1, s.sorted_index());
  BOOST_TEST_EQ(2, s.initial_index());
  BOOST_TEST_EQ(SOURCE_CATEGORY_SEGMENT_START_POINT, s.source_category());
}
// Segment site: distinct endpoints; inverse() swaps them.
void site_event_test2()
{
  site_type s(1, 2, 3, 4);
  s.sorted_index(1);
  s.initial_index(2);
  s.source_category(SOURCE_CATEGORY_INITIAL_SEGMENT);
  BOOST_TEST_EQ(1, s.x0());
  BOOST_TEST_EQ(2, s.y0());
  BOOST_TEST_EQ(3, s.x1());
  BOOST_TEST_EQ(4, s.y1());
  BOOST_TEST(!s.is_point());
  BOOST_TEST(s.is_segment());
  BOOST_TEST(!s.is_inverse());
  BOOST_TEST_EQ(SOURCE_CATEGORY_INITIAL_SEGMENT, s.source_category());
  // Inverting swaps the endpoints and sets the inverse flag, but must not
  // disturb the source category.
  s.inverse();
  BOOST_TEST_EQ(3, s.x0());
  BOOST_TEST_EQ(4, s.y0());
  BOOST_TEST_EQ(1, s.x1());
  BOOST_TEST_EQ(2, s.y1());
  BOOST_TEST(s.is_inverse());
  BOOST_TEST_EQ(SOURCE_CATEGORY_INITIAL_SEGMENT, s.source_category());
}
void circle_event_test()
{
circle_type c(0, 1, 2);
BOOST_TEST_EQ(0, c.x());
BOOST_TEST_EQ(1, c.y());
BOOST_TEST_EQ(2, c.lower_x());
BOOST_TEST_EQ(1, c.lower_y());
BOOST_TEST(c.is_active());
c.x(3);
c.y(4);
c.lower_x(5);
BOOST_TEST_EQ(3, c.x());
BOOST_TEST_EQ(4, c.y());
BOOST_TEST_EQ(5, c.lower_x());
BOOST_TEST_EQ(4, c.lower_y());
c.deactivate();
BOOST_TEST(!c.is_active());
}
void ordered_queue_test()
{
  ordered_queue_type q;
  BOOST_TEST(q.empty());
  // push() returns a reference to the stored element, so entries can be
  // mutated in place after insertion.
  std::vector<int*> vi;
  for (int i = 0; i < 20; ++i)
    vi.push_back(&q.push(i));
  // Double every stored value through the saved pointers.
  for (int i = 0; i < 20; ++i)
    *vi[i] <<= 1;
  BOOST_TEST(!q.empty());
  // With the std::greater comparator the smallest element surfaces first,
  // so popping yields the doubled values in ascending order.
  for (int i = 0; i < 20; ++i, q.pop())
    BOOST_TEST_EQ(i << 1, q.top());
  BOOST_TEST(q.empty());
}
void beach_line_node_key_test()
{
node_key_type key(1);
BOOST_TEST_EQ(1, key.left_site());
BOOST_TEST_EQ(1, key.right_site());
key.left_site(2);
BOOST_TEST_EQ(2, key.left_site());
BOOST_TEST_EQ(1, key.right_site());
key.right_site(3);
BOOST_TEST_EQ(2, key.left_site());
BOOST_TEST_EQ(3, key.right_site());
}
// Verify that the edge and circle-event pointers are stored independently.
void beach_line_node_data_test()
{
  node_data_type node_data(NULL);
  BOOST_TEST(node_data.edge() == NULL);
  BOOST_TEST(node_data.circle_event() == NULL);
  int data = 4;
  // Setting the circle event must leave the edge pointer untouched.
  node_data.circle_event(&data);
  BOOST_TEST(node_data.edge() == NULL);
  BOOST_TEST(node_data.circle_event() == &data);
  node_data.edge(&data);
  BOOST_TEST(node_data.edge() == &data);
  BOOST_TEST(node_data.circle_event() == &data);
}
int main()
{
  // Run every unit test; report_errors() returns the accumulated failure
  // count, so a non-zero exit status signals test failure.
  point_2d_test1();
  site_event_test1();
  site_event_test2();
  circle_event_test();
  ordered_queue_test();
  beach_line_node_key_test();
  beach_line_node_data_test();
  return boost::report_errors();
}
|
{"hexsha": "1693c317ee3774a2ff3839f739f7ee94073b7ba5", "size": 3962, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "3rdParty/boost/1.71.0/libs/polygon/test/voronoi_structures_test.cpp", "max_stars_repo_name": "rajeev02101987/arangodb", "max_stars_repo_head_hexsha": "817e6c04cb82777d266f3b444494140676da98e2", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 12278.0, "max_stars_repo_stars_event_min_datetime": "2015-01-29T17:11:33.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T21:12:00.000Z", "max_issues_repo_path": "3rdParty/boost/1.71.0/libs/polygon/test/voronoi_structures_test.cpp", "max_issues_repo_name": "rajeev02101987/arangodb", "max_issues_repo_head_hexsha": "817e6c04cb82777d266f3b444494140676da98e2", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 9469.0, "max_issues_repo_issues_event_min_datetime": "2015-01-30T05:33:07.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T16:17:21.000Z", "max_forks_repo_path": "3rdParty/boost/1.71.0/libs/polygon/test/voronoi_structures_test.cpp", "max_forks_repo_name": "rajeev02101987/arangodb", "max_forks_repo_head_hexsha": "817e6c04cb82777d266f3b444494140676da98e2", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 892.0, "max_forks_repo_forks_event_min_datetime": "2015-01-29T16:26:19.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-20T07:44:30.000Z", "avg_line_length": 26.238410596, "max_line_length": 77, "alphanum_fraction": 0.691822312, "num_tokens": 1154}
|
import numpy as np
class RealignMatrix(object):
    """Realign a 4x4 row-major transform so chosen axes follow given vectors.

    Matrices are 4x4 numpy arrays with the rotation basis stored in rows
    0-2 and the translation in row 3 (row-vector convention).
    """

    @staticmethod
    def get_M_aligned_to_x(M, x):
        """Return a matrix whose x row is aligned with *x*.

        The z and y rows are rebuilt by cross products against M's rows so
        the basis stays orthonormal; M's translation row is carried over.
        """
        x = x / np.linalg.norm(x)
        z = np.cross(x, M[1][:3])
        z = z / np.linalg.norm(z)
        y = np.cross(z, x)
        y = y / np.linalg.norm(y)
        return np.array([x.tolist() + [0], y.tolist() + [0], z.tolist() + [0], M[3]], dtype=float)

    @staticmethod
    def get_M_aligned_to_y(M, y):
        """Return a matrix whose y row is aligned with *y* (see get_M_aligned_to_x)."""
        y = y / np.linalg.norm(y)
        x = np.cross(y, M[2][:3])
        x = x / np.linalg.norm(x)
        z = np.cross(x, y)
        z = z / np.linalg.norm(z)
        return np.array([x.tolist() + [0], y.tolist() + [0], z.tolist() + [0], M[3]], dtype=float)

    @staticmethod
    def get_M_aligned_to_z(M, z):
        """Return a matrix whose z row is aligned with *z* (see get_M_aligned_to_x)."""
        z = z / np.linalg.norm(z)
        y = np.cross(z, M[0][:3])
        y = y / np.linalg.norm(y)
        x = np.cross(y, z)
        x = x / np.linalg.norm(x)
        return np.array([x.tolist() + [0], y.tolist() + [0], z.tolist() + [0], M[3]], dtype=float)

    def __init__(self, M, local_vectors):
        """Precompute per-vector alignment offsets relative to matrix *M*.

        :param M: 4x4 reference matrix.
        :param local_vectors: iterable of 3-vectors; the dominant component
            of each vector selects which axis (x, y or z) it aligns.
        """
        # (Removed an unused identity-matrix local from the original.)
        # Dominant axis (0=x, 1=y, 2=z) of each vector by squared magnitude.
        self._aligned_axis = np.argmax(local_vectors * local_vectors, axis=1)
        # Per-vector offsets: M expressed relative to each aligned frame.
        self._aligned_matrices = []
        # NOTE(review): never populated here; kept so the attribute set of
        # instances is unchanged for any external code that inspects it.
        self._local_parent_matrices = []
        for axis, vector in zip(self._aligned_axis, local_vectors):
            aligner = [self.get_M_aligned_to_x,
                       self.get_M_aligned_to_y,
                       self.get_M_aligned_to_z][axis]
            matrix = aligner(M, vector)
            matrix[3][:3] = M[3][:3]
            self._aligned_matrices.append(np.dot(M, np.linalg.inv(matrix)))

    def solve_from_local_vectors(self, M, local_vectors):
        """Blend the per-vector aligned frames into one orthonormal matrix.

        :param M: current 4x4 matrix to realign.
        :param local_vectors: one vector per constraint, in the same order
            as passed to the constructor.
        :return: 4x4 matrix with a re-orthonormalized rotation and the
            translation row copied from *M*.
        """
        alignedMatrices = [
            np.dot(
                matrix,
                [self.get_M_aligned_to_x, self.get_M_aligned_to_y, self.get_M_aligned_to_z][axis](M, vector)
            )
            for axis, matrix, vector in zip(self._aligned_axis, self._aligned_matrices, local_vectors)
        ]
        result_matrix = alignedMatrices[0]
        if len(alignedMatrices) > 1:
            # Blend candidate frames by summation; orthonormality is
            # restored below.
            result_matrix = np.sum(alignedMatrices, axis=0)

        # Gram-Schmidt-style re-orthonormalization from the x and y rows.
        x = result_matrix[0][:3]
        y = result_matrix[1][:3]
        x = x / np.linalg.norm(x)
        y = y / np.linalg.norm(y)
        z = np.cross(x, y)
        y = np.cross(z, x)
        result_matrix = np.array([x.tolist() + [0], y.tolist() + [0], z.tolist() + [0], [0, 0, 0, 1]], dtype=float)
        result_matrix[3][:3] = M[3][:3]
        return result_matrix
|
{"hexsha": "e75028e8d2c286d79274c768d99e4ab79e4ee14b", "size": 2576, "ext": "py", "lang": "Python", "max_stars_repo_path": "new_challenge_old_research/realign_matrix.py", "max_stars_repo_name": "JeromeEippers/python_rnd_collection", "max_stars_repo_head_hexsha": "8383a9759197cfd4c560792f0f06ba981bb1f933", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "new_challenge_old_research/realign_matrix.py", "max_issues_repo_name": "JeromeEippers/python_rnd_collection", "max_issues_repo_head_hexsha": "8383a9759197cfd4c560792f0f06ba981bb1f933", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "new_challenge_old_research/realign_matrix.py", "max_forks_repo_name": "JeromeEippers/python_rnd_collection", "max_forks_repo_head_hexsha": "8383a9759197cfd4c560792f0f06ba981bb1f933", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-06-17T08:40:08.000Z", "max_forks_repo_forks_event_max_datetime": "2020-06-17T08:40:08.000Z", "avg_line_length": 36.8, "max_line_length": 113, "alphanum_fraction": 0.5415372671, "include": true, "reason": "import numpy", "num_tokens": 722}
|
import copy
from datetime import datetime
import pandas as pd
import numpy as np
import warnings
from .events import UnplugEvent
from .interface import Interface, InvalidScheduleError
class Simulator:
    """ Central class of the acnsim package.

    The Simulator class is the central place where everything about a particular simulation is stored including the
    network, scheduling algorithm, and events. It is also where timekeeping is done and orchestrates calling the
    scheduling algorithm, sending pilots to the network, and updating the energy delivered to each EV.

    Args:
        network (ChargingNetwork): The charging network which the simulation will use.
        scheduler (BasicAlgorithm): The scheduling algorithm used in the simulation.
        events (EventQueue): Queue of events which will occur in the simulation.
        start (datetime): Date and time of the first period of the simulation.
        period (int): Length of each time interval in the simulation in minutes. Default: 1
        signals (Dict[str, ...]):
        store_schedule_history (bool): If True, store the scheduler output each time it is run. Note this can use lots
            of memory for long simulations.
    """

    def __init__(self, network, scheduler, events, start, period=1, signals=None,
                 store_schedule_history=False, verbose=True):
        self.network = network
        self.scheduler = scheduler
        # Give the scheduler a restricted view of this simulator.
        self.scheduler.register_interface(Interface(self))
        self.event_queue = events
        self.start = start
        self.period = period
        self.max_recompute = scheduler.max_recompute
        self.signals = signals
        self.verbose = verbose

        # Information storage.
        # Rows are EVSEs (in network station order); columns are simulation
        # periods, sized by the last event timestamp and grown on demand.
        self.pilot_signals = np.zeros((len(self.network.station_ids), self.event_queue.get_last_timestamp() + 1))
        self.charging_rates = np.zeros((len(self.network.station_ids), self.event_queue.get_last_timestamp() + 1))
        self.peak = 0
        self.ev_history = {}
        self.event_history = []
        if store_schedule_history:
            self.schedule_history = {}
        else:
            self.schedule_history = None

        # Local Variables
        self._iteration = 0
        self._resolve = False
        self._last_schedule_update = 0

    @property
    def iteration(self):
        # Read-only view of the current simulation period index.
        return self._iteration

    def run(self):
        """ Run the simulation until the event queue is empty.

        The run function is the heart of the simulator. It triggers all actions and keeps the simulator moving forward.
        Its actions are (in order):
            1. Get current events from the event queue and execute them.
            2. If necessary run the scheduling algorithm.
            3. Send pilot signals to the network.
            4. Receive back actual charging rates from the network and store the results.

        Returns:
            None
        """
        while not self.event_queue.empty():
            current_events = self.event_queue.get_current_events(self._iteration)
            for e in current_events:
                self.event_history.append(e)
                self._process_event(e)
            # Recompute when an event requested it, or when max_recompute
            # periods have elapsed since the last schedule update.  Note
            # precedence: `or` binds the whole `and` clause on the right.
            if self._resolve or \
                    self.max_recompute is not None and \
                    self._iteration - self._last_schedule_update >= self.max_recompute:
                new_schedule = self.scheduler.run()
                self._update_schedules(new_schedule)
                if self.schedule_history is not None:
                    self.schedule_history[self._iteration] = new_schedule
                self._last_schedule_update = self._iteration
                self._resolve = False
            self.network.update_pilots(self.pilot_signals, self._iteration, self.period)
            self._store_actual_charging_rates()
            self._iteration = self._iteration + 1

    def get_active_evs(self):
        """ Return all EVs which are plugged in and not fully charged at the current time.

        Wrapper for self.network.active_evs. See its documentation for more details.

        Returns:
            List[EV]: List of all EVs which are plugged in but not fully charged at the current time.
        """
        # Deep copy so callers cannot mutate the simulated EV state.
        evs = copy.deepcopy(self.network.active_evs)
        return evs

    def _process_event(self, event):
        """ Process an event and take appropriate actions.

        Args:
            event (Event): Event to be processed.

        Returns:
            None
        """
        if event.type == 'Plugin':
            self._print('Plugin Event...')
            self.network.plugin(event.ev, event.ev.station_id)
            self.ev_history[event.ev.session_id] = event.ev
            # Schedule the matching unplug at the EV's departure period.
            self.event_queue.add_event(UnplugEvent(event.ev.departure, event.ev.station_id, event.ev.session_id))
            self._resolve = True
            self._last_schedule_update = event.timestamp
        elif event.type == 'Unplug':
            self._print('Unplug Event...')
            self.network.unplug(event.station_id)
            self._resolve = True
            self._last_schedule_update = event.timestamp
        elif event.type == 'Recompute':
            # Recompute events only flag the scheduler to run again.
            self._print('Recompute Event...')
            self._resolve = True

    def _update_schedules(self, new_schedule):
        """ Extend the current self.pilot_signals with the new pilot signal schedule.

        Args:
            new_schedule (Dict[str, List[number]]): Dictionary mappding station ids to a schedule of pilot signals.

        Returns:
            None

        Raises:
            KeyError: Raised when station_id is in the new_schedule but not registered in the Network.
        """
        if len(new_schedule) == 0:
            return

        for station_id in new_schedule:
            if station_id not in self.network.station_ids:
                raise KeyError('Station {0} in schedule but not found in network.'.format(station_id))

        schedule_lengths = set(len(x) for x in new_schedule.values())
        if len(schedule_lengths) > 1:
            raise InvalidScheduleError('All schedules should have the same length.')
        schedule_length = schedule_lengths.pop()

        # Build a matrix in network station order; stations the scheduler
        # omitted get an all-zero row.
        schedule_matrix = np.array([new_schedule[evse_id] if evse_id in new_schedule else [0] * schedule_length for evse_id in self.network.station_ids])
        # Infeasible schedules are applied anyway; only a warning is raised.
        if not self.network.is_feasible(schedule_matrix):
            warnings.warn("Invalid schedule provided at iteration {0}".format(self._iteration), UserWarning)
        if self._iteration + schedule_length <= len(self.pilot_signals[0]):
            self.pilot_signals[:, self._iteration:(self._iteration + schedule_length)] = schedule_matrix
        else:
            # We've reached the end of pilot_signals, so double pilot_signal array width
            self.pilot_signals = _increase_width(self.pilot_signals,
                                                 max(self.event_queue.get_last_timestamp() + 1, self._iteration + schedule_length))
            self.pilot_signals[:, self._iteration:(self._iteration + schedule_length)] = schedule_matrix

    def _store_actual_charging_rates(self):
        """ Store actual charging rates from the network in the simulator for later analysis."""
        current_rates = self.network.current_charging_rates
        agg = np.sum(current_rates)
        # NOTE(review): `self.iteration` (property) and `self._iteration`
        # are mixed below; they refer to the same value.  The widening
        # target in the else-branch is get_last_timestamp()+1, which is not
        # guaranteed to exceed the current iteration -- confirm upstream.
        if self.iteration < len(self.charging_rates[0]):
            self.charging_rates[:, self.iteration] = current_rates.T
        else:
            self.charging_rates = _increase_width(self.charging_rates, self.event_queue.get_last_timestamp() + 1)
            self.charging_rates[:, self._iteration] = current_rates.T
        # Track the peak aggregate charging rate seen so far.
        self.peak = max(self.peak, agg)

    def _print(self, s):
        # Console output gated by the verbose flag.
        if self.verbose:
            print(s)

    def charging_rates_as_df(self):
        """ Return the charging rates as a pandas DataFrame, with EVSE id as columns
        and iteration as index.
        """
        return pd.DataFrame(data=self.charging_rates.T, columns=self.network.station_ids)

    def pilot_signals_as_df(self):
        """ Return the pilot signals as a pandas DataFrame """
        return pd.DataFrame(data=self.pilot_signals.T, columns=self.network.station_ids)

    def index_of_evse(self, station_id):
        """ Return the numerical index of the EVSE given by station_id in the (ordered) dictionary
        of EVSEs.
        """
        if station_id not in self.network.station_ids:
            raise KeyError("EVSE {0} not found in network.".format(station_id))
        return self.network.station_ids.index(station_id)
def _increase_width(a, target_width):
""" Returns a new 2-D numpy array with target_width number of columns, with the contents
of a up to the first len(a[0]) columns and 0's thereafter.
Args:
a (numpy.Array): 2-D numpy array to be expanded.
target_width (int): desired number of columns; must be greater than number of columns in a
Returns:
numpy.Array
"""
new_matrix = np.zeros((len(a), target_width))
new_matrix[:, :len(a[0])] = a
return new_matrix
|
{"hexsha": "86c23b987269dddc53aede242793c42c6b0fae6d", "size": 9036, "ext": "py", "lang": "Python", "max_stars_repo_path": "acnportal/acnsim/simulator.py", "max_stars_repo_name": "irasus-technologies/acnportal", "max_stars_repo_head_hexsha": "f6ac7b9ddb28ab48177c51a676f1619e88ea91e0", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "acnportal/acnsim/simulator.py", "max_issues_repo_name": "irasus-technologies/acnportal", "max_issues_repo_head_hexsha": "f6ac7b9ddb28ab48177c51a676f1619e88ea91e0", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "acnportal/acnsim/simulator.py", "max_forks_repo_name": "irasus-technologies/acnportal", "max_forks_repo_head_hexsha": "f6ac7b9ddb28ab48177c51a676f1619e88ea91e0", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.0285714286, "max_line_length": 153, "alphanum_fraction": 0.6544931386, "include": true, "reason": "import numpy", "num_tokens": 1849}
|
"""Adopted from https://github.com/DylanWusee/pointconv_pytorch/blob/master/model/pointconv.py"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from time import time
import numpy as np
def timeit(tag, t):
    """Print seconds elapsed since *t* labelled by *tag*; return a fresh
    timestamp so calls can be chained."""
    elapsed = time() - t
    print("{}: {}s".format(tag, elapsed))
    return time()
def square_distance(src, dst):
    """
    Calculate squared Euclidean distance between each pair of points.

    Uses the expansion ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b so the whole
    computation is one batched matmul plus two reductions.

    Input:
        src: source points, [B, N, C]
        dst: target points, [B, M, C]
    Output:
        dist: per-point square distance, [B, N, M]
    """
    cross_term = torch.matmul(src, dst.permute(0, 2, 1))
    src_sq = torch.sum(src ** 2, -1).unsqueeze(-1)
    dst_sq = torch.sum(dst ** 2, -1).unsqueeze(-2)
    return src_sq + dst_sq - 2 * cross_term
def index_points(points, idx):
    """
    Gather point features by per-batch indices.

    Input:
        points: input points data, [B, N, C]
        idx: sample index data, [B, S] (or [B, S1, ..., Sk])
    Return:
        new_points:, indexed points data, [B, S, C]
    """
    B = points.shape[0]
    lead_shape = [B] + [1] * (idx.dim() - 1)
    batch_idx = torch.arange(B, dtype=torch.long, device=points.device)
    batch_idx = batch_idx.view(lead_shape).expand(*idx.shape)
    return points[batch_idx, idx, :]
def farthest_point_sample(xyz, npoint):
    """
    Iteratively select npoint indices of mutually distant points (FPS).

    Input:
        xyz: pointcloud data, [B, N, C]
        npoint: number of samples
    Return:
        centroids: sampled pointcloud index, [B, npoint]
    """
    device = xyz.device
    B, N, C = xyz.shape
    centroids = torch.zeros(B, npoint, dtype=torch.long).to(device)
    # distance[b, n]: squared distance from point n to its nearest selected
    # centroid so far; initialized effectively to +inf.
    distance = torch.ones(B, N).to(device) * 1e10
    # First centroid is random (non-deterministic unless torch is seeded).
    farthest = torch.randint(0, N, (B,), dtype=torch.long).to(device)
    batch_indices = torch.arange(B, dtype=torch.long).to(device)
    for i in range(npoint):
        centroids[:, i] = farthest
        # NOTE(review): view(B, 1, 3) hard-codes C == 3 — confirm callers.
        centroid = xyz[batch_indices, farthest, :].view(B, 1, 3)
        dist = torch.sum((xyz - centroid) ** 2, -1)
        # Keep the minimum distance to any selected centroid.
        mask = dist < distance
        distance[mask] = dist[mask]
        # Next centroid: the point farthest from the current centroid set.
        farthest = torch.max(distance, -1)[1]
    return centroids
def query_ball_point(radius, nsample, xyz, new_xyz):
    """
    Input:
        radius: local region radius
        nsample: max sample number in local region
        xyz: all points, [B, N, C]
        new_xyz: query points, [B, S, C]
    Return:
        group_idx: grouped points index, [B, S, nsample]
    """
    device = xyz.device
    B, N, _ = xyz.shape
    S = new_xyz.shape[1]
    # Candidate indices 0..N-1 replicated for every query point.
    group_idx = torch.arange(N, dtype=torch.long).to(device).view(1, 1, N).repeat([B, S, 1])
    # Push points outside the ball to sentinel index N so they sort last.
    sqrdists = square_distance(new_xyz, xyz)
    group_idx[sqrdists > radius ** 2] = N
    group_idx = group_idx.sort(dim=-1)[0][:, :, :nsample]
    # Backfill sentinel slots with the first (in-ball) index of each query;
    # a query with no in-ball point keeps the sentinel, as before.
    first = group_idx[:, :, :1].expand(-1, -1, nsample)
    invalid = group_idx == N
    group_idx = torch.where(invalid, first, group_idx)
    return group_idx
def knn_point(nsample, xyz, new_xyz):
    """
    Input:
        nsample: max sample number in local region
        xyz: all points, [B, N, C]
        new_xyz: query points, [B, S, C]
    Return:
        group_idx: grouped points index, [B, S, nsample]
    """
    # Indices of the nsample smallest squared distances per query point
    # (unsorted, matching the original behavior).
    dists = square_distance(new_xyz, xyz)
    group_idx = torch.topk(dists, nsample, dim=-1, largest=False, sorted=False)[1]
    return group_idx
def sample_and_group(npoint, nsample, xyz, points, density_scale=None):
    """
    Input:
        npoint:
        nsample:
        xyz: input points position data, [B, N, C]
        points: input points data, [B, N, D]
    Return:
        new_xyz: sampled points position data, [B, 1, C]
        new_points: sampled points data, [B, 1, N, C+D]
    """
    B, N, C = xyz.shape
    # Farthest point sampling picks npoint well-spread centroids.
    fps_idx = farthest_point_sample(xyz, npoint)
    new_xyz = index_points(xyz, fps_idx)
    # kNN grouping around each centroid, then recenter each neighborhood.
    idx = knn_point(nsample, xyz, new_xyz)
    grouped_xyz = index_points(xyz, idx)  # [B, npoint, nsample, C]
    grouped_xyz_norm = grouped_xyz - new_xyz.view(B, npoint, 1, C)
    if points is None:
        new_points = grouped_xyz_norm
    else:
        # Concatenate recentered coordinates with per-point features.
        new_points = torch.cat([grouped_xyz_norm, index_points(points, idx)], dim=-1)
    if density_scale is None:
        return new_xyz, new_points, grouped_xyz_norm, idx
    grouped_density = index_points(density_scale, idx)
    return new_xyz, new_points, grouped_xyz_norm, idx, grouped_density
def sample_and_group_all(xyz, points, density_scale=None):
    """
    Input:
        xyz: input points position data, [B, N, C]
        points: input points data, [B, N, D]
    Return:
        new_xyz: sampled points position data, [B, 1, C]
        new_points: sampled points data, [B, 1, N, C+D]
    """
    B, N, C = xyz.shape
    # Single group: its "centroid" is the centroid of the whole cloud.
    new_xyz = xyz.mean(dim=1, keepdim=True)
    grouped_xyz = xyz.view(B, 1, N, C) - new_xyz.view(B, 1, 1, C)
    if points is None:
        new_points = grouped_xyz
    else:
        new_points = torch.cat([grouped_xyz, points.view(B, 1, N, -1)], dim=-1)
    if density_scale is None:
        return new_xyz, new_points, grouped_xyz
    return new_xyz, new_points, grouped_xyz, density_scale.view(B, 1, N, 1)
def group(nsample, xyz, points):
    """
    Input:
        nsample:
        xyz: input points position data, [B, N, C]
        points: input points data, [B, N, D]
    Return:
        new_points: grouped points data, [B, N, nsample, C+D]
        grouped_xyz_norm: recentered neighborhoods, [B, N, nsample, C]
    """
    B, N, C = xyz.shape
    # Every point is its own query; gather its nsample nearest neighbors.
    idx = knn_point(nsample, xyz, xyz)
    grouped_xyz = index_points(xyz, idx)  # [B, N, nsample, C]
    grouped_xyz_norm = grouped_xyz - xyz.view(B, N, 1, C)
    if points is None:
        new_points = grouped_xyz_norm
    else:
        grouped_points = index_points(points, idx)
        new_points = torch.cat([grouped_xyz_norm, grouped_points], dim=-1)
    return new_points, grouped_xyz_norm
def compute_density(xyz, bandwidth):
    '''
    Estimate a per-point density with a Gaussian kernel over pairwise
    squared distances.

    xyz: input points position data, [B, N, C]
    bandwidth: Gaussian kernel bandwidth
    Returns: per-point density estimate, [B, N]
    '''
    sqrdists = square_distance(xyz, xyz)  # [B, N, N] squared pairwise distances
    # Gaussian kernel; the 2.5*bandwidth normaliser matches the original
    # implementation (it is not the exact 1-D Gaussian constant).
    kernel = torch.exp(-sqrdists / (2.0 * bandwidth * bandwidth)) / (2.5 * bandwidth)
    return kernel.mean(dim=-1)
class DensityNet(nn.Module):
    """MLP of 1x1 Conv1d layers mapping a raw per-point density estimate to a
    multiplicative scale in (0.5, 1.5) (PointConv's density branch)."""

    def __init__(self, hidden_unit=[8, 8]):
        super(DensityNet, self).__init__()
        self.mlp_convs = nn.ModuleList()
        self.mlp_bns = nn.ModuleList()
        self.mlp_convs.append(nn.Conv1d(1, hidden_unit[0], 1))
        self.mlp_bns.append(nn.BatchNorm1d(hidden_unit[0]))
        for i in range(1, len(hidden_unit)):
            self.mlp_convs.append(
                nn.Conv1d(hidden_unit[i - 1], hidden_unit[i], 1))
            self.mlp_bns.append(nn.BatchNorm1d(hidden_unit[i]))
        self.mlp_convs.append(nn.Conv1d(hidden_unit[-1], 1, 1))
        self.mlp_bns.append(nn.BatchNorm1d(1))

    def forward(self, xyz_density):
        """xyz_density: [B, N] -> density scale [B, 1, N] in (0.5, 1.5)."""
        density_scale = xyz_density.unsqueeze(1)  # add channel dim for Conv1d
        for i, conv in enumerate(self.mlp_convs):
            bn = self.mlp_bns[i]
            density_scale = bn(conv(density_scale))
            # BUG FIX: original tested `i == len(self.mlp_convs)`, which is
            # never true under enumerate, so the sigmoid branch was
            # unreachable and ReLU was applied to the final layer instead.
            if i == len(self.mlp_convs) - 1:
                # Final layer: squash to (0, 1), then shift to (0.5, 1.5).
                density_scale = torch.sigmoid(density_scale) + 0.5
            else:
                density_scale = F.relu(density_scale)
        return density_scale
class WeightNet(nn.Module):
    """Small MLP of 1x1 Conv2d layers (BN + ReLU after each) that turns
    localized neighbour coordinates into per-neighbour aggregation weights."""

    def __init__(self, in_channel, out_channel, hidden_unit=[8, 8]):
        super(WeightNet, self).__init__()
        self.mlp_convs = nn.ModuleList()
        self.mlp_bns = nn.ModuleList()
        if not hidden_unit:
            # No hidden layers: single conv straight to the output width.
            self.mlp_convs.append(nn.Conv2d(in_channel, out_channel, 1))
            self.mlp_bns.append(nn.BatchNorm2d(out_channel))
        else:
            widths = [in_channel] + list(hidden_unit) + [out_channel]
            for prev, cur in zip(widths[:-1], widths[1:]):
                self.mlp_convs.append(nn.Conv2d(prev, cur, 1))
                self.mlp_bns.append(nn.BatchNorm2d(cur))

    def forward(self, localized_xyz):
        """localized_xyz: [B, C, K, N] -> weights [B, out_channel, K, N]."""
        weights = localized_xyz
        for conv, bn in zip(self.mlp_convs, self.mlp_bns):
            weights = F.relu(bn(conv(weights)))
        return weights
class PointConvSetAbstraction(nn.Module):
    """PointConv set-abstraction layer (no density weighting).

    Samples `npoint` group centres, gathers `nsample` neighbours for each,
    runs a shared 1x1-conv MLP over the groups, then aggregates each group
    with learned per-neighbour weights (WeightNet) and a linear projection.
    """

    def __init__(self, npoint, nsample, in_channel, mlp, group_all):
        super(PointConvSetAbstraction, self).__init__()
        self.npoint = npoint    # number of sampled group centres
        self.nsample = nsample  # neighbours gathered per centre
        self.mlp_convs = nn.ModuleList()
        self.mlp_bns = nn.ModuleList()
        last_channel = in_channel
        # Shared MLP applied to every (neighbour, centre) pair.
        for out_channel in mlp:
            self.mlp_convs.append(nn.Conv2d(last_channel, out_channel, 1))
            self.mlp_bns.append(nn.BatchNorm2d(out_channel))
            last_channel = out_channel
        # Maps relative xyz (3 channels) to 16 aggregation weights/neighbour.
        self.weightnet = WeightNet(3, 16)
        self.linear = nn.Linear(16 * mlp[-1], mlp[-1])
        self.bn_linear = nn.BatchNorm1d(mlp[-1])
        self.group_all = group_all  # True: collapse the whole cloud into one group

    def forward(self, xyz, points):
        """
        Input:
            xyz: input points position data, [B, C, N]
            points: input points data, [B, D, N]
        Return:
            new_xyz: sampled points position data, [B, C, S]
            new_points_concat: sample points feature data, [B, D', S]
        """
        B = xyz.shape[0]
        # Switch to channel-last layout for the sampling/grouping helpers.
        xyz = xyz.permute(0, 2, 1)
        if points is not None:
            points = points.permute(0, 2, 1)
        if self.group_all:
            new_xyz, new_points, grouped_xyz_norm = sample_and_group_all(
                xyz, points)
        else:
            new_xyz, new_points, grouped_xyz_norm, _ = sample_and_group(
                self.npoint, self.nsample, xyz, points)
        # new_xyz: sampled points position data, [B, npoint, C]
        # new_points: sampled points data, [B, npoint, nsample, C+D]
        new_points = new_points.permute(0, 3, 2, 1)  # [B, C+D, nsample,npoint]
        for i, conv in enumerate(self.mlp_convs):
            bn = self.mlp_bns[i]
            new_points = F.relu(bn(conv(new_points)))
        # Learned aggregation weights from the relative neighbour coordinates.
        grouped_xyz = grouped_xyz_norm.permute(0, 3, 2, 1)
        weights = self.weightnet(grouped_xyz)
        # Weighted pooling: the matmul contracts the nsample axis.
        new_points = torch.matmul(input=new_points.permute(
            0, 3, 1, 2), other=weights.permute(0, 3, 2, 1)).view(B, self.npoint, -1)
        new_points = self.linear(new_points)
        new_points = self.bn_linear(new_points.permute(0, 2, 1))
        new_points = F.relu(new_points)
        # Back to channel-first layout for the caller.
        new_xyz = new_xyz.permute(0, 2, 1)
        return new_xyz, new_points
class PointConvDensitySetAbstraction(nn.Module):
    """PointConv set-abstraction layer with density re-weighting.

    Same pipeline as PointConvSetAbstraction, but point features are scaled
    by a learned function (DensityNet) of the local kernel-density estimate
    before the weighted aggregation.
    """

    def __init__(self, npoint, nsample, in_channel, mlp, bandwidth, group_all):
        super(PointConvDensitySetAbstraction, self).__init__()
        self.npoint = npoint    # number of sampled group centres
        self.nsample = nsample  # neighbours gathered per centre
        self.mlp_convs = nn.ModuleList()
        self.mlp_bns = nn.ModuleList()
        last_channel = in_channel
        # Shared 1x1-conv MLP applied to every (neighbour, centre) pair.
        for out_channel in mlp:
            self.mlp_convs.append(nn.Conv2d(last_channel, out_channel, 1))
            self.mlp_bns.append(nn.BatchNorm2d(out_channel))
            last_channel = out_channel
        self.weightnet = WeightNet(3, 16)
        self.linear = nn.Linear(16 * mlp[-1], mlp[-1])
        self.bn_linear = nn.BatchNorm1d(mlp[-1])
        self.densitynet = DensityNet()  # maps raw density to a scale factor
        self.group_all = group_all
        self.bandwidth = bandwidth      # Gaussian kernel bandwidth for compute_density

    def forward(self, xyz, points):
        """
        Input:
            xyz: input points position data, [B, C, N]
            points: input points data, [B, D, N]
        Return:
            new_xyz: sampled points position data, [B, C, S]
            new_points_concat: sample points feature data, [B, D', S]
        """
        B = xyz.shape[0]
        N = xyz.shape[2]
        # Channel-last layout for the density / grouping helpers.
        xyz = xyz.permute(0, 2, 1)
        if points is not None:
            points = points.permute(0, 2, 1)
        # Per-point kernel density -> learned multiplicative scale.
        xyz_density = compute_density(xyz, self.bandwidth)
        density_scale = self.densitynet(xyz_density)
        if self.group_all:
            new_xyz, new_points, grouped_xyz_norm, grouped_density = sample_and_group_all(
                xyz, points, density_scale.view(B, N, 1))
        else:
            new_xyz, new_points, grouped_xyz_norm, _, grouped_density = sample_and_group(
                self.npoint, self.nsample, xyz, points, density_scale.view(B, N, 1))
        # new_xyz: sampled points position data, [B, npoint, C]
        # new_points: sampled points data, [B, npoint, nsample, C+D]
        new_points = new_points.permute(0, 3, 2, 1)  # [B, C+D, nsample,npoint]
        for i, conv in enumerate(self.mlp_convs):
            bn = self.mlp_bns[i]
            new_points = F.relu(bn(conv(new_points)))
        grouped_xyz = grouped_xyz_norm.permute(0, 3, 2, 1)
        weights = self.weightnet(grouped_xyz)
        # Apply the per-neighbour density scale before the weighted pooling.
        new_points = new_points * grouped_density.permute(0, 3, 2, 1)
        new_points = torch.matmul(input=new_points.permute(
            0, 3, 1, 2), other=weights.permute(0, 3, 2, 1)).view(B, self.npoint, -1)
        new_points = self.linear(new_points)
        new_points = self.bn_linear(new_points.permute(0, 2, 1))
        new_points = F.relu(new_points)
        new_xyz = new_xyz.permute(0, 2, 1)
        return new_xyz, new_points
class PointConvDensityClsSsg(nn.Module):
    """PointConv classifier (single-scale grouping) with density weighting.

    Three density set-abstraction stages progressively reduce the cloud to a
    single 1024-dim global feature, followed by a 3-layer MLP head.
    """

    def __init__(self, num_classes=40):
        super(PointConvDensityClsSsg, self).__init__()
        self.sa1 = PointConvDensitySetAbstraction(
            npoint=512, nsample=32, in_channel=3,
            mlp=[64, 64, 128], bandwidth=0.1, group_all=False)
        self.sa2 = PointConvDensitySetAbstraction(
            npoint=128, nsample=64, in_channel=128 + 3,
            mlp=[128, 128, 256], bandwidth=0.2, group_all=False)
        self.sa3 = PointConvDensitySetAbstraction(
            npoint=1, nsample=None, in_channel=256 + 3,
            mlp=[256, 512, 1024], bandwidth=0.4, group_all=True)
        self.fc1 = nn.Linear(1024, 512)
        self.bn1 = nn.BatchNorm1d(512)
        self.drop1 = nn.Dropout(0.4)
        self.fc2 = nn.Linear(512, 256)
        self.bn2 = nn.BatchNorm1d(256)
        self.drop2 = nn.Dropout(0.4)
        self.fc3 = nn.Linear(256, num_classes)

    def forward(self, xyz):
        """xyz: [B, 3, N] point cloud -> class logits [B, num_classes]."""
        batch = xyz.shape[0]
        coords, feats = xyz, None
        # Hierarchical abstraction down to one global descriptor.
        for stage in (self.sa1, self.sa2, self.sa3):
            coords, feats = stage(coords, feats)
        x = feats.view(batch, 1024)
        x = self.drop1(F.relu(self.bn1(self.fc1(x))))
        x = self.drop2(F.relu(self.bn2(self.fc2(x))))
        return self.fc3(x)
|
{"hexsha": "13ca4e6c4202ca7697c095ec6d3209646ed79c6c", "size": 15423, "ext": "py", "lang": "Python", "max_stars_repo_path": "baselines/model/pointconv.py", "max_stars_repo_name": "code-roamer/IF-Defense", "max_stars_repo_head_hexsha": "4e2462b66fa1eac90cfbf61fa0dc635d223fdf2f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 36, "max_stars_repo_stars_event_min_datetime": "2020-10-07T05:52:15.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-11T03:05:32.000Z", "max_issues_repo_path": "baselines/model/pointconv.py", "max_issues_repo_name": "code-roamer/IF-Defense", "max_issues_repo_head_hexsha": "4e2462b66fa1eac90cfbf61fa0dc635d223fdf2f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 9, "max_issues_repo_issues_event_min_datetime": "2021-01-04T02:11:36.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-23T16:21:59.000Z", "max_forks_repo_path": "baselines/model/pointconv.py", "max_forks_repo_name": "Wuziyi616/IF-Defense", "max_forks_repo_head_hexsha": "4b1d69d03d76e8d5ca1b4d45f81a8c9c60791263", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2020-11-29T02:13:55.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-06T08:19:16.000Z", "avg_line_length": 36.4609929078, "max_line_length": 108, "alphanum_fraction": 0.6020229527, "include": true, "reason": "import numpy", "num_tokens": 4379}
|
import numpy as np
import scipy.spatial.distance as d
import matplotlib.pyplot as plt
# Helper Functions
def qsort(a, i):
    """Return a new list with the items of `a` sorted (stably, ascending)
    by their i-th element."""
    return sorted(a, key=lambda item: item[i])
def search(a, pos, value_start, value_end):
    '''
    Collect the items x of `a` (assumed sorted ascending on index `pos`)
    with value_start <= x[pos] < value_end.

    Relies on the ordering to stop early: once an item reaches value_end,
    nothing later can fall inside the window.
    (Cleanup: removed a dead counter variable and a redundant empty-list
    guard from the original.)
    '''
    matches = []
    for x in a:
        if x[pos] >= value_end:
            break  # sorted input: all remaining items are out of the window
        if x[pos] >= value_start:
            matches.append(x)
    return matches
def density(arr, depth, width, points_d, points_w):
    '''
    Split the region into a grid of rectangles and count the points in each.

    arr: points sorted by their depth coordinate (index 0).
    depth, width: image dimensions.
    points_d, points_w: number of grid lines along depth and width.
    Returns a flat list of per-cell counts (depth-major order).
    '''
    depth_edges = np.linspace(0, depth, points_d, endpoint=True).astype(int)
    width_edges = np.linspace(0, width, points_w, endpoint=True).astype(int)
    counts = []
    for d_lo, d_hi in zip(depth_edges[:-1], depth_edges[1:]):
        # Slice out this depth band, then order it by width for the inner scan.
        band = qsort(search(arr, 0, d_lo, d_hi), 1)
        for w_lo, w_hi in zip(width_edges[:-1], width_edges[1:]):
            counts.append(len(search(band, 1, w_lo, w_hi)))
    return counts
def hist(array, block, title, xlab, ylab):
    '''
    Plot a bar-style histogram of `array` with `block` bins, using the
    given title and axis labels; shows the figure, then returns the bar
    container from a second, identical bar call (as the original did).
    '''
    counts, edges = np.histogram(array, bins=block)
    bar_width = 0.7 * (edges[1] - edges[0])
    centers = (edges[:-1] + edges[1:]) / 2
    plt.bar(centers, counts, align='center', width=bar_width)
    plt.title(title)
    plt.xlabel(xlab)
    plt.ylabel(ylab)
    plt.show()
    return plt.bar(centers, counts, align='center', width=bar_width)
def force_init_hex(n, radius, limit_x, limit_y):
    """Seed n centres on a hexagonal-like lattice: rows spaced 2*radius
    apart, with every other row shifted by one radius in x (the first row
    is shifted).  Whole lattice sweeps are generated until at least n
    points exist; the result is truncated to exactly n."""
    xs = np.arange(radius, limit_x, 2 * radius)
    ys = np.arange(radius, limit_y, 2 * radius)
    centres = []
    offset = radius  # toggles between radius and 0 after every row
    count = 0
    while count < n:
        for y in ys:
            for x in xs:
                centres.append([float(x + offset), float(y)])
                count += 1
            offset = 0. if offset == radius else radius
    return np.array(centres[:n])
def force_init(n, radius, limit_x, limit_y):
    """Seed n centres on a square lattice with spacing 2*radius, generating
    whole lattice sweeps until at least n points exist, then truncating to
    exactly n."""
    xs = np.arange(radius, limit_x, 2 * radius)
    ys = np.arange(radius, limit_y, 2 * radius)
    centres = []
    while len(centres) < n:
        for y in ys:
            for x in xs:
                centres.append([float(x), float(y)])
    return np.array(centres[:n])
def entropy(centers):
    '''
    Estimate a natural length scale by sampling N random pairs of centres
    and measuring their separations; the "entropy" is the std. deviation of
    those distances normalised by their mean.

    Returns (list of sampled distances, entropy).
    '''
    N = len(centers)
    samples = []
    # Same RNG call pattern as before: two randint draws per sample.
    for _ in range(N):
        i1 = np.random.randint(0, N)
        i2 = np.random.randint(0, N)
        sq = (centers[i1][0] - centers[i2][0]) ** 2 \
            + (centers[i1][1] - centers[i2][1]) ** 2
        samples.append(sq ** 0.5)
    mean_dist = float(sum(samples)) / float(len(samples))
    return samples, np.std(samples) / mean_dist
def profile(arr, depth, j):
    '''
    Sliding-window density profile along the j-th axis (0 = x, 1 = y).

    arr: points sorted along axis j.
    depth: extent of the region along that axis.
    Returns (window centre positions, point counts per window).
    Note: positions are recorded AFTER the step increment, exactly as the
    original did, so they are offset by one step from the window start.
    '''
    step = 1.
    window = 10.
    positions = []
    counts = []
    start = 0.
    # Slide a `window`-wide band across the axis in unit steps.
    while start + window < depth:
        counts.append(len(search(arr, j, start, start + window)))
        start += step
        positions.append(start + window / 2.)
    return positions, counts
|
{"hexsha": "4a9a2e4228ad6580d642558e7729f7daaf1ad65c", "size": 3493, "ext": "py", "lang": "Python", "max_stars_repo_path": "helpers.py", "max_stars_repo_name": "DiogoRibeiro7/Physics", "max_stars_repo_head_hexsha": "f10e2df956055f498643490744131c34dbaccdc4", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "helpers.py", "max_issues_repo_name": "DiogoRibeiro7/Physics", "max_issues_repo_head_hexsha": "f10e2df956055f498643490744131c34dbaccdc4", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "helpers.py", "max_forks_repo_name": "DiogoRibeiro7/Physics", "max_forks_repo_head_hexsha": "f10e2df956055f498643490744131c34dbaccdc4", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.496350365, "max_line_length": 92, "alphanum_fraction": 0.6527340395, "include": true, "reason": "import numpy,import scipy", "num_tokens": 1023}
|
import numpy as np
import os
import torch
from torch import nn
from blocks import LinearBlock, Conv2dBlock, ResBlocks, ActFirstResBlock
from vgg_tro_channel3_modi import vgg19_bn
from recognizer.models.encoder_vgg import Encoder as rec_encoder
from recognizer.models.decoder import Decoder as rec_decoder
from recognizer.models.seq2seq import Seq2Seq as rec_seq2seq
from recognizer.models.attention import locationAttention as rec_attention
from load_data import OUTPUT_MAX_LEN, IMG_HEIGHT, IMG_WIDTH, vocab_size, index2letter, num_tokens
import cv2
gpu = torch.device('cuda')
def normalize(tar):
    """Linearly rescale an array to [0, 255] and cast to uint8.

    BUG FIX: a constant input (max == min) previously divided by zero,
    producing NaN/inf before the uint8 cast; it now maps to all zeros.
    """
    lo, hi = tar.min(), tar.max()
    if hi == lo:
        # Degenerate (constant) image: avoid 0/0 -> return black.
        return np.zeros_like(tar, dtype=np.uint8)
    tar = (tar - lo) / (hi - lo)
    tar = tar * 255
    return tar.astype(np.uint8)
def fine(label_list):
    """Wrap a scalar label in a single-item list; pass lists through
    unchanged. (Idiom fix: `isinstance` instead of `type(x) != type([])`.)"""
    if isinstance(label_list, list):
        return label_list
    return [label_list]
def write_image(imga, imgb, xg_list, pred_list, label_list, title):
    """Assemble a diagnostic grid and write it to imgs/<title>.png.

    Per batch item, stacks vertically: style image, target image, then for
    each generated output a (generated image, predicted text, ground-truth
    text) triple; batch items are stacked horizontally.

    NOTE(review): mutates xg_list / pred_list / label_list in place
    (tensors become numpy arrays) — callers must not reuse them afterwards.
    """
    folder = 'imgs'
    if not os.path.exists(folder):
        os.makedirs(folder)
    batch_size = imga.shape[0]
    imga = imga.cpu().numpy()
    imgb = imgb.detach().cpu().numpy()
    for i in range(len(xg_list)):
        xg_list[i] = xg_list[i].cpu().numpy()
    for i in range(len(pred_list)):
        # Greedy decode: argmax over the vocabulary dimension.
        pred_list[i] = torch.topk(pred_list[i], 1, dim=-1)[1].squeeze(-1)  # b,t,83 -> b,t,1 -> b,t
        pred_list[i] = pred_list[i].cpu().numpy()
    for i in range(len(label_list)):
        label_list[i] = label_list[i].cpu().numpy()
    outs = list()
    for i in range(batch_size):
        imgaa = imga[i].squeeze(0)
        imgaa = normalize(imgaa)
        imgbb = imgb[i].squeeze(0)
        imgbb = normalize(imgbb)
        new_xg = []
        new_pred = []
        new_label = []
        for xg in xg_list:
            new_xg.append(normalize(xg[i].squeeze(0)))
        for pred in pred_list:
            tmp_pred = fine(pred[i].tolist())
            # Strip the special (non-letter) token ids before decoding.
            for j in range(num_tokens):
                tmp_pred = list(filter(lambda x: x!=j, tmp_pred))
            tmp_pred = ''.join([index2letter[c-num_tokens] for c in tmp_pred])
            # Render the decoded text on a blank canvas the size of the image.
            pred_img = np.zeros_like(imgaa)
            cv2.putText(pred_img, tmp_pred, (5, 55), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255,255), 2)
            new_pred.append(pred_img)
        for label in label_list:
            tmp_label = fine(label[i].tolist())
            for j in range(num_tokens):
                tmp_label = list(filter(lambda x: x!=j, tmp_label))
            tmp_label = ''.join([index2letter[c-num_tokens] for c in tmp_label])
            label_img = np.zeros_like(imgaa)
            cv2.putText(label_img, tmp_label, (5, 55), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255,255), 2)
            new_label.append(label_img)
        # Stack (generated, prediction, label) triples vertically.
        triples = zip(new_xg, new_pred, new_label)
        final_triples = []
        for triple in triples:
            final_triples.append(np.vstack(triple))
        final_triples_out = np.vstack(final_triples)
        out = np.vstack([imgaa, imgbb, final_triples_out])
        out = 255 - out  # invert: white background, dark ink
        outs.append(out)
    final_out = np.hstack(outs)
    cv2.imwrite(folder+'/'+title+'.png', final_out)
def assign_adain_params(adain_params, model):
    """Distribute a flat batch of AdaIN parameters over every
    AdaptiveInstanceNorm2d layer in `model`, consuming the tensor
    left-to-right as [mean_0, std_0, mean_1, std_1, ...]."""
    remaining = adain_params
    for module in model.modules():
        if module.__class__.__name__ != "AdaptiveInstanceNorm2d":
            continue
        nf = module.num_features
        # mean -> bias, std -> weight (AdaIN convention).
        module.bias = remaining[:, :nf].contiguous().view(-1)
        module.weight = remaining[:, nf:2 * nf].contiguous().view(-1)
        if remaining.size(1) > 2 * nf:
            remaining = remaining[:, 2 * nf:]
def get_num_adain_params(model):
    """Count the AdaIN parameters `model` expects: 2 * num_features
    (mean and std) for every AdaptiveInstanceNorm2d layer."""
    return sum(2 * m.num_features for m in model.modules()
               if m.__class__.__name__ == "AdaptiveInstanceNorm2d")
class DisModel(nn.Module):
    """Discriminator: a strided res-block trunk followed by one conv that
    collapses the feature map to `final_size` real/fake logits."""

    def __init__(self):
        super(DisModel, self).__init__()
        self.n_layers = 6
        self.final_size = 1024
        nf = 16
        trunk = [Conv2dBlock(1, nf, 7, 1, 3,
                             pad_type='reflect', norm='none', activation='none')]
        # Each stage: two activation-first res-blocks, then 2x downsampling.
        for _ in range(self.n_layers - 1):
            nf_out = np.min([nf * 2, 1024])
            trunk += [ActFirstResBlock(nf, nf, None, 'lrelu', 'none'),
                      ActFirstResBlock(nf, nf_out, None, 'lrelu', 'none'),
                      nn.ReflectionPad2d(1),
                      nn.AvgPool2d(kernel_size=3, stride=2)]
            nf = np.min([nf * 2, 1024])
        nf_out = np.min([nf * 2, 1024])
        trunk += [ActFirstResBlock(nf, nf, None, 'lrelu', 'none'),
                  ActFirstResBlock(nf, nf_out, None, 'lrelu', 'none')]
        self.cnn_f = nn.Sequential(*trunk)
        # Head conv sized to swallow the remaining spatial extent entirely.
        self.cnn_c = nn.Sequential(
            Conv2dBlock(nf_out, self.final_size,
                        IMG_HEIGHT // (2 ** (self.n_layers - 1)),
                        IMG_WIDTH // (2 ** (self.n_layers - 1)) + 1,
                        norm='none', activation='lrelu', activation_first=True))
        self.bce = nn.BCEWithLogitsLoss()

    def forward(self, x):
        feat = self.cnn_f(x)
        out = self.cnn_c(feat)
        # b,1024; batch may be 1, so squeeze only the two spatial dims.
        return out.squeeze(-1).squeeze(-1)

    def calc_dis_fake_loss(self, input_fake):
        """Discriminator-side BCE: fakes should score 0."""
        target = torch.zeros(input_fake.shape[0], self.final_size).to(gpu)
        return self.bce(self.forward(input_fake), target)

    def calc_dis_real_loss(self, input_real):
        """Discriminator-side BCE: reals should score 1."""
        target = torch.ones(input_real.shape[0], self.final_size).to(gpu)
        return self.bce(self.forward(input_real), target)

    def calc_gen_loss(self, input_fake):
        """Generator-side BCE: fakes should be scored as real (1)."""
        target = torch.ones(input_fake.shape[0], self.final_size).to(gpu)
        return self.bce(self.forward(input_fake), target)
class WriterClaModel(nn.Module):
    """Writer classifier: same res-block trunk shape as the discriminator,
    but the head emits `num_writers` logits and forward() returns the
    cross-entropy loss against the writer ids directly."""

    def __init__(self, num_writers):
        super(WriterClaModel, self).__init__()
        self.n_layers = 6
        nf = 16
        trunk = [Conv2dBlock(1, nf, 7, 1, 3,
                             pad_type='reflect', norm='none', activation='none')]
        # Each stage: two activation-first res-blocks, then 2x downsampling.
        for _ in range(self.n_layers - 1):
            nf_out = np.min([nf * 2, 1024])
            trunk += [ActFirstResBlock(nf, nf, None, 'lrelu', 'none'),
                      ActFirstResBlock(nf, nf_out, None, 'lrelu', 'none'),
                      nn.ReflectionPad2d(1),
                      nn.AvgPool2d(kernel_size=3, stride=2)]
            nf = np.min([nf * 2, 1024])
        nf_out = np.min([nf * 2, 1024])
        trunk += [ActFirstResBlock(nf, nf, None, 'lrelu', 'none'),
                  ActFirstResBlock(nf, nf_out, None, 'lrelu', 'none')]
        self.cnn_f = nn.Sequential(*trunk)
        self.cnn_c = nn.Sequential(
            Conv2dBlock(nf_out, num_writers,
                        IMG_HEIGHT // (2 ** (self.n_layers - 1)),
                        IMG_WIDTH // (2 ** (self.n_layers - 1)) + 1,
                        norm='none', activation='lrelu', activation_first=True))
        self.cross_entropy = nn.CrossEntropyLoss()

    def forward(self, x, y):
        feat = self.cnn_f(x)
        logits = self.cnn_c(feat)  # b,num_writers,1,1
        return self.cross_entropy(logits.squeeze(-1).squeeze(-1), y)
'''VGG19_IN tro'''
class ImageEncoder(nn.Module):
    """Image/style encoder wrapping the modified 3-channel VGG19-BN."""

    def __init__(self):
        super(ImageEncoder, self).__init__()
        # vgg19_bn(False): constructed without loading pretrained weights here.
        self.model = vgg19_bn(False)
        self.output_dim = 512  # advertised channel width of the encoder output

    def forward(self, x):
        return self.model(x)
class GenModel_FC(nn.Module):
    """Full generator: image/style encoder + recognizer-shared text/content
    encoder + AdaIN decoder, glued together by a 1024->512 mixing layer."""

    def __init__(self, text_max_len):
        super(GenModel_FC, self).__init__()
        self.enc_image = ImageEncoder().to(gpu)
        self.rec = RecModel().to(gpu)
        self.enc_text = self.rec.enc  # content encoder shared with recognizer
        self.dec = Decoder().to(gpu)
        self.linear_mix = nn.Linear(1024, 512)

    def decode(self, content, adain_params):
        """Render an image from content features after loading the style
        (AdaIN) parameters into the decoder."""
        assign_adain_params(adain_params, self.dec)
        return self.dec(content)

    def mix(self, feat_xs, feat_embed):
        """Fuse style and content maps channel-wise and project back to
        512 channels: two (b,512,8,27) maps -> (b,512,8,27)."""
        stacked = torch.cat([feat_xs, feat_embed], dim=1)  # b,1024,8,27
        projected = self.linear_mix(stacked.permute(0, 2, 3, 1))  # b,8,27,512
        return projected.permute(0, 3, 1, 2)
class RecModel(nn.Module):
    """Seq2seq handwriting recognizer: VGG encoder + attention decoder."""

    def __init__(self, pretrain=False):
        super(RecModel, self).__init__()
        hidden_size_enc = hidden_size_dec = 512
        embed_size = 60
        self.enc = rec_encoder(hidden_size_enc, IMG_HEIGHT, IMG_WIDTH, True, None, False).to(gpu)
        self.dec = rec_decoder(hidden_size_dec, embed_size, vocab_size, rec_attention, None).to(gpu)
        self.seq2seq = rec_seq2seq(self.enc, self.dec, OUTPUT_MAX_LEN, vocab_size).to(gpu)
        if pretrain:
            model_file = 'recognizer/save_weights/seq2seq-72.model_5.79.bak'
            print('Loading RecModel', model_file)
            self.seq2seq.load_state_dict(torch.load(model_file))

    def forward(self, inp, label, img_width, mode):
        """Run recognition; `mode` selects image vs feature input.
        Returns logits permuted from t,b,83 to b,t,83."""
        self.seq2seq.train()
        output, attn_weights = self.seq2seq(inp, label, img_width, mode,
                                            teacher_rate=False, train=False)
        return output.permute(1, 0, 2)
class TextEncoder_FC(nn.Module):
    """Text/content encoder: character embedding + FC stack producing a flat
    code, plus a spatially tiled 512-channel content feature map."""

    def __init__(self, text_max_len):
        super(TextEncoder_FC, self).__init__()
        embed_size = 64
        self.embed = nn.Embedding(vocab_size, embed_size)
        self.fc = nn.Sequential(
            nn.Linear(text_max_len * embed_size, 1024),
            nn.BatchNorm1d(1024),
            nn.ReLU(inplace=True),
            nn.Linear(1024, 2048),
            nn.BatchNorm1d(2048),
            nn.ReLU(inplace=True),
            nn.Linear(2048, 4096)
        )
        # Projects each character embedding to the decoder's channel width.
        self.linear = nn.Linear(embed_size, 512)

    def forward(self, x):
        embedded = self.embed(x)  # b,t,embed
        flat_code = self.fc(embedded.reshape(embedded.shape[0], -1))
        # Build the spatial content map from per-character projections.
        projected = self.linear(embedded)  # b,t,512
        steps = projected.shape[1]
        # NOTE: 3 and 8 are hard-coded to match the 8x27 image feature map
        # (27 columns / 9 characters = 3 repeats per character).
        repeated = [torch.cat([projected[:, i:i + 1]] * 3, dim=1)
                    for i in range(steps)]
        rows = torch.cat(repeated, dim=1)          # b,3*t,512
        rows = rows.permute(0, 2, 1).unsqueeze(2)  # b,512,1,3*t
        final_res = torch.cat([rows] * 8, dim=2)   # b,512,8,3*t
        return flat_code, final_res
class Decoder(nn.Module):
    """AdaIN decoder: res-blocks at full width, `ups` rounds of 2x upsample
    + 5x5 conv (each halving the channel count), then a tanh output conv."""

    def __init__(self, ups=3, n_res=2, dim=512, out_dim=1, res_norm='adain', activ='relu', pad_type='reflect'):
        super(Decoder, self).__init__()
        stages = [ResBlocks(n_res, dim, res_norm, activ, pad_type=pad_type)]
        width = dim
        for _ in range(ups):
            stages.append(nn.Upsample(scale_factor=2))
            stages.append(Conv2dBlock(width, width // 2, 5, 1, 2,
                                      norm='in', activation=activ,
                                      pad_type=pad_type))
            width //= 2
        stages.append(Conv2dBlock(width, out_dim, 7, 1, 3,
                                  norm='none', activation='tanh',
                                  pad_type=pad_type))
        self.model = nn.Sequential(*stages)

    def forward(self, x):
        return self.model(x)
class MLP(nn.Module):
    """Plain fully-connected stack of n_blk LinearBlocks; the final block is
    bare (no norm/activation).  Flattens its input before the first layer."""

    def __init__(self, in_dim=64, out_dim=4096, dim=256, n_blk=3, norm='none', activ='relu'):
        super(MLP, self).__init__()
        blocks = [LinearBlock(in_dim, dim, norm=norm, activation=activ)]
        blocks += [LinearBlock(dim, dim, norm=norm, activation=activ)
                   for _ in range(n_blk - 2)]
        blocks.append(LinearBlock(dim, out_dim, norm='none', activation='none'))
        self.model = nn.Sequential(*blocks)

    def forward(self, x):
        return self.model(x.view(x.size(0), -1))
|
{"hexsha": "49a1d76149f594456a840c5a413e56508704e691", "size": 12529, "ext": "py", "lang": "Python", "max_stars_repo_path": "modules_tro.py", "max_stars_repo_name": "omni-us/ContentDistillation_HTR", "max_stars_repo_head_hexsha": "be5af3cbc3a49dc5febf9b57480257faa42c7272", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2020-09-06T15:21:14.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-03T11:14:48.000Z", "max_issues_repo_path": "modules_tro.py", "max_issues_repo_name": "omni-us/ContentDistillation_HTR", "max_issues_repo_head_hexsha": "be5af3cbc3a49dc5febf9b57480257faa42c7272", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-10-09T05:31:43.000Z", "max_issues_repo_issues_event_max_datetime": "2020-10-09T05:31:43.000Z", "max_forks_repo_path": "modules_tro.py", "max_forks_repo_name": "omni-us/ContentDistillation_HTR", "max_forks_repo_head_hexsha": "be5af3cbc3a49dc5febf9b57480257faa42c7272", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-11-12T06:32:50.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-12T06:32:50.000Z", "avg_line_length": 38.9099378882, "max_line_length": 126, "alphanum_fraction": 0.5786575146, "include": true, "reason": "import numpy", "num_tokens": 3320}
|
# -*- coding: utf-8 -*-
# pylint: skip-file
"""reV SAM unit test module
"""
import os
from pkg_resources import get_distribution
from packaging import version
import pytest
import numpy as np
import pandas as pd
import warnings
from reV.SAM.defaults import (DefaultPvWattsv5, DefaultPvWattsv7,
DefaultWindPower)
from reV.SAM.generation import PvWattsv5, PvWattsv7
from reV import TESTDATADIR
from reV.config.project_points import ProjectPoints
from reV.SAM.version_checker import PySamVersionChecker
from reV.utilities.exceptions import PySAMVersionWarning
from reV.utilities.exceptions import InputError
from rex.renewable_resource import NSRDB
@pytest.fixture
def res():
    """Initialize a SAM resource object to test SAM functions on."""
    res_file = TESTDATADIR + '/nsrdb/ri_100_nsrdb_2012.h5'
    rev2_points = TESTDATADIR + '/project_points/ri.csv'
    # Two identical SAM configs, keyed 'sam_param_0' / 'sam_param_1'.
    sam_files = [TESTDATADIR + '/SAM/naris_pv_1axis_inv13.json',
                 TESTDATADIR + '/SAM/naris_pv_1axis_inv13.json']
    sam_files = {'sam_param_{}'.format(i): k for i, k in
                 enumerate(sam_files)}
    pp = ProjectPoints(rev2_points, sam_files, 'pv')
    # Preload the NSRDB resource for exactly the sites in the project points.
    res = NSRDB.preload_SAM(res_file, pp.sites)
    return res
def test_res_length(res):
    """Test the method to ensure resource array length with truncation."""
    for res_df, meta in res:
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            res_dropped = PvWattsv5.ensure_res_len(res_df.values, res_df.index)
        break
    compare = np.allclose(res_dropped[:9000, :], res_df.values[:9000, :])
    # BUG FIX: `compare` was computed but never asserted, so this test could
    # never fail regardless of what ensure_res_len returned.
    assert compare
def test_leap_year(res):
    """Check that drop_leap leaves the trailing (non-leap-day) rows intact."""
    for res_df, meta in res:
        res_dropped = PvWattsv5.drop_leap(res_df)
        break
    # Removing Feb 29 only shifts early rows; the last 9000 must match.
    assert np.allclose(res_dropped.iloc[-9000:, :].values,
                       res_df.iloc[-9000:, :].values)
def test_leap_year_freq():
    """
    Test ensure_res_len with leap year data
    """
    # Full 2012 hourly index (8784 steps): output must truncate to 8760.
    time_index = pd.date_range('2012', '2013', freq='h', closed='left')
    arr = np.arange(len(time_index) * 10).reshape(len(time_index), 10)
    assert np.allclose(arr[:8760], PvWattsv5.ensure_res_len(arr, time_index))
    # With Feb 29 removed (8760 steps), data must pass through unchanged.
    leap_day = (time_index.month == 2) & (time_index.day == 29)
    time_index = time_index[~leap_day]
    arr = np.arange(len(time_index) * 10).reshape(len(time_index), 10)
    assert np.allclose(arr, PvWattsv5.ensure_res_len(arr, time_index))
@pytest.mark.parametrize('site_index', range(5))
def test_PV_lat_tilt(res, site_index):
    """Test the method to set tilt based on latitude."""
    rev2_points = TESTDATADIR + '/project_points/ri.csv'
    sam_files = [TESTDATADIR + '/SAM/naris_pv_1axis_inv13.json',
                 TESTDATADIR + '/SAM/naris_pv_1axis_inv13.json']
    sam_files = {'sam_param_{}'.format(i): k for i, k in
                 enumerate(sam_files)}
    pp = ProjectPoints(rev2_points, sam_files, 'pv')
    for i, [res_df, meta] in enumerate(res):
        if i == site_index:
            # get SAM inputs from project_points based on the current site
            site = res_df.name
            config, inputs = pp[site]
            # 'latitude' is a sentinel asking the SAM wrapper to substitute
            # the site's actual latitude as the panel tilt.
            inputs['tilt'] = 'latitude'
            # iterate through requested sites.
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                sim = PvWattsv5(resource=res_df, meta=meta,
                                sam_sys_inputs=inputs,
                                output_request=('cf_mean',))
            break
        else:
            pass
    # The sentinel must have been replaced by the site's numeric latitude.
    assert sim.sam_sys_inputs['tilt'] == meta['latitude']
@pytest.mark.parametrize('dt', ('1h', '30min', '5min'))
def test_time_interval(dt):
    """Test the method to get the 'time interval' from the time index obj."""
    # Expected steps-per-hour for each frequency.
    expected = {'1h': 1, '30min': 2, '5min': 12}
    ti = pd.date_range('1-1-{y}'.format(y=2012), '1-1-{y}'.format(y=2013),
                       freq=dt)[:-1]
    assert PvWattsv5.get_time_interval(ti) == expected[dt]
def test_pysam_version_checker_pv():
    """Test that the pysam version checker passes through PV config untouched.
    """
    pv_config = {'gcr': 0.4, 'system_capacity': 1}
    with pytest.warns(None) as record:
        sam_sys_inputs = PySamVersionChecker.run('pvwattsv5', pv_config)
    # No warnings emitted, and both original keys survive unchanged.
    assert not any(record)
    for key in ('gcr', 'system_capacity'):
        assert key in sam_sys_inputs
def test_pysam_version_checker_wind():
    """Check that the pysam version checker recognizes outdated config keys
    from pysam v1 and fixes them and raises warning.
    """
    wind_config = {'wind_farm_losses_percent': 10, 'system_capacity': 1}
    pysam_version = str(get_distribution('nrel-pysam')).split(' ')[1]
    pysam_version = version.parse(pysam_version)
    if pysam_version > version.parse('2.1.0'):
        with pytest.warns(PySAMVersionWarning) as record:
            sam_sys_inputs = PySamVersionChecker.run('windpower', wind_config)
            assert 'old SAM v1 keys' in str(record[0].message)
            assert 'turb_generic_loss' in sam_sys_inputs
            assert 'system_capacity' in sam_sys_inputs
            # BUG FIX: this previously asserted the bare string
            # 'wind_farm_losses_percent', which is always truthy. The intent
            # is to verify the outdated v1 key was removed by the checker.
            assert 'wind_farm_losses_percent' not in sam_sys_inputs
def test_nan_resource():
    """Test that the reV-SAM interface will raise an error if there is NaN
    data in the resource input."""
    res_file = TESTDATADIR + '/nsrdb/ri_100_nsrdb_2012.h5'
    sam_files = TESTDATADIR + '/SAM/naris_pv_1axis_inv13.json'
    pp = ProjectPoints(0, sam_files, 'pv')
    res = NSRDB.preload_SAM(res_file, pp.sites)
    for res_df, meta in res:
        # Corrupt a single resource value with NaN for this site.
        res_df.iloc[10, 0] = np.nan
        site = res_df.name
        _, inputs = pp[site]
        # Constructing the SAM object must reject the NaN input.
        with pytest.raises(InputError):
            PvWattsv7(resource=res_df, meta=meta, sam_sys_inputs=inputs)
def test_default_pvwattsv5():
    """Test default pvwattsv5 execution and compare baseline annual energy"""
    sim = DefaultPvWattsv5.default()
    # Annual energy pinned to the nearest 10 kWh.
    assert round(sim.Outputs.annual_energy, -1) == 6830
def test_default_pvwattsv7():
    """Test default pvwattsv7 execution and compare baseline annual energy"""
    sim = DefaultPvWattsv7.default()
    # Annual energy pinned to the nearest 10 kWh.
    assert round(sim.Outputs.annual_energy, -1) == 6940
def test_default_windpower():
    """Test default windpower execution and compare baseline annual energy"""
    sim = DefaultWindPower.default()
    # Annual energy pinned to the nearest 10 kWh.
    assert round(sim.Outputs.annual_energy, -1) == 201595970
def execute_pytest(capture='all', flags='-rapP'):
    """Execute module as pytest with detailed summary report.

    Parameters
    ----------
    capture : str
        Log or stdout/stderr capture option. ex: log (only logger),
        all (includes stdout/stderr)
    flags : str
        Which tests to show logs and results for.
    """
    module = os.path.basename(__file__)
    args = ['-q', '--show-capture={}'.format(capture), module, flags]
    pytest.main(args)
# Allow running this test file directly (outside a pytest invocation).
if __name__ == '__main__':
    execute_pytest()
|
{"hexsha": "db41dc25391f56460fe23151773fc1d8fed87f59", "size": 7014, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_sam.py", "max_stars_repo_name": "pjstanle/reV", "max_stars_repo_head_hexsha": "c22c620749747022a65d2a98a99beef804849ee6", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 37, "max_stars_repo_stars_event_min_datetime": "2020-03-04T05:24:23.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-24T14:39:49.000Z", "max_issues_repo_path": "tests/test_sam.py", "max_issues_repo_name": "pjstanle/reV", "max_issues_repo_head_hexsha": "c22c620749747022a65d2a98a99beef804849ee6", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 174, "max_issues_repo_issues_event_min_datetime": "2020-03-03T18:18:53.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-08T22:00:40.000Z", "max_forks_repo_path": "tests/test_sam.py", "max_forks_repo_name": "pjstanle/reV", "max_forks_repo_head_hexsha": "c22c620749747022a65d2a98a99beef804849ee6", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 16, "max_forks_repo_forks_event_min_datetime": "2020-08-10T13:43:36.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-19T22:43:36.000Z", "avg_line_length": 34.2146341463, "max_line_length": 79, "alphanum_fraction": 0.6662389507, "include": true, "reason": "import numpy", "num_tokens": 1809}
|
import numpy as np
import torch
from torchvision.transforms import ToPILImage, ToTensor
from eda.image.transforms.compose import Compose
from eda.image.transforms.transform import EdaTransform
from eda.image.utils import default_loader
class Mixup(EdaTransform):
    """Mixup augmentation: blend the input image with a candidate image
    drawn from the same batch, and combine their labels.

    Parameters
    ----------
    name : str, optional
        Transform name, forwarded to ``EdaTransform``.
    prob : float
        Application probability, forwarded to ``EdaTransform``.
    level : int
        Magnitude level, forwarded to ``EdaTransform``.
    alpha : float
        Beta-distribution parameter; when ``alpha > 0`` the mix ratio is
        drawn from ``Beta(alpha, alpha)``, otherwise it is fixed at 1.0
        (no actual mixing).
    same_class_ratio : float
        If >= 0, with this probability the candidate is forced to have
        the same label as the input (otherwise a different label).
        A negative value disables the constraint.
    prob_label : bool
        If True, return the soft (interpolated) label; otherwise pick one
        of the two hard labels at random, weighted by the mix ratio.
    """

    def __init__(
        self,
        name=None,
        prob=1.0,
        level=0,
        alpha=1.0,
        same_class_ratio=-1.0,
        prob_label=False,
    ):
        self.alpha = alpha
        self.same_class_ratio = same_class_ratio
        self.prob_label = prob_label
        super().__init__(name, prob, level)

    def transform(self, pil_img, label, **kwargs):
        """Mix ``pil_img`` with a randomly chosen batch candidate.

        Relies on the caller passing ``X_dict``/``Y_dict`` (batch data),
        ``transforms`` (the full pipeline) and ``idx`` (this transform's
        position in the pipeline) through ``kwargs``.
        Returns the mixed PIL image and the combined label.
        """
        X_dict = kwargs["X_dict"]
        Y_dict = kwargs["Y_dict"]
        transforms = kwargs["transforms"]
        # Mix ratio ~ Beta(alpha, alpha); alpha <= 0 disables mixing.
        if self.alpha > 0.0:
            mix_ratio = np.random.beta(self.alpha, self.alpha)
        else:
            mix_ratio = 1.0
        # Pick a random candidate index from whichever key the batch carries
        # (pre-loaded images vs. image paths).
        if "image" in X_dict:
            idx = np.random.randint(len(X_dict["image"]))
            tot_cnt = len(X_dict["image"])
        else:
            idx = np.random.randint(len(X_dict["image_path"]))
            tot_cnt = len(X_dict["image_path"])
        if self.same_class_ratio >= 0:
            # Decide whether the candidate must share the input's class,
            # then scan a random permutation for the first match. If none
            # is found, the random idx chosen above is kept.
            same_class = np.random.rand() <= self.same_class_ratio
            for i in np.random.permutation(tot_cnt):
                if same_class == torch.equal(Y_dict["labels"][i], label):
                    idx = i
                    break
        # Calc all transforms before mixup
        prev_transforms = transforms[: kwargs["idx"]]
        # Apply all prev mixup transforms to the candidate so both images
        # are at the same point in the pipeline before blending.
        if "image" not in X_dict and "image_path" in X_dict:
            cand_img, cand_label = Compose(prev_transforms)(
                default_loader(X_dict["image_path"][idx]),
                Y_dict["labels"][idx],
                **kwargs,
            )
        else:
            cand_img, cand_label = Compose(prev_transforms)(
                X_dict["image"][idx], Y_dict["labels"][idx], **kwargs
            )
        # Convex combination in tensor space, then back to PIL.
        mixup_img = ToPILImage()(
            mix_ratio * ToTensor()(pil_img) + (1 - mix_ratio) * ToTensor()(cand_img)
        )
        if label is not None:
            if self.prob_label:
                mixup_label = mix_ratio * label + (1 - mix_ratio) * cand_label
            else:
                # Hard label: sample one of the two, weighted by mix_ratio.
                mixup_label = label if np.random.random() < mix_ratio else cand_label
        else:
            mixup_label = label

        return mixup_img, mixup_label

    def __repr__(self):
        return (
            f"<Transform ({self.name}), prob={self.prob}, level={self.level}, "
            f"alpha={self.alpha}, same_class_ratio={self.same_class_ratio}, "
            f"prob_label={self.prob_label}>"
        )
|
{"hexsha": "4836f8080aa377a49620063134bca937fd4cd8aa", "size": 2730, "ext": "py", "lang": "Python", "max_stars_repo_path": "data_augmentation/eda/image/transforms/mixup.py", "max_stars_repo_name": "simran-arora/emmental-tutorials", "max_stars_repo_head_hexsha": "249a82a57be58e960408a45e2e0daa72980d210a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "data_augmentation/eda/image/transforms/mixup.py", "max_issues_repo_name": "simran-arora/emmental-tutorials", "max_issues_repo_head_hexsha": "249a82a57be58e960408a45e2e0daa72980d210a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "data_augmentation/eda/image/transforms/mixup.py", "max_forks_repo_name": "simran-arora/emmental-tutorials", "max_forks_repo_head_hexsha": "249a82a57be58e960408a45e2e0daa72980d210a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.1176470588, "max_line_length": 85, "alphanum_fraction": 0.5684981685, "include": true, "reason": "import numpy", "num_tokens": 630}
|
/* Copyright 2016-2017 Joaquin M Lopez Munoz.
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* See http://www.boost.org/libs/poly_collection for library home page.
*/
#ifndef BOOST_POLY_COLLECTION_DETAIL_POLY_COLLECTION_HPP
#define BOOST_POLY_COLLECTION_DETAIL_POLY_COLLECTION_HPP
#if defined(_MSC_VER)
#pragma once
#endif
#include <algorithm>
#include <boost/assert.hpp>
#include <boost/iterator/iterator_adaptor.hpp>
#include <boost/poly_collection/detail/iterator_impl.hpp>
#include <boost/poly_collection/detail/is_acceptable.hpp>
#include <boost/poly_collection/detail/is_constructible.hpp>
#include <boost/poly_collection/detail/is_final.hpp>
#include <boost/poly_collection/detail/newdelete_allocator.hpp>
#include <boost/poly_collection/detail/segment.hpp>
#include <boost/poly_collection/detail/type_info_map.hpp>
#include <boost/poly_collection/exception.hpp>
#include <iterator>
#include <type_traits>
#include <typeinfo>
#include <utility>
namespace boost{
namespace poly_collection{
namespace common_impl{
/* common implementation for all polymorphic collections */
using namespace detail;
/* Generic polymorphic container: elements are stored in contiguous
 * per-type segments indexed by std::type_info. Model supplies the
 * polymorphism semantics (subtypeid, is_implementation, is_terminal,
 * nonconst_iterator); Allocator is the user-supplied allocator.
 */
template<typename Model,typename Allocator>
class poly_collection
{
  /* dynamic type of a value, as reported by the Model */
  template<typename T>
  static const std::type_info& subtypeid(const T& x)
    {return Model::subtypeid(x);}

  /* for_all<...> is void* when the whole pack SFINAE-succeeds; used to
   * constrain variadic member templates (see register_types)
   */
  template<typename...>
  struct for_all_types{using type=void*;};
  template<typename... T>
  using for_all=typename for_all_types<T...>::type;

  /* SFINAE helpers classifying a type T (or an InputIterator's value
   * type) per the Model: implementation / acceptable / terminal
   */
  template<typename T>
  struct is_implementation: /* using makes VS2015 choke, hence we derive */
    Model::template is_implementation<typename std::decay<T>::type>{};
  template<typename T>
  using enable_if_implementation=
    typename std::enable_if<is_implementation<T>::value>::type*;
  template<typename T>
  using enable_if_not_implementation=
    typename std::enable_if<!is_implementation<T>::value>::type*;
  template<typename T>
  using is_acceptable=
    detail::is_acceptable<typename std::decay<T>::type,Model>;
  template<typename T>
  using enable_if_acceptable=
    typename std::enable_if<is_acceptable<T>::value>::type*;
  template<typename T>
  using enable_if_not_acceptable=
    typename std::enable_if<!is_acceptable<T>::value>::type*;
  template<typename InputIterator>
  using enable_if_derefs_to_implementation=enable_if_implementation<
    typename std::iterator_traits<InputIterator>::value_type
  >;
  template<typename T>
  using is_terminal=
    typename Model::template is_terminal<typename std::decay<T>::type>;
  template<typename T>
  using enable_if_terminal=
    typename std::enable_if<is_terminal<T>::value>::type*;
  template<typename T>
  using enable_if_not_terminal=
    typename std::enable_if<!is_terminal<T>::value>::type*;
  template<typename InputIterator>
  using derefs_to_terminal=is_terminal<
    typename std::iterator_traits<InputIterator>::value_type
  >;
  template<typename InputIterator>
  using enable_if_derefs_to_terminal=
    typename std::enable_if<derefs_to_terminal<InputIterator>::value>::type*;
  template<typename InputIterator>
  using enable_if_derefs_to_not_terminal=
    typename std::enable_if<!derefs_to_terminal<InputIterator>::value>::type*;
  template<typename T,typename U>
  using enable_if_not_same=typename std::enable_if<
    !std::is_same<
      typename std::decay<T>::type,typename std::decay<U>::type
    >::value
  >::type*;
  template<typename T,typename U>
  using enable_if_constructible=
    typename std::enable_if<is_constructible<T,U>::value>::type*;
  template<typename T,typename U>
  using enable_if_not_constructible=
    typename std::enable_if<!is_constructible<T,U>::value>::type*;

  /* a segment stores all elements of one concrete type */
  using segment_type=detail::segment<Model,Allocator>;
  using segment_base_iterator=typename segment_type::base_iterator;
  using const_segment_base_iterator=
    typename segment_type::const_base_iterator;
  using segment_base_sentinel=typename segment_type::base_sentinel;
  using const_segment_base_sentinel=
    typename segment_type::const_base_sentinel;
  template<typename T>
  using segment_iterator=typename segment_type::template iterator<T>;
  template<typename T>
  using const_segment_iterator=
    typename segment_type::template const_iterator<T>;

  /* type_info -> segment map; segments are managed through a new/delete
   * adaptor over the user allocator rebound to segment_type
   */
  using segment_map=type_info_map<
    segment_type,
    newdelete_allocator_adaptor<
      typename std::allocator_traits<Allocator>::template
        rebind_alloc<segment_type>
    >
  >;
  using segment_map_allocator_type=typename segment_map::allocator_type;
  using segment_map_iterator=typename segment_map::iterator;
  using const_segment_map_iterator=typename segment_map::const_iterator;

public:
  /* types */

  using value_type=typename segment_type::value_type;
  using allocator_type=Allocator;
  using size_type=std::size_t;
  using difference_type=std::ptrdiff_t;
  using reference=value_type&;
  using const_reference=const value_type&;
  using pointer=typename std::allocator_traits<Allocator>::pointer;
  using const_pointer=typename std::allocator_traits<Allocator>::const_pointer;

private:
  template<typename,bool>
  friend class detail::iterator_impl;
  template<typename,typename>
  friend class detail::local_iterator_impl;

  template<bool Const>
  using iterator_impl=detail::iterator_impl<poly_collection,Const>;
  template<typename BaseIterator>
  using local_iterator_impl=
    detail::local_iterator_impl<poly_collection,BaseIterator>;

public:
  using iterator=iterator_impl<false>;
  using const_iterator=iterator_impl<true>;
  using local_base_iterator=local_iterator_impl<segment_base_iterator>;
  using const_local_base_iterator=
    local_iterator_impl<const_segment_base_iterator>;
  template<typename T>
  using local_iterator=local_iterator_impl<segment_iterator<T>>;
  template<typename T>
  using const_local_iterator=local_iterator_impl<const_segment_iterator<T>>;

  /* read-only view of one segment: local iteration plus its type_info */
  class const_base_segment_info
  {
  public:
    const_base_segment_info(const const_base_segment_info&)=default;
    const_base_segment_info& operator=(const const_base_segment_info&)=default;

    const_local_base_iterator begin()const noexcept
      {return {it,it->second.begin()};}
    const_local_base_iterator end()const noexcept
      {return {it,it->second.end()};}
    const_local_base_iterator cbegin()const noexcept{return begin();}
    const_local_base_iterator cend()const noexcept{return end();}

    template<typename T>
    const_local_iterator<T> begin()const noexcept
      {return const_local_iterator<T>{begin()};}
    template<typename T>
    const_local_iterator<T> end()const noexcept
      {return const_local_iterator<T>{end()};}
    template<typename T>
    const_local_iterator<T> cbegin()const noexcept{return begin<T>();}
    template<typename T>
    const_local_iterator<T> cend()const noexcept{return end<T>();}

    const std::type_info& type_info()const{return *it->first;}

  protected:
    friend class poly_collection;

    const_base_segment_info(const_segment_map_iterator it)noexcept:it{it}{}

    const_segment_map_iterator it;
  };

  /* mutable counterpart of const_base_segment_info */
  class base_segment_info:public const_base_segment_info
  {
  public:
    base_segment_info(const base_segment_info&)=default;
    base_segment_info& operator=(const base_segment_info&)=default;

    using const_base_segment_info::begin;
    using const_base_segment_info::end;

    local_base_iterator begin()noexcept
      {return {this->it,this->it->second.begin()};}
    local_base_iterator end()noexcept
      {return {this->it,this->it->second.end()};}
    template<typename T>
    local_iterator<T> begin()noexcept{return local_iterator<T>{begin()};}
    template<typename T>
    local_iterator<T> end()noexcept{return local_iterator<T>{end()};}

  private:
    friend class poly_collection;

    using const_base_segment_info::const_base_segment_info;
  };

  /* read-only statically-typed view of one segment */
  template<typename T>
  class const_segment_info
  {
  public:
    const_segment_info(const const_segment_info&)=default;
    const_segment_info& operator=(const const_segment_info&)=default;

    const_local_iterator<T> begin()const noexcept
      {return {it,it->second.begin()};}
    const_local_iterator<T> end()const noexcept
      {return {it,it->second.end()};}
    const_local_iterator<T> cbegin()const noexcept{return begin();}
    const_local_iterator<T> cend()const noexcept{return end();}

  protected:
    friend class poly_collection;

    const_segment_info(const_segment_map_iterator it)noexcept:it{it}{}

    const_segment_map_iterator it;
  };

  /* mutable statically-typed view of one segment */
  template<typename T>
  class segment_info:public const_segment_info<T>
  {
  public:
    segment_info(const segment_info&)=default;
    segment_info& operator=(const segment_info&)=default;

    using const_segment_info<T>::begin;
    using const_segment_info<T>::end;

    local_iterator<T> begin()noexcept
      {return {this->it,this->it->second.begin()};}
    local_iterator<T> end()noexcept
      {return {this->it,this->it->second.end()};}

  private:
    friend class poly_collection;

    using const_segment_info<T>::const_segment_info;
  };

private:
  /* input iterator over the segment map, dereferencing to a SegmentInfo
   * value (not a reference, hence input_iterator_tag)
   */
  template<typename SegmentInfo>
  class segment_info_iterator_impl:
    public boost::iterator_adaptor<
      segment_info_iterator_impl<SegmentInfo>,
      const_segment_map_iterator,
      SegmentInfo,
      std::input_iterator_tag,
      SegmentInfo
    >
  {
    segment_info_iterator_impl(const_segment_map_iterator it):
      segment_info_iterator_impl::iterator_adaptor_{it}{}

  public:
    segment_info_iterator_impl()=default;
    segment_info_iterator_impl(const segment_info_iterator_impl&)=default;
    segment_info_iterator_impl& operator=(
      const segment_info_iterator_impl&)=default;

    /* converting construction/assignment from an iterator over a derived
     * SegmentInfo (e.g. base_segment_info -> const_base_segment_info)
     */
    template<
      typename SegmentInfo2,
      typename std::enable_if<
        std::is_base_of<SegmentInfo,SegmentInfo2>::value
      >::type* =nullptr
    >
    segment_info_iterator_impl(
      const segment_info_iterator_impl<SegmentInfo2>& x):
      segment_info_iterator_impl::iterator_adaptor_{x.base()}{}

    template<
      typename SegmentInfo2,
      typename std::enable_if<
        std::is_base_of<SegmentInfo,SegmentInfo2>::value
      >::type* =nullptr
    >
    segment_info_iterator_impl& operator=(
      const segment_info_iterator_impl<SegmentInfo2>& x)
    {
      this->base_reference()=x.base();
      return *this;
    }

  private:
    template<typename>
    friend class segment_info_iterator_impl;
    friend class poly_collection;
    friend class boost::iterator_core_access;
    template<typename>
    friend struct detail::iterator_traits;

    SegmentInfo dereference()const noexcept{return this->base();}
  };

public:
  using base_segment_info_iterator=
    segment_info_iterator_impl<base_segment_info>;
  using const_base_segment_info_iterator=
    segment_info_iterator_impl<const_base_segment_info>;

private:
  /* maps a const iterator type to its non-const counterpart via overload
   * resolution (identity for anything else); functions are never defined,
   * only used inside decltype
   */
  template<typename Iterator>
  static Iterator nonconst_hlp(Iterator);
  static iterator nonconst_hlp(const_iterator);
  static local_base_iterator nonconst_hlp(const_local_base_iterator);
  template<typename T>
  static local_iterator<T> nonconst_hlp(const_local_iterator<T>);
  static base_segment_info_iterator nonconst_hlp(
    const_base_segment_info_iterator);

  template<typename Iterator>
  using nonconst_version=decltype(nonconst_hlp(std::declval<Iterator>()));

public:
  /* range over all segments, for segment-by-segment traversal */
  class const_segment_traversal_info
  {
  public:
    const_segment_traversal_info(const const_segment_traversal_info&)=default;
    const_segment_traversal_info& operator=(
      const const_segment_traversal_info&)=default;

    const_base_segment_info_iterator begin()const noexcept
      {return pmap->cbegin();}
    const_base_segment_info_iterator end()const noexcept{return pmap->cend();}
    const_base_segment_info_iterator cbegin()const noexcept{return begin();}
    const_base_segment_info_iterator cend()const noexcept{return end();}

  protected:
    friend class poly_collection;

    /* const_cast so the mutable subclass can reuse the same pointer */
    const_segment_traversal_info(const segment_map& map)noexcept:
      pmap{const_cast<segment_map*>(&map)}{}

    segment_map* pmap;
  };

  class segment_traversal_info:public const_segment_traversal_info
  {
  public:
    segment_traversal_info(const segment_traversal_info&)=default;
    segment_traversal_info& operator=(const segment_traversal_info&)=default;

    using const_segment_traversal_info::begin;
    using const_segment_traversal_info::end;

    base_segment_info_iterator begin()noexcept{return this->pmap->cbegin();}
    base_segment_info_iterator end()noexcept{return this->pmap->cend();}

  private:
    friend class poly_collection;

    using const_segment_traversal_info::const_segment_traversal_info;
  };

  /* construct/destroy/copy */

  poly_collection()=default;
  poly_collection(const poly_collection&)=default;
  poly_collection(poly_collection&&)=default;
  explicit poly_collection(const allocator_type& al):
    map{segment_map_allocator_type{al}}{}
  poly_collection(const poly_collection& x,const allocator_type& al):
    map{x.map,segment_map_allocator_type{al}}{}
  poly_collection(poly_collection&& x,const allocator_type& al):
    map{std::move(x.map),segment_map_allocator_type{al}}{}
  template<typename InputIterator>
  poly_collection(
    InputIterator first,InputIterator last,
    const allocator_type& al=allocator_type{}):
    map{segment_map_allocator_type{al}}
  {
    this->insert(first,last);
  }

  // TODO: what to do with initializer_list?

  poly_collection& operator=(const poly_collection&)=default;
  poly_collection& operator=(poly_collection&&)=default;

  allocator_type get_allocator()const noexcept{return map.get_allocator();}

  /* type registration */

  /* pre-create a (possibly empty) segment for each listed type */
  template<
    typename... T,
    for_all<enable_if_acceptable<T>...> =nullptr
  >
  void register_types()
  {
    /* http://twitter.com/SeanParent/status/558765089294020609 */

    using seq=int[1+sizeof...(T)];
    (void)seq{
      0,
      (map.insert(
        typeid(T),segment_type::template make<T>(get_allocator())),0)...
    };
  }

  bool is_registered(const std::type_info& info)const
  {
    return map.find(info)!=map.end();
  }

  template<typename T,enable_if_acceptable<T> =nullptr>
  bool is_registered()const
  {
    return is_registered(typeid(T));
  }

  /* iterators */

  iterator begin()noexcept{return {map.begin(),map.end()};}
  iterator end()noexcept{return {map.end(),map.end()};}
  const_iterator begin()const noexcept{return {map.begin(),map.end()};}
  const_iterator end()const noexcept{return {map.end(),map.end()};}
  const_iterator cbegin()const noexcept{return begin();}
  const_iterator cend()const noexcept{return end();}

  /* local iterators over the segment for a given type_info / type T;
   * get_map_iterator_for throws unregistered_type or creates the segment
   * on demand depending on the overload picked (see private helpers)
   */
  local_base_iterator begin(const std::type_info& info)
  {
    auto it=get_map_iterator_for(info);
    return {it,segment(it).begin()};
  }

  local_base_iterator end(const std::type_info& info)
  {
    auto it=get_map_iterator_for(info);
    return {it,segment(it).end()};
  }

  const_local_base_iterator begin(const std::type_info& info)const
  {
    auto it=get_map_iterator_for(info);
    return {it,segment(it).begin()};
  }

  const_local_base_iterator end(const std::type_info& info)const
  {
    auto it=get_map_iterator_for(info);
    return {it,segment(it).end()};
  }

  const_local_base_iterator cbegin(const std::type_info& info)const
    {return begin(info);}
  const_local_base_iterator cend(const std::type_info& info)const
    {return end(info);}

  template<typename T,enable_if_acceptable<T> =nullptr>
  local_iterator<T> begin()
  {
    auto it=get_map_iterator_for(typeid(T));
    return {it,segment(it).template begin<T>()};
  }

  template<typename T,enable_if_acceptable<T> =nullptr>
  local_iterator<T> end()
  {
    auto it=get_map_iterator_for(typeid(T));
    return {it,segment(it).template end<T>()};
  }

  template<typename T,enable_if_acceptable<T> =nullptr>
  const_local_iterator<T> begin()const
  {
    auto it=get_map_iterator_for(typeid(T));
    return {it,segment(it).template begin<T>()};
  }

  template<typename T,enable_if_acceptable<T> =nullptr>
  const_local_iterator<T> end()const
  {
    auto it=get_map_iterator_for(typeid(T));
    return {it,segment(it).template end<T>()};
  }

  template<typename T,enable_if_acceptable<T> =nullptr>
  const_local_iterator<T> cbegin()const{return begin<T>();}
  template<typename T,enable_if_acceptable<T> =nullptr>
  const_local_iterator<T> cend()const{return end<T>();}

  /* segment views (segment_info objects convert from map iterators) */
  base_segment_info segment(const std::type_info& info)
  {
    return get_map_iterator_for(info);
  }

  const_base_segment_info segment(const std::type_info& info)const
  {
    return get_map_iterator_for(info);
  }

  template<typename T,enable_if_acceptable<T> =nullptr>
  segment_info<T> segment(){return get_map_iterator_for(typeid(T));}

  template<typename T,enable_if_acceptable<T> =nullptr>
  const_segment_info<T> segment()const{return get_map_iterator_for(typeid(T));}

  segment_traversal_info segment_traversal()noexcept{return map;}
  const_segment_traversal_info segment_traversal()const noexcept{return map;}

  /* capacity */

  bool empty()const noexcept
  {
    for(const auto& x:map)if(!x.second.empty())return false;
    return true;
  }

  bool empty(const std::type_info& info)const
  {
    return segment(get_map_iterator_for(info)).empty();
  }

  template<typename T,enable_if_acceptable<T> =nullptr>
  bool empty()const
  {
    return segment(get_map_iterator_for(typeid(T))).template empty<T>();
  }

  size_type size()const noexcept
  {
    size_type res=0;
    for(const auto& x:map)res+=x.second.size();
    return res;
  }

  size_type size(const std::type_info& info)const
  {
    return segment(get_map_iterator_for(info)).size();
  }

  template<typename T,enable_if_acceptable<T> =nullptr>
  size_type size()const
  {
    return segment(get_map_iterator_for(typeid(T))).template size<T>();
  }

  size_type max_size(const std::type_info& info)const
  {
    return segment(get_map_iterator_for(info)).max_size();
  }

  template<typename T,enable_if_acceptable<T> =nullptr>
  size_type max_size()const
  {
    return segment(get_map_iterator_for(typeid(T))).template max_size<T>();
  }

  size_type capacity(const std::type_info& info)const
  {
    return segment(get_map_iterator_for(info)).capacity();
  }

  template<typename T,enable_if_acceptable<T> =nullptr>
  size_type capacity()const
  {
    return segment(get_map_iterator_for(typeid(T))).template capacity<T>();
  }

  void reserve(size_type n)
  {
    for(auto& x:map)x.second.reserve(n);
  }

  void reserve(const std::type_info& info,size_type n)
  {
    segment(get_map_iterator_for(info)).reserve(n);
  }

  template<typename T,enable_if_acceptable<T> =nullptr>
  void reserve(size_type n)
  {
    /* note this creates the segment if it didn't previously exist */

    segment(get_map_iterator_for<T>()).template reserve<T>(n);
  }

  void shrink_to_fit()
  {
    for(auto& x:map)x.second.shrink_to_fit();
  }

  void shrink_to_fit(const std::type_info& info)
  {
    segment(get_map_iterator_for(info)).shrink_to_fit();
  }

  template<typename T,enable_if_acceptable<T> =nullptr>
  void shrink_to_fit()
  {
    segment(get_map_iterator_for(typeid(T))).template shrink_to_fit<T>();
  }

  /* modifiers */

  template<typename T,typename... Args,enable_if_acceptable<T> =nullptr>
  iterator emplace(Args&&... args)
  {
    auto it=get_map_iterator_for<T>();
    return {
      it,map.end(),
      segment(it).template emplace_back<T>(std::forward<Args>(args)...)
    };
  }

  template<typename T,typename... Args,enable_if_acceptable<T> =nullptr>
  iterator emplace_hint(const_iterator hint,Args&&... args)
  {
    auto it=get_map_iterator_for<T>();
    return {
      it,map.end(),
      hint.mapit==it? /* hint in segment */
        segment(it).template emplace<T>(
          hint.segpos,std::forward<Args>(args)...):
        segment(it).template emplace_back<T>(std::forward<Args>(args)...)
    };
  }

  template<typename T,typename... Args,enable_if_acceptable<T> =nullptr>
  local_base_iterator
  emplace_pos(local_base_iterator pos,Args&&... args)
  {
    return emplace_pos<T>(
      const_local_base_iterator{pos},std::forward<Args>(args)...);
  }

  template<typename T,typename... Args,enable_if_acceptable<T> =nullptr>
  local_base_iterator
  emplace_pos(const_local_base_iterator pos,Args&&... args)
  {
    BOOST_ASSERT(pos.type_info()==typeid(T));
    return {
      pos.mapit,
      pos.segment().template emplace<T>(pos.base(),std::forward<Args>(args)...)
    };
  }

  template<typename T,typename... Args>
  local_iterator<T>
  emplace_pos(local_iterator<T> pos,Args&&... args)
  {
    return emplace_pos(
      const_local_iterator<T>{pos},std::forward<Args>(args)...);
  }

  template<typename T,typename... Args>
  local_iterator<T>
  emplace_pos(const_local_iterator<T> pos,Args&&... args)
  {
    return {
      pos.mapit,
      pos.segment().template emplace<T>(pos.base(),std::forward<Args>(args)...)
    };
  }

  template<typename T,enable_if_implementation<T> =nullptr>
  iterator insert(T&& x)
  {
    auto it=get_map_iterator_for(x);
    return {it,map.end(),push_back(segment(it),std::forward<T>(x))};
  }

  template<
    typename T,
    enable_if_not_same<const_iterator,T> =nullptr,
    enable_if_implementation<T> =nullptr
  >
  iterator insert(const_iterator hint,T&& x)
  {
    auto it=get_map_iterator_for(x);
    return {
      it,map.end(),
      hint.mapit==it? /* hint in segment */
        segment(it).insert(hint.segpos,std::forward<T>(x)):
        push_back(segment(it),std::forward<T>(x))
    };
  }

  template<
    typename BaseIterator,typename T,
    enable_if_not_same<local_iterator_impl<BaseIterator>,T> =nullptr,
    enable_if_implementation<T> =nullptr
  >
  nonconst_version<local_iterator_impl<BaseIterator>>
  insert(local_iterator_impl<BaseIterator> pos,T&& x)
  {
    BOOST_ASSERT(pos.type_info()==subtypeid(x));
    return {
      pos.mapit,
      pos.segment().insert(pos.base(),std::forward<T>(x))
    };
  }

  template<
    typename InputIterator,
    enable_if_derefs_to_implementation<InputIterator> =nullptr,
    enable_if_derefs_to_not_terminal<InputIterator> =nullptr
  >
  void insert(InputIterator first,InputIterator last)
  {
    for(;first!=last;++first)insert(*first);
  }

  template<
    typename InputIterator,
    enable_if_derefs_to_implementation<InputIterator> =nullptr,
    enable_if_derefs_to_terminal<InputIterator> =nullptr
  >
  void insert(InputIterator first,InputIterator last)
  {
    if(first==last)return;

    /* same segment for all (type is terminal) */

    auto& seg=segment(get_map_iterator_for(*first));
    seg.insert(first,last);
  }

  template<bool Const>
  void insert(iterator_impl<Const> first,iterator_impl<Const> last)
  {
    for(;first!=last;++first){
      auto& seg=segment(get_map_iterator_for(*first,first.segment()));
      push_back(seg,*first);
    }
  }

  template<typename BaseIterator>
  void insert(
    local_iterator_impl<BaseIterator> first,
    local_iterator_impl<BaseIterator> last)
  {
    if(first==last)return;

    /* same segment for all (iterator is local) */

    auto& seg=segment(get_map_iterator_for(*first,first.segment()));
    do seg.push_back(*first); while(++first!=last);
  }

  template<
    typename InputIterator,
    enable_if_derefs_to_implementation<InputIterator> =nullptr,
    enable_if_derefs_to_not_terminal<InputIterator> =nullptr
  >
  void insert(const_iterator hint,InputIterator first,InputIterator last)
  {
    for(;first!=last;++first){
      auto it=get_map_iterator_for(*first);
      if(hint.mapit==it){ /* hint in segment */
        hint={it,map.end(),segment(it).insert(hint.segpos,*first)};
        ++hint;
      }
      else push_back(segment(it),*first);
    }
  }

  template<
    typename InputIterator,
    enable_if_derefs_to_implementation<InputIterator> =nullptr,
    enable_if_derefs_to_terminal<InputIterator> =nullptr
  >
  void insert(const_iterator hint,InputIterator first,InputIterator last)
  {
    if(first==last)return;

    /* same segment for all (type is terminal) */

    auto it=get_map_iterator_for(*first);
    auto& seg=segment(it);
    if(hint.mapit==it)seg.insert(hint.segpos,first,last); /* hint in segment */
    else seg.insert(first,last);
  }

  template<bool Const>
  void insert(
    const_iterator hint,iterator_impl<Const> first,iterator_impl<Const> last)
  {
    for(;first!=last;++first){
      auto it=get_map_iterator_for(*first,first.segment());
      if(hint.mapit==it){ /* hint in segment */
        hint={it,map.end(),segment(it).insert(hint.segpos,*first)};
        ++hint;
      }
      else push_back(segment(it),*first);
    }
  }

  template<typename BaseIterator>
  void insert(
    const_iterator hint,
    local_iterator_impl<BaseIterator> first,
    local_iterator_impl<BaseIterator> last)
  {
    if(first==last)return;

    /* same segment for all (iterator is local) */

    auto it=get_map_iterator_for(*first,first.segment());
    auto& seg=segment(it);
    if(hint.mapit==it){ /* hint in segment */
      do{
        hint={it,map.end(),seg.insert(hint.segpos,*first)};
        ++hint;
      }while(++first!=last);
    }
    else{
      do push_back(seg,*first); while(++first!=last);
    }
  }

  /* range insert at a local position; returns iterator to the first
   * inserted element (it-n walks back over the n insertions)
   */
  template<
    typename InputIterator,
    enable_if_derefs_to_implementation<InputIterator> =nullptr
  >
  local_base_iterator insert(
    const_local_base_iterator pos,InputIterator first,InputIterator last)
  {
    auto& seg=pos.segment();
    auto it=Model::nonconst_iterator(pos.base());
    size_type n=0;
    for(;first!=last;++first){
      BOOST_ASSERT(pos.type_info()==subtypeid(*first));
      it=std::next(seg.insert(it,*first));
      ++n;
    }
    return {pos.mapit,it-n};
  }

  template<typename T,typename InputIterator>
  local_iterator<T> insert(
    const_local_iterator<T> pos,InputIterator first,InputIterator last)
  {
    auto& seg=pos.segment();
    segment_iterator<T> it=Model::nonconst_iterator(pos.base());
    size_type n=0;
    for(;first!=last;++first){
      it=std::next(
        static_cast<segment_iterator<T>>(local_insert<T>(seg,it,*first)));
      ++n;
    }
    return {pos.mapit,it-n};
  }

  template<typename T,typename InputIterator>
  local_iterator<T> insert(
    local_iterator<T> pos,InputIterator first,InputIterator last)
  {
    return insert(const_local_iterator<T>{pos},first,last);
  }

  iterator erase(const_iterator pos)
  {
    return {pos.mapit,pos.mapend,pos.segment().erase(pos.segpos)};
  }

  template<typename BaseIterator>
  nonconst_version<local_iterator_impl<BaseIterator>>
  erase(local_iterator_impl<BaseIterator> pos)
  {
    return {pos.mapit,pos.segment().erase(pos.base())};
  }

  iterator erase(const_iterator first, const_iterator last)
  {
    const_segment_map_iterator fseg=first.mapit,
                               lseg=last.mapit,
                               end=first.mapend;
    if(fseg!=lseg){ /* [first,last] spans over more than one segment */
      /* from 1st elem to end of 1st segment */

      segment(fseg).erase_till_end(first.segpos);

      /* entire segments till last one */

      while(++fseg!=lseg)segment(fseg).clear();

      /* remaining elements of last segment */

      if(fseg==end){ /* except if at end of container */
        return {end,end};
      }
      else{
        return {fseg,end,segment(fseg).erase_from_begin(last.segpos)};
      }
    }
    else{ /* range is included in one segment only */
      if(first==last){ /* to avoid segment(fseg) when fseg==end */
        return {fseg,end,first.segpos};
      }
      else{
        return {fseg,end,segment(fseg).erase(first.segpos,last.segpos)};
      }
    }
  }

  template<typename BaseIterator>
  nonconst_version<local_iterator_impl<BaseIterator>>
  erase(
    local_iterator_impl<BaseIterator> first,
    local_iterator_impl<BaseIterator> last)
  {
    BOOST_ASSERT(first.mapit==last.mapit);
    return{
      first.mapit,
      first.segment().erase(first.base(),last.base())
    };
  }

  void clear()noexcept
  {
    for(auto& x:map)x.second.clear();
  }

  void clear(const std::type_info& info)
  {
    segment(get_map_iterator_for(info)).clear();
  }

  template<typename T,enable_if_acceptable<T> =nullptr>
  void clear()
  {
    segment(get_map_iterator_for(typeid(T))).template clear<T>();
  }

  void swap(poly_collection& x){map.swap(x.map);}

private:
  template<typename M,typename A>
  friend bool operator==(
    const poly_collection<M,A>&,const poly_collection<M,A>&);

  /* get_map_iterator_for family: locate the segment for a value / type,
   * creating it on demand when the static type is acceptable, throwing
   * unregistered_type otherwise
   */
  template<
    typename T,
    enable_if_acceptable<T> =nullptr,
    enable_if_not_terminal<T> =nullptr
  >
  const_segment_map_iterator get_map_iterator_for(const T& x)
  {
    const auto& id=subtypeid(x);
    auto it=map.find(id);
    if(it!=map.end())return it;
    else if(id!=typeid(T))throw unregistered_type{id};
    else return map.insert(
      typeid(T),segment_type::template make<T>(get_allocator())).first;
  }

  template<
    typename T,
    enable_if_acceptable<T> =nullptr,
    enable_if_terminal<T> =nullptr
  >
  const_segment_map_iterator get_map_iterator_for(const T&)
  {
    auto it=map.find(typeid(T));
    if(it!=map.end())return it;
    else return map.insert(
      typeid(T),segment_type::template make<T>(get_allocator())).first;
  }

  template<
    typename T,
    enable_if_not_acceptable<T> =nullptr,
    enable_if_not_terminal<T> =nullptr
  >
  const_segment_map_iterator get_map_iterator_for(const T& x)const
  {
    const auto& id=subtypeid(x);
    auto it=map.find(id);
    if(it!=map.end())return it;
    else throw unregistered_type{id};
  }

  template<
    typename T,
    enable_if_not_acceptable<T> =nullptr,
    enable_if_terminal<T> =nullptr
  >
  const_segment_map_iterator get_map_iterator_for(const T&)const
  {
    static_assert(
      is_acceptable<T>::value,
      "type must be move constructible and move assignable");
    return {}; /* never executed */
  }

  template<typename T>
  const_segment_map_iterator get_map_iterator_for(
    const T& x,const segment_type& seg)
  {
    const auto& id=subtypeid(x);
    auto it=map.find(id);
    if(it!=map.end())return it;
    else return map.insert(id,segment_type::make_from_prototype(seg)).first;
  }

  template<typename T>
  const_segment_map_iterator get_map_iterator_for()
  {
    auto it=map.find(typeid(T));
    if(it!=map.end())return it;
    else return map.insert(
      typeid(T),segment_type::template make<T>(get_allocator())).first;
  }

  const_segment_map_iterator get_map_iterator_for(const std::type_info& info)
  {
    return const_cast<const poly_collection*>(this)->
      get_map_iterator_for(info);
  }

  const_segment_map_iterator get_map_iterator_for(
    const std::type_info& info)const
  {
    auto it=map.find(info);
    if(it!=map.end())return it;
    else throw unregistered_type{info};
  }

  static segment_type& segment(const_segment_map_iterator pos)
  {
    return const_cast<segment_type&>(pos->second);
  }

  /* push_back family: dispatch to the segment's terminal fast path when
   * the static type proves (or a runtime check confirms) the value's
   * dynamic type is exactly T
   */
  template<
    typename T,
    enable_if_not_acceptable<T> =nullptr
  >
  segment_base_iterator push_back(segment_type& seg,T&& x)
  {
    return seg.push_back(std::forward<T>(x));
  }

  template<
    typename T,
    enable_if_acceptable<T> =nullptr,
    enable_if_not_terminal<T> =nullptr
  >
  segment_base_iterator push_back(segment_type& seg,T&& x)
  {
    return subtypeid(x)==typeid(T)?
      seg.push_back_terminal(std::forward<T>(x)):
      seg.push_back(std::forward<T>(x));
  }

  template<
    typename T,
    enable_if_acceptable<T> =nullptr,
    enable_if_terminal<T> =nullptr
  >
  segment_base_iterator push_back(segment_type& seg,T&& x)
  {
    return seg.push_back_terminal(std::forward<T>(x));
  }

  /* local_insert family: insert x as-is when its dynamic type is T,
   * otherwise construct a T from it; static_assert when neither works
   */
  template<
    typename T,typename BaseIterator,typename U,
    enable_if_implementation<U> =nullptr,
    enable_if_not_constructible<T,U&&> =nullptr
  >
  static segment_base_iterator local_insert(
    segment_type& seg,BaseIterator pos,U&& x)
  {
    BOOST_ASSERT(subtypeid(x)==typeid(T));
    return seg.insert(pos,std::forward<U>(x));
  }

  template<
    typename T,typename BaseIterator,typename U,
    enable_if_implementation<U> =nullptr,
    enable_if_constructible<T,U&&> =nullptr
  >
  static segment_base_iterator local_insert(
    segment_type& seg,BaseIterator pos,U&& x)
  {
    if(subtypeid(x)==typeid(T))return seg.insert(pos,std::forward<U>(x));
    else return seg.template emplace<T>(pos,std::forward<U>(x));
  }

  template<
    typename T,typename BaseIterator,typename U,
    enable_if_not_implementation<U> =nullptr,
    enable_if_constructible<T,U&&> =nullptr
  >
  static segment_base_iterator local_insert(
    segment_type& seg,BaseIterator pos,U&& x)
  {
    return seg.template emplace<T>(pos,std::forward<U>(x));
  }

  template<
    typename T,typename BaseIterator,typename U,
    enable_if_not_implementation<U> =nullptr,
    enable_if_not_constructible<T,U&&> =nullptr
  >
  static segment_base_iterator local_insert(
    segment_type&,BaseIterator,U&&)
  {
    static_assert(
      is_constructible<T,U&&>::value,
      "element must be constructible from type");
    return {}; /* never executed */
  }

  segment_map map;
};
/* Two collections are equal iff, segment by segment, every non-empty segment
 * of one has an equal counterpart in the other and no extra elements exist. */
template<typename Model,typename Allocator>
bool operator==(
  const poly_collection<Model,Allocator>& x,
  const poly_collection<Model,Allocator>& y)
{
  typename poly_collection<Model,Allocator>::size_type total=0;
  const auto& lhs_map=x.map;
  const auto& rhs_map=y.map;
  for(const auto& entry:lhs_map){
    auto seg_size=entry.second.size();
    auto match=rhs_map.find(*entry.first);
    if(match==rhs_map.end()){
      /* Type absent on the right: equal only if the left segment is empty. */
      if(seg_size!=0)return false;
    }
    else if(entry.second!=match->second)return false;
    total+=seg_size;
  }
  /* Size comparison catches elements of types present only in y. */
  return total==y.size();
}
/* Canonical negation of operator==. */
template<typename Model,typename Allocator>
bool operator!=(
  const poly_collection<Model,Allocator>& lhs,
  const poly_collection<Model,Allocator>& rhs)
{
  const bool equal=(lhs==rhs);
  return !equal;
}
template<typename Model,typename Allocator>
void swap(
poly_collection<Model,Allocator>& x,poly_collection<Model,Allocator>& y)
{
x.swap(y);
}
} /* namespace poly_collection::common_impl */
} /* namespace poly_collection */
} /* namespace boost */
#endif
|
{"hexsha": "2334347419c0976ee5501360129ffea94fbd8136", "size": 34226, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "deps/boost/boost/poly_collection/detail/poly_collection.hpp", "max_stars_repo_name": "alexhenrie/poedit", "max_stars_repo_head_hexsha": "b9b31a111d9e8a84cf1e698aff2c922a79bdd859", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1155.0, "max_stars_repo_stars_event_min_datetime": "2015-01-10T19:04:33.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T12:30:30.000Z", "max_issues_repo_path": "deps/boost/boost/poly_collection/detail/poly_collection.hpp", "max_issues_repo_name": "alexhenrie/poedit", "max_issues_repo_head_hexsha": "b9b31a111d9e8a84cf1e698aff2c922a79bdd859", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 618.0, "max_issues_repo_issues_event_min_datetime": "2015-01-02T01:39:26.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-28T15:18:40.000Z", "max_forks_repo_path": "deps/boost/boost/poly_collection/detail/poly_collection.hpp", "max_forks_repo_name": "alexhenrie/poedit", "max_forks_repo_head_hexsha": "b9b31a111d9e8a84cf1e698aff2c922a79bdd859", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 228.0, "max_forks_repo_forks_event_min_datetime": "2015-01-13T12:55:42.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-30T11:11:05.000Z", "avg_line_length": 29.1285106383, "max_line_length": 79, "alphanum_fraction": 0.7098404722, "num_tokens": 8182}
|
#! /usr/bin/env python
from __future__ import print_function
import os
import sys
import io
import csv
from optparse import OptionParser
import numpy as np
import tensorflow as tf
from flask import Flask, jsonify, render_template, request
from tensorflow.contrib import learn
import data_helpers
from flask_restplus import Api, Resource, fields, marshal_with, reqparse
from text_cnn import TextCNNModel
from werkzeug.datastructures import FileStorage
# Create event type model
def __load_models():
    """Restore the three CREES text-CNN classifiers.

    Each model ships as split checkpoint chunks that are merged on disk
    first, then restored together with its vocabulary file.

    Returns:
        tuple: (event-type, event-relatedness, information-type) classifiers.
    """
    # Event-type classifier (13 disaster categories plus "none").
    ckpt_path = "models/event-types/model.ckpt"
    vocab_path = "models/event-types/vocabulary.voc"
    data_helpers.merge_model_file(ckpt_path, remove=True)
    type_classifier = TextCNNModel.restore(
        ckpt_path, vocab_path, sequence_length=32, num_classes=14, vocab_size=44345)
    type_classifier.labels = ["bombings", "collapse", "crash", "derailment", "earthquake",
                              "explosion", "fire", "floods", "haze", "meteorite",
                              "none", "shootings", "typhoon", "wildfire"]

    # Event-relatedness classifier (binary).
    ckpt_path = "models/event-related/model.ckpt"
    vocab_path = "models/event-related/vocabulary.voc"
    data_helpers.merge_model_file(ckpt_path, remove=True)
    related_classifier = TextCNNModel.restore(
        ckpt_path, vocab_path, sequence_length=43, num_classes=2, vocab_size=87420)
    related_classifier.labels = ["non-related", "related"]

    # Information-type classifier (8 categories).
    ckpt_path = "models/info-types/model.ckpt"
    vocab_path = "models/info-types/vocabulary.voc"
    data_helpers.merge_model_file(ckpt_path, remove=True)
    info_classifier = TextCNNModel.restore(
        ckpt_path, vocab_path, sequence_length=32, num_classes=8, vocab_size=44345)
    info_classifier.labels = ["affected_individuals", "caution_and_advice", "donations_and_volunteering",
                              "infrastructure_and_utilities", "not_applicable", "not_labeled",
                              "other_useful_information", "sympathy_and_support"]

    return (type_classifier, related_classifier, info_classifier)
def main(argv):
    """Configure and launch the CREES Flask/REST API server.

    Parses the command line (port, API namespace, debug flag), lets the
    CREES_PORT / CREES_NAMESPACE environment variables override defaults
    that were not set explicitly, loads the three text classifiers and
    registers the REST endpoints, then blocks serving requests.

    Args:
        argv: Process arguments (unused; optparse reads sys.argv itself).
    """
    # Parse command line arguments:
    parser = OptionParser()
    parser.add_option('-p', '--port', type='int', dest='port', default=80,
                      help="the API port for serving CREES [default: %default]")
    parser.add_option('-n', '--namespace', type='string', dest='api_namespace', default="comrades",
                      help="the API namespace for CREES [default: %default]")
    parser.add_option('-d', '--debug', action="store_true", dest='debug', default=False,
                      help="show debugging information")
    (options, args) = parser.parse_args()

    # Use environment variables if available, but only when the CLI kept the
    # default. BUG FIX: the original compared with `is`, which tests object
    # identity, not value -- unreliable for ints and strings. Use `==`.
    if os.environ.get('CREES_PORT') is not None and options.port == 80:
        options.port = int(os.environ.get('CREES_PORT'))
    if os.environ.get('CREES_NAMESPACE') is not None and options.api_namespace == 'comrades':
        options.api_namespace = str(os.environ.get('CREES_NAMESPACE'))
    print(' * Using port: ' + str(options.port))
    print(' * Using namespace: /' + str(options.api_namespace))

    # Web App:
    app = Flask(__name__, static_url_path='')
    app.config['RESTPLUS_MASK_SWAGGER'] = False
    app.config['APPLICATION_ROOT'] = '/' + options.api_namespace
    app.config.SWAGGER_UI_DOC_EXPANSION = 'list'
    api = Api(app, version='0.3', title='COMRADES Event API',
              description='A set of tools for analysing short textual documents (e.g. tweets).',
              doc='/' + options.api_namespace + '/',
              endpoint='/' + options.api_namespace
              )
    ns = api.namespace(options.api_namespace,
                       description='Event detection tools.')

    # Load models once at startup; the route classes close over them.
    type_classifier, related_classifier, info_classifier = __load_models()

    # Routes:
    @ns.route('/')
    class RootController(Resource):
        """Serves the static landing page."""

        def get(self):
            return app.send_static_file('index.html')

    @ns.route('/events/eventType')
    class EventClassifierController(Resource):
        """
        Performs event detection on short piece of text (e.g. tweets).
        """
        # API arguments:
        get_arguments = reqparse.RequestParser()
        get_arguments.add_argument(
            'text', required=True, help="The text to be analysed.")
        # Output arguments:
        # NOTE(review): the model names 'Category'/'Categories' are reused by
        # the other controllers; flask-restplus keeps the last registration.
        model = api.model('Category', {
            'input': fields.String,
            'label': fields.String,
            'classifier': fields.String,
            'version': fields.Float
        })

        @api.doc(id='textEventType')
        @api.expect(get_arguments, validate=True)
        @api.marshal_with(model, description='Event type')
        def get(self):
            """Obtains the type of event associated with a post."""
            args = self.get_arguments.parse_args()
            text = args['text']
            results = type_classifier.predict(text)
            return {'input': text, 'label': results, 'classifier': "CNN", 'version': 0.3}

        post_arguments = reqparse.RequestParser()
        post_arguments.add_argument(
            'texts', required=True, type=list, location='json', help="The JSON array containing the strings to be analysed.")
        # Output arguments:
        model2_inner = api.model('Category', {
            'input': fields.String,
            'label': fields.String,
        })
        model2 = api.model('Categories', {
            'labels': fields.List(fields.Nested(model2_inner)),
            'classifier': fields.String,
            'version': fields.Float
        })

        @api.doc(id='textEventType')
        @api.expect(post_arguments, validate=False)
        @api.marshal_with(model2, description='Event Type')
        def post(self):
            """Identifies the type of events associated with multiple posts."""
            data = request.get_json()
            resp = []
            for text in data:
                results = type_classifier.predict(text)
                resp.append({'input': text, 'label': results})
            return {'labels': resp, 'classifier': "CNN", 'version': 0.3}

    @ns.route('/events/infoType')
    class InfoTypeClassifierController(Resource):
        """
        Identifies the type of information associated with a post.
        """
        # API arguments:
        get_arguments = reqparse.RequestParser()
        get_arguments.add_argument(
            'text', required=True, help="The text to be analysed.")
        # Output arguments:
        model = api.model('Category', {
            'input': fields.String,
            'label': fields.String,
            'classifier': fields.String,
            'version': fields.Float
        })

        @api.doc(id='textInfoType')
        @api.expect(get_arguments, validate=True)
        @api.marshal_with(model, description='Info type')
        def get(self):
            """Identifies the type of information associated with a post."""
            args = self.get_arguments.parse_args()
            text = args['text']
            results = info_classifier.predict(text)
            return {'input': text, 'label': results, 'classifier': "CNN", 'version': 0.3}

        post_arguments = reqparse.RequestParser()
        post_arguments.add_argument(
            'texts', required=True, type=list, location='json', help="The JSON array containing the strings to be analysed.")
        # Output arguments:
        model2_inner = api.model('Category', {
            'input': fields.String,
            'label': fields.String,
        })
        model2 = api.model('Categories', {
            'labels': fields.List(fields.Nested(model2_inner)),
            'classifier': fields.String,
            'version': fields.Float
        })

        @api.doc(id='textInfoType')
        @api.expect(post_arguments, validate=False)
        @api.marshal_with(model2, description='Info Type')
        def post(self):
            """Identifies the type of information associated with multiple posts."""
            data = request.get_json()
            resp = []
            for text in data:
                results = info_classifier.predict(text)
                resp.append({'input': text, 'label': results})
            return {'labels': resp, 'classifier': "CNN", 'version': 0.3}

    @ns.route('/events/eventRelated')
    class RelatedClassifierController(Resource):
        """
        Identifies if a post is talking about an event.
        """
        # API arguments:
        get_arguments = reqparse.RequestParser()
        get_arguments.add_argument(
            'text', required=True, help="The text to be analysed.")
        # Output arguments:
        model = api.model('Category', {
            'input': fields.String,
            'label': fields.String,
            'classifier': fields.String,
            'version': fields.Float
        })

        @api.doc(id='textEventRelated')
        @api.expect(get_arguments, validate=True)
        @api.marshal_with(model, description='Event relation')
        def get(self):
            """Identifies if a post is talking about an event."""
            args = self.get_arguments.parse_args()
            text = args['text']
            results = related_classifier.predict(text)
            return {'input': text, 'label': results, 'classifier': "CNN", 'version': 0.3}

        post_arguments = reqparse.RequestParser()
        post_arguments.add_argument(
            'texts', required=True, type=list, location='json', help="The JSON array containing the strings to be analysed.")
        # Output arguments:
        model2_inner = api.model('Category', {
            'input': fields.String,
            'label': fields.String,
        })
        model2 = api.model('Categories', {
            'labels': fields.List(fields.Nested(model2_inner)),
            'classifier': fields.String,
            'version': fields.Float
        })

        @api.doc(id='textEventRelated')
        @api.expect(post_arguments, validate=False)
        @api.marshal_with(model2, description='Event relations')
        def post(self):
            """Identifies if multiple posts are talking about an event."""
            data = request.get_json()
            resp = []
            for text in data:
                results = related_classifier.predict(text)
                resp.append({'input': text, 'label': results})
            return {'labels': resp, 'classifier': "CNN", 'version': 0.3}

    # Start App (blocks until shutdown):
    app.run(host='0.0.0.0', port=options.port, debug=options.debug)


if __name__ == "__main__":
    main(sys.argv)
|
{"hexsha": "dd2c42bd62fa740efb83b6473f85d0a758055b9e", "size": 10595, "ext": "py", "lang": "Python", "max_stars_repo_path": "crees_server.py", "max_stars_repo_name": "evhart/comrades-crees", "max_stars_repo_head_hexsha": "c06260bde5ae664ddd199bcf368a2da9e246da6e", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 15, "max_stars_repo_stars_event_min_datetime": "2017-10-28T08:59:11.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-23T10:10:57.000Z", "max_issues_repo_path": "crees_server.py", "max_issues_repo_name": "evhart/comrades-crees", "max_issues_repo_head_hexsha": "c06260bde5ae664ddd199bcf368a2da9e246da6e", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "crees_server.py", "max_forks_repo_name": "evhart/comrades-crees", "max_forks_repo_head_hexsha": "c06260bde5ae664ddd199bcf368a2da9e246da6e", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2017-12-06T08:41:35.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-24T08:22:51.000Z", "avg_line_length": 36.660899654, "max_line_length": 126, "alphanum_fraction": 0.6064181218, "include": true, "reason": "import numpy", "num_tokens": 2244}
|
import torch
import cole as cl
import numpy as np
import argparse
import os
# Point cole at the local dataset cache and run everything on the GPU.
cl.set_data_path("./data")
device = "cuda"
_BASE_PATH = ".."  # results are written under {_BASE_PATH}/results/<dataset>/
def calc_full_grad_norm(loaders, model):
    """Return the L2 norm of the mean gradient over all batches in *loaders*.

    Accumulates gradients of the summed cross-entropy loss across every
    batch, divides the concatenated gradient by the total sample count, and
    returns its norm. The model's gradients are overwritten as a side effect.
    """
    # Optimizer is only used to zero the gradients; no step is taken.
    optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
    optimizer.zero_grad()
    total_samples = 0
    for loader in loaders:
        for inputs, targets in loader:
            inputs, targets = inputs.to(device), targets.to(device)
            total_samples += len(targets)
            logits = model(inputs)
            # 'sum' reduction so per-batch gradients simply add up;
            # the division happens once at the end.
            loss = torch.nn.functional.cross_entropy(logits, targets, reduction='sum')
            loss.backward()
    flat_grad = torch.cat([p.grad.flatten() for p in model.parameters()])
    flat_grad.div_(total_samples)
    return flat_grad.norm()
def main():
    """Rehearsal gradient-norm experiment.

    Trains sequentially on a 5-task split with a balanced rehearsal buffer.
    After tasks 1, 2 and 5, records the full-gradient norm on the task-1
    test set and on the buffer, then saves the result matrix to
    ``{_BASE_PATH}/results/<data>/grad_norms_<rand>.npy``.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--iter', type=int, default=1)        # number of independent repetitions
    parser.add_argument('--data', type=str, default="mnist")  # mnist | cifar | min
    args = parser.parse_args()

    # Select model, 5-task split, per-task buffer increment and epoch count.
    if args.data == "mnist":
        model = cl.MLP().to(device)
        dataset = cl.get_split_mnist((1, 2, 3, 4, 5))
        dataset_t1 = cl.get_split_mnist((1, ))
        buf_size = 50
        epochs = 1
    elif args.data == "cifar":
        model = cl.get_resnet18().to(device)
        dataset = cl.get_split_cifar10((1, 2, 3, 4, 5))
        dataset_t1 = cl.get_split_cifar10((1, ))
        buf_size = 100
        epochs = 1
    elif args.data == "min":
        model = cl.get_resnet18(100, (3, 84, 84)).to(device)
        dataset = cl.get_split_mini_imagenet((1, 2, 3, 4, 5))
        # NOTE(review): other branches pass a tuple (1, ) -- confirm that a
        # bare int is accepted by get_split_mini_imagenet.
        dataset_t1 = cl.get_split_mini_imagenet(1)
        buf_size = 100
        epochs = 10
    else:
        raise ValueError("Data unknown")

    loaders = cl.CLDataLoader(dataset.train, bs=10, task_size=0)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
    buffer = cl.Buffer(0, sampler="balanced")
    # Rows 0-2 hold test-set grad norms after tasks 1, 2, 5;
    # rows 3-5 hold the matching buffer grad norms.
    results = [[] for _ in range(6)]

    for i in range(args.iter):
        for t, loader in enumerate(loaders):
            buffer.size = (t + 1) * buf_size  # grow the buffer per task
            for e in range(epochs):
                for data, target in loader:
                    buffer.sample((data, target))
                    # Mix the current batch with a rehearsal batch of size 10.
                    buf_data, buf_target = buffer.retrieve((data, target), size=10)
                    if buf_data is not None:
                        data, target = torch.cat([data, buf_data]), torch.cat([target, buf_target])
                    data, target = data.to(device), target.to(device)
                    cl.step(model, optimizer, data, target)
            if t in [0, 1, 4]:
                # Map tasks 0, 1, 4 onto result rows 0, 1, 2.
                idx = 2 if t == 4 else t
                test_loader = cl.CLDataLoader(dataset_t1.test, bs=64)
                buffer_loader = cl.CLDataLoader([buffer], bs=50, shuffle=False, task_size=100)
                full_grad = calc_full_grad_norm(test_loader, model).item()
                buffer_grad = calc_full_grad_norm(buffer_loader, model).item()
                results[idx].append(full_grad)
                results[3 + idx].append(buffer_grad)

    # Save under a random filename to avoid clobbering parallel runs.
    results = np.array(results)
    rand_idx = np.random.randint(0, 10000)
    if not os.path.exists(f"{_BASE_PATH}/results/{args.data}"):
        os.makedirs(f"{_BASE_PATH}/results/{args.data}")
    np.save(f'{_BASE_PATH}/results/{args.data}/grad_norms_{rand_idx}.npy', results)


if __name__ == '__main__':
    main()
|
{"hexsha": "5175715db0e115f14150ecbaaaa0f49078d26ceb", "size": 3232, "ext": "py", "lang": "Python", "max_stars_repo_path": "grad_norm_exp/grad_norms.py", "max_stars_repo_name": "Mattdl/RehearsalRevealed", "max_stars_repo_head_hexsha": "f9cd2548f6c6d3ff119b40fecdb0df6fcd1525f6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2021-04-16T15:49:59.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-27T18:04:58.000Z", "max_issues_repo_path": "grad_norm_exp/grad_norms.py", "max_issues_repo_name": "Mattdl/RehearsalRevealed", "max_issues_repo_head_hexsha": "f9cd2548f6c6d3ff119b40fecdb0df6fcd1525f6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "grad_norm_exp/grad_norms.py", "max_forks_repo_name": "Mattdl/RehearsalRevealed", "max_forks_repo_head_hexsha": "f9cd2548f6c6d3ff119b40fecdb0df6fcd1525f6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-06-22T04:11:52.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-12T03:27:18.000Z", "avg_line_length": 34.0210526316, "max_line_length": 99, "alphanum_fraction": 0.5838490099, "include": true, "reason": "import numpy", "num_tokens": 835}
|
using Test

# Sanity check that the test harness itself runs.
@test 1 == 1

using DataFrames
using ExoplanetsSysSim
# Exercise every constructor family in sequence; any thrown error fails the
# enclosing @test. Returns 0 on success.
function run_constructor_tests()
    ExoplanetsSysSim.SimulationParameters.test_sim_param_constructors()
    sp = ExoplanetsSysSim.setup_sim_param_demo()
    ExoplanetsSysSim.test_orbit_constructors()
    ExoplanetsSysSim.test_planet_constructors(sp)
    ExoplanetsSysSim.test_star_constructors(sp)
    ExoplanetsSysSim.test_planetary_system_constructors(sp)
    ExoplanetsSysSim.test_target(sp)
    ExoplanetsSysSim.test_transit_observations(sp)
    # Generate catalogs once and reuse them for the statistics/distance tests.
    (phys_cat, obs_cat) = ExoplanetsSysSim.test_catalog_constructors(sp)
    ExoplanetsSysSim.test_summary_statistics(obs_cat, phys_cat, sp)
    ExoplanetsSysSim.test_abc_distance(obs_cat, phys_cat, sp)
    return 0
end
# Just tests that the basic elements compile and run.
# TODO: Write tests that will be useful in diagnosing any bugs.
@test run_constructor_tests() == 0

# CORBITS self-tests (formerly invoked via ExoplanetsSysSim.test_corbits());
# run the CORBITS package's own test suite directly.
using CORBITS
include(joinpath(dirname(pathof(CORBITS)),"..","test","runtests.jl"))
|
{"hexsha": "8e9a0737fdf1bceb243b3f6c210efbebd710241e", "size": 1100, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/runtests.jl", "max_stars_repo_name": "aditya-sengupta/ExoplanetsSysSim.jl", "max_stars_repo_head_hexsha": "df552110db61453cbb8584657ba79f92f741909c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2019-04-01T06:14:23.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-09T20:08:26.000Z", "max_issues_repo_path": "test/runtests.jl", "max_issues_repo_name": "aditya-sengupta/ExoplanetsSysSim.jl", "max_issues_repo_head_hexsha": "df552110db61453cbb8584657ba79f92f741909c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 8, "max_issues_repo_issues_event_min_datetime": "2019-10-08T05:33:53.000Z", "max_issues_repo_issues_event_max_datetime": "2021-01-14T20:28:20.000Z", "max_forks_repo_path": "test/runtests.jl", "max_forks_repo_name": "aditya-sengupta/ExoplanetsSysSim.jl", "max_forks_repo_head_hexsha": "df552110db61453cbb8584657ba79f92f741909c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 10, "max_forks_repo_forks_event_min_datetime": "2019-04-06T23:36:32.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-14T16:31:04.000Z", "avg_line_length": 37.9310344828, "max_line_length": 152, "alphanum_fraction": 0.8290909091, "num_tokens": 282}
|
from gym_kuka_mujoco.utils.kinematics import inverseKin
from gym_kuka_mujoco.utils.quaternion import identity_quat
from gym_kuka_mujoco.envs.assets import kuka_asset_dir
import os
import mujoco_py
import numpy as np
# Get the model path.
model_filename = 'full_pushing_experiment_no_gravity.xml'
model_path = os.path.join(kuka_asset_dir(), model_filename)

# Construct the model and simulation objects.
model = mujoco_py.load_model_from_path(model_path)
sim = mujoco_py.MjSim(model)

# The points to be transformed.
pos = np.array([0., 0., 0.])
body_id = model.body_name2id('peg')

# Compute the forward kinematics.
q_nom = np.zeros(7)           # nominal joint posture passed to the IK solver
q_init = np.random.random(7)  # random IK seed; NOTE(review): unseeded, so runs are not reproducible
peg_tip_idx = model.site_name2id('peg_tip')
body_pos = model.site_pos[peg_tip_idx]  # peg-tip site offset within the peg body

# Target pose of the peg tip in world coordinates (alternates kept commented).
# world_pos = np.array([0.7, 0., 1.22]) # above the block
world_pos = np.array([0.83, -0.02, 1.20-0.02]) # to the +x and -y of the block
# world_quat = np.array([0, 1., 0, 0])
# world_quat = np.array([-0.09983341664, 0.99500416527, 0, 0]) # 0.2 rad
# NOTE(review): the components below equal (-sin 15deg, cos 15deg), which
# suggests a larger rotation than the "0.1 rad" label claims -- verify.
world_quat = np.array([-0.2588190451, 0.96592582628, 0, 0]) # 0.1 rad
qpos_idx = range(7)  # indices of the 7 arm joints within qpos

# Solve inverse kinematics for the target pose.
q_opt = inverseKin(sim, q_init, q_nom, body_pos, world_pos, world_quat, body_id, qpos_idx=qpos_idx)

# Visualize the solution (blocks forever; close the window or Ctrl-C to exit).
print("Optimal pose: {}\n".format(q_opt))
sim.data.qpos[qpos_idx] = q_opt
sim.forward()
viewer = mujoco_py.MjViewer(sim)
while True:
    viewer.render()
|
{"hexsha": "7f870004e34b99315ebece9fb93165d549eee206", "size": 1368, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/test_mujoco/inverse_kinematics_block_push.py", "max_stars_repo_name": "leonmkim/gym-kuka-mujoco", "max_stars_repo_head_hexsha": "ed45ae74d10e69f4e51439de2d1d0c0811623b6b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 23, "max_stars_repo_stars_event_min_datetime": "2019-03-12T21:19:12.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-08T15:03:40.000Z", "max_issues_repo_path": "examples/test_mujoco/inverse_kinematics_block_push.py", "max_issues_repo_name": "hzm2016/gym-kuka-mujoco", "max_issues_repo_head_hexsha": "a8a40bb08a1a1a269a2386ca0d102d62d8384206", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 16, "max_issues_repo_issues_event_min_datetime": "2019-04-21T17:50:13.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-11T23:40:29.000Z", "max_forks_repo_path": "examples/test_mujoco/inverse_kinematics_block_push.py", "max_forks_repo_name": "hzm2016/gym-kuka-mujoco", "max_forks_repo_head_hexsha": "a8a40bb08a1a1a269a2386ca0d102d62d8384206", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 14, "max_forks_repo_forks_event_min_datetime": "2019-05-21T08:56:43.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-31T04:23:29.000Z", "avg_line_length": 32.5714285714, "max_line_length": 99, "alphanum_fraction": 0.7456140351, "include": true, "reason": "import numpy", "num_tokens": 445}
|
#!/usr/bin/env python
# Two environment variables influence this script.
#
# GEOS_LIBRARY_PATH: a path to a GEOS C shared library.
#
# GEOS_CONFIG: the path to a geos-config program that points to GEOS version,
# headers, and libraries.
#
# NB: within this setup scripts, software versions are evaluated according
# to https://www.python.org/dev/peps/pep-0440/.
from sys import version_info as v, warnoptions
# Refuse to run on Python older than 2.6, or on 3.x older than 3.3.
if any([v < (2, 6), (3,) < v < (3, 3)]):
    raise Exception("Unsupported Python version %d.%d. Requires Python >= 2.6 "
                    "or >= 3.3." % v[:2])
import logging
import os
# If possible, use setuptools
from setuptools import setup, Extension, find_packages
from setuptools.command.build_ext import build_ext
# Prevent numpy from thinking it is still in its setup process:
# (numpy's own setup sets __NUMPY_SETUP__ = True, which makes `import numpy`
# return a stub; clear it so the build_ext hook can import the real package.)
__builtins__.__NUMPY_SETUP__ = False
class BuildExtNumpyInc(build_ext):
    """build_ext variant that injects NumPy's include dirs at build time."""

    def build_extensions(self):
        # Imported lazily so this setup script can be parsed (and
        # setup_requires resolved) before numpy is installed.
        from numpy.distutils.misc_util import get_numpy_include_dirs
        numpy_includes = get_numpy_include_dirs()
        for ext in self.extensions:
            ext.include_dirs.extend(numpy_includes)
        build_ext.build_extensions(self)
logging.basicConfig()
log = logging.getLogger(__file__)

# Enable debug logging when run as `python -W all setup.py ...`.
if 'all' in warnoptions:
    log.level = logging.DEBUG

# Handle UTF-8 encoding of certain text files
# (Python 2's open() has no encoding parameter).
open_kwds = {}
if v >= (3,):
    open_kwds['encoding'] = 'utf-8'

with open('README.rst', 'r', **open_kwds) as fp:
    readme = fp.read()

with open('CREDITS.txt', 'r', **open_kwds) as fp:
    credits = fp.read()

with open('CHANGES.txt', 'r', **open_kwds) as fp:
    changes = fp.read()

# PyPI long description is README + credits + changelog concatenated.
long_description = readme + '\n\n' + credits + '\n\n' + changes
# Prepare build opts and args for the speedups extension module.
include_dirs = []
library_dirs = []
libraries = ['geos_c']  # link against the GEOS C API
extra_link_args = []

# Cython extension modules; NumPy include dirs are added by BuildExtNumpyInc.
ext_modules = [
    Extension(
        "shapely.speedups._speedups",
        ["shapely/speedups/_speedups.pyx"],
        include_dirs=include_dirs,
        library_dirs=library_dirs,
        libraries=libraries,
    ),
    Extension(
        "shapely.vectorized._vectorized",
        sources=["shapely/vectorized/_vectorized.pyx"],
        libraries=libraries,
    ),
]
# Package metadata and build configuration.
setup(
    name='Shapely',
    requires=['Python (>=2.6)', 'libgeos_c (>=3.3)'],
    # Version is derived from the git tag by setuptools-scm and written to
    # shapely/_version.py at build time.
    use_scm_version={
        'version_scheme': 'guess-next-dev',
        'local_scheme': 'dirty-tag',
        'write_to': 'shapely/_version.py'
    },
    setup_requires=[
        'setuptools>=18.0',
        'setuptools-scm>1.5.4',
        'Cython>=0.19.2',
        'numpy>=1.8.0',
        'pytest-runner',
    ],
    install_requires=[
        'numpy>=1.8.0'
    ],
    extras_require={
        "tests:python_version=='2.6'": ["unittest2"],
        "docs": [
            "sphinx>=1.2",
            "descartes>=1.0.1",
            "matplotlib>=1.2"
        ],
        "tests": ['pytest>=1.0'],
    },
    description='Geometric objects, predicates, and operations',
    license='BSD',
    keywords='geometry topology gis',
    author='Sean Gillies',
    author_email='sean.gillies@gmail.com',
    maintainer='Sean Gillies',
    maintainer_email='sean.gillies@gmail.com',
    url='https://github.com/Toblerity/Shapely',
    long_description=long_description,
    packages=find_packages(
        exclude=["*.tests", "*.tests.*", "tests.*", 'tests']),
    ext_modules=ext_modules,
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Topic :: Scientific/Engineering :: GIS',
    ],
    # Hook in the NumPy-aware build_ext defined above.
    cmdclass={'build_ext': BuildExtNumpyInc},
    package_data={'': ['*.pxi'],},
)
|
{"hexsha": "00167c53318d39910fd24d98e38d7238d98c77e4", "size": 4029, "ext": "py", "lang": "Python", "max_stars_repo_path": "setup.py", "max_stars_repo_name": "mindw/shapely", "max_stars_repo_head_hexsha": "2f552833cef80ec3fc4990e8df10cc153d41d5be", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "setup.py", "max_issues_repo_name": "mindw/shapely", "max_issues_repo_head_hexsha": "2f552833cef80ec3fc4990e8df10cc153d41d5be", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "setup.py", "max_forks_repo_name": "mindw/shapely", "max_forks_repo_head_hexsha": "2f552833cef80ec3fc4990e8df10cc153d41d5be", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.5744680851, "max_line_length": 79, "alphanum_fraction": 0.6259617771, "include": true, "reason": "from numpy", "num_tokens": 1032}
|
from torch.utils.data.dataset import Dataset
import os
import cv2
from PIL import Image
import numpy as np
from sklearn.preprocessing import LabelEncoder # CrossEntropyLoss expects class indices
class Mit67Dataset(Dataset):
    """Image dataset laid out as one subdirectory per class.

    Collects every image path under *path*, encodes the directory names as
    integer class indices (CrossEntropyLoss expects class indices), and
    applies an albumentations-style transform on access.
    """

    def __init__(self, path, transform, enc=None):
        samples = []
        labels = []
        for class_name in os.listdir(path):
            class_dir = os.path.join(path, class_name)
            for file_name in os.listdir(class_dir):
                samples.append(os.path.join(path, class_name, file_name))
                labels.append(class_name)
        self.data_len = len(samples)
        self.X = np.array(samples)
        self.y = np.array(labels)
        if enc is None:
            # Fit a fresh label encoder on this split's class names.
            self.enc = LabelEncoder()
            self.enc = self.enc.fit(self.y)
        else:
            # Reuse an encoder fitted elsewhere so indices stay consistent
            # across splits.
            self.enc = enc
        self.y = self.enc.transform(self.y)
        self.transform = transform

    def __getitem__(self, index):
        img = Image.open(self.X[index])
        if self.transform is not None:
            # Transform follows the albumentations call convention:
            # takes image=ndarray, returns a dict with an 'image' entry.
            augmented = self.transform(image=np.array(img))
            img = augmented['image']
        return img, self.y[index]

    def __len__(self):
        return self.data_len
|
{"hexsha": "7008d4155bf8a124e10001957343ae8dec649010", "size": 1312, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/cnn/dataset.py", "max_stars_repo_name": "jordiae/DeepLearning-MAI", "max_stars_repo_head_hexsha": "e12b6975d8de6cbe89f812bf691a7f7e95213552", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-02-25T04:54:01.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-25T04:54:01.000Z", "max_issues_repo_path": "src/cnn/dataset.py", "max_issues_repo_name": "jordiae/DeepLearning-MAI", "max_issues_repo_head_hexsha": "e12b6975d8de6cbe89f812bf691a7f7e95213552", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-04-30T21:15:57.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-08T01:58:29.000Z", "max_forks_repo_path": "src/cnn/dataset.py", "max_forks_repo_name": "jordiae/DeepLearning-MAI", "max_forks_repo_head_hexsha": "e12b6975d8de6cbe89f812bf691a7f7e95213552", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.0, "max_line_length": 88, "alphanum_fraction": 0.5914634146, "include": true, "reason": "import numpy", "num_tokens": 285}
|
% -*- root: developer-guide.tex -*-
\section{Random Clifford sampling procedure}
This section provides documentation for the routine found in \texttt{src/cliffords/swap-representation.lisp}. The n-qubit Clifford group grows rapidly with the number of qubits, in particular as $\prod^n_{i=1} 2(4^i - 1)4^i$. In addition, the Clifford group often comes about in quantum computation, in particular because it stabilizes the Pauli operators, and can thus be written as a basis map that only has size polynomial in the number of qubits under investigation. In addition, schemes like randomized benchmarking leverage the fact that the Clifford group is a unitary 2-design to estimate the error on a gate.
In routines like randomized benchmarking it is necessary to sample randomly from the Clifford group, and one could imagine a form of classical benchmarking where random Clifford circuits are sampled, simulated, and compared with quantum hardware. From the Gottesman-Knill theorem, this simulation can be done efficiently.
The naive approach to sampling from the Clifford group scales poorly - generating the entire group quickly becomes infeasible after two qubits. One way of attempting to improve this would be to still do a tree search over the group, but to implement a more intelligent pruning technique. Namely, if we can recognize that a collection of paths will lead to the same kind of sub-trees, we can ignore those paths, and cut down the search time.
A natural way to attempt this is to consider Clifford elements to be equivalent up to \SWAP\ operations. The \SWAP\ group is an asymptotically large, easy to understand, and relatively uninteresting subgroup of the Clifford group. In particular, looking at $C_n/\SWAP_n$ we see that the sizes grow more slowly, suppressed by a factor of $n!$.
In order to work with this subset of the Clifford group, one needs to generate a procedure for picking out a canonical representative for each coset of the \SWAP\ group in the Clifford group efficiently. Such a procedure is given (currently without proof) below, and implemented in the code. A nice property of this particular routine is that it canonizes \SWAP\ operations to the identity.
\begin{algorithm}[H]
\caption*{\textsc{\textbf{Canonical Representative}}}
\hspace*{\algorithmicindent} \textbf{Input} $c\in C_n$, $c\equiv[X_1, Z_1, ..., X_n, Z_n]$\\
\hspace*{\algorithmicindent} \textbf{Output} $\Pi(c)$, $\Pi\in \SWAP_n$\\
\begin{algorithmic}
\State \textbf{initialize} $\Pi$ = Identity
\For{$1 \leq i \leq n$}
\State \textbf{defun} MaxIndices(A, k, n) := \{$\ell\mid A_\ell$ = n and $\ell >k$\}
\State indices = MaxIndices(M($X_i$), i, max($M(X_i)$))
\For{$i \leq j \leq n$}
\State curr\_max = max(\{$M(X_j)_\ell\mid\ell\in$indices\})
\State indices = indices $\cap$ MaxIndices(M($X_j$), j, curr\_max)
\State curr\_max = max(\{$M(Z_j)_\ell\mid\ell\in$indices\})
\State indices = indices $\cap$ MaxIndices(M($Z_j$), j, curr\_max)
\EndFor
\State first = indices[0]
\State $\pi := \SWAP(\textrm{first}, \Pi^{-1}(0))$
\State set $\Pi := \pi\circ\Pi$
\EndFor
\State \Return $\Pi(c)$
\end{algorithmic}
\end{algorithm}
The main idea behind this algorithm is that for each basis vector (sorted in a standard order) we can choose swaps such that the $i$\textsuperscript{th} image has a Pauli term with the largest base four representation in the $i$\textsuperscript{th} position. This immediately satisfies the property that SWAPs are canonized to the identity, if the basis vectors were sorted as $X_1, Z_1, ..., X_n, Z_n$. The only remaining problem is that there are in general many tensor factors in the image that have the same highest base four representation. To break these ties unambiguously we must iterate through the remaining basis vectors, indexed by $j$, and look at their images. If we call the indices of the tied tensor factors in the image `indices` as above, then we can break ties in the $i$\textsuperscript{th} image by looking at the base four representation of all elements of `indices`, and only keeping the terms that are maximal for each $j$. This process will either terminate with one unique element, in which case we should swap that element to the $i$\textsuperscript{th} position, or it will terminate with many elements, from which we are free to pick one. If there is more than one element, they have the same base four representation in each image, are therefore indistinguishable by swapping, and we are free to pick any one in the $i$\textsuperscript{th} step.
As a final remark, we give a rough estimate of the runtime of this algorithm. The number of basis vectors is linear in the number of qubits, $n$. For each (even) basis vector, we look at all following basis vectors, of which there are at most $n$. In addition, for each following basis vector, we must look at all tensor factors in its image, which is also $n$. Therefore the runtime of this algorithm is $\mathcal{O}(n^3)$.
|
{"hexsha": "60114909f3d7bbffbd3d742cf6cf572dd2dcc427", "size": 5004, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "doc/canonical-representation.tex", "max_stars_repo_name": "stylewarning/quilc", "max_stars_repo_head_hexsha": "86b017109d185a7d03a98cc223aee1e02b32d584", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 348, "max_stars_repo_stars_event_min_datetime": "2019-02-02T10:50:43.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-02T16:30:11.000Z", "max_issues_repo_path": "doc/canonical-representation.tex", "max_issues_repo_name": "stylewarning/quilc", "max_issues_repo_head_hexsha": "86b017109d185a7d03a98cc223aee1e02b32d584", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 501, "max_issues_repo_issues_event_min_datetime": "2019-02-04T23:11:38.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-04T18:25:49.000Z", "max_forks_repo_path": "doc/canonical-representation.tex", "max_forks_repo_name": "stylewarning/quilc", "max_forks_repo_head_hexsha": "86b017109d185a7d03a98cc223aee1e02b32d584", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 57, "max_forks_repo_forks_event_min_datetime": "2019-02-04T21:15:23.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-21T22:04:18.000Z", "avg_line_length": 116.3720930233, "max_line_length": 1376, "alphanum_fraction": 0.7539968026, "num_tokens": 1268}
|
from ctypes import *
from numpy.random import normal
import time
import numpy as np
from dolfin import *
from mesh_generation import sphere_mesh
from utils import solve_problem
from Problem import Problem
# Load the compiled spherical-harmonics helper library via ctypes and
# declare the C return/argument types of the routines used below.
path_to_c = './fast_spher_harms.so'
sph = CDLL(path_to_c)
sph.sin_term.restype = c_float
sph.cos_term.restype = c_float
sph.fast_grf.restype = c_float
# fast_grf(num_terms, x, y, z, random_coefficients) -> field value at (x, y, z)
sph.fast_grf.argtypes = (
    c_int,c_float,c_float,c_float, POINTER(c_float))
def GRF(noterms, rands, x, y, z):
    """Evaluate the Gaussian random field at the point (x, y, z).

    ``noterms`` is the number of expansion terms; ``rands`` is a ctypes
    float pointer holding the random coefficients read by the C routine.
    """
    c_args = (c_int(noterms), c_float(x), c_float(y), c_float(z), rands)
    return sph.fast_grf(*c_args)
def wn(rands, L):
    """Build a FEniCS expression evaluating the random-field right-hand side.

    ``rands`` (sequence of floats) is copied into a ctypes float array so the
    C routine can read it; ``L`` is the number of expansion terms.  Returns a
    degree-2 scalar ``UserExpression``.
    """
    coeff_buf = (c_float * len(rands))(*rands)
    coeff_ptr = cast(coeff_buf, POINTER(c_float))

    class RightHandSide(UserExpression):
        # Scalar-valued expression: single component, empty value shape.
        def eval(self, value, x):
            value[0] = GRF(L, coeff_ptr, x[0], x[1], x[2])

        def value_shape(self):
            return ()

    return RightHandSide(degree=2)
def problem_const(L, beta, kappa, k=None, h=None):
    """Assemble the FEM problem on the unit sphere for the given parameters.

    ``k`` (time step) and ``h`` (mesh size) default to values tied to the
    truncation level ``L`` and smoothness parameter ``beta``.  Returns a
    ``Problem`` wrapping the mesh, function space and assembled matrices.
    """
    if k is None:
        k = 1 / (beta * np.log(L + 1))
    if h is None:
        h = (L + 1) ** (-(2 * beta + 2) / 2)
    surface = sphere_mesh(h)  # not yet implemented - user supplied meshes TODO
    V = FunctionSpace(surface, FiniteElement("Lagrange", surface.ufl_cell(), 1))
    u = TrialFunction(V)
    v = TestFunction(V)
    # Stiffness and mass matrices.
    A = assemble(inner(grad(u), grad(v)) * dx)
    B = assemble(u * v * dx)

    def surf():
        return surface

    def fenstuff():
        return (V, u, v)

    def AB():
        return (A, B)

    return Problem(surf, fenstuff, wn, beta, k, kappa, AB, rands=[], L=L)
|
{"hexsha": "318e66905dc7fdf33430acb143024f997301f61b", "size": 1624, "ext": "py", "lang": "Python", "max_stars_repo_path": "field_sfem.py", "max_stars_repo_name": "erik-grennberg-jansson/matern_sfem", "max_stars_repo_head_hexsha": "1e9468084abf41cc0ae85f1b4b1254904ed2d72f", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "field_sfem.py", "max_issues_repo_name": "erik-grennberg-jansson/matern_sfem", "max_issues_repo_head_hexsha": "1e9468084abf41cc0ae85f1b4b1254904ed2d72f", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "field_sfem.py", "max_forks_repo_name": "erik-grennberg-jansson/matern_sfem", "max_forks_repo_head_hexsha": "1e9468084abf41cc0ae85f1b4b1254904ed2d72f", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.7777777778, "max_line_length": 87, "alphanum_fraction": 0.6422413793, "include": true, "reason": "import numpy,from numpy", "num_tokens": 496}
|
# Parameters controlling how a plot appears
# Default CSS-style font stacks: one for titles/major labels, one for
# tick labels and key entries.
const title_font_desc = "'PT Sans','Helvetica Neue','Helvetica',sans-serif"
const label_font_desc = "'PT Sans Caption','Helvetica Neue','Helvetica',sans-serif"
# Highlight color for discrete scales: plain white, independent of the fill.
# Discrete scales always highlight with a white stroke, whatever the fill.
default_discrete_highlight_color(fill_color::ColorValue) = RGB(1, 1, 1)
# Continuous scales highlight by darkening the fill (lightness clamped at 0).
function default_continuous_highlight_color(fill_color::ColorValue)
    lch = convert(LCHab, fill_color)
    LCHab(max(0, lch.l - 40), lch.c, lch.h)
end
# Stroke color: the fill darkened by 15 lightness units.
function default_stroke_color(fill_color::ColorValue)
    lch = convert(LCHab, fill_color)
    LCHab(lch.l - 15, lch.c, lch.h)
end
# Lowlight: a pale, washed-out color keeping only the fill's hue.
function default_lowlight_color(fill_color::ColorValue)
    LCHab(90, 20, convert(LCHab, fill_color).h)
end
# Choose the boxplot middle color by lightening the fill color.
function default_middle_color(fill_color::ColorValue)
    lch = convert(LCHab, fill_color)
    LCHab(lch.l + 40, lch.c, lch.h)
end
@varset Theme begin
    # If the color aesthetic is not mapped to anything, this is the color that
    # is used.
    default_color, ColorOrNothing, LCHab(70, 60, 240)
    # Default size when the size aesthetic is not mapped.
    default_point_size, Measure, 0.9mm
    # Width of lines in the line geometry.
    line_width, Measure, 0.3mm
    # Background color of the plot.
    panel_fill, ColorOrNothing, nothing
    # Border color of the plot panel.
    panel_stroke, ColorOrNothing, nothing
    # Opacity of the plot background panel.
    panel_opacity, Float64, 1.0
    # Grid line color.
    grid_color, ColorOrNothing, color("#D0D0E0")
    # Dash pattern of grid lines (nothing for solid lines).
    grid_strokedash, Maybe(Vector), [0.5mm, 0.5mm]
    # Grid lines for focused item.
    grid_color_focused, ColorOrNothing, color("#A0A0A0")
    # Width of grid lines.
    grid_line_width, Measure, 0.2mm
    # Font name, size, and color used for tick labels, entries in keys, etc.
    minor_label_font, String, label_font_desc
    minor_label_font_size, Measure, 8pt
    minor_label_color, ColorOrNothing, color("#6c606b")
    # Font name, size and color used for axis labels, key title, etc.
    major_label_font, String, title_font_desc
    major_label_font_size, Measure, 11pt
    major_label_color, ColorOrNothing, color("#564a55")
    # Font name, size and color used for labels on plot elements.
    point_label_font, String, label_font_desc
    point_label_font_size, Measure, 8pt
    point_label_color, ColorOrNothing, color("#4c404b")
    # Font name, size and color used for key titles.
    key_title_font, String, title_font_desc
    key_title_font_size, Measure, 11pt
    key_title_color, ColorOrNothing, color("#362a35")
    # Font name, size and color used for key entries.
    key_label_font, String, title_font_desc
    key_label_font_size, Measure, 8pt
    key_label_color, ColorOrNothing, color("#4c404b")
    # How many gradations to show in a continuous color key.
    key_color_gradations, Int, 40
    # Spacing between bars for Geom.bar.
    bar_spacing, Measure, 0.0mm
    # Spacing between boxplots in Geom.boxplot.
    boxplot_spacing, Measure, 1mm
    # Length of caps on error bars.
    errorbar_cap_length, Measure, 3mm
    # Lines are drawn in a slightly different color than fills, e.g. to
    # differentiate histogram bars from error bars.
    stroke_color, Function, default_stroke_color
    # Points, etc, are highlighted by stroking in slightly different color. This
    # is the stroke width.
    highlight_width, Measure, 0.3mm
    # Functions mapping fill color to stroke color for highlights
    # (discrete vs. continuous color scales).
    discrete_highlight_color, Function, default_discrete_highlight_color
    continuous_highlight_color, Function, default_continuous_highlight_color
    # A function mapping fill color to a duller background fill color. Used for
    # Geom.ribbon in particular so lines stand out against it.
    lowlight_color, Function, default_lowlight_color
    # Opacity of geometry filled with lowlight_color.
    lowlight_opacity, Float64, 0.6
    # A function mapping base fill color to the color of the median marker in a
    # boxplot.
    middle_color, Function, default_middle_color
    # Width of the middle line in a boxplot.
    middle_width, Measure, 0.6mm
    # Horizontal position of the title of color key guides. One of :left,
    # :right, :center.
    guide_title_position, Symbol, :left
    # Shape used in color keys for color swatches. Either :square or :circle.
    colorkey_swatch_shape, Symbol, :square
    # One of :left, :right, :top, :bottom, :none determining where color keys
    # and the like should be placed.
    key_position, Symbol, :right
    # TODO: This stuff is too incomprehensible to be in theme, I think. Put it
    # somewhere else.
    # Number of annealing iterations.
    label_placement_iterations, Int, 1000
    # Penalty for a label not being contained within the plot frame.
    label_out_of_bounds_penalty, Float64, 10.0
    # Penalty for making a label hidden to avoid overlaps.
    label_hidden_penalty, Float64, 0.5
    # Probability of proposing a visibility flip during label layout.
    label_visibility_flip_pr, Float64, 0.2
end
const default_theme = Theme()
|
{"hexsha": "490dc41cabe49ba9c9b69ac7edf7dca17977e1a3", "size": 5785, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/theme.jl", "max_stars_repo_name": "mbauman/Gadfly.jl", "max_stars_repo_head_hexsha": "04b1b7deb29d7f40fef2e2d78e3e0ac6adbdab3f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/theme.jl", "max_issues_repo_name": "mbauman/Gadfly.jl", "max_issues_repo_head_hexsha": "04b1b7deb29d7f40fef2e2d78e3e0ac6adbdab3f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/theme.jl", "max_forks_repo_name": "mbauman/Gadfly.jl", "max_forks_repo_head_hexsha": "04b1b7deb29d7f40fef2e2d78e3e0ac6adbdab3f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.9316770186, "max_line_length": 87, "alphanum_fraction": 0.6710458081, "num_tokens": 1470}
|
"""Create Bosch competition datasets with leak"""
## Bosch Production Line Performance - Kaggle
## 1) Download train and test data from Slack public URLs
## 2) Unzip .zip files
## 3) Combine train and test data
## 4) Create leak features for train and test data based on row ids and row order
## 5) Import the data into Driverless AI for further experimentation
from typing import Union, List
from h2oaicore.data import CustomData
import datatable as dt
import numpy as np
import pandas as pd
class BoschData(CustomData):
    """Bosch Production Line Performance (Kaggle) data recipe.

    Downloads the full train/test csvs from the Slack URLs, then appends the
    well-known "leak" features derived from row Ids and the row ordering by
    earliest station date (Min_Date).
    """
    @staticmethod
    def create_data(X: dt.Frame = None) -> Union[str, List[str],
                                                 dt.Frame, List[dt.Frame],
                                                 np.ndarray, List[np.ndarray],
                                                 pd.DataFrame, List[pd.DataFrame]]:
        # import packages
        import os
        import gc
        from h2oaicore.systemutils_more import download
        from h2oaicore.systemutils import config
        import zipfile

        # define constants
        train_data_url = "https://files.slack.com/files-pri/T0329MHH6-F012UF3T2J0/download/bosch_train_full.zip?pub_secret=c59d0f381a"
        test_data_url = "https://files.slack.com/files-pri/T0329MHH6-F013ES4F6N4/download/bosch_test_full.zip?pub_secret=8726e8b7e2"

        # function for unzipping data
        def extract_zip(file, output_directory):
            with zipfile.ZipFile(file, "r") as zip_ref:
                zip_ref.extractall(output_directory)

        # download and unzip files
        temp_path = os.path.join(config.data_directory, "recipe_tmp", "bosch")
        os.makedirs(temp_path, exist_ok=True)
        for link in [train_data_url, test_data_url]:
            raw_file = download(link, dest_path=temp_path)
            extract_zip(raw_file, temp_path)

        # parse with datatable
        train_path = os.path.join(temp_path, "bosch_train_full.csv")
        test_path = os.path.join(temp_path, "bosch_test_full.csv")
        X_train = dt.fread(train_path)
        X_test = dt.fread(test_path)

        # add leak features
        train = X_train[:, ["Id", "Response"]].to_pandas()
        test = X_test[:, ["Id"]].to_pandas()
        # Date columns: every column whose name contains "D".
        date_features = [colname for colname in X_test.names if "D" in colname]
        # Min_Date = earliest timestamp recorded for the part across stations.
        train["Min_Date"] = X_train[:, date_features].to_pandas().min(axis=1).values
        test["Min_Date"] = X_test[:, date_features].to_pandas().min(axis=1).values
        ntrain = train.shape[0]  # (currently unused)
        train_test = pd.concat([train, test]).reset_index(drop=True)
        # Order all rows (train + test together) by start date, ties by Id.
        train_test.sort_values(by=["Min_Date", "Id"], ascending=True, inplace=True)
        # Leak_1: Id gap to the previous row in this ordering.
        # Leak_2: NOTE(review) - iloc[::-1].diff() is re-aligned by index on
        # assignment, which makes it Id[i] minus Id of the *next* sorted row;
        # this relies on pandas index alignment - verify before refactoring.
        train_test["Leak_1"] = train_test["Id"].diff()
        train_test["Leak_2"] = train_test["Id"].iloc[::-1].diff()
        # Leak_3/4: Response of the neighboring rows (NaN where the neighbor
        # is a test row, since test has no Response).
        train_test["Leak_3"] = train_test["Response"].shift(1)
        train_test["Leak_4"] = train_test["Response"].shift(-1)
        train_test = dt.Frame(train_test.drop("Response", axis=1))
        train_test.key = "Id"
        # Join the leak features back onto the full frames by Id.
        X_train = X_train[:, :, dt.join(train_test)]
        X_test = X_test[:, :, dt.join(train_test)]
        return {"bosch_train_leak": X_train, "bosch_test_leak": X_test}
|
{"hexsha": "8b8f30eb7ad1ad1fcc4e3e609ab5312a95fe90ea", "size": 3197, "ext": "py", "lang": "Python", "max_stars_repo_path": "data/kaggle_bosch.py", "max_stars_repo_name": "james94/driverlessai-recipes", "max_stars_repo_head_hexsha": "87c35460db59ffda8dc18ad82cb3a9b8291410e4", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 194, "max_stars_repo_stars_event_min_datetime": "2019-04-23T10:25:13.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T04:19:28.000Z", "max_issues_repo_path": "data/kaggle_bosch.py", "max_issues_repo_name": "james94/driverlessai-recipes", "max_issues_repo_head_hexsha": "87c35460db59ffda8dc18ad82cb3a9b8291410e4", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 50, "max_issues_repo_issues_event_min_datetime": "2019-06-24T20:17:51.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-16T20:05:37.000Z", "max_forks_repo_path": "data/kaggle_bosch.py", "max_forks_repo_name": "james94/driverlessai-recipes", "max_forks_repo_head_hexsha": "87c35460db59ffda8dc18ad82cb3a9b8291410e4", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 85, "max_forks_repo_forks_event_min_datetime": "2019-03-27T12:26:43.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-27T13:15:37.000Z", "avg_line_length": 38.5180722892, "max_line_length": 134, "alphanum_fraction": 0.6349702846, "include": true, "reason": "import numpy", "num_tokens": 791}
|
from namsa import SupercellBuilder, MSAGPU
from utils import *
import numpy as np
from time import time
import sys, os, re
import h5py
from mpi4py import MPI
from itertools import chain
import tensorflow as tf
import lmdb
# MPI context: one rank per process; ranks shard the cif list below.
comm = MPI.COMM_WORLD
comm_size = comm.Get_size()
comm_rank = comm.Get_rank()
def simulate(filehandle, cif_path, idx=None, gpu_id=0, clean_up=False):
    """Run one GPU multislice CBED simulation for a cif file and persist it.

    Parameters
    ----------
    filehandle : h5py.Group, lmdb.Transaction or tf.python_io.TFRecordWriter
        Open sink the processed CBED stack and projected potential go to.
    cif_path : str
        Path of the input .cif structure file.
    idx : int, optional
        Record index; only used by the lmdb sink.
    gpu_id : int
        GPU rank handed to the multislice device context.
    clean_up : bool
        When True, also destroy the GPU context (not just the allocations).

    Returns
    -------
    bool
        True when the simulation produced valid output and was written,
        False when the arrays contained NaNs or had an unexpected shape.
    """
    # Load cif and derive the simulation parameters.
    spgroup_num, matname = parse_cif_path(cif_path)
    index = 1  # which entry of the per-cif orientation/parameter lists to use
    sp = SupercellBuilder(cif_path, verbose=False, debug=False)
    t = time()
    sim_params = get_sim_params(sp)
    print('time to get params: %2.3f' % (time() - t))
    z_dir = sim_params['z_dirs'][index]
    y_dir = sim_params['y_dirs'][index]
    cell_dim = sim_params['cell_dim']
    slab_t = sim_params['slab_t']
    sim_params['space_group'] = spgroup_num
    sim_params['material'] = matname
    # Build the orthogonal supercell along the chosen projections.
    t = time()
    sp.build_unit_cell()
    sp.make_orthogonal_supercell(supercell_size=np.array([cell_dim, cell_dim, slab_t]),
                                 projec_1=y_dir, projec_2=z_dir)
    print('time to build cell : %2.3f' % (time() - t))
    # Simulation parameters.
    slice_thickness = sim_params['d_hkl'][index]
    energy = sim_params['energy']
    semi_angle = sim_params['semi_angles'][index]
    probe_params = sim_params['probe_params']
    sampling = sim_params['sampling']
    grid_steps = sim_params['grid_steps']
    # Set up the GPU context and run the multislice simulation.
    t = time()
    msa = MSAGPU(energy, semi_angle, sp.supercell_sites, sampling=sampling,
                 verbose=False, debug=False)
    ctx = msa.setup_device(gpu_rank=gpu_id)
    msa.calc_atomic_potentials()
    msa.build_potential_slices(slice_thickness)
    msa.build_probe(probe_dict=probe_params)
    msa.generate_probe_positions(grid_steps=grid_steps)
    print('time to create context + other sims inputs : %2.3f' % (time() - t))
    t = time()
    msa.plan_simulation()
    msa.multislice()
    print('time to simulate cbed : %2.3f' % (time() - t))
    # Post-process CBED stack and projected potential.
    # FIX: np.bool was deprecated and removed in NumPy 1.24; the builtin
    # bool is the equivalent dtype and works on all NumPy versions.
    mask = msa.bandwidth_limit_mask(sampling, radius=1./3).astype(bool)
    proj_potential = process_potential(msa.potential_slices, mask=mask, normalize=True, fp16=True)
    cbed = process_cbed(msa.probes, normalize=True, fp16=True)
    # Update sim_params dict with the final simulation state.
    sim_params = update_sim_params(sim_params, msa_cls=msa, sp_cls=sp)
    # NOTE(review): np.all only flags fully-NaN arrays; if any NaN should
    # invalidate a record this ought to be np.any -- confirm intent.
    has_nan = np.all(np.isnan(cbed)) or np.all(np.isnan(proj_potential))
    wrong_shape = cbed.shape != (1024, 512, 512) or proj_potential.shape != (1, 512, 512)
    if has_nan or wrong_shape:
        # Invalid output: free GPU resources and report failure.
        if clean_up and ctx is not None:
            msa.clean_up(ctx=ctx, vars=msa.vars)
        else:
            msa.clean_up(ctx=None, vars=msa.vars)
        return False
    else:
        # Dispatch on the sink type: h5 group, lmdb transaction, tfrecord writer.
        if isinstance(filehandle, h5py.Group):
            write_h5(filehandle, cbed, proj_potential, sim_params)
        elif isinstance(filehandle, lmdb.Transaction):
            write_lmdb(filehandle, idx, cbed, proj_potential, sim_params)
        elif isinstance(filehandle, tf.python_io.TFRecordWriter):
            write_tfrecord(filehandle, cbed, proj_potential, sim_params)
        # Free GPU allocations (and the context itself when requested).
        if clean_up and ctx is not None:
            msa.clean_up(ctx=ctx, vars=msa.vars)
        else:
            msa.clean_up(ctx=None, vars=msa.vars)
        return True
def main(cifdir_path, outdir_path, save_mode="h5"):
    """Shard the cif list across MPI ranks and run simulations, writing each
    rank's results to its own h5 / tfrecord / lmdb batch file.

    NOTE(review): this is the debug variant -- num_sims is overridden to
    comm_size * 2 below, so only two cifs per rank are processed.
    """
    t = time()
    cifpaths = get_cif_paths(cifdir_path)
    batch_num, _ = np.divmod(comm_rank, 6)
    num_sims = cifpaths.size
    # Debug override: cap the run at two simulations per rank.
    num_sims = comm_size * 2
    if save_mode == "h5":
        # HDF5
        h5path = os.path.join(outdir_path, 'batch_%d.h5'% comm_rank)
        if os.path.exists(h5path):
            mode ='r+'
        else:
            mode ='w'
        with h5py.File(h5path, mode=mode) as f:
            # Round-robin shard: rank r takes cifs r, r+size, r+2*size, ...
            for (idx, cif_path) in enumerate(cifpaths[comm_rank:num_sims:comm_size]):
                # Keep the GPU context alive on the final iteration only
                # (manual is False there, so clean_up skips the context).
                manual = idx < ( num_sims - comm_size)
                spgroup_num, matname = parse_cif_path(cif_path)
                try:
                    h5g = f.create_group(matname)
                except Exception as e:
                    print("rank=%d" % comm_rank, e, "group=%s exists" % matname)
                    h5g = f[matname]
                # NOTE(review): bool(idx % 500) is True for all idx NOT
                # divisible by 500 -- likely the inverse was intended.
                if comm_rank == 0 and bool(idx % 500):
                    print('time=%3.2f, num_sims= %d' %(time() - t, idx * comm_size))
                try:
                    simulate(h5g, cif_path, gpu_id=int(np.mod(comm_rank, 6)), clean_up=manual)
                    print('rank=%d, finished simulation=%s' % (comm_rank, cif_path))
                except Exception as e:
                    print("rank=%d, skipped simulation=%s, error=%s" % (comm_rank, cif_path, format(e)))
    # TFRECORDS
    elif save_mode == "tfrecord":
        tfrecpath = os.path.join(outdir_path, 'batch_%d.tfrecords'% comm_rank)
        with tf.python_io.TFRecordWriter(tfrecpath) as tfrec:
            for (idx, cif_path) in enumerate(cifpaths[comm_rank:num_sims:comm_size]):
                manual = idx < ( num_sims - comm_size)
                spgroup_num, matname = parse_cif_path(cif_path)
                if comm_rank == 0 and bool(idx % 500):
                    print('time=%3.2f, num_sims= %d' %(time() - t, idx * comm_size))
                try:
                    status = simulate(tfrec, cif_path, gpu_id=int(np.mod(comm_rank, 6)), clean_up=manual)
                    if status:
                        print('rank=%d, finished simulation=%s' % (comm_rank, cif_path))
                    else:
                        print("rank=%d, skipped simulation=%s, error=NaN" % (comm_rank, cif_path))
                except Exception as e:
                    print("rank=%d, skipped simulation=%s, error=%s" % (comm_rank, cif_path, format(e)))
    # LMDB
    elif save_mode == "lmdb":
        lmdbpath = os.path.join(outdir_path, 'batch_eval_%d.db' % comm_rank)
        env = lmdb.open(lmdbpath, map_size=int(100e9), map_async=True, writemap=True, create=True) # max of 100 GB
        with env.begin(write=True) as txn:
            # fail counts skipped sims so lmdb record indices stay contiguous.
            fail = 0
            for (idx, cif_path) in enumerate(cifpaths[comm_rank:num_sims:comm_size]):
                manual = idx < ( num_sims - comm_size)
                spgroup_num, matname = parse_cif_path(cif_path)
                if comm_rank == 0 and bool(idx % 500):
                    print('time=%3.2f, num_sims= %d' %(time() - t, idx * comm_size))
                try:
                    status = simulate(txn, cif_path, idx=idx-fail, gpu_id=int(np.mod(comm_rank, 6)), clean_up=manual)
                    if status:
                        print('rank=%d, finished simulation=%s' % (comm_rank, cif_path))
                        env.sync()
                    else:
                        print("rank=%d, skipped simulation=%s, error=NaN" % (comm_rank, cif_path))
                        fail += 1
                except Exception as e:
                    print("rank=%d, skipped simulation=%s, error=%s" % (comm_rank, cif_path, format(e)))
                    fail += 1
            # write lmdb headers describing the record dtypes/shapes
            headers = {b"input_dtype": bytes('float16', "ascii"),
                       b"input_shape": np.array([1024,512,512]).tostring(),
                       b"output_shape": np.array([1,512,512]).tostring(),
                       b"output_dtype": bytes('float16', "ascii")}
            for key, val in headers.items():
                txn.put(key, val)
        env.sync()
    #comm.Barrier()
    # time the simulation run
    sim_t = time() - t
    if comm_rank == 0:
        print("took %3.3f seconds" % sim_t)
def main_test(cifdir_path):
    """Smoke-test the train/test split of the cif path list."""
    train_paths, test_paths = get_cif_paths(cifdir_path, ratio=0.2)
    print("train", train_paths[:10])
    print("test", test_paths[:10])
if __name__ == "__main__":
    # CLI: three trailing args -> run the simulations; one arg -> only test
    # the cif path splitting; anything else -> usage message.
    if len(sys.argv) > 2:
        cifdir_path, outdir_path, save_mode = sys.argv[-3:]
        if save_mode not in ["h5", "tfrecord", "lmdb"]:
            print("saving format not of h5, tfrecord, lmdb")
            sys.exit()
        main(cifdir_path, outdir_path, save_mode)
    elif len(sys.argv) == 2:
        cifdir_path = sys.argv[-1]
        main_test(cifdir_path)
    else:
        print("Pass directory paths for sim input files and h5 output files")
|
{"hexsha": "ee290adbdddba8ab657bbbc3c16c8cd2ff7bb20b", "size": 9007, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/summit_scripts/sim_batch_debug.py", "max_stars_repo_name": "nlaanait/namsa", "max_stars_repo_head_hexsha": "55f82ecf1c82601fcb81815d5e60705506c01e1e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "scripts/summit_scripts/sim_batch_debug.py", "max_issues_repo_name": "nlaanait/namsa", "max_issues_repo_head_hexsha": "55f82ecf1c82601fcb81815d5e60705506c01e1e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scripts/summit_scripts/sim_batch_debug.py", "max_forks_repo_name": "nlaanait/namsa", "max_forks_repo_head_hexsha": "55f82ecf1c82601fcb81815d5e60705506c01e1e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.9365853659, "max_line_length": 124, "alphanum_fraction": 0.5899855668, "include": true, "reason": "import numpy", "num_tokens": 2395}
|
'''
Created on Mar 25, 2018
@author: ywz
'''
import numpy, random, os
import tensorflow as tf
from replay_memory import ReplayMemory
from optimizer import Optimizer
from q_network import QNetwork
class DQN:
def __init__(self, config, game, directory, callback=None, summary_writer=None):
self.game = game
self.actions = game.get_available_actions()
self.feedback_size = game.get_feedback_size()
self.callback = callback
self.summary_writer = summary_writer
self.config = config
self.batch_size = config['batch_size']
self.n_episode = config['num_episode']
self.capacity = config['capacity']
self.epsilon_decay = config['epsilon_decay']
self.epsilon_min = config['epsilon_min']
self.num_frames = config['num_frames']
self.num_nullops = config['num_nullops']
self.time_between_two_copies = config['time_between_two_copies']
self.input_scale = config['input_scale']
self.update_interval = config['update_interval']
self.directory = directory
self._init_modules()
def _init_modules(self):
# Replay memory
self.replay_memory = ReplayMemory(history_len=self.num_frames,
capacity=self.capacity,
batch_size=self.batch_size,
input_scale=self.input_scale)
input_shape = self.feedback_size + (self.num_frames,)
# Q-network
self.q_network = QNetwork(input_shape=input_shape, n_outputs=len(self.actions),
network_type=self.config['network_type'], scope='q_network')
# Target network
self.target_network = QNetwork(input_shape=input_shape, n_outputs=len(self.actions),
network_type=self.config['network_type'], scope='target_network')
# Optimizer
self.optimizer = Optimizer(config=self.config,
feedback_size=self.feedback_size,
q_network=self.q_network,
target_network=self.target_network,
replay_memory=self.replay_memory)
# Ops for updating target network
self.clone_op = self.target_network.get_clone_op(self.q_network)
# For tensorboard
self.t_score = tf.placeholder(dtype=tf.float32, shape=[], name='new_score')
tf.summary.scalar("score", self.t_score, collections=['dqn'])
self.summary_op = tf.summary.merge_all('dqn')
    def set_summary_writer(self, summary_writer=None):
        # Propagate the writer to the optimizer too, so training summaries
        # and episode-score summaries end up in the same event file.
        self.summary_writer = summary_writer
        self.optimizer.set_summary_writer(summary_writer)
def choose_action(self, sess, state, epsilon_greedy):
if numpy.random.binomial(1, epsilon_greedy) == 1:
action = random.choice(self.actions)
else:
x = numpy.asarray(numpy.expand_dims(state, axis=0) / self.input_scale, dtype=numpy.float32)
action = self.q_network.get_q_action(sess, x)[0]
return action
def play(self, action):
r, new_state, termination = self.game.play_action(action)
return r, new_state, termination
    def update_target_network(self, sess):
        # Copy the online Q-network weights into the frozen target network.
        sess.run(self.clone_op)
    def train(self, sess, saver=None):
        """Run the DQN training loop for `self.n_episode` episodes.

        Each episode: reset the game, warm up the replay memory with
        `num_nullops` no-op steps, then act epsilon-greedily for up to
        `config['T']` steps. A training step runs every `update_interval`
        trials; the target network is synced and a checkpoint saved every
        `time_between_two_copies` trials.

        NOTE(review): uses self.summary_writer on termination — assumes
        set_summary_writer() was called beforehand; confirm with callers.
        """
        num_of_trials = -1  # global step counter across all episodes
        for episode in range(self.n_episode):
            self.game.reset()
            frame = self.game.get_current_feedback()
            # Warm-up with no-ops so the replay memory can stack full states.
            for _ in range(self.num_nullops):
                r, new_frame, termination = self.play(action=0)
                self.replay_memory.add(frame, 0, r, termination)
                frame = new_frame
            for _ in range(self.config['T']):
                num_of_trials += 1
                # Linear epsilon annealing: from 1.0 down to epsilon_min over
                # the first `epsilon_decay` trials, constant afterwards.
                epsilon_greedy = self.epsilon_min + \
                    max(self.epsilon_decay - num_of_trials, 0) / \
                    self.epsilon_decay * (1 - self.epsilon_min)
                print("epi {}, frame {}k: reward {}, eps {}".format(episode,
                                                    int(num_of_trials / 1000),
                                                    self.game.get_total_reward(),
                                                    epsilon_greedy))
                # NOTE(review): fires at num_of_trials == 0 as well, i.e.
                # before the replay memory holds many transitions — confirm
                # the optimizer tolerates a near-empty memory.
                if num_of_trials % self.update_interval == 0:
                    self.optimizer.train_one_step(sess, num_of_trials, self.batch_size)
                # Act: build the stacked state, choose, play, and store.
                state = self.replay_memory.phi(frame)
                action = self.choose_action(sess, state, epsilon_greedy)
                r, new_frame, termination = self.play(action)
                self.replay_memory.add(frame, action, r, termination)
                frame = new_frame
                if num_of_trials % self.time_between_two_copies == 0:
                    self.update_target_network(sess)
                    self.save(sess, saver)
                if self.callback:
                    self.callback()
                if termination:
                    # Log the episode score to TensorBoard and end the episode.
                    score = self.game.get_total_reward()
                    summary_str = sess.run(self.summary_op, feed_dict={self.t_score: score})
                    self.summary_writer.add_summary(summary_str, num_of_trials)
                    self.summary_writer.flush()
                    break
    def evaluate(self, sess):
        """Run `self.n_episode` evaluation episodes with epsilon fixed at
        `self.epsilon_min` (mostly-greedy policy).

        Transitions are still added to the replay memory — phi() needs recent
        frames to assemble the stacked state. No training step runs here.
        """
        for episode in range(self.n_episode):
            self.game.reset()
            frame = self.game.get_current_feedback()
            # Warm-up no-ops so phi() has enough history to stack frames.
            for _ in range(self.num_nullops):
                r, new_frame, termination = self.play(action=0)
                self.replay_memory.add(frame, 0, r, termination)
                frame = new_frame
            for _ in range(self.config['T']):
                print("episode {}, total reward {}".format(episode,
                                                           self.game.get_total_reward()))
                state = self.replay_memory.phi(frame)
                action = self.choose_action(sess, state, self.epsilon_min)
                r, new_frame, termination = self.play(action)
                self.replay_memory.add(frame, action, r, termination)
                frame = new_frame
                if self.callback:
                    self.callback()
                if termination:
                    break
def save(self, sess, saver, model_name='model.ckpt'):
if saver:
try:
checkpoint_path = os.path.join(self.directory, model_name)
saver.save(sess, checkpoint_path)
except:
pass
def load(self, sess, saver, model_name='model.ckpt'):
if saver:
try:
checkpoint_path = os.path.join(self.directory, model_name)
saver.restore(sess, checkpoint_path)
except:
pass
|
{"hexsha": "eb040524dd701753ecb85a8cb27b48e79ca1c843", "size": 7186, "ext": "py", "lang": "Python", "max_stars_repo_path": "Chapter03/q_learning.py", "max_stars_repo_name": "jvstinian/Python-Reinforcement-Learning-Projects", "max_stars_repo_head_hexsha": "6c97c68351fc4af426cb5c3583d75aebfabac8aa", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 114, "max_stars_repo_stars_event_min_datetime": "2018-10-20T15:32:59.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-21T14:16:25.000Z", "max_issues_repo_path": "Chapter03/q_learning.py", "max_issues_repo_name": "jvstinian/Python-Reinforcement-Learning-Projects", "max_issues_repo_head_hexsha": "6c97c68351fc4af426cb5c3583d75aebfabac8aa", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 11, "max_issues_repo_issues_event_min_datetime": "2018-10-18T12:39:42.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-10T03:28:19.000Z", "max_forks_repo_path": "Chapter03/q_learning.py", "max_forks_repo_name": "jvstinian/Python-Reinforcement-Learning-Projects", "max_forks_repo_head_hexsha": "6c97c68351fc4af426cb5c3583d75aebfabac8aa", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 72, "max_forks_repo_forks_event_min_datetime": "2018-10-12T13:02:32.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-11T13:03:26.000Z", "avg_line_length": 42.7738095238, "max_line_length": 104, "alphanum_fraction": 0.5423044809, "include": true, "reason": "import numpy", "num_tokens": 1322}
|
using Images, MXNet

### LOADING THE MODEL
const MODEL_NAME = "weights/mobilenet-v2/mobilenet_v2"
const MODEL_CLASS_NAMES = "weights/mobilenet-v2/synset.txt"
nnet = mx.load_checkpoint(MODEL_NAME, 0, mx.FeedForward; context = mx.gpu());
synset = readlines(MODEL_CLASS_NAMES);

### SEARCH FOR A LAYER OF INTEREST
layers = mx.get_internals(nnet.arch);
layers_flatten = nothing
layers_to_remove = Symbol[]
# Iterate over all layers until we find the one matching our requirements
# (:pool6) and collect the names of the layers that follow it for removal.
for i = 1:2000
    # BUGFIX: in Julia >= 1.0 an assignment inside a top-level loop creates a
    # new local unless declared `global`; without this, `layers_flatten`
    # stayed `nothing` after the loop and the script failed below.
    global layers_flatten
    layer = layers[i];
    layer_name = mx.get_name(layer)
    if layers_flatten === nothing && layer_name == :pool6
        layers_flatten = layer
    elseif layers_flatten !== nothing
        # layers_to_remove is only mutated (push!), so no `global` needed.
        push!(layers_to_remove, layer_name)
        if layer_name in [:softmax, :label, :prob] break end
    end
end
# Rebuild the architecture as feature extractor: pool6 followed by a Flatten.
nnet.arch = @mx.chain layers_flatten => Flatten()
# Drop the parameters of the removed classification head.
map(x -> delete!(nnet.arg_params, x), layers_to_remove);
map(x -> delete!(nnet.aux_params, x), layers_to_remove);
mx.save_checkpoint(nnet, "weights/mobilenet-v2/MobiletNet-FE", mx.OptimizationState(1, 0, 0, 0))
|
{"hexsha": "4ceba957e133dced80b820e4fbc36af2fcb317c6", "size": 1104, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "Chapter09/4_loading_mobilenetv2.jl", "max_stars_repo_name": "tjburch/Hands-On-Computer-Vision-with-Julia", "max_stars_repo_head_hexsha": "bf1008087e9c5427ee37e6ef33bac07979cf8854", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 46, "max_stars_repo_stars_event_min_datetime": "2018-07-10T16:45:15.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-21T23:30:03.000Z", "max_issues_repo_path": "Chapter09/4_loading_mobilenetv2.jl", "max_issues_repo_name": "tjburch/Hands-On-Computer-Vision-with-Julia", "max_issues_repo_head_hexsha": "bf1008087e9c5427ee37e6ef33bac07979cf8854", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2018-07-12T15:14:21.000Z", "max_issues_repo_issues_event_max_datetime": "2021-02-04T18:32:44.000Z", "max_forks_repo_path": "Chapter09/4_loading_mobilenetv2.jl", "max_forks_repo_name": "tjburch/Hands-On-Computer-Vision-with-Julia", "max_forks_repo_head_hexsha": "bf1008087e9c5427ee37e6ef33bac07979cf8854", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 26, "max_forks_repo_forks_event_min_datetime": "2018-03-16T10:28:15.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-09T18:39:50.000Z", "avg_line_length": 32.4705882353, "max_line_length": 96, "alphanum_fraction": 0.7237318841, "num_tokens": 312}
|
# This file is a part of ValueShapes.jl, licensed under the MIT License (MIT).

"""
    ReshapedDist <: Distribution

A multivariate distribution reshaped using a given
[`AbstractValueShape`](@ref).

Constructors:

```julia
ReshapedDist(dist::MultivariateDistribution, shape::AbstractValueShape)
```

In addition, `MultivariateDistribution`s can be reshaped via

```julia
(shape::AbstractValueShape)(dist::MultivariateDistribution)
```

with the difference that

```julia
(shape::ArrayShape{T,1})(dist::MultivariateDistribution)
```

will return the original `dist` instead of a `ReshapedDist`.
"""
struct ReshapedDist{
    VF <: VariateForm,
    VS <: ValueSupport,
    D <: Distribution{Multivariate,VS},
    S <: AbstractValueShape
} <: Distribution{VF,VS}
    dist::D   # the wrapped (flat) multivariate distribution
    shape::S  # value shape applied to samples/statistics of `dist`
end

export ReshapedDist
# Map a value shape to the Distributions.jl variate form of the resulting
# reshaped distribution (scalar -> Univariate, vector -> Multivariate, ...).
_variate_form(shape::ScalarShape) = Univariate
_variate_form(shape::ArrayShape{T,1}) where T = Multivariate
_variate_form(shape::ArrayShape{T,2}) where T = Matrixvariate
_variate_form(shape::NamedTupleShape{names}) where names = NamedTupleVariate{names}

# Replace constant sub-shapes by zero-valued ones (used for `var` below, so
# constant components report zero variance).
_with_zeroconst(shape::AbstractValueShape) = replace_const_shapes(const_zero_shape, shape)
# Construct a ReshapedDist, checking that the shape has the same total number
# of degrees of freedom as the wrapped distribution.
function ReshapedDist(dist::MultivariateDistribution{VS}, shape::AbstractValueShape) where {VS}
    @argcheck totalndof(varshape(dist)) == totalndof(shape)
    VF = _variate_form(shape)
    D = typeof(dist)
    S = typeof(shape)
    ReshapedDist{VF,VS,D,S}(dist, shape)
end

# Calling a shape on a multivariate distribution reshapes it.
(shape::AbstractValueShape)(dist::MultivariateDistribution) = ReshapedDist(dist, shape)

# Reshaping to a vector shape of matching length is a no-op: return `dist`.
function (shape::ArrayShape{T,1})(dist::MultivariateDistribution) where T
    @argcheck totalndof(varshape(dist)) == totalndof(shape)
    dist
end

# Matrix shapes delegate to Distributions' MatrixReshaped wrapper.
function (shape::ArrayShape{T,2})(dist::MultivariateDistribution) where T
    MatrixReshaped(dist, size(shape)...)
end
# Accessors: the shape applied to values, and the underlying flat distribution.
@inline varshape(rd::ReshapedDist) = rd.shape
@inline unshaped(rd::ReshapedDist) = rd.dist

# Sampling: draw from the flat distribution and re-apply the shape.
Random.rand(rng::AbstractRNG, rd::ReshapedDist{Univariate}) = stripscalar(varshape(rd)(rand(rng, unshaped(rd))))

function Distributions._rand!(rng::AbstractRNG, rd::ReshapedDist{Multivariate}, x::AbstractVector{<:Real})
    Distributions._rand!(rng, unshaped(rd), x)
end

function Distributions._rand!(rng::AbstractRNG, rd::ReshapedDist{Matrixvariate}, x::AbstractMatrix{<:Real})
    Distributions._rand!(rng, MatrixReshaped(unshaped(rd), size(rd)...), x)
end

# Size/length come from the shape, not the flat distribution.
Base.length(rd::ReshapedDist{<:Multivariate}) = size(varshape(rd))[1]
Base.size(rd::ReshapedDist{<:Matrixvariate}) = size(varshape(rd))

# Statistics: computed on the flat distribution, then reshaped.
# `var` uses a zero-constant shape so constant components get zero variance.
Statistics.mean(rd::ReshapedDist) = stripscalar(varshape(rd)(mean(unshaped(rd))))
StatsBase.mode(rd::ReshapedDist) = stripscalar(varshape(rd)(mode(unshaped(rd))))
Statistics.var(rd::ReshapedDist) = stripscalar(_with_zeroconst(varshape(rd))(var(unshaped(rd))))
Statistics.cov(rd::ReshapedDist{Multivariate}) = cov(unshaped(rd))

# Densities and support checks delegate to the flat distribution; the
# matrixvariate methods route through MatrixReshaped.
Distributions.pdf(rd::ReshapedDist{Univariate}, x::Real) = pdf(unshaped(rd), unshaped(x))
Distributions._pdf(rd::ReshapedDist{Multivariate}, x::AbstractVector{<:Real}) = pdf(unshaped(rd), x)
Distributions._pdf(rd::ReshapedDist{Matrixvariate}, x::AbstractMatrix{<:Real}) = pdf(MatrixReshaped(unshaped(rd), size(rd)...), x)

Distributions.logpdf(rd::ReshapedDist{Univariate}, x::Real) = logpdf(unshaped(rd), unshaped(x))
Distributions._logpdf(rd::ReshapedDist{Multivariate}, x::AbstractVector{<:Real}) = logpdf(unshaped(rd), x)
Distributions._logpdf(rd::ReshapedDist{Matrixvariate}, x::AbstractMatrix{<:Real}) = logpdf(MatrixReshaped(unshaped(rd), size(rd)...), x)

Distributions.insupport(rd::ReshapedDist{Univariate}, x::Real) = insupport(unshaped(rd), unshaped(x))
Distributions.insupport(rd::ReshapedDist{Multivariate}, x::AbstractVector{<:Real}) = insupport(unshaped(rd), x)
Distributions.insupport(rd::ReshapedDist{Matrixvariate}, x::AbstractMatrix{<:Real}) = insupport(MatrixReshaped(unshaped(rd), size(rd)...), x)
|
{"hexsha": "8f25d998c9291595ff2df71f9a6bf5f4b30e59da", "size": 3850, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/reshaped_dist.jl", "max_stars_repo_name": "sthayashi/ValueShapes.jl", "max_stars_repo_head_hexsha": "f87a92d261a0889e8efef0641bd49c626bf7c02c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2019-10-21T13:21:57.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-06T14:51:00.000Z", "max_issues_repo_path": "src/reshaped_dist.jl", "max_issues_repo_name": "oschulz/ValueShapes.jl", "max_issues_repo_head_hexsha": "3372750ae320f7904f2cd0ef236b45873360371c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 52, "max_issues_repo_issues_event_min_datetime": "2019-12-12T08:45:14.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-16T01:22:21.000Z", "max_forks_repo_path": "src/reshaped_dist.jl", "max_forks_repo_name": "tldrma/ValueShapes.jl", "max_forks_repo_head_hexsha": "f87a92d261a0889e8efef0641bd49c626bf7c02c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2019-12-20T22:29:36.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-26T15:34:17.000Z", "avg_line_length": 33.7719298246, "max_line_length": 141, "alphanum_fraction": 0.7493506494, "num_tokens": 1088}
|
module JuliaCommunityStatistics

using GitHub
using ProgressMeter
using Dates
using DataFrames

import GitHub: name

export jlrepo, auth

# GitHub authentication token is read from the GH_AUTH environment variable.
const auth = authenticate(ENV["GH_AUTH"])
const jlrepo = repo("JuliaLang/julia"; auth=auth)

export get_all_prs
"""
    get_all_prs(; state="all")

Fetch all pull requests of JuliaLang/julia, one API page at a time, until an
empty page is returned.
"""
function get_all_prs(;state="all")
    prs = PullRequest[]
    @showprogress 1 "Fetching... " for iP = 1:1000
        thisprs, _ = pull_requests(jlrepo; auth=auth, page_limit=1, params = Dict("state"=>state, "page"=>iP))
        !isempty(thisprs) || break;
        prs = append!(prs, thisprs)
    end
    prs
end

# One-row DataFrame for a PR, with nested per-commit and per-file DataFrames.
function get_pr_info_df(pr)
    prdf = pr_to_df(pr)
    cs, _ = commits(jlrepo, pr;auth=auth)
    # NOTE(review): `pull_request_files` is destructured elsewhere as a tuple;
    # here the raw return value is used directly — confirm against GitHub.jl.
    cfs = pull_request_files(jlrepo, pr;auth=auth)
    cdfs = commit_to_df.(cs)
    cfdfs = changed_file_to_df.(cfs)
    insertcols!(prdf, :commits=>[cdfs], :changed_files=>[cfdfs])
end

export get_all_pr_info_df
"""
    get_all_pr_info_df(prs)

Collect PR info rows for all `prs`; on a rate-limit failure, sleep until the
limit resets and retry once.
"""
function get_all_pr_info_df(prs)
    df = DataFrame()
    @showprogress 1 "Fetching... " for pr in prs
        try
            prdf = get_pr_info_df(pr)
            df = vcat(df, prdf)
        catch
            sleep_till_reset()
            prdf = get_pr_info_df(pr)
            df = vcat(df, prdf)
        end
    end
    df
end

export get_commits
"""
    get_commits(prs)

Return a Dict mapping each PR to its commits, retrying once after a
rate-limit sleep on failure.
"""
function get_commits(prs)
    prd = Dict()
    @showprogress 1 "Fetching... " for iPR in prs
        try
            # BUGFIX: previously referenced undefined `pr` instead of the
            # loop variable `iPR`, raising UndefVarError on every iteration.
            prd[iPR], _ = commits(jlrepo, iPR;auth=auth)
        catch
            sleep_till_reset()
            prd[iPR], _ = commits(jlrepo, iPR;auth=auth)
        end
    end
    prd
end

export get_changed_files
"""
    get_changed_files(prs)

Return a Dict mapping each PR to its changed files, with the same
rate-limit retry behavior as `get_commits`.
"""
function get_changed_files(prs)
    prd = Dict()
    @showprogress 1 "Fetching... " for iPR in prs
        try
            prd[iPR] = pull_request_files(jlrepo, iPR;auth=auth)
        catch
            sleep_till_reset()
            prd[iPR] = pull_request_files(jlrepo, iPR;auth=auth)
        end
    end
    prd
end

export sleep_till_reset
"""
    sleep_till_reset()

If the GitHub API rate limit is exhausted, sleep until the reported reset
time (plus a 2s safety margin); otherwise return immediately.
"""
function sleep_till_reset()
    rate_lim = rate_limit(auth=auth)["rate"]
    if rate_lim["remaining"] > 0
        return
    end
    reset_time = Dates.unix2datetime(rate_lim["reset"])
    sleeptime = Dates.Millisecond(reset_time - now()) + Dates.Millisecond(2000)
    println("Sleeping for $(sleeptime)")
    sleep(sleeptime)
end

export pr_to_df
# Flatten the interesting fields of a PullRequest into a one-row DataFrame.
function pr_to_df(pr)
    DataFrame(
        number = pr.number,
        state = pr.state,
        owner = name(pr.user),
        created_at = pr.created_at,
        closed_at = pr.closed_at,
        updated_at = pr.updated_at,
        merged_at = pr.merged_at,
        base = name(pr.base),
        head = name(pr.head),
        merge_commit = pr.merge_commit_sha
    )
end

export commit_to_df
# One-row DataFrame for a commit (sha + author login).
function commit_to_df(c)
    DataFrame(
        sha = c.sha,
        author = name(c.author)
    )
end

export changed_file_to_df
# One-row DataFrame for a changed file's diff statistics.
function changed_file_to_df(cf)
    DataFrame(
        file = cf.filename,
        status = cf.status,
        changes = cf.changes,
        additions = cf.additions,
        deletions = cf.deletions
    )
end

end
|
{"hexsha": "6c0bf29329396d00ce946e3bb3536cd4c9139fe1", "size": 2972, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/JuliaCommunityStatistics.jl", "max_stars_repo_name": "rick2047/JuliaCommunityStatistics.jl", "max_stars_repo_head_hexsha": "3d42d1a9aab5af7d044fd4795223eb0f93bb0ac2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/JuliaCommunityStatistics.jl", "max_issues_repo_name": "rick2047/JuliaCommunityStatistics.jl", "max_issues_repo_head_hexsha": "3d42d1a9aab5af7d044fd4795223eb0f93bb0ac2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/JuliaCommunityStatistics.jl", "max_forks_repo_name": "rick2047/JuliaCommunityStatistics.jl", "max_forks_repo_head_hexsha": "3d42d1a9aab5af7d044fd4795223eb0f93bb0ac2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.776, "max_line_length": 110, "alphanum_fraction": 0.6258411844, "num_tokens": 830}
|
# python correlation_cm_sh.py NUM_GROUPS colormap
# python correlation_cm_sh.py 50 hot
import setproctitle
setproctitle.setproctitle("covid-19-vac@chenlin")
import sys
import os
import datetime
import pandas as pd
import numpy as np
import constants
import functions
import pdb
from sklearn.preprocessing import KBinsDiscretizer
import matplotlib.pyplot as plt
from scipy.stats import gaussian_kde
############################################################
# Constants

# Root folder of all input data; alternate line kept for the other machine.
root = '/data/chenlin/COVID-19/Data' #dl3
#root = '/home/chenlin/COVID-19/Data' #rl4

# timestring: specify the model (identifies which saved model run to load)
timestring = '20210206'
print('timestring: ',timestring)

############################################################
# Main variable settings

demo_policy_list = ['Age_Flood', 'Income_Flood', 'JUE_EW_Flood']

# Number of groups for quantization (first CLI argument)
NUM_GROUPS = int(sys.argv[1])
print('NUM_GROUPS: ',NUM_GROUPS)

# Color map for scatter plot (second CLI argument, e.g. 'hot')
colormap = sys.argv[2]
print('Color map:', colormap)
############################################################
# Functions
# Compute the average features of a group of CBGs.
def get_avg_feat(cbg_list, data_df, feat_str):
    """Population-weighted average of `feat_str` over the CBGs in `cbg_list`.

    Each CBG's value is weighted by its 'Sum' (population) column.
    """
    feat_values = [data_df.iloc[cbg][feat_str] for cbg in cbg_list]
    pop_weights = [data_df.iloc[cbg]['Sum'] for cbg in cbg_list]
    return np.average(np.array(feat_values), weights=pop_weights)
# Scatter plot with density
def scatter_kde(df, col_x, col_y, savepath, colormap='Spectral_r'):
    """Density-colored scatter plot of df[col_x] vs df[col_y], saved to `savepath`."""
    plt.figure()
    # Estimate per-point density with a Gaussian KDE over the (x, y) cloud.
    xystack = np.vstack([df[col_x], df[col_y]])
    density = gaussian_kde(xystack)(xystack)
    # Sort so the densest points are plotted last and stay visible.
    order = density.argsort()
    xs, ys, density = df[col_x][order], df[col_y][order], density[order]
    plt.scatter(xs, ys, c=density, s=20, cmap=colormap)
    plt.colorbar()
    # Human-readable axis labels for known columns; fall back to the raw name.
    x_label_map = {
        'Elder_Ratio': 'Percentage of older adults',
        'Vulnerability': 'Community risk',
        'Mean_Household_Income': 'Average household income',
    }
    y_label_map = {
        'Elder_Ratio': 'Elderly rate',
        'Essential_Worker_Ratio': 'Percentage of essential workers',
        'Mean_Household_Income': 'Average household income',
        'Damage': 'Societal harm',
    }
    label_x = x_label_map.get(col_x, col_x)
    label_y = y_label_map.get(col_y, col_y)
    plt.xlabel(label_x.replace("_", " "), fontsize=17)
    plt.ylabel(label_y.replace("_", " "), fontsize=17)
    plt.xticks(fontsize=12)
    plt.yticks(fontsize=12)
    plt.savefig(savepath, bbox_inches='tight')
    print('Figure saved. Path: ', savepath)
############################################################
# Load Data
# Load ACS Data for matching with NYT Data
acs_data = pd.read_csv(os.path.join(root,'list1.csv'),header=2)
acs_msas = [msa for msa in acs_data['CBSA Title'].unique() if type(msa) == str]
# Load SafeGraph data to obtain CBG sizes (i.e., populations)
filepath = os.path.join(root,"safegraph_open_census_data/data/cbg_b01.csv")
cbg_agesex = pd.read_csv(filepath)
# cbg_c24.csv: Occupation
filepath = os.path.join(root,"safegraph_open_census_data/data/cbg_c24.csv")
cbg_occupation = pd.read_csv(filepath)
# Load ACS 5-year (2013-2017) Data: Mean Household Income
filepath = os.path.join(root,"ACS_5years_Income_Filtered_Summary.csv")
cbg_income = pd.read_csv(filepath)
# Drop duplicate column 'Unnamed:0'
cbg_income.drop(['Unnamed: 0'],axis=1, inplace=True)
data = pd.DataFrame()
msa_count = 0
vd_corr_list = []
# Per-MSA pipeline: build CBG-level demographic, income, occupation and
# epidemic-risk features, rank them, and append to the global `data` frame.
for msa_idx in range(len(constants.MSA_NAME_LIST)):
    MSA_NAME = constants.MSA_NAME_LIST[msa_idx]
    MSA_NAME_FULL = constants.MSA_NAME_FULL_DICT[MSA_NAME]
    if(MSA_NAME=='NewYorkCity'):continue
    print('\nMSA_NAME: ',MSA_NAME)
    # Extract data specific to one msa, according to ACS data
    msa_match = functions.match_msa_name_to_msas_in_acs_data(MSA_NAME_FULL, acs_msas)
    msa_data = acs_data[acs_data['CBSA Title'] == msa_match].copy()
    msa_data['FIPS Code'] = msa_data.apply(lambda x : functions.get_fips_codes_from_state_and_county_fp((x['FIPS State Code']),x['FIPS County Code']), axis=1)
    good_list = list(msa_data['FIPS Code'].values)
    # Load CBG ids belonging to a specific metro area
    cbg_ids_msa = pd.read_csv(os.path.join(root,MSA_NAME,'%s_cbg_ids.csv'%MSA_NAME_FULL))
    cbg_ids_msa.rename(columns={"cbg_id":"census_block_group"}, inplace=True)
    M = len(cbg_ids_msa)
    # Mapping from cbg_ids to columns in hourly visiting matrices
    cbgs_to_idxs = dict(zip(cbg_ids_msa['census_block_group'].values, range(M)))
    x = {}
    for i in cbgs_to_idxs:
        x[str(i)] = cbgs_to_idxs[i]
    #print('Number of CBGs in this metro area:', M)
    # Select counties belonging to the MSA: a CBG id's leading 4-5 digits are
    # its county FIPS code (11- vs 12-digit ids differ by a leading zero).
    y = []
    for i in x:
        if((len(i)==12) & (int(i[0:5])in good_list)):
            y.append(x[i])
        if((len(i)==11) & (int(i[0:4])in good_list)):
            y.append(x[i])
    idxs_msa_all = list(x.values())
    idxs_msa_nyt = y
    #print('Number of CBGs in this metro area:', len(idxs_msa_all))
    #print('Number of CBGs in to compare with NYT data:', len(idxs_msa_nyt))
    # Load ACS Data for MSA-county matching
    acs_msas = [msa for msa in acs_data['CBSA Title'].unique() if type(msa) == str]
    msa_match = functions.match_msa_name_to_msas_in_acs_data(MSA_NAME_FULL, acs_msas)
    msa_data = acs_data[acs_data['CBSA Title'] == msa_match].copy()
    msa_data['FIPS Code'] = msa_data.apply(lambda x : functions.get_fips_codes_from_state_and_county_fp((x['FIPS State Code']),x['FIPS County Code']), axis=1)
    good_list = list(msa_data['FIPS Code'].values)
    # Extract CBGs belonging to the MSA - https://covid-mobility.stanford.edu//datasets/
    cbg_age_msa = pd.merge(cbg_ids_msa, cbg_agesex, on='census_block_group', how='left')
    # Add up males and females of the same age, according to the detailed age list (DETAILED_AGE_LIST)
    # which is defined in Constants.py
    for i in range(3,25+1): # 'B01001e3'~'B01001e25'
        male_column = 'B01001e'+str(i)
        female_column = 'B01001e'+str(i+24)
        cbg_age_msa[constants.DETAILED_AGE_LIST[i-3]] = cbg_age_msa.apply(lambda x : x[male_column]+x[female_column],axis=1)
    # Rename
    cbg_age_msa.rename(columns={'B01001e1':'Sum'},inplace=True)
    # Extract columns of interest
    columns_of_interest = ['census_block_group','Sum'] + constants.DETAILED_AGE_LIST
    cbg_age_msa = cbg_age_msa[columns_of_interest].copy()
    # Deal with CBGs with 0 populations (avoid division by zero below)
    cbg_age_msa['Sum'] = cbg_age_msa['Sum'].apply(lambda x : x if x!=0 else 1)
    # Calculate elder ratios (age 70+)
    cbg_age_msa['Elder_Absolute'] = cbg_age_msa.apply(lambda x : x['70 To 74 Years']+x['75 To 79 Years']+x['80 To 84 Years']+x['85 Years And Over'],axis=1)
    cbg_age_msa['Elder_Ratio'] = cbg_age_msa['Elder_Absolute'] / cbg_age_msa['Sum']
    # Obtain cbg sizes (populations)
    cbg_sizes = cbg_age_msa['Sum'].values
    cbg_sizes = np.array(cbg_sizes,dtype='int32')
    print('Total population: ',np.sum(cbg_sizes))
    # Load other Safegraph demographic data
    # Extract pois corresponding to the metro area, by merging dataframes
    cbg_occupation_msa = pd.merge(cbg_ids_msa, cbg_occupation, on='census_block_group', how='left')
    columns_of_essential_workers = list(constants.ew_rate_dict.keys())
    for column in columns_of_essential_workers:
        cbg_occupation_msa[column] = cbg_occupation_msa[column].apply(lambda x : x*constants.ew_rate_dict[column])
    cbg_occupation_msa['Essential_Worker_Absolute'] = cbg_occupation_msa.apply(lambda x : x[columns_of_essential_workers].sum(), axis=1)
    cbg_occupation_msa['Sum'] = cbg_age_msa['Sum']
    cbg_occupation_msa['Essential_Worker_Ratio'] = cbg_occupation_msa['Essential_Worker_Absolute'] / cbg_occupation_msa['Sum']
    columns_of_interest = ['census_block_group','Sum','Essential_Worker_Absolute','Essential_Worker_Ratio']
    cbg_occupation_msa = cbg_occupation_msa[columns_of_interest].copy()
    # Extract pois corresponding to the metro area (Philadelphia), by merging dataframes
    cbg_income_msa = pd.merge(cbg_ids_msa, cbg_income, on='census_block_group', how='left')
    # Add information of cbg populations, from cbg_age_Phi(cbg_b01.csv)
    cbg_income_msa['Sum'] = cbg_age_msa['Sum'].copy()
    # Rename
    cbg_income_msa.rename(columns = {'total_households':'Total_Households',
                                     'mean_household_income':'Mean_Household_Income'},inplace=True)
    # Deal with NaN values
    cbg_age_msa.fillna(0,inplace=True)
    cbg_income_msa.fillna(0,inplace=True)
    cbg_occupation_msa.fillna(0,inplace=True)
    ###############################################################################
    # Load and scale age-aware CBG-specific attack/death rates (original)
    cbg_death_rates_original = np.loadtxt(os.path.join(root, MSA_NAME, 'cbg_death_rates_original_'+MSA_NAME))
    cbg_attack_rates_original = np.ones(cbg_death_rates_original.shape)
    # Fix attack_scale
    attack_scale = 1
    cbg_attack_rates_scaled = cbg_attack_rates_original * attack_scale
    # Scale death rates by the MSA-specific fitted factor
    cbg_death_rates_scaled = cbg_death_rates_original * constants.death_scale_dict[MSA_NAME]
    cbg_age_msa['Death_Rate'] = cbg_death_rates_scaled
    ###############################################################################
    # Obtain vulnerability and damage, according to theoretical analysis
    nyt_included = np.zeros(len(idxs_msa_all))
    for i in range(len(nyt_included)):
        if(i in idxs_msa_nyt):
            nyt_included[i] = 1
    cbg_age_msa['NYT_Included'] = nyt_included.copy()
    # Retrieve the attack rate for the whole MSA (home_beta, fitted for each MSA)
    home_beta = constants.parameters_dict[MSA_NAME][1]
    # Load cbg_avg_infect_same, cbg_avg_infect_diff (precomputed; hard stop if missing)
    if(os.path.exists(os.path.join(root, '3cbg_avg_infect_same_%s.npy'%MSA_NAME))):
        print('cbg_avg_infect_same, cbg_avg_infect_diff: Load existing file.')
        cbg_avg_infect_same = np.load(os.path.join(root, '3cbg_avg_infect_same_%s.npy'%MSA_NAME))
        cbg_avg_infect_diff = np.load(os.path.join(root, '3cbg_avg_infect_diff_%s.npy'%MSA_NAME))
    else:
        print('cbg_avg_infect_same, cbg_avg_infect_diff: File not found. Please check.')
        pdb.set_trace()
    #print('cbg_avg_infect_same.shape:',cbg_avg_infect_same.shape)
    SEIR_at_30d = np.load(os.path.join(root, 'SEIR_at_30d.npy'),allow_pickle=True).item()
    S_ratio = SEIR_at_30d[MSA_NAME]['S'] / (cbg_sizes.sum())
    I_ratio = SEIR_at_30d[MSA_NAME]['I'] / (cbg_sizes.sum())
    #print('S_ratio:',S_ratio,'I_ratio:',I_ratio)
    # Deal with nan and inf (https://numpy.org/doc/stable/reference/generated/numpy.nan_to_num.html)
    cbg_avg_infect_same = np.nan_to_num(cbg_avg_infect_same,nan=0,posinf=0,neginf=0)
    cbg_avg_infect_diff = np.nan_to_num(cbg_avg_infect_diff,nan=0,posinf=0,neginf=0)
    cbg_age_msa['Infect'] = cbg_avg_infect_same + cbg_avg_infect_diff
    # Check whether there is NaN in cbg_tables
    #print('Any NaN in cbg_age_msa[\'Infect\']?', cbg_age_msa['Infect'].isnull().any().any())
    # Normalize by cbg population
    cbg_avg_infect_same_norm = cbg_avg_infect_same / cbg_sizes
    cbg_avg_infect_diff_norm = cbg_avg_infect_diff / cbg_sizes
    cbg_avg_infect_all_norm = cbg_avg_infect_same_norm + cbg_avg_infect_diff_norm
    # Compute the average death rate (alpha_bar) for the whole MSA: perform another weighted average over all CBGs
    avg_death_rates_scaled = np.matmul(cbg_sizes.T, cbg_death_rates_scaled) / np.sum(cbg_sizes)
    #print('avg_death_rates_scaled.shape:',avg_death_rates_scaled.shape) # shape: (), because it is a scalar
    # Compute vulnerability and damage for each cbg
    # New new method # 20210619
    cbg_vulnerability = cbg_avg_infect_all_norm * cbg_death_rates_scaled
    cbg_secondary_damage = cbg_avg_infect_all_norm * (cbg_avg_infect_all_norm*(S_ratio/I_ratio)) * avg_death_rates_scaled
    cbg_damage = cbg_vulnerability + cbg_secondary_damage
    cbg_age_msa['Vulnerability'] = cbg_vulnerability.copy()
    cbg_age_msa['Damage'] = cbg_damage.copy()
    cbg_age_msa['Secondary_Damage'] = cbg_secondary_damage.copy()
    cbg_age_msa['Vulner_Rank'] = cbg_age_msa['Vulnerability'].rank(ascending=False,method='first')
    cbg_age_msa['Damage_Rank'] = cbg_age_msa['Damage'].rank(ascending=False,method='first')
    # Only those belonging to the MSA (according to nyt) is valid for vaccination.
    # This is to prevent overlapping of CBGs across MSAs.
    cbg_age_msa['Vulner_Rank'] = cbg_age_msa.apply(lambda x : x['Vulner_Rank'] if x['NYT_Included']==1 else M+1, axis=1)
    cbg_age_msa['Vulner_Rank_New'] = cbg_age_msa['Vulner_Rank'].rank(ascending=True,method='first')
    cbg_age_msa['Damage_Rank'] = cbg_age_msa.apply(lambda x : x['Damage_Rank'] if x['NYT_Included']==1 else M+1, axis=1)
    cbg_age_msa['Damage_Rank_New'] = cbg_age_msa['Damage_Rank'].rank(ascending=True,method='first')
    cbg_age_msa['Mobility'] = cbg_avg_infect_all_norm.copy()
    #cbg_visits_all = np.loadtxt(os.path.join(root,MSA_NAME,'cbg_visits_all_%s' % MSA_NAME)) # 20210801
    #cbg_visits_all = np.loadtxt(os.path.join(root,MSA_NAME,'cbg_visits_afterlockdown_%s' % MSA_NAME)) # Mobility after lockdown # 20210323
    #cbg_age_msa['Mobility'] = cbg_visits_all / cbg_age_msa['Sum']
    # NOTE(review): this immediately overwrites the 'Mobility' column assigned
    # from cbg_avg_infect_all_norm a few lines above — confirm which of the
    # two definitions is intended.
    cbg_age_msa['Mobility'] = cbg_avg_infect_diff_norm.copy()
    #print('Correlation between Elder_Ratio and Mobility: ', cbg_age_msa['Mobility'].corr(cbg_age_msa['Elder_Ratio']))
    # No_Vaccination & Age_Agnostic, accumulated results # 20210802
    deaths_cbg_no_vaccination = np.load(os.path.join(root,MSA_NAME,'20210206_deaths_cbg_no_vaccination_%s.npy'%MSA_NAME))
    deaths_cbg_age_agnostic = np.load(os.path.join(root,MSA_NAME,'20210206_deaths_cbg_age_agnostic_%s.npy'%MSA_NAME))
    # Collect data together
    data_msa = pd.DataFrame()
    data_msa['Elder_Ratio'] = cbg_age_msa['Elder_Ratio'].copy()
    data_msa['Mean_Household_Income'] = cbg_income_msa['Mean_Household_Income'].copy()
    data_msa['Essential_Worker_Ratio'] = cbg_occupation_msa['Essential_Worker_Ratio'].copy()
    data_msa['Vulnerability'] = cbg_age_msa['Vulnerability'].copy()
    data_msa['Damage'] = cbg_age_msa['Damage'].copy()
    data_msa['Secondary_Damage'] = cbg_age_msa['Secondary_Damage'].copy()
    data_msa['Diff'] = data_msa['Vulnerability'] - data_msa['Secondary_Damage']
    data_msa['Mobility'] = cbg_age_msa['Mobility'].copy()
    data_msa['Death_Rate_Age_Aware'] = deaths_cbg_no_vaccination[-1,:]/cbg_age_msa['Sum']
    data_msa['Death_Rate_Age_Agnostic'] = deaths_cbg_age_agnostic[-1,:]/cbg_age_msa['Sum']
    ###############################################################################
    # Ranking (dense, percentile)
    data_msa['Elder_Ratio'] = data_msa['Elder_Ratio'].rank(method='dense',pct=True)
    data_msa['Mean_Household_Income'] = data_msa['Mean_Household_Income'].rank(method='dense',pct=True)
    data_msa['Essential_Worker_Ratio'] = data_msa['Essential_Worker_Ratio'].rank(method='dense',pct=True)
    data_msa['Vulnerability'] = data_msa['Vulnerability'].rank(method='dense',pct=True)
    data_msa['Damage'] = data_msa['Damage'].rank(method='dense',pct=True)
    data_msa['Mobility'] = data_msa['Mobility'].rank(method='dense',pct=True)
    # NOTE: DataFrame.append is deprecated in recent pandas; pd.concat is the
    # modern equivalent.
    data = data.append(data_msa, ignore_index=True)
    print('len(data): ',len(data))
    # NOTE(review): this correlates the current MSA's Vulnerability against
    # the *accumulated* data['Damage'] (all MSAs so far) — likely intended
    # data_msa['Damage']; confirm before trusting vd_corr_list.
    vd_corr_list.append(data_msa['Vulnerability'].corr(data['Damage']))
print('Correlation between Vulnerability and Damage: ', vd_corr_list)
###############################################################################
# Preprocessing: Binning (data binning / discretization)
print('Discretization, ', NUM_GROUPS)
# Quantize every column into NUM_GROUPS uniform bins, then map bin indices
# back to bin-center values via inverse_transform.
enc = KBinsDiscretizer(n_bins=NUM_GROUPS, encode="ordinal",strategy='uniform') #strategy='kmeans''uniform'
for column in data.columns:
    data[column] = enc.fit_transform(np.array(data[column]).reshape(-1,1))
    data[column] = enc.inverse_transform(np.array(data[column]).reshape(-1,1))

# Scatter plot with density
savepath = os.path.join(root, '1028_adjusted_%s_all_%squant_rank_vulner_damage.jpg'%(colormap, NUM_GROUPS))
scatter_kde(data, 'Vulnerability', 'Damage', savepath, colormap)
|
{"hexsha": "7ca58f8334fe79fc3c289504604b963d62a0089d", "size": 16944, "ext": "py", "lang": "Python", "max_stars_repo_path": "correlation_cm_sh.py", "max_stars_repo_name": "LinChen-65/utility-equity-covid-vac", "max_stars_repo_head_hexsha": "9194ee0e019b3160254401b84d369900a527da7e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-12-30T08:02:33.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-30T08:02:33.000Z", "max_issues_repo_path": "correlation_cm_sh.py", "max_issues_repo_name": "LinChen-65/utility-equity-covid-vac", "max_issues_repo_head_hexsha": "9194ee0e019b3160254401b84d369900a527da7e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "correlation_cm_sh.py", "max_forks_repo_name": "LinChen-65/utility-equity-covid-vac", "max_forks_repo_head_hexsha": "9194ee0e019b3160254401b84d369900a527da7e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 49.5438596491, "max_line_length": 159, "alphanum_fraction": 0.6833687441, "include": true, "reason": "import numpy,from scipy", "num_tokens": 4772}
|
################################################################################
#
# Package : AlphaPy
# Module : market_variables
# Created : July 11, 2013
#
# Copyright 2017 ScottFree Analytics LLC
# Mark Conway & Robert D. Scott II
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
#
# Variables
# ---------
#
# Numeric substitution is allowed for any number in the expression.
# Offsets are allowed in event expressions but cannot be substituted.
#
# Examples
# --------
#
# Variable('rrunder', 'rr_3_20 <= 0.9')
#
# 'rrunder_2_10_0.7'
# 'rrunder_2_10_0.9'
# 'xmaup_20_50_20_200'
# 'xmaup_10_50_20_50'
#
#
# Imports
#
from alphapy.alias import get_alias
from alphapy.frame import Frame
from alphapy.frame import frame_name
from alphapy.globals import BSEP, LOFF, ROFF, USEP
from alphapy.utilities import valid_name
from collections import OrderedDict
from importlib import import_module
import logging
import numpy as np
import pandas as pd
import parser
import re
import sys
#
# Initialize the module-level logger, namespaced to this module
#
logger = logging.getLogger(__name__)
#
# Class Variable
#
class Variable(object):
    """Create a new variable as a key-value pair. All variables are stored
    in ``Variable.variables``. Duplicate keys or values are not allowed,
    unless the ``replace`` parameter is ``True``.
    Parameters
    ----------
    name : str
        Variable key.
    expr : str
        Variable value.
    replace : bool, optional
        Replace the current key-value pair if it already exists.
    Attributes
    ----------
    variables : dict
        Class variable for storing all known variables
    Examples
    --------
    >>> Variable('rrunder', 'rr_3_20 <= 0.9')
    >>> Variable('hc', 'higher_close')
    """
    # class variable to track all variables
    variables = {}
    # function __new__
    def __new__(cls,
                name,
                expr,
                replace = False):
        # Reject a duplicate expression outright; returning None from
        # __new__ also prevents __init__ from running.
        efound = expr in [Variable.variables[key].expr for key in Variable.variables]
        if efound:
            # BUG FIX: use equality, matching the efound test above;
            # the original substring test could report unrelated keys.
            key = [key for key in Variable.variables if expr == Variable.variables[key].expr]
            logger.info("Expression '%s' already exists for key %s", expr, key)
            return
        else:
            if replace or not name in Variable.variables:
                if not valid_name(name):
                    logger.info("Invalid variable key: %s", name)
                    return
                try:
                    # compile() in 'eval' mode validates expression syntax;
                    # the deprecated 'parser' module used previously was
                    # removed in Python 3.10.
                    compile(expr, '<string>', 'eval')
                except (SyntaxError, ValueError):
                    logger.info("Invalid expression: %s", expr)
                    return
                return super(Variable, cls).__new__(cls)
            else:
                logger.info("Key %s already exists", name)
    # function __init__
    def __init__(self,
                 name,
                 expr,
                 replace = False):
        # store the key-value pair and register it globally
        self.name = name
        self.expr = expr
        Variable.variables[name] = self
    # function __str__
    def __str__(self):
        # the string form of a variable is its expression
        return self.expr
#
# Function vparse
#
def vparse(vname):
    r"""Parse a variable name into its respective components.
    Parameters
    ----------
    vname : str
        The name of the variable.
    Returns
    -------
    vxlag : str
        Variable name without the ``lag`` component.
    root : str
        The base variable name without the parameters.
    plist : list
        The parameter list.
    lag : int
        The offset starting with the current value [0] and counting
        back, e.g., an offset [1] means the previous value.
    Notes
    -----
    A variable name maps directly to a function call:
    xma_20_50 => xma(20, 50)
    Examples
    --------
    >>> vparse('xma_20_50[1]')
    # ('xma_20_50', 'xma', ['20', '50'], 1)
    """
    # separate the lag suffix, if any, from the variable proper
    before_lag, *lag_part = vname.split(LOFF)
    vxlag = before_lag
    # substitute a registered alias for the root, when one exists
    alias = get_alias(vxlag.split(USEP)[0])
    if alias:
        vxlag = vxlag.replace(vxlag.split(USEP)[0], alias)
    # split the (possibly aliased) name into root and parameters
    root, *plist = vxlag.split(USEP)
    # decode the lag offset, e.g., 'close[1]' -> 1
    lag = 0
    if lag_part:
        slag = lag_part[0].replace(ROFF, '')
        if slag and re.match(r'(^-?[0-9]+$)', slag):
            lag = int(slag)
    # return all components
    return vxlag, root, plist, lag
#
# Function allvars
#
def allvars(expr):
    r"""Get the list of valid names in the expression.
    Parameters
    ----------
    expr : str
        A valid expression conforming to the Variable Definition Language.
    Returns
    -------
    vlist : list
        List of valid variable names.
    """
    # Raw string avoids the invalid-escape-sequence warning for '\w'.
    regex = re.compile(r'\w+')
    # keep only the word-like tokens that qualify as variable names
    vlist = [item for item in regex.findall(expr) if valid_name(item)]
    return vlist
#
# Function vtree
#
def vtree(vname):
    r"""Get all of the antecedent variables.
    Before applying a variable to a dataframe, we recursively gather
    every child variable, starting from the given variable's expression,
    until all antecedent variables are known.
    Parameters
    ----------
    vname : str
        A valid variable stored in ``Variable.variables``.
    Returns
    -------
    all_variables : list
        The variables that need to be applied before ``vname``.
    Other Parameters
    ----------------
    Variable.variables : dict
        Global dictionary of variables
    """
    # depth-first walk appending each variable after its antecedents
    def vwalk(found, name):
        vxlag, root, plist, lag = vparse(name)
        if root in Variable.variables:
            # expand the root expression and walk its variables
            expanded = vsub(name, Variable.variables[root].expr)
            for child in allvars(expanded):
                vwalk(found, child)
        else:
            # a parameter may itself be a variable
            for param in plist:
                if valid_name(param):
                    vwalk(found, param)
        found.append(name)
        return found
    ordered = vwalk([], vname)
    # de-duplicate while preserving first-seen order
    all_variables = list(OrderedDict.fromkeys(ordered))
    return all_variables
#
# Function vsub
#
def vsub(v, expr):
    r"""Substitute the variable parameters into the expression.
    This function performs the parameter substitution when applying
    features to a dataframe. It lets the user override the default
    values in an expression when defining a feature, instead of
    programmatically calling a function with new values.
    Parameters
    ----------
    v : str
        Variable name.
    expr : str
        The expression for substitution.
    Returns
    -------
    newexpr
        The expression with the new, substituted values.
    """
    # numbers pattern (raw string: '\.' is an invalid escape sequence
    # in a plain string literal)
    npat = r'[-+]?[0-9]*\.?[0-9]+'
    nreg = re.compile(npat)
    # find all number locations in the variable name; these provide
    # the replacement values (positionally)
    vlocs = [match.span() for match in nreg.finditer(v)]
    # find all number locations in the expression, recording the
    # non-number text preceding each number so it can be rebuilt
    elen = len(expr)
    elocs = []
    enlocs = []
    index = 0
    for match in nreg.finditer(expr):
        eloc = match.span()
        elocs.append(eloc)
        enlocs.append((index, eloc[0]))
        index = eloc[1]
    # build the new expression: substitute the variable's numbers in
    # order; numbers beyond the substitution list keep their originals
    newexpr = str()
    for i, enloc in enumerate(enlocs):
        if i < len(vlocs):
            newexpr += expr[enloc[0]:enloc[1]] + v[vlocs[i][0]:vlocs[i][1]]
        else:
            newexpr += expr[enloc[0]:enloc[1]] + expr[elocs[i][0]:elocs[i][1]]
    # append any trailing text after the last number
    estart = elocs[-1][1] if elocs else 0
    newexpr += expr[estart:elen]
    return newexpr
#
# Function vexec
#
def vexec(f, v, vfuncs=None):
    r"""Add a variable to the given dataframe.
    This is the core function for adding a variable to a dataframe.
    The default variable functions are defined locally in
    ``alphapy.var``; external variable functions may be supplied in
    the ``vfuncs`` dictionary of modules and functions. A user-defined
    variable function must take a pandas *DataFrame* as input and
    return a pandas *Series* representing the new variable.
    Parameters
    ----------
    f : pandas.DataFrame
        Dataframe to contain the new variable.
    v : str
        Variable to add to the dataframe.
    vfuncs : dict, optional
        Dictionary of external modules and functions.
    Returns
    -------
    f : pandas.DataFrame
        Dataframe with the new variable.
    Other Parameters
    ----------------
    Variable.variables : dict
        Global dictionary of variables
    """
    vxlag, root, plist, lag = vparse(v)
    logger.debug("vexec : %s", v)
    logger.debug("vxlag : %s", vxlag)
    logger.debug("root  : %s", root)
    logger.debug("plist : %s", plist)
    logger.debug("lag   : %s", lag)
    if vxlag not in f.columns:
        if root in Variable.variables:
            logger.debug("Found variable %s: ", root)
            vroot = Variable.variables[root]
            expr = vroot.expr
            expr_new = vsub(vxlag, expr)
            estr = "%s" % expr_new
            logger.debug("Expression: %s", estr)
            # pandas eval
            f[vxlag] = f.eval(estr)
        else:
            logger.debug("Did not find variable: %s", root)
            # Must be a function call
            func_name = root
            # Convert each parameter: int first, then float, falling
            # back to the raw string; prepend the data frame.
            newlist = []
            for p in plist:
                try:
                    newlist.append(int(p))
                except ValueError:
                    try:
                        newlist.append(float(p))
                    except ValueError:
                        newlist.append(p)
            newlist.insert(0, f)
            # Find the module and function
            module = None
            if vfuncs:
                for m in vfuncs:
                    funcs = vfuncs[m]
                    if func_name in funcs:
                        module = m
                        break
            # If the module was found, import the external treatment function,
            # else search the local namespace.
            if module:
                ext_module = import_module(module)
                # BUG FIX: the original referenced the undefined name
                # 'my_module' here, raising NameError for any external
                # variable function.
                func = getattr(ext_module, func_name)
                # Create the variable by calling the function
                f[v] = func(*newlist)
            else:
                modname = globals()['__name__']
                module = sys.modules[modname]
                if func_name in dir(module):
                    func = getattr(module, func_name)
                    # Create the variable
                    f[v] = func(*newlist)
                else:
                    logger.debug("Could not find function %s", func_name)
    # if necessary, add the lagged variable
    if lag > 0 and vxlag in f.columns:
        f[v] = f[vxlag].shift(lag)
    # output frame
    return f
#
# Function vapply
#
def vapply(group, vname, vfuncs=None):
    r"""Apply a variable to multiple dataframes.
    Parameters
    ----------
    group : alphapy.Group
        The input group.
    vname : str
        The variable to apply to the ``group``.
    vfuncs : dict, optional
        Dictionary of external modules and functions.
    Returns
    -------
    None : None
    Other Parameters
    ----------------
    Frame.frames : dict
        Global dictionary of dataframes
    See Also
    --------
    vunapply
    """
    # frame names for every member of the group
    members = [item.lower() for item in group.members]
    # the variable plus all of its antecedents, in application order
    antecedents = vtree(vname)
    # apply the chain of variables to each member frame
    for member in members:
        fname = frame_name(member, group.space)
        if fname not in Frame.frames:
            logger.debug("Frame not found: %s", fname)
            continue
        df = Frame.frames[fname].df
        if df.empty:
            logger.debug("Frame for %s is empty", member)
            continue
        for var in antecedents:
            logger.debug("Applying variable %s to %s", var, member)
            df = vexec(df, var, vfuncs)
#
# Function vmapply
#
def vmapply(group, vs, vfuncs=None):
    r"""Apply multiple variables to multiple dataframes.
    Parameters
    ----------
    group : alphapy.Group
        The input group.
    vs : list
        The list of variables to apply to the ``group``.
    vfuncs : dict, optional
        Dictionary of external modules and functions.
    Returns
    -------
    None : None
    See Also
    --------
    vmunapply
    """
    # delegate each variable to vapply in turn
    for var in vs:
        logger.info("Applying variable: %s", var)
        vapply(group, var, vfuncs)
#
# Function vunapply
#
def vunapply(group, vname):
    r"""Remove a variable from multiple dataframes.
    Parameters
    ----------
    group : alphapy.Group
        The input group.
    vname : str
        The variable to remove from the ``group``.
    Returns
    -------
    None : None
    Other Parameters
    ----------------
    Frame.frames : dict
        Global dictionary of dataframes
    See Also
    --------
    vapply
    """
    # get all frame names to apply variables
    gnames = [item.lower() for item in group.all_members()]
    # remove the variable from each frame
    for g in gnames:
        fname = frame_name(g, group.space)
        if fname in Frame.frames:
            f = Frame.frames[fname].df
            logger.info("Unapplying variable %s from %s", vname, g)
            if vname not in f.columns:
                logger.info("Variable %s not in %s frame", vname, g)
            else:
                # BUG FIX: 'f' is already the dataframe; the original
                # exec'd "f.df.drop(...)" which raises AttributeError.
                # Drop the column and store the result back on the Frame.
                Frame.frames[fname].df = f.drop(vname, axis=1)
        else:
            logger.info("Frame not found: %s", fname)
#
# Function vmunapply
#
def vmunapply(group, vs):
    r"""Remove a list of variables from multiple dataframes.
    Parameters
    ----------
    group : alphapy.Group
        The input group.
    vs : list
        The list of variables to remove from the ``group``.
    Returns
    -------
    None : None
    See Also
    --------
    vmapply
    """
    # delegate each variable to vunapply in turn
    for var in vs:
        vunapply(group, var)
#
# This is the reference for all internal and external variable functions.
#
#
# 1. datetime functions
#
# date, datetime, time, timedelta
#
# 2. numpy unary ufuncs (PDA p. 96)
#
# abs, ceil, cos, exp, floor, log, log10, log2, modf, rint, sign,
# sin, square, sqrt, tan
#
# 3. moving window and exponential functions (PDA p. 323)
#
# rolling, ewm
#
# 4. pandas descriptive and summary statistical functions (PDA p. 139)
#
# argmin, argmax, count, cummax, cummin, cumprod, cumsum, describe,
# diff, idxmin, idxmax, kurt, mad, max, mean, median, min, pct_change,
# quantile, skew, std, sum, var
#
# 5. time series (PDA p. 289-328)
#
#
# Function c2max
#
def c2max(f, c1, c2):
    r"""Return the larger of two column values.
    Parameters
    ----------
    f : pandas.DataFrame
        Dataframe (or row) containing the two columns ``c1`` and ``c2``.
    c1 : str
        Name of the first column in the dataframe ``f``.
    c2 : str
        Name of the second column in the dataframe ``f``.
    Returns
    -------
    max_val : float
        The maximum value of the two columns.
    """
    first = f[c1]
    second = f[c2]
    return first if first > second else second
#
# Function c2min
#
def c2min(f, c1, c2):
    r"""Return the smaller of two column values.
    Parameters
    ----------
    f : pandas.DataFrame
        Dataframe (or row) containing the two columns ``c1`` and ``c2``.
    c1 : str
        Name of the first column in the dataframe ``f``.
    c2 : str
        Name of the second column in the dataframe ``f``.
    Returns
    -------
    min_val : float
        The minimum value of the two columns.
    """
    first = f[c1]
    second = f[c2]
    return first if first < second else second
#
# Function pchange1
#
def pchange1(f, c, o = 1):
    r"""Calculate the percentage change of a column versus its own
    value ``o`` periods earlier.
    Parameters
    ----------
    f : pandas.DataFrame
        Dataframe containing the column ``c``.
    c : str
        Name of the column in the dataframe ``f``.
    o : int
        Offset to the previous value.
    Returns
    -------
    new_column : pandas.Series (float)
        The array containing the new feature.
    """
    previous = f[c].shift(o)
    return f[c].div(previous) - 1.0
#
# Function pchange2
#
def pchange2(f, c1, c2):
    r"""Calculate the percentage change between two variables.
    Parameters
    ----------
    f : pandas.DataFrame
        Dataframe containing the two columns ``c1`` and ``c2``.
    c1 : str
        Name of the first column in the dataframe ``f``.
    c2 : str
        Name of the second column in the dataframe ``f``.
    Returns
    -------
    new_column : pandas.Series (float)
        The array containing the new feature.
    """
    return f[c1].div(f[c2]) - 1.0
#
# Function diff
#
def diff(f, c, n = 1):
    r"""Calculate the n-th order difference for the given variable.
    Parameters
    ----------
    f : pandas.DataFrame
        Dataframe containing the column ``c``.
    c : str
        Name of the column in the dataframe ``f``.
    n : int
        The number of times that the values are differenced.
    Returns
    -------
    new_column : numpy.ndarray (float)
        The array containing the new feature (n-th order difference,
        so the result is shorter than the input by ``n``).
    """
    return np.diff(f[c], n)
#
# Function down
#
def down(f, c):
    r"""Flag the negative values in the series.
    Parameters
    ----------
    f : pandas.DataFrame
        Dataframe containing the column ``c``.
    c : str
        Name of the column in the dataframe ``f``.
    Returns
    -------
    new_column : pandas.Series (bool)
        The array containing the new feature.
    """
    return f[c].lt(0)
#
# Function up
#
def up(f, c):
    r"""Flag the positive values in the series.
    Parameters
    ----------
    f : pandas.DataFrame
        Dataframe containing the column ``c``.
    c : str
        Name of the column in the dataframe ``f``.
    Returns
    -------
    new_column : pandas.Series (bool)
        The array containing the new feature.
    """
    return f[c].gt(0)
#
# Function higher
#
def higher(f, c, o = 1):
    r"""Flag series values that exceed the value ``o`` periods back.
    Parameters
    ----------
    f : pandas.DataFrame
        Dataframe containing the column ``c``.
    c : str
        Name of the column in the dataframe ``f``.
    o : int, optional
        Offset value for shifting the series.
    Returns
    -------
    new_column : pandas.Series (bool)
        The array containing the new feature.
    """
    return f[c].gt(f[c].shift(o))
#
# Function highest
#
def highest(f, c, p = 20):
    r"""Calculate the rolling maximum of a column.
    Parameters
    ----------
    f : pandas.DataFrame
        Dataframe containing the column ``c``.
    c : str
        Name of the column in the dataframe ``f``.
    p : int
        The period over which to calculate the rolling maximum.
    Returns
    -------
    new_column : pandas.Series (float)
        The array containing the new feature.
    """
    return f[c].rolling(window=p).max()
#
# Function lower
#
def lower(f, c, o = 1):
    r"""Flag series values that are below the value ``o`` periods back.
    Parameters
    ----------
    f : pandas.DataFrame
        Dataframe containing the column ``c``.
    c : str
        Name of the column in the dataframe ``f``.
    o : int, optional
        Offset value for shifting the series.
    Returns
    -------
    new_column : pandas.Series (bool)
        The array containing the new feature.
    """
    return f[c].lt(f[c].shift(o))
#
# Function lowest
#
def lowest(f, c, p = 20):
    r"""Calculate the rolling minimum of a column.
    Parameters
    ----------
    f : pandas.DataFrame
        Dataframe containing the column ``c``.
    c : str
        Name of the column in the dataframe ``f``.
    p : int
        The period over which to calculate the rolling minimum.
    Returns
    -------
    new_column : pandas.Series (float)
        The array containing the new feature.
    """
    new_column = f[c].rolling(window=p).min()
    return new_column
#
# Function ma
#
def ma(f, c, p = 20):
    r"""Calculate the simple moving average (rolling mean) of a column.
    Parameters
    ----------
    f : pandas.DataFrame
        Dataframe containing the column ``c``.
    c : str
        Name of the column in the dataframe ``f``.
    p : int
        The period over which to calculate the rolling mean.
    Returns
    -------
    new_column : pandas.Series (float)
        The array containing the new feature.
    References
    ----------
    *In statistics, a moving average (rolling average or running average)
    is a calculation to analyze data points by creating series of averages
    of different subsets of the full data set* [WIKI_MA]_.
    .. [WIKI_MA] https://en.wikipedia.org/wiki/Moving_average
    """
    return f[c].rolling(window=p).mean()
#
# Function ema
#
def ema(f, c, p = 20):
    r"""Calculate the exponential moving average of a column.
    Parameters
    ----------
    f : pandas.DataFrame
        Dataframe containing the column ``c``.
    c : str
        Name of the column in the dataframe ``f``.
    p : int
        The span over which to calculate the exponential moving average.
    Returns
    -------
    new_column : pandas.Series (float)
        The array containing the new feature.
    References
    ----------
    *An exponential moving average (EMA) is a type of moving average
    that is similar to a simple moving average, except that more weight
    is given to the latest data* [IP_EMA]_.
    .. [IP_EMA] http://www.investopedia.com/terms/e/ema.asp
    """
    # BUG FIX: pd.ewma was removed from pandas; use the ewm accessor.
    new_column = f[c].ewm(span=p).mean()
    return new_column
#
# Function maratio
#
def maratio(f, c, p1 = 1, p2 = 10):
    r"""Calculate the ratio of two moving averages.
    Parameters
    ----------
    f : pandas.DataFrame
        Dataframe containing the column ``c``.
    c : str
        Name of the column in the dataframe ``f``.
    p1 : int
        The period of the first moving average.
    p2 : int
        The period of the second moving average.
    Returns
    -------
    new_column : pandas.Series (float)
        The array containing the new feature.
    """
    fast = ma(f, c, p1)
    slow = ma(f, c, p2)
    return fast / slow
#
# Function net
#
def net(f, c='close', o = 1):
    r"""Calculate the net change of a given column.
    Parameters
    ----------
    f : pandas.DataFrame
        Dataframe containing the column ``c``.
    c : str
        Name of the column in the dataframe ``f``.
    o : int, optional
        Offset value for shifting the series.
    Returns
    -------
    new_column : pandas.Series (float)
        The array containing the new feature.
    References
    ----------
    *Net change is the difference between the closing price of a security
    on the day's trading and the previous day's closing price. Net change
    can be positive or negative and is quoted in terms of dollars* [IP_NET]_.
    .. [IP_NET] http://www.investopedia.com/terms/n/netchange.asp
    """
    return f[c].sub(f[c].shift(o))
#
# Function gap
#
def gap(f):
    r"""Calculate the gap percentage between the current open and
    the previous close.
    Parameters
    ----------
    f : pandas.DataFrame
        Dataframe with columns ``open`` and ``close``.
    Returns
    -------
    new_column : pandas.Series (float)
        The array containing the new feature.
    References
    ----------
    *A gap is a break between prices on a chart that occurs when the
    price of a stock makes a sharp move up or down with no trading
    occurring in between* [IP_GAP]_.
    .. [IP_GAP] http://www.investopedia.com/terms/g/gap.asp
    """
    # materialize the lagged close, then compare it to the open
    prev_close = 'close[1]'
    vexec(f, prev_close)
    return 100 * pchange2(f, 'open', prev_close)
#
# Function gapdown
#
def gapdown(f):
    r"""Determine whether or not there has been a gap down.
    Parameters
    ----------
    f : pandas.DataFrame
        Dataframe with columns ``open`` and ``close``.
    Returns
    -------
    new_column : pandas.Series (bool)
        The array containing the new feature.
    References
    ----------
    *A gap is a break between prices on a chart that occurs when the
    price of a stock makes a sharp move up or down with no trading
    occurring in between* [IP_GAP]_.
    """
    return f['open'].lt(f['close'].shift(1))
#
# Function gapup
#
def gapup(f):
    r"""Determine whether or not there has been a gap up.
    Parameters
    ----------
    f : pandas.DataFrame
        Dataframe with columns ``open`` and ``close``.
    Returns
    -------
    new_column : pandas.Series (bool)
        The array containing the new feature.
    References
    ----------
    *A gap is a break between prices on a chart that occurs when the
    price of a stock makes a sharp move up or down with no trading
    occurring in between* [IP_GAP]_.
    """
    return f['open'].gt(f['close'].shift(1))
#
# Function gapbadown
#
def gapbadown(f):
    r"""Determine whether or not there has been a breakaway gap down.
    Parameters
    ----------
    f : pandas.DataFrame
        Dataframe with columns ``open`` and ``low``.
    Returns
    -------
    new_column : pandas.Series (bool)
        The array containing the new feature.
    References
    ----------
    *A breakaway gap represents a gap in the movement of a stock price
    supported by levels of high volume* [IP_BAGAP]_.
    .. [IP_BAGAP] http://www.investopedia.com/terms/b/breakawaygap.asp
    """
    return f['open'].lt(f['low'].shift(1))
#
# Function gapbaup
#
def gapbaup(f):
    r"""Determine whether or not there has been a breakaway gap up.
    Parameters
    ----------
    f : pandas.DataFrame
        Dataframe with columns ``open`` and ``high``.
    Returns
    -------
    new_column : pandas.Series (bool)
        The array containing the new feature.
    References
    ----------
    *A breakaway gap represents a gap in the movement of a stock price
    supported by levels of high volume* [IP_BAGAP]_.
    """
    return f['open'].gt(f['high'].shift(1))
#
# Function truehigh
#
def truehigh(f):
    r"""Calculate the *True High* value.
    Parameters
    ----------
    f : pandas.DataFrame
        Dataframe with columns ``high`` and ``close``.
    Returns
    -------
    new_column : pandas.Series (float)
        The array containing the new feature.
    References
    ----------
    *Today's high, or the previous close, whichever is higher* [TS_TR]_.
    .. [TS_TR] http://help.tradestation.com/09_01/tradestationhelp/charting_definitions/true_range.htm
    """
    # BUG FIX: per the cited definition (and the docstring), True High
    # compares today's high with the *previous close*; the original
    # used the previous low.
    c1 = 'close[1]'
    vexec(f, c1)
    c2 = 'high'
    new_column = f.apply(c2max, axis=1, args=[c1, c2])
    return new_column
#
# Function truelow
#
def truelow(f):
    r"""Calculate the *True Low* value.
    Parameters
    ----------
    f : pandas.DataFrame
        Dataframe with columns ``low`` and ``close``.
    Returns
    -------
    new_column : pandas.Series (float)
        The array containing the new feature.
    References
    ----------
    *Today's low, or the previous close, whichever is lower* [TS_TR]_.
    """
    # BUG FIX: per the cited definition (and the docstring), True Low
    # compares today's low with the *previous close*; the original
    # used the previous high.
    c1 = 'close[1]'
    vexec(f, c1)
    c2 = 'low'
    new_column = f.apply(c2min, axis=1, args=[c1, c2])
    return new_column
#
# Function truerange
#
def truerange(f):
    r"""Calculate the *True Range* value.
    Parameters
    ----------
    f : pandas.DataFrame
        Dataframe with columns ``high`` and ``low``.
    Returns
    -------
    new_column : pandas.Series (float)
        The array containing the new feature.
    References
    ----------
    *True High - True Low* [TS_TR]_.
    """
    th = truehigh(f)
    tl = truelow(f)
    return th - tl
#
# Function hlrange
#
def hlrange(f, p = 1):
    r"""Calculate the Range, the difference between High and Low.
    Parameters
    ----------
    f : pandas.DataFrame
        Dataframe with columns ``high`` and ``low``.
    p : int
        The period over which the range is calculated.
    Returns
    -------
    new_column : pandas.Series (float)
        The array containing the new feature.
    """
    period_high = highest(f, 'high', p)
    period_low = lowest(f, 'low', p)
    return period_high - period_low
#
# Function netreturn
#
def netreturn(f, c, o = 1):
    r"""Calculate the net return, or Return On Investment (ROI).
    Parameters
    ----------
    f : pandas.DataFrame
        Dataframe containing the column ``c``.
    c : str
        Name of the column in the dataframe ``f``.
    o : int, optional
        Offset value for shifting the series.
    Returns
    -------
    new_column : pandas.Series (float)
        The array containing the new feature.
    References
    ----------
    *ROI measures the amount of return on an investment relative to the
    original cost. To calculate ROI, the benefit (or return) of an
    investment is divided by the cost of the investment, and the result
    is expressed as a percentage or a ratio* [IP_ROI]_.
    .. [IP_ROI] http://www.investopedia.com/terms/r/returnoninvestment.asp
    """
    roi = pchange1(f, c, o)
    return 100 * roi
#
# Function rindex
#
def rindex(f, ci, ch, cl, p = 1):
    r"""Calculate the *range index* spanning a given period ``p``.
    The **range index** is a number between 0 and 100 relating the
    value of the index column ``ci`` to the high column ``ch`` and
    the low column ``cl``. For example, if the range runs from 10
    to 20, then the range index of 15 is 50%, and of 18 is 80%.
    Parameters
    ----------
    f : pandas.DataFrame
        Dataframe containing the columns ``ci``, ``ch``, and ``cl``.
    ci : str
        Name of the index column in the dataframe ``f``.
    ch : str
        Name of the high column in the dataframe ``f``.
    cl : str
        Name of the low column in the dataframe ``f``.
    p : int
        The period over which the range index of column ``ci``
        is calculated.
    Returns
    -------
    new_column : pandas.Series (float)
        The array containing the new feature.
    """
    # the open must be shifted back to the start of the period
    offset = p-1 if f[ci].name == 'open' else 0
    period_high = highest(f, ch, p)
    period_low = lowest(f, cl, p)
    numerator = f[ci].shift(offset) - period_low
    denominator = period_high - period_low
    return 100 * numerator / denominator
#
# Function mval
#
def mval(f, c):
    r"""Get the magnitude of a negative value, otherwise zero.
    Parameters
    ----------
    f : pandas.DataFrame
        Dataframe containing the column ``c``.
    c : str
        Name of the column in the dataframe ``f``.
    Returns
    -------
    new_val : float
        Negated negative value or zero.
    """
    value = f[c]
    return -value if value < 0 else 0
#
# Function pval
#
def pval(f, c):
    r"""Get the positive value, otherwise zero.
    Parameters
    ----------
    f : pandas.DataFrame
        Dataframe containing the column ``c``.
    c : str
        Name of the column in the dataframe ``f``.
    Returns
    -------
    new_val : float
        Positive value or zero.
    """
    value = f[c]
    return value if value > 0 else 0
#
# Function dpc
#
def dpc(f, c):
    r"""Get the magnitudes of the negative values, with positive
    values zeroed.
    Parameters
    ----------
    f : pandas.DataFrame
        Dataframe with column ``c``.
    c : str
        Name of the column.
    Returns
    -------
    new_column : pandas.Series (float)
        The array containing the new feature.
    """
    return f.apply(mval, axis=1, args=[c])
#
# Function upc
#
def upc(f, c):
    r"""Get the positive values, with negative values zeroed.
    Parameters
    ----------
    f : pandas.DataFrame
        Dataframe with column ``c``.
    c : str
        Name of the column.
    Returns
    -------
    new_column : pandas.Series (float)
        The array containing the new feature.
    """
    return f.apply(pval, axis=1, args=[c])
#
# Function rsi
#
def rsi(f, c, p = 14):
    r"""Calculate the Relative Strength Index (RSI).
    Parameters
    ----------
    f : pandas.DataFrame
        Dataframe containing the column ``net``.
    c : str
        Name of the column in the dataframe ``f``.
    p : int
        The period over which to calculate the RSI.
    Returns
    -------
    new_column : pandas.Series (float)
        The array containing the new feature.
    References
    ----------
    *Developed by J. Welles Wilder, the Relative Strength Index (RSI) is a momentum
    oscillator that measures the speed and change of price movements* [SC_RSI]_.
    .. [SC_RSI] http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:relative_strength_index_rsi
    """
    # net change feeds the gain/loss decomposition
    cdiff = 'net'
    vexec(f, cdiff)
    f['pval'] = upc(f, cdiff)
    f['mval'] = dpc(f, cdiff)
    avg_gain = ma(f, 'pval', p)
    avg_loss = ma(f, 'mval', p)
    return 100 - (100 / (1 + (avg_gain / avg_loss)))
#
# Function gtval
#
def gtval(f, c1, c2):
    r"""Determine whether or not the first column of a dataframe
    is greater than the second.
    Parameters
    ----------
    f : pandas.DataFrame
        Dataframe containing the two columns ``c1`` and ``c2``.
    c1 : str
        Name of the first column in the dataframe ``f``.
    c2 : str
        Name of the second column in the dataframe ``f``.
    Returns
    -------
    new_column : pandas.Series (bool)
        The array containing the new feature.
    """
    return f[c1].gt(f[c2])
#
# Function gtval0
#
def gtval0(f, c1, c2):
    r"""For positive values in the first column that are greater than
    the second column, get the value in the first column, otherwise
    return zero.
    Parameters
    ----------
    f : pandas.DataFrame
        Dataframe containing the two columns ``c1`` and ``c2``.
    c1 : str
        Name of the first column in the dataframe ``f``.
    c2 : str
        Name of the second column in the dataframe ``f``.
    Returns
    -------
    new_val : float
        A positive value or zero.
    """
    value = f[c1]
    return value if value > f[c2] and value > 0 else 0
#
# Function dmplus
#
def dmplus(f):
    r"""Calculate the Plus Directional Movement (+DM).
    Parameters
    ----------
    f : pandas.DataFrame
        Dataframe with columns ``high`` and ``low``.
    Returns
    -------
    new_column : pandas.Series (float)
        The array containing the new feature.
    References
    ----------
    *Directional movement is positive (plus) when the current high minus
    the prior high is greater than the prior low minus the current low.
    This so-called Plus Directional Movement (+DM) then equals the current
    high minus the prior high, provided it is positive. A negative value
    would simply be entered as zero* [SC_ADX]_.
    .. [SC_ADX] http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:average_directional_index_adx
    """
    up_col = 'upmove'
    down_col = 'downmove'
    f[up_col] = net(f, 'high')
    f[down_col] = -net(f, 'low')
    return f.apply(gtval0, axis=1, args=[up_col, down_col])
#
# Function dminus
#
def dminus(f):
    r"""Calculate the Minus Directional Movement (-DM).
    Parameters
    ----------
    f : pandas.DataFrame
        Dataframe with columns ``high`` and ``low``.
    Returns
    -------
    new_column : pandas.Series (float)
        The array containing the new feature.
    References
    ----------
    *Directional movement is negative (minus) when the prior low minus
    the current low is greater than the current high minus the prior high.
    This so-called Minus Directional Movement (-DM) equals the prior low
    minus the current low, provided it is positive. A negative value
    would simply be entered as zero* [SC_ADX]_.
    """
    down_col = 'downmove'
    up_col = 'upmove'
    f[down_col] = -net(f, 'low')
    f[up_col] = net(f, 'high')
    return f.apply(gtval0, axis=1, args=[down_col, up_col])
#
# Function diplus
#
def diplus(f, p = 14):
    r"""Calculate the Plus Directional Indicator (+DI).
    Parameters
    ----------
    f : pandas.DataFrame
        Dataframe with columns ``high`` and ``low``.
    p : int
        The period over which to calculate the +DI.
    Returns
    -------
    new_column : pandas.Series (float)
        The array containing the new feature.
    References
    ----------
    *A component of the average directional index (ADX) that is used to
    measure the presence of an uptrend. When the +DI is sloping upward,
    it is a signal that the uptrend is getting stronger* [IP_PDI]_.
    .. [IP_PDI] http://www.investopedia.com/terms/p/positivedirectionalindicator.asp
    """
    # materialize true range, average true range, and +DM
    vexec(f, 'truerange')
    atr = USEP.join(['atr', str(p)])
    vexec(f, atr)
    dmp = 'dmplus'
    vexec(f, dmp)
    smoothed = f[dmp].ewm(span=p).mean()
    return 100 * smoothed / f[atr]
#
# Function diminus
#
def diminus(f, p = 14):
    r"""Calculate the Minus Directional Indicator (-DI).
    Parameters
    ----------
    f : pandas.DataFrame
        Dataframe with columns ``high`` and ``low``.
    p : int
        The period over which to calculate the -DI.
    Returns
    -------
    new_column : pandas.Series (float)
        The array containing the new feature.
    References
    ----------
    *A component of the average directional index (ADX) that is used to
    measure the presence of a downtrend. When the -DI is sloping downward,
    it is a signal that the downtrend is getting stronger* [IP_NDI]_.
    .. [IP_NDI] http://www.investopedia.com/terms/n/negativedirectionalindicator.asp
    """
    tr = 'truerange'
    vexec(f, tr)
    atr = USEP.join(['atr', str(p)])
    vexec(f, atr)
    dmm = 'dmminus'
    f[dmm] = dminus(f)
    # Use the stored column instead of recomputing dminus(f) a second
    # time; this also matches how diplus smooths its stored +DM column.
    new_column = 100 * f[dmm].ewm(span=p).mean() / f[atr]
    return new_column
#
# Function adx
#
def adx(f, p = 14):
    r"""Calculate the Average Directional Index (ADX).
    Parameters
    ----------
    f : pandas.DataFrame
        Dataframe with all columns required for calculation. If you
        are applying ADX through ``vapply``, then these columns are
        calculated automatically.
    p : int
        The period over which to calculate the ADX.
    Returns
    -------
    new_column : pandas.Series (float)
        The array containing the new feature.
    References
    ----------
    The Average Directional Movement Index (ADX) was invented by J. Welles
    Wilder in 1978 [WIKI_ADX]_. Its value reflects the strength of trend in any
    given instrument.
    .. [WIKI_ADX] https://en.wikipedia.org/wiki/Average_directional_movement_index
    """
    # Ensure both directional indicators are available in the frame.
    for indicator in ('diplus', 'diminus'):
        vexec(f, indicator)
    plus_di = f['diplus']
    minus_di = f['diminus']
    # ADX = 100 * EMA(|+DI - -DI|, p) / (+DI + -DI)
    spread = abs(plus_di - minus_di)
    total = plus_di + minus_di
    new_column = 100 * spread.ewm(span=p).mean() / total
    return new_column
#
# Function abovema
#
def abovema(f, c, p = 50):
    r"""Flag the rows of the dataframe where column ``c`` lies above
    its moving average.
    Parameters
    ----------
    f : pandas.DataFrame
        Dataframe containing the column ``c``.
    c : str
        Name of the column in the dataframe ``f``.
    p : int
        The period of the moving average.
    Returns
    -------
    new_column : pandas.Series (bool)
        The array containing the new feature.
    """
    average = ma(f, c, p)
    new_column = f[c] > average
    return new_column
#
# Function belowma
#
def belowma(f, c, p = 50):
    r"""Flag the rows of the dataframe where column ``c`` lies below
    its moving average.
    Parameters
    ----------
    f : pandas.DataFrame
        Dataframe containing the column ``c``.
    c : str
        Name of the column in the dataframe ``f``.
    p : int
        The period of the moving average.
    Returns
    -------
    new_column : pandas.Series (bool)
        The array containing the new feature.
    """
    average = ma(f, c, p)
    new_column = f[c] < average
    return new_column
#
# Function xmadown
#
def xmadown(f, c='close', pfast = 20, pslow = 50):
    r"""Determine those values of the dataframe where the fast moving
    average crosses below the slow moving average (a bearish crossover).

    Note: the original docstring said "below the moving average", which
    described ``belowma``; this function detects the downward crossover
    event itself.
    Parameters
    ----------
    f : pandas.DataFrame
        Dataframe containing the column ``c``.
    c : str, optional
        Name of the column in the dataframe ``f``.
    pfast : int, optional
        The period of the fast moving average.
    pslow : int, optional
        The period of the slow moving average.
    Returns
    -------
    new_column : pandas.Series (bool)
        The array containing the new feature.
    References
    ----------
    *In the statistics of time series, and in particular the analysis
    of financial time series for stock trading purposes, a moving-average
    crossover occurs when, on plotting two moving averages each based
    on different degrees of smoothing, the traces of these moving averages
    cross* [WIKI_XMA]_.
    .. [WIKI_XMA] https://en.wikipedia.org/wiki/Moving_average_crossover
    """
    sma = ma(f, c, pfast)
    sma_prev = sma.shift(1)
    lma = ma(f, c, pslow)
    lma_prev = lma.shift(1)
    # Crossover down: fast MA is now below the slow MA but was above it
    # on the previous bar.
    new_column = (sma < lma) & (sma_prev > lma_prev)
    return new_column
#
# Function xmaup
#
def xmaup(f, c='close', pfast = 20, pslow = 50):
    r"""Determine those values of the dataframe where the fast moving
    average crosses above the slow moving average (a bullish crossover).

    Note: the original docstring said "below the moving average", which
    was a copy-paste error; this function detects the upward crossover
    event.
    Parameters
    ----------
    f : pandas.DataFrame
        Dataframe containing the column ``c``.
    c : str, optional
        Name of the column in the dataframe ``f``.
    pfast : int, optional
        The period of the fast moving average.
    pslow : int, optional
        The period of the slow moving average.
    Returns
    -------
    new_column : pandas.Series (bool)
        The array containing the new feature.
    References
    ----------
    *In the statistics of time series, and in particular the analysis
    of financial time series for stock trading purposes, a moving-average
    crossover occurs when, on plotting two moving averages each based
    on different degrees of smoothing, the traces of these moving averages
    cross* [WIKI_XMA]_.
    """
    sma = ma(f, c, pfast)
    sma_prev = sma.shift(1)
    lma = ma(f, c, pslow)
    lma_prev = lma.shift(1)
    # Crossover up: fast MA is now above the slow MA but was below it
    # on the previous bar.
    new_column = (sma > lma) & (sma_prev < lma_prev)
    return new_column
|
{"hexsha": "77ee8ea4204f8fd84f574ea93382fb86cda78990", "size": 44191, "ext": "py", "lang": "Python", "max_stars_repo_path": "alphapy/market_variables.py", "max_stars_repo_name": "MichaelFriedberg/AlphaPy", "max_stars_repo_head_hexsha": "a5d33d1d021bbcec533286af91e30e6a61f4f85d", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 51, "max_stars_repo_stars_event_min_datetime": "2019-02-01T19:43:37.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-16T09:07:03.000Z", "max_issues_repo_path": "alphapy/market_variables.py", "max_issues_repo_name": "MichaelFriedberg/AlphaPy", "max_issues_repo_head_hexsha": "a5d33d1d021bbcec533286af91e30e6a61f4f85d", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2019-02-23T18:54:22.000Z", "max_issues_repo_issues_event_max_datetime": "2019-11-09T01:30:32.000Z", "max_forks_repo_path": "alphapy/market_variables.py", "max_forks_repo_name": "MichaelFriedberg/AlphaPy", "max_forks_repo_head_hexsha": "a5d33d1d021bbcec533286af91e30e6a61f4f85d", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 35, "max_forks_repo_forks_event_min_datetime": "2019-02-08T02:00:31.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-01T23:17:00.000Z", "avg_line_length": 23.270668773, "max_line_length": 121, "alphanum_fraction": 0.5862732231, "include": true, "reason": "import numpy", "num_tokens": 11084}
|
"""
Copyright (c) 2018-2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from collections import namedtuple
import warnings
import numpy as np
from ..adapters import Adapter
from ..config import BoolField, NumberField, StringField, ConfigValidator, ListField, ConfigError
from ..representation import DetectionPrediction
from ..utils import get_or_parse_value
# Raw per-cell YOLO candidate box: centre (x, y), size (w, h), objectness
# confidence, and the per-class probability vector.
DetectionBox = namedtuple('DetectionBox', ["x", "y", "w", "h", "confidence", "probabilities"])
class YoloOutputProcessor:
    """Applies activation corrections and grid/size normalization to raw
    YOLO box predictions, producing DetectionBox values in relative
    image coordinates.
    """

    def __init__(self, coord_correct=None, size_correct=None, conf_correct=None,
                 prob_correct=None, coord_normalizer=(1, 1), size_normalizer=(1, 1)):
        identity = lambda value: value
        # Defaults follow the DarkNet formulation: raw sizes pass through
        # exp(); coordinates, confidence and probabilities pass unchanged.
        self.coord_correct = coord_correct if coord_correct else identity
        self.size_correct = size_correct if size_correct else np.exp
        self.conf_correct = conf_correct if conf_correct else identity
        self.prob_correct = prob_correct if prob_correct else identity
        self.x_normalizer, self.y_normalizer = coord_normalizer
        self.width_normalizer, self.height_normalizer = size_normalizer

    def __call__(self, bbox, i, j, anchors=None):
        """Decode one raw DetectionBox for grid cell (i, j) using the given
        (width, height) anchor pair; defaults to unit anchors."""
        anchor_w, anchor_h = anchors if anchors is not None else (1, 1)
        center_x = (self.coord_correct(bbox.x) + i) / self.x_normalizer
        center_y = (self.coord_correct(bbox.y) + j) / self.y_normalizer
        box_w = self.size_correct(bbox.w) * anchor_w / self.width_normalizer
        box_h = self.size_correct(bbox.h) * anchor_h / self.height_normalizer
        conf = self.conf_correct(bbox.confidence)
        probs = self.prob_correct(bbox.probabilities)
        return DetectionBox(center_x, center_y, box_w, box_h, conf, probs)
class TinyYOLOv1Adapter(Adapter):
    """
    Class for converting output of Tiny YOLO v1 model to DetectionPrediction representation
    """
    __provider__ = 'tiny_yolo_v1'
    prediction_types = (DetectionPrediction, )
    def process(self, raw, identifiers, frame_meta):
        """
        Decode the flat 1470-element Tiny YOLO v1 output vector.
        Args:
            identifiers: list of input data identifiers
            raw: output of model
            frame_meta: meta info about prediction
        Returns:
            list of DetectionPrediction objects
        """
        prediction = self._extract_predictions(raw, frame_meta)
        self.select_output_blob(prediction)
        prediction = prediction[self.output_blob]
        # Fixed layout of the output vector for a 7x7 grid with 20 classes
        # and 2 boxes per cell: 7*7*20 = 980 class probabilities, then
        # 7*7*2 = 98 box confidences, then 7*7*2*4 = 392 box coordinates.
        PROBABILITY_SIZE = 980
        CONFIDENCE_SIZE = 98
        BOXES_SIZE = 392
        CELLS_X, CELLS_Y = 7, 7
        CLASSES = 20
        OBJECTS_PER_CELL = 2
        result = []
        for identifier, output in zip(identifiers, prediction):
            assert PROBABILITY_SIZE + CONFIDENCE_SIZE + BOXES_SIZE == output.shape[0]
            probability, scale, boxes = np.split(output, [PROBABILITY_SIZE, PROBABILITY_SIZE + CONFIDENCE_SIZE])
            probability = np.reshape(probability, (CELLS_Y, CELLS_X, CLASSES))
            scale = np.reshape(scale, (CELLS_Y, CELLS_X, OBJECTS_PER_CELL))
            boxes = np.reshape(boxes, (CELLS_Y, CELLS_X, OBJECTS_PER_CELL, 4))
            # Class-conditional confidence = class probability * box confidence.
            confidence = np.zeros((CELLS_Y, CELLS_X, OBJECTS_PER_CELL, CLASSES + 4))
            for cls in range(CLASSES):
                confidence[:, :, 0, cls] = np.multiply(probability[:, :, cls], scale[:, :, 0])
                confidence[:, :, 1, cls] = np.multiply(probability[:, :, cls], scale[:, :, 1])
            labels, scores, x_mins, y_mins, x_maxs, y_maxs = [], [], [], [], [], []
            # NOTE: arrays are indexed [j, i, k] (row, column, box) while the
            # ndindex iterates (i, j, k) = (column, row, box).
            for i, j, k in np.ndindex((CELLS_X, CELLS_Y, OBJECTS_PER_CELL)):
                box = boxes[j, i, k]
                # Box centre is stored relative to its grid cell; width and
                # height are stored as square roots, hence the squaring.
                box = [(box[0] + i) / float(CELLS_X), (box[1] + j) / float(CELLS_Y), box[2] ** 2, box[3] ** 2]
                label = np.argmax(confidence[j, i, k, :CLASSES])
                score = confidence[j, i, k, label]
                labels.append(label)
                scores.append(score)
                # Convert centre/size representation to corner coordinates.
                x_mins.append(box[0] - box[2] / 2.0)
                y_mins.append(box[1] - box[3] / 2.0)
                x_maxs.append(box[0] + box[2] / 2.0)
                y_maxs.append(box[1] + box[3] / 2.0)
            result.append(DetectionPrediction(identifier, labels, scores, x_mins, y_mins, x_maxs, y_maxs))
        return result
def entry_index(w, h, n_coords, n_classes, pos, entry):
    """Compute the flat index of field *entry* for position *pos* in a
    DarkNet-style output laid out as [anchor][entry][h*w]."""
    area = w * h
    anchor, offset = divmod(pos, area)
    return anchor * area * (n_classes + n_coords + 1) + entry * area + offset
def parse_output(predictions, cells, num, box_size, anchors, processor, threshold=0.001):
    """Decode one YOLO output tensor into flat detection lists.

    The tensor may be laid out HWB (first two dims equal, boxes last) or
    BHW (boxes first); each candidate is run through *processor* and kept
    only if its corrected confidence reaches *threshold*.
    Returns (labels, scores, x_mins, y_mins, x_maxs, y_maxs).
    """
    labels, scores = [], []
    x_mins, y_mins, x_maxs, y_maxs = [], [], [], []
    for col, row, box_idx in np.ndindex((cells, cells, num)):
        start, stop = box_idx * box_size, (box_idx + 1) * box_size
        if predictions.shape[0] == predictions.shape[1]:
            raw = predictions[row, col, start:stop]
        else:
            raw = predictions[start:stop, row, col]
        candidate = DetectionBox(raw[0], raw[1], raw[2], raw[3], raw[4], raw[5:])
        # Each anchor occupies a (width, height) pair in the flat list.
        box = processor(candidate, col, row, anchors[2 * box_idx:2 * box_idx + 2])
        if box.confidence < threshold:
            continue
        label = np.argmax(box.probabilities)
        labels.append(label)
        scores.append(box.probabilities[label] * box.confidence)
        # Centre/size -> corner coordinates.
        x_mins.append(box.x - box.w / 2.0)
        y_mins.append(box.y - box.h / 2.0)
        x_maxs.append(box.x + box.w / 2.0)
        y_maxs.append(box.y + box.h / 2.0)
    return labels, scores, x_mins, y_mins, x_maxs, y_maxs
class YoloV2Adapter(Adapter):
    """
    Class for converting output of YOLO v2 family models to DetectionPrediction representation
    """
    __provider__ = 'yolo_v2'
    prediction_types = (DetectionPrediction, )
    # Anchor priors as flat (width, height, width, height, ...) lists.
    PRECOMPUTED_ANCHORS = {
        'yolo_v2': [1.3221, 1.73145, 3.19275, 4.00944, 5.05587, 8.09892, 9.47112, 4.84053, 11.2364, 10.0071],
        'tiny_yolo_v2': [1.08, 1.19, 3.42, 4.41, 6.63, 11.38, 9.42, 5.11, 16.62, 10.52]
    }
    @classmethod
    def parameters(cls):
        """Describe the configuration options accepted by this adapter."""
        parameters = super().parameters()
        parameters.update({
            'classes': NumberField(
                value_type=int, optional=True, min_value=1, default=20, description="Number of detection classes."
            ),
            'coords': NumberField(
                value_type=int, optional=True, min_value=1, default=4, description="Number of bbox coordinates."
            ),
            'num': NumberField(
                value_type=int, optional=True, min_value=1, default=5,
                description="Num parameter from DarkNet configuration file."
            ),
            'anchors': StringField(
                optional=True, choices=YoloV2Adapter.PRECOMPUTED_ANCHORS,
                allow_own_choice=True, default='yolo_v2',
                description="Anchor values provided as comma-separated list or one of precomputed: "
                            "{}".format(', '.join(YoloV2Adapter.PRECOMPUTED_ANCHORS))
            ),
            'cells': NumberField(
                value_type=int, optional=True, min_value=1, default=13,
                description="Number of cells across width and height"
            ),
            'raw_output': BoolField(
                optional=True, default=False,
                description="Indicates, that output is in raw format"
            ),
            'output_format': StringField(
                choices=['BHW', 'HWB'], optional=True, default='BHW',
                description="Set output layer format"
            )
        })
        return parameters
    @classmethod
    def validate_config(cls, config, fetch_only=False, **kwargs):
        # Unknown extra options only produce a warning for v2 configs.
        return super().validate_config(
            config, fetch_only=fetch_only, on_extra_argument=ConfigValidator.WARN_ON_EXTRA_ARGUMENT
        )
    def configure(self):
        """Read the adapter options and build the output post-processor."""
        self.classes = self.get_value_from_config('classes')
        self.coords = self.get_value_from_config('coords')
        self.num = self.get_value_from_config('num')
        self.anchors = get_or_parse_value(self.get_value_from_config('anchors'), YoloV2Adapter.PRECOMPUTED_ANCHORS)
        self.cells = self.get_value_from_config('cells')
        self.raw_output = self.get_value_from_config('raw_output')
        self.output_format = self.get_value_from_config('output_format')
        if self.raw_output:
            # Raw DarkNet output: sigmoid for coordinates and confidence,
            # softmax over class scores; values normalized by the grid size.
            self.processor = YoloOutputProcessor(coord_correct=lambda x: 1. / (1 + np.exp(-x)),
                                                 conf_correct=lambda x: 1. / (1 + np.exp(-x)),
                                                 prob_correct=lambda x: np.exp(x) / np.sum(np.exp(x)),
                                                 coord_normalizer=(self.cells, self.cells),
                                                 size_normalizer=(self.cells, self.cells))
        else:
            self.processor = YoloOutputProcessor(coord_normalizer=(self.cells, self.cells),
                                                 size_normalizer=(self.cells, self.cells))
    def process(self, raw, identifiers, frame_meta):
        """
        Args:
            identifiers: list of input data identifiers
            raw: output of model
            frame_meta: meta info about data processing
        Returns:
            list of DetectionPrediction objects
        """
        predictions = self._extract_predictions(raw, frame_meta)
        self.select_output_blob(predictions)
        predictions = predictions[self.output_blob]
        result = []
        # Per-anchor record length: box coordinates + objectness + classes.
        box_size = self.classes + self.coords + 1
        for identifier, prediction in zip(identifiers, predictions):
            if len(prediction.shape) != 3:
                # Restore the grid layout for flattened outputs.
                if self.output_format == 'BHW':
                    new_shape = (self.num * box_size, self.cells, self.cells)
                else:
                    new_shape = (self.cells, self.cells, self.num * box_size)
                prediction = np.reshape(prediction, new_shape)
            labels, scores, x_mins, y_mins, x_maxs, y_maxs = parse_output(prediction, self.cells, self.num,
                                                                          box_size, self.anchors,
                                                                          self.processor)
            result.append(DetectionPrediction(identifier, labels, scores, x_mins, y_mins, x_maxs, y_maxs))
        return result
class YoloV3Adapter(Adapter):
    """
    Class for converting output of YOLO v3 family models to DetectionPrediction representation
    """
    __provider__ = 'yolo_v3'
    prediction_types = (DetectionPrediction, )
    # Anchor priors in input-image pixels as flat (width, height, ...) lists.
    PRECOMPUTED_ANCHORS = {
        'yolo_v3': [
            10.0, 13.0,
            16.0, 30.0,
            33.0, 23.0,
            30.0, 61.0,
            62.0, 45.0,
            59.0, 119.0,
            116.0, 90.0,
            156.0, 198.0,
            373.0, 326.0
        ],
        'tiny_yolo_v3': [
            10.0, 14.0,
            23.0, 27.0,
            37.0, 58.0,
            81.0, 82.0,
            135.0, 169.0,
            344.0, 319.0
        ]
    }
    @classmethod
    def parameters(cls):
        """Describe the configuration options accepted by this adapter."""
        parameters = super().parameters()
        parameters.update({
            'classes': NumberField(
                value_type=int, optional=True, min_value=1, default=80, description="Number of detection classes."
            ),
            'coords': NumberField(
                value_type=int, optional=True, min_value=1, default=4, description="Number of bbox coordinates."
            ),
            'num': NumberField(
                value_type=int, optional=True, min_value=1, default=3,
                description="Num parameter from DarkNet configuration file."
            ),
            'anchors': StringField(
                optional=True, choices=YoloV3Adapter.PRECOMPUTED_ANCHORS.keys(), allow_own_choice=True,
                default='yolo_v3',
                description="Anchor values provided as comma-separated list or one of precomputed: "
                            "{}.".format(', '.join(YoloV3Adapter.PRECOMPUTED_ANCHORS.keys()))),
            'threshold': NumberField(value_type=float, optional=True, min_value=0, default=0.001,
                                     description="Minimal objectiveness score value for valid detections."),
            'outputs': ListField(description="The list of output layers names."),
            'anchor_masks': ListField(optional=True, description='per layer used anchors mask'),
            'do_reshape': BoolField(
                optional=True, default=False,
                description="Reshapes output tensor to [B,Cy,Cx] or [Cy,Cx,B] format, depending on 'output_format'"
                            "value ([B,Cy,Cx] by default). You may need to specify 'cells' value."
            ),
            'transpose': ListField(optional=True, description="Transpose output tensor to specified format."),
            'cells': ListField(
                optional=True, default=[13, 26, 52],
                description="Grid size for each layer, according 'outputs' filed. Works only with 'do_reshape=True' or "
                            "when output tensor dimensions not equal 3."),
            'raw_output': BoolField(
                optional=True, default=False,
                description="Preprocesses output in the original way."
            ),
            'output_format': StringField(
                choices=['BHW', 'HWB'], optional=True, default='BHW',
                description="Set output layer format"
            )
        })
        return parameters
    @classmethod
    def validate_config(cls, config, fetch_only=False, **kwargs):
        # Unknown extra options are an error for v3 configs (stricter than v2).
        return super().validate_config(
            config, fetch_only=fetch_only, on_extra_argument=ConfigValidator.ERROR_ON_EXTRA_ARGUMENT
        )
    def configure(self):
        """Read adapter options, resolve anchors/masks and build the processor."""
        self.classes = self.get_value_from_config('classes')
        self.coords = self.get_value_from_config('coords')
        self.num = self.get_value_from_config('num')
        self.anchors = get_or_parse_value(self.get_value_from_config('anchors'), YoloV3Adapter.PRECOMPUTED_ANCHORS)
        self.threshold = self.get_value_from_config('threshold')
        self.outputs = self.get_value_from_config('outputs')
        anchor_masks = self.get_value_from_config('anchor_masks')
        self.masked_anchors = None
        if anchor_masks is not None:
            # Resolve per-layer anchor subsets: each mask index selects one
            # (width, height) pair from the flat anchors list.
            per_layer_anchors = []
            for layer_mask in anchor_masks:
                layer_anchors = []
                for idx in layer_mask:
                    layer_anchors += [self.anchors[idx * 2], self.anchors[idx * 2 + 1]]
                per_layer_anchors.append(layer_anchors)
            self.masked_anchors = per_layer_anchors
        self.do_reshape = self.get_value_from_config('do_reshape')
        self.transpose = self.get_value_from_config('transpose')
        self.cells = self.get_value_from_config('cells')
        if len(self.outputs) != len(self.cells):
            # Mismatch is fatal only when an explicit reshape was requested;
            # otherwise grid sizes can still be derived from tensor shapes.
            if self.do_reshape:
                raise ConfigError('Incorrect number of output layer ({}) or detection grid size ({}). '
                                  'Must be equal with each other, check "cells" or "outputs" option'
                                  .format(len(self.outputs), len(self.cells)))
            warnings.warn('Number of output layers ({}) not equal to detection grid size ({}). '
                          'Must be equal with each other, if output tensor resize is required'
                          .format(len(self.outputs), len(self.cells)))
        if self.masked_anchors and len(self.masked_anchors) != len(self.outputs):
            raise ConfigError('anchor mask should be specified for all output layers')
        self.raw_output = self.get_value_from_config('raw_output')
        self.output_format = self.get_value_from_config('output_format')
        if self.raw_output:
            # Raw DarkNet output: sigmoid on coordinates, confidence and
            # per-class scores (v3 uses independent logistic classifiers).
            self.processor = YoloOutputProcessor(coord_correct=lambda x: 1.0 / (1.0 + np.exp(-x)),
                                                 conf_correct=lambda x: 1.0 / (1.0 + np.exp(-x)),
                                                 prob_correct=lambda x: 1.0 / (1.0 + np.exp(-x)))
        else:
            self.processor = YoloOutputProcessor()
    def process(self, raw, identifiers, frame_meta):
        """
        Args:
            identifiers: list of input data identifiers
            raw: output of model
            frame_meta: meta info about data processing
        Returns:
            list of DetectionPrediction objects
        """
        result = []
        raw_outputs = self._extract_predictions(raw, frame_meta)
        batch = len(identifiers)
        # Regroup outputs per batch item: one entry per detection layer.
        predictions = [[] for _ in range(batch)]
        for blob in self.outputs:
            for b in range(batch):
                predictions[b].append(raw_outputs[blob][b])
        box_size = self.coords + 1 + self.classes
        for identifier, prediction, meta in zip(identifiers, predictions, frame_meta):
            detections = {'labels': [], 'scores': [], 'x_mins': [], 'y_mins': [], 'x_maxs': [], 'y_maxs': []}
            input_shape = list(meta.get('input_shape', {'data': (1, 3, 416, 416)}).values())[0]
            nchw_layout = input_shape[1] == 3
            # v3 box sizes are in input pixels, so normalize by input size.
            self.processor.width_normalizer = input_shape[3 if nchw_layout else 2]
            self.processor.height_normalizer = input_shape[2 if nchw_layout else 1]
            for layer_id, p in enumerate(prediction):
                anchors = self.masked_anchors[layer_id] if self.masked_anchors else self.anchors
                num = len(anchors) // 2 if self.masked_anchors else self.num
                if self.transpose:
                    p = np.transpose(p, self.transpose)
                if self.do_reshape or len(p.shape) != 3:
                    try:
                        cells = self.cells[layer_id]
                    except IndexError:
                        raise ConfigError('Number of output layers ({}) is more than detection grid size ({}). '
                                          'Check "cells" option.'.format(len(prediction), len(self.cells)))
                    if self.output_format == 'BHW':
                        new_shape = (num * box_size, cells, cells)
                    else:
                        new_shape = (cells, cells, num * box_size)
                    p = np.reshape(p, new_shape)
                else:
                    # Get grid size from output shape - ignore self.cells value.
                    # N.B.: value p.shape[1] will always contain grid size, but here we use if clause just for
                    # clarification (works ONLY for square grids).
                    cells = p.shape[1] if self.output_format == 'BHW' else p.shape[0]
                self.processor.x_normalizer = cells
                self.processor.y_normalizer = cells
                labels, scores, x_mins, y_mins, x_maxs, y_maxs = parse_output(p, cells, num,
                                                                              box_size, anchors,
                                                                              self.processor, self.threshold)
                detections['labels'].extend(labels)
                detections['scores'].extend(scores)
                detections['x_mins'].extend(x_mins)
                detections['y_mins'].extend(y_mins)
                detections['x_maxs'].extend(x_maxs)
                detections['y_maxs'].extend(y_maxs)
            result.append(DetectionPrediction(
                identifier, detections['labels'], detections['scores'], detections['x_mins'], detections['y_mins'],
                detections['x_maxs'], detections['y_maxs']
            ))
        return result
class YoloV3ONNX(Adapter):
    """Adapter for ONNX YOLO v3 models whose graph already performs NMS and
    emits boxes, scores and selected indices as three separate outputs."""
    __provider__ = 'yolo_v3_onnx'
    @classmethod
    def parameters(cls):
        """Describe the output-layer names this adapter needs."""
        params = super().parameters()
        params.update({
            'boxes_out': StringField(),
            'scores_out': StringField(),
            'indices_out': StringField()
        })
        return params
    def configure(self):
        self.boxes_out = self.get_value_from_config('boxes_out')
        self.scores_out = self.get_value_from_config('scores_out')
        self.indices_out = self.get_value_from_config('indices_out')
    def process(self, raw, identifiers, frame_meta):
        """Assemble DetectionPrediction objects from already-selected boxes."""
        raw_outputs = self._extract_predictions(raw, frame_meta)
        result = []
        indicies_out = raw_outputs[self.indices_out]
        # Add a batch dimension when the indices tensor comes unbatched.
        if len(indicies_out.shape) == 2:
            indicies_out = np.expand_dims(indicies_out, 0)
        for identifier, boxes, scores, indices in zip(
                identifiers, raw_outputs[self.boxes_out], raw_outputs[self.scores_out], indicies_out
        ):
            out_boxes, out_scores, out_classes = [], [], []
            # Each index row selects (class id at [1], box id at [2]);
            # a leading -1 marks end-of-detections padding.
            for idx_ in indices:
                if idx_[0] == -1:
                    break
                out_classes.append(idx_[1])
                out_scores.append(scores[tuple(idx_[1:])])
                out_boxes.append(boxes[idx_[2]])
            transposed_boxes = np.array(out_boxes).T if out_boxes else ([], [], [], [])
            # Boxes are stored as (y_min, x_min, y_max, x_max).
            x_mins = transposed_boxes[1]
            y_mins = transposed_boxes[0]
            x_maxs = transposed_boxes[3]
            y_maxs = transposed_boxes[2]
            result.append(DetectionPrediction(identifier, out_classes, out_scores, x_mins, y_mins, x_maxs, y_maxs))
        return result
class YoloV3TF2(Adapter):
    """Adapter for TF2 YOLO v3 models emitting per-layer tensors of
    (x, y, w, h, confidence, class probabilities) rows."""
    __provider__ = 'yolo_v3_tf2'
    @classmethod
    def parameters(cls):
        """Describe the configuration options accepted by this adapter."""
        params = super().parameters()
        params.update({
            'outputs': ListField(description="The list of output layers names."),
            'score_threshold': NumberField(
                description='Minimal accepted box confidence threshold', min_value=0, max_value=1, value_type=float,
                optional=True, default=0
            )
        })
        return params
    def configure(self):
        self.outputs = self.get_value_from_config('outputs')
        self.score_threshold = self.get_value_from_config('score_threshold')
    def process(self, raw, identifiers, frame_meta):
        """Flatten all detection layers and decode them per batch item."""
        result = []
        # Derive the network input size from the first frame's metadata;
        # falls back to the canonical 416x416 input.
        input_shape = list(frame_meta[0].get('input_shape', {'data': (1, 416, 416, 3)}).values())[0]
        is_nchw = input_shape[1] == 3
        input_size = min(input_shape[1], input_shape[2]) if not is_nchw else min(input_shape[2], input_shape[3])
        raw_outputs = self._extract_predictions(raw, frame_meta)
        batch = len(identifiers)
        predictions = [[] for _ in range(batch)]
        for blob in self.outputs:
            for b in range(batch):
                out = raw_outputs[blob][b]
                if is_nchw:
                    # Move channels last so each row becomes one candidate box.
                    out = np.transpose(out, (1, 2, 3, 0))
                out = np.reshape(out, (-1, out.shape[-1]))
                predictions[b].append(out)
        for identifier, outputs, meta in zip(identifiers, predictions, frame_meta):
            original_image_size = meta['image_size'][:2]
            out = np.concatenate(outputs, axis=0)
            coords, score, label = self.postprocess_boxes(out, original_image_size, input_size)
            x_min, y_min, x_max, y_max = coords.T
            result.append(DetectionPrediction(identifier, label, score, x_min, y_min, x_max, y_max))
        return result
    def postprocess_boxes(self, pred_bbox, org_img_shape, input_size):
        """Convert raw rows to absolute corner boxes on the original image
        and filter out degenerate or low-score boxes.
        Returns (coords, scores, classes) arrays for surviving boxes."""
        valid_scale = [0, np.inf]
        pred_bbox = np.array(pred_bbox)
        pred_xywh = pred_bbox[:, 0:4]
        pred_conf = pred_bbox[:, 4]
        pred_prob = pred_bbox[:, 5:]
        # # (1) (x, y, w, h) --> (xmin, ymin, xmax, ymax)
        pred_coor = np.concatenate([pred_xywh[:, :2] - pred_xywh[:, 2:] * 0.5,
                                    pred_xywh[:, :2] + pred_xywh[:, 2:] * 0.5], axis=-1)
        # # (2) (xmin, ymin, xmax, ymax) -> (xmin_org, ymin_org, xmax_org, ymax_org)
        # Undo the aspect-preserving resize: subtract the letterbox padding
        # (dw, dh) and rescale to the original resolution.
        org_h, org_w = org_img_shape
        resize_ratio = min(input_size / org_w, input_size / org_h)
        dw = (input_size - resize_ratio * org_w) / 2
        dh = (input_size - resize_ratio * org_h) / 2
        pred_coor[:, 0::2] = 1.0 * (pred_coor[:, 0::2] - dw) / resize_ratio
        pred_coor[:, 1::2] = 1.0 * (pred_coor[:, 1::2] - dh) / resize_ratio
        # # (3) clip some boxes those are out of range
        pred_coor = np.concatenate([np.maximum(pred_coor[:, :2], [0, 0]),
                                    np.minimum(pred_coor[:, 2:], [org_w - 1, org_h - 1])], axis=-1)
        invalid_mask = np.logical_or((pred_coor[:, 0] > pred_coor[:, 2]), (pred_coor[:, 1] > pred_coor[:, 3]))
        pred_coor[invalid_mask] = 0
        # # (4) discard some invalid boxes
        bboxes_scale = np.sqrt(np.multiply.reduce(pred_coor[:, 2:4] - pred_coor[:, 0:2], axis=-1))
        scale_mask = np.logical_and((valid_scale[0] < bboxes_scale), (bboxes_scale < valid_scale[1]))
        # # (5) discard some boxes with low scores
        classes = np.argmax(pred_prob, axis=-1)
        scores = pred_conf * pred_prob[np.arange(len(pred_coor)), classes]
        score_mask = scores > self.score_threshold
        mask = np.logical_and(scale_mask, score_mask)
        coors, scores, classes = pred_coor[mask], scores[mask], classes[mask]
        return coors, scores, classes
class YoloV5Adapter(YoloV3Adapter):
    """Adapter for YOLO v5 models; reuses the v3 decoding pipeline with
    v5-specific activation corrections."""
    __provider__ = 'yolo_v5'
    def configure(self):
        super().configure()
        if self.raw_output:
            # v5 replaces the v3 formulas: coordinates use 2*sigmoid(x) - 0.5
            # and sizes use (2*sigmoid(x))**2 instead of exp(x).
            self.processor = YoloOutputProcessor(coord_correct=lambda x: 2.0 / (1.0 + np.exp(-x)) - 0.5,
                                                 size_correct=lambda x: (2.0 / (1.0 + np.exp(-x))) ** 2,
                                                 conf_correct=lambda x: 1.0 / (1.0 + np.exp(-x)),
                                                 prob_correct=lambda x: 1.0 / (1.0 + np.exp(-x)))
|
{"hexsha": "717fcb5dc43fc2bf09a980ab2f11b0455f9df995", "size": 26324, "ext": "py", "lang": "Python", "max_stars_repo_path": "tools/accuracy_checker/accuracy_checker/adapters/yolo.py", "max_stars_repo_name": "PinDanil/open_model_zoo", "max_stars_repo_head_hexsha": "8538b2769d65d7ca24dd36db0340a9c143583812", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-07-12T07:43:59.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-12T07:43:59.000Z", "max_issues_repo_path": "tools/accuracy_checker/accuracy_checker/adapters/yolo.py", "max_issues_repo_name": "PinDanil/open_model_zoo", "max_issues_repo_head_hexsha": "8538b2769d65d7ca24dd36db0340a9c143583812", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2020-07-20T10:01:14.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-07T10:35:52.000Z", "max_forks_repo_path": "tools/accuracy_checker/accuracy_checker/adapters/yolo.py", "max_forks_repo_name": "ygnn123/open_model_zoo", "max_forks_repo_head_hexsha": "9ca5dbeff80464bf5728e8be25daedfe9a9208d7", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-11-14T12:51:15.000Z", "max_forks_repo_forks_event_max_datetime": "2019-11-14T12:51:15.000Z", "avg_line_length": 45.2302405498, "max_line_length": 120, "alphanum_fraction": 0.5815605531, "include": true, "reason": "import numpy", "num_tokens": 6115}
|
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 11 05:43:40 2019
@author: Roopak Ingole
"""
import pickle
import cv2
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import glob
from moviepy.editor import VideoFileClip
import os
import collections
import math
# Toggle for verbose/diagnostic output in the pipeline.
debug = 0
# HYPERPARAMETERS
imgH = 720   # expected input frame height (pixels)
imgW = 1280  # expected input frame width (pixels)
winH = imgH/9  # sliding-window height: the frame is split into 9 bands
# Set minimum number of pixels found to recenter window
minpix = 50
# Choose the number of sliding windows.
# BUG FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24; the
# builtin int() is the drop-in replacement.
nwindows = int(imgH/winH)
# Set the width of the windows +/- margin
margin = 100
ym_per_pix = 30/720 # meters per pixel in y dimension
xm_per_pix = 3.7/700 # meters per pixel in x dimension
# Rolling histories (last 10 frames) for temporally smoothing the left/right
# lane-line fits and the curvature-radius estimate.
l_lines_collection = collections.deque([], 10)
r_lines_collection = collections.deque([], 10)
radius_col = collections.deque([],10)
def gen_objpoints(path, nx=9, ny=6):
    """Collect chessboard corner correspondences from calibration images.

    Parameters
    ----------
    path : str
        Glob pattern matching the calibration images.
    nx, ny : int
        Number of inner chessboard corners along x and y.

    Returns
    -------
    imgpoints, objpoints : (list, list)
        Detected 2-D corner coordinates and the matching 3-D object
        points for every image where the full board was found.
    """
    images = glob.glob(path)
    objpoints = []
    imgpoints = []
    # Prepare the template object points like (0,0,0), (1,0,0), ...
    objp = np.zeros([nx*ny, 3], np.float32)
    objp[:, :2] = np.mgrid[0:nx, 0:ny].T.reshape(-1, 2)  # x, y coordinates
    for fname in images:
        img = mpimg.imread(fname)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # BUG FIX: use the (nx, ny) parameters instead of the hard-coded
        # (9, 6), so non-default board sizes are detected correctly.
        ret, corners = cv2.findChessboardCorners(gray, (nx, ny), None)
        if ret == True:
            imgpoints.append(corners)
            objpoints.append(objp)
    return imgpoints, objpoints
def undistort(img, mtx, dist):
    """Return *img* corrected for lens distortion.

    *mtx* is the camera matrix and *dist* the distortion coefficients,
    as produced by cv2.calibrateCamera().
    """
    return cv2.undistort(img, mtx, dist, None, mtx)
# Calibrate the camera from point correspondences and undistort an image.
def cal_undistort(img, objpoints, imgpoints):
    """Run camera calibration on (objpoints, imgpoints) and undistort *img*.

    Returns the undistorted image plus the camera matrix and distortion
    coefficients so the caller can cache them.
    """
    image_size = img.shape[1::-1]
    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(
        objpoints, imgpoints, image_size, None, None)
    corrected = cv2.undistort(img, mtx, dist, None, mtx)
    return corrected, mtx, dist
def calibrate_camera(path, test_img_path):
    """Run the full calibration pipeline and persist the result.

    Detects chessboard corners in the images matched by *path*, calibrates
    against *test_img_path*, and pickles the camera matrix and distortion
    coefficients to 'cam_calibration.p' for later use by getCamCal().
    """
    imgpoints, objpoints = gen_objpoints(path)
    undist, mtx, dist = cal_undistort(mpimg.imread(test_img_path), objpoints, imgpoints)
    dist_pickle = {}
    dist_pickle["mtx"] = mtx
    dist_pickle["dist"] = dist
    # BUG FIX: use a context manager so the file handle is closed
    # deterministically (the bare open() call leaked it).
    with open("cam_calibration.p", "wb") as out_file:
        pickle.dump(dist_pickle, out_file)
def getCamCal():
    """Load the cached camera matrix and distortion coefficients.

    Reads 'cam_calibration.p' as written by calibrate_camera().
    Returns (mtx, dist).
    """
    # BUG FIX: context manager closes the file handle (the bare open()
    # call leaked it).
    with open("cam_calibration.p", "rb") as in_file:
        dist_pickle = pickle.load(in_file)
    mtx = dist_pickle["mtx"]
    dist = dist_pickle["dist"]
    return mtx, dist
def prespectiveTransform(img, src, dst):
    """Warp *img* so the quadrilateral *src* maps onto *dst*.

    Both *src* and *dst* are float32 arrays of four (x, y) points.
    """
    width_height = (img.shape[1], img.shape[0])
    transform = cv2.getPerspectiveTransform(src, dst)
    return cv2.warpPerspective(img, transform, width_height, flags=cv2.INTER_LINEAR)
def corners_unwarp(img, nx, ny, mtx, dist):
    """Undistort *img*, find chessboard corners and warp to a top-down view.

    Parameters
    ----------
    img : ndarray
        Chessboard image.
    nx, ny : int
        Number of inner chessboard corners along x and y.
    mtx, dist : ndarray
        Camera matrix and distortion coefficients.

    Returns
    -------
    warped, M : (ndarray, ndarray)
        The warped (bird's-eye) image with corners drawn, and the
        perspective transform matrix.

    Raises
    ------
    ValueError
        If the chessboard corners cannot be found. (BUG FIX: the original
        fell through to `return warped, M` with both names unbound,
        crashing with a confusing NameError.)
    """
    img_size = (img.shape[1], img.shape[0])
    undist = cv2.undistort(img, mtx, dist, None, mtx)
    gray = cv2.cvtColor(undist, cv2.COLOR_BGR2GRAY)
    ret, corners = cv2.findChessboardCorners(gray, (nx, ny), None)
    if not ret:
        raise ValueError('Chessboard corners not found; cannot unwarp image')
    # Draw the detected corners for visual verification.
    cv2.drawChessboardCorners(undist, (nx, ny), corners, ret)
    # Map the four outermost detected corners onto a rectangle inset
    # 100 px from the image border.
    src = np.float32([corners[0], corners[nx-1], corners[-1], corners[-nx]])
    dst = np.float32([[100, 100],
                      [img_size[0]-100, 100],
                      [img_size[0]-100, img_size[1]-100],
                      [100, img_size[1]-100]])
    M = cv2.getPerspectiveTransform(src, dst)
    warped = cv2.warpPerspective(undist, M, img_size, flags=cv2.INTER_LINEAR)
    return warped, M
# Define a function that applies Sobel x or y,
# then takes an absolute value and applies a threshold.
def abs_sobel_thresh(img, orient='x', thresh_min=0, thresh_max=255):
    """Threshold an image on the absolute Sobel gradient along one axis.

    NOTE(review): this definition is immediately shadowed by a second
    ``abs_sobel_thresh`` defined below (with a ``sobel_kernel``/``thresh``
    tuple signature), so this version is dead code at import time —
    consider removing one of the two.

    *img* is an RGB image; *orient* selects the derivative axis ('x' or
    'y'); pixels whose scaled gradient magnitude lies within
    [thresh_min, thresh_max] are set to 1 in the returned binary mask.
    """
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    if(orient == 'x'):
        sobel = cv2.Sobel(gray, cv2.CV_64F, 1, 0)
    else:
        sobel = cv2.Sobel(gray, cv2.CV_64F, 0, 1)
    abs_sobel = np.absolute(sobel)
    # Scale the absolute gradient to 8-bit so the thresholds are
    # comparable across images.
    scaled_sobel = np.uint8(255*abs_sobel/np.max(abs_sobel))
    binary_output = np.zeros_like(scaled_sobel)
    binary_output [(scaled_sobel >= thresh_min) & (scaled_sobel <= thresh_max)] = 1
    return binary_output
# Keyword-tuple variant of the absolute-Sobel threshold (this definition
# shadows the one above and is the one used by combined_thd).
def abs_sobel_thresh(img, orient='x', sobel_kernel=3, thresh=(0, 255)):
    """Threshold the absolute Sobel gradient along one axis.

    The kernel size is configurable; *thresh* is an inclusive (low, high)
    pair applied after rescaling the gradient magnitude to 0-255.
    """
    working = np.copy(img)
    gray = cv2.cvtColor(working, cv2.COLOR_RGB2GRAY)
    if orient == 'x':
        deriv = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
    else:
        deriv = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
    abs_grad = np.absolute(deriv)
    scaled = np.uint8(255 * abs_grad / np.max(abs_grad))
    lo, hi = thresh
    mask = np.zeros_like(scaled)
    mask[(scaled >= lo) & (scaled <= hi)] = 1
    return mask
def mag_thresh(img, sobel_kernel=3, mag_thresh=(0, 255)):
    """Threshold the overall gradient magnitude sqrt(gx^2 + gy^2).

    Returns a binary (0/1) mask where the 8-bit-scaled magnitude lies in
    the inclusive range *mag_thresh*.
    """
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    gx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
    gy = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
    # Euclidean gradient magnitude, rescaled to 0-255.
    magnitude = np.sqrt(np.square(gx) + np.square(gy))
    scaled = np.uint8(255 * magnitude / np.max(magnitude))
    lo, hi = mag_thresh
    mask = np.zeros_like(scaled)
    mask[(scaled >= lo) & (scaled <= hi)] = 1
    return mask
# Threshold on the direction of the gradient rather than its magnitude.
def dir_threshold(img, sobel_kernel=3, thresh=(0, np.pi/2)):
    """Binary mask of pixels whose gradient direction falls inside *thresh*.

    Direction is arctan2(|gy|, |gx|), so values lie in [0, pi/2].
    """
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    gx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
    gy = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
    direction = np.arctan2(np.absolute(gy), np.absolute(gx))
    mask = np.zeros_like(direction)
    mask[(direction >= thresh[0]) & (direction <= thresh[1])] = 1
    return mask
def color_threshold(img, hsl='s', thd=(170, 255)):
    """Threshold one channel of the HLS representation of an RGB image.

    *hsl* selects the channel ('h', 'l' or 's'); *thd* is an inclusive
    (low, high) range. Returns a 0/1 mask; an unrecognized *hsl* value
    yields an all-zero mask (matching the original if-chain behavior).
    """
    hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
    channel_index = {'h': 0, 'l': 1, 's': 2}
    mask = np.zeros_like(hls[:, :, 2])
    if hsl in channel_index:
        channel = hls[:, :, channel_index[hsl]]
        mask[(channel >= thd[0]) & (channel <= thd[1])] = 1
    return mask
def combined_thd(image):
    """Combine gradient (x, y, magnitude, direction) and S-channel color
    thresholds into a single binary lane-candidate mask."""
    kernel = 3  # base Sobel kernel; larger odd values smooth the gradients
    grad_x = abs_sobel_thresh(image, orient='x', sobel_kernel=kernel, thresh=(15, 210))
    grad_y = abs_sobel_thresh(image, orient='y', sobel_kernel=kernel, thresh=(15, 210))
    mag_mask = mag_thresh(image, sobel_kernel=9, mag_thresh=(50, 200))
    dir_mask = dir_threshold(image, sobel_kernel=15, thresh=(0.7, 1.3))
    color_mask = color_threshold(image, hsl='s', thd=(100, 255))
    # Keep a pixel when both axis gradients fire, or magnitude and
    # direction agree, or the saturation channel is strong.
    keep = ((grad_x == 1) & (grad_y == 1)) | ((mag_mask == 1) & (dir_mask == 1)) | (color_mask == 1)
    combined = np.zeros_like(dir_mask)
    combined[keep] = 1
    return combined
def color_n_gradient_thd(img, s_thresh=(170, 255), sx_thresh=(20, 100)):
    """Stacked visualization of S-channel color and L-channel x-gradient
    thresholds.

    Returns an image whose green channel marks gradient hits and whose
    blue channel marks saturation hits, scaled by 255.
    """
    hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
    lightness = hls[:, :, 1]
    saturation = hls[:, :, 2]
    # x-gradient on lightness accentuates near-vertical lines.
    grad = np.absolute(cv2.Sobel(lightness, cv2.CV_64F, 1, 0))
    scaled = np.uint8(255 * grad / np.max(grad))
    grad_mask = np.zeros_like(scaled)
    grad_mask[(scaled >= sx_thresh[0]) & (scaled <= sx_thresh[1])] = 1
    color_mask = np.zeros_like(saturation)
    color_mask[(saturation >= s_thresh[0]) & (saturation <= s_thresh[1])] = 1
    return np.dstack((np.zeros_like(grad_mask), grad_mask, color_mask)) * 255
def window_mask(width, height, img_ref, center, level):
    """Return a mask (same shape/dtype as *img_ref*) marking one sliding
    window: vertical slab number *level* counted from the bottom, centered
    horizontally on *center* and clipped to the image bounds."""
    rows = img_ref.shape[0]
    cols = img_ref.shape[1]
    y_top = int(rows - (level + 1) * height)
    y_bottom = int(rows - level * height)
    x_left = max(0, int(center - width / 2))
    x_right = min(int(center + width / 2), cols)
    mask = np.zeros_like(img_ref)
    mask[y_top:y_bottom, x_left:x_right] = 1
    return mask
def find_window_centroids(image, window_width, window_height, margin):
    """Locate (left, right) lane centroid x-positions for each horizontal
    layer of *image* by convolving a flat window with column sums.

    The bottom quarter of the image seeds the two starting centers; each
    subsequent layer searches within +/-margin of the previous center.
    Returns a list of (l_center, r_center) tuples, bottom layer first.
    """
    half_w = window_width / 2
    template = np.ones(window_width)
    rows = image.shape[0]
    cols = image.shape[1]
    mid = int(cols / 2)
    # Seed the search using the bottom quarter of the image.
    bottom = image[int(3 * rows / 4):, :]
    l_center = np.argmax(np.convolve(template, np.sum(bottom[:, :mid], axis=0))) - half_w
    r_center = np.argmax(np.convolve(template, np.sum(bottom[:, mid:], axis=0))) - half_w + mid
    centroids = [(l_center, r_center)]
    # Walk up the image one window-height layer at a time.
    for layer in range(1, int(rows / window_height)):
        strip = image[int(rows - (layer + 1) * window_height):int(rows - layer * window_height), :]
        conv = np.convolve(template, np.sum(strip, axis=0))
        # A 'full' convolution is referenced at the window's right edge,
        # so searches are offset by half the window width.
        lo = int(max(l_center + half_w - margin, 0))
        hi = int(min(l_center + half_w + margin, cols))
        l_center = np.argmax(conv[lo:hi]) + lo - half_w
        lo = int(max(r_center + half_w - margin, 0))
        hi = int(min(r_center + half_w + margin, cols))
        r_center = np.argmax(conv[lo:hi]) + lo - half_w
        centroids.append((l_center, r_center))
    return centroids
def convolutional_method(warped, window_width, window_height, margin):
    """Visualize the convolution-based sliding-window search on *warped*.

    Draws the found window areas in green over a 3-channel copy of the
    warped binary image and returns the blended visualization. If no
    centroids were found, returns the road image converted to 3 channels.
    """
    window_centroids = find_window_centroids(warped, window_width, window_height, margin)
    if len(window_centroids) > 0:
        # Accumulate the window-mask pixels of each side across all levels.
        l_points = np.zeros_like(warped)
        r_points = np.zeros_like(warped)
        for level in range(0, len(window_centroids)):
            l_mask = window_mask(window_width, window_height, warped, window_centroids[level][0], level)
            r_mask = window_mask(window_width, window_height, warped, window_centroids[level][1], level)
            l_points[(l_points == 255) | (l_mask == 1)] = 255
            r_points[(r_points == 255) | (r_mask == 1)] = 255
        # Paint the accumulated window pixels green over the road image.
        template = np.array(r_points + l_points, np.uint8)
        zero_channel = np.zeros_like(template)
        template = np.array(cv2.merge((zero_channel, template, zero_channel)), np.uint8)
        warpage = np.dstack((warped, warped, warped)) * 255
        output = cv2.addWeighted(warpage, 1, template, 0.5, 0.0)
    else:
        # No window centers found: just return the road image as 3 channels.
        output = np.array(cv2.merge((warped, warped, warped)), np.uint8)
    # BUG FIX: the original fell off the end of the function and implicitly
    # returned None, discarding the visualization it had just built.
    return output
def measure_curvature_pixels(ploty, left_fit, right_fit):
    """Calculate the radius of curvature of both lane polynomials, in pixels.

    The radius is evaluated at the maximum y value (the bottom of the
    image). *left_fit* / *right_fit* are second-order coefficient triples
    (A, B, C) for x = A*y^2 + B*y + C.

    Removed from the original: an unused local ``y`` and redundant
    zero-initializations of the results.
    """
    y_eval = np.max(ploty)
    # R = (1 + (2*A*y + B)^2)^(3/2) / |2*A|
    A, B = left_fit[0], left_fit[1]
    left_curverad = np.sqrt((1 + ((2 * A * y_eval) + B) ** 2) ** 3) / np.abs(2 * A)
    A, B = right_fit[0], right_fit[1]
    right_curverad = np.sqrt((1 + ((2 * A * y_eval) + B) ** 2) ** 3) / np.abs(2 * A)
    return left_curverad, right_curverad
def measure_curvature_real(ploty, left_fit_cr, right_fit_cr):
    """Calculate the radius of curvature of both lane polynomials, in meters.

    The caller passes fits and y values already scaled to meter space;
    the radius is evaluated at the maximum y (bottom of the image).
    """
    y_eval = np.max(ploty)

    def radius(fit):
        # R = (1 + (2*A*y + B)^2)^(3/2) / |2*A|
        a, b = fit[0], fit[1]
        return np.sqrt((1 + ((2 * a * y_eval) + b) ** 2) ** 3) / np.abs(2 * a)

    return radius(left_fit_cr), radius(right_fit_cr)
# Define a function that thresholds the S-channel of HLS
# Use exclusive lower bound (>) and inclusive upper (<=)
def hls_select(img, thresh=(0, 255)):
    """Binary mask of pixels whose HLS saturation lies in (thresh[0], thresh[1]].

    Note the asymmetric bounds: the lower bound is exclusive, the upper
    bound inclusive. Removed from the original: unused locals H and L.
    """
    hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
    S = hls[:, :, 2]
    binary_output = np.zeros_like(S)
    binary_output[(S > thresh[0]) & (S <= thresh[1])] = 1
    return binary_output
def calc_center(window):
    """Return the base x positions (leftx, rightx) of the two lane lines.

    Takes a column histogram of the bottom half of *window*; the peak in
    each horizontal half of the histogram seeds the lane search.
    """
    histogram = np.sum(window[window.shape[0] // 2:, :], axis=0)
    # int() instead of np.int: the np.int alias was deprecated in NumPy 1.20
    # and removed in NumPy 1.24, so the original raised AttributeError there.
    midpoint = int(histogram.shape[0] // 2)
    leftx = np.argmax(histogram[:midpoint])
    rightx = np.argmax(histogram[midpoint:]) + midpoint
    return leftx, rightx
def get_line(img_shape, left_fit, right_fit):
    """Evaluate both lane polynomials x = A*y^2 + B*y + C over the image.

    Returns (left_fitx, right_fitx, ploty) with one sample per pixel row
    of an image of shape *img_shape*.
    """
    ploty = np.linspace(0, img_shape[0] - 1, img_shape[0])
    a, b, c = left_fit[0], left_fit[1], left_fit[2]
    left_fitx = a * ploty ** 2 + b * ploty + c
    a, b, c = right_fit[0], right_fit[1], right_fit[2]
    right_fitx = a * ploty ** 2 + b * ploty + c
    return left_fitx, right_fitx, ploty
def fit_poly(img_shape, leftx, lefty, rightx, righty):
    """Fit second-order polynomials x = f(y) to each lane's pixels and
    return the curves sampled over the image height via get_line().

    Returns (left_fitx, right_fitx, ploty).
    """
    left_coeffs = np.polyfit(lefty, leftx, 2)
    right_coeffs = np.polyfit(righty, rightx, 2)
    return get_line(img_shape, left_coeffs, right_coeffs)
def search_around_poly(binary_warped, left_fit, right_fit):
    """Collect lane pixels lying within +/-margin of previous polynomial fits.

    Faster alternative to the full sliding-window search when fits from a
    previous frame are available. Returns (leftx, lefty, rightx, righty,
    out_img), where out_img is a 3-channel copy of the binary image for
    visualization.
    """
    out_img = np.dstack((binary_warped, binary_warped, binary_warped))
    margin = 100  # search half-width around each previous polynomial
    nz = binary_warped.nonzero()
    nz_y = np.array(nz[0])
    nz_x = np.array(nz[1])

    def near(fit):
        # Predicted lane x at every activated pixel's y coordinate.
        predicted = fit[0] * nz_y ** 2 + fit[1] * nz_y + fit[2]
        return (nz_x >= predicted - margin) & (nz_x <= predicted + margin)

    left_sel = near(left_fit)
    right_sel = near(right_fit)
    return nz_x[left_sel], nz_y[left_sel], nz_x[right_sel], nz_y[right_sel], out_img
def find_lane_pixels(binary_warped):
    """Sliding-window search for lane pixels in a warped binary image.

    Relies on module-level tuning globals: ``nwindows`` (window count),
    ``margin`` (window half-width), ``minpix`` (recenter threshold) and
    ``debug`` (draw the search rectangles on the visualization).

    Returns (leftx, lefty, rightx, righty, out_img).

    Fix: ``np.int`` replaced by the builtin ``int`` — the alias was
    deprecated in NumPy 1.20 and removed in 1.24.
    """
    # Histogram of the bottom half seeds the two lane base positions.
    histogram = np.sum(binary_warped[binary_warped.shape[0] // 2:, :], axis=0)
    # 3-channel output image for visualization.
    out_img = np.dstack((binary_warped, binary_warped, binary_warped))
    midpoint = int(histogram.shape[0] // 2)
    leftx_base = np.argmax(histogram[:midpoint])
    rightx_base = np.argmax(histogram[midpoint:]) + midpoint
    # Window height derived from the global window count.
    window_height = int(binary_warped.shape[0] // nwindows)
    # Positions of all activated pixels in the image.
    nonzero = binary_warped.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    leftx_current = leftx_base
    rightx_current = rightx_base
    left_lane_inds = []
    right_lane_inds = []
    # Step through the windows from the bottom of the image upward.
    for window in range(nwindows):
        win_y_low = binary_warped.shape[0] - (window + 1) * window_height
        win_y_high = binary_warped.shape[0] - window * window_height
        win_xleft_low = leftx_current - margin
        win_xleft_high = leftx_current + margin
        win_xright_low = rightx_current - margin
        win_xright_high = rightx_current + margin
        if debug:
            # Draw the search rectangles on the visualization image.
            cv2.rectangle(out_img, (win_xleft_low, win_y_low), (win_xleft_high, win_y_high), (0, 255, 0), 2)
            cv2.rectangle(out_img, (win_xright_low, win_y_low), (win_xright_high, win_y_high), (0, 255, 0), 2)
        # Activated pixels falling inside each window.
        good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy <= win_y_high) & (nonzerox >= win_xleft_low) & (nonzerox <= win_xleft_high)).nonzero()[0]
        good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy <= win_y_high) & (nonzerox >= win_xright_low) & (nonzerox <= win_xright_high)).nonzero()[0]
        left_lane_inds.append(good_left_inds)
        right_lane_inds.append(good_right_inds)
        # Recenter the next window on the mean x when enough pixels hit.
        if len(good_left_inds) > minpix:
            leftx_current = int(np.mean(nonzerox[good_left_inds]))
        if len(good_right_inds) > minpix:
            rightx_current = int(np.mean(nonzerox[good_right_inds]))
    try:
        left_lane_inds = np.concatenate(left_lane_inds)
        right_lane_inds = np.concatenate(right_lane_inds)
    except ValueError:
        # Tolerate an empty index list (no windows matched).
        pass
    leftx = nonzerox[left_lane_inds]
    lefty = nonzeroy[left_lane_inds]
    rightx = nonzerox[right_lane_inds]
    righty = nonzeroy[right_lane_inds]
    return leftx, lefty, rightx, righty, out_img
def get_lane_coeifficient(leftx, lefty, rightx, righty):
    """Fit a second-order polynomial x = f(y) to each lane's pixel cloud.

    Returns (left_fit, right_fit), each an (A, B, C) coefficient array.
    (The misspelled name is kept for compatibility with existing callers.)
    """
    return np.polyfit(lefty, leftx, 2), np.polyfit(righty, rightx, 2)
def fit_lane_polynomial(binary_warped, left_fit=None, right_fit=None):
    """Locate lane pixels, choosing the search strategy by available fits.

    With no previous fits, runs the full sliding-window search; otherwise
    searches only around the supplied polynomials. When the module-level
    ``debug`` flag is set, colors the detected pixels on the returned
    visualization image (left red, right blue).

    Returns (leftx, lefty, rightx, righty, out_img).
    """
    if left_fit is None and right_fit is None:
        # No prior information: full sliding-window search.
        leftx, lefty, rightx, righty, out_img = find_lane_pixels(binary_warped)
    else:
        # Prior fits available: restrict the search to their neighborhood.
        leftx, lefty, rightx, righty, out_img = search_around_poly(binary_warped, left_fit, right_fit)
    if debug:
        out_img[lefty, leftx] = [255, 0, 0]
        out_img[righty, rightx] = [0, 0, 255]
    return leftx, lefty, rightx, righty, out_img
def find_lane(img):
    """Detect both lane lines in a warped binary frame and render them.

    Maintains the module-level history lists ``l_lines_collection`` /
    ``r_lines_collection`` to temporally smooth the polynomial fits across
    video frames. Also uses the module globals ``xm_per_pix`` /
    ``ym_per_pix`` (pixel-to-meter scaling), ``imgW`` (frame width) and
    ``debug``.

    Returns (out_img, left_rad, right_rad, dist_from_center, debug_img):
    the rendered lane overlay, left/right curvature radii in meters, the
    offset from lane center in meters, and the pixel-search visualization.
    """
    out_img = np.dstack((img, img, img))
    # Seed the pixel search with the running average of previous fits,
    # once at least two frames of history exist.
    left_fit = None
    right_fit = None
    if len(l_lines_collection) > 1:
        left_fit = np.array(np.sum(l_lines_collection, 0)/len(l_lines_collection))
    if len(r_lines_collection) > 1:
        right_fit = np.array(np.sum(r_lines_collection, 0)/len(r_lines_collection))
    leftx, lefty, rightx, righty, debug_img = fit_lane_polynomial(img,left_fit, right_fit)
    left_fit,right_fit = get_lane_coeifficient(leftx,lefty,rightx,righty)
    # Push this frame's fits into the history, then average over it.
    l_lines_collection.append(left_fit)
    r_lines_collection.append(right_fit)
    l_avg_lines = np.array(np.sum(l_lines_collection, 0)/len(l_lines_collection))
    r_avg_lines = np.array(np.sum(r_lines_collection, 0)/len(r_lines_collection))
    # Sample the averaged polynomials over the full image height.
    left_fitx, right_fitx, ploty = get_line(img.shape, l_avg_lines, r_avg_lines)
    # Refit in meter space for the real-world curvature computation.
    # NOTE(review): get_lane_coeifficient expects (leftx, lefty, ...), so
    # here x values get xm_per_pix and y values get ym_per_pix scaling --
    # confirm this argument ordering is intended.
    left_fit_rad, right_fit_rad = get_lane_coeifficient(left_fitx*xm_per_pix,ploty*ym_per_pix,right_fitx*xm_per_pix,ploty*ym_per_pix)
    # Draw the lane polygon (green) and the two boundaries (red/blue).
    l_points = np.squeeze(np.array(np.dstack((left_fitx, ploty)), dtype='int32'))
    r_points = np.squeeze(np.array(np.dstack((right_fitx, ploty)), dtype='int32'))
    points_rect = np.concatenate((r_points, l_points[::-1]), 0)
    cv2.fillPoly(out_img, [points_rect], (0, 255, 0))
    cv2.polylines(out_img, [l_points], False, (255, 0, 0), 15)
    cv2.polylines(out_img, [r_points], False, (0, 0, 255), 15)
    if debug:
        cv2.polylines(debug_img, [l_points], False, (255, 0, 0), 15)
        cv2.polylines(debug_img, [r_points], False, (0, 0, 255), 15)
    # Pixel-space curvature is computed but not returned (debug aid only);
    # the meter-space values below are what callers consume.
    left_r, right_r = measure_curvature_pixels(ploty, left_fit, right_fit)
    left_rad, right_rad = measure_curvature_real(ploty*ym_per_pix, left_fit_rad, right_fit_rad)
    # Offset of the image center from the lane midpoint, in meters.
    dist_from_center = left_fitx[-1] + ((right_fitx[-1] - left_fitx[-1])/2)
    dist_from_center = (imgW/2 - dist_from_center)*xm_per_pix
    return out_img, left_rad, right_rad, dist_from_center, debug_img
def pipeline(img,mtx,dist):
    """Full lane-detection pipeline for one RGB frame.

    Steps: distortion correction -> combined color/gradient threshold ->
    perspective transform to a top-down view -> lane pixel search and
    polynomial fit -> curvature/offset measurement -> lane overlay warped
    back onto the undistorted frame with annotation text.

    Uses module globals ``radius_col`` (running curvature history) and
    ``debug``, plus the helpers ``undistort`` / ``prespectiveTransform``
    defined elsewhere in this file.
    """
    img = np.copy(img)
    # Undo lens distortion using the supplied camera calibration.
    undist = undistort(img, mtx, dist)
    # Binary mask of candidate lane pixels.
    thd = combined_thd(undist)
    # Hand-tuned road trapezoid mapped to a rectangle (top-down view).
    src = np.float32([[595,451], [680,451], [233,720],[1067,720]])
    dst = np.float32([[350,0], [930,0], [350,720],[930,720]])
    warped = prespectiveTransform(thd, src, dst)
    # Locate the lane and measure its geometry.
    lane, left_r, right_r, dist_from_center, debug_img = find_lane(warped)
    # Smooth the reported radius over all frames seen so far.
    radius = ((left_r+right_r)/2)
    radius_col.append(radius)
    avg_radius = np.sum(radius_col,0)/len(radius_col)
    # Warp the lane drawing back to camera perspective and blend it in.
    unwraped = prespectiveTransform(lane, dst, src)
    result = cv2.addWeighted(undist, 1, unwraped, .3, 0.0, dtype=0)
    # NOTE(review): dist_str is computed but never used below -- it looks
    # like it was meant to appear in the distance annotation; confirm.
    dist_str = 'right'
    if dist_from_center >= 0:
        dist_str = 'left'
    cv2.putText(result, "Radius of Curvature = %.2f (m)" % (avg_radius), (20, 30), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255, 255, 255))
    cv2.putText(result, "Distance from center = %.2fm" % (dist_from_center), (20, 80), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255, 255, 255))
    if debug:
        # Debug overlay: raw curvature numbers plus a thumbnail of the
        # pixel-search visualization in the upper-right corner.
        cv2.putText(result, "l:%.2f r:%.2f" % (left_r, right_r), (20, 130), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255, 255, 255))
        s_img = cv2.resize(debug_img, (0,0), fx=0.33, fy=0.33)
        x_offset= 1200 - s_img.shape[1]
        y_offset=50
        result[y_offset:y_offset+s_img.shape[0], x_offset:x_offset+s_img.shape[1]] = s_img
    return result
def processTestImage(image, mtx, dist):
    """Run the full lane-finding pipeline on a single image file.

    Shows the original and processed images side by side and writes the
    result into ./output_images/ under the same file name.
    """
    raw = cv2.imread(image)
    fname = os.path.split(image)[1]
    print(fname)
    result = pipeline(raw, mtx, dist)
    # Side-by-side comparison plot of input and pipeline output.
    fig, (left_ax, right_ax) = plt.subplots(1, 2, figsize=(24, 9))
    fig.tight_layout()
    left_ax.imshow(raw)
    left_ax.set_title('Original Image', fontsize=40)
    right_ax.imshow(result)
    right_ax.set_title('Pipeline Result', fontsize=40)
    plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
    cv2.imwrite('./output_images/' + fname, result)
def processAll(mtx, dist):
    """Run the pipeline over every ./test_images/*.jpg, resetting the
    line-smoothing history before each image."""
    for image_path in glob.glob('./test_images/*.jpg'):
        reset_lines_collection()
        processTestImage(image_path, mtx, dist)
def process_image(image):
    """Frame callback for moviepy: apply the pipeline using the
    module-level camera calibration (mtx, dist)."""
    return pipeline(image, mtx, dist)
def reset_lines_collection():
    """Empty both polynomial-history buffers used for temporal smoothing."""
    del l_lines_collection[:]
    del r_lines_collection[:]
def processVideo(video_path):
    """Run the lane pipeline over a whole video.

    Writes the annotated result next to the input as <name>_sol<ext>.
    """
    stem, ext = os.path.splitext(video_path)
    output_video = stem + '_sol' + ext
    reset_lines_collection()
    source_clip = VideoFileClip(video_path)
    # fl_image expects a function mapping a color frame to a color frame.
    result_clip = source_clip.fl_image(process_image)
    result_clip.write_videofile(output_video, audio=False)
# Script driver: load the cached camera calibration, then process one of the
# project videos. The commented calls below can be enabled to (re)calibrate,
# run the still-image test set, or process the other videos.
#calibrate_camera('./camera_cal/calibration*.jpg', './camera_cal/calibration1.jpg')
mtx, dist = getCamCal()
#processTestImage('./test_images/test1.jpg', mtx,dist)
#processAll(mtx,dist)
#processVideo('project_video.mp4')
processVideo('challenge_video.mp4')
#processVideo('harder_challenge_video.mp4')
|
{"hexsha": "2c0e5e701f172e125664fc6c25d8c6e16af8e91a", "size": 33817, "ext": "py", "lang": "Python", "max_stars_repo_path": "advanced_lane_line.py", "max_stars_repo_name": "roopakingole/CarND-Advanced-Lane-Lines", "max_stars_repo_head_hexsha": "e11c1a5e5dc3511c5013701e48125215c7877f7d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "advanced_lane_line.py", "max_issues_repo_name": "roopakingole/CarND-Advanced-Lane-Lines", "max_issues_repo_head_hexsha": "e11c1a5e5dc3511c5013701e48125215c7877f7d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "advanced_lane_line.py", "max_forks_repo_name": "roopakingole/CarND-Advanced-Lane-Lines", "max_forks_repo_head_hexsha": "e11c1a5e5dc3511c5013701e48125215c7877f7d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.0400485437, "max_line_length": 158, "alphanum_fraction": 0.6736848331, "include": true, "reason": "import numpy", "num_tokens": 9584}
|
import matplotlib.pyplot as plt
import numpy as np
from sklearn import svm
if __name__ == '__main__':
    # Tiny linearly separable training set: the first three points are
    # labeled +1, the last two -1.
    samples = np.array([[1, 2], [2, 3], [3, 3], [2, 1], [3, 2]])
    labels = np.array([1, 1, 1, -1, -1])

    # A large C approximates a hard-margin linear SVM.
    classifier = svm.SVC(kernel='linear', C=10)
    classifier.fit(samples, labels)

    print('w1: ' + str(classifier.coef_[0][0]))
    print('w2: ' + str(classifier.coef_[0][1]))
    print('b: ' + str(classifier.intercept_[0]))

    # Separating hyperplane w1*x1 + w2*x2 + b = 0 rewritten as x2 = a*x1 + c.
    grid = np.linspace(0, 4)
    weights = classifier.coef_[0]
    slope = -weights[0] / weights[1]
    sep_line = slope * grid - classifier.intercept_[0] / weights[1]

    # Margin boundaries: parallel lines through the first and last
    # support vectors.
    sv = classifier.support_vectors_[0]
    margin_one = slope * grid + (sv[1] - slope * sv[0])
    sv = classifier.support_vectors_[-1]
    margin_two = slope * grid + (sv[1] - slope * sv[0])

    plt.figure(figsize=(4, 4))
    plt.plot(grid, sep_line, label='Separating hyperplane')
    plt.plot(grid, margin_one, 'k--', label='Margin boundary')
    plt.plot(grid, margin_two, 'k--')
    plt.scatter(samples[:, 0], samples[:, 1], c=labels)
    plt.scatter(classifier.support_vectors_[:, 0], classifier.support_vectors_[:, 1], s=150, facecolors='none', edgecolors='k',
                label='Support vectors')
    plt.xlim(0, 4)
    plt.ylim(0, 4)
    plt.legend()
    plt.savefig('fig.jpg')
    plt.show()
|
{"hexsha": "58740a935b8e1446ea9f17d94a9ecbc255ff9f43", "size": 1128, "ext": "py", "lang": "Python", "max_stars_repo_path": "BUPT/Machine-Learning-I/Assignment2/slm_7_2.py", "max_stars_repo_name": "dachr8/Exercise", "max_stars_repo_head_hexsha": "2e567f9edcf0d06ca4ed99cb65a0264546a36d63", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "BUPT/Machine-Learning-I/Assignment2/slm_7_2.py", "max_issues_repo_name": "dachr8/Exercise", "max_issues_repo_head_hexsha": "2e567f9edcf0d06ca4ed99cb65a0264546a36d63", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2021-06-08T21:53:33.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-12T00:37:28.000Z", "max_forks_repo_path": "BUPT/Machine-Learning-I/Assignment2/slm_7_2.py", "max_forks_repo_name": "dachr8/Exercise", "max_forks_repo_head_hexsha": "2e567f9edcf0d06ca4ed99cb65a0264546a36d63", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.3333333333, "max_line_length": 113, "alphanum_fraction": 0.5487588652, "include": true, "reason": "import numpy", "num_tokens": 422}
|
import argparse
import json
import pandas as pd
import numpy as np
import plotly.express as px
def main():
    """Summarize extracted spectra into an HTML report of peak histograms.

    Reads a JSON list of spectra (each with ``peaks`` and optionally
    ``precursor_mz`` / ``comment``), flattens them into one row per peak
    with per-spectrum normalized intensity, and writes a series of plotly
    histograms into the output HTML file.

    Each plot sits in its own try block because some columns (precursor,
    comment) may be absent for a given input; a failed plot is simply
    skipped. Fix: the original bare ``except:`` clauses were narrowed to
    ``except Exception:`` so SystemExit/KeyboardInterrupt still propagate
    while the best-effort behavior is preserved.
    """
    parser = argparse.ArgumentParser(description="MSQL CMD")
    parser.add_argument('input_extracted_json', help='input_extracted_json')
    parser.add_argument('output_summary_html', help='output_summary_html')
    args = parser.parse_args()

    input_spectra = json.loads(open(args.input_extracted_json).read())

    # Flatten all spectra into one dict per peak.
    peak_list = []
    for spectrum in input_spectra:
        sum_intensity = sum([peak[1] for peak in spectrum['peaks']])
        for peak in spectrum['peaks']:
            peak_dict = {}
            peak_dict["mz"] = peak[0]
            peak_dict["i"] = peak[1]
            # Intensity normalized within its own spectrum.
            peak_dict["i_norm"] = peak[1] / sum_intensity
            if "precursor_mz" in spectrum:
                peak_dict["precursor_mz"] = spectrum["precursor_mz"]
            if "comment" in spectrum:
                peak_dict["comment"] = float(spectrum["comment"])
            peak_list.append(peak_dict)

    peaks_df = pd.DataFrame(peak_list)

    with open(args.output_summary_html, 'w') as f:
        # Calculating Data
        try:
            peaks_df["mzminuscomment"] = peaks_df["mz"] - peaks_df["comment"]
        except Exception:
            pass

        # 1D Histograms
        # Histogram of m/z, useful for basic things
        try:
            peakbins = int(max(peaks_df["mz"]) - min(peaks_df["mz"]))
            fig = px.histogram(peaks_df,
                               x="mz",
                               title='m/z peak histogram',
                               nbins=peakbins)
            f.write(fig.to_html(full_html=False, include_plotlyjs='cdn'))
        except Exception:
            pass

        # Histogram of peaks minus comment, useful for learning about new fragmentation relative to X
        try:
            mz_bins = int(max(peaks_df["mz"]) - min(peaks_df["mz"]))
            fig = px.histogram(peaks_df,
                               x="mzminuscomment",
                               y="i_norm",
                               title='m/z peak average normed spectrum minus X histogram',
                               nbins=mz_bins*10)
            f.write(fig.to_html(full_html=False, include_plotlyjs='cdn'))

            filtered_peaks_df = peaks_df[peaks_df["i_norm"] > 0.005]
            fig = px.histogram(filtered_peaks_df,
                               x="mzminuscomment",
                               title='m/z peak frequency spectrum minus X histogram greater than 0.5% of base peak',
                               nbins=mz_bins*10)
            f.write(fig.to_html(full_html=False, include_plotlyjs='cdn'))
        except Exception:
            pass

        # 2D Histograms
        # 2D histogram of peak m/z vs precuror m/z, useful in MS2 spectra
        try:
            precbins = int(max(peaks_df["precursor_mz"]) - min(peaks_df["precursor_mz"]))
            fig = px.density_heatmap(peaks_df,
                                     title='2D m/z peak histogram',
                                     x="mz",
                                     y="precursor_mz",
                                     color_continuous_scale="Jet",
                                     nbinsx=peakbins, nbinsy=precbins)
            f.write(fig.to_html(full_html=False, include_plotlyjs='cdn'))
        except Exception:
            pass

        # 2D histogram of peak m/z vs comment
        try:
            mz_bins = int(max(peaks_df["mz"]) - min(peaks_df["mz"]))
            comment_bins = int(max(peaks_df["comment"]) - min(peaks_df["comment"]))
            # Log-shifted normalized intensity so the heatmap is readable.
            peaks_df["i_norm_log"] = np.log(peaks_df["i_norm"]) - np.log(min(peaks_df["i_norm"]))
            fig = px.density_heatmap(peaks_df,
                                     title='2D m/z peak histogram by X',
                                     x="mz",
                                     y="comment",
                                     z="i_norm_log",
                                     histfunc="avg",
                                     color_continuous_scale="Hot",
                                     nbinsx=mz_bins, nbinsy=comment_bins)
            f.write(fig.to_html(full_html=False, include_plotlyjs='cdn'))
        except Exception:
            pass

        # 2D histogram of peak minus X, marginalized on precursor m/z, useful in MS2 spectra
        try:
            precbins = int(max(peaks_df["precursor_mz"]) - min(peaks_df["precursor_mz"]))
            fig = px.density_heatmap(peaks_df,
                                     title='2D m/z peak histogram minus X by X',
                                     x="mzminuscomment",
                                     y="precursor_mz",
                                     nbinsx=peakbins, nbinsy=precbins,
                                     marginal_x="histogram", marginal_y="histogram")
            f.write(fig.to_html(full_html=False, include_plotlyjs='cdn'))
        except Exception:
            pass

        # 2D histogram with MS1 peaks minus comment vs comment
        try:
            mz_bins = int(max(peaks_df["mz"]) - min(peaks_df["mz"]))
            comment_bins = int(max(peaks_df["comment"]) - min(peaks_df["comment"]))
            peaks_df["mzminuscomment"] = peaks_df["mz"] - peaks_df["comment"]
            fig = px.density_heatmap(peaks_df,
                                     title='2D m/z peak histogram minus X with margins',
                                     x="mzminuscomment",
                                     y="comment",
                                     nbinsx=mz_bins, nbinsy=comment_bins,
                                     color_continuous_scale=px.colors.sequential.Hot,
                                     marginal_x="histogram", marginal_y="histogram")
            f.write(fig.to_html(full_html=False, include_plotlyjs='cdn'))
        except Exception:
            pass

        # 2D histogram with MS1 peaks minus comment, average histogram rather than intensity
        try:
            mz_bins = int(max(peaks_df["mz"]) - min(peaks_df["mz"]))
            comment_bins = int(max(peaks_df["comment"]) - min(peaks_df["comment"]))
            peaks_df["i_norm_log"] = np.log(peaks_df["i_norm"]) - np.log(min(peaks_df["i_norm"]))
            peaks_df["mzminuscomment"] = peaks_df["mz"] - peaks_df["comment"]
            fig = px.density_heatmap(peaks_df,
                                     title='2D m/z peak histogram minus X with margins - i_norm avg',
                                     x="mzminuscomment",
                                     y="comment",
                                     z="i_norm_log",
                                     histfunc="avg",
                                     color_continuous_scale=px.colors.sequential.Hot,
                                     nbinsx=mz_bins, nbinsy=comment_bins,
                                     marginal_x="histogram", marginal_y="histogram")
            f.write(fig.to_html(full_html=False, include_plotlyjs='cdn'))
        except Exception:
            pass
# Allow the module to be imported without side effects.
if __name__ == "__main__":
    main()
|
{"hexsha": "47486de453120f4aaa6ca600adeb899b55f79f07", "size": 7026, "ext": "py", "lang": "Python", "max_stars_repo_path": "workflow/bin/summarize_extracted.py", "max_stars_repo_name": "sarvarkaxxorov/MassQueryLanguage", "max_stars_repo_head_hexsha": "b7618ba7fb5343c252c5691dc574f4193fb8e83e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2021-11-02T16:45:43.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T18:09:26.000Z", "max_issues_repo_path": "workflow/bin/summarize_extracted.py", "max_issues_repo_name": "sarvarkaxxorov/MassQueryLanguage", "max_issues_repo_head_hexsha": "b7618ba7fb5343c252c5691dc574f4193fb8e83e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 13, "max_issues_repo_issues_event_min_datetime": "2021-11-08T19:21:34.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-18T17:40:40.000Z", "max_forks_repo_path": "workflow/bin/summarize_extracted.py", "max_forks_repo_name": "sarvarkaxxorov/MassQueryLanguage", "max_forks_repo_head_hexsha": "b7618ba7fb5343c252c5691dc574f4193fb8e83e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-11-18T23:39:54.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-24T18:06:49.000Z", "avg_line_length": 43.6397515528, "max_line_length": 117, "alphanum_fraction": 0.500853971, "include": true, "reason": "import numpy", "num_tokens": 1510}
|
import numpy as np
from PIL import Image
def invert_image(image):
    """Return a new RGB ``Image`` with every channel value inverted (255 - v).

    The input is first normalised to RGB, so palette ('P') and
    alpha-carrying ('RGBA') images are handled as well — the original
    per-pixel comprehension raised on those modes ('P' because
    ``getpixel`` returns an int, 'RGBA' because appending a 255 alpha
    produced a 5-channel array).  For RGB input the result is identical
    to the old implementation (the inverted alpha was discarded by the
    final ``convert('RGB')`` anyway), but the O(width*height) Python
    loop is replaced by a single vectorised NumPy operation.
    """
    # np.asarray on an RGB PIL image yields a (height, width, 3) uint8 array.
    rgb_pixels = np.asarray(image.convert('RGB'), dtype=np.uint8)
    return Image.fromarray(255 - rgb_pixels)
|
{"hexsha": "af687f49124c6569135dca3f1f56074fe8e78e91", "size": 454, "ext": "py", "lang": "Python", "max_stars_repo_path": "{{cookiecutter.package_name}}/{{cookiecutter.package_name}}/processing.py", "max_stars_repo_name": "spirousschuh/cookiecutter-git-workshop-documentation", "max_stars_repo_head_hexsha": "4fb62eda345fcc8119393bbec2fe1818feaa9a5d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "{{cookiecutter.package_name}}/{{cookiecutter.package_name}}/processing.py", "max_issues_repo_name": "spirousschuh/cookiecutter-git-workshop-documentation", "max_issues_repo_head_hexsha": "4fb62eda345fcc8119393bbec2fe1818feaa9a5d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "{{cookiecutter.package_name}}/{{cookiecutter.package_name}}/processing.py", "max_forks_repo_name": "spirousschuh/cookiecutter-git-workshop-documentation", "max_forks_repo_head_hexsha": "4fb62eda345fcc8119393bbec2fe1818feaa9a5d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.8947368421, "max_line_length": 71, "alphanum_fraction": 0.5859030837, "include": true, "reason": "import numpy", "num_tokens": 94}
|
# # Exact Optimization with Rational Arithmetic
# This example can be found in section 4.3 [in the paper](https://arxiv.org/pdf/2104.06675.pdf).
# The package allows for exact optimization with rational arithmetic. For this, it suffices to set up the LMO
# to be rational and choose an appropriate step-size rule as detailed below. For the LMOs included in the
# package, this simply means initializing the radius with a rational-compatible element type, e.g., `1`, rather
# than a floating-point number, e.g., `1.0`. Given that numerators and denominators can become quite large in
# rational arithmetic, it is strongly advised to base the used rationals on extended-precision integer types such
# as `BigInt`, i.e., we use `Rational{BigInt}`.
# The second requirement ensuring that the computation runs in rational arithmetic is
# a rational-compatible step-size rule. The most basic step-size rule compatible with rational optimization is
# the agnostic step-size rule with ``\gamma_t = 2/(2 + t)``. With this step-size rule, the gradient does not even need to
# be rational as long as the atom computed by the LMO is of a rational type. Assuming these requirements are
# met, all iterates and the computed solution will then be rational.
using FrankWolfe
using LinearAlgebra
# Problem size and iteration budget.
n = 100
k = n
# Rational starting point: 1//100 in every coordinate, built on BigInt rationals.
x = fill(big(1)//100, n)
# Objective: squared Euclidean norm.
f(x) = dot(x, x)
# In-place gradient of f: ∇f(x) = 2x.
function grad!(storage, x)
    @. storage = 2 * x
end
# pick feasible region
# radius needs to be integer or rational
lmo = FrankWolfe.ProbabilitySimplexOracle{Rational{BigInt}}(1)
# compute some initial vertex
x0 = FrankWolfe.compute_extreme_point(lmo, zeros(n));
x, v, primal, dual_gap, trajectory = FrankWolfe.frank_wolfe(
    f,
    grad!,
    lmo,
    x0,
    max_iteration=k,
    line_search=FrankWolfe.Agnostic(),
    print_iter=k / 10,
    verbose=true,
    emphasis=FrankWolfe.blas,
);
println("\nOutput type of solution: ", eltype(x))
# Another possible step-size rule is `rationalshortstep` which computes the step size by minimizing the
# smoothness inequality as ``\gamma_t=\frac{\langle \nabla f(x_t),x_t-v_t\rangle}{2L||x_t-v_t||^2}``. However, as this step size depends on an upper bound on the
# Lipschitz constant ``L`` as well as the inner product with the gradient ``\nabla f(x_t)``, both have to be of a rational type.
@time x, v, primal, dual_gap, trajectory = FrankWolfe.frank_wolfe(
    f,
    grad!,
    lmo,
    x0,
    max_iteration=k,
    line_search=FrankWolfe.RationalShortstep(),
    L=2,
    print_iter=k / 10,
    verbose=true,
    emphasis=FrankWolfe.blas,
);
# Note: at the last step, we exactly close the gap, finding the solution 1//n * ones(n)
|
{"hexsha": "3d0bd154fffac2ea395de6876ae6bde6e8eca3c6", "size": 2630, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "examples/docs_4_rational_opt.jl", "max_stars_repo_name": "gdalle/FrankWolfe.jl-2", "max_stars_repo_head_hexsha": "c3b3903c4808e24aa9e0f655aa2f8de0f2c1571c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 48, "max_stars_repo_stars_event_min_datetime": "2021-03-27T15:50:25.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-18T15:11:21.000Z", "max_issues_repo_path": "examples/docs_4_rational_opt.jl", "max_issues_repo_name": "gdalle/FrankWolfe.jl-2", "max_issues_repo_head_hexsha": "c3b3903c4808e24aa9e0f655aa2f8de0f2c1571c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 132, "max_issues_repo_issues_event_min_datetime": "2021-03-29T18:49:01.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-30T01:33:22.000Z", "max_forks_repo_path": "docs/src/examples/4_rational_opt.jl", "max_forks_repo_name": "dviladrich95/FrankWolfe.jl", "max_forks_repo_head_hexsha": "bcb441e52918bd1103f13296082cd7a8bc22607b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 11, "max_forks_repo_forks_event_min_datetime": "2021-06-02T13:38:48.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-17T10:04:42.000Z", "avg_line_length": 38.115942029, "max_line_length": 161, "alphanum_fraction": 0.7288973384, "num_tokens": 714}
|
# Predicates on an atom plus its residue name; used as `criteria` filters to
# pre-select atoms before pairwise interaction tests.
# `true` when the (residue, atom name) pair has a tabulated van der Waals radius.
_with_vdw(a::PDBAtom, resname_a::String) = (resname_a, a.atom) in keys(vanderwaalsradius)
# `true` when the atom's element has a tabulated covalent radius.
_with_cov(a::PDBAtom, resname_a::String) = a.element in keys(covalentradius)
"""
Returns true if the atom is a hydrophobic atom in the residue.
"""
ishydrophobic(a::PDBAtom, resname_a::String) = (resname_a, a.atom) in _hydrophobic
"""
Returns true if the atom, e.g. `("HIS","CG")`, is an aromatic atom in the residue.
"""
isaromatic(a::PDBAtom, resname_a::String) = (resname_a, a.atom) in _aromatic
"""
Returns true if the atom, e.g. `("ARG","NE")`, is a cationic atom in the residue.
"""
iscationic(a::PDBAtom, resname_a::String) = (resname_a, a.atom) in _cationic
"""
Returns true if the atom, e.g. `("GLU","CD")`, is an anionic atom in the residue.
"""
isanionic(a::PDBAtom, resname_a::String) = (resname_a, a.atom) in _anionic
"""
Returns true if the atom, e.g. `("ARG","N")`, is a donor in H bonds.
"""
ishbonddonor(a::PDBAtom, resname_a::String) = (resname_a, a.atom) in keys(_hbond_donor)
"""
Returns true if the atom, e.g. `("ARG","O")`, is an acceptor in H bonds.
"""
ishbondacceptor(a::PDBAtom, resname_a::String) = (resname_a, a.atom) in keys(_hbond_acceptor)
"""
`any(f::Function, a::PDBResidue, b::PDBResidue, criteria::Function)`
Test if the function `f` is true for any pair of atoms between the residues `a` and `b`.
This function only test atoms that returns `true` for the fuction `criteria`.
"""
function Base.any(f::Function, a::PDBResidue, b::PDBResidue, criteria::Function)
resname_a, resname_b = a.id.name, b.id.name
a_atoms = a.atoms
b_atoms = b.atoms
indices_a = _find(x -> criteria(x, resname_a), a_atoms)
indices_b = _find(x -> criteria(x, resname_b), b_atoms)
if length(indices_a) != 0 && length(indices_b) != 0
@inbounds for i in indices_a
for j in indices_b
if f(a_atoms[i], b_atoms[j], resname_a, resname_b)
return(true)
end
end
end
end
return(false)
end
# Interaction types
# =================
# van der Waals
# -------------
"""
Test if two atoms or residues are in van der Waals contact using:
`distance(a,b) <= 0.5 + vanderwaalsradius[a] + vanderwaalsradius[b]`.
It returns distance `<= 0.5` if the atoms aren't in `vanderwaalsradius`.
"""
function vanderwaals(a::PDBAtom, b::PDBAtom, resname_a, resname_b)
    # 0.5 Å tolerance on top of the sum of the two tabulated radii
    # (each radius falls back to 0.0 when the pair is not tabulated).
    return( distance(a,b) <= 0.5 +
            get(vanderwaalsradius, (resname_a, a.atom), 0.0) +
            get(vanderwaalsradius, (resname_b, b.atom), 0.0) )
end
vanderwaals(a::PDBResidue, b::PDBResidue) = any(vanderwaals, a, b, _with_vdw)
# van der Waals clash
# -------------------
"""
Returns `true` if the distance between the atoms is less than the sum of the
`vanderwaalsradius` of the atoms. If the atoms aren't on the list (i.e. `OXT`), the
`vanderwaalsradius` of the element is used. If there is not data in the dict,
distance `0.0` is used.
"""
function vanderwaalsclash(a::PDBAtom, b::PDBAtom, resname_a, resname_b)
    # Nested `get`: fall back from the (residue, atom name) entry to the
    # (residue, element) entry, and finally to 0.0 when neither is tabulated.
    return( distance(a,b) <= get(vanderwaalsradius, (resname_a, a.atom),
                                 get(vanderwaalsradius, (resname_a, a.element), 0.0)) +
            get(vanderwaalsradius, (resname_b, b.atom),
                get(vanderwaalsradius, (resname_b, b.element), 0.0)) )
end
vanderwaalsclash(a::PDBResidue, b::PDBResidue) = any(vanderwaalsclash, a, b, _with_vdw)
# Covalent
# --------
"""
Returns `true` if the distance between atoms is less than the sum of the `covalentradius`
of each atom.
"""
function covalent(a::PDBAtom, b::PDBAtom, resname_a, resname_b) # any(... calls it with the res names
    # Residue names are ignored: covalent radii are looked up per element.
    radius_sum = get(covalentradius, a.element, 0.0) + get(covalentradius, b.element, 0.0)
    distance(a, b) <= radius_sum
end
covalent(a::PDBAtom, b::PDBAtom) = covalent(a, b, "", "")
covalent(a::PDBResidue, b::PDBResidue) = any(covalent, a, b, _with_cov)
# Disulphide
# ----------
# `true` for a sulphur atom that belongs to a cysteine residue.
_issulphurcys(a::PDBAtom, resname_a) = resname_a == "CYS" && a.element == "S"
"Returns `true` if two `CYS`'s `S` are at 2.08 Å or less"
function disulphide(a::PDBAtom, b::PDBAtom, resname_a, resname_b)
    if _issulphurcys(a, resname_a) && _issulphurcys(b, resname_b)
        # compare squared distances to avoid computing a square root
        return(squared_distance(a,b) <= (2.08 ^ 2))
    end
    return(false)
end
disulphide(a::PDBResidue, b::PDBResidue) = any(disulphide, a, b, _issulphurcys)
# Aromatic-Sulphur
# ----------------
# `true` when the atom's element is sulphur (any residue).
_issulphur(a::PDBAtom) = a.element == "S"
"""
Returns `true` if a sulphur atom and an aromatic atom are at 5.3 Å or less.
"""
function aromaticsulphur(a::PDBAtom, b::PDBAtom, resname_a, resname_b)
    # The interaction is symmetric: test both (sulphur, aromatic) orderings.
    if (_issulphur(a) && isaromatic(b, resname_b)) ||
       (_issulphur(b) && isaromatic(a, resname_a))
        return(squared_distance(a,b) <= 28.09) # 28.09 == 5.3 ^ 2
    end
    return(false)
end
_issulphuroraromatic(a::PDBAtom, resname_a) = _issulphur(a) || isaromatic(a, resname_a)
aromaticsulphur(a::PDBResidue, b::PDBResidue) = any(aromaticsulphur, a, b, _issulphuroraromatic)
# Π-Cation
# --------
"""
There's a Π-Cation interaction if a cationic and an aromatic atoms are at 6.0 Å or less
"""
function pication(a::PDBAtom, b::PDBAtom, resname_a, resname_b)
    # Symmetric test: either atom may play the cationic role.
    if (iscationic(a, resname_a) && isaromatic(b, resname_b)) ||
       (iscationic(b, resname_b) && isaromatic(a, resname_a))
        return(squared_distance(a,b) <= 36.0) # 36.0 == 6.0 ^ 2
    end
    return(false)
end
_iscationicoraromatic(a::PDBAtom, resname_a) = iscationic(a, resname_a) || isaromatic(a, resname_a)
pication(a::PDBResidue, b::PDBResidue) = any(pication, a, b, _iscationicoraromatic)
# Aromatic
# --------
"""
There's an aromatic interaction if centriods are at 6.0 Å or less.
"""
function aromatic(a::PDBResidue, b::PDBResidue)
    threshold = 36.0 # 6.0 ^ 2
    # Cheap residue-level distance check first; only then compute the ring
    # centroids and compare them pairwise against the same threshold.
    if (a.id.name in _aromatic_res) &&
       (b.id.name in _aromatic_res) &&
       (squared_distance(a, b) <= threshold)
        centres_a = _centre(_get_plane(a))
        centres_b = _centre(_get_plane(b))
        return(any(squared_distance(centroid_a,centroid_b) <= threshold for centroid_a in centres_a, centroid_b in centres_b))
    end
    return(false)
end
# Ionic
# -----
"""
There's an ionic interaction if a cationic and an anionic atom are at 6.0 Å or less.
"""
function ionic(a::PDBAtom, b::PDBAtom, resname_a, resname_b)
    # Symmetric test: either atom may carry the positive charge.
    cation_anion = iscationic(a, resname_a) && isanionic(b, resname_b)
    anion_cation = iscationic(b, resname_b) && isanionic(a, resname_a)
    if cation_anion || anion_cation
        return squared_distance(a, b) <= 36.0 # 36.0 == 6.0 ^ 2
    end
    false
end
_iscationicoranionic(a::PDBAtom, resname_a) = iscationic(a, resname_a) || isanionic(a, resname_a)
ionic(a::PDBResidue, b::PDBResidue) = any(ionic, a, b, _iscationicoranionic)
# Hydrophobic contact
# -------------------
"""
There's a hydrophobic interaction if two hydrophobic atoms are at 5.0 Å or less.
"""
function hydrophobic(a::PDBAtom, b::PDBAtom, resname_a, resname_b)
    both_hydrophobic = ishydrophobic(a, resname_a) && ishydrophobic(b, resname_b)
    # && short-circuits, so the distance is only computed when both atoms qualify.
    both_hydrophobic && squared_distance(a, b) <= 25.0 # 25.0 == 5.0 ^ 2
end
hydrophobic(a::PDBResidue, b::PDBResidue) = any(hydrophobic, a, b, ishydrophobic)
# Hydrogen bonds
# --------------
# Indices of the atoms in `res` that act as antecedent(s) of the acceptor atom `a`.
function _find_antecedent(res::PDBResidue, a::PDBAtom)
    antecedent_ids = _hbond_acceptor[(res.id.name, a.atom)]
    _find(atom -> atom.atom in antecedent_ids, res.atoms)
end
# Indices of the hydrogen atoms in `res` associated with the donor atom `a`.
function _find_h(res::PDBResidue, a::PDBAtom)
    hydrogen_ids = _hbond_donor[(res.id.name, a.atom)]
    _find(atom -> atom.atom in hydrogen_ids, res.atoms)
end
# Geometric hydrogen-bond test over pre-selected donor and acceptor atom
# indices; returns as soon as one qualifying (donor, hydrogen, acceptor,
# antecedent) combination is found.
function _hbond_kernel(donor, acceptor, indices_donor, indices_acceptor)
    @inbounds for i in indices_donor
        don = donor.atoms[i]
        indices_h = _find_h(donor, don)
        if length(indices_h) == 0
            continue # donor atom without an attached hydrogen cannot donate
        end
        for j in indices_acceptor
            acc = acceptor.atoms[j]
            indices_ant = _find_antecedent(acceptor, acc)
            # donor-acceptor distance must be at most 3.9 Å (squared comparison)
            # and the acceptor must have at least one antecedent atom
            if squared_distance(don, acc) <= (3.9^2) && length(indices_ant) != 0
                for k in indices_h
                    hyd = donor.atoms[k]
                    # hydrogen-acceptor <= 2.5 Å and donor-hydrogen-acceptor angle >= 90°
                    if squared_distance(hyd, acc) <= 6.25 && angle(don, hyd, acc) >= 90.0 # 6.25 == 2.5²
                        for ant in indices_ant
                            # both angles at the acceptor w.r.t. its antecedent must be >= 90°
                            if angle(don, acc, acceptor.atoms[ant]) >= 90.0 && angle(hyd, acc, acceptor.atoms[ant]) >= 90.0
                                return(true)
                            end
                        end
                    end
                end
            end
        end
    end
    return(false)
end
# Directed hydrogen-bond test: `donor` donates, `acceptor` accepts.
# `hydrogenbond` below calls this in both directions.
function _hydrogenbond_don_acc(donor::PDBResidue, acceptor::PDBResidue)
    if donor != acceptor
        # Select the atoms that can donate / accept according to the tables.
        indices_donor = findall(x -> ishbonddonor(x, donor.id.name), donor.atoms)
        indices_acceptor = findall(x -> ishbondacceptor(x, acceptor.id.name), acceptor.atoms)
        if length(indices_donor) != 0 && length(indices_acceptor) != 0
            return(_hbond_kernel(donor, acceptor, indices_donor, indices_acceptor))
        end
    end
    return(false)
end
"""
This function only works if there are hydrogens in the structure.
The criteria for a hydrogen bond are:
- d(Ai, Aj) < 3.9Å
- d(Ah, Aacc) < 2.5Å
- θ(Adon, Ah, Aacc) > 90°
- θ(Adon, Aacc, Aacc-antecedent) > 90°
- θ(Ah, Aacc, Aacc-antecedent) > 90°
Where Ah is the donated hydrogen atom, Adon is the hydrogen bond donor atom,
Aacc is the hydrogen bond acceptor atom and Aacc-antecednt is the atom antecedent to the
hydrogen bond acceptor atom.
"""
hydrogenbond(a::PDBResidue, b::PDBResidue) = _hydrogenbond_don_acc(a,b) || _hydrogenbond_don_acc(b,a)
|
{"hexsha": "912133cb8f45a8457a2f7702df0740b5b95bf309", "size": 9456, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/PDB/Interaction.jl", "max_stars_repo_name": "UnofficialJuliaMirrorSnapshots/MIToS.jl-51bafb47-8a16-5ded-8b04-24ef4eede0b5", "max_stars_repo_head_hexsha": "6237d5a885e43f49b74e0a9e56120012711b2d46", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 60, "max_stars_repo_stars_event_min_datetime": "2015-08-13T02:11:17.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-25T12:22:31.000Z", "max_issues_repo_path": "src/PDB/Interaction.jl", "max_issues_repo_name": "UnofficialJuliaMirrorSnapshots/MIToS.jl-51bafb47-8a16-5ded-8b04-24ef4eede0b5", "max_issues_repo_head_hexsha": "6237d5a885e43f49b74e0a9e56120012711b2d46", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 58, "max_issues_repo_issues_event_min_datetime": "2015-06-19T22:44:04.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-30T15:40:25.000Z", "max_forks_repo_path": "src/PDB/Interaction.jl", "max_forks_repo_name": "UnofficialJuliaMirrorSnapshots/MIToS.jl-51bafb47-8a16-5ded-8b04-24ef4eede0b5", "max_forks_repo_head_hexsha": "6237d5a885e43f49b74e0a9e56120012711b2d46", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 26, "max_forks_repo_forks_event_min_datetime": "2015-08-11T02:15:52.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-29T00:13:20.000Z", "avg_line_length": 33.7714285714, "max_line_length": 126, "alphanum_fraction": 0.6394881557, "num_tokens": 3060}
|
[STATEMENT]
lemma permutep_id [simp]: "permutep id mon = mon"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. permutep id mon = mon
[PROOF STEP]
by transfer auto
|
{"llama_tokens": 64, "file": "Symmetric_Polynomials_Symmetric_Polynomials", "length": 1}
|
"""Tests to run with a running daemon."""
import subprocess
import sys
import operator
import time
import numpy as np
from aiida import orm
from aiida.engine.daemon.client import get_daemon_client
from aiida.engine import launch
from aiida.common import exceptions
from aiida_optimize.engines import Bisection
from aiida_optimize import OptimizationWorkChain
import sample_processes
TIMEOUTSECS = 4 * 60 # 4 minutes
def print_daemon_log():
    """Print the contents of the running daemon's log file to stdout.

    Best-effort: any problem reading the file is reported instead of
    raised, mirroring the original subprocess-based behaviour.
    """
    daemon_client = get_daemon_client()
    daemon_log = daemon_client.daemon_log_file
    print(f"Output of 'cat {daemon_log}':")
    try:
        # Read the file directly instead of shelling out to `cat`:
        # portable (no external binary needed) and one process cheaper.
        with open(daemon_log, 'rb') as handle:
            print(handle.read())
    except OSError as exception:
        print(f'Note: the command failed, message: {exception}')
def wait_for(proc, time_elapsed=5):
    """Block until ``proc.is_terminated`` becomes true.

    Polls the flag and sleeps ``time_elapsed`` seconds between checks.
    """
    while True:
        if proc.is_terminated:
            return
        time.sleep(time_elapsed)
def check_optimization(
    engine,
    func_workchain_name,
    engine_kwargs,
    xtol,
    ftol,
    x_exact,
    f_exact,
    evaluate=None,
    input_getter=operator.attrgetter('x'),
    output_port_names=None
): # pylint: disable=too-many-arguments
    """Submit an ``OptimizationWorkChain`` to the daemon and verify its result.

    :param engine: optimization engine class passed to the work chain.
    :param func_workchain_name: name of an attribute of ``sample_processes``
        used as the evaluation process.
    :param engine_kwargs: mapping wrapped into an ``orm.Dict`` for the engine.
    :param xtol: absolute tolerance for the optimal-input comparison.
    :param ftol: absolute tolerance for the optimal-output comparison.
    :param x_exact: expected optimal input value(s).
    :param f_exact: expected optimal output value.
    :param evaluate: extra inputs forwarded to the evaluation process, or None.
    :param input_getter: extracts the relevant input from the evaluation
        process inputs; defaults to the ``x`` attribute.
    :param output_port_names: optional names that must appear in the outputs.
    """
    func_workchain = getattr(sample_processes, func_workchain_name)
    inputs = dict(
        engine=engine,
        engine_kwargs=orm.Dict(dict=dict(engine_kwargs)),
        evaluate_process=func_workchain,
        evaluate=evaluate if evaluate is not None else {},
    )
    result_node = launch.submit(OptimizationWorkChain, **inputs)
    # Block until the daemon finishes running the work chain.
    wait_for(result_node)
    assert 'optimal_process_uuid' in result_node.outputs
    assert np.isclose(result_node.outputs.optimal_process_output.value, f_exact, atol=ftol)
    calc = orm.load_node(result_node.outputs.optimal_process_uuid.value)
    assert np.allclose(type(x_exact)(input_getter(calc.inputs)), x_exact, atol=xtol)
    try:
        optimal_process_input_node = result_node.outputs.optimal_process_input
    except exceptions.NotExistentAttributeError:
        # The optimal-input port is optional; nothing more to check without it.
        return
    # Unwrap the stored node into a plain Python value for comparison.
    if isinstance(optimal_process_input_node, orm.BaseType):
        optimal_process_input = optimal_process_input_node.value
    elif isinstance(optimal_process_input_node, orm.List):
        optimal_process_input = optimal_process_input_node.get_list()
    else:
        optimal_process_input = optimal_process_input_node
    getter_input = input_getter(calc.inputs)
    if isinstance(getter_input, orm.Node):
        # When the input is itself a node it must be the very same node.
        assert getter_input.uuid == optimal_process_input_node.uuid
    assert np.allclose(type(x_exact)(optimal_process_input), x_exact, atol=xtol)
    assert np.allclose(type(x_exact)(getter_input), type(x_exact)(optimal_process_input), atol=xtol)
    if output_port_names is not None:
        for name in output_port_names:
            assert name in result_node.outputs
def launch_all():
    """Run the bisection optimization of the ``Echo`` workchain and check it."""
    tolerance = 1e-1
    bisection_kwargs = dict(
        lower=-1.1,
        upper=1.,
        tol=tolerance,
    )
    check_optimization(
        engine=Bisection,
        engine_kwargs=bisection_kwargs,
        func_workchain_name='Echo',
        xtol=tolerance,
        ftol=tolerance,
        x_exact=0.,
        f_exact=0.,
    )
def main():
    """Submit all test workchains through the daemon, then dump its log."""
    launch_all()
    print_daemon_log()
    # Explicit zero exit code so the CI runner records success.
    sys.exit(0)
if __name__ == '__main__':
    main()
|
{"hexsha": "dc2d823aef6e5654eb01d1351261062d660fb5a4", "size": 3344, "ext": "py", "lang": "Python", "max_stars_repo_path": ".github/system_tests/test_daemon.py", "max_stars_repo_name": "greschd/aiida_optimize", "max_stars_repo_head_hexsha": "4c7bc76e4ad7e40f6105e60f34b7a20e1ab3a122", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": ".github/system_tests/test_daemon.py", "max_issues_repo_name": "greschd/aiida_optimize", "max_issues_repo_head_hexsha": "4c7bc76e4ad7e40f6105e60f34b7a20e1ab3a122", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": ".github/system_tests/test_daemon.py", "max_forks_repo_name": "greschd/aiida_optimize", "max_forks_repo_head_hexsha": "4c7bc76e4ad7e40f6105e60f34b7a20e1ab3a122", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.9677419355, "max_line_length": 100, "alphanum_fraction": 0.7009569378, "include": true, "reason": "import numpy", "num_tokens": 764}
|
/-
Copyright (c) 2017 Johannes Hölzl. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Johannes Hölzl, Mario Carneiro, Patrick Massot
-/
import topology.maps
import order.filter.pi
import data.fin.tuple
/-!
# Constructions of new topological spaces from old ones
This file constructs products, sums, subtypes and quotients of topological spaces
and sets up their basic theory, such as criteria for maps into or out of these
constructions to be continuous; descriptions of the open sets, neighborhood filters,
and generators of these constructions; and their behavior with respect to embeddings
and other specific classes of maps.
## Implementation note
The constructed topologies are defined using induced and coinduced topologies
along with the complete lattice structure on topologies. Their universal properties
(for example, a map `X → Y × Z` is continuous if and only if both projections
`X → Y`, `X → Z` are) follow easily using order-theoretic descriptions of
continuity. With more work we can also extract descriptions of the open sets,
neighborhood filters and so on.
## Tags
product, sum, disjoint union, subspace, quotient space
-/
-- Classical reasoning and the topology/filter locales are used throughout.
noncomputable theory
open topological_space set filter
open_locale classical topological_space filter
universes u v
-- Ambient types; `γ δ ε ζ` are auxiliary types used by the later lemmas.
variables {α : Type u} {β : Type v} {γ δ ε ζ : Type*}
section constructions
-- Subspace topology on a subtype: induced along the coercion into `α`.
instance {p : α → Prop} [t : topological_space α] : topological_space (subtype p) :=
induced coe t
-- Quotient topologies: coinduced along the quotient map.
instance {r : α → α → Prop} [t : topological_space α] : topological_space (quot r) :=
coinduced (quot.mk r) t
instance {s : setoid α} [t : topological_space α] : topological_space (quotient s) :=
coinduced quotient.mk t
-- Product topology: infimum of the topologies induced by the two projections.
instance [t₁ : topological_space α] [t₂ : topological_space β] : topological_space (α × β) :=
induced prod.fst t₁ ⊓ induced prod.snd t₂
-- Sum (disjoint union) topology: supremum of the coinduced topologies.
instance [t₁ : topological_space α] [t₂ : topological_space β] : topological_space (α ⊕ β) :=
coinduced sum.inl t₁ ⊔ coinduced sum.inr t₂
instance {β : α → Type v} [t₂ : Πa, topological_space (β a)] : topological_space (sigma β) :=
⨆a, coinduced (sigma.mk a) (t₂ a)
-- Topology on a pi type: infimum of the topologies induced by evaluation maps.
instance Pi.topological_space {β : α → Type v} [t₂ : Πa, topological_space (β a)] :
  topological_space (Πa, β a) :=
⨅a, induced (λf, f a) (t₂ a)
instance ulift.topological_space [t : topological_space α] : topological_space (ulift.{v u} α) :=
t.induced ulift.down
lemma quotient.preimage_mem_nhds [topological_space α] [s : setoid α]
  {V : set $ quotient s} {a : α} (hs : V ∈ 𝓝 (quotient.mk a)) : quotient.mk ⁻¹' V ∈ 𝓝 a :=
preimage_nhds_coinduced hs
/-- The image of a dense set under `quotient.mk` is a dense set. -/
lemma dense.quotient [setoid α] [topological_space α] {s : set α} (H : dense s) :
  dense (quotient.mk '' s) :=
(surjective_quotient_mk α).dense_range.dense_image continuous_coinduced_rng H
/-- The composition of `quotient.mk` and a function with dense range has dense range. -/
lemma dense_range.quotient [setoid α] [topological_space α] {f : β → α} (hf : dense_range f) :
  dense_range (quotient.mk ∘ f) :=
(surjective_quotient_mk α).dense_range.comp hf continuous_coinduced_rng
-- Discreteness is inherited by subtypes, binary sums and sigma types.
instance {p : α → Prop} [topological_space α] [discrete_topology α] :
  discrete_topology (subtype p) :=
⟨bot_unique $ assume s hs,
  ⟨coe '' s, is_open_discrete _, (set.preimage_image_eq _ subtype.coe_injective)⟩⟩
instance sum.discrete_topology [topological_space α] [topological_space β]
  [hα : discrete_topology α] [hβ : discrete_topology β] : discrete_topology (α ⊕ β) :=
⟨by unfold sum.topological_space; simp [hα.eq_bot, hβ.eq_bot]⟩
instance sigma.discrete_topology {β : α → Type v} [Πa, topological_space (β a)]
  [h : Πa, discrete_topology (β a)] : discrete_topology (sigma β) :=
⟨by { unfold sigma.topological_space, simp [λ a, (h a).eq_bot] }⟩
section topα
variable [topological_space α]
/-
The 𝓝 filter and the subspace topology.
-/
-- Membership in a subtype neighborhood reduces to an ambient neighborhood.
theorem mem_nhds_subtype (s : set α) (a : {x // x ∈ s}) (t : set {x // x ∈ s}) :
  t ∈ 𝓝 a ↔ ∃ u ∈ 𝓝 (a : α), coe ⁻¹' u ⊆ t :=
mem_nhds_induced coe a t
-- Neighborhood filters of the subspace topology are comaps of ambient ones.
theorem nhds_subtype (s : set α) (a : {x // x ∈ s}) :
  𝓝 a = comap coe (𝓝 (a : α)) :=
nhds_induced coe a
end topα
/-- A type synonym equiped with the topology whose open sets are the empty set and the sets with
finite complements. -/
def cofinite_topology (α : Type*) := α
namespace cofinite_topology
/-- The identity equivalence between `α` and `cofinite_topology α`. -/
def of : α ≃ cofinite_topology α := equiv.refl α
instance [inhabited α] : inhabited (cofinite_topology α) :=
{ default := of default }
-- The cofinite topology: a nonempty set is open iff its complement is finite.
instance : topological_space (cofinite_topology α) :=
{ is_open := λ s, s.nonempty → set.finite sᶜ,
  is_open_univ := by simp,
  is_open_inter := λ s t, begin
    rintros hs ht ⟨x, hxs, hxt⟩,
    rw compl_inter,
    exact (hs ⟨x, hxs⟩).union (ht ⟨x, hxt⟩),
  end,
  is_open_sUnion := begin
    rintros s h ⟨x, t, hts, hzt⟩,
    rw set.compl_sUnion,
    exact set.finite.sInter (mem_image_of_mem _ hts) (h t hts ⟨x, hzt⟩),
  end }
lemma is_open_iff {s : set (cofinite_topology α)} :
  is_open s ↔ (s.nonempty → (sᶜ).finite) := iff.rfl
lemma is_open_iff' {s : set (cofinite_topology α)} :
  is_open s ↔ (s = ∅ ∨ (sᶜ).finite) :=
by simp only [is_open_iff, ← ne_empty_iff_nonempty, or_iff_not_imp_left]
lemma is_closed_iff {s : set (cofinite_topology α)} :
  is_closed s ↔ s = univ ∨ s.finite :=
by simp [← is_open_compl_iff, is_open_iff']
-- Neighborhoods: the point itself together with the cofinite filter.
lemma nhds_eq (a : cofinite_topology α) : 𝓝 a = pure a ⊔ cofinite :=
begin
  ext U,
  rw mem_nhds_iff,
  split,
  { rintro ⟨V, hVU, V_op, haV⟩,
    exact mem_sup.mpr ⟨hVU haV, mem_of_superset (V_op ⟨_, haV⟩) hVU⟩ },
  { rintros ⟨hU : a ∈ U, hU' : (Uᶜ).finite⟩,
    exact ⟨U, subset.rfl, λ h, hU', hU⟩ }
end
lemma mem_nhds_iff {a : cofinite_topology α} {s : set (cofinite_topology α)} :
  s ∈ 𝓝 a ↔ a ∈ s ∧ sᶜ.finite :=
by simp [nhds_eq]
end cofinite_topology
end constructions
section prod
variables [topological_space α] [topological_space β] [topological_space γ] [topological_space δ]
  [topological_space ε] [topological_space ζ]
-- Both projections out of a product are continuous, with `continuous_at`
-- versions and pre-/post-composition variants.
@[continuity] lemma continuous_fst : continuous (@prod.fst α β) :=
continuous_inf_dom_left continuous_induced_dom
/-- Postcomposing `f` with `prod.fst` is continuous -/
lemma continuous.fst {f : α → β × γ} (hf : continuous f) : continuous (λ a : α, (f a).1) :=
continuous_fst.comp hf
/-- Precomposing `f` with `prod.fst` is continuous -/
lemma continuous.fst' {f : α → γ} (hf : continuous f) : continuous (λ x : α × β, f x.fst) :=
hf.comp continuous_fst
lemma continuous_at_fst {p : α × β} : continuous_at prod.fst p :=
continuous_fst.continuous_at
/-- Postcomposing `f` with `prod.fst` is continuous at `x` -/
lemma continuous_at.fst {f : α → β × γ} {x : α} (hf : continuous_at f x) :
  continuous_at (λ a : α, (f a).1) x :=
continuous_at_fst.comp hf
/-- Precomposing `f` with `prod.fst` is continuous at `(x, y)` -/
lemma continuous_at.fst' {f : α → γ} {x : α} {y : β} (hf : continuous_at f x) :
  continuous_at (λ x : α × β, f x.fst) (x, y) :=
continuous_at.comp hf continuous_at_fst
/-- Precomposing `f` with `prod.fst` is continuous at `x : α × β` -/
lemma continuous_at.fst'' {f : α → γ} {x : α × β} (hf : continuous_at f x.fst) :
  continuous_at (λ x : α × β, f x.fst) x :=
hf.comp continuous_at_fst
@[continuity] lemma continuous_snd : continuous (@prod.snd α β) :=
continuous_inf_dom_right continuous_induced_dom
/-- Postcomposing `f` with `prod.snd` is continuous -/
lemma continuous.snd {f : α → β × γ} (hf : continuous f) : continuous (λ a : α, (f a).2) :=
continuous_snd.comp hf
/-- Precomposing `f` with `prod.snd` is continuous -/
lemma continuous.snd' {f : β → γ} (hf : continuous f) : continuous (λ x : α × β, f x.snd) :=
hf.comp continuous_snd
lemma continuous_at_snd {p : α × β} : continuous_at prod.snd p :=
continuous_snd.continuous_at
/-- Postcomposing `f` with `prod.snd` is continuous at `x` -/
lemma continuous_at.snd {f : α → β × γ} {x : α} (hf : continuous_at f x) :
  continuous_at (λ a : α, (f a).2) x :=
continuous_at_snd.comp hf
/-- Precomposing `f` with `prod.snd` is continuous at `(x, y)` -/
lemma continuous_at.snd' {f : β → γ} {x : α} {y : β} (hf : continuous_at f y) :
  continuous_at (λ x : α × β, f x.snd) (x, y) :=
continuous_at.comp hf continuous_at_snd
/-- Precomposing `f` with `prod.snd` is continuous at `x : α × β` -/
lemma continuous_at.snd'' {f : β → γ} {x : α × β} (hf : continuous_at f x.snd) :
  continuous_at (λ x : α × β, f x.snd) x :=
hf.comp continuous_at_snd
-- Pairing two continuous maps into the product is continuous.
@[continuity] lemma continuous.prod_mk {f : γ → α} {g : γ → β}
  (hf : continuous f) (hg : continuous g) : continuous (λx, (f x, g x)) :=
continuous_inf_rng (continuous_induced_rng hf) (continuous_induced_rng hg)
@[continuity] lemma continuous.prod.mk (a : α) : continuous (λ b : β, (a, b)) :=
continuous_const.prod_mk continuous_id'
@[continuity] lemma continuous.prod.mk_left (b : β) : continuous (λ a : α, (a, b)) :=
continuous_id'.prod_mk continuous_const
-- Composing a continuous map of two/three/four arguments with continuous maps.
lemma continuous.comp₂ {g : α × β → γ} (hg : continuous g) {e : δ → α} (he : continuous e)
  {f : δ → β} (hf : continuous f) : continuous (λ x, g (e x, f x)) :=
hg.comp $ he.prod_mk hf
lemma continuous.comp₃ {g : α × β × γ → ε} (hg : continuous g)
  {e : δ → α} (he : continuous e) {f : δ → β} (hf : continuous f)
  {k : δ → γ} (hk : continuous k) : continuous (λ x, g (e x, f x, k x)) :=
hg.comp₂ he $ hf.prod_mk hk
lemma continuous.comp₄ {g : α × β × γ × ζ → ε} (hg : continuous g)
  {e : δ → α} (he : continuous e) {f : δ → β} (hf : continuous f)
  {k : δ → γ} (hk : continuous k) {l : δ → ζ} (hl : continuous l) :
  continuous (λ x, g (e x, f x, k x, l x)) :=
hg.comp₃ he hf $ hk.prod_mk hl
lemma continuous.prod_map {f : γ → α} {g : δ → β} (hf : continuous f) (hg : continuous g) :
  continuous (λ x : γ × δ, (f x.1, g x.2)) :=
hf.fst'.prod_mk hg.snd'
/-- A version of `continuous_inf_dom_left` for binary functions -/
lemma continuous_inf_dom_left₂ {α β γ} {f : α → β → γ}
  {ta1 ta2 : topological_space α} {tb1 tb2 : topological_space β} {tc1 : topological_space γ}
  (h : by haveI := ta1; haveI := tb1; exact continuous (λ p : α × β, f p.1 p.2)) :
  by haveI := ta1 ⊓ ta2; haveI := tb1 ⊓ tb2; exact continuous (λ p : α × β, f p.1 p.2) :=
begin
  -- `id` is continuous from the infimum topology to each left factor.
  have ha := @continuous_inf_dom_left _ _ id ta1 ta2 ta1 (@continuous_id _ (id _)),
  have hb := @continuous_inf_dom_left _ _ id tb1 tb2 tb1 (@continuous_id _ (id _)),
  have h_continuous_id := @continuous.prod_map _ _ _ _ ta1 tb1 (ta1 ⊓ ta2) (tb1 ⊓ tb2) _ _ ha hb,
  exact @continuous.comp _ _ _ (id _) (id _) _ _ _ h h_continuous_id,
end
/-- A version of `continuous_inf_dom_right` for binary functions -/
lemma continuous_inf_dom_right₂ {α β γ} {f : α → β → γ}
  {ta1 ta2 : topological_space α} {tb1 tb2 : topological_space β} {tc1 : topological_space γ}
  (h : by haveI := ta2; haveI := tb2; exact continuous (λ p : α × β, f p.1 p.2)) :
  by haveI := ta1 ⊓ ta2; haveI := tb1 ⊓ tb2; exact continuous (λ p : α × β, f p.1 p.2) :=
begin
  -- Symmetric to `continuous_inf_dom_left₂`, using the right factors.
  have ha := @continuous_inf_dom_right _ _ id ta1 ta2 ta2 (@continuous_id _ (id _)),
  have hb := @continuous_inf_dom_right _ _ id tb1 tb2 tb2 (@continuous_id _ (id _)),
  have h_continuous_id := @continuous.prod_map _ _ _ _ ta2 tb2 (ta1 ⊓ ta2) (tb1 ⊓ tb2) _ _ ha hb,
  exact @continuous.comp _ _ _ (id _) (id _) _ _ _ h h_continuous_id,
end
/-- A version of `continuous_Inf_dom` for binary functions -/
lemma continuous_Inf_dom₂ {α β γ} {f : α → β → γ}
  {tas : set (topological_space α)} {tbs : set (topological_space β)}
  {ta : topological_space α} {tb : topological_space β} {tc : topological_space γ}
  (ha : ta ∈ tas) (hb : tb ∈ tbs)
  (hf : continuous (λ p : α × β, f p.1 p.2)):
  by haveI := Inf tas; haveI := Inf tbs; exact @continuous _ _ _ tc (λ p : α × β, f p.1 p.2) :=
begin
  let t : topological_space (α × β) := prod.topological_space,
  have ha := continuous_Inf_dom ha continuous_id,
  have hb := continuous_Inf_dom hb continuous_id,
  have h_continuous_id := @continuous.prod_map _ _ _ _ ta tb (Inf tas) (Inf tbs) _ _ ha hb,
  exact @continuous.comp _ _ _ (id _) (id _) _ _ _ hf h_continuous_id,
end
/-- If `p` holds eventually near `a`, then `p x.1` holds eventually near `(a, b)`. -/
lemma filter.eventually.prod_inl_nhds {p : α → Prop} {a : α} (h : ∀ᶠ x in 𝓝 a, p x) (b : β) :
∀ᶠ x in 𝓝 (a, b), p (x : α × β).1 :=
continuous_at_fst h
/-- If `p` holds eventually near `b`, then `p x.2` holds eventually near `(a, b)`. -/
lemma filter.eventually.prod_inr_nhds {p : β → Prop} {b : β} (h : ∀ᶠ x in 𝓝 b, p x) (a : α) :
∀ᶠ x in 𝓝 (a, b), p (x : α × β).2 :=
continuous_at_snd h
/-- If `pa` holds eventually near `a` and `pb` eventually near `b`, then `pa p.1 ∧ pb p.2`
holds eventually near `(a, b)`. -/
lemma filter.eventually.prod_mk_nhds {pa : α → Prop} {a} (ha : ∀ᶠ x in 𝓝 a, pa x)
{pb : β → Prop} {b} (hb : ∀ᶠ y in 𝓝 b, pb y) :
∀ᶠ p in 𝓝 (a, b), pa (p : α × β).1 ∧ pb p.2 :=
(ha.prod_inl_nhds b).and (hb.prod_inr_nhds a)
/-- Swapping the factors of a product, `prod.swap`, is continuous. -/
lemma continuous_swap : continuous (prod.swap : α × β → β × α) :=
continuous_snd.prod_mk continuous_fst
/-- If the uncurried form of `f` is continuous, then each partial application `f a` is. -/
lemma continuous_uncurry_left {f : α → β → γ} (a : α)
(h : continuous (function.uncurry f)) : continuous (f a) :=
show continuous (function.uncurry f ∘ (λ b, (a, b))), from h.comp (by continuity)
/-- If the uncurried form of `f` is continuous, then so is `λ a, f a b` for each fixed `b`. -/
lemma continuous_uncurry_right {f : α → β → γ} (b : β)
(h : continuous (function.uncurry f)) : continuous (λ a, f a b) :=
show continuous (function.uncurry f ∘ (λ a, (a, b))), from h.comp (by continuity)
/-- If `g` is continuous on a product, then each partial application `function.curry g a` is. -/
lemma continuous_curry {g : α × β → γ} (a : α)
(h : continuous g) : continuous (function.curry g a) :=
show continuous (g ∘ (λ b, (a, b))), from h.comp (by continuity)
/-- The product of two open sets is open in the product topology. -/
lemma is_open.prod {s : set α} {t : set β} (hs : is_open s) (ht : is_open t) :
is_open (s ×ˢ t) :=
(hs.preimage continuous_fst).inter (ht.preimage continuous_snd)
/-- The neighborhood filter of a point in a product space is the product of the
neighborhood filters of its components. -/
lemma nhds_prod_eq {a : α} {b : β} : 𝓝 (a, b) = 𝓝 a ×ᶠ 𝓝 b :=
by rw [filter.prod, prod.topological_space, nhds_inf, nhds_induced, nhds_induced]
/-- If a function `f x y` is such that `y ↦ f x y` is continuous for all `x`, and `x` lives in a
discrete space, then `f` is continuous. -/
lemma continuous_uncurry_of_discrete_topology [discrete_topology α]
{f : α → β → γ} (hf : ∀ a, continuous (f a)) : continuous (function.uncurry f) :=
begin
apply continuous_iff_continuous_at.2,
rintros ⟨a, x⟩,
change map _ _ ≤ _,
-- In a discrete space `𝓝 a = pure a`, so the product neighborhood filter collapses.
rw [nhds_prod_eq, nhds_discrete, filter.map_pure_prod],
exact (hf a).continuous_at
end
/-- A set is a neighborhood of `(a, b)` iff it contains a product of neighborhoods. -/
lemma mem_nhds_prod_iff {a : α} {b : β} {s : set (α × β)} :
s ∈ 𝓝 (a, b) ↔ ∃ (u ∈ 𝓝 a) (v ∈ 𝓝 b), u ×ˢ v ⊆ s :=
by rw [nhds_prod_eq, mem_prod_iff]
/-- A set is a neighborhood of `(a, b)` iff it contains a product of open neighborhoods. -/
lemma mem_nhds_prod_iff' {a : α} {b : β} {s : set (α × β)} :
s ∈ 𝓝 (a, b) ↔ ∃ (u : set α) (v : set β), is_open u ∧ a ∈ u ∧ is_open v ∧ b ∈ v ∧ u ×ˢ v ⊆ s :=
begin
rw mem_nhds_prod_iff,
split,
{ rintros ⟨u, Hu, v, Hv, h⟩,
-- Shrink each neighborhood to an open one.
rcases mem_nhds_iff.1 Hu with ⟨u', u'u, u'_open, Hu'⟩,
rcases mem_nhds_iff.1 Hv with ⟨v', v'v, v'_open, Hv'⟩,
exact ⟨u', v', u'_open, Hu', v'_open, Hv', (set.prod_mono u'u v'v).trans h⟩ },
{ rintros ⟨u, v, u_open, au, v_open, bv, huv⟩,
exact ⟨u, u_open.mem_nhds au, v, v_open.mem_nhds bv, huv⟩ }
end
/-- Convergence to a point of a product is equivalent to componentwise convergence. -/
lemma _root_.prod.tendsto_iff {α} (seq : α → β × γ) {f : filter α} (x : β × γ) :
tendsto seq f (𝓝 x)
↔ tendsto (λ n, (seq n).fst) f (𝓝 x.fst) ∧ tendsto (λ n, (seq n).snd) f (𝓝 x.snd) :=
by { cases x, rw [nhds_prod_eq, filter.tendsto_prod_iff'], }
/-- Bases of `𝓝 a` and `𝓝 b` yield a basis of `𝓝 (a, b)` consisting of products. -/
lemma filter.has_basis.prod_nhds {ιa ιb : Type*} {pa : ιa → Prop} {pb : ιb → Prop}
{sa : ιa → set α} {sb : ιb → set β} {a : α} {b : β} (ha : (𝓝 a).has_basis pa sa)
(hb : (𝓝 b).has_basis pb sb) :
(𝓝 (a, b)).has_basis (λ i : ιa × ιb, pa i.1 ∧ pb i.2) (λ i, sa i.1 ×ˢ sb i.2) :=
by { rw nhds_prod_eq, exact ha.prod hb }
/-- A version of `filter.has_basis.prod_nhds` stated for an arbitrary point `ab : α × β`. -/
lemma filter.has_basis.prod_nhds' {ιa ιb : Type*} {pa : ιa → Prop} {pb : ιb → Prop}
{sa : ιa → set α} {sb : ιb → set β} {ab : α × β} (ha : (𝓝 ab.1).has_basis pa sa)
(hb : (𝓝 ab.2).has_basis pb sb) :
(𝓝 ab).has_basis (λ i : ιa × ιb, pa i.1 ∧ pb i.2) (λ i, sa i.1 ×ˢ sb i.2) :=
by { cases ab, exact ha.prod_nhds hb }
/-- The product of two discrete spaces is discrete. -/
instance [discrete_topology α] [discrete_topology β] : discrete_topology (α × β) :=
⟨eq_of_nhds_eq_nhds $ assume ⟨a, b⟩,
by rw [nhds_prod_eq, nhds_discrete α, nhds_discrete β, nhds_bot, filter.prod_pure_pure]⟩
/-- A product set is a neighborhood of `(a, b)` iff each factor is a neighborhood of the
corresponding component. -/
lemma prod_mem_nhds_iff {s : set α} {t : set β} {a : α} {b : β} :
s ×ˢ t ∈ 𝓝 (a, b) ↔ s ∈ 𝓝 a ∧ t ∈ 𝓝 b :=
by rw [nhds_prod_eq, prod_mem_prod_iff]
/-- The product of a neighborhood of `a` and a neighborhood of `b` is a neighborhood of
`(a, b)`. -/
lemma prod_is_open.mem_nhds {s : set α} {t : set β} {a : α} {b : β}
(ha : s ∈ 𝓝 a) (hb : t ∈ 𝓝 b) : s ×ˢ t ∈ 𝓝 (a, b) :=
prod_mem_nhds_iff.2 ⟨ha, hb⟩
/-- The neighborhood filter of `(a, b)` is the image under `prod.swap` of that of `(b, a)`. -/
lemma nhds_swap (a : α) (b : β) : 𝓝 (a, b) = (𝓝 (b, a)).map prod.swap :=
by rw [nhds_prod_eq, filter.prod_comm, nhds_prod_eq]; refl
/-- Two maps tending to `a` and `b` combine into a map tending to `(a, b)`. -/
lemma filter.tendsto.prod_mk_nhds {γ} {a : α} {b : β} {f : filter γ} {ma : γ → α} {mb : γ → β}
(ha : tendsto ma f (𝓝 a)) (hb : tendsto mb f (𝓝 b)) :
tendsto (λc, (ma c, mb c)) f (𝓝 (a, b)) :=
by rw [nhds_prod_eq]; exact filter.tendsto.prod_mk ha hb
/-- An eventual property near `(x, y)` can be curried into nested eventual properties. -/
lemma filter.eventually.curry_nhds {p : α × β → Prop} {x : α} {y : β} (h : ∀ᶠ x in 𝓝 (x, y), p x) :
∀ᶠ x' in 𝓝 x, ∀ᶠ y' in 𝓝 y, p (x', y') :=
by { rw [nhds_prod_eq] at h, exact h.curry }
/-- Pointwise continuity of `f` and `g` at `x` gives pointwise continuity of the pair map. -/
lemma continuous_at.prod {f : α → β} {g : α → γ} {x : α}
(hf : continuous_at f x) (hg : continuous_at g x) : continuous_at (λx, (f x, g x)) x :=
hf.prod_mk_nhds hg
/-- The product map `λ p, (f p.1, g p.2)` is continuous at `p` if `f`, `g` are continuous at
the respective components. -/
lemma continuous_at.prod_map {f : α → γ} {g : β → δ} {p : α × β}
(hf : continuous_at f p.fst) (hg : continuous_at g p.snd) :
continuous_at (λ p : α × β, (f p.1, g p.2)) p :=
hf.fst''.prod hg.snd''
/-- A version of `continuous_at.prod_map` with the point given as an explicit pair. -/
lemma continuous_at.prod_map' {f : α → γ} {g : β → δ} {x : α} {y : β}
(hf : continuous_at f x) (hg : continuous_at g y) :
continuous_at (λ p : α × β, (f p.1, g p.2)) (x, y) :=
hf.fst'.prod hg.snd'
/-- If `s` and `t` cover their respective spaces, the product of the generated topologies is
generated by products `u ×ˢ v` with `u ∈ s`, `v ∈ t`. -/
lemma prod_generate_from_generate_from_eq {α β : Type*} {s : set (set α)} {t : set (set β)}
(hs : ⋃₀ s = univ) (ht : ⋃₀ t = univ) :
@prod.topological_space α β (generate_from s) (generate_from t) =
generate_from {g | ∃ u ∈ s, ∃ v ∈ t, g = u ×ˢ v} :=
let G := generate_from {g | ∃ u ∈ s, ∃ v ∈ t, g = u ×ˢ v} in
le_antisymm
(le_generate_from $ λ g ⟨u, hu, v, hv, g_eq⟩, g_eq.symm ▸
@is_open.prod _ _ (generate_from s) (generate_from t) _ _
(generate_open.basic _ hu) (generate_open.basic _ hv))
(le_inf
-- The covering hypotheses are needed to write `prod.fst ⁻¹' u` (resp. `prod.snd ⁻¹' v`)
-- as a union of basic products.
(coinduced_le_iff_le_induced.mp $ le_generate_from $ λ u hu,
have (⋃ v ∈ t, u ×ˢ v) = prod.fst ⁻¹' u,
by simp_rw [← prod_Union, ← sUnion_eq_bUnion, ht, prod_univ],
show G.is_open (prod.fst ⁻¹' u),
by { rw [← this], exactI is_open_Union (λ v, is_open_Union $ λ hv,
generate_open.basic _ ⟨_, hu, _, hv, rfl⟩) })
(coinduced_le_iff_le_induced.mp $ le_generate_from $ λ v hv,
have (⋃ u ∈ s, u ×ˢ v) = prod.snd ⁻¹' v,
by simp_rw [← Union_prod_const, ← sUnion_eq_bUnion, hs, univ_prod],
show G.is_open (prod.snd ⁻¹' v),
by { rw [← this], exactI is_open_Union (λ u, is_open_Union $ λ hu,
generate_open.basic _ ⟨_, hu, _, hv, rfl⟩) }))
/-- The product topology is generated by products of open sets. -/
lemma prod_eq_generate_from :
prod.topological_space =
generate_from {g | ∃(s:set α) (t:set β), is_open s ∧ is_open t ∧ g = s ×ˢ t} :=
le_antisymm
(le_generate_from $ λ g ⟨s, t, hs, ht, g_eq⟩, g_eq.symm ▸ hs.prod ht)
(le_inf
(ball_image_of_ball $ λt ht, generate_open.basic _ ⟨t, univ, by simpa [set.prod_eq] using ht⟩)
(ball_image_of_ball $ λt ht, generate_open.basic _ ⟨univ, t, by simpa [set.prod_eq] using ht⟩))
/-- A set in a product space is open iff every point has an open box neighborhood inside it. -/
lemma is_open_prod_iff {s : set (α × β)} : is_open s ↔
(∀a b, (a, b) ∈ s →
∃ (u : set α) (v : set β), is_open u ∧ is_open v ∧ a ∈ u ∧ b ∈ v ∧ u ×ˢ v ⊆ s) :=
begin
rw [is_open_iff_nhds],
simp_rw [le_principal_iff, prod.forall,
((nhds_basis_opens _).prod_nhds (nhds_basis_opens _)).mem_iff, prod.exists, exists_prop],
simp only [and_assoc, and.left_comm]
end
/-- A product of induced topologies is induced by the product map -/
lemma prod_induced_induced {α γ : Type*} (f : α → β) (g : γ → δ) :
@prod.topological_space α γ (induced f ‹_›) (induced g ‹_›) =
induced (λ p, (f p.1, g p.2)) prod.topological_space :=
by simp_rw [prod.topological_space, induced_inf, induced_compose]
/-- A variant of `continuous_uncurry_of_discrete_topology`, proved by unfolding `continuous_at`
directly. -/
lemma continuous_uncurry_of_discrete_topology_left [discrete_topology α]
{f : α → β → γ} (h : ∀ a, continuous (f a)) : continuous (function.uncurry f) :=
continuous_iff_continuous_at.2 $ λ ⟨a, b⟩,
by simp only [continuous_at, nhds_prod_eq, nhds_discrete α, pure_prod, tendsto_map'_iff, (∘),
function.uncurry, (h a).tendsto]
/-- Given a neighborhood `s` of `(x, x)`, then `(x, x)` has a square open neighborhood
that is a subset of `s`. -/
lemma exists_nhds_square {s : set (α × α)} {x : α} (hx : s ∈ 𝓝 (x, x)) :
∃ U : set α, is_open U ∧ x ∈ U ∧ U ×ˢ U ⊆ s :=
by simpa [nhds_prod_eq, (nhds_basis_opens x).prod_self.mem_iff, and.assoc, and.left_comm] using hx
/-- `prod.fst` maps neighborhood of `x : α × β` within the section `prod.snd ⁻¹' {x.2}`
to `𝓝 x.1`. -/
lemma map_fst_nhds_within (x : α × β) : map prod.fst (𝓝[prod.snd ⁻¹' {x.2}] x) = 𝓝 x.1 :=
begin
refine le_antisymm (continuous_at_fst.mono_left inf_le_left) (λ s hs, _),
rcases x with ⟨x, y⟩,
rw [mem_map, nhds_within, mem_inf_principal, mem_nhds_prod_iff] at hs,
rcases hs with ⟨u, hu, v, hv, H⟩,
simp only [prod_subset_iff, mem_singleton_iff, mem_set_of_eq, mem_preimage] at H,
exact mem_of_superset hu (λ z hz, H _ hz _ (mem_of_mem_nhds hv) rfl)
end
/-- `prod.fst` maps the full neighborhood filter of `x : α × β` onto `𝓝 x.1`. -/
@[simp] lemma map_fst_nhds (x : α × β) : map prod.fst (𝓝 x) = 𝓝 x.1 :=
le_antisymm continuous_at_fst $ (map_fst_nhds_within x).symm.trans_le (map_mono inf_le_left)
/-- The first projection in a product of topological spaces sends open sets to open sets. -/
lemma is_open_map_fst : is_open_map (@prod.fst α β) :=
is_open_map_iff_nhds_le.2 $ λ x, (map_fst_nhds x).ge
/-- `prod.snd` maps neighborhood of `x : α × β` within the section `prod.fst ⁻¹' {x.1}`
to `𝓝 x.2`. -/
lemma map_snd_nhds_within (x : α × β) : map prod.snd (𝓝[prod.fst ⁻¹' {x.1}] x) = 𝓝 x.2 :=
begin
refine le_antisymm (continuous_at_snd.mono_left inf_le_left) (λ s hs, _),
rcases x with ⟨x, y⟩,
rw [mem_map, nhds_within, mem_inf_principal, mem_nhds_prod_iff] at hs,
rcases hs with ⟨u, hu, v, hv, H⟩,
simp only [prod_subset_iff, mem_singleton_iff, mem_set_of_eq, mem_preimage] at H,
exact mem_of_superset hv (λ z hz, H _ (mem_of_mem_nhds hu) _ hz rfl)
end
/-- `prod.snd` maps the full neighborhood filter of `x : α × β` onto `𝓝 x.2`. -/
@[simp] lemma map_snd_nhds (x : α × β) : map prod.snd (𝓝 x) = 𝓝 x.2 :=
le_antisymm continuous_at_snd $ (map_snd_nhds_within x).symm.trans_le (map_mono inf_le_left)
/-- The second projection in a product of topological spaces sends open sets to open sets. -/
lemma is_open_map_snd : is_open_map (@prod.snd α β) :=
is_open_map_iff_nhds_le.2 $ λ x, (map_snd_nhds x).ge
/-- A product set is open in a product space if and only if each factor is open, or one of them is
empty -/
lemma is_open_prod_iff' {s : set α} {t : set β} :
is_open (s ×ˢ t) ↔ (is_open s ∧ is_open t) ∨ (s = ∅) ∨ (t = ∅) :=
begin
cases (s ×ˢ t : set _).eq_empty_or_nonempty with h h,
{ simp [h, prod_eq_empty_iff.1 h] },
{ have st : s.nonempty ∧ t.nonempty, from prod_nonempty_iff.1 h,
split,
{ assume H : is_open (s ×ˢ t),
refine or.inl ⟨_, _⟩,
-- Recover each factor as a projection image of the open product.
show is_open s,
{ rw ← fst_image_prod s st.2,
exact is_open_map_fst _ H },
show is_open t,
{ rw ← snd_image_prod st.1 t,
exact is_open_map_snd _ H } },
{ assume H,
simp only [st.1.ne_empty, st.2.ne_empty, not_false_iff, or_false] at H,
exact H.1.prod H.2 } }
end
/-- The closure of a product set is the product of the closures. -/
lemma closure_prod_eq {s : set α} {t : set β} :
closure (s ×ˢ t) = closure s ×ˢ closure t :=
set.ext $ assume ⟨a, b⟩,
have (𝓝 a ×ᶠ 𝓝 b) ⊓ 𝓟 (s ×ˢ t) = (𝓝 a ⊓ 𝓟 s) ×ᶠ (𝓝 b ⊓ 𝓟 t),
by rw [←prod_inf_prod, prod_principal_principal],
by simp [closure_eq_cluster_pts, cluster_pt, nhds_prod_eq, this]; exact prod_ne_bot
/-- The interior of a product set is the product of the interiors. -/
lemma interior_prod_eq (s : set α) (t : set β) :
interior (s ×ˢ t) = interior s ×ˢ interior t :=
set.ext $ λ ⟨a, b⟩, by simp only [mem_interior_iff_mem_nhds, mem_prod, prod_mem_nhds_iff]
/-- The frontier of a product set, in terms of closures and frontiers of the factors. -/
lemma frontier_prod_eq (s : set α) (t : set β) :
frontier (s ×ˢ t) = closure s ×ˢ frontier t ∪ frontier s ×ˢ closure t :=
by simp only [frontier, closure_prod_eq, interior_prod_eq, prod_diff_prod]
/-- Taking the product with `univ` on the right commutes with `frontier`. -/
@[simp] lemma frontier_prod_univ_eq (s : set α) :
frontier (s ×ˢ (univ : set β)) = frontier s ×ˢ (univ : set β) :=
by simp [frontier_prod_eq]
/-- Taking the product with `univ` on the left commutes with `frontier`. -/
@[simp] lemma frontier_univ_prod_eq (s : set β) :
frontier ((univ : set α) ×ˢ s) = (univ : set α) ×ˢ (frontier s) :=
by simp [frontier_prod_eq]
/-- A continuous binary map sends closure points of `s` and `t` into the closure of any set
containing the image of `s` and `t`. -/
lemma map_mem_closure2 {s : set α} {t : set β} {u : set γ} {f : α → β → γ} {a : α} {b : β}
(hf : continuous (λp:α×β, f p.1 p.2)) (ha : a ∈ closure s) (hb : b ∈ closure t)
(hu : ∀a b, a ∈ s → b ∈ t → f a b ∈ u) :
f a b ∈ closure u :=
have (a, b) ∈ closure (s ×ˢ t), by rw [closure_prod_eq]; from ⟨ha, hb⟩,
show (λp:α×β, f p.1 p.2) (a, b) ∈ closure u, from
map_mem_closure hf this $ assume ⟨a, b⟩ ⟨ha, hb⟩, hu a b ha hb
/-- The product of two closed sets is closed. -/
lemma is_closed.prod {s₁ : set α} {s₂ : set β} (h₁ : is_closed s₁) (h₂ : is_closed s₂) :
is_closed (s₁ ×ˢ s₂) :=
closure_eq_iff_is_closed.mp $ by simp only [h₁.closure_eq, h₂.closure_eq, closure_prod_eq]
/-- The product of two dense sets is a dense set. -/
lemma dense.prod {s : set α} {t : set β} (hs : dense s) (ht : dense t) :
dense (s ×ˢ t) :=
λ x, by { rw closure_prod_eq, exact ⟨hs x.1, ht x.2⟩ }
/-- If `f` and `g` are maps with dense range, then `prod.map f g` has dense range. -/
lemma dense_range.prod_map {ι : Type*} {κ : Type*} {f : ι → β} {g : κ → γ}
(hf : dense_range f) (hg : dense_range g) : dense_range (prod.map f g) :=
by simpa only [dense_range, prod_range_range_eq] using hf.prod hg
/-- The product of two inducing maps is inducing. -/
lemma inducing.prod_mk {f : α → β} {g : γ → δ} (hf : inducing f) (hg : inducing g) :
inducing (λx:α×γ, (f x.1, g x.2)) :=
⟨by rw [prod.topological_space, prod.topological_space, hf.induced, hg.induced,
induced_compose, induced_compose, induced_inf, induced_compose, induced_compose]⟩
/-- The product of two embeddings is an embedding. -/
lemma embedding.prod_mk {f : α → β} {g : γ → δ} (hf : embedding f) (hg : embedding g) :
embedding (λx:α×γ, (f x.1, g x.2)) :=
{ inj := assume ⟨x₁, x₂⟩ ⟨y₁, y₂⟩, by simp; exact assume h₁ h₂, ⟨hf.inj h₁, hg.inj h₂⟩,
..hf.to_inducing.prod_mk hg.to_inducing }
/-- The product of two open maps is an open map. -/
protected lemma is_open_map.prod {f : α → β} {g : γ → δ} (hf : is_open_map f) (hg : is_open_map g) :
is_open_map (λ p : α × γ, (f p.1, g p.2)) :=
begin
rw [is_open_map_iff_nhds_le],
rintros ⟨a, b⟩,
rw [nhds_prod_eq, nhds_prod_eq, ← filter.prod_map_map_eq],
exact filter.prod_mono (is_open_map_iff_nhds_le.1 hf a) (is_open_map_iff_nhds_le.1 hg b)
end
/-- The product of two open embeddings is an open embedding. -/
protected lemma open_embedding.prod {f : α → β} {g : γ → δ}
(hf : open_embedding f) (hg : open_embedding g) : open_embedding (λ x : α × γ, (f x.1, g x.2)) :=
open_embedding_of_embedding_open (hf.1.prod_mk hg.1)
(hf.is_open_map.prod hg.is_open_map)
/-- The graph map `x ↦ (x, f x)` of a continuous function is an embedding. -/
lemma embedding_graph {f : α → β} (hf : continuous f) : embedding (λ x, (x, f x)) :=
embedding_of_embedding_compose (continuous_id.prod_mk hf) continuous_fst embedding_id
end prod
section sum
open sum
variables [topological_space α] [topological_space β] [topological_space γ]
/-- The left inclusion `sum.inl : α → α ⊕ β` is continuous. -/
@[continuity] lemma continuous_inl : continuous (@inl α β) :=
continuous_sup_rng_left continuous_coinduced_rng
/-- The right inclusion `sum.inr : β → α ⊕ β` is continuous. -/
@[continuity] lemma continuous_inr : continuous (@inr α β) :=
continuous_sup_rng_right continuous_coinduced_rng
/-- A map defined by cases on a sum type is continuous if both branches are. -/
@[continuity] lemma continuous_sum_rec {f : α → γ} {g : β → γ}
(hf : continuous f) (hg : continuous g) : @continuous (α ⊕ β) γ _ _ (@sum.rec α β (λ_, γ) f g) :=
begin
apply continuous_sup_dom;
rw continuous_def at hf hg ⊢;
assumption
end
/-- A set in a sum space is open iff its preimages under both inclusions are open. -/
lemma is_open_sum_iff {s : set (α ⊕ β)} :
is_open s ↔ is_open (inl ⁻¹' s) ∧ is_open (inr ⁻¹' s) :=
iff.rfl
/-- A map out of a sum is open if its restrictions to both summands are open maps. -/
lemma is_open_map_sum {f : α ⊕ β → γ}
(h₁ : is_open_map (λ a, f (inl a))) (h₂ : is_open_map (λ b, f (inr b))) :
is_open_map f :=
begin
intros u hu,
rw is_open_sum_iff at hu,
cases hu with hu₁ hu₂,
-- Split `u` into its `inl` and `inr` parts and map each separately.
have : u = inl '' (inl ⁻¹' u) ∪ inr '' (inr ⁻¹' u),
{ ext (_|_); simp },
rw [this, set.image_union, set.image_image, set.image_image],
exact is_open.union (h₁ _ hu₁) (h₂ _ hu₂)
end
/-- The left inclusion into a sum space is an embedding. -/
lemma embedding_inl : embedding (@inl α β) :=
{ induced := begin
unfold sum.topological_space,
apply le_antisymm,
{ rw ← coinduced_le_iff_le_induced, exact le_sup_left },
{ intros u hu, existsi (inl '' u),
change
(is_open (inl ⁻¹' (@inl α β '' u)) ∧
is_open (inr ⁻¹' (@inl α β '' u))) ∧
inl ⁻¹' (inl '' u) = u,
rw [preimage_image_eq u sum.inl_injective, preimage_inr_image_inl],
exact ⟨⟨hu, is_open_empty⟩, rfl⟩ }
end,
inj := λ _ _, inl.inj_iff.mp }
/-- The right inclusion into a sum space is an embedding. -/
lemma embedding_inr : embedding (@inr α β) :=
{ induced := begin
unfold sum.topological_space,
apply le_antisymm,
{ rw ← coinduced_le_iff_le_induced, exact le_sup_right },
{ intros u hu, existsi (inr '' u),
change
(is_open (inl ⁻¹' (@inr α β '' u)) ∧
is_open (inr ⁻¹' (@inr α β '' u))) ∧
inr ⁻¹' (inr '' u) = u,
rw [preimage_inl_image_inr, preimage_image_eq u sum.inr_injective],
exact ⟨⟨is_open_empty, hu⟩, rfl⟩ }
end,
inj := λ _ _, inr.inj_iff.mp }
/-- The range of `sum.inl` is open in the sum space. -/
lemma is_open_range_inl : is_open (range (inl : α → α ⊕ β)) :=
is_open_sum_iff.2 $ by simp
/-- The range of `sum.inr` is open in the sum space. -/
lemma is_open_range_inr : is_open (range (inr : β → α ⊕ β)) :=
is_open_sum_iff.2 $ by simp
/-- The range of `sum.inl` is closed: its complement is the (open) range of `sum.inr`. -/
lemma is_closed_range_inl : is_closed (range (inl : α → α ⊕ β)) :=
by { rw [← is_open_compl_iff, compl_range_inl], exact is_open_range_inr }
/-- The range of `sum.inr` is closed: its complement is the (open) range of `sum.inl`. -/
lemma is_closed_range_inr : is_closed (range (inr : β → α ⊕ β)) :=
by { rw [← is_open_compl_iff, compl_range_inr], exact is_open_range_inl }
/-- `sum.inl` is an open embedding. -/
lemma open_embedding_inl : open_embedding (inl : α → α ⊕ β) :=
{ open_range := is_open_range_inl,
.. embedding_inl }
/-- `sum.inr` is an open embedding. -/
lemma open_embedding_inr : open_embedding (inr : β → α ⊕ β) :=
{ open_range := is_open_range_inr,
.. embedding_inr }
/-- `sum.inl` is a closed embedding. -/
lemma closed_embedding_inl : closed_embedding (inl : α → α ⊕ β) :=
{ closed_range := is_closed_range_inl,
.. embedding_inl }
/-- `sum.inr` is a closed embedding. -/
lemma closed_embedding_inr : closed_embedding (inr : β → α ⊕ β) :=
{ closed_range := is_closed_range_inr,
.. embedding_inr }
end sum
section subtype
variables [topological_space α] [topological_space β] [topological_space γ] {p : α → Prop}
/-- The coercion from a set `b : set β` to `β` is inducing. -/
lemma inducing_coe {b : set β} : inducing (coe : b → β) := ⟨rfl⟩
/-- A map is inducing if its codomain restriction to a superset of its range is. -/
lemma inducing.of_cod_restrict {f : α → β} {b : set β} (hb : ∀ a, f a ∈ b)
(h : inducing (b.cod_restrict f hb)) : inducing f := inducing_coe.comp h
/-- The subtype coercion is an embedding. -/
lemma embedding_subtype_coe : embedding (coe : subtype p → α) :=
⟨⟨rfl⟩, subtype.coe_injective⟩
/-- If `{a | p a}` is closed, the subtype coercion is a closed embedding. -/
lemma closed_embedding_subtype_coe (h : is_closed {a | p a}) :
closed_embedding (coe : subtype p → α) :=
⟨embedding_subtype_coe, by rwa [subtype.range_coe_subtype]⟩
/-- `subtype.val` is continuous (the subtype carries the induced topology). -/
@[continuity] lemma continuous_subtype_val : continuous (@subtype.val α p) :=
continuous_induced_dom
/-- The subtype coercion is continuous. -/
lemma continuous_subtype_coe : continuous (coe : subtype p → α) :=
continuous_subtype_val
/-- Composing a continuous map into a subtype with the coercion stays continuous. -/
lemma continuous.subtype_coe {f : β → subtype p} (hf : continuous f) :
continuous (λ x, (f x : α)) :=
continuous_subtype_coe.comp hf
/-- The coercion from an open set is an open embedding. -/
lemma is_open.open_embedding_subtype_coe {s : set α} (hs : is_open s) :
open_embedding (coe : s → α) :=
{ induced := rfl,
inj := subtype.coe_injective,
open_range := (subtype.range_coe : range coe = s).symm ▸ hs }
/-- The coercion from an open set is an open map. -/
lemma is_open.is_open_map_subtype_coe {s : set α} (hs : is_open s) :
is_open_map (coe : s → α) :=
hs.open_embedding_subtype_coe.is_open_map
/-- The restriction of an open map to an open subset is an open map. -/
lemma is_open_map.restrict {f : α → β} (hf : is_open_map f) {s : set α} (hs : is_open s) :
is_open_map (s.restrict f) :=
hf.comp hs.is_open_map_subtype_coe
/-- The coercion from a closed set is a closed embedding. -/
lemma is_closed.closed_embedding_subtype_coe {s : set α} (hs : is_closed s) :
closed_embedding (coe : {x // x ∈ s} → α) :=
{ induced := rfl,
inj := subtype.coe_injective,
closed_range := (subtype.range_coe : range coe = s).symm ▸ hs }
/-- Bundling a continuous map whose values satisfy `p` into a subtype-valued map is
continuous. -/
@[continuity] lemma continuous_subtype_mk {f : β → α}
(hp : ∀x, p (f x)) (h : continuous f) : continuous (λx, (⟨f x, hp x⟩ : subtype p)) :=
continuous_induced_rng h
/-- The inclusion map between nested subsets is continuous. -/
lemma continuous_inclusion {s t : set α} (h : s ⊆ t) : continuous (inclusion h) :=
continuous_subtype_mk _ continuous_subtype_coe
/-- The subtype coercion is continuous at every point. -/
lemma continuous_at_subtype_coe {p : α → Prop} {a : subtype p} :
continuous_at (coe : subtype p → α) a :=
continuous_iff_continuous_at.mp continuous_subtype_coe _
/-- A set `t` in the subtype `s` is dense iff `s` lies in the closure of `t`'s image. -/
lemma subtype.dense_iff {s : set α} {t : set s} : dense t ↔ s ⊆ closure (coe '' t) :=
by { rw [inducing_coe.dense_iff, set_coe.forall], refl }
/-- If `{a | p a}` is a neighborhood of `a`, the coercion maps `𝓝 ⟨a, ha⟩` onto `𝓝 a`. -/
lemma map_nhds_subtype_coe_eq {a : α} (ha : p a) (h : {a | p a} ∈ 𝓝 a) :
map (coe : subtype p → α) (𝓝 ⟨a, ha⟩) = 𝓝 a :=
map_nhds_induced_of_mem $ by simpa only [subtype.coe_mk, subtype.range_coe] using h
/-- Neighborhoods in the subtype are comaps of neighborhoods in the ambient space. -/
lemma nhds_subtype_eq_comap {a : α} {h : p a} :
𝓝 (⟨a, h⟩ : subtype p) = comap coe (𝓝 a) :=
nhds_induced _ _
/-- Convergence into a subtype is equivalent to convergence of the coerced values. -/
lemma tendsto_subtype_rng {β : Type*} {p : α → Prop} {b : filter β} {f : β → subtype p} :
∀{a:subtype p}, tendsto f b (𝓝 a) ↔ tendsto (λx, (f x : α)) b (𝓝 (a : α))
| ⟨a, ha⟩ := by rw [nhds_subtype_eq_comap, tendsto_comap_iff, subtype.coe_mk]
/-- A map is continuous if it is continuous on the subtypes of a neighborhood cover. -/
lemma continuous_subtype_nhds_cover {ι : Sort*} {f : α → β} {c : ι → α → Prop}
(c_cover : ∀x:α, ∃i, {x | c i x} ∈ 𝓝 x)
(f_cont : ∀i, continuous (λ(x : subtype (c i)), f x)) :
continuous f :=
continuous_iff_continuous_at.mpr $ assume x,
let ⟨i, (c_sets : {x | c i x} ∈ 𝓝 x)⟩ := c_cover x in
let x' : subtype (c i) := ⟨x, mem_of_mem_nhds c_sets⟩ in
calc map f (𝓝 x) = map f (map coe (𝓝 x')) :
congr_arg (map f) (map_nhds_subtype_coe_eq _ $ c_sets).symm
... = map (λx:subtype (c i), f x) (𝓝 x') : rfl
... ≤ 𝓝 (f x) : continuous_iff_continuous_at.mp (f_cont i) x'
/-- A map is continuous if it is continuous on the subtypes of a locally finite closed cover. -/
lemma continuous_subtype_is_closed_cover {ι : Sort*} {f : α → β} (c : ι → α → Prop)
(h_lf : locally_finite (λi, {x | c i x}))
(h_is_closed : ∀i, is_closed {x | c i x})
(h_cover : ∀x, ∃i, c i x)
(f_cont : ∀i, continuous (λ(x : subtype (c i)), f x)) :
continuous f :=
continuous_iff_is_closed.mpr $
assume s hs,
-- Each piece of the preimage is closed, being a closed-embedding image of a closed set.
have ∀i, is_closed ((coe : {x | c i x} → α) '' (f ∘ coe ⁻¹' s)),
from assume i,
(closed_embedding_subtype_coe (h_is_closed _)).is_closed_map _ (hs.preimage (f_cont i)),
-- A locally finite union of closed sets is closed.
have is_closed (⋃i, (coe : {x | c i x} → α) '' (f ∘ coe ⁻¹' s)),
from locally_finite.is_closed_Union
(h_lf.subset $ assume i x ⟨⟨x', hx'⟩, _, heq⟩, heq ▸ hx')
this,
have f ⁻¹' s = (⋃i, (coe : {x | c i x} → α) '' (f ∘ coe ⁻¹' s)),
begin
apply set.ext,
have : ∀ (x : α), f x ∈ s ↔ ∃ (i : ι), c i x ∧ f x ∈ s :=
λ x, ⟨λ hx, let ⟨i, hi⟩ := h_cover x in ⟨i, hi, hx⟩,
λ ⟨i, hi, hx⟩, hx⟩,
simpa [and.comm, @and.left_comm (c _ _), ← exists_and_distrib_right],
end,
by rwa [this]
/-- Membership in a closure within a subtype corresponds to closure of the coerced image. -/
lemma closure_subtype {x : {a // p a}} {s : set {a // p a}}:
x ∈ closure s ↔ (x : α) ∈ closure ((coe : _ → α) '' s) :=
closure_induced
/-- Restricting the codomain of a continuous map to a set containing its range is
continuous. -/
@[continuity] lemma continuous.cod_restrict {f : α → β} {s : set β} (hf : continuous f)
(hs : ∀ a, f a ∈ s) : continuous (s.cod_restrict f hs) := continuous_subtype_mk hs hf
/-- Codomain restriction preserves the inducing property. -/
lemma inducing.cod_restrict {e : α → β} (he : inducing e) {s : set β} (hs : ∀ x, e x ∈ s) :
inducing (cod_restrict e s hs) :=
inducing_of_inducing_compose (he.continuous.cod_restrict hs) continuous_subtype_coe he
/-- Codomain restriction preserves the embedding property. -/
lemma embedding.cod_restrict {e : α → β} (he : embedding e) (s : set β) (hs : ∀ x, e x ∈ s) :
embedding (cod_restrict e s hs) :=
embedding_of_embedding_compose (he.continuous.cod_restrict hs) continuous_subtype_coe he
end subtype
section quotient
variables [topological_space α] [topological_space β] [topological_space γ]
variables {r : α → α → Prop} {s : setoid α}
/-- `quot.mk` is a quotient map for the quotient (coinduced) topology. -/
lemma quotient_map_quot_mk : quotient_map (@quot.mk α r) :=
⟨quot.exists_rep, rfl⟩
/-- `quot.mk` is continuous. -/
@[continuity] lemma continuous_quot_mk : continuous (@quot.mk α r) :=
continuous_coinduced_rng
/-- A map lifted through `quot.lift` is continuous when the original map is. -/
@[continuity] lemma continuous_quot_lift {f : α → β} (hr : ∀ a b, r a b → f a = f b)
(h : continuous f) : continuous (quot.lift f hr : quot r → β) :=
continuous_coinduced_dom h
/-- `quotient.mk` is a quotient map for the quotient topology. -/
lemma quotient_map_quotient_mk : quotient_map (@quotient.mk α s) :=
quotient_map_quot_mk
/-- `quotient.mk` is continuous. -/
lemma continuous_quotient_mk : continuous (@quotient.mk α s) :=
continuous_coinduced_rng
/-- A map lifted through `quotient.lift` is continuous when the original map is. -/
lemma continuous_quotient_lift {f : α → β} (hs : ∀ a b, a ≈ b → f a = f b)
(h : continuous f) : continuous (quotient.lift f hs : quotient s → β) :=
continuous_coinduced_dom h
/-- A map lifted through `quotient.lift_on'` is continuous when the original map is. -/
lemma continuous_quotient_lift_on' {f : α → β} (hs : ∀ a b, a ≈ b → f a = f b)
(h : continuous f) : continuous (λ x, quotient.lift_on' x f hs : quotient s → β) :=
continuous_coinduced_dom h
end quotient
section pi
variables {ι : Type*} {π : ι → Type*}
@[continuity]
lemma continuous_pi [topological_space α] [∀i, topological_space (π i)] {f : α → Πi:ι, π i}
(h : ∀i, continuous (λa, f a i)) : continuous f :=
continuous_infi_rng $ assume i, continuous_induced_rng $ h i
@[continuity]
lemma continuous_apply [∀i, topological_space (π i)] (i : ι) :
continuous (λp:Πi, π i, p i) :=
continuous_infi_dom continuous_induced_dom
@[continuity]
lemma continuous_apply_apply {κ : Type*} {ρ : κ → ι → Type*}
[∀ j i, topological_space (ρ j i)] (j : κ) (i : ι) :
continuous (λ p : (Π j, Π i, ρ j i), p j i) :=
(continuous_apply i).comp (continuous_apply j)
lemma continuous_at_apply [∀i, topological_space (π i)] (i : ι) (x : Π i, π i) :
continuous_at (λ p : Π i, π i, p i) x :=
(continuous_apply i).continuous_at
lemma filter.tendsto.apply [∀i, topological_space (π i)] {l : filter α} {f : α → Π i, π i}
{x : Π i, π i} (h : tendsto f l (𝓝 x)) (i : ι) :
tendsto (λ a, f a i) l (𝓝 $ x i) :=
(continuous_at_apply i _).tendsto.comp h
lemma continuous_pi_iff [topological_space α] [∀ i, topological_space (π i)] {f : α → Π i, π i} :
continuous f ↔ ∀ i, continuous (λ y, f y i) :=
iff.intro (λ h i, (continuous_apply i).comp h) continuous_pi
lemma nhds_pi [t : ∀i, topological_space (π i)] {a : Πi, π i} :
𝓝 a = pi (λ i, 𝓝 (a i)) :=
calc 𝓝 a = (⨅i, @nhds _ (@topological_space.induced _ _ (λx:Πi, π i, x i) (t i)) a) : nhds_infi
... = (⨅i, comap (λx, x i) (𝓝 (a i))) : by simp [nhds_induced]
lemma tendsto_pi_nhds [t : ∀i, topological_space (π i)] {f : α → Πi, π i} {g : Πi, π i}
{u : filter α} :
tendsto f u (𝓝 g) ↔ ∀ x, tendsto (λ i, f i x) u (𝓝 (g x)) :=
by rw [nhds_pi, filter.tendsto_pi]
lemma continuous_at_pi [∀ i, topological_space (π i)] [topological_space α] {f : α → Π i, π i}
{x : α} :
continuous_at f x ↔ ∀ i, continuous_at (λ y, f y i) x :=
tendsto_pi_nhds
lemma filter.tendsto.update [∀i, topological_space (π i)] [decidable_eq ι]
{l : filter α} {f : α → Π i, π i} {x : Π i, π i} (hf : tendsto f l (𝓝 x)) (i : ι)
{g : α → π i} {xi : π i} (hg : tendsto g l (𝓝 xi)) :
tendsto (λ a, function.update (f a) i (g a)) l (𝓝 $ function.update x i xi) :=
tendsto_pi_nhds.2 $ λ j, by { rcases em (j = i) with rfl|hj; simp [*, hf.apply] }
lemma continuous_at.update [∀i, topological_space (π i)] [topological_space α] [decidable_eq ι]
{f : α → Π i, π i} {a : α} (hf : continuous_at f a) (i : ι) {g : α → π i}
(hg : continuous_at g a) :
continuous_at (λ a, function.update (f a) i (g a)) a :=
hf.update i hg
lemma continuous.update [∀i, topological_space (π i)] [topological_space α] [decidable_eq ι]
{f : α → Π i, π i} (hf : continuous f) (i : ι) {g : α → π i} (hg : continuous g) :
continuous (λ a, function.update (f a) i (g a)) :=
continuous_iff_continuous_at.2 $ λ x, hf.continuous_at.update i hg.continuous_at
/-- `function.update f i x` is continuous in `(f, x)`. -/
@[continuity] lemma continuous_update [∀i, topological_space (π i)] [decidable_eq ι] (i : ι) :
continuous (λ f : (Π j, π j) × π i, function.update f.1 i f.2) :=
continuous_fst.update i continuous_snd
lemma filter.tendsto.fin_insert_nth {n} {π : fin (n + 1) → Type*} [Π i, topological_space (π i)]
(i : fin (n + 1)) {f : α → π i} {l : filter α} {x : π i} (hf : tendsto f l (𝓝 x))
{g : α → Π j : fin n, π (i.succ_above j)} {y : Π j, π (i.succ_above j)} (hg : tendsto g l (𝓝 y)) :
tendsto (λ a, i.insert_nth (f a) (g a)) l (𝓝 $ i.insert_nth x y) :=
tendsto_pi_nhds.2 (λ j, fin.succ_above_cases i (by simpa) (by simpa using tendsto_pi_nhds.1 hg) j)
lemma continuous_at.fin_insert_nth {n} {π : fin (n + 1) → Type*} [Π i, topological_space (π i)]
[topological_space α] (i : fin (n + 1)) {f : α → π i} {a : α} (hf : continuous_at f a)
{g : α → Π j : fin n, π (i.succ_above j)} (hg : continuous_at g a) :
continuous_at (λ a, i.insert_nth (f a) (g a)) a :=
hf.fin_insert_nth i hg
lemma continuous.fin_insert_nth {n} {π : fin (n + 1) → Type*} [Π i, topological_space (π i)]
[topological_space α] (i : fin (n + 1)) {f : α → π i} (hf : continuous f)
{g : α → Π j : fin n, π (i.succ_above j)} (hg : continuous g) :
continuous (λ a, i.insert_nth (f a) (g a)) :=
continuous_iff_continuous_at.2 $ λ a, hf.continuous_at.fin_insert_nth i hg.continuous_at
lemma is_open_set_pi [∀a, topological_space (π a)] {i : set ι} {s : Πa, set (π a)}
(hi : i.finite) (hs : ∀a∈i, is_open (s a)) : is_open (pi i s) :=
by rw [pi_def]; exact (is_open_bInter hi $ assume a ha, (hs _ ha).preimage (continuous_apply _))
lemma is_closed_set_pi [∀a, topological_space (π a)] {i : set ι} {s : Πa, set (π a)}
(hs : ∀a∈i, is_closed (s a)) : is_closed (pi i s) :=
by rw [pi_def];
exact (is_closed_Inter $ λ a, is_closed_Inter $ λ ha, (hs _ ha).preimage (continuous_apply _))
lemma mem_nhds_of_pi_mem_nhds {ι : Type*} {α : ι → Type*} [Π (i : ι), topological_space (α i)]
{I : set ι} {s : Π i, set (α i)} (a : Π i, α i) (hs : I.pi s ∈ 𝓝 a) {i : ι} (hi : i ∈ I) :
s i ∈ 𝓝 (a i) :=
by { rw nhds_pi at hs, exact mem_of_pi_mem_pi hs hi }
lemma set_pi_mem_nhds [Π a, topological_space (π a)] {i : set ι} {s : Π a, set (π a)}
{x : Π a, π a} (hi : i.finite) (hs : ∀ a ∈ i, s a ∈ 𝓝 (x a)) :
pi i s ∈ 𝓝 x :=
by { rw [pi_def, bInter_mem hi], exact λ a ha, (continuous_apply a).continuous_at (hs a ha) }
lemma set_pi_mem_nhds_iff {α : ι → Type*} [Π (i : ι), topological_space (α i)]
{I : set ι} (hI : I.finite) {s : Π i, set (α i)} (a : Π i, α i) :
I.pi s ∈ 𝓝 a ↔ ∀ (i : ι), i ∈ I → s i ∈ 𝓝 (a i) :=
by { rw [nhds_pi, pi_mem_pi_iff hI], apply_instance }
lemma interior_pi_set {α : ι → Type*} [Π i, topological_space (α i)]
{I : set ι} (hI : I.finite) {s : Π i, set (α i)} :
interior (pi I s) = I.pi (λ i, interior (s i)) :=
by { ext a, simp only [set.mem_pi, mem_interior_iff_mem_nhds, set_pi_mem_nhds_iff hI] }
lemma exists_finset_piecewise_mem_of_mem_nhds [decidable_eq ι] [Π i, topological_space (π i)]
{s : set (Π a, π a)} {x : Π a, π a} (hs : s ∈ 𝓝 x) (y : Π a, π a) :
∃ I : finset ι, I.piecewise x y ∈ s :=
begin
simp only [nhds_pi, filter.mem_pi'] at hs,
rcases hs with ⟨I, t, htx, hts⟩,
refine ⟨I, hts $ λ i hi, _⟩,
simpa [finset.mem_coe.1 hi] using mem_of_mem_nhds (htx i)
end
lemma pi_eq_generate_from [∀a, topological_space (π a)] :
Pi.topological_space =
generate_from {g | ∃(s:Πa, set (π a)) (i : finset ι), (∀a∈i, is_open (s a)) ∧ g = pi ↑i s} :=
le_antisymm
(le_generate_from $ assume g ⟨s, i, hi, eq⟩, eq.symm ▸ is_open_set_pi (finset.finite_to_set _) hi)
(le_infi $ assume a s ⟨t, ht, s_eq⟩, generate_open.basic _ $
⟨function.update (λa, univ) a t, {a}, by simpa using ht, s_eq ▸ by ext f; simp [set.pi]⟩)
lemma pi_generate_from_eq {g : Πa, set (set (π a))} :
@Pi.topological_space ι π (λa, generate_from (g a)) =
generate_from {t | ∃(s:Πa, set (π a)) (i : finset ι), (∀a∈i, s a ∈ g a) ∧ t = pi ↑i s} :=
let G := {t | ∃(s:Πa, set (π a)) (i : finset ι), (∀a∈i, s a ∈ g a) ∧ t = pi ↑i s} in
begin
rw [pi_eq_generate_from],
refine le_antisymm (generate_from_mono _) (le_generate_from _),
exact assume s ⟨t, i, ht, eq⟩, ⟨t, i, assume a ha, generate_open.basic _ (ht a ha), eq⟩,
{ rintros s ⟨t, i, hi, rfl⟩,
rw [pi_def],
apply is_open_bInter (finset.finite_to_set _),
assume a ha, show ((generate_from G).coinduced (λf:Πa, π a, f a)).is_open (t a),
refine le_generate_from _ _ (hi a ha),
exact assume s hs, generate_open.basic _ ⟨function.update (λa, univ) a s, {a}, by simp [hs]⟩ }
end
lemma pi_generate_from_eq_fintype {g : Πa, set (set (π a))} [fintype ι] (hg : ∀a, ⋃₀ g a = univ) :
@Pi.topological_space ι π (λa, generate_from (g a)) =
generate_from {t | ∃(s:Πa, set (π a)), (∀a, s a ∈ g a) ∧ t = pi univ s} :=
begin
rw [pi_generate_from_eq],
refine le_antisymm (generate_from_mono _) (le_generate_from _),
exact assume s ⟨t, ht, eq⟩, ⟨t, finset.univ, by simp [ht, eq]⟩,
{ rintros s ⟨t, i, ht, rfl⟩,
apply is_open_iff_forall_mem_open.2 _,
assume f hf,
choose c hc using show ∀a, ∃s, s ∈ g a ∧ f a ∈ s,
{ assume a, have : f a ∈ ⋃₀ g a, { rw [hg], apply mem_univ }, simpa },
refine ⟨pi univ (λa, if a ∈ i then t a else (c : Πa, set (π a)) a), _, _, _⟩,
{ simp [pi_if] },
{ refine generate_open.basic _ ⟨_, assume a, _, rfl⟩,
by_cases a ∈ i; simp [*, set.pi] at * },
{ have : f ∈ pi {a | a ∉ i} c, { simp [*, set.pi] at * },
simpa [pi_if, hf] } }
end
/-- Suppose `π i` is a family of topological spaces indexed by `i : ι`, and `X` is a type
endowed with a family of maps `f i : X → π i` for every `i : ι`, hence inducing a
map `g : X → Π i, π i`. This lemma shows that infimum of the topologies on `X` induced by
the `f i` as `i : ι` varies is simply the topology on `X` induced by `g : X → Π i, π i`
where `Π i, π i` is endowed with the usual product topology. -/
lemma inducing_infi_to_pi {X : Type*} [∀ i, topological_space (π i)] (f : Π i, X → π i) :
  @inducing X (Π i, π i) (⨅ i, induced (f i) infer_instance) _ (λ x i, f i x) :=
begin
  constructor,
  -- The product topology is itself an infimum of topologies induced by the projections.
  erw induced_infi,
  congr' 1,
  funext,
  -- Composing the two `induced` steps gives the topology induced by `f i` directly.
  erw induced_compose,
end
-- From here on assume a finite index type whose factors all carry the discrete topology.
variables [fintype ι] [∀ i, topological_space (π i)] [∀ i, discrete_topology (π i)]

/-- A finite product of discrete spaces is discrete. -/
instance Pi.discrete_topology : discrete_topology (Π i, π i) :=
singletons_open_iff_discrete.mp (λ x,
begin
  -- A singleton is the (finite) intersection of one coordinate constraint per index.
  rw show {x} = ⋂ i, {y : Π i, π i | y i = x i},
  { ext, simp only [function.funext_iff, set.mem_singleton_iff, set.mem_Inter, set.mem_set_of_eq] },
  -- Each constraint is the preimage of an open singleton under a continuous projection.
  exact is_open_Inter (λ i, (continuous_apply i).is_open_preimage {x i} (is_open_discrete {x i}))
end)

end pi
section sigma
variables {ι : Type*} {σ : ι → Type*} [Π i, topological_space (σ i)]

/-- The inclusion of a summand into a sigma type is continuous. -/
@[continuity]
lemma continuous_sigma_mk {i : ι} : continuous (@sigma.mk ι σ i) :=
continuous_supr_rng continuous_coinduced_rng

/-- A set in a sigma type is open iff its preimage under every summand inclusion is. -/
lemma is_open_sigma_iff {s : set (sigma σ)} : is_open s ↔ ∀ i, is_open (sigma.mk i ⁻¹' s) :=
by simp only [is_open_supr_iff, is_open_coinduced]

/-- A set in a sigma type is closed iff its preimage under every summand inclusion is. -/
lemma is_closed_sigma_iff {s : set (sigma σ)} : is_closed s ↔ ∀ i, is_closed (sigma.mk i ⁻¹' s) :=
by simp only [← is_open_compl_iff, is_open_sigma_iff, preimage_compl]

/-- A summand inclusion is an open map. -/
lemma is_open_map_sigma_mk {i : ι} : is_open_map (@sigma.mk ι σ i) :=
begin
  intros s hs,
  rw is_open_sigma_iff,
  intro j,
  rcases eq_or_ne i j with (rfl|hne),
  { rwa set.preimage_image_eq _ sigma_mk_injective },
  -- On any other summand the preimage of the image is empty, hence open.
  { convert is_open_empty,
    apply set.eq_empty_of_subset_empty,
    rintro x ⟨y, _, hy⟩,
    have : i = j, by cc,
    contradiction }
end

/-- The range of a summand inclusion is open. -/
lemma is_open_range_sigma_mk {i : ι} : is_open (set.range (@sigma.mk ι σ i)) :=
is_open_map_sigma_mk.is_open_range

/-- A summand inclusion is a closed map. -/
lemma is_closed_map_sigma_mk {i : ι} : is_closed_map (@sigma.mk ι σ i) :=
begin
  intros s hs,
  rw is_closed_sigma_iff,
  intro j,
  rcases eq_or_ne i j with (rfl|hne),
  { rwa set.preimage_image_eq _ sigma_mk_injective },
  -- On any other summand the preimage of the image is empty, hence closed.
  { convert is_closed_empty,
    apply set.eq_empty_of_subset_empty,
    rintro x ⟨y, _, hy⟩,
    have : i = j, by cc,
    contradiction }
end

/-- The range of a summand inclusion is closed. -/
lemma is_closed_sigma_mk {i : ι} : is_closed (set.range (@sigma.mk ι σ i)) :=
by { rw ←set.image_univ, exact is_closed_map_sigma_mk _ is_closed_univ }

/-- A summand inclusion is an open embedding. -/
lemma open_embedding_sigma_mk {i : ι} : open_embedding (@sigma.mk ι σ i) :=
open_embedding_of_continuous_injective_open
  continuous_sigma_mk sigma_mk_injective is_open_map_sigma_mk

/-- A summand inclusion is a closed embedding. -/
lemma closed_embedding_sigma_mk {i : ι} : closed_embedding (@sigma.mk ι σ i) :=
closed_embedding_of_continuous_injective_closed
  continuous_sigma_mk sigma_mk_injective is_closed_map_sigma_mk

/-- A summand inclusion is an embedding. -/
lemma embedding_sigma_mk {i : ι} : embedding (@sigma.mk ι σ i) :=
closed_embedding_sigma_mk.1

/-- The preimage under `sigma.fst` of any set of indices is open. -/
lemma is_open_sigma_fst_preimage (s : set ι) : is_open (sigma.fst ⁻¹' s : set (Σ a, σ a)) :=
begin
  -- Decompose the preimage as a union of summand ranges.
  rw [← bUnion_of_singleton s, preimage_Union₂],
  simp only [← range_sigma_mk],
  exact is_open_bUnion (λ _ _, is_open_range_sigma_mk)
end
/-- A map out of a sum type is continuous if its restriction to each summand is. -/
@[continuity]
lemma continuous_sigma [topological_space β] {f : sigma σ → β}
  (h : ∀ i, continuous (λ a, f ⟨i, a⟩)) : continuous f :=
continuous_supr_dom (λ i, continuous_coinduced_dom (h i))

/-- A map between sigma types is continuous when each fibrewise component is. -/
@[continuity]
lemma continuous_sigma_map {κ : Type*} {τ : κ → Type*} [Π k, topological_space (τ k)]
  {f₁ : ι → κ} {f₂ : Π i, σ i → τ (f₁ i)} (hf : ∀ i, continuous (f₂ i)) :
  continuous (sigma.map f₁ f₂) :=
continuous_sigma $ λ i,
  show continuous (λ a, sigma.mk (f₁ i) (f₂ i a)),
  from continuous_sigma_mk.comp (hf i)

/-- A map out of a sum type is open if its restriction to each summand is. -/
lemma is_open_map_sigma [topological_space β] {f : sigma σ → β}
  (h : ∀ i, is_open_map (λ a, f ⟨i, a⟩)) : is_open_map f :=
begin
  intros s hs,
  rw is_open_sigma_iff at hs,
  -- Split `s` along the summands and map each piece through its open restriction.
  rw [← Union_image_preimage_sigma_mk_eq_self s, image_Union],
  apply is_open_Union,
  intro i,
  rw [image_image],
  exact h i _ (hs i)
end

/-- The sum of embeddings is an embedding. -/
lemma embedding_sigma_map {τ : ι → Type*} [Π i, topological_space (τ i)]
  {f : Π i, σ i → τ i} (hf : ∀ i, embedding (f i)) : embedding (sigma.map id f) :=
begin
  refine ⟨⟨_⟩, function.injective_id.sigma_map (λ i, (hf i).inj)⟩,
  refine le_antisymm
    (continuous_iff_le_induced.mp (continuous_sigma_map (λ i, (hf i).continuous))) _,
  intros s hs,
  replace hs := is_open_sigma_iff.mp hs,
  -- On each summand, pull the open fibre of `s` back through the corresponding embedding.
  have : ∀ i, ∃ t, is_open t ∧ f i ⁻¹' t = sigma.mk i ⁻¹' s,
  { intro i,
    apply is_open_induced_iff.mp,
    convert hs i,
    exact (hf i).induced.symm },
  choose t ht using this,
  -- Assemble the chosen open sets into one open witness in the target sigma type.
  apply is_open_induced_iff.mpr,
  refine ⟨⋃ i, sigma.mk i '' t i, is_open_Union (λ i, is_open_map_sigma_mk _ (ht i).1), _⟩,
  ext ⟨i, x⟩,
  change (sigma.mk i (f i x) ∈ ⋃ (i : ι), sigma.mk i '' t i) ↔ x ∈ sigma.mk i ⁻¹' s,
  rw [←(ht i).2, mem_Union],
  split,
  { rintro ⟨j, hj⟩,
    rw mem_image at hj,
    rcases hj with ⟨y, hy₁, hy₂⟩,
    rcases sigma.mk.inj_iff.mp hy₂ with ⟨rfl, hy⟩,
    replace hy := eq_of_heq hy,
    subst y,
    exact hy₁ },
  { intro hx,
    use i,
    rw mem_image,
    exact ⟨f i x, hx, rfl⟩ }
end

end sigma
section ulift

/-- `ulift.down` is continuous. -/
@[continuity] lemma continuous_ulift_down [topological_space α] :
  continuous (ulift.down : ulift.{v u} α → α) :=
continuous_induced_dom

/-- `ulift.up` is continuous. -/
@[continuity] lemma continuous_ulift_up [topological_space α] :
  continuous (ulift.up : α → ulift.{v u} α) :=
continuous_induced_rng continuous_id

end ulift
/-- If `f` is continuous and maps `s` into `closure t`, then the image of any
point of `closure s` lies in `closure t`. -/
lemma mem_closure_of_continuous [topological_space α] [topological_space β]
  {f : α → β} {a : α} {s : set α} {t : set β}
  (hf : continuous f) (ha : a ∈ closure s) (h : maps_to f s (closure t)) :
  f a ∈ closure t :=
calc f a ∈ f '' closure s : mem_image_of_mem _ ha
  ... ⊆ closure (f '' s) : image_closure_subset_closure_image hf
  ... ⊆ closure t : closure_minimal h.image_subset is_closed_closure

/-- Binary version of `mem_closure_of_continuous`, for a map continuous in the
pair of its arguments. -/
lemma mem_closure_of_continuous2 [topological_space α] [topological_space β] [topological_space γ]
  {f : α → β → γ} {a : α} {b : β} {s : set α} {t : set β} {u : set γ}
  (hf : continuous (λp:α×β, f p.1 p.2)) (ha : a ∈ closure s) (hb : b ∈ closure t)
  (h : ∀a∈s, ∀b∈t, f a b ∈ closure u) :
  f a b ∈ closure u :=
-- The pair lies in the closure of the product, so the unary lemma applies.
have (a,b) ∈ closure (s ×ˢ t),
  by simp [closure_prod_eq, ha, hb],
show f (a, b).1 (a, b).2 ∈ closure u,
  from @mem_closure_of_continuous (α×β) _ _ _ (λp:α×β, f p.1 p.2) (a,b) _ u hf this $
    assume ⟨p₁, p₂⟩ ⟨h₁, h₂⟩, h p₁ h₁ p₂ h₂
|
{"author": "nick-kuhn", "repo": "leantools", "sha": "567a98c031fffe3f270b7b8dea48389bc70d7abb", "save_path": "github-repos/lean/nick-kuhn-leantools", "path": "github-repos/lean/nick-kuhn-leantools/leantools-567a98c031fffe3f270b7b8dea48389bc70d7abb/src/topology/constructions.lean"}
|
import os
import pandas as pd
import numpy as np
import math
import pickle

# NOTE(review): hard-coded absolute path — the script only runs on this one
# machine; consider taking the data directory as a CLI argument.
os.chdir('C:/Users/VADDADISAIRAHUL/Downloads/indian_movies_data_final/')

# Verdict labels counted as a commercial success (1) vs. failure (0) by
# verdict_preprocess below.
successful_list = ['All Time Blockbuster','Blockbuster','Hit','Super Hit','Semi Hit','Above Average','Average']
unsuccessful_list = ['Flop','Below Average','Disaster']
def is_null_record(data_dict):
    """Return True when any required field of a movie record is missing.

    Scalar fields are considered null when they are NaN (the only value for
    which ``x != x``) or one of a few placeholder strings.  List-valued
    fields (stored as stringified lists like ``"['a', 'b']"``) are null when
    NaN or when the bracketed list is empty.
    """
    scalar_fields = [data_dict['language'], data_dict['budget'],
                     data_dict['box_office_collection_worldwide'],
                     data_dict['runtime'], data_dict['release_date'],
                     data_dict['verdict'], data_dict['cbfc_rating']]
    list_fields = [data_dict['genre'], data_dict['actors'], data_dict['music'],
                   data_dict['director'], data_dict['writer'], data_dict['producer']]
    for value in scalar_fields:
        # NaN check via self-inequality, then the placeholder sentinels.
        if value != value or value in ["", '--', 'N/A', 'NA']:
            return True
    for value in list_fields:
        if value != value:
            return True
        parts = value.strip('][').split(',')
        if len(parts) == 1 and parts[0] == '':
            return True
    return False
def language_preprocess(ln):
    """Encode a two-letter language code as a small integer.

    Returns None for codes outside the known set, matching the original
    if/elif chain's fall-through behaviour.
    """
    codes = {'hi': 0, 'ta': 1, 'te': 2, 'ma': 3, 'kn': 4}
    return codes.get(ln)
def genre_preprocess(genre, genre_list):
    """One-hot encode a stringified genre list against the genre vocabulary.

    `genre` looks like "['Action', 'Drama']"; the result is a 0/1 list with
    the same length and ordering as `genre_list`.
    """
    encoding = [0] * len(genre_list)
    for raw in genre.strip('][').split(','):
        name = raw.strip().strip('\'')
        encoding[genre_list.index(name)] = 1
    return encoding
def release_date_preprocess(date):
    """Return [day, month] from either a 'dd-mm-...' string or a date-like object."""
    if isinstance(date, str):
        parts = date.split('-')
        return [int(parts[0]), int(parts[1])]
    # Non-string values are assumed date-like with .day/.month attributes.
    return [date.day, date.month]
def runtime_preprocess(runtime):
    """Coerce the runtime field to an int (minutes, per the source CSVs — confirm)."""
    return int(runtime)
def budget_preprocess(budget):
    """Coerce the budget field to an int."""
    return int(budget)
def collection_preprocess(collection):
    """Coerce the worldwide box-office collection field to an int."""
    return int(collection)
def cbfc_preprocess(rating):
    """Encode a CBFC certificate as an ordinal (U < UA < A).

    Returns None for any other rating string, matching the original
    if/elif chain's fall-through behaviour.
    """
    return {'U': 0, 'UA': 1, 'A': 2}.get(rating)
def people_preprocess(peoples_list, popularity_dict):
    """Sum popularity scores for the people named in each stringified list.

    Each entry of `peoples_list` looks like "['Name A', 'Name B']"; names
    missing from `popularity_dict` contribute 0.  Returns one aggregate
    score per entry, in order.
    """
    scores = []
    for raw_list in peoples_list:
        names = (token.strip().strip('\'') for token in raw_list.strip('][').split(','))
        scores.append(sum(popularity_dict.get(name, 0) for name in names))
    return scores
def verdict_preprocess(verdict):
    """Binary success label: 1 when the verdict is in `successful_list`, else 0."""
    return 1 if verdict in successful_list else 0
def main():
    """Merge the per-language movie files, drop incomplete records, encode
    every feature numerically and pickle [column names, movie names, matrix].
    """
    # read excel file
    movie_data_files = ['hindi_movies_final.csv','Tamil_movies.csv','telugu_movies_final.csv',
    'malayalam_movies_final.csv','kannada_movies_final.csv','other_movies_final.xlsx']
    popularity_files = ['Hindi_people.csv','Kannada_people.csv','Malayalam_people.csv','Tamil_people.csv','Telugu_people.csv']
    # Map person name -> popularity score, merged across all language files.
    popularity_dict = {}
    for popularity_file in popularity_files:
        popularity = pd.read_csv(popularity_file)
        for row in range(popularity.shape[0]):
            popularity_dict[popularity.iloc[row,0]] = popularity.iloc[row,1]
    data = []
    frames = []
    # Collect the set of all genre labels seen across every file.
    genre_set = set()
    for file in movie_data_files:
        temp_data = None
        if file.endswith('.csv'):
            temp_data = pd.read_csv(file)
        else:
            temp_data = pd.read_excel(file)
            # NOTE(review): only the Excel file gets its last two columns
            # dropped — presumably file-specific extras; confirm against data.
            temp_data = temp_data.iloc[:,:-2]
        #print(temp_data.shape,temp_data.columns)
        frames.append(temp_data)
        for genre in temp_data["genre"]:
            # Skip NaN (genre != genre) and empty genre cells.
            if genre!=genre or genre == "":
                continue
            res = genre.strip('][').split(',')
            if len(res) == 0:
                continue
            for item in res:
                genre_set.add(item.strip().strip('\''))
    dataframe = pd.concat(frames,ignore_index=True)
    #print(dataframe.columns)
    #dataframe.drop('name', inplace=True, axis=1)
    movies = []
    # Sort the vocabulary so the one-hot column order is reproducible.
    genre_list = list(genre_set)
    genre_list.sort()
    for index in range(dataframe.shape[0]):
        record = dataframe.iloc[index,:]
        if is_null_record(record):
            continue
        data1 = language_preprocess(record['language'])
        data2 = genre_preprocess(record['genre'],genre_list)
        data3 = release_date_preprocess(record['release_date'])
        data4 = runtime_preprocess(record['runtime'])
        data5 = cbfc_preprocess(record['cbfc_rating'])
        data6 = budget_preprocess(record['budget'])
        data7 = collection_preprocess(record['box_office_collection_worldwide'])
        data8 = verdict_preprocess(record['verdict'])
        # Return on investment: worldwide collection over budget.
        data9 = data7/data6
        data10 = people_preprocess([record['actors'],record['music'],record['director'],record['writer'],record['producer']],popularity_dict)
        data.append([data1] + data2 + data3 + [data4,data5,data6,data7] + data10 + [data9,data8])
        movies.append(record['name'])
    dataarray = np.array(data)
    # Column names in the exact order features were appended above.
    columns = ['language'] + genre_list + ['release_day','release_month'] + ['runtime','cbfc_rating','budget','box_office_collection_worldwide',
    'actors','music','director','writer','producer'] + ['roi','verdict']
    file = open('movie_data_1.pkl','wb')
    obj = [columns,movies,dataarray]
    pickle.dump(obj,file)
    file.close()

if __name__=='__main__':
    main()
|
{"hexsha": "2ef1e9673db1cb1f8f196577fbd049041cc2e825", "size": 6012, "ext": "py", "lang": "Python", "max_stars_repo_path": "codes/movie_data_preprocessing.py", "max_stars_repo_name": "VaddadiSaiRahul/Early-Movie-Success-Prediction", "max_stars_repo_head_hexsha": "49062a62b9527e4f9ff5aa3b904563d670701d07", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-07-24T14:44:57.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-24T19:52:18.000Z", "max_issues_repo_path": "codes/movie_data_preprocessing.py", "max_issues_repo_name": "VaddadiSaiRahul/Early-Movie-Success-Prediction", "max_issues_repo_head_hexsha": "49062a62b9527e4f9ff5aa3b904563d670701d07", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "codes/movie_data_preprocessing.py", "max_forks_repo_name": "VaddadiSaiRahul/Early-Movie-Success-Prediction", "max_forks_repo_head_hexsha": "49062a62b9527e4f9ff5aa3b904563d670701d07", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.6421052632, "max_line_length": 146, "alphanum_fraction": 0.5828343313, "include": true, "reason": "import numpy", "num_tokens": 1352}
|
# -*- coding: utf-8 -*-
"""RHESSI TimeSeries subclass definitions."""
from collections import OrderedDict
import datetime
import matplotlib.dates
import matplotlib.pyplot as plt
from pandas import DataFrame
from sunpy.timeseries.timeseriesbase import GenericTimeSeries
from sunpy.util.metadata import MetaDict
from sunpy.instr import rhessi
import sunpy.io
from astropy import units as u
__all__ = ['RHESSISummaryTimeSeries']
class RHESSISummaryTimeSeries(GenericTimeSeries):
    """
    RHESSI X-ray Summary Lightcurve TimeSeries.

    The RHESSI mission consists of a single spin-stabilized
    spacecraft in a low-altitude orbit inclined 38 degrees to
    the Earth's equator. The only instrument on board is a set of 9
    Germanium spectrometers with the ability to obtain high
    fidelity solar spectra from X rays (down to 3 keV) to gamma rays (1 MeV).
    Each spectrometer is coupled to a set of grids with different pitches
    which enable fourier-style imaging as the spacecraft spins.

    RHESSI provides summary lightcurves in the following passbands

    * 3 - 6 keV
    * 6 - 12 keV
    * 12 - 25 keV
    * 25 - 50 keV
    * 50 - 100 keV
    * 100 - 300 keV
    * 300 - 800 keV
    * 800 - 7000 keV
    * 7000 - 20000 keV

    RHESSI was launched on 5 February 2002.

    Examples
    --------
    >>> import sunpy.data.sample  # doctest: +REMOTE_DATA
    >>> import sunpy.timeseries
    >>> rhessi = sunpy.timeseries.TimeSeries(sunpy.data.sample.RHESSI_TIMESERIES)  # doctest: +REMOTE_DATA
    >>> rhessi.peek()   # doctest: +SKIP

    References
    ----------
    * RHESSI Homepage `<https://hesperia.gsfc.nasa.gov/rhessi3/index.html>`_
    * Mission Paper `<https://doi.org/10.1023/A:1022428818870>`_
    """

    # Class attribute used to specify the source class of the TimeSeries.
    _source = 'rhessi'

    def peek(self, title="RHESSI Observing Summary Count Rate", **kwargs):
        """Plots RHESSI Count Rate light curve. An example is shown below.

        .. plot::

            import sunpy.data.sample
            import sunpy.timeseries
            rhessi = sunpy.timeseries.TimeSeries(sunpy.data.sample.RHESSI_TIMESERIES, source='RHESSI')
            rhessi.peek()

        Parameters
        ----------
        title : `str`
            The title of the plot.

        **kwargs : `dict`
            Any additional plot arguments that should be used
            when plotting.
        """
        # Check we have a timeseries valid for plotting
        self._validate_data_for_ploting()
        figure = plt.figure()
        axes = plt.gca()
        lc_linecolors = rhessi.hsi_linecolors()

        # One line per energy band, log-scaled count rate against time.
        for lc_color, (item, frame) in zip(lc_linecolors, self.data.items()):
            axes.plot_date(self.data.index, frame.values, '-', label=item, lw=2, color=lc_color)

        axes.set_yscale("log")
        axes.set_xlabel(datetime.datetime.isoformat(self.data.index[0])[0:10])

        axes.set_title(title)
        axes.set_ylabel('Count Rate s$^{-1}$ detector$^{-1}$')

        axes.yaxis.grid(True, 'major')
        axes.xaxis.grid(False, 'major')
        axes.legend()

        # @todo: display better tick labels for date range (e.g. 06/01 - 06/05)
        formatter = matplotlib.dates.DateFormatter('%H:%M')
        axes.xaxis.set_major_formatter(formatter)

        axes.fmt_xdata = matplotlib.dates.DateFormatter('%H:%M')
        figure.autofmt_xdate()
        figure.show()

    @classmethod
    def _parse_file(cls, filepath):
        """Parses rhessi FITS data files to create TimeSeries."""
        hdus = sunpy.io.read_file(filepath)
        return cls._parse_hdus(hdus)

    @classmethod
    def _parse_hdus(cls, hdulist):
        """Parses a RHESSI FITS HDU list form a FITS file."""
        header, d = rhessi.parse_observing_summary_hdulist(hdulist)
        # The time of dict d is astropy Time. But dataframe can only take datetime
        d['time'] = d['time'].datetime
        header = MetaDict(OrderedDict(header))
        data = DataFrame(d['data'], columns=d['labels'], index=d['time'])
        # Add the units data: every passband is a count rate per detector.
        units = OrderedDict([('3 - 6 keV', u.ct / u.s / u.Unit('detector')),
                             ('6 - 12 keV', u.ct / u.s / u.Unit('detector')),
                             ('12 - 25 keV', u.ct / u.s / u.Unit('detector')),
                             ('25 - 50 keV', u.ct / u.s / u.Unit('detector')),
                             ('50 - 100 keV', u.ct / u.s / u.Unit('detector')),
                             ('100 - 300 keV', u.ct / u.s / u.Unit('detector')),
                             ('300 - 800 keV', u.ct / u.s / u.Unit('detector')),
                             ('800 - 7000 keV', u.ct / u.s / u.Unit('detector')),
                             ('7000 - 20000 keV', u.ct / u.s / u.Unit('detector'))])
        # Todo: check units used. https://hesperia.gsfc.nasa.gov/ssw/hessi/doc/guides/hessi_data_access.htm
        return data, header, units

    @classmethod
    def is_datasource_for(cls, **kwargs):
        """Determines if the file corresponds to a RHESSI X-ray Summary lightcurve"""
        # Check if source is explicitly assigned
        if 'source' in kwargs.keys():
            if kwargs.get('source', ''):
                return kwargs.get('source', '').lower().startswith(cls._source)
        # Check if HDU defines the source instrument
        if 'meta' in kwargs.keys():
            return kwargs['meta'].get('telescop', '').startswith('HESSI')
        # NOTE(review): implicitly returns None (falsy) when neither
        # 'source' nor 'meta' is supplied — confirm callers treat it as False.
|
{"hexsha": "c470e62bf678eaa0eff0d18be7a2c675237403c7", "size": 5464, "ext": "py", "lang": "Python", "max_stars_repo_path": "sunpy/timeseries/sources/rhessi.py", "max_stars_repo_name": "drewleonard42/sunpy", "max_stars_repo_head_hexsha": "79ca90a032213d82d42a3657a693b20b99b22464", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "sunpy/timeseries/sources/rhessi.py", "max_issues_repo_name": "drewleonard42/sunpy", "max_issues_repo_head_hexsha": "79ca90a032213d82d42a3657a693b20b99b22464", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "sunpy/timeseries/sources/rhessi.py", "max_forks_repo_name": "drewleonard42/sunpy", "max_forks_repo_head_hexsha": "79ca90a032213d82d42a3657a693b20b99b22464", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.6827586207, "max_line_length": 107, "alphanum_fraction": 0.6083455344, "include": true, "reason": "from astropy", "num_tokens": 1399}
|
#!/usr/bin/python3
#-*- coding: UTF-8 -*-
"""Convert an MNIST idx3-ubyte image file into individual .bmp files.

Usage: cov_image_to_bmp.py ubyteFileName savePath
"""
import struct
import os
import sys
import numpy as np
#import matplotlib.pyplot as plt
import PIL.Image

if len(sys.argv) == 3:
    print("ubyteFileName:", sys.argv[1])
    print("savePath:", sys.argv[2])
    print("")
else:
    print("USED: ", sys.argv[0], " ubyteFileName savePath")
    # sys.exit flushes stdout and runs cleanup handlers; the original used
    # os._exit, which skips both.
    sys.exit(0)

fileName = sys.argv[1]
savePath = sys.argv[2]

if os.path.exists(savePath):
    print("savePath is exist:", sys.argv[2])
else:
    os.makedirs(savePath)
    print("savePath is create:", sys.argv[2])

# Read the whole idx file; 'with' guarantees the handle is closed (the
# original never closed it).
with open(fileName, 'rb') as binfile:
    buf = binfile.read()

index = 0
# idx3 header: magic number, image count, rows, columns (big-endian uint32).
magic, numImages, numRows, numColumns = struct.unpack_from('>IIII', buf, index)
print(magic, numImages, numRows, numColumns)
index += struct.calcsize('>IIII')

# Generalized from the hard-coded '>784B': use the image size declared in the
# header (784 = 28*28 for standard MNIST, so behaviour is unchanged there).
pixel_fmt = '>%dB' % (numRows * numColumns)
pixel_size = struct.calcsize(pixel_fmt)
for image in range(0, numImages):
    pixels = struct.unpack_from(pixel_fmt, buf, index)
    index += pixel_size
    im = np.array(pixels, dtype='uint8').reshape(numRows, numColumns)
    PIL.Image.fromarray(im).save(savePath + '/train_%s.bmp' % image, 'bmp')
|
{"hexsha": "e37d054bb189e1f4a5982306ac79e3006e04b2aa", "size": 995, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/mnist/cov_image_to_bmp.py", "max_stars_repo_name": "ZhengPengqiao/studyCaffe", "max_stars_repo_head_hexsha": "dda514fdb5903ef53944dd7a355dc8aadcd3a78a", "max_stars_repo_licenses": ["Intel", "BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "examples/mnist/cov_image_to_bmp.py", "max_issues_repo_name": "ZhengPengqiao/studyCaffe", "max_issues_repo_head_hexsha": "dda514fdb5903ef53944dd7a355dc8aadcd3a78a", "max_issues_repo_licenses": ["Intel", "BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/mnist/cov_image_to_bmp.py", "max_forks_repo_name": "ZhengPengqiao/studyCaffe", "max_forks_repo_head_hexsha": "dda514fdb5903ef53944dd7a355dc8aadcd3a78a", "max_forks_repo_licenses": ["Intel", "BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.6904761905, "max_line_length": 72, "alphanum_fraction": 0.6874371859, "include": true, "reason": "import numpy", "num_tokens": 284}
|
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 17 19:52:06 2017

@author: Gowtham

Compare SVM / Naive Bayes / kNN / Random Forest classifiers on the HR
attrition dataset; column 6 ("left") is the binary target.
"""

import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

dataset = pd.read_csv("HR_comma_sep.csv")
X = dataset.iloc[:,[0,1,2,3,4,5,7,8,9] ].values
y = dataset.iloc[:, 6].values

# Label-encode then one-hot the two categorical columns (department, salary).
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
labelencoder_X = LabelEncoder()
X[:, 7] = labelencoder_X.fit_transform(X[:, 7])
X[:, 8] = labelencoder_X.fit_transform(X[:, 8])
onehotencoder = OneHotEncoder(categorical_features = [7, 8])
X = onehotencoder.fit_transform(X).toarray()

from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)

# --- SVM with RBF kernel ---
from sklearn.svm import SVC
clf = SVC(kernel = 'rbf', random_state = 0)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)

from sklearn.metrics import accuracy_score
accuracy_svm = accuracy_score(y_test, y_pred, normalize = True)

# --- Gaussian Naive Bayes ---
from sklearn.naive_bayes import GaussianNB
clf1 = GaussianNB()
clf1.fit(X_train, y_train)
# BUGFIX: the original called clf.predict (the SVM), so accuracy_nb was
# actually the SVM's accuracy; predict with the fitted NB model instead.
y_pred_nb = clf1.predict(X_test)
accuracy_nb = accuracy_score(y_test, y_pred_nb, normalize = True)

# --- k Nearest Neighbours ---
from sklearn.neighbors import KNeighborsClassifier
clf2 = KNeighborsClassifier(n_neighbors = 6)
clf2.fit(X_train, y_train)
y_pred_knn = clf2.predict(X_test)
accuracy_knn = accuracy_score(y_test, y_pred_knn, normalize = True)
print(accuracy_knn)

# --- Random Forest ---
from sklearn.ensemble import RandomForestClassifier
classifier = RandomForestClassifier(n_estimators = 11, criterion = 'entropy', random_state = 0)
classifier.fit(X_train, y_train)

# Predicting the Test set results
y_pred_rf = classifier.predict(X_test)

# Making the Confusion Matrix
# BUGFIX: the original passed y_pred (the SVM predictions) to the
# random-forest section's confusion matrix.
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred_rf)
accuracy_rf = accuracy_score(y_test, y_pred_rf, normalize = True)
|
{"hexsha": "b0c7457046127ff43a934ac685ba429f9ba05e35", "size": 1895, "ext": "py", "lang": "Python", "max_stars_repo_path": "hr.py", "max_stars_repo_name": "Gowtham1729/Human-Resources-Prediction", "max_stars_repo_head_hexsha": "5deb28bbae65da88b5a7ad864a0c6992ebd50247", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-03-24T13:22:41.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-18T03:36:06.000Z", "max_issues_repo_path": "hr.py", "max_issues_repo_name": "Gowtham1729/Human-Resources-Prediction", "max_issues_repo_head_hexsha": "5deb28bbae65da88b5a7ad864a0c6992ebd50247", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "hr.py", "max_forks_repo_name": "Gowtham1729/Human-Resources-Prediction", "max_forks_repo_head_hexsha": "5deb28bbae65da88b5a7ad864a0c6992ebd50247", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.0655737705, "max_line_length": 96, "alphanum_fraction": 0.7488126649, "include": true, "reason": "import numpy", "num_tokens": 490}
|
'''
File name: autoencoder_train_CNN_vs_MLP.py
Author: Lloyd Windrim
Date created: August 2019

Python package: deephyp

Description: An example script for training an MLP (or dense) autoencoder and a convolutional autoencoder on the
Pavia Uni hyperspectral dataset.
'''

import scipy.io
import urllib
import os
import shutil
from utils import reporthook

# import toolbox libraries
import sys
sys.path.insert(0, '..')
from deephyp import autoencoder
from deephyp import data

if __name__ == '__main__':

    # download dataset (if already downloaded, comment this out)
    #urllib.urlretrieve( 'http://www.ehu.eus/ccwintco/uploads/e/ee/PaviaU.mat', os.path.join(os.getcwd(),'PaviaU.mat'), reporthook )

    # read data into numpy array
    mat = scipy.io.loadmat( 'PaviaU.mat' )
    img = mat[ 'paviaU' ]

    # create a hyperspectral dataset object from the numpy array
    hypData = data.HypImg( img )

    # pre-process data to make the model easier to train
    hypData.pre_process( 'minmax' )

    # create data iterator objects for training and validation using the pre-processed data
    trainSamples = 200000
    valSamples = 100
    dataTrain = data.Iterator( dataSamples=hypData.spectraPrep[:trainSamples, :],
                               targets=hypData.spectraPrep[:trainSamples, :], batchSize=1000 )
    dataVal = data.Iterator( dataSamples=hypData.spectraPrep[trainSamples:trainSamples+valSamples, :],
                             targets=hypData.spectraPrep[trainSamples:trainSamples+valSamples, :] )

    # shuffle training data
    dataTrain.shuffle()

    # setup a fully-connected autoencoder neural network with 3 encoder layers
    net_mlp = autoencoder.mlp_1D_network( inputSize=hypData.numBands, encoderSize=[50,30,10,3], activationFunc='relu',
                                          weightInitOpt='truncated_normal', tiedWeights=None, skipConnect=False )

    # setup a convolutional autoencoder neural network with 3 conv encoder layers
    net_cnn = autoencoder.cnn_1D_network( inputSize=hypData.numBands, zDim=3, encoderNumFilters=[10,10,10] ,
                                          encoderFilterSize=[20,10,10], activationFunc='relu', weightInitOpt='truncated_normal',
                                          encoderStride=[1, 1, 1], tiedWeights=None, skipConnect=False )

    # setup a training operation for each network (using the same loss function)
    net_mlp.add_train_op(name='csa', lossFunc='CSA', learning_rate=1e-3, decay_steps=None, decay_rate=None,
                         method='Adam', wd_lambda=0.0)

    net_cnn.add_train_op( name='csa', lossFunc='CSA', learning_rate=1e-3, decay_steps=None, decay_rate=None,
                          method='Adam', wd_lambda=0.0 )

    # create directories to save the learnt models
    model_dirs = []
    for method in ['mlp','cnn']:
        model_dir = os.path.join('models','test_ae_comparison_%s'%(method))
        if os.path.exists(model_dir):
            # if directory already exists, delete it
            shutil.rmtree(model_dir)
        # NOTE(review): os.mkdir is not recursive — assumes the 'models'
        # parent directory already exists; confirm or use os.makedirs.
        os.mkdir(model_dir)
        model_dirs.append( model_dir )

    # train the mlp model (100 epochs)
    dataTrain.reset_batch()
    net_mlp.train(dataTrain=dataTrain, dataVal=dataVal, train_op_name='csa', n_epochs=100, save_addr=model_dirs[0],
                  visualiseRateTrain=10, visualiseRateVal=10, save_epochs=[100])

    # train the cnn model (takes longer, so only 10 epochs)
    dataTrain.reset_batch()
    net_cnn.train(dataTrain=dataTrain, dataVal=dataVal, train_op_name='csa', n_epochs=10, save_addr=model_dirs[1],
                  visualiseRateTrain=1, visualiseRateVal=10, save_epochs=[10])
|
{"hexsha": "b9712bb38a16c7f012f4674bf9ef284bfca55306", "size": 3647, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/autoencoder_train_CNN_vs_MLP.py", "max_stars_repo_name": "forkbabu/hyperspectral-autoencoders", "max_stars_repo_head_hexsha": "0b2cb987ca9a3aa8a27a5fe0241ca6f76c56a8ab", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-09-12T16:27:14.000Z", "max_stars_repo_stars_event_max_datetime": "2020-09-12T16:27:27.000Z", "max_issues_repo_path": "examples/autoencoder_train_CNN_vs_MLP.py", "max_issues_repo_name": "forkbabu/hyperspectral-autoencoders", "max_issues_repo_head_hexsha": "0b2cb987ca9a3aa8a27a5fe0241ca6f76c56a8ab", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/autoencoder_train_CNN_vs_MLP.py", "max_forks_repo_name": "forkbabu/hyperspectral-autoencoders", "max_forks_repo_head_hexsha": "0b2cb987ca9a3aa8a27a5fe0241ca6f76c56a8ab", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-12-07T13:53:42.000Z", "max_forks_repo_forks_event_max_datetime": "2019-12-07T13:53:42.000Z", "avg_line_length": 39.2150537634, "max_line_length": 132, "alphanum_fraction": 0.6849465314, "include": true, "reason": "import scipy", "num_tokens": 917}
|
#!/usr/bin/env python3
"""
Calculates the correlation for N pair, up to N_max, using the parity criteria.
2016.11.22 Alessandro Cere
"""
import glob
import numpy as np
import subprocess
from math import pi

from uncertainties import unumpy

# Output files: nominal Bell values and their bootstrap errors, one row per N.
sink_file = 'par_chsh.dat'
sink_file_err = 'par_chsh_err.dat'

# Largest chunk size N to evaluate (exclusive upper bound of the main loop).
N_max = 20

# Measurement angles in radians: 0.00..0.14 in steps of 0.02, plus 0.15,
# then 0.16..0.24 in steps of 0.02.
angles = [x / 100. for x in range(0, 15, 2)] + \
    [.15] + [x / 100. for x in range(16, 25, 2)]
# Converted to degrees for the output-file header row.
angles = np.array([angle / pi * 180 for angle in angles])
def pp_corr(filename, N=3):
    """Run the external `parity` helper on `filename` with chunk size `N`.

    Returns the correlation the helper prints on stdout, as a float.
    """
    # Pass argv as a list with shell=False: no shell parsing, so filenames
    # containing spaces or metacharacters are handled safely (the original
    # built a shell string with shell=True).
    out = subprocess.check_output(['./parity', str(N), filename])
    return float(out.strip())
def bell_value(a0b0, a0b1, a1b0, a1b1, corr, N=1):
    """
    Estimate the Bell-operator value from the four measurement settings,
    evaluating the correlation function `corr` on each with chunk size N.
    The CHSH combination uses signs (+, -, +, +).
    """
    settings = [a0b0, a0b1, a1b0, a1b1]
    signs = [1, -1, 1, 1]
    return np.sum([sign * corr(setting, N)
                   for setting, sign in zip(settings, signs)])
def bell_value_err(a0b0_file, a0b1_file, a1b0_file, a1b1_file, corr, N=1,
                   trials=1):
    """Estimate the Bell-operator value with a bootstrap error bar.

    ``corr`` is evaluated ``trials`` times per measurement file; the mean
    and standard deviation of each distribution become an uncertain
    number (``uncertainties.unumpy``), and the four are combined with
    the CHSH signs (+, -, +, +).
    """
    measurement_files = [a0b0_file, a0b1_file, a1b0_file, a1b1_file]
    # A single pair has no shuffling freedom, so repeating is pointless.
    if N == 1:
        trials = 1
    samples = [[corr(f, N) for _ in range(trials)]
               for f in measurement_files]
    # Mean and spread of the bootstrap distribution for each setting.
    corr_u = unumpy.uarray(np.mean(samples, 1), np.std(samples, 1))
    signs = (1, -1, 1, 1)
    return np.sum([value * sign for value, sign in zip(corr_u, signs)])
if __name__ == '__main__':
    # Measurement files: four consecutive files (a0b0, a0b1, a1b0, a1b1)
    # per angle, in glob order — TODO confirm glob order matches angles.
    d = glob.glob('measurements/meas*.dat')
    # first row of both output files contains the angles
    with open(sink_file, 'w') as sink, open(sink_file_err, 'w') as sink_err:
        sink.write(('angle' + '\t{:.4f}' * len(angles) + '\n').format(*angles))
        sink_err.write(('angle' + '\t{:.4f}' * len(angles) +
                        '\n').format(*angles))
    # Main loop over chunk size N
    for N in range(1, N_max):
        # One Bell value per angle, with bootstrap errors over 1000 resamples;
        # angle k consumes files d[4k : 4k+4].
        bells = [bell_value_err(*d[k * 4: k * 4 + 4], pp_corr, N, 1000)
                 for k
                 in range(len(angles))]
        # Append one row per N: nominal values and errors go to separate files.
        with open(sink_file, 'a') as sink, \
                open(sink_file_err, 'a') as sink_err:
            sink.write(
                ('{}' + '\t{:.5f}' * len(angles) +
                 '\n').format(N, *unumpy.nominal_values(bells)))
            sink_err.write(
                ('{}' + '\t{:.5f}' * len(angles) +
                 '\n').format(N, *unumpy.std_devs(bells)))
        # for visual feedback
        print(N)
|
{"hexsha": "ec1291f5d241f4c3d0faca6da2449400c159aa5a", "size": 2841, "ext": "py", "lang": "Python", "max_stars_repo_path": "parity.py", "max_stars_repo_name": "acere/Bell-manypairs", "max_stars_repo_head_hexsha": "8708c6914003fe284ba77af4359ad6fe83a0564f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-08-31T04:53:05.000Z", "max_stars_repo_stars_event_max_datetime": "2020-08-31T04:53:05.000Z", "max_issues_repo_path": "parity.py", "max_issues_repo_name": "acere/Bell-manypairs", "max_issues_repo_head_hexsha": "8708c6914003fe284ba77af4359ad6fe83a0564f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "parity.py", "max_forks_repo_name": "acere/Bell-manypairs", "max_forks_repo_head_hexsha": "8708c6914003fe284ba77af4359ad6fe83a0564f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.8804347826, "max_line_length": 79, "alphanum_fraction": 0.5656458993, "include": true, "reason": "import numpy", "num_tokens": 830}
|
(* SPDX-License-Identifier: GPL-2.0 *)
Require Import Coqlib.
Require Import AST.
Require Import Integers.
Require Import Values.
Require Import Cop.
Require Import Clight.
Require Import CDataTypes.
Require Import Ctypes.
Require Import Ident.
Local Open Scope Z_scope.
(* Positive identifiers for every C variable/temporary name used by the
   Clight ASTs in this file.  Each must stay globally unique; they mirror
   the names in the original C source.  _Rd is deliberately out of the
   sequential range. *)
Definition _Rd : ident := 999%positive.
Definition _addr := 1%positive.
Definition _alloc := 2%positive.
Definition _arg := 3%positive.
Definition _base := 4%positive.
Definition _cb_offset := 5%positive.
Definition _cbndx := 6%positive.
Definition _cnt := 7%positive.
Definition _count := 8%positive.
Definition _cur_ticket := 9%positive.
Definition _cur_vcpuid := 10%positive.
Definition _cur_vmid := 11%positive.
Definition _data := 12%positive.
Definition _end := 13%positive.
Definition _esr := 14%positive.
Definition _fault_ipa := 15%positive.
Definition _get_now := 16%positive.
Definition _gfn := 17%positive.
Definition _gpa := 18%positive.
Definition _hsr := 19%positive.
Definition _i := 20%positive.
Definition _inbuf := 21%positive.
Definition _inc_exe := 22%positive.
Definition _incr_now := 23%positive.
Definition _incr_ticket := 24%positive.
Definition _index := 25%positive.
Definition _inowner := 26%positive.
Definition _inpfn := 27%positive.
Definition _iova := 28%positive.
Definition _is_write := 29%positive.
Definition _kvm := 30%positive.
Definition _len := 31%positive.
Definition _level := 32%positive.
Definition _lk := 33%positive.
Definition _load_addr := 34%positive.
Definition _load_idx := 35%positive.
Definition _load_info_cnt := 36%positive.
Definition _log_hold := 37%positive.
Definition _log_incr := 38%positive.
Definition _main := 39%positive.
Definition _map := 40%positive.
Definition _mapped := 41%positive.
Definition _my_ticket := 42%positive.
Definition _n := 43%positive.
Definition _next := 44%positive.
Definition _num := 45%positive.
Definition _num_context_banks := 46%positive.
Definition _offset := 47%positive.
Definition _outbuf := 48%positive.
Definition _outowner := 49%positive.
Definition _outpfn := 50%positive.
Definition _owner := 51%positive.
Definition _p_index := 52%positive.
Definition _pa := 53%positive.
Definition _paddr := 54%positive.
Definition _page_count := 55%positive.
Definition _pass_hlock := 56%positive.
Definition _pass_lock := 57%positive.
Definition _pass_qlock := 58%positive.
Definition _perm := 59%positive.
Definition _pfn := 60%positive.
Definition _pgd := 61%positive.
Definition _pgd_idx := 62%positive.
Definition _pgd_pa := 63%positive.
Definition _pgnum := 64%positive.
Definition _pmd := 65%positive.
Definition _pmd_idx := 66%positive.
Definition _pmd_pa := 67%positive.
Definition _power := 68%positive.
Definition _prot := 69%positive.
Definition _pte := 70%positive.
Definition _pte_idx := 71%positive.
Definition _pte_pa := 72%positive.
Definition _pud := 73%positive.
Definition _pud_idx := 74%positive.
Definition _pud_pa := 75%positive.
Definition _r_index := 76%positive.
Definition _reg := 77%positive.
Definition _remap := 78%positive.
Definition _remap_addr := 79%positive.
Definition _res := 80%positive.
Definition _ret := 81%positive.
Definition _rt := 82%positive.
Definition _size := 83%positive.
Definition _smmu_enable := 84%positive.
Definition _smmu_index := 85%positive.
Definition _start := 86%positive.
Definition _state := 87%positive.
Definition _t_vmid := 88%positive.
Definition _target := 89%positive.
Definition _target_addr := 90%positive.
Definition _target_vmid := 91%positive.
Definition _total_smmu := 92%positive.
Definition _ttbr := 93%positive.
Definition _ttbr_pa := 94%positive.
Definition _type := 95%positive.
Definition _val := 96%positive.
Definition _valid := 97%positive.
Definition _vcpu := 98%positive.
Definition _vcpu_state := 99%positive.
Definition _vcpuid := 100%positive.
Definition _vm_state := 101%positive.
Definition _vmid := 102%positive.
Definition _vttbr := 103%positive.
Definition _vttbr_pa := 104%positive.
Definition _wait_hlock := 105%positive.
Definition _wait_lock := 106%positive.
Definition _wait_qlock := 107%positive.
Definition _write_val := 108%positive.
(* Compiler-introduced temporaries (call results). *)
Definition _t'1 := 109%positive.
Definition _t'2 := 110%positive.
Definition _t'3 := 111%positive.
(* Clight AST for the C function body:

     r_index = mem_region_search(addr);
     ret = -1UL;
     if (r_index != -1U) {
       p_index = get_mem_region_index(r_index);
       if (p_index != -1UL) {
         base = get_mem_region_base(r_index);
         ret = p_index + (addr - base) / 4096UL;
       }
     }
     return ret;

   i.e. locate the memory region containing addr, then add the region's
   first page index to the page offset of addr within the region
   (4096 = page size).  Both sentinel comparisons use -1 cast to the
   respective unsigned width. *)
Definition get_s2_page_index_body :=
  (Ssequence
    (Ssequence
      (Scall (Some _t'1)
        (Evar mem_region_search (Tfunction (Tcons tulong Tnil) tuint
                                  cc_default))
        ((Etempvar _addr tulong) :: nil))
      (Sset _r_index (Etempvar _t'1 tuint)))
    (Ssequence
      (Sset _ret (Econst_long (Int64.repr (-1)) tulong))
      (Ssequence
        (Sifthenelse (Ebinop One (Etempvar _r_index tuint)
                       (Econst_int (Int.repr (-1)) tuint) tint)
          (Ssequence
            (Ssequence
              (Scall (Some _t'2)
                (Evar get_mem_region_index (Tfunction (Tcons tuint Tnil)
                                             tulong cc_default))
                ((Etempvar _r_index tuint) :: nil))
              (Sset _p_index (Etempvar _t'2 tulong)))
            (Sifthenelse (Ebinop One (Etempvar _p_index tulong)
                           (Econst_long (Int64.repr (-1)) tulong) tint)
              (Ssequence
                (Ssequence
                  (Scall (Some _t'3)
                    (Evar get_mem_region_base (Tfunction (Tcons tuint Tnil)
                                                tulong cc_default))
                    ((Etempvar _r_index tuint) :: nil))
                  (Sset _base (Etempvar _t'3 tulong)))
                (Sset _ret
                  (Ebinop Oadd (Etempvar _p_index tulong)
                    (Ebinop Odiv
                      (Ebinop Osub (Etempvar _addr tulong)
                        (Etempvar _base tulong) tulong)
                      (Econst_long (Int64.repr 4096) tulong) tulong) tulong)))
              Sskip))
          Sskip)
        (Sreturn (Some (Etempvar _ret tulong)))))).
(* Function record: tulong get_s2_page_index(tulong addr), default
   calling convention, no addressable locals; the temporaries mirror the
   C locals plus the compiler-introduced _t'1.._t'3 call results. *)
Definition f_get_s2_page_index := {|
  fn_return := tulong;
  fn_callconv := cc_default;
  fn_params := ((_addr, tulong) :: nil);
  fn_vars := nil;
  fn_temps := ((_ret, tulong) :: (_p_index, tulong) :: (_base, tulong) ::
               (_r_index, tuint) :: (_t'3, tulong) :: (_t'2, tulong) ::
               (_t'1, tuint) :: nil);
  fn_body := get_s2_page_index_body
|}.
|
{"author": "VeriGu", "repo": "VRM-proof", "sha": "9e3c9751f31713a133a0a7e98f3d4c9600ca7bde", "save_path": "github-repos/coq/VeriGu-VRM-proof", "path": "github-repos/coq/VeriGu-VRM-proof/VRM-proof-9e3c9751f31713a133a0a7e98f3d4c9600ca7bde/sekvm/PageIndex/Code.v"}
|
import seaborn as sns
import sys
import csv
from statistics import stdev
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
pd.set_option('display.max_rows', None)

# One entry per simulation batch: directory/file prefix and the bonus
# value (alpha) used for that batch.
files = [
    {'file': 'b000', 'bonus': '0.00'},
    {'file': 'b001', 'bonus': '0.01'},
    {'file': 'b002', 'bonus': '0.02'},
    {'file': 'b003', 'bonus': '0.03'},
    {'file': 'b004', 'bonus': '0.04'},
    {'file': 'b005', 'bonus': '0.05'},
    {'file': 'b006', 'bonus': '0.06'},
    {'file': 'b007', 'bonus': '0.07'},
    {'file': 'b008', 'bonus': '0.08'},
    {'file': 'b009', 'bonus': '0.09'},
    {'file': 'b010', 'bonus': '0.10'},
    {'file': 'b011', 'bonus': '0.11'},
    {'file': 'b012', 'bonus': '0.12'},
    {'file': 'b013', 'bonus': '0.13'},
    {'file': 'b014', 'bonus': '0.14'},
    {'file': 'b015', 'bonus': '0.15'},
]

# Collect one DataFrame per batch and concatenate once at the end:
# growing a DataFrame with pd.concat inside the loop is quadratic.
frames = []
for f in files:
    with open(f['file'] + '/' + f['file'] + '.txt') as csv_file_r:
        csv_reader = csv.reader(csv_file_r, delimiter=';')
        qtas = []
        qtbs = []
        results = []
        for row in csv_reader:
            # 'partial' rows are progress markers, not final counts.
            if row[0] != 'partial':
                qta = int(row[0])
                qtb = int(row[1])
                # qta == 500 means strategy A took the whole population;
                # qta == 0 means B did; anything else is an undecided run.
                result = 'Undefined'
                if qta == 500:
                    result = 'A fixation'
                elif qta == 0:
                    result = 'B fixation'
                qtas.append(qta)
                qtbs.append(qtb)
                results.append(result)
    batch = pd.DataFrame(list(zip(qtas, qtbs, results)),
                         columns=['qta', 'qtb', 'type'])
    batch['bonus'] = f['bonus']
    frames.append(batch)

# Renamed from `all`, which shadowed the builtin of the same name.
all_data = pd.concat([pd.DataFrame([], columns=['qta', 'qtb', 'type'])] +
                     frames)

# Count runs per (bonus, type); the unstack/stack round trip fills
# missing (bonus, type) combinations with 0.
resumo = all_data.groupby(["bonus", "type"])["qta"].count().unstack(
    fill_value=0).stack().reset_index(name="sum")
print(resumo)

# Fixation percentage per bonus value, one line per outcome type.
fig_dims = (6, 4)
fig, ax = plt.subplots(figsize=fig_dims)
fig = sns.lineplot(data=resumo, x="bonus", y="sum", hue="type")
# presumably 5_000_000 = total runs per bonus, so counts render as % —
# TODO confirm against the simulation setup.
ax.yaxis.set_major_formatter(mtick.PercentFormatter(xmax=5000000))
plt.legend(bbox_to_anchor=(1.05, 1.0), loc='upper left')
plt.tight_layout()
ax.set(xlabel="α", ylabel="Fixation %")
plt.setp(ax.get_xticklabels(), rotation=90, horizontalalignment='center')
plt.savefig("lineplot-ex1.svg")
plt.savefig("lineplot-ex1.png", dpi=200)
|
{"hexsha": "247ee1fe132049b1764410cca95a901c0e3cfade", "size": 3431, "ext": "py", "lang": "Python", "max_stars_repo_path": "article/figure-2a-2c/plot-05-25-1.py", "max_stars_repo_name": "guilherme-araujo/gsop-dist", "max_stars_repo_head_hexsha": "15da82ffa8add74cc61b95d3544ec3aaa0e71a32", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "article/figure-2a-2c/plot-05-25-1.py", "max_issues_repo_name": "guilherme-araujo/gsop-dist", "max_issues_repo_head_hexsha": "15da82ffa8add74cc61b95d3544ec3aaa0e71a32", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "article/figure-2a-2c/plot-05-25-1.py", "max_forks_repo_name": "guilherme-araujo/gsop-dist", "max_forks_repo_head_hexsha": "15da82ffa8add74cc61b95d3544ec3aaa0e71a32", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.3711340206, "max_line_length": 125, "alphanum_fraction": 0.5572719324, "include": true, "reason": "import numpy", "num_tokens": 1087}
|
/*
* AddingNoise.cpp
*
* Created on: May 22, 2015
* Author: dbazazian
*/
// #define STANDARD_DEVIATION_NEIGHBORS
#define GAUSSIAN_NOISE
#ifdef STANDARD_DEVIATION_NEIGHBORS
#include <iostream>
#include <stdio.h> /* printf, NULL */
#include <stdlib.h> /* srand, rand */
#include "time.h"
#include <boost/thread/thread.hpp>
#include <pcl/common/common_headers.h>
#include <pcl/features/normal_3d.h>
#include <pcl/io/io.h>
#include <pcl/io/pcd_io.h>
#include <pcl/visualization/pcl_visualizer.h>
#include <pcl/visualization/cloud_viewer.h>
#include <pcl/console/parse.h>
#include <pcl/io/ply_io.h>
/**
 * Build a noisy copy of a point cloud: every 10th point is displaced by
 * 7x the per-axis sample standard deviation of its 12-point
 * neighbourhood, and the result is written as PCD and PLY.
 */
int
main (int argc, char*argv[])
{
  // Input cloud and the noisy copy that will be written to disk.
  pcl::PointCloud<pcl::PointXYZRGBA>::Ptr cloud (new pcl::PointCloud<pcl::PointXYZRGBA>);
  pcl::PointCloud<pcl::PointXYZRGBA>::Ptr Noisycloud (new pcl::PointCloud<pcl::PointXYZRGBA>);
  pcl::io::loadPCDFile ("path/to/CubeFractal2.pcd", *cloud);
  std::cout << "Number of points in the input cloud is:"<< cloud->points.size() << std::endl;
  Noisycloud->resize(cloud ->points.size () );

  // kd-tree used to find each point's k nearest neighbours.
  pcl::KdTreeFLANN<pcl::PointXYZRGBA> kdtree;
  kdtree.setInputCloud (cloud);
  pcl::PointXYZRGBA searchPoint;

  int NumbersNeighbor = 12; // k of the k-NN search
  std::vector<int> NeighborsKNSearch(NumbersNeighbor);
  std::vector<float> NeighborsKNSquaredDistance(NumbersNeighbor);

  // Per-axis standard deviation of each point's neighbourhood.
  // std::vector replaces the original raw new[] buffers, which were
  // never delete[]d (memory leak).
  std::vector<double> StndDevX(cloud->points.size());
  std::vector<double> StndDevY(cloud->points.size());
  std::vector<double> StndDevZ(cloud->points.size());

  for (size_t i = 0; i < cloud->points.size(); ++i) {
    searchPoint.x = cloud->points[i].x;
    searchPoint.y = cloud->points[i].y;
    searchPoint.z = cloud->points[i].z;
    if (kdtree.nearestKSearch (searchPoint, NumbersNeighbor, NeighborsKNSearch,
                               NeighborsKNSquaredDistance) > 0) {
      NumbersNeighbor = NeighborsKNSearch.size ();
    } else {
      NumbersNeighbor = 0;
    }

    // Mean of the neighbourhood along each axis.
    double sumX = 0.00; double sumY = 0.00; double sumZ = 0.00;
    for (size_t ii = 0; ii < NeighborsKNSearch.size (); ++ii) {
      sumX += (cloud->points[ NeighborsKNSearch[ii] ].x);
      sumY += (cloud->points[ NeighborsKNSearch[ii] ].y);
      sumZ += (cloud->points[ NeighborsKNSearch[ii] ].z);
    }
    double AvgX = sumX / NeighborsKNSearch.size ();
    double AvgY = sumY / NeighborsKNSearch.size ();
    double AvgZ = sumZ / NeighborsKNSearch.size ();

    // Sample standard deviation (n-1 in the denominator).
    // NOTE(review): a one-member neighbourhood would divide by zero;
    // with k = 12 this does not occur in practice.
    sumX = 0.00; sumY = 0.00; sumZ = 0.00;
    for (size_t ii = 0; ii < NeighborsKNSearch.size (); ++ii) {
      double dx = (cloud->points[ NeighborsKNSearch[ii] ].x) - AvgX;
      double dy = (cloud->points[ NeighborsKNSearch[ii] ].y) - AvgY;
      double dz = (cloud->points[ NeighborsKNSearch[ii] ].z) - AvgZ;
      sumX += dx * dx;
      sumY += dy * dy;
      sumZ += dz * dz;
    }
    StndDevX[i] = sqrt(sumX / ( NeighborsKNSearch.size () - 1));
    StndDevY[i] = sqrt(sumY / ( NeighborsKNSearch.size () - 1));
    StndDevZ[i] = sqrt(sumZ / ( NeighborsKNSearch.size () - 1));
  } // For each point of the cloud

  // Copy all original points (coloured white) ...
  for (size_t i = 0; i < Noisycloud->points.size(); ++i) {
    Noisycloud->points[i].x = cloud->points[i].x;
    Noisycloud->points[i].y = cloud->points[i].y;
    Noisycloud->points[i].z = cloud->points[i].z;
    Noisycloud->points[i].r = 255;
    Noisycloud->points[i].g = 255;
    Noisycloud->points[i].b = 255;
  }
  // ... then displace every 10th point by 7 standard deviations.
  for (size_t i = 0; i < Noisycloud->points.size(); i += 10) {
    Noisycloud->points[i].x = cloud->points[i].x + (7* StndDevX[i]);
    Noisycloud->points[i].y = cloud->points[i].y + (7* StndDevY[i]);
    Noisycloud->points[i].z = cloud->points[i].z + (7* StndDevZ[i]);
    Noisycloud->points[i].r = 255;
    Noisycloud->points[i].g = 255;
    Noisycloud->points[i].b = 255;
  }

  std::cout << "Number of points in the Noisy cloud is:"<< Noisycloud->points.size() << std::endl;

  // Write the noisy cloud in both PCD and PLY formats.
  pcl::io::savePCDFile ("path/to//AddingNoise/Frac2withNoise710.pcd", *Noisycloud);
  pcl::PLYWriter writePLY;
  writePLY.write ("path/to//AddingNoise/Frac2withNoise710.ply", *Noisycloud, false);

  return (0);
}
#endif
#ifdef GAUSSIAN_NOISE
#include <iostream>
#include <stdio.h> /* printf, NULL */
#include <stdlib.h> /* srand, rand */
#include <math.h>
#include "time.h"
#include <boost/thread/thread.hpp>
#include <pcl/common/common_headers.h>
#include <pcl/features/normal_3d.h>
#include <pcl/io/io.h>
#include <pcl/io/pcd_io.h>
#include <pcl/visualization/pcl_visualizer.h>
#include <pcl/visualization/cloud_viewer.h>
#include <pcl/console/parse.h>
#include <pcl/io/ply_io.h>
#define PI 3.14159265
/**
 * Add Box-Muller Gaussian noise (variance 0.3, mean 0) to every 35th
 * point of a cloud, save the result as PCD/PLY and show it in a viewer.
 */
int
main (int argc, char*argv[])
{
  pcl::PointCloud<pcl::PointXYZRGBA>::Ptr cloud (new pcl::PointCloud<pcl::PointXYZRGBA>);
  pcl::PointCloud<pcl::PointXYZRGBA>::Ptr Noisycloud (new pcl::PointCloud<pcl::PointXYZRGBA>);
  pcl::io::loadPCDFile ("path/to/Tetrahedron.pcd", *cloud);
  std::cout << "Number of points in the input cloud is:"<< cloud->points.size() << std::endl;
  Noisycloud->resize(cloud ->points.size () );

  // Start from an exact (white) copy of the input cloud.
  for (size_t i = 0; i < Noisycloud->points.size(); ++i) {
    Noisycloud->points[i].x = cloud->points[i].x;
    Noisycloud->points[i].y = cloud->points[i].y;
    Noisycloud->points[i].z = cloud->points[i].z;
    Noisycloud->points[i].r = 255;
    Noisycloud->points[i].g = 255;
    Noisycloud->points[i].b = 255;
  }

  // (The original allocated three double[] buffers here that were never
  // used and never freed; they have been removed.)
  double variance = 0.3;
  double mean = 0.00;
  // NOTE(review): rand() is never seeded, so the noise pattern is
  // identical on every run; call srand() if fresh noise is wanted.
  for (size_t i = 0; i < cloud->points.size(); i += 35) {
    // Two uniform samples in [0, 0.003].
    double u1 = (((((float) rand()) / (float) RAND_MAX) * (0.003 - 0.00)) + 0.00);
    double u2 = (((((float) rand()) / (float) RAND_MAX) * (0.003 - 0.00)) + 0.00);
    // Guard against log(0): rand() == 0 would give an infinite offset.
    if (u1 <= 0.0)
      u1 = 1e-12;
    // Box-Muller transform (see codeguru thread referenced in the
    // original): gaussian = sqrt(-2 var ln u1) * sin(2 pi u2) + mean.
    double temp = sqrt(-2.0*variance*log(u1));
    double tempin = (temp * sin(2* PI *u2) + mean);
    // NOTE(review): the same offset is added to all three axes, so the
    // displacement is along the (1,1,1) direction rather than isotropic.
    Noisycloud->points[i].x = cloud->points[i].x + tempin;
    Noisycloud->points[i].y = cloud->points[i].y + tempin;
    Noisycloud->points[i].z = cloud->points[i].z + tempin;
  }

  std::cout << "Number of points in the Noisy cloud is:"<< Noisycloud->points.size() << std::endl;

  // Write the noisy cloud in both PCD and PLY formats.
  pcl::io::savePCDFile ("path/to/AddingNoise/TetrahedronNoise35.pcd", *Noisycloud);
  pcl::PLYWriter writePLY;
  writePLY.write ("path/to//AddingNoise/TetrahedronNoise35.ply", *Noisycloud, false);

  // Show the cloud until the viewer window is closed.
  pcl::visualization::CloudViewer viewer(" ONePlaneWithNoise ");
  viewer.showCloud(Noisycloud);
  while (!viewer.wasStopped ())
  {}
  return (0);
}
#endif
|
{"hexsha": "7ba2f2bfbcb771d86bff35d23224498daa41583b", "size": 9585, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "AddingNoise.cpp", "max_stars_repo_name": "n1ckfg/Edge_Extraction", "max_stars_repo_head_hexsha": "2bbe215350faf02334652af54eac4f4666872d4e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 102.0, "max_stars_repo_stars_event_min_datetime": "2017-12-14T14:17:29.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-28T12:26:13.000Z", "max_issues_repo_path": "AddingNoise.cpp", "max_issues_repo_name": "n1ckfg/Edge_Extraction", "max_issues_repo_head_hexsha": "2bbe215350faf02334652af54eac4f4666872d4e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4.0, "max_issues_repo_issues_event_min_datetime": "2019-08-22T23:08:55.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-30T10:24:35.000Z", "max_forks_repo_path": "AddingNoise.cpp", "max_forks_repo_name": "n1ckfg/Edge_Extraction", "max_forks_repo_head_hexsha": "2bbe215350faf02334652af54eac4f4666872d4e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 33.0, "max_forks_repo_forks_event_min_datetime": "2017-07-12T03:05:59.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-01T09:00:30.000Z", "avg_line_length": 38.8056680162, "max_line_length": 155, "alphanum_fraction": 0.613458529, "num_tokens": 3032}
|
#!/usr/bin/env python
#
# File: vis_hostage.py
#
# Created: Monday, August 1 2016 by rejuvyesh <mail@rejuvyesh.com>
#
from __future__ import absolute_import, print_function
import argparse
import json
import pprint
from gym import spaces
import h5py
import numpy as np
import tensorflow as tf
import rltools.algos
import rltools.log
import rltools.util
from madrl_environments import ObservationBuffer
from madrl_environments.hostage import ContinuousHostageWorld
from rltools.baselines.linear import LinearFeatureBaseline
from rltools.baselines.mlp import MLPBaseline
from rltools.baselines.zero import ZeroBaseline
from rltools.policy.gaussian import GaussianMLPPolicy
def main():
    """Replay a trained hostage-world policy from an HDF5 snapshot and
    record the rollout to a video file."""
    parser = argparse.ArgumentParser()
    # snapshot spec, e.g. defaultIS.h5/snapshots/iter0000480
    parser.add_argument('filename', type=str)
    parser.add_argument('--vid', type=str, default='/tmp/madrl.mp4')
    parser.add_argument('--deterministic', action='store_true', default=False)
    parser.add_argument('--n_steps', type=int, default=1000)
    args = parser.parse_args()
    # Split "file.h5/key/path" into the HDF5 file name and the in-file key,
    # then recover the training-time arguments stored as a JSON attribute.
    filename, file_key = rltools.util.split_h5_name(args.filename)
    print('Loading parameters from {} in {}'.format(file_key, filename))
    with h5py.File(filename, 'r') as f:
        train_args = json.loads(f.attrs['args'])
        dset = f[file_key]
        pprint.pprint(dict(dset.attrs))
    # Rebuild the environment exactly as it was configured during training.
    centralized = True if train_args['control'] == 'centralized' else False
    env = ContinuousHostageWorld(train_args['n_good'],
                                 train_args['n_hostage'],
                                 train_args['n_bad'],
                                 train_args['n_coop_save'],
                                 train_args['n_coop_avoid'],
                                 n_sensors=train_args['n_sensors'],
                                 sensor_range=train_args['sensor_range'],
                                 save_reward=train_args['save_reward'],
                                 hit_reward=train_args['hit_reward'],
                                 encounter_reward=train_args['encounter_reward'],
                                 bomb_reward=train_args['bomb_reward'],)
    if train_args['buffer_size'] > 1:
        env = ObservationBuffer(env, train_args['buffer_size'])
    # Centralized control: one policy sees the concatenation of every
    # agent's observation/action space (assumes homogeneous agents —
    # only agent 0's bounds are consulted; TODO confirm).
    if centralized:
        obsfeat_space = spaces.Box(low=env.agents[0].observation_space.low[0],
                                   high=env.agents[0].observation_space.high[0],
                                   shape=(env.agents[0].observation_space.shape[0] *
                                          len(env.agents),))  # XXX
        action_space = spaces.Box(low=env.agents[0].action_space.low[0],
                                  high=env.agents[0].action_space.high[0],
                                  shape=(env.agents[0].action_space.shape[0] *
                                         len(env.agents),))  # XXX
    else:
        obsfeat_space = env.agents[0].observation_space
        action_space = env.agents[0].action_space
    # Policy architecture must match training for load_h5 to succeed.
    policy = GaussianMLPPolicy(obsfeat_space, action_space,
                               hidden_spec=train_args['policy_hidden_spec'], enable_obsnorm=True,
                               min_stdev=0., init_logstdev=0., tblog=train_args['tblog'],
                               varscope_name='gaussmlp_policy')
    with tf.Session() as sess:
        # NOTE(review): initialize_all_variables is the deprecated pre-TF1
        # spelling of global_variables_initializer.
        sess.run(tf.initialize_all_variables())
        policy.load_h5(sess, filename, file_key)
        # o[None, ...] adds the batch dimension expected by sample_actions.
        rew = env.animate(
            act_fn=lambda o: policy.sample_actions(sess, o[None, ...], deterministic=args.deterministic),
            nsteps=args.n_steps, file_name=args.vid)
    print(rew)


if __name__ == '__main__':
    main()
|
{"hexsha": "4fa1d46fbb3403a433f7e5089c31f38a24696e73", "size": 3705, "ext": "py", "lang": "Python", "max_stars_repo_path": "vis/rltools/vis_hostage.py", "max_stars_repo_name": "SurvivorT/SRTP", "max_stars_repo_head_hexsha": "1ddc0c4ec31d61daf9f4292c533722e61818eb51", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 489, "max_stars_repo_stars_event_min_datetime": "2017-02-21T21:40:22.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T08:01:30.000Z", "max_issues_repo_path": "vis/rltools/vis_hostage.py", "max_issues_repo_name": "AliBeikmohammadi/MADRL", "max_issues_repo_head_hexsha": "3156eb6d6a1e8a4c91ff1dce9f5fc565b2c25c94", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 35, "max_issues_repo_issues_event_min_datetime": "2017-03-10T12:28:11.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-14T14:58:21.000Z", "max_forks_repo_path": "vis/rltools/vis_hostage.py", "max_forks_repo_name": "AliBeikmohammadi/MADRL", "max_forks_repo_head_hexsha": "3156eb6d6a1e8a4c91ff1dce9f5fc565b2c25c94", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 121, "max_forks_repo_forks_event_min_datetime": "2017-02-24T20:13:53.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-08T08:56:32.000Z", "avg_line_length": 40.2717391304, "max_line_length": 105, "alphanum_fraction": 0.6070175439, "include": true, "reason": "import numpy", "num_tokens": 794}
|
from pyamg.testing import *
import numpy
import scipy
from scipy.sparse import spdiags, csr_matrix, bsr_matrix, eye
from scipy import arange, ones, zeros, array, allclose, zeros_like, \
tril, diag, triu, rand, asmatrix, mat
from scipy.linalg import solve
from pyamg.gallery import poisson, sprand, elasticity
from pyamg.relaxation import *
from pyamg.util.utils import get_block_diag
# Ignore efficiency warnings
import warnings
from scipy.sparse import SparseEfficiencyWarning
warnings.simplefilter('ignore',SparseEfficiencyWarning)
class TestCommonRelaxation(TestCase):
    """Interface checks shared by every relaxation routine."""

    def setUp(self):
        # (relaxation routine, extra positional args, extra keyword args)
        self.cases = [
            (gauss_seidel, (), {}),
            (jacobi, (), {}),
            (block_jacobi, (), {}),
            (block_gauss_seidel, (), {}),
            (jacobi_ne, (), {}),
            (schwarz, (), {}),
            (sor, (0.5,), {}),
            (gauss_seidel_indexed, ([1, 0],), {}),
            (polynomial, ([0.6, 0.1],), {}),
        ]

    def test_single_precision(self):
        for method, extra_args, extra_kwargs in self.cases:
            A = poisson((4,), format='csr').astype('float32')
            rhs = arange(A.shape[0], dtype='float32')
            guess = 0 * rhs
            method(A, guess, rhs, *extra_args, **extra_kwargs)

    def test_double_precision(self):
        for method, extra_args, extra_kwargs in self.cases:
            A = poisson((4,), format='csr').astype('float64')
            rhs = arange(A.shape[0], dtype='float64')
            guess = 0 * rhs
            method(A, guess, rhs, *extra_args, **extra_kwargs)

    def test_strided_x(self):
        """non-contiguous x should raise errors"""
        for method, extra_args, extra_kwargs in self.cases:
            A = poisson((4,), format='csr').astype('float64')
            rhs = arange(A.shape[0], dtype='float64')
            strided = zeros(2 * A.shape[0])[::2]
            assert_raises(ValueError, method, A, strided, rhs,
                          *extra_args, **extra_kwargs)

    def test_mixed_precision(self):
        """mixed precision arguments should raise errors"""
        for method, extra_args, extra_kwargs in self.cases:
            A32 = poisson((4,), format='csr').astype('float32')
            b32 = arange(A32.shape[0], dtype='float32')
            x32 = 0 * b32
            A64 = poisson((4,), format='csr').astype('float64')
            b64 = arange(A64.shape[0], dtype='float64')
            x64 = 0 * b64
            # every combination that mixes 32- and 64-bit operands must fail
            for A, x, b in [(A32, x32, b64), (A32, x64, b32),
                            (A64, x32, b32), (A32, x64, b64),
                            (A64, x64, b32), (A64, x32, b64)]:
                assert_raises(TypeError, method, A, x, b,
                              *extra_args, **extra_kwargs)

    def test_vector_sizes(self):
        """incorrect vector sizes should raise errors"""
        for method, extra_args, extra_kwargs in self.cases:
            A = poisson((4,), format='csr').astype('float64')
            b4 = arange(4, dtype='float64')
            x4 = 0 * b4
            b5 = arange(5, dtype='float64')
            x5 = 0 * b5
            for x, b in [(x4, b5), (x5, b4), (x5, b5)]:
                assert_raises(ValueError, method, A, x, b,
                              *extra_args, **extra_kwargs)

    def test_non_square_matrix(self):
        for method, extra_args, extra_kwargs in self.cases:
            A = poisson((4,), format='csr').astype('float64')
            A = A[:3]
            rhs = arange(A.shape[0], dtype='float64')
            guess = ones(A.shape[1], dtype='float64')
            assert_raises(ValueError, method, A, guess, rhs,
                          *extra_args, **extra_kwargs)
class TestRelaxation(TestCase):
def test_polynomial(self):
N = 3
A = spdiags([2*ones(N),-ones(N),-ones(N)],[0,-1,1],N,N,format='csr')
x0 = arange(N, dtype=A.dtype)
x = x0.copy()
b = zeros(N, dtype=A.dtype)
r = (b - A*x0)
polynomial(A,x,b,[-1.0/3.0])
assert_almost_equal(x,x0-1.0/3.0*r)
x = x0.copy()
polynomial(A,x,b,[0.2,-1])
assert_almost_equal(x,x0 + 0.2*A*r - r)
x = x0.copy()
polynomial(A,x,b,[0.2,-1])
assert_almost_equal(x,x0 + 0.2*A*r - r)
x = x0.copy()
polynomial(A,x,b,[-0.14285714, 1., -2.])
assert_almost_equal(x,x0 - 0.14285714*A*A*r + A*r - 2*r)
# polynomial() optimizes for the case x=0
x = 0*x0
polynomial(A,x,b,[-1.0/3.0])
assert_almost_equal(x,1.0/3.0*b)
x = 0*x0
polynomial(A,x,b,[-0.14285714, 1., -2.])
assert_almost_equal(x,0.14285714*A*A*b + A*b - 2*b)
def test_jacobi(self):
N = 1
A = spdiags([2*ones(N),-ones(N),-ones(N)],[0,-1,1],N,N,format='csr')
x = arange(N).astype(numpy.float64)
b = zeros(N)
jacobi(A,x,b)
assert_almost_equal(x,array([0]))
N = 3
A = spdiags([2*ones(N),-ones(N),-ones(N)],[0,-1,1],N,N,format='csr')
x = zeros(N)
b = arange(N).astype(numpy.float64)
jacobi(A,x,b)
assert_almost_equal(x,array([0.0,0.5,1.0]))
N = 3
A = spdiags([2*ones(N),-ones(N),-ones(N)],[0,-1,1],N,N,format='csr')
x = arange(N).astype(numpy.float64)
b = zeros(N)
jacobi(A,x,b)
assert_almost_equal(x,array([0.5,1.0,0.5]))
N = 1
A = spdiags([2*ones(N),-ones(N),-ones(N)],[0,-1,1],N,N,format='csr')
x = arange(N, dtype=A.dtype)
b = array([10], dtype=A.dtype)
jacobi(A,x,b)
assert_almost_equal(x,array([5]))
N = 3
A = spdiags([2*ones(N),-ones(N),-ones(N)],[0,-1,1],N,N,format='csr')
x = arange(N, dtype=A.dtype)
b = array([10,20,30], dtype=A.dtype)
jacobi(A,x,b)
assert_almost_equal(x,array([5.5,11.0,15.5]))
N = 3
A = spdiags([2*ones(N),-ones(N),-ones(N)],[0,-1,1],N,N,format='csr')
x = arange(N, dtype=A.dtype)
x_copy = x.copy()
b = array([10,20,30], dtype=A.dtype)
jacobi(A,x,b,omega=1.0/3.0)
assert_almost_equal(x,2.0/3.0*x_copy + 1.0/3.0*array([5.5,11.0,15.5]))
    def test_jacobi_bsr(self):
        """Jacobi on a BSR matrix must match the CSR result for every block
        size that evenly divides the matrix dimension."""
        cases = []
        #JBS: remove some N
        for N in [1,2,3,4,5,6,10]:
            cases.append( spdiags([2*ones(N),-ones(N),-ones(N)],[0,-1,1],N,N).tocsr() )
            cases.append( elasticity.linear_elasticity((N,N))[0].tocsr() )
            # dense random SPD-like case (C*C.H is Hermitian positive semidefinite)
            C = csr_matrix( rand(N,N) )
            cases.append( C*C.H )
            # sparse random case with a strengthened diagonal
            C = sprand(N*2,N*2,0.3) + eye(N*2,N*2)
            cases.append( C*C.H )
        for A in cases:
            # all block sizes dividing the dimension
            divisors = [ n for n in range(1,A.shape[0]+1) if A.shape[0] % n == 0 ]
            x_csr = arange(A.shape[0]).astype(numpy.float64)
            b = x_csr**2
            jacobi(A,x_csr,b)
            for D in divisors:
                B = A.tobsr(blocksize=(D,D))
                x_bsr = arange(B.shape[0]).astype(numpy.float64)
                jacobi(B,x_bsr,b)
                assert_almost_equal(x_bsr,x_csr)
def test_gauss_seidel_bsr(self):
sweeps = ['forward', 'backward', 'symmetric']
cases = []
for N in [1,2,3,4,5,6,10]:
cases.append( spdiags([2*ones(N),-ones(N),-ones(N)],[0,-1,1],N,N).tocsr() )
cases.append( elasticity.linear_elasticity((N,N))[0].tocsr() )
C = csr_matrix( rand(N,N) )
cases.append( C*C.H )
C = sprand(N*2,N*2,0.3) + eye(N*2,N*2)
cases.append( C*C.H )
for A in cases:
for sweep in sweeps:
divisors = [ n for n in range(1,A.shape[0]+1) if A.shape[0] % n == 0 ]
x_csr = arange(A.shape[0]).astype(numpy.float64)
b = x_csr**2
gauss_seidel(A,x_csr,b,sweep=sweep)
for D in divisors:
B = A.tobsr(blocksize=(D,D))
x_bsr = arange(B.shape[0]).astype(numpy.float64)
gauss_seidel(B,x_bsr,b,sweep=sweep)
assert_almost_equal(x_bsr,x_csr)
    def test_gauss_seidel_gold(self):
        """Compare gauss_seidel() against a dense triangular-solve reference."""
        scipy.random.seed(0)
        cases = []
        cases.append( poisson( (4,), format='csr' ) )
        cases.append( poisson( (4,4), format='csr' ) )
        temp = asmatrix( rand(4,4) )
        cases.append( csr_matrix( temp.T * temp) )  # dense symmetric case
        # reference implementation
        def gold(A,x,b,iterations,sweep):
            # classic splitting A = L + D + U (strictly lower / diagonal / strictly upper)
            A = A.todense()
            L = tril(A,k=-1)
            D = diag(diag(A))
            U = triu(A,k=1)
            for i in range(iterations):
                if sweep == 'forward':
                    x = solve(L + D, (b - U*x) )
                elif sweep == 'backward':
                    x = solve(U + D, (b - L*x) )
                else:
                    # symmetric sweep: forward then backward
                    x = solve(L + D, (b - U*x) )
                    x = solve(U + D, (b - L*x) )
            return x
        for A in cases:
            b = asmatrix(rand(A.shape[0],1))
            x = asmatrix(rand(A.shape[0],1))
            x_copy = x.copy()
            gauss_seidel(A, x, b, iterations=1, sweep='forward')
            assert_almost_equal( x, gold(A,x_copy,b,iterations=1,sweep='forward') )
            x_copy = x.copy()
            gauss_seidel(A, x, b, iterations=1, sweep='backward')
            assert_almost_equal( x, gold(A,x_copy,b,iterations=1,sweep='backward') )
            x_copy = x.copy()
            gauss_seidel(A, x, b, iterations=1, sweep='symmetric')
            assert_almost_equal( x, gold(A,x_copy,b,iterations=1,sweep='symmetric') )
def test_gauss_seidel_csr(self):
N = 1
A = spdiags([2*ones(N),-ones(N),-ones(N)],[0,-1,1],N,N,format='csr')
x = arange(N).astype(numpy.float64)
b = zeros(N)
gauss_seidel(A,x,b)
assert_almost_equal(x,array([0]))
N = 3
A = spdiags([2*ones(N),-ones(N),-ones(N)],[0,-1,1],N,N,format='csr')
x = arange(N).astype(numpy.float64)
b = zeros(N)
gauss_seidel(A,x,b)
assert_almost_equal(x,array([1.0/2.0,5.0/4.0,5.0/8.0]))
N = 1
A = spdiags([2*ones(N),-ones(N),-ones(N)],[0,-1,1],N,N,format='csr')
x = arange(N).astype(numpy.float64)
b = zeros(N)
gauss_seidel(A,x,b,sweep='backward')
assert_almost_equal(x,array([0]))
N = 3
A = spdiags([2*ones(N),-ones(N),-ones(N)],[0,-1,1],N,N,format='csr')
x = arange(N).astype(numpy.float64)
b = zeros(N)
gauss_seidel(A,x,b,sweep='backward')
assert_almost_equal(x,array([1.0/8.0,1.0/4.0,1.0/2.0]))
N = 1
A = spdiags([2*ones(N),-ones(N),-ones(N)],[0,-1,1],N,N,format='csr')
x = arange(N, dtype=A.dtype)
b = array([10], dtype=A.dtype)
gauss_seidel(A,x,b)
assert_almost_equal(x,array([5]))
N = 3
A = spdiags([2*ones(N),-ones(N),-ones(N)],[0,-1,1],N,N,format='csr')
x = arange(N, dtype=A.dtype)
b = array([10,20,30], dtype=A.dtype)
gauss_seidel(A,x,b)
assert_almost_equal(x,array([11.0/2.0,55.0/4,175.0/8.0]))
#forward and backward passes should give same result with x=ones(N),b=zeros(N)
N = 100
A = spdiags([2*ones(N),-ones(N),-ones(N)],[0,-1,1],N,N,format='csr')
x = ones(N)
b = zeros(N)
gauss_seidel(A,x,b,iterations=200,sweep='forward')
resid1 = numpy.linalg.norm(A*x,2)
x = ones(N)
gauss_seidel(A,x,b,iterations=200,sweep='backward')
resid2 = numpy.linalg.norm(A*x,2)
self.assert_(resid1 < 0.01 and resid2 < 0.01)
self.assert_(allclose(resid1,resid2))
    def test_gauss_seidel_indexed(self):
        """gauss_seidel_indexed() relaxes only the listed row indices, in order."""
        N = 1
        A = spdiags([2*ones(N),-ones(N),-ones(N)],[0,-1,1],N,N,format='csr')
        x = arange(N).astype(numpy.float64)
        b = zeros(N)
        gauss_seidel_indexed(A,x,b,[0])
        assert_almost_equal(x,array([0]))
        # full index set == ordinary forward Gauss-Seidel
        N = 3
        A = spdiags([2*ones(N),-ones(N),-ones(N)],[0,-1,1],N,N,format='csr')
        x = arange(N).astype(numpy.float64)
        b = zeros(N)
        gauss_seidel_indexed(A,x,b,[0,1,2])
        assert_almost_equal(x,array([1.0/2.0,5.0/4.0,5.0/8.0]))
        # reversed index list traversed backward == forward sweep result
        N = 3
        A = spdiags([2*ones(N),-ones(N),-ones(N)],[0,-1,1],N,N,format='csr')
        x = arange(N).astype(numpy.float64)
        b = zeros(N)
        gauss_seidel_indexed(A,x,b,[2,1,0],sweep='backward')
        assert_almost_equal(x,array([1.0/2.0,5.0/4.0,5.0/8.0]))
        # ascending index list traversed backward == ordinary backward sweep
        N = 3
        A = spdiags([2*ones(N),-ones(N),-ones(N)],[0,-1,1],N,N,format='csr')
        x = arange(N).astype(numpy.float64)
        b = zeros(N)
        gauss_seidel_indexed(A,x,b,[0,1,2],sweep='backward')
        assert_almost_equal(x,array([1.0/8.0,1.0/4.0,1.0/2.0]))
        # partial index set: rows 1 and 2 are left untouched
        N = 4
        A = spdiags([2*ones(N),-ones(N),-ones(N)],[0,-1,1],N,N,format='csr')
        x = ones(N)
        b = zeros(N)
        gauss_seidel_indexed(A,x,b,[0,3])
        assert_almost_equal(x,array([1.0/2.0, 1.0, 1.0, 1.0/2.0]))
        # repeated index: row 0 relaxed twice, same result as once here
        N = 4
        A = spdiags([2*ones(N),-ones(N),-ones(N)],[0,-1,1],N,N,format='csr')
        x = ones(N)
        b = zeros(N)
        gauss_seidel_indexed(A,x,b,[0,0])
        assert_almost_equal(x,array([1.0/2.0, 1.0, 1.0, 1.0]))
    def test_jacobi_ne(self):
        """Jacobi on the normal equations (jacobi_ne); x updated in place."""
        N = 1
        A = spdiags([2*ones(N),-ones(N),-ones(N)],[0,-1,1],N,N,format='csr')
        x = arange(N).astype(numpy.float64)
        b = zeros(N)
        jacobi_ne(A,x,b)
        assert_almost_equal(x,array([0]))
        N = 3
        A = spdiags([2*ones(N),-ones(N),-ones(N)],[0,-1,1],N,N,format='csr')
        x = zeros(N)
        b = arange(N).astype(numpy.float64)
        jacobi_ne(A,x,b)
        assert_almost_equal(x,array([-1./6., -1./15., 19./30.]))
        N = 3
        A = spdiags([2*ones(N),-ones(N),-ones(N)],[0,-1,1],N,N,format='csr')
        x = arange(N).astype(numpy.float64)
        b = zeros(N)
        jacobi_ne(A,x,b)
        assert_almost_equal(x,array([2./5., 7./5., 4./5.]))
        N = 1
        A = spdiags([2*ones(N),-ones(N),-ones(N)],[0,-1,1],N,N,format='csr')
        x = arange(N, dtype=A.dtype)
        b = array([10], dtype=A.dtype)
        jacobi_ne(A,x,b)
        assert_almost_equal(x,array([5]))
        N = 3
        A = spdiags([2*ones(N),-ones(N),-ones(N)],[0,-1,1],N,N,format='csr')
        x = arange(N, dtype=A.dtype)
        b = array([10,20,30], dtype=A.dtype)
        jacobi_ne(A,x,b)
        assert_almost_equal(x,array([16./15., 1./15., (9 + 7./15.) ]))
        # weighted sweep: (1-omega)*x_old + omega*(unweighted result)
        N = 3
        A = spdiags([2*ones(N),-ones(N),-ones(N)],[0,-1,1],N,N,format='csr')
        x = arange(N, dtype=A.dtype)
        x_copy = x.copy()
        b = array([10,20,30], dtype=A.dtype)
        jacobi_ne(A,x,b,omega=1.0/3.0)
        assert_almost_equal(x,2.0/3.0*x_copy + 1.0/3.0*array([16./15., 1./15., (9 + 7./15.) ]))
def test_gauss_seidel_ne_bsr(self):
cases = []
#JBS: remove some N
for N in [1,2,3,4,5,6,10]:
A = spdiags([2*ones(N),-ones(N),-ones(N)],[0,-1,1],N,N).tocsr()
divisors = [ n for n in range(1,N+1) if N % n == 0 ]
x_csr = arange(N).astype(numpy.float64)
b = x_csr**2
gauss_seidel_ne(A,x_csr,b)
for D in divisors:
B = A.tobsr(blocksize=(D,D))
x_bsr = arange(N).astype(numpy.float64)
gauss_seidel_ne(B,x_bsr,b)
assert_almost_equal(x_bsr,x_csr)
    def test_gauss_seidel_ne_new(self):
        """Compare gauss_seidel_ne() against a dense reference built on A*A.T."""
        scipy.random.seed(0)
        cases = []
        cases.append( poisson( (4,), format='csr' ) )
        cases.append( poisson( (4,4), format='csr' ) )
        temp = asmatrix( rand(4,4) )
        cases.append( csr_matrix( temp.T * temp) )
        # reference implementation
        def gold(A,x,b,iterations,sweep):
            A = mat(A.todense())
            # NE variant sweeps over AA = A*A.T; L and U include the diagonal (k=0)
            AA = A*A.T
            L = tril(AA,k=0)
            U = triu(AA,k=0)
            for i in range(iterations):
                if sweep == 'forward':
                    x = x + A.T*(solve(L, (b - A*x) ))
                elif sweep == 'backward':
                    x = x + A.T*(solve(U, (b - A*x) ))
                else:
                    # symmetric sweep: forward then backward
                    x = x + A.T*(solve(L, (b - A*x) ))
                    x = x + A.T*(solve(U, (b - A*x) ))
            return x
        for A in cases:
            b = asmatrix(rand(A.shape[0],1))
            x = asmatrix(rand(A.shape[0],1))
            x_copy = x.copy()
            gauss_seidel_ne(A, x, b, iterations=1, sweep='forward')
            assert_almost_equal( x, gold(A,x_copy,b,iterations=1,sweep='forward') )
            x_copy = x.copy()
            gauss_seidel_ne(A, x, b, iterations=1, sweep='backward')
            assert_almost_equal( x, gold(A,x_copy,b,iterations=1,sweep='backward') )
            x_copy = x.copy()
            gauss_seidel_ne(A, x, b, iterations=1, sweep='symmetric')
            assert_almost_equal( x, gold(A,x_copy,b,iterations=1,sweep='symmetric') )
def test_gauss_seidel_ne_csr(self):
N = 1
A = spdiags([2*ones(N),-ones(N),-ones(N)],[0,-1,1],N,N,format='csr')
x = arange(N).astype(numpy.float64)
b = zeros(N)
gauss_seidel_ne(A,x,b)
assert_almost_equal(x,array([0]))
N = 3
A = spdiags([2*ones(N),-ones(N),-ones(N)],[0,-1,1],N,N,format='csr')
x = arange(N).astype(numpy.float64)
b = zeros(N)
gauss_seidel_ne(A,x,b)
assert_almost_equal(x,array([4./15., 8./5., 4./5.]))
N = 1
A = spdiags([2*ones(N),-ones(N),-ones(N)],[0,-1,1],N,N,format='csr')
x = arange(N, dtype=A.dtype)
b = zeros(N, dtype=A.dtype)
gauss_seidel_ne(A,x,b,sweep='backward')
assert_almost_equal(x,array([0]))
N = 3
A = spdiags([2*ones(N),-ones(N),-ones(N)],[0,-1,1],N,N,format='csr')
x = arange(N, dtype=A.dtype)
b = zeros(N, dtype=A.dtype)
gauss_seidel_ne(A,x,b,sweep='backward')
assert_almost_equal(x,array([2./5., 4./5., 6./5.]))
N = 1
A = spdiags([2*ones(N),-ones(N),-ones(N)],[0,-1,1],N,N,format='csr')
x = arange(N, dtype=A.dtype)
b = array([10], dtype=A.dtype)
gauss_seidel_ne(A,x,b)
assert_almost_equal(x,array([5]))
N = 1
A = spdiags([2*ones(N),-ones(N),-ones(N)],[0,-1,1],N,N,format='csr')
x = arange(N, dtype=A.dtype)
b = array([10], dtype=A.dtype)
gauss_seidel_ne(A,x,b,sweep='backward')
assert_almost_equal(x,array([5]))
N = 3
A = spdiags([2*ones(N),-ones(N),-ones(N)],[0,-1,1],N,N,format='csr')
x = arange(N, dtype=A.dtype)
b = array([10,20,30], dtype=A.dtype)
gauss_seidel_ne(A,x,b)
assert_almost_equal(x,array([-2./5., -2./5., (14 + 4./5.)]))
#forward and backward passes should give same result with x=ones(N),b=zeros(N)
N = 100
A = spdiags([2*ones(N),-ones(N),-ones(N)],[0,-1,1],N,N,format='csr')
x = ones(N)
b = zeros(N)
gauss_seidel_ne(A,x,b,iterations=200,sweep='forward')
resid1 = numpy.linalg.norm(A*x,2)
x = ones(N)
gauss_seidel_ne(A,x,b,iterations=200,sweep='backward')
resid2 = numpy.linalg.norm(A*x,2)
self.assert_(resid1 < 0.2 and resid2 < 0.2)
self.assert_(allclose(resid1,resid2))
def test_gauss_seidel_nr_bsr(self):
cases = []
for N in [1,2,3,4,5,6,10]:
A = spdiags([2*ones(N),-ones(N),-ones(N)],[0,-1,1],N,N).tocsr()
divisors = [ n for n in range(1,N+1) if N % n == 0 ]
x_csr = arange(N).astype(numpy.float64)
b = x_csr**2
gauss_seidel_nr(A,x_csr,b)
for D in divisors:
B = A.tobsr(blocksize=(D,D))
x_bsr = arange(N).astype(numpy.float64)
gauss_seidel_nr(B,x_bsr,b)
assert_almost_equal(x_bsr,x_csr)
def test_gauss_seidel_nr(self):
scipy.random.seed(0)
cases = []
cases.append( poisson( (4,), format='csr' ) )
cases.append( poisson( (4,4), format='csr' ) )
temp = asmatrix( rand(1,1) )
cases.append( csr_matrix( temp) )
temp = asmatrix( rand(2,2) )
cases.append( csr_matrix( temp) )
temp = asmatrix( rand(4,4) )
cases.append( csr_matrix( temp) )
# reference implementation
def gold(A,x,b,iterations,sweep):
A = mat(A.todense())
AA = A.H*A
L = tril(AA,k=0)
U = triu(AA,k=0)
for i in range(iterations):
if sweep == 'forward':
x = x + (solve(L, A.H*(b - A*x) ))
elif sweep == 'backward':
x = x + (solve(U, A.H*(b - A*x) ))
else:
x = x + (solve(L, A.H*(b - A*x) ))
x = x + (solve(U, A.H*(b - A*x) ))
return x
for A in cases:
b = asmatrix(rand(A.shape[0],1))
x = asmatrix(rand(A.shape[0],1))
x_copy = x.copy()
gauss_seidel_nr(A, x, b, iterations=1, sweep='forward')
assert_almost_equal( x, gold(A,x_copy,b,iterations=1,sweep='forward') )
x_copy = x.copy()
gauss_seidel_nr(A, x, b, iterations=1, sweep='backward')
assert_almost_equal( x, gold(A,x_copy,b,iterations=1,sweep='backward') )
x_copy = x.copy()
gauss_seidel_nr(A, x, b, iterations=1, sweep='symmetric')
assert_almost_equal( x, gold(A,x_copy,b,iterations=1,sweep='symmetric') )
#forward and backward passes should give same result with x=ones(N),b=zeros(N)
N = 100
A = spdiags([2*ones(N),-ones(N),-ones(N)],[0,-1,1],N,N,format='csr')
x = ones(N)
b = zeros(N)
gauss_seidel_nr(A,x,b,iterations=200,sweep='forward')
resid1 = numpy.linalg.norm(A*x,2)
x = ones(N)
gauss_seidel_nr(A,x,b,iterations=200,sweep='backward')
resid2 = numpy.linalg.norm(A*x,2)
self.assert_(resid1 < 0.2 and resid2 < 0.2)
self.assert_(allclose(resid1,resid2))
def test_schwarz_gold(self):
scipy.random.seed(0)
cases = []
cases.append( poisson( (4,), format='csr' ) )
cases.append( poisson( (4,4), format='csr' ) )
A = poisson( (8,8), format='csr' )
A.data[0] = 10.0; A.data[1] = -0.5; A.data[3] = -0.5
cases.append( A )
temp = asmatrix( rand(1,1) )
cases.append( csr_matrix( temp.T * temp) )
temp = asmatrix( rand(2,2) )
cases.append( csr_matrix( temp.T * temp) )
temp = asmatrix( rand(4,4) )
cases.append( csr_matrix( temp.T * temp) )
# reference implementation
def gold(A,x,b,iterations,sweep='forward'):
A = csr_matrix(A)
##
# Default is point-wise iteration with each subdomain a point's neighborhood
# in the matrix graph
subdomains = [A.indices[A.indptr[i]:A.indptr[i+1]] for i in range(A.shape[0]) ]
##
# extract each subdomain's block from the matrix
subblocks = [ scipy.linalg.pinv2(( (A[subdomains[i],:]).tocsc()[:,subdomains[i]] ).todense())\
for i in range(len(subdomains)) ]
if sweep == 'forward':
indices = range(len(subdomains))
elif sweep == 'backward':
indices = range(len(subdomains)-1,-1,-1)
elif sweep == 'symmetric':
indices = range(len(subdomains))
indices.extend( range(len(subdomains)-1,-1,-1) )
##
# Multiplicative Schwarz iterations
for j in xrange(iterations):
for i in indices:
x[subdomains[i]] = scipy.dot(subblocks[i], (b[subdomains[i]] - A[subdomains[i],:]*x)) + \
x[subdomains[i]]
return x
for A in cases:
b = asmatrix(rand(A.shape[0],1))
x = asmatrix(rand(A.shape[0],1))
x_copy = x.copy()
schwarz(A, x, b, iterations=1, sweep='forward')
assert_almost_equal( x, gold(A,x_copy,b,iterations=1,sweep='forward') )
x_copy = x.copy()
schwarz(A, x, b, iterations=1, sweep='backward')
assert_almost_equal( x, gold(A,x_copy,b,iterations=1,sweep='backward') )
x_copy = x.copy()
schwarz(A, x, b, iterations=1, sweep='symmetric')
assert_almost_equal( x, gold(A,x_copy,b,iterations=1,sweep='symmetric') )
# Test complex arithmetic
class TestComplexRelaxation(TestCase):
    def test_jacobi(self):
        """Weighted Jacobi with complex matrices, vectors, and omega."""
        N = 1
        A = spdiags([2*ones(N),-ones(N),-ones(N)],[0,-1,1],N,N,format='csr')
        # make the matrix complex: entries become (1+1j) times the real values
        A.data = A.data + 1.0j*A.data
        x = arange(N).astype(A.dtype)
        b = zeros(N).astype(A.dtype)
        jacobi(A,x,b)
        assert_almost_equal(x,array([0]))
        # complex relaxation weight
        x = array([1.0 + 1.0j])
        b = array([-1.0 + 1.0j])
        omega = 4.0 - 1.0j
        jacobi(A,x,b,omega=omega)
        assert_almost_equal(x,array([-3.5]))
        N = 3
        A = spdiags([2*ones(N),-ones(N),-ones(N)],[0,-1,1],N,N,format='csr')
        A.data = A.data + 1.0j*A.data
        x = zeros(N).astype(A.dtype)
        b = arange(N).astype(A.dtype)
        b = b + 1.0j*b
        soln = array([0.0,0.5,1.0])
        jacobi(A,x,b)
        assert_almost_equal(x,soln)
        N = 3
        A = spdiags([2*ones(N),-ones(N),-ones(N)],[0,-1,1],N,N,format='csr')
        A.data = A.data + 1.0j*A.data
        x = arange(N).astype(A.dtype)
        x = x + 1.0j*x
        b = zeros(N).astype(A.dtype)
        soln = array([0.5 + 0.5j,1.0 + 1.0j,0.5+0.5j])
        jacobi(A,x,b)
        assert_almost_equal(x,soln)
        N = 1
        A = spdiags([2*ones(N),-ones(N),-ones(N)],[0,-1,1],N,N,format='csr')
        A.data = A.data + 1.0j*A.data
        x = arange(N).astype(A.dtype)
        b = array([10]).astype(A.dtype)
        jacobi(A,x,b)
        assert_almost_equal(x,array([2.5 - 2.5j]))
        N = 3
        A = spdiags([2*ones(N),-ones(N),-ones(N)],[0,-1,1],N,N,format='csr')
        A.data = A.data + 1.0j*A.data
        x = arange(N).astype(A.dtype)
        x = x + 1.0j*x
        b = array([10,20,30]).astype(A.dtype)
        soln = array([3.0-2.0j, 6.0-4.0j, 8.0-7.0j])
        jacobi(A,x,b)
        assert_almost_equal(x,soln)
        # weighted sweep: (1-omega)*x_old + omega*(unweighted result)
        N = 3
        A = spdiags([2*ones(N),-ones(N),-ones(N)],[0,-1,1],N,N,format='csr')
        A.data = A.data + 1.0j*A.data
        x = arange(N).astype(A.dtype)
        x = x + 1.0j*x
        x_copy = x.copy()
        b = array([10,20,30]).astype(A.dtype)
        soln = 2.0/3.0*x_copy + 1.0/3.0*array([3.0-2.0j, 6.0-4.0j, 8.0-7.0j])
        jacobi(A,x,b,omega=1.0/3.0)
        assert_almost_equal(x,soln)
    def test_jacobi_bsr(self):
        """Complex-valued Jacobi: BSR must match CSR for every divisor block size."""
        cases = []
        for N in [1,2,3,4,5,6,10]:
            # tridiagonal matrix with a small random imaginary perturbation
            C = spdiags([2*ones(N),-ones(N),-ones(N)],[0,-1,1],N,N).tocsr()
            C.data = C.data + 1.0j*1e-3*rand(C.data.shape[0],)
            cases.append( C )
            # purely imaginary elasticity operator
            cases.append( 1.0j*elasticity.linear_elasticity((N,N))[0].tocsr() )
            # dense random Hermitian case
            C = csr_matrix( rand(N,N) + 1.0j*rand(N,N) )
            cases.append( C*C.H )
            # sparse random Hermitian case with strengthened diagonal
            C = sprand(N*2,N*2,0.3) + 1.0j*sprand(N*2,N*2,0.3) + eye(N*2,N*2)
            cases.append( C*C.H )
        for A in cases:
            divisors = [ n for n in range(1,A.shape[0]+1) if A.shape[0] % n == 0 ]
            # shared complex starting vector so CSR and BSR sweeps begin identically
            x0 = (arange(A.shape[0]) + 1.0j*1e-3*rand(A.shape[0],)).astype(A.dtype)
            x_csr = x0.copy()
            b = x_csr**2
            jacobi(A,x_csr,b)
            for D in divisors:
                B = A.tobsr(blocksize=(D,D))
                x_bsr = x0.copy()
                jacobi(B,x_bsr,b)
                assert_almost_equal(x_bsr,x_csr)
    def test_gauss_seidel_bsr(self):
        """Complex-valued Gauss-Seidel: BSR must match CSR for every divisor
        block size and sweep direction."""
        sweeps = ['forward', 'backward', 'symmetric']
        cases = []
        for N in [1,2,3,4,5,6,10]:
            # tridiagonal matrix with a small random imaginary perturbation
            C = spdiags([2*ones(N),-ones(N),-ones(N)],[0,-1,1],N,N).tocsr()
            C.data = C.data + 1.0j*1e-3*rand(C.data.shape[0],)
            cases.append( C )
            # purely imaginary elasticity operator
            cases.append( 1.0j*elasticity.linear_elasticity((N,N))[0].tocsr() )
            # dense random Hermitian case
            C = csr_matrix( rand(N,N) + 1.0j*rand(N,N) )
            cases.append( C*C.H )
            # sparse random Hermitian case with strengthened diagonal
            C = sprand(N*2,N*2,0.3) + 1.0j*sprand(N*2,N*2,0.3) + eye(N*2,N*2)
            cases.append( C*C.H )
        for A in cases:
            for sweep in sweeps:
                divisors = [ n for n in range(1,A.shape[0]+1) if A.shape[0] % n == 0 ]
                # shared complex starting vector so both sweeps begin identically
                x0 = (arange(A.shape[0]) + 1.0j*1e-3*rand(A.shape[0],)).astype(A.dtype)
                x_csr = x0.copy()
                b = x_csr**2
                gauss_seidel(A,x_csr,b,sweep=sweep)
                for D in divisors:
                    B = A.tobsr(blocksize=(D,D))
                    x_bsr = x0.copy()
                    gauss_seidel(B,x_bsr,b,sweep=sweep)
                    assert_almost_equal(x_bsr,x_csr)
def test_schwarz_gold(self):
scipy.random.seed(0)
cases = []
A = poisson( (4,), format='csr' ); A.data = A.data + 1.0j*1e-3*rand(A.data.shape[0],)
cases.append(A)
A = poisson( (4,4), format='csr' ); A.data = A.data + 1.0j*1e-3*rand(A.data.shape[0],)
cases.append(A);
temp = asmatrix( rand(1,1) ) + 1.0j*asmatrix( rand(1,1) )
cases.append( csr_matrix( temp.H * temp) )
temp = asmatrix( rand(2,2) ) + 1.0j*asmatrix( rand(2,2) )
cases.append( csr_matrix( temp.H * temp) )
temp = asmatrix( rand(4,4) ) + 1.0j*asmatrix( rand(4,4) )
cases.append( csr_matrix( temp.H * temp) )
# reference implementation
def gold(A,x,b,iterations):
A = csr_matrix(A)
##
# Default is point-wise iteration with each subdomain a point's neighborhood
# in the matrix graph
subdomains = [A.indices[A.indptr[i]:A.indptr[i+1]] for i in range(A.shape[0]) ]
##
# extract each subdomain's block from the matrix
subblocks = [ scipy.linalg.pinv2(( (A[subdomains[i],:]).tocsc()[:,subdomains[i]] ).todense())\
for i in range(len(subdomains)) ]
##
# Multiplicative Schwarz iterations
for j in xrange(iterations):
for i in xrange(len(subdomains)):
x[subdomains[i]] = scipy.dot(subblocks[i], (b[subdomains[i]] - A[subdomains[i],:]*x)) + \
x[subdomains[i]]
return x
for A in cases:
b = asmatrix(rand(A.shape[0],1)) + 1.0j*asmatrix(rand(A.shape[0],1)).astype(A.dtype)
x = asmatrix(rand(A.shape[0],1)) + 1.0j*asmatrix(rand(A.shape[0],1)).astype(A.dtype)
x_copy = x.copy()
schwarz(A, x, b, iterations=1)
assert_almost_equal( x, gold(A,x_copy,b,iterations=1) )
    def test_gauss_seidel_new(self):
        """Complex-valued gauss_seidel() and gauss_seidel_indexed() compared
        against a dense triangular-solve reference."""
        scipy.random.seed(0)
        cases = []
        A = poisson( (4,), format='csr' ); A.data = A.data + 1.0j*1e-3*rand(A.data.shape[0],)
        cases.append(A)
        A = poisson( (4,4), format='csr' ); A.data = A.data + 1.0j*1e-3*rand(A.data.shape[0],)
        cases.append(A);
        temp = asmatrix( rand(4,4) ) + 1.0j*asmatrix( rand(4,4) )
        cases.append( csr_matrix( temp.H * temp) )
        # reference implementation
        def gold(A,x,b,iterations,sweep):
            # classic splitting A = L + D + U (strictly lower / diagonal / strictly upper)
            A = A.todense()
            L = tril(A,k=-1)
            D = diag(diag(A))
            U = triu(A,k=1)
            for i in range(iterations):
                if sweep == 'forward':
                    x = solve(L + D, (b - U*x) )
                elif sweep == 'backward':
                    x = solve(U + D, (b - L*x) )
                else:
                    # symmetric sweep: forward then backward
                    x = solve(L + D, (b - U*x) )
                    x = solve(U + D, (b - L*x) )
            return x
        for A in cases:
            b = asmatrix(rand(A.shape[0],1)) + 1.0j*asmatrix(rand(A.shape[0],1)).astype(A.dtype)
            x = asmatrix(rand(A.shape[0],1)) + 1.0j*asmatrix(rand(A.shape[0],1)).astype(A.dtype)
            ##
            # Gauss-Seidel Tests
            x_copy = x.copy()
            gauss_seidel(A, x, b, iterations=1, sweep='forward')
            assert_almost_equal( x, gold(A,x_copy,b,iterations=1,sweep='forward') )
            x_copy = x.copy()
            gauss_seidel(A, x, b, iterations=1, sweep='backward')
            assert_almost_equal( x, gold(A,x_copy,b,iterations=1,sweep='backward') )
            x_copy = x.copy()
            gauss_seidel(A, x, b, iterations=1, sweep='symmetric')
            assert_almost_equal( x, gold(A,x_copy,b,iterations=1,sweep='symmetric') )
            ##
            # Indexed Gauss-Seidel Tests: full index set must equal an ordinary sweep
            x_copy = x.copy()
            gauss_seidel_indexed(A, x, b, indices=arange(A.shape[0]), iterations=1, sweep='forward')
            assert_almost_equal( x, gold(A,x_copy,b,iterations=1,sweep='forward') )
            x_copy = x.copy()
            gauss_seidel_indexed(A, x, b, indices=arange(A.shape[0]), iterations=1, sweep='backward')
            assert_almost_equal( x, gold(A,x_copy,b,iterations=1,sweep='backward') )
            x_copy = x.copy()
            gauss_seidel_indexed(A, x, b, indices=arange(A.shape[0]), iterations=1, sweep='symmetric')
            assert_almost_equal( x, gold(A,x_copy,b,iterations=1,sweep='symmetric') )
def test_gauss_seidel_csr(self):
N = 1
A = spdiags([2*ones(N),-ones(N),-ones(N)],[0,-1,1],N,N,format='csr')
A.data = A.data + 1.0j*A.data
x = arange(N).astype(A.dtype)
x = x + 1.0j*x
b = zeros(N).astype(A.dtype)
gauss_seidel(A,x,b)
assert_almost_equal(x,array([0]))
N = 3
A = spdiags([2*ones(N),-ones(N),-ones(N)],[0,-1,1],N,N,format='csr')
A.data = A.data + 1.0j*A.data
x = arange(N).astype(A.dtype)
x = x + 1.0j*x
soln = array([1.0/2.0,5.0/4.0,5.0/8.0])
soln = soln + 1.0j*soln
b = zeros(N).astype(A.dtype)
gauss_seidel(A,x,b)
assert_almost_equal(x,soln)
N = 1
A = spdiags([2*ones(N),-ones(N),-ones(N)],[0,-1,1],N,N,format='csr')
A.data = A.data + 1.0j*A.data
x = arange(N).astype(A.dtype)
x = x + 1.0j*x
b = zeros(N).astype(A.dtype)
gauss_seidel(A,x,b,sweep='backward')
assert_almost_equal(x,array([0]))
N = 3
A = spdiags([2*ones(N),-ones(N),-ones(N)],[0,-1,1],N,N,format='csr')
A.data = A.data + 1.0j*A.data
x = arange(N).astype(A.dtype)
x = x + 1.0j*x
soln = array([1.0/8.0,1.0/4.0,1.0/2.0])
soln = soln + 1.0j*soln
b = zeros(N).astype(A.dtype)
gauss_seidel(A,x,b,sweep='backward')
assert_almost_equal(x,soln)
N = 1
A = spdiags([2*ones(N),-ones(N),-ones(N)],[0,-1,1],N,N,format='csr')
A.data = A.data + 1.0j*A.data
x = arange(N).astype(A.dtype)
b = array([10]).astype(A.dtype)
gauss_seidel(A,x,b)
assert_almost_equal(x,array([2.5 - 2.5j]))
N = 3
A = spdiags([2*ones(N),-ones(N),-ones(N)],[0,-1,1],N,N,format='csr')
A.data = A.data + 1.0j*A.data
soln = array([3.0 - 2.0j, 7.5 - 5.0j, 11.25 - 10.0j])
x = arange(N).astype(A.dtype)
x = x + 1.0j*x
b = array([10,20,30]).astype(A.dtype)
gauss_seidel(A,x,b)
assert_almost_equal(x,soln)
#forward and backward passes should give same result with x=ones(N),b=zeros(N)
N = 100
A = spdiags([2*ones(N),-ones(N),-ones(N)],[0,-1,1],N,N,format='csr')
A.data = A.data + 1.0j*A.data
x = ones(N).astype(A.dtype)
x = x + 1.0j*x
b = zeros(N).astype(A.dtype)
gauss_seidel(A,x,b,iterations=200,sweep='forward')
resid1 = numpy.linalg.norm(A*x,2)
x = ones(N).astype(A.dtype)
x = x + 1.0j*x
gauss_seidel(A,x,b,iterations=200,sweep='backward')
resid2 = numpy.linalg.norm(A*x,2)
self.assert_(resid1 < 0.03 and resid2 < 0.03)
self.assert_(allclose(resid1,resid2))
    def test_jacobi_ne(self):
        """Complex-valued Jacobi on the normal equations (jacobi_ne)."""
        N = 1
        A = spdiags([2*ones(N),-ones(N),-ones(N)],[0,-1,1],N,N,format='csr')
        # make the matrix complex: entries become (1+1j) times the real values
        A.data = A.data + 1.0j*A.data
        x = arange(N).astype(A.dtype)
        b = zeros(N).astype(A.dtype)
        jacobi_ne(A,x,b)
        assert_almost_equal(x,array([0]))
        N = 3
        A = spdiags([2*ones(N),-ones(N),-ones(N)],[0,-1,1],N,N,format='csr')
        A.data = A.data + 1.0j*A.data
        soln = array([-1./6., -1./15., 19./30.])
        x = zeros(N).astype(A.dtype)
        b = arange(N).astype(A.dtype)
        b = b + 1.0j*b
        jacobi_ne(A,x,b)
        assert_almost_equal(x,soln)
        N = 3
        A = spdiags([2*ones(N),-ones(N),-ones(N)],[0,-1,1],N,N,format='csr')
        A.data = A.data + 1.0j*A.data
        soln = array([2./5. + 2.0j/5., 7./5. + 7.0j/5., 4./5. + 4.0j/5.])
        x = arange(N).astype(A.dtype)
        x = x + 1.0j*x
        b = zeros(N).astype(A.dtype)
        jacobi_ne(A,x,b)
        assert_almost_equal(x,soln)
        N = 1
        A = spdiags([2*ones(N),-ones(N),-ones(N)],[0,-1,1],N,N,format='csr')
        A.data = A.data + 1.0j*A.data
        x = arange(N).astype(A.dtype)
        b = array([10]).astype(A.dtype)
        jacobi_ne(A,x,b)
        assert_almost_equal(x,array([2.5 - 2.5j]))
        N = 3
        A = spdiags([2*ones(N),-ones(N),-ones(N)],[0,-1,1],N,N,format='csr')
        A.data = A.data + 1.0j*A.data
        soln = array([11./15. + 1.0j/15., 11./15. + 31.0j/15, 77./15. - 53.0j/15.])
        x = arange(N).astype(A.dtype)
        x = x + 1.0j*x
        b = array([10,20,30]).astype(A.dtype)
        jacobi_ne(A,x,b)
        assert_almost_equal(x,soln)
        # weighted sweep: (1-omega)*x_old + omega*(unweighted result)
        N = 3
        A = spdiags([2*ones(N),-ones(N),-ones(N)],[0,-1,1],N,N,format='csr')
        A.data = A.data + 1.0j*A.data
        x = arange(N).astype(A.dtype)
        x = x + 1.0j*x
        b = array([10,20,30]).astype(A.dtype)
        x_copy = x.copy()
        soln = 2.0/3.0*x_copy + 1.0/3.0*array([11./15. + 1.0j/15., 11./15. + 31.0j/15, 77./15. - 53.0j/15.])
        jacobi_ne(A,x,b,omega=1.0/3.0)
        assert_almost_equal(x,soln)
def test_gauss_seidel_ne_bsr(self):
cases = []
for N in [1,2,3,4,5,6,10]:
A = spdiags([2*ones(N),-ones(N),-ones(N)],[0,-1,1],N,N).tocsr()
A.data = A.data + 1.0j*1e-3*rand(A.data.shape[0],)
divisors = [ n for n in range(1,N+1) if N % n == 0 ]
x_csr = (arange(N) + 1.0j*1e-3*rand(N,)).astype(A.dtype)
x_bsr = x_csr.copy()
b = x_csr**2
gauss_seidel_ne(A,x_csr,b)
for D in divisors:
B = A.tobsr(blocksize=(D,D))
x_bsr_temp = x_bsr.copy()
gauss_seidel_ne(B,x_bsr_temp,b)
assert_almost_equal(x_bsr_temp,x_csr)
    def test_gauss_seidel_ne_new(self):
        """Complex-valued gauss_seidel_ne() compared against a dense reference
        built on AA = A*A.H."""
        scipy.random.seed(0)
        cases = []
        A = poisson( (4,), format='csr' ); A.data = A.data + 1.0j*1e-3*rand(A.data.shape[0],)
        cases.append(A)
        A = poisson( (4,4), format='csr' ); A.data = A.data + 1.0j*1e-3*rand(A.data.shape[0],)
        cases.append(A.tobsr(blocksize=(2,2)))
        temp = asmatrix( rand(4,4) ) + 1.0j*asmatrix( rand(4,4) )
        cases.append( csr_matrix( temp.T * temp) )
        # reference implementation
        def gold(A,x,b,iterations,sweep):
            A = mat(A.todense())
            # NE variant sweeps over AA = A*A.H; L and U include the diagonal (k=0)
            AA = A*A.H
            L = tril(AA,k=0)
            U = triu(AA,k=0)
            for i in range(iterations):
                if sweep == 'forward':
                    x = x + A.H*(solve(L, (b - A*x) ))
                elif sweep == 'backward':
                    x = x + A.H*(solve(U, (b - A*x) ))
                else:
                    # symmetric sweep: forward then backward
                    x = x + A.H*(solve(L, (b - A*x) ))
                    x = x + A.H*(solve(U, (b - A*x) ))
            return x
        for A in cases:
            b = asmatrix(rand(A.shape[0],1)) + 1.0j*asmatrix(rand(A.shape[0],1)).astype(A.dtype)
            x = asmatrix(rand(A.shape[0],1)) + 1.0j*asmatrix(rand(A.shape[0],1)).astype(A.dtype)
            x_copy = x.copy()
            gauss_seidel_ne(A, x, b, iterations=1, sweep='forward')
            assert_almost_equal( x, gold(A,x_copy,b,iterations=1,sweep='forward') )
            x_copy = x.copy()
            gauss_seidel_ne(A, x, b, iterations=1, sweep='backward')
            assert_almost_equal( x, gold(A,x_copy,b,iterations=1,sweep='backward') )
            x_copy = x.copy()
            gauss_seidel_ne(A, x, b, iterations=1, sweep='symmetric')
            assert_almost_equal( x, gold(A,x_copy,b,iterations=1,sweep='symmetric') )
def test_gauss_seidel_ne_csr(self):
N = 1
A = spdiags([2*ones(N),-ones(N),-ones(N)],[0,-1,1],N,N,format='csr')
x = arange(N).astype(A.dtype)
x = x + 1.0j*x
A.data = A.data + 1.0j*A.data
b = zeros(N).astype(A.dtype)
gauss_seidel_ne(A,x,b)
assert_almost_equal(x,array([0]))
N = 3
A = spdiags([2*ones(N),-ones(N),-ones(N)],[0,-1,1],N,N,format='csr')
A.data = A.data + 1.0j*A.data
soln = array([4./15., 8./5., 4./5.])
soln = soln + 1.0j*soln
x = arange(N).astype(A.dtype)
x = x + 1.0j*x
b = zeros(N).astype(A.dtype)
gauss_seidel_ne(A,x,b)
assert_almost_equal(x,soln)
N = 1
A = spdiags([2*ones(N),-ones(N),-ones(N)],[0,-1,1],N,N,format='csr')
A.data = A.data + 1.0j*A.data
x = arange(N).astype(A.dtype)
x = x + 1.0j*x
b = zeros(N).astype(A.dtype)
gauss_seidel_ne(A,x,b,sweep='backward')
assert_almost_equal(x,array([0]))
N = 3
A = spdiags([2*ones(N),-ones(N),-ones(N)],[0,-1,1],N,N,format='csr')
A.data = A.data + 1.0j*A.data
soln = array([2./5., 4./5., 6./5.])
soln = soln + 1.0j*soln
x = arange(N).astype(A.dtype)
x = x + 1.0j*x
b = zeros(N).astype(A.dtype)
gauss_seidel_ne(A,x,b,sweep='backward')
assert_almost_equal(x,soln)
N = 1
A = spdiags([2*ones(N),-ones(N),-ones(N)],[0,-1,1],N,N,format='csr')
A.data = A.data + 1.0j*A.data
x = arange(N).astype(A.dtype)
b = array([10]).astype(A.dtype)
gauss_seidel_ne(A,x,b)
assert_almost_equal(x,array([2.5 - 2.5j]))
N = 3
A = spdiags([2*ones(N),-ones(N),-ones(N)],[0,-1,1],N,N,format='csr')
A.data = A.data + 1.0j*A.data
soln = array([-1./15.+0.6j, 0.6+2.6j, 7.8-6.2j])
x = arange(N).astype(A.dtype)
x = x + 1.0j*x
b = array([10,20,30]).astype(A.dtype)
gauss_seidel_ne(A,x,b)
assert_almost_equal(x,soln)
#forward and backward passes should give same result with x=ones(N),b=zeros(N)
N = 100
A = spdiags([2*ones(N),-ones(N),-ones(N)],[0,-1,1],N,N,format='csr')
A.data = A.data + 1.0j*A.data
x = ones(N).astype(A.dtype)
x = x + 1.0j*x
b = zeros(N).astype(A.dtype)
gauss_seidel_ne(A,x,b,iterations=200,sweep='forward')
resid1 = numpy.linalg.norm(A*x,2)
x = ones(N).astype(A.dtype)
x = x + 1.0j*x
gauss_seidel_ne(A,x,b,iterations=200,sweep='backward')
resid2 = numpy.linalg.norm(A*x,2)
self.assert_(resid1 < 0.3 and resid2 < 0.3)
self.assert_(allclose(resid1,resid2))
def test_gauss_seidel_nr_bsr(self):
cases = []
for N in [1,2,3,4,5,6,10]:
A = spdiags([2*ones(N),-ones(N),-ones(N)],[0,-1,1],N,N).tocsr()
A.data = A.data + 1.0j*1e-3*rand(A.data.shape[0],)
divisors = [ n for n in range(1,N+1) if N % n == 0 ]
x_csr = (arange(N) + 1.0j*1e-3*rand(N,)).astype(A.dtype)
x_bsr = x_csr.copy()
b = x_csr**2
gauss_seidel_nr(A,x_csr,b)
for D in divisors:
B = A.tobsr(blocksize=(D,D))
x_bsr_temp = x_bsr.copy()
gauss_seidel_nr(B,x_bsr_temp,b)
assert_almost_equal(x_bsr_temp,x_csr)
def test_gauss_seidel_nr(self):
scipy.random.seed(0)
cases = []
A = poisson( (4,), format='csr' ); A.data = A.data + 1.0j*1e-3*rand(A.data.shape[0],)
cases.append(A)
A = poisson( (4,4), format='csr' ); A.data = A.data + 1.0j*1e-3*rand(A.data.shape[0],)
cases.append(A.tobsr(blocksize=(2,2)))
temp = asmatrix( rand(1,1) ) + 1.0j*asmatrix( rand(1,1) )
cases.append( csr_matrix( temp) )
temp = asmatrix( rand(2,2) ) + 1.0j*asmatrix( rand(2,2) )
cases.append( csr_matrix( temp) )
temp = asmatrix( rand(4,4) ) + 1.0j*asmatrix( rand(4,4) )
cases.append( csr_matrix( temp) )
# reference implementation
def gold(A,x,b,iterations,sweep):
A = mat(A.todense())
AA = A.H*A
L = tril(AA,k=0)
U = triu(AA,k=0)
for i in range(iterations):
if sweep == 'forward':
x = x + (solve(L, A.H*(b - A*x) ))
elif sweep == 'backward':
x = x + (solve(U, A.H*(b - A*x) ))
else:
x = x + (solve(L, A.H*(b - A*x) ))
x = x + (solve(U, A.H*(b - A*x) ))
return x
for A in cases:
b = asmatrix(rand(A.shape[0],1)) + 1.0j*asmatrix(rand(A.shape[0],1)).astype(A.dtype)
x = asmatrix(rand(A.shape[0],1)) + 1.0j*asmatrix(rand(A.shape[0],1)).astype(A.dtype)
x_copy = x.copy()
gauss_seidel_nr(A, x, b, iterations=1, sweep='forward')
assert_almost_equal( x, gold(A,x_copy,b,iterations=1,sweep='forward') )
x_copy = x.copy()
gauss_seidel_nr(A, x, b, iterations=1, sweep='backward')
assert_almost_equal( x, gold(A,x_copy,b,iterations=1,sweep='backward') )
x_copy = x.copy()
gauss_seidel_nr(A, x, b, iterations=1, sweep='symmetric')
assert_almost_equal( x, gold(A,x_copy,b,iterations=1,sweep='symmetric') )
#forward and backward passes should give same result with x=ones(N),b=zeros(N)
N = 100
A = spdiags([2*ones(N),-ones(N),-ones(N)],[0,-1,1],N,N,format='csr')
A.data = A.data + 1.0j*A.data
x = ones(N).astype(A.dtype)
x = x + 1.0j*x
b = zeros(N).astype(A.dtype)
gauss_seidel_nr(A,x,b,iterations=200,sweep='forward')
resid1 = numpy.linalg.norm(A*x,2)
x = ones(N).astype(A.dtype)
x = x + 1.0j*x
gauss_seidel_nr(A,x,b,iterations=200,sweep='backward')
resid2 = numpy.linalg.norm(A*x,2)
self.assert_(resid1 < 0.3 and resid2 < 0.3)
self.assert_(allclose(resid1,resid2))
# Test both complex and real arithmetic
# for block_jacobi and block_gauss_seidel
# Test both complex and real arithmetic
# for block_jacobi and block_gauss_seidel
class TestBlockRelaxation(TestCase):
    """Tests for the block relaxation routines ``block_jacobi`` and
    ``block_gauss_seidel`` over both real- and complex-valued matrices.

    Each test builds a list of (matrix, blocksize) cases, runs the library
    routine for one iteration, and compares the result against a dense
    reference implementation (the inline ``gold`` helpers).

    NOTE(review): this code appears to target Python 2 -- the ``gold``
    helpers index with ``i/blocksize``, which is only an integer index under
    Python 2 division (Python 3 would need ``i//blocksize``).
    """

    def test_block_jacobi(self):
        """Compare block_jacobi against a dense reference for one sweep, and
        check agreement with plain jacobi when blocksize=1."""
        scipy.random.seed(0)

        # All real valued tests: empty/zero, random, and Poisson matrices at
        # several blocksizes, plus one matrix with empty rows.
        cases = []
        A = csr_matrix(scipy.zeros((1,1)))
        cases.append( (A,1) )
        A = csr_matrix(scipy.rand(1,1))
        cases.append( (A,1) )
        A = csr_matrix(scipy.zeros((2,2)))
        cases.append( (A,1) )
        cases.append( (A,2) )
        A = csr_matrix(scipy.rand(2,2))
        cases.append( (A,1) )
        cases.append( (A,2) )
        A = csr_matrix(scipy.zeros((3,3)))
        cases.append( (A,1) )
        cases.append( (A,3) )
        A = csr_matrix(scipy.rand(3,3))
        cases.append( (A,1) )
        cases.append( (A,3) )
        A = poisson( (4,4), format='csr')
        cases.append( (A,1) )
        cases.append( (A,2) )
        cases.append( (A,4) )
        # Matrix with some entirely zero rows (exercises singular blocks).
        A = array([[ 9.1, 9.8, 9.6, 0. , 3.6, 0. ],
                   [18.2, 19.6, 0. , 0. , 1.7, 2.8],
                   [ 0. , 0. , 0. , 0. , 0. , 0. ],
                   [ 0. , 0. , 0. , 0. , 0. , 0. ],
                   [ 0. , 0. , 0. , 4.2, 1. , 1.1],
                   [ 0. , 0. , 9.1, 0. , 0. , 9.3]])
        A = csr_matrix(A)
        cases.append( (A,1) )
        cases.append( (A,2) )
        cases.append( (A,3) )

        # reference implementation of 1 iteration
        def gold(A,x,b,blocksize,omega):
            """Dense reference for one weighted block-Jacobi sweep."""
            A = csr_matrix(A)
            temp = x.copy()
            # Block diagonal of A and its (pseudo)inverse as 3D arrays.
            Dinv = get_block_diag(A, blocksize=blocksize, inv_flag=True)
            D = get_block_diag(A, blocksize=blocksize, inv_flag=False)
            # A with its block diagonal subtracted out.
            A_no_D = A - bsr_matrix( (D, scipy.arange(Dinv.shape[0]), scipy.arange(Dinv.shape[0] + 1)), shape=A.shape)
            A_no_D = csr_matrix(A_no_D)
            for i in range(0, A.shape[0], blocksize):
                r = A_no_D[i:(i+blocksize),:]*temp
                # NOTE(review): relies on Python 2 integer division for the
                # block index (i/blocksize).
                r = scipy.mat(Dinv[i/blocksize,:,:])*scipy.mat(scipy.ravel(b[i:(i+blocksize)]) - scipy.ravel(r)).reshape(-1,1)
                # Weighted (omega) Jacobi update for this block.
                x[i:(i+blocksize)] = (1.0 - omega)*temp[i:(i+blocksize)] + omega*scipy.ravel(r)
            return x

        for A,blocksize in cases:
            b = rand(A.shape[0])
            x = rand(A.shape[0])
            x_copy = x.copy()
            block_jacobi(A, x, b, blocksize=blocksize, iterations=1, omega=1.1)
            assert_almost_equal( x, gold(A, x_copy, b, blocksize, 1.1), decimal=4 )

        # check for agreement between jacobi and block jacobi with blocksize=1
        A = poisson( (4,5), format='csr')
        b = rand(A.shape[0])
        x = rand(A.shape[0])
        x_copy = x.copy()
        block_jacobi(A, x, b, blocksize=1, iterations=2, omega=1.1)
        jacobi(A, x_copy, b, iterations=2, omega=1.1)
        assert_almost_equal( x, x_copy, decimal=4 )

        # complex valued tests
        cases = []
        A = csr_matrix(scipy.rand(3,3) + 1.0j*scipy.rand(3,3))
        cases.append( (A,1) )
        cases.append( (A,3) )
        A = poisson( (4,4), format='csr')
        A.data = A.data + 1.0j*scipy.rand(A.data.shape[0])
        cases.append( (A,1) )
        cases.append( (A,2) )
        cases.append( (A,4) )
        A = array([[ 9.1j, 9.8j, 9.6, 0. , 3.6, 0. ],
                   [18.2j, 19.6j, 0. , 0. , 1.7, 2.8],
                   [ 0. , 0. , 0. , 0. , 0. , 0. ],
                   [ 0. , 0. , 0. , 0. , 0. , 0. ],
                   [ 0. , 0. , 0. , 4.2, 1.0j, 1.1],
                   [ 0. , 0. , 9.1, 0. , 0. , 9.3]])
        A = csr_matrix(A)
        cases.append( (A,1) )
        cases.append( (A,2) )
        cases.append( (A,3) )

        for A,blocksize in cases:
            b = rand(A.shape[0]) + 1.0j*rand(A.shape[0])
            x = rand(A.shape[0]) + 1.0j*rand(A.shape[0])
            x_copy = x.copy()
            block_jacobi(A, x, b, blocksize=blocksize, iterations=1, omega=1.1)
            assert_almost_equal( x, gold(A, x_copy, b, blocksize, 1.1), decimal=4 )

    def test_block_gauss_seidel(self):
        """Compare block_gauss_seidel (forward/backward/symmetric sweeps)
        against a dense reference, and check agreement with plain
        gauss_seidel when blocksize=1."""
        scipy.random.seed(0)

        # All real valued tests (same matrix zoo as test_block_jacobi).
        cases = []
        A = csr_matrix(scipy.zeros((1,1)))
        cases.append( (A,1) )
        A = csr_matrix(scipy.rand(1,1))
        cases.append( (A,1) )
        A = csr_matrix(scipy.zeros((2,2)))
        cases.append( (A,1) )
        cases.append( (A,2) )
        A = csr_matrix(scipy.rand(2,2))
        cases.append( (A,1) )
        cases.append( (A,2) )
        A = csr_matrix(scipy.zeros((3,3)))
        cases.append( (A,1) )
        cases.append( (A,3) )
        A = csr_matrix(scipy.rand(3,3))
        cases.append( (A,1) )
        cases.append( (A,3) )
        A = poisson( (4,4), format='csr')
        cases.append( (A,1) )
        cases.append( (A,2) )
        cases.append( (A,4) )
        A = array([[ 9.1, 9.8, 9.6, 0. , 3.6, 0. ],
                   [18.2, 19.6, 0. , 0. , 1.7, 2.8],
                   [ 0. , 0. , 0. , 0. , 0. , 0. ],
                   [ 0. , 0. , 0. , 0. , 0. , 0. ],
                   [ 0. , 0. , 0. , 4.2, 1. , 1.1],
                   [ 0. , 0. , 9.1, 0. , 0. , 9.3]])
        A = csr_matrix(A)
        cases.append( (A,1) )
        cases.append( (A,2) )
        cases.append( (A,3) )

        # reference implementation of 1 iteration
        def gold(A, x, b, blocksize, sweep):
            """Dense reference for one block Gauss-Seidel sweep; the
            'symmetric' sweep is a forward pass followed by a backward one."""
            A = csr_matrix(A)
            Dinv = get_block_diag(A, blocksize=blocksize, inv_flag=True)
            D = get_block_diag(A, blocksize=blocksize, inv_flag=False)
            A_no_D = A - bsr_matrix( (D, scipy.arange(Dinv.shape[0]), scipy.arange(Dinv.shape[0] + 1)), shape=A.shape)
            A_no_D = csr_matrix(A_no_D)
            if sweep == 'symmetric':
                x = gold(A, x, b, blocksize, 'forward')
                x = gold(A, x, b, blocksize, 'backward')
                return x
            elif sweep == 'forward':
                start,stop,step = (0, A.shape[0], blocksize)
            elif sweep == 'backward':
                start,stop,step = (A.shape[0] - blocksize, -blocksize, -blocksize)
            for i in range(start, stop, step):
                # Gauss-Seidel: uses x updated in place (not a copy).
                r = A_no_D[i:(i+blocksize),:]*x
                # NOTE(review): relies on Python 2 integer division for the
                # block index (i/blocksize).
                r = scipy.mat(Dinv[i/blocksize,:,:])*scipy.mat(scipy.ravel(b[i:(i+blocksize)]) - scipy.ravel(r)).reshape(-1,1)
                x[i:(i+blocksize)] = scipy.ravel(r)
            return x

        for A,blocksize in cases:
            b = rand(A.shape[0])
            # forward
            x = rand(A.shape[0])
            x_copy = x.copy()
            block_gauss_seidel(A, x, b, iterations=1, sweep='forward', blocksize=blocksize)
            assert_almost_equal( x, gold(A, x_copy, b, blocksize, 'forward'), decimal=4 )
            # backward
            x = rand(A.shape[0])
            x_copy = x.copy()
            block_gauss_seidel(A, x, b, iterations=1, sweep='backward', blocksize=blocksize)
            assert_almost_equal( x, gold(A, x_copy, b, blocksize, 'backward'), decimal=4 )
            # symmetric
            x = rand(A.shape[0])
            x_copy = x.copy()
            block_gauss_seidel(A, x, b, iterations=1, sweep='symmetric', blocksize=blocksize)
            assert_almost_equal( x, gold(A, x_copy, b, blocksize, 'symmetric'), decimal=4 )

        # check for aggreement between gauss_seidel and block gauss-seidel with blocksize=1
        A = poisson( (4,5), format='csr')
        b = rand(A.shape[0])
        # forward
        x = rand(A.shape[0])
        x_copy = x.copy()
        block_gauss_seidel(A, x, b, iterations=2, sweep='forward', blocksize=1)
        gauss_seidel(A, x_copy, b, iterations=2, sweep='forward')
        assert_almost_equal( x, x_copy, decimal=4 )
        # backward
        x = rand(A.shape[0])
        x_copy = x.copy()
        block_gauss_seidel(A, x, b, iterations=2, sweep='backward', blocksize=1)
        gauss_seidel(A, x_copy, b, iterations=2, sweep='backward')
        assert_almost_equal( x, x_copy, decimal=4 )
        # symmetric
        x = rand(A.shape[0])
        x_copy = x.copy()
        block_gauss_seidel(A, x, b, iterations=2, sweep='symmetric', blocksize=1)
        gauss_seidel(A, x_copy, b, iterations=2, sweep='symmetric')
        assert_almost_equal( x, x_copy, decimal=4 )

        # complex valued tests
        cases = []
        A = csr_matrix(scipy.rand(3,3) + 1.0j*scipy.rand(3,3))
        cases.append( (A,1) )
        cases.append( (A,3) )
        A = poisson( (4,4), format='csr')
        A.data = A.data + 1.0j*scipy.rand(A.data.shape[0])
        cases.append( (A,1) )
        cases.append( (A,2) )
        cases.append( (A,4) )
        A = array([[ 9.1j, 9.8j, 9.6, 0. , 3.6, 0. ],
                   [18.2j, 19.6j, 0. , 0. , 1.7, 2.8],
                   [ 0. , 0. , 0. , 0. , 0. , 0. ],
                   [ 0. , 0. , 0. , 0. , 0. , 0. ],
                   [ 0. , 0. , 0. , 4.2, 1.0j, 1.1],
                   [ 0. , 0. , 9.1, 0. , 0. , 9.3]])
        A = csr_matrix(A)
        cases.append( (A,1) )
        cases.append( (A,2) )
        cases.append( (A,3) )

        for A,blocksize in cases:
            b = rand(A.shape[0]) + 1.0j*rand(A.shape[0])
            # forward
            x = rand(A.shape[0]) + 1.0j*rand(A.shape[0])
            x_copy = x.copy()
            block_gauss_seidel(A, x, b, iterations=1, sweep='forward', blocksize=blocksize)
            assert_almost_equal( x, gold(A, x_copy, b, blocksize, 'forward'), decimal=4 )
            # backward
            x = rand(A.shape[0]) + 1.0j*rand(A.shape[0])
            x_copy = x.copy()
            block_gauss_seidel(A, x, b, iterations=1, sweep='backward', blocksize=blocksize)
            assert_almost_equal( x, gold(A, x_copy, b, blocksize, 'backward'), decimal=4 )
            # symmetric
            x = rand(A.shape[0]) + 1.0j*rand(A.shape[0])
            x_copy = x.copy()
            block_gauss_seidel(A, x, b, iterations=1, sweep='symmetric', blocksize=blocksize)
            assert_almost_equal( x, gold(A, x_copy, b, blocksize, 'symmetric'), decimal=4 )
#class TestDispatch(TestCase):
# def test_string(self):
# from pyamg.relaxation import dispatch
#
# A = poisson( (4,), format='csr')
#
# cases = []
# cases.append( 'gauss_seidel' )
# cases.append( ('gauss_seidel',{'iterations':3}) )
#
# for case in cases:
# fn = dispatch(case)
# fn(A, ones(4), zeros(4))
|
{"hexsha": "29dc65050d6672664132d8fe3d1d1790960f4bfd", "size": 58860, "ext": "py", "lang": "Python", "max_stars_repo_path": "pyamg/relaxation/tests/test_relaxation.py", "max_stars_repo_name": "pombreda/pyamg", "max_stars_repo_head_hexsha": "ecd464de4d16e16bc905d84df181025ddf3c1958", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-07-03T15:32:01.000Z", "max_stars_repo_stars_event_max_datetime": "2018-07-03T15:32:01.000Z", "max_issues_repo_path": "pyamg/relaxation/tests/test_relaxation.py", "max_issues_repo_name": "pombreda/pyamg", "max_issues_repo_head_hexsha": "ecd464de4d16e16bc905d84df181025ddf3c1958", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pyamg/relaxation/tests/test_relaxation.py", "max_forks_repo_name": "pombreda/pyamg", "max_forks_repo_head_hexsha": "ecd464de4d16e16bc905d84df181025ddf3c1958", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.347715736, "max_line_length": 129, "alphanum_fraction": 0.4942575603, "include": true, "reason": "import numpy,import scipy,from scipy", "num_tokens": 18782}
|
#include <idmlib/tdt/temporal_kpe.h>
#include <boost/algorithm/string/split.hpp>
#include <boost/program_options.hpp>
#include <boost/filesystem.hpp>
#include <boost/date_time/gregorian/gregorian.hpp>
#include <sf1common/ScdParser.h>
#include <idmlib/similarity/term_similarity.h>
#include "../TestResources.h"
using namespace idmlib;
using namespace idmlib::tdt;
using namespace idmlib::util;
using namespace boost::filesystem;
using namespace izenelib;
using izenelib::util::UString;
namespace po = boost::program_options;
/**
 * Command-line driver that runs temporal key-phrase extraction (TDT) over a
 * directory of SCD files and writes the detected key-phrases to a file.
 *
 * Returns 0 on success, 1 on a benign early exit (--help, no SCD files),
 * and -1 on a configuration or processing error.
 *
 * Fixes relative to the original:
 *  - `exception_file` was re-declared inside the if-block, shadowing the
 *    outer variable and leaving it permanently empty.
 *  - missing space in the "is not directory" error message.
 *  - the caught filesystem exception was ignored; its message is now shown.
 *  - `analyzer` / `kpe` were leaked on every early error return.
 */
int main(int ac, char** av)
{
    po::options_description desc("Allowed options");
    desc.add_options()
        ("help", "produce help message")
        ("resource-path,R", po::value<std::string>(), "system resource path")
        ("scd-path,S", po::value<std::string>(), "scd directory to be processed")
        ("output-file,O", po::value<std::string>(), "output key-phrases file, include the string representation and df")
        ("kma-path,K", po::value<std::string>(), "if we want to process Korean collection, specify this kma path")
        ("working-path,W", po::value<std::string>(), "temp working path used for kpe, default: ./kpe_scd_working")
        ("max-doc,M", po::value<uint32_t>(), "max doc count which will be processed.")
        ("exception-text-file,E", po::value<std::string>(), "exception text list")
    ;
    std::string default_working_path = "./temporal_kpe_working";
    izenelib::util::UString::EncodingType encoding = izenelib::util::UString::UTF_8;
    po::variables_map vm;
    po::store(po::parse_command_line(ac, av, desc), vm);
    po::notify(vm);
    if (vm.count("help")) {
        std::cout << desc << std::endl;
        return 1;
    }

    // --- required: resource path ---
    std::string resource_path;
    if (vm.count("resource-path")) {
        resource_path = vm["resource-path"].as<std::string>();
        std::cout << "resource-path: " << resource_path << std::endl;
    }
    else {
        std::cerr << "resource-path not set" << std::endl;
        std::cerr << desc << std::endl;
        return -1;
    }

    // --- optional: phrases to exclude from extraction, one per line ---
    std::vector<izenelib::util::UString> exception_text_list;
    std::string exception_file;
    if (vm.count("exception-text-file")) {
        // BUG FIX: assign to the enclosing variable; the original declared a
        // second local `exception_file` here, shadowing (and never setting)
        // the outer one.
        exception_file = vm["exception-text-file"].as<std::string>();
        std::cout << "exception-text-file: " << exception_file << std::endl;
        std::ifstream ifs(exception_file.c_str());
        std::string line;
        while (getline(ifs, line))
        {
            exception_text_list.push_back(izenelib::util::UString(line, izenelib::util::UString::UTF_8));
        }
        ifs.close();
    }

    // --- required: SCD input directory; collect insert/update SCD files ---
    std::string scd_path;
    std::vector<std::string> scdfile_list;
    if (vm.count("scd-path")) {
        scd_path = vm["scd-path"].as<std::string>();
        try{
            if (!is_directory(scd_path))
            {
                // BUG FIX: missing space made this print "<path>is not directory".
                std::cerr << scd_path << " is not directory" << std::endl;
                return -1;
            }
            directory_iterator kItrEnd;
            for (directory_iterator itr(scd_path); itr != kItrEnd; ++itr)
            {
                std::string file_name = itr->path().filename().string();
                if (ScdParser::checkSCDFormat(file_name))
                {
                    SCD_TYPE scd_type = ScdParser::checkSCDType(file_name);
                    if (scd_type == INSERT_SCD || scd_type == UPDATE_SCD)
                    {
                        scdfile_list.push_back(itr->path().string());
                    }
                }
            }
        }
        catch(std::exception& ex)
        {
            // BUG FIX: the exception was swallowed; report what went wrong.
            std::cerr << "fs error: " << ex.what() << std::endl;
            return -1;
        }
        if (scdfile_list.size() == 0)
        {
            std::cout << "no scd file under " << scd_path << std::endl;
            return 1;
        }
        std::cout << "scd-path: " << scd_path << std::endl;
    }
    else {
        std::cerr << "scd-path not set" << std::endl;
        std::cerr << desc << std::endl;
        return -1;
    }

    // --- required: output file ---
    std::string output_file;
    if (vm.count("output-file")) {
        output_file = vm["output-file"].as<std::string>();
        std::cout << "output-file: " << output_file << std::endl;
    }
    else {
        std::cerr << "output-file not set" << std::endl;
        std::cerr << desc << std::endl;
        return -1;
    }

    idmlib::util::IDMAnalyzer* analyzer = new idmlib::util::IDMAnalyzer(idmlib::util::IDMAnalyzerConfig::GetCommonTgConfig(WISEKMA_KNOWLEDGE,"",IZENEJMA_KNOWLEDGE));

    // --- working directory (recreated from scratch on every run) ---
    std::string working_path;
    if (vm.count("working-path")) {
        working_path = vm["working-path"].as<std::string>();
        std::cout << "working-path: " << working_path << std::endl;
    }
    else {
        std::cout << "working-path not set, use default " << default_working_path << std::endl;
        working_path = default_working_path;
    }
    try
    {
        boost::filesystem::remove_all(working_path);
        boost::filesystem::create_directories(working_path);
    }
    catch(std::exception& ex)
    {
        std::cerr << "delete " << working_path << " error: " << ex.what() << std::endl;
        delete analyzer;  // LEAK FIX: release the analyzer on early exit
        return -1;
    }

    uint32_t max_doc = 0;
    if (vm.count("max-doc")) {
        max_doc = vm["max-doc"].as<uint32_t>();
        std::cout << "max-doc: " << max_doc << std::endl;
    }

    std::string kpe_resource_path = resource_path + "/kpe";
    std::string rig_resource_path = resource_path + "/sim/rig";
    if (!analyzer->LoadT2SMapFile(kpe_resource_path + "/cs_ct"))
    {
        delete analyzer;  // LEAK FIX
        return -1;
    }

    // Fixed analysis window; documents dated outside it are ignored.
    DateRange date_range;
    date_range.start = boost::gregorian::from_string("2011-01-14");
    date_range.end = boost::gregorian::from_string("2011-04-22");
    TemporalKpe* kpe = new TemporalKpe(working_path, analyzer, date_range, rig_resource_path, 200000);
    if (!kpe->load(kpe_resource_path))
    {
        delete kpe;       // LEAK FIX
        delete analyzer;
        return -1;
    }
    for (uint32_t i = 0; i < exception_text_list.size(); i++)
    {
        kpe->AddException(exception_text_list[i]);
    }
    // kpe->set_tracing(izenelib::util::UString("油价", izenelib::util::UString::UTF_8));

    // --- feed every document of every SCD file into the extractor ---
    uint32_t docid = 0;
    for (uint32_t i = 0; i < scdfile_list.size(); i++)
    {
        std::string scd_file = scdfile_list[i];
        ScdParser scd_parser(encoding);
        if (!scd_parser.load(scd_file))
        {
            std::cerr << "load scd file failed." << std::endl;
            delete kpe;       // LEAK FIX
            delete analyzer;
            return -1;
        }
        ScdParser::iterator it = scd_parser.begin();
        for (; it != scd_parser.end(); ++it)
        {
            SCDDocPtr doc = (*it);
            if (!doc)
            {
                std::cerr << "scd parsing error" << std::endl;
                break;
            }
            // Skip documents with any literal "null" property value.
            std::vector<std::pair<std::string, std::string> >::iterator p;
            bool valid = true;
            for (p = doc->begin(); p != doc->end(); p++)
            {
                if (p->second == "null")
                {
                    valid = false;
                    break;
                }
                if (p->second == "(null)")
                {
                    valid = false;
                    break;
                }
            }
            if (!valid) continue;

            izenelib::util::UString title;
            izenelib::util::UString content;
            std::string tdt_str = "1";
            boost::gregorian::date date;
            for (p = doc->begin(); p != doc->end(); p++)
            {
                izenelib::util::UString property_name(p->first, izenelib::util::UString::UTF_8);
                property_name.toLowerString();
                if (property_name == izenelib::util::UString("docid", encoding))
                {
                    docid++;
                    // NOTE(review): this break only leaves the property loop,
                    // not the document/file loops -- preserved as-is; confirm
                    // whether a full stop at max-doc was intended.
                    if (max_doc > 0 && docid > max_doc) break;
                    if (docid % 1000 == 0)
                    {
                        std::cout << "Processing " << docid << std::endl;
                    }
                }
                else if (property_name == izenelib::util::UString("title", encoding))
                {
                    title = UString(p->second, UString::UTF_8);
                }
                else if (property_name == izenelib::util::UString("content", encoding))
                {
                    content = UString(p->second, UString::UTF_8);
                }
                else if (property_name == izenelib::util::UString("date", encoding))
                {
                    std::string date_str = p->second;
                    date = boost::gregorian::from_string(date_str);
                }
                else if (property_name == izenelib::util::UString("tdt", encoding))
                {
                    tdt_str = p->second;
                }
            }
            // Only index documents flagged for TDT and dated inside the window.
            if (tdt_str == "1" && date_range.Contains(date))
            {
                kpe->Insert(date, docid, title, content);
            }
        }
    }
    kpe->close();
    delete kpe;
    delete analyzer;
    // boost::filesystem::remove_all(working_path);
    return 0;
}
|
{"hexsha": "272c153e09f73329e3a60313ffdaa09a18877a98", "size": 8239, "ext": "cc", "lang": "C++", "max_stars_repo_path": "test/tdt/tdt_scd_tool.cc", "max_stars_repo_name": "izenecloud/idmlib", "max_stars_repo_head_hexsha": "ec6afd44490170a70ef980afa6d21fba8c77ed9d", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2017-11-14T06:37:25.000Z", "max_stars_repo_stars_event_max_datetime": "2017-11-14T06:37:25.000Z", "max_issues_repo_path": "test/tdt/tdt_scd_tool.cc", "max_issues_repo_name": "izenecloud/idmlib", "max_issues_repo_head_hexsha": "ec6afd44490170a70ef980afa6d21fba8c77ed9d", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/tdt/tdt_scd_tool.cc", "max_forks_repo_name": "izenecloud/idmlib", "max_forks_repo_head_hexsha": "ec6afd44490170a70ef980afa6d21fba8c77ed9d", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 4.0, "max_forks_repo_forks_event_min_datetime": "2015-09-06T05:59:29.000Z", "max_forks_repo_forks_event_max_datetime": "2020-01-17T06:11:24.000Z", "avg_line_length": 30.7425373134, "max_line_length": 163, "alphanum_fraction": 0.5874499332, "num_tokens": 2306}
|
import cv2
import base as bs
import numpy as np
def sobel_filter(img, K_size=3, sigma=1.3):
    """Apply horizontal and vertical Sobel edge filters to an image.

    Parameters
    ----------
    img : ndarray
        (H, W, C) image, or (H, W) grayscale (a channel axis is added).
    K_size : int
        Kernel/padding size.  The kernels themselves are hard-coded 3x3,
        so only K_size=3 is actually meaningful.
    sigma : float
        Unused; kept for interface compatibility.

    Returns
    -------
    (out_v, out_h) : tuple of uint8 ndarrays, each (H, W, C)
        Vertical- and horizontal-gradient responses, clipped to [0, 255].
    """
    if len(img.shape) == 3:
        H, W, C = img.shape
    else:
        img = np.expand_dims(img, axis=-1)
        H, W, C = img.shape

    # Zero padding so the kernel can slide over the image border.
    pad = K_size // 2
    # BUG FIX: np.float was deprecated and removed in NumPy >= 1.24;
    # use the builtin float (same dtype: float64).
    out_v = np.zeros((H + pad * 2, W + pad * 2, C), dtype=float)
    out_v[pad: pad + H, pad: pad + W] = img.copy().astype(float)
    out_h = np.zeros((H + pad * 2, W + pad * 2, C), dtype=float)
    out_h[pad: pad + H, pad: pad + W] = img.copy().astype(float)

    # Sobel kernels: K_h responds to horizontal gradients (left/right
    # columns), K_v to vertical gradients (top/bottom rows).
    K_h = np.zeros((K_size, K_size), dtype=float)
    K_v = np.zeros((K_size, K_size), dtype=float)
    K_h[:, 0] = -1
    K_h[:, 2] = 1
    K_h[1, 0] = -2
    K_h[1, 2] = 2
    K_v[0, :] = -1
    K_v[2, :] = 1
    K_v[0, 1] = -2
    K_v[2, 1] = 2

    # Correlate both kernels over every pixel and channel.
    tmp = out_v.copy()
    for y in range(H):
        for x in range(W):
            for c_i in range(C):
                out_v[pad + y, pad + x, c_i] = np.sum(K_v * tmp[y: y + K_size, x: x + K_size, c_i])
                out_h[pad + y, pad + x, c_i] = np.sum(K_h * tmp[y: y + K_size, x: x + K_size, c_i])

    out_v = np.clip(out_v, 0, 255)
    out_v = out_v[pad: pad + H, pad: pad + W].astype(np.uint8)
    out_h = np.clip(out_h, 0, 255)
    out_h = out_h[pad: pad + H, pad: pad + W].astype(np.uint8)
    return out_v, out_h
def pool2d(_img, w_size=8):
    """Max-pool *_img* in-place-style over non-overlapping w_size x w_size
    tiles, per channel, returning a new array of the same shape where every
    tile is filled with its maximum value.

    BUG FIX: the original copied the module-level global ``img`` instead of
    the ``_img`` parameter, so the result mixed two different images (and
    raised NameError when no global ``img`` existed).
    """
    out = _img.copy()
    h, w, c = _img.shape
    nh = h // w_size  # number of whole tiles vertically (remainder ignored)
    nw = w // w_size
    for y in range(nh):
        for x in range(nw):
            for c_i in range(c):
                tile = _img[w_size * y:w_size * (y + 1), w_size * x:w_size * (x + 1), c_i]
                out[w_size * y:w_size * (y + 1), w_size * x:w_size * (x + 1), c_i] = np.max(tile)
    return out
# Demo: load the sample image, convert it to grayscale with the project
# helper, run the Sobel filters, and display both gradient images.
img = cv2.imread("imori.jpg")
img_gray = bs.conv_to_gray(img)  # project helper; presumably BGR->gray -- TODO confirm
q_img_v, q_img_h = sobel_filter(img_gray)
bs.show_img(q_img_v)
bs.show_img(q_img_h)
|
{"hexsha": "ca074265094ce38327ecdfc044fc031a8e308127", "size": 1889, "ext": "py", "lang": "Python", "max_stars_repo_path": "MyAns/q16.py", "max_stars_repo_name": "mtbkb/Gasyori100knock", "max_stars_repo_head_hexsha": "03b9c85dde2c467403185521620ee9823f1d52e8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "MyAns/q16.py", "max_issues_repo_name": "mtbkb/Gasyori100knock", "max_issues_repo_head_hexsha": "03b9c85dde2c467403185521620ee9823f1d52e8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "MyAns/q16.py", "max_forks_repo_name": "mtbkb/Gasyori100knock", "max_forks_repo_head_hexsha": "03b9c85dde2c467403185521620ee9823f1d52e8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.1940298507, "max_line_length": 162, "alphanum_fraction": 0.5267337215, "include": true, "reason": "import numpy", "num_tokens": 709}
|
[STATEMENT]
lemma fcomp_fconst_on_fid_on[simp]: "fconst_on A c \<circ>\<^sub>\<bullet> fid_on A = fconst_on A c"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. fconst_on A c \<circ>\<^sub>\<bullet> fid_on A = fconst_on A c
[PROOF STEP]
by auto
|
{"llama_tokens": 106, "file": "CZH_Foundations_czh_sets_CZH_Sets_FBRelations", "length": 1}
|
import numpy as np
# Demonstrate numpy's outer product: np.outer flattens both inputs, so two
# 2x2 matrices are treated as length-4 vectors and yield a 4x4 result.
p, q = [[1, 0], [0, 1]], [[1, 2], [3, 4]]

print("original matrix:")
for matrix in (p, q):
    print(matrix)

result = np.outer(p, q)
print("Outer product of the said two vectors:")
print(result)
|
{"hexsha": "9aa3b46aa790b9694d95e85036034e4d49a1ed12", "size": 198, "ext": "py", "lang": "Python", "max_stars_repo_path": "outer.py", "max_stars_repo_name": "Abhi-thecoder/Inner-Outer-Product-of-Vectors", "max_stars_repo_head_hexsha": "d28f25d7a76d5e2bb4ad067c7673420ffe089169", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-09-18T14:25:33.000Z", "max_stars_repo_stars_event_max_datetime": "2020-09-18T14:25:33.000Z", "max_issues_repo_path": "outer.py", "max_issues_repo_name": "Abhi-thecoder/Inner-Outer-Product-of-Vectors-", "max_issues_repo_head_hexsha": "d28f25d7a76d5e2bb4ad067c7673420ffe089169", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "outer.py", "max_forks_repo_name": "Abhi-thecoder/Inner-Outer-Product-of-Vectors-", "max_forks_repo_head_hexsha": "d28f25d7a76d5e2bb4ad067c7673420ffe089169", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.0, "max_line_length": 48, "alphanum_fraction": 0.595959596, "include": true, "reason": "import numpy", "num_tokens": 67}
|
#!/usr/bin/env python
#
# ----------------------------------------------------------------------
#
# Brad T. Aagaard, U.S. Geological Survey
# Charles A. Williams, GNS Science
# Matthew G. Knepley, University of Chicago
#
# This code was developed as part of the Computational Infrastructure
# for Geodynamics (http://geodynamics.org).
#
# Copyright (c) 2010-2017 University of California, Davis
#
# See COPYING for license information.
#
# ----------------------------------------------------------------------
#
## @file pylith/tests/Fault.py
##
## @brief Check fault output from PyLith.
import numpy
import h5py
from spatialdata.units.NondimElasticQuasistatic import NondimElasticQuasistatic
def check_vertex_fields(testcase, filename, mesh, fieldNames):
    """
    Check vertex fields in PyLith HDF5 output against expected values.

    For each field name, compares the HDF5 data per time step / component
    against testcase.calcFaultField(), using a relative tolerance where the
    expected value is nonzero and a scaled absolute tolerance where it is
    zero.  Python 2 code (print statements, xrange).
    """
    h5 = h5py.File(filename, "r", driver="sec2")

    # Check cells
    cells = h5['topology/cells'][:]
    (ncells, ncorners) = cells.shape
    testcase.assertEqual(mesh['ncells'], ncells)
    testcase.assertEqual(mesh['ncorners'], ncorners)

    # Check vertices
    vertices = h5['geometry/vertices'][:]
    (nvertices, spaceDim) = vertices.shape
    testcase.assertEqual(mesh['nvertices'], nvertices)
    testcase.assertEqual(mesh['spaceDim'], spaceDim)

    # Check fault information
    tolerance = 1.0e-5
    normalizer = NondimElasticQuasistatic()
    normalizer._configure()
    for name in fieldNames:
        valuesE = testcase.calcFaultField(name, vertices)  # expected values
        values = h5['vertex_fields/%s' % name][:]
        (nstepsE, nverticesE, dimE) = valuesE.shape
        (nsteps, nvertices, dim) = values.shape
        testcase.assertEqual(nstepsE, nsteps)
        testcase.assertEqual(nverticesE, nvertices)
        testcase.assertEqual(dimE, dim)
        # Tractions are stored nondimensionalized; rescale the absolute
        # tolerance for them.
        scale = 1.0
        if name == "traction_change" or name == "traction":
            scale *= normalizer.pressureScale().value
        for istep in xrange(nsteps):
            for idim in xrange(dim):
                okay = numpy.zeros((nvertices,), dtype=numpy.bool)
                # Relative check where the expected value is nonzero ...
                maskR = numpy.abs(valuesE[istep,:,idim]) > 0.0
                ratio = numpy.abs(1.0 - values[istep,maskR,idim]/valuesE[istep,maskR,idim])
                if len(ratio) > 0:
                    okay[maskR] = ratio < tolerance
                # ... and a scaled absolute check where it is zero.
                maskD = ~maskR
                diff = numpy.abs(values[istep,maskD,idim] - valuesE[istep,maskD,idim]) / scale
                if len(diff) > 0:
                    okay[maskD] = diff < tolerance
                if numpy.sum(okay) != nvertices:
                    # Dump diagnostics before the assert below fails.
                    print "Error in component %d of field '%s' for timestep %d." % (idim, name, istep)
                    print "Expected values:",valuesE
                    print "Output values:",values
                    print "Expected values (not okay): ",valuesE[istep,~okay,idim]
                    print "Computed values (not okay): ",values[istep,~okay,idim]
                    print "Coordinates (not okay): ",vertices[~okay,:]
                    # Close the file before the assert aborts the test.
                    h5.close()
                testcase.assertEqual(nvertices, numpy.sum(okay))
    h5.close()
    return
def check_data(testcase, filename, mesh, fieldNames):
"""
Check properties.
"""
h5 = h5py.File(filename, "r", driver="sec2")
# Check cells
cells = h5['topology/cells'][:]
(ncells, ncorners) = cells.shape
testcase.assertEqual(mesh['ncells'], ncells)
testcase.assertEqual(mesh['ncorners'], ncorners)
# Check vertices
vertices = h5['geometry/vertices'][:]
(nvertices, spaceDim) = vertices.shape
testcase.assertEqual(mesh['nvertices'], nvertices)
testcase.assertEqual(mesh['spaceDim'], spaceDim)
# Check fault information
tolerance = 1.0e-5
for name in fieldNames:
valuesE = testcase.calcFaultInfo(name, data['vertices'])
values = h5['vertex_fields/%s' % name][:]
(nverticesE, dim) = valuesE.shape
values = values.reshape( (nvertices, dim) )
testcase.assertEqual(nverticesE, nvertices)
for i in xrange(dim):
ratio = numpy.abs(1.0 - values[:,i]/valuesE[:,i])
diff = numpy.abs(values[:,i] - valuesE[:,i])
mask = valuesE[:,i] != 0.0
okay = mask*(ratio < tolerance) + ~mask*(diff < tolerance)
if numpy.sum(okay) != nvertices:
print "Error in component %d of field '%s'." % (i, name)
print "Expected values:",valuesE
print "Output values:",values
testcase.assertEqual(nvertices, numpy.sum(okay))
h5.close()
return
# End of file
|
{"hexsha": "aadb48bda250d0860d99054d2ca79f3e6cbac622", "size": 4207, "ext": "py", "lang": "Python", "max_stars_repo_path": "pylith/tests/Fault.py", "max_stars_repo_name": "joegeisz/pylith", "max_stars_repo_head_hexsha": "f74060b7b19d7e90abf8597bbe9250c96593c0ad", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-01-20T17:18:28.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-20T17:18:28.000Z", "max_issues_repo_path": "pylith/tests/Fault.py", "max_issues_repo_name": "joegeisz/pylith", "max_issues_repo_head_hexsha": "f74060b7b19d7e90abf8597bbe9250c96593c0ad", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pylith/tests/Fault.py", "max_forks_repo_name": "joegeisz/pylith", "max_forks_repo_head_hexsha": "f74060b7b19d7e90abf8597bbe9250c96593c0ad", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.4855072464, "max_line_length": 92, "alphanum_fraction": 0.6382220109, "include": true, "reason": "import numpy", "num_tokens": 1131}
|
import pickle
import pandas as pd
import chardet
import re
import numpy as np
from nltk.stem.cistem import Cistem
from statistics import mean
stemmer = Cistem()  # German stemmer used to normalise tokens

# Word-frequency dictionary (stemmed word -> corpus frequency), built offline.
with open('../preprocessing/wordfreq.pkl', 'rb') as f:
    dereko = pickle.load(f)

INPUT = "../data/input.xlsx"
#with open(IMPORT_FILE, 'rb') as f:
#	encodingdetector = chardet.detect(f.read())
df = pd.read_excel(INPUT)
# Unique song identifier: "<Title> by <Artist>".
df['id'] = [str(df['Title'][el]) + ' by ' + str(df['Artist'][el]) for el in range(len(df['Title']))]

# Previously computed scores, extended (not overwritten) below.
EXISTING = 'results.xlsx'
existingdf = pd.read_excel(EXISTING)
def tokenise(text):
    """Strip non-letter characters, lowercase, split on whitespace, and stem
    every token with the module-level German stemmer."""
    cleaned = re.sub('[^a-zA-ZäöüÄÖÜß]', " ", str(text))
    return [stemmer.stem(word) for word in cleaned.lower().split()]
def score_text(songid):
    """Return the mean corpus frequency of the unique stemmed tokens in the
    lyrics of *songid* (higher means more common vocabulary).

    Words missing from the frequency dictionary count as 0.  Returns 0 when
    the lyrics yield no tokens at all (``mean([])`` would raise).
    """
    text = df[df['id'] == songid]['Lyrics'].values[0]
    unique_tokens = np.unique(tokenise(text))
    # dict.get replaces the original bare `except:`, which silently mapped
    # *any* error (not just a missing key) to a 0 frequency.
    values = [dereko.get(token, 0) for token in unique_tokens]
    return mean(values) if values else 0
# Score every song not already present in the results sheet, append the new
# rows, and write the sheet back out.
existing_songs = set(existingdf['Song'])
for el in df['id']:
    if el not in existing_songs:
        score = score_text(el)
        existingdf.loc[len(existingdf.index)] = [el, score]
existingdf.to_excel(EXISTING, index = False)
|
{"hexsha": "225934b1ec4a4dbd24acf50a23b570ea00844b20", "size": 1224, "ext": "py", "lang": "Python", "max_stars_repo_path": "model/model.py", "max_stars_repo_name": "moritzlschuler/learngermanwithsongs", "max_stars_repo_head_hexsha": "a7f0e470e8688de42a4c437cc3dc93ea0f1108a1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "model/model.py", "max_issues_repo_name": "moritzlschuler/learngermanwithsongs", "max_issues_repo_head_hexsha": "a7f0e470e8688de42a4c437cc3dc93ea0f1108a1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "model/model.py", "max_forks_repo_name": "moritzlschuler/learngermanwithsongs", "max_forks_repo_head_hexsha": "a7f0e470e8688de42a4c437cc3dc93ea0f1108a1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.4736842105, "max_line_length": 100, "alphanum_fraction": 0.6862745098, "include": true, "reason": "import numpy", "num_tokens": 351}
|
# Autogenerated wrapper script for YASM_jll for x86_64-w64-mingw32
export vsyasm, yasm, ytasm

using NASM_jll
JLLWrappers.@generate_wrapper_header("YASM")
# Declare the three executables shipped by this JLL package.
JLLWrappers.@declare_executable_product(vsyasm)
JLLWrappers.@declare_executable_product(yasm)
JLLWrappers.@declare_executable_product(ytasm)
# Module init: resolve the wrapped executables' on-disk paths at load time
# (Windows paths, hence the escaped backslashes).
function __init__()
    JLLWrappers.@generate_init_header(NASM_jll)
    JLLWrappers.@init_executable_product(
        vsyasm,
        "bin\\vsyasm.exe",
    )
    JLLWrappers.@init_executable_product(
        yasm,
        "bin\\yasm.exe",
    )
    JLLWrappers.@init_executable_product(
        ytasm,
        "bin\\ytasm.exe",
    )
    JLLWrappers.@generate_init_footer()
end  # __init__()
|
{"hexsha": "257866152f55f90e159d19bcbbaf409ad4ef4914", "size": 692, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/wrappers/x86_64-w64-mingw32.jl", "max_stars_repo_name": "JuliaBinaryWrappers/YASM_jll.jl", "max_stars_repo_head_hexsha": "cba1e0ac48aa3406aa50efbd7e5b8f946bc0e42f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/wrappers/x86_64-w64-mingw32.jl", "max_issues_repo_name": "JuliaBinaryWrappers/YASM_jll.jl", "max_issues_repo_head_hexsha": "cba1e0ac48aa3406aa50efbd7e5b8f946bc0e42f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/wrappers/x86_64-w64-mingw32.jl", "max_forks_repo_name": "JuliaBinaryWrappers/YASM_jll.jl", "max_forks_repo_head_hexsha": "cba1e0ac48aa3406aa50efbd7e5b8f946bc0e42f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.7142857143, "max_line_length": 66, "alphanum_fraction": 0.7239884393, "num_tokens": 200}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat May 6 21:53:21 2017
@author: Gerardo A. Rivera Tello
"""
import numpy as np
import matplotlib.pyplot as plt
#%%
def plot_data(data, cbar=0, save_img=0):
    """Render *data* as a jet-colormapped image with gaussian interpolation.

    Pass cbar=1 to attach a colorbar and save_img=1 to also save the figure
    to disk.
    NOTE(review): the saved filename "diplay_of_bytes.png" looks like a typo
    for "display" -- confirm before changing it.
    """
    fig, axes = plt.subplots()
    image = axes.imshow(data, interpolation="gaussian", cmap='jet')
    if cbar == 1:
        cbar = fig.colorbar(image)
    if save_img == 1:
        plt.savefig("diplay_of_bytes.png", dpi=500, bbox_inches='tight')
#%%
num_lines = 23
num_samples = 84
time_interval = 96
# Open the file to extract the data
with open('oscar.bin','rb') as in_bin:
    # Container array for the parsed data: (time, line, sample)
    array = np.zeros([time_interval,num_lines,num_samples])
    data = np.fromfile(in_bin,dtype=np.float32)
    # Unpack the flat float32 stream.  Per the index formula each time step
    # is offset by 2 extra words plus 1 at the start (apparently record
    # framing -- TODO confirm), and lines are stored bottom-up, hence the
    # mirrored index abs(i - num_lines + 1).
    for t in range(time_interval):
        for i in range(num_lines):
            for j in range(num_samples):
                array[t][abs(i-num_lines+1)][j]=data[(t*num_samples*num_lines+2*t+1)+(i*num_samples)+j]
#    array = np.reshape(data[:,1:-1],[time_interval,num_lines*num_samples])
#%%
# Show the first time step with a colorbar.
plot_data(array[0],1)
|
{"hexsha": "99badb6dbea5f582505cfcb570306a411d39b743", "size": 1078, "ext": "py", "lang": "Python", "max_stars_repo_path": "NETCDF scripts/Time Steps of Binary Data File/read_bin.py", "max_stars_repo_name": "DangoMelon0701/PyRemote-Sensing", "max_stars_repo_head_hexsha": "fa12545b89c937baf5f1be39a4b2f4eebf714a9a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-12-18T22:01:20.000Z", "max_stars_repo_stars_event_max_datetime": "2019-12-18T22:01:20.000Z", "max_issues_repo_path": "NETCDF scripts/Time Steps of Binary Data File/read_bin.py", "max_issues_repo_name": "DangoMelon0701/PyRemote-Sensing", "max_issues_repo_head_hexsha": "fa12545b89c937baf5f1be39a4b2f4eebf714a9a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "NETCDF scripts/Time Steps of Binary Data File/read_bin.py", "max_forks_repo_name": "DangoMelon0701/PyRemote-Sensing", "max_forks_repo_head_hexsha": "fa12545b89c937baf5f1be39a4b2f4eebf714a9a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.3684210526, "max_line_length": 103, "alphanum_fraction": 0.6623376623, "include": true, "reason": "import numpy", "num_tokens": 313}
|
[STATEMENT]
lemma map_le_on_disj_left:
"\<lbrakk> h' \<subseteq>\<^sub>m h ; h\<^sub>0 \<bottom> h\<^sub>1 ; h' = h\<^sub>0 ++ h\<^sub>1 \<rbrakk> \<Longrightarrow> h\<^sub>0 \<subseteq>\<^sub>m h"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>h' \<subseteq>\<^sub>m h; h\<^sub>0 \<bottom> h\<^sub>1; h' = h\<^sub>0 ++ h\<^sub>1\<rbrakk> \<Longrightarrow> h\<^sub>0 \<subseteq>\<^sub>m h
[PROOF STEP]
unfolding map_le_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>\<forall>a\<in>dom h'. h' a = h a; h\<^sub>0 \<bottom> h\<^sub>1; h' = h\<^sub>0 ++ h\<^sub>1\<rbrakk> \<Longrightarrow> \<forall>a\<in>dom h\<^sub>0. h\<^sub>0 a = h a
[PROOF STEP]
by (rule ballI, erule_tac x=a in ballE, auto simp: map_add_eval_left)+
|
{"llama_tokens": 328, "file": "Separation_Algebra_Map_Extra", "length": 2}
|
#include <iostream>
#include "DebugPlotVisualization.hpp"
#include <QLabel>
#include "qcustomplot.h"
#include <deque>
#include <Eigen/Core>
#include <QAction>
#include <mutex>
using namespace vizkit3d;
struct DebugPlotVisualization::Data {
    // Points received but not yet added to the plot; filled by
    // updateDataIntern() and drained by the UI-thread timer in updateUi().
    std::deque<Eigen::Vector2d> data;
    std::mutex dataMutex; // guards 'data'
    QDockWidget* dock;    // hosts the plot widget
    QCustomPlot* plot;
    std::string plotName; // last name seen; mirrored into the window title
    QAction* autoScrollAction; // checkable "auto scroll" toggle
    QTimer timer;         // periodic UI refresh (started in the constructor)
    const int maxSamples = 20000; //maximum number of samples that is displayed
    const int removeSamples = 2000; //number of samples that is removed if maxSamples is reached
};
// Builds the plot widget and its dock, wires up the context-menu /
// auto-scroll action, and starts the periodic UI-refresh timer.
DebugPlotVisualization::DebugPlotVisualization()
    : p(new Data)
{
    p->plot = new QCustomPlot();
    p->plot->addGraph(); // graph(0) is the single data series used throughout
    p->plot->setContextMenuPolicy(Qt::CustomContextMenu);
    p->plot->setMinimumHeight(200);
    p->dock = new QDockWidget("default name"); // renamed later in updateMainNode()
    p->dock->setWidget(p->plot);
    // Parenting the action to the plot ties its lifetime to the widget.
    p->autoScrollAction = new QAction("auto scroll", p->plot);
    p->autoScrollAction->setCheckable(true);
    p->autoScrollAction->setChecked(true);
    p->autoScrollAction->setToolTip("Use mouse to zoom and drag if auto scroll is disabled");
    connect(p->autoScrollAction, SIGNAL(triggered()), this, SLOT(autoScrollChecked()));
    connect(p->plot, SIGNAL(customContextMenuRequested(QPoint)), this, SLOT(contextMenuRequest(QPoint)));
    connect(&p->timer, SIGNAL(timeout()), this, SLOT(updateUi()));
    p->timer.start(250); //QTimer executes in ui thread
}
// Slot: forwards the checkable action's current state to setAutoscroll().
void DebugPlotVisualization::autoScrollChecked()
{
    setAutoscroll(p->autoScrollAction->isChecked());
}
// Toggles between automatic scrolling and manual mouse navigation.
// While auto scroll is active the user cannot drag or zoom; disabling it
// hands range control back to the mouse (see the action's tooltip).
void DebugPlotVisualization::setAutoscroll(bool enable)
{
    if(enable)
    {
        // Auto scroll drives the axes itself; disable manual interaction.
        p->plot->setRangeDrag(0);
        p->plot->setRangeZoom(0);
    }
    else
    {
        // Manual mode: allow dragging and zooming along both axes.
        p->plot->setRangeDrag(Qt::Horizontal | Qt::Vertical);
        p->plot->setRangeZoom(Qt::Horizontal | Qt::Vertical);
    }
}
// Slot: pops up a context menu (currently only the auto-scroll toggle) at
// the clicked position.
void DebugPlotVisualization::contextMenuRequest(QPoint pos)
{
    QMenu *menu = new QMenu(p->plot);
    // NOTE(review): with WA_DeleteOnClose commented out, every invocation
    // allocates a QMenu that lives until the plot (its Qt parent) is
    // destroyed. Presumably disabled on purpose -- confirm before re-enabling.
    // menu->setAttribute(Qt::WA_DeleteOnClose);
    menu->addAction(p->autoScrollAction);
    menu->popup(p->plot->mapToGlobal(pos));
}
DebugPlotVisualization::~DebugPlotVisualization()
{
    //dtor needs to be defined for unique_ptr pimpl idiom to compile
    //if dock is not managed by qt, delete it manually
    // (a parent-less dock was never handed to a QMainWindow, so Qt will not
    // free it for us; the dock in turn owns the plot widget)
    if(p->dock->parent() == nullptr)
        delete p->dock;
}
// This plugin renders nothing in the 3D scene (the plot lives in a dock
// widget), so an empty group node satisfies the vizkit3d interface.
osg::ref_ptr<osg::Node> DebugPlotVisualization::createMainNode()
{
    return new osg::Group();
}
// Timer slot (runs in the UI thread): drains queued data points into the
// plot, trims old samples, optionally follows the newest point, and
// schedules a replot if anything was added.
void DebugPlotVisualization::updateUi()
{
    // NOTE(review): this initial empty() check is done without the lock; it
    // only decides whether a redraw is scheduled, so a race here is benign.
    const bool plotNeedsRedraw = !p->data.empty();
    while(!p->data.empty())
    {
        Eigen::Vector2d dataPoint;
        {
            // Hold the lock only while touching the deque, not while
            // manipulating the plot.
            std::lock_guard<std::mutex> lock(p->dataMutex);
            if(p->data.empty())//some other thread might clear the data while we wait for the lock
                break;
            dataPoint = p->data.front();
            p->data.pop_front();
        }
        p->plot->graph(0)->addData(dataPoint.x(), dataPoint.y());
        if(p->plot->graph(0)->data()->size() > p->maxSamples)
        {
            //removing data is expensive, therefore we remove bigger batches at once
            const double removeKey = (p->plot->graph(0)->data()->begin() + p->removeSamples)->key;
            p->plot->graph(0)->removeDataBefore(removeKey);
        }
        //if auto scroll
        if(p->autoScrollAction->isChecked())
        {
            // Follow the newest point with a fixed 7-unit x window
            // (6 behind, 1 ahead) and rescale the y axis to the visible data.
            p->plot->xAxis->setRange(dataPoint.x() - 6, dataPoint.x() + 1);
            p->plot->graph(0)->rescaleValueAxis();
        }
    }
    if(plotNeedsRedraw)
    {
        //invoke to avoid calling repaint from a timer thread
        //For some reason this works while emitting a signal does not, no idea why
        QMetaObject::invokeMethod(p->plot, "replot", Qt::QueuedConnection);
    }
}
// Scene-graph update hook; the 3D node is empty, so this only keeps the
// dock's title in sync with the current plot name.
void DebugPlotVisualization::updateMainNode(osg::Node* node)
{
    p->dock->setWindowTitle(QString(p->plotName.c_str()));
}
// Discards both the not-yet-drawn queue and the already-plotted samples,
// then schedules a replot on the UI thread.
void DebugPlotVisualization::clearData()
{
    //clear all data points that have not been drawn, yet
    {
        std::lock_guard<std::mutex> lock(p->dataMutex);
        p->data.clear();
    }
    p->plot->graph(0)->clearData();
    QMetaObject::invokeMethod(p->plot, "replot", Qt::QueuedConnection);
}
// Receives a new data point from the debug-drawing framework: queues it for
// the UI-thread timer and keeps the plugin name in sync with the plot name.
void DebugPlotVisualization::updateDataIntern(vizkit3dDebugDrawings::PlotDataPoint const& value)
{
    {
        // The queue is shared with updateUi(); keep the critical section tiny.
        std::lock_guard<std::mutex> lock(p->dataMutex);
        p->data.push_back(value.data);
    }
    if(p->plotName != value.name)
    {
        p->plotName = value.name;
        setPluginName(QString("Debug Plot: ") + QString(p->plotName.c_str()));
    }
}
// Registers the plot's dock with the vizkit3d widget framework.
void DebugPlotVisualization::createDockWidgets()
{
    dockWidgets.push_back(p->dock);
}
// Macro that exports the Qt plugin entry point so the plugin can be loaded
// from ruby; this registration is optional.
VizkitQtPlugin(DebugPlotVisualization)
|
{"hexsha": "9bb876ca8725d85ed8ed074460a69dc8aa3f0d57", "size": 4873, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "viz/DebugPlotVisualization.cpp", "max_stars_repo_name": "pierrewillenbrockdfki/gui-vizkit3d_debug_drawings", "max_stars_repo_head_hexsha": "553b0bac93ef1f410e4b9842e8d9aa7391e5ddab", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "viz/DebugPlotVisualization.cpp", "max_issues_repo_name": "pierrewillenbrockdfki/gui-vizkit3d_debug_drawings", "max_issues_repo_head_hexsha": "553b0bac93ef1f410e4b9842e8d9aa7391e5ddab", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "viz/DebugPlotVisualization.cpp", "max_forks_repo_name": "pierrewillenbrockdfki/gui-vizkit3d_debug_drawings", "max_forks_repo_head_hexsha": "553b0bac93ef1f410e4b9842e8d9aa7391e5ddab", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.5310734463, "max_line_length": 105, "alphanum_fraction": 0.6400574595, "num_tokens": 1221}
|
"""
Packing module
==============
:synopsis: Prepares packed spheres for tessellation.
.. moduleauthor:: Pavel Ferkl <pavel.ferkl@gmail.com>
.. moduleauthor:: Mohammad Marvi-Mashhadi <mohammad.marvi@imdea.org>
"""
from __future__ import division, print_function
import struct
import os
import time
import random
import subprocess
import numpy as np
import pandas as pd
from scipy.stats import lognorm
import matplotlib.pyplot as plt
import spack
def simple_packing(diam):
    """Simple and fast algorithm for packing.

    Often leads to overlapping spheres. Can lead to infinite loop, thus it
    raises Exception after 10 s. Use of this algorithm at this stage is
    discouraged.

    Args:
        diam (ndarray): array of sphere diameters

    Returns:
        DataFrame: center positions and diameters of spheres

    Raises:
        Exception: when running for more than 10 s
    """
    number_of_cells = len(diam)
    rads = diam / 2
    rads.sort()
    # Cubic domain sized to 140 % of the summed bounding-cube volume.
    vol = sum((2 * rads)**3)
    vol = vol * 1.40
    lch = vol**(1.00 / 3.00)
    centers = np.zeros((number_of_cells, 3))
    finished = False
    while not finished:
        j = -1
        timeout = time.time() + 10
        while number_of_cells >= j:
            if time.time() > timeout:
                raise Exception('Timed out!')
            j = j + 1
            if j == number_of_cells:
                finished = True
                break
            # Rejection-sample a center until the whole sphere fits inside
            # the domain.  BUGFIX: the loop condition used to be inverted
            # (it re-sampled valid points and accepted out-of-domain ones).
            pick_x = lch * random.random()
            pick_y = lch * random.random()
            pick_z = lch * random.random()
            while not (rads[j] < pick_x <= lch - rads[j] and
                       rads[j] < pick_y <= lch - rads[j] and
                       rads[j] < pick_z <= lch - rads[j]):
                pick_x = lch * random.random()
                pick_y = lch * random.random()
                pick_z = lch * random.random()
            centers[j][0] = pick_x
            centers[j][1] = pick_y
            centers[j][2] = pick_z
            # new sphere must not overlap with already existing sphere
            if j > 0:
                for i in range(0, j):
                    if ((((((pick_x - centers[i][0])**2) +
                           ((pick_y - centers[i][1])**2) +
                           ((pick_z - centers[i][2])**2))**0.5) -
                         (rads[j] + rads[i])) < 0) and i != j:
                        # BUGFIX: reset all three coordinates; the original
                        # assigned centers[j][0] three times, leaving stale
                        # y and z values behind.
                        centers[j][0], centers[j][1], centers[j][2] = 0, 0, 0
                        j = j - 1
                        break
    dtf = pd.DataFrame(centers, columns=('x', 'y', 'z'))
    dtf['d'] = 2 * rads
    return dtf
def create_input(npart, domain=1.0):
    """Write the ``generation.conf`` input file for packing-generation.

    The file is written to the current working directory with default
    settings for everything except the particle count and domain size.

    Args:
        npart (int): number of spheres
        domain (float, optional): edge length of the cubic domain
    """
    # Template mirrors the exact format expected by the external program.
    conf_text = """Particles count: {0}
Packing size: {1} {1} {1}
Generation start: 1
Seed: 341
Steps to write: 1000
Boundaries mode: 1
Contraction rate: 1.328910e-005
""".format(npart, domain)
    with open('generation.conf', 'w') as conf_file:
        conf_file.write(conf_text)
def make_csd(shape, scale, npart):
    """Create cell size distribution and save it to file.

    Log-normal distribution from scipy is used. Creates ``diameters.txt`` file
    with sphere diameters.

    Args:
        shape (float): shape size parameter of log-normal distribution
        scale (float): scale size parameter of log-normal distribution
        npart (int): number of spheres

    Returns:
        ndarray: array of sphere diameters
    """
    if shape == 0:
        # Monodisperse case: every sphere gets diameter ``scale``.
        # BUGFIX: return an ndarray instead of a Python list so downstream
        # arithmetic such as ``diam / 2`` in simple_packing() works.
        diam = np.full(npart, float(scale))
    else:
        diam = lognorm.rvs(shape, scale=scale, size=npart)
    with open('diameters.txt', 'w') as fout:
        for dia in diam:
            fout.write('{0}\n'.format(dia))
    return diam
def save_csd(fname, diam, shape, scale, show_plot=False):
    """Save cell size distribution plot.
    Creates files ``*.Packing_histogram.png`` and ``*.Packing_histogram.pdf``
    with cell size distribution histogram and continual probability density
    function.
    Args:
        fname (str): base filename
        diam (ndarray): array of sphere diameters
        shape (float): shape size parameter of log-normal distribution
        scale (float): scale size parameter of log-normal distribution
        show_plot (bool, optional): create window with plot
    """
    if shape == 0:
        # Monodisperse case: just bracket the single size.
        xpos = np.linspace(scale / 2, scale * 2, 100)
    else:
        # Cover the central 98 % of the log-normal distribution.
        xpos = np.linspace(lognorm.ppf(0.01, shape, scale=scale),
                           lognorm.ppf(0.99, shape, scale=scale), 100)
    plt.figure(figsize=(12, 8))
    plt.rcParams.update({'font.size': 16})
    # Analytic PDF on top of the sampled histogram for visual comparison.
    plt.plot(xpos, lognorm.pdf(xpos, shape, scale=scale), lw=3, label='input')
    plt.hist(diam, density=True, label='spheres')
    plt.grid()
    plt.xlabel('Size')
    plt.ylabel('Probability density function')
    plt.legend()
    plt.savefig(fname + 'Packing_histogram.png', dpi=300)
    plt.savefig(fname + 'Packing_histogram.pdf')
    if show_plot:
        plt.show()
def read_results():
    """Read results of the packing algorithm.

    Packing results are read from ``packing.nfo`` and ``packing.xyzd`` files
    in the current working directory.

    Returns:
        DataFrame: center positions and diameters of spheres
        (columns ``x``, ``y``, ``z``, ``d``)
    """
    # ``packing.nfo`` is a small text report; the porosities sit in the third
    # whitespace-separated token of lines 3 and 4.
    with open("packing.nfo", "r") as fin:
        fin.readline()
        fin.readline()
        por_theory = float(fin.readline().split()[2])
        por_final = float(fin.readline().split()[2])
    print('Theoretical porosity:', por_theory)
    print('Final porosity:', por_final)
    # ``packing.xyzd`` is a flat little-endian float64 dump: x, y, z, d for
    # each sphere in turn.
    with open("packing.xyzd", "rb") as fin:
        btxt = fin.read()
    txt = list(struct.unpack("<" + "d" * (len(btxt) // 8), btxt))
    # NOTE: the original also created an empty placeholder DataFrame here,
    # which was immediately overwritten (dead code, removed).
    data = pd.DataFrame(np.reshape(txt, (-1, 4)),
                        columns=('x', 'y', 'z', 'd'))
    # Rescale diameters so the packing matches the theoretical porosity.
    data['d'] = data['d'] * ((1 - por_final) / (1 - por_theory))**(1 / 3)
    return data
def render_packing(fname, data, domain=1.0, pixels=1000):
    """Save picture of packed domain.
    Uses `spack <https://pyspack.readthedocs.io/en/latest/>`_. Creates
    ``*Packing.png`` file.
    Args:
        fname (str): base filename
        data (DataFrame): center positions and diameters of spheres
        domain (float, optional): size of domain
        pixels (int, optional): picture resolution
    """
    pack = spack.Packing(data[['x', 'y', 'z']], data['d'], L=domain)
    # Side effect: prints the sphere-contact report to stdout.
    print(pack.contacts())
    scene = pack.scene(rot=np.pi / 4, camera_height=0.5,
                       camera_dist=2.5e1, angle=4, cmap='autumn',
                       floater_color=None)
    scene.render(fname + 'Packing.png', width=pixels,
                 height=pixels, antialiasing=0.0001)
def generate_structure(flag):
    """Runs the packing algorithm.
    ``PackingGeneration.exe`` must exist. ``generation.conf`` must exist.
    Args:
        flag (str): argument to be passed to packing-generation program
    """
    # Remove any stale report so a leftover file cannot be mistaken for a
    # successful run by the caller (pack_spheres checks for packing.nfo).
    if os.path.isfile("packing.nfo"):
        os.remove("packing.nfo")
    # Block until the external generator finishes.
    subprocess.Popen(['PackingGeneration.exe', flag]).wait()
def clean_files():
    """Delete the temporary files left behind by the packing step."""
    leftovers = (
        'contraction_energies.txt',
        'diameters.txt',
        'generation.conf',
        'packing_init.xyzd',
        'packing.nfo',
        'packing_prev.xyzd',
        'packing.xyzd',
    )
    for leftover in leftovers:
        # A missing file is fine -- only remove what actually exists.
        if not os.path.exists(leftover):
            continue
        os.remove(leftover)
def pack_spheres(fname, shape, scale, number_of_cells, algorithm, maxit,
                 render, clean):
    """Packs spheres into periodic domain.
    Creates file ending ``Packing.csv`` with sphere centers and radii. Simple
    model is implemented directly, other algorithms use Vasili Baranov's `code
    <https://github.com/VasiliBaranov/packing-generation>`_.
    Args:
        fname (str): base filename
        shape (float): shape size parameter of log-normal distribution
        scale (float): scale size parameter of log-normal distribution
        number_of_cells (int): number of spheres
        algorithm (str): name of packing algorithm
        maxit (int): number of tries for packing algorithm
        render (bool): save picture of packing if True
        clean (bool): delete redundant files if True
    Raises:
        Exception: when maximum number of iterations was reached
    """
    if algorithm == 'simple':
        # In-process fallback; see simple_packing() for its caveats.
        diam = make_csd(shape, scale, number_of_cells)
        data = simple_packing(diam)
    else:
        create_input(number_of_cells)
        # The external generator can fail; retry with a freshly sampled
        # size distribution up to maxit times.
        for i in range(maxit):
            print('Iteration: {}'.format(i + 1))
            diam = make_csd(shape, scale, number_of_cells)
            generate_structure('-' + algorithm)
            # packing.nfo only exists after a successful run
            # (generate_structure removes any stale copy first).
            if os.path.isfile("packing.nfo"):
                break
        if not os.path.isfile("packing.nfo"):
            raise Exception(
                'Packing algorithm failed. ' +
                'Try to change number of particles or size distribution.')
        data = read_results()
    save_csd(fname, diam, shape, scale)
    data.to_csv(fname + 'Packing.csv', index=None)
    if render:
        render_packing(fname, data)
    if clean:
        clean_files()
|
{"hexsha": "2680220ffc3ae1b3d6c4012806b556af44d6a387", "size": 9560, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/foamgen/packing.py", "max_stars_repo_name": "japaf/foamgen", "max_stars_repo_head_hexsha": "6f456796e79de344eefb21a1ad121fd869f9fd9e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-09-07T12:08:37.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-07T12:08:37.000Z", "max_issues_repo_path": "src/foamgen/packing.py", "max_issues_repo_name": "japaf/foamgen", "max_issues_repo_head_hexsha": "6f456796e79de344eefb21a1ad121fd869f9fd9e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 17, "max_issues_repo_issues_event_min_datetime": "2019-06-02T13:27:38.000Z", "max_issues_repo_issues_event_max_datetime": "2019-09-22T22:35:46.000Z", "max_forks_repo_path": "src/foamgen/packing.py", "max_forks_repo_name": "japaf/foamgen", "max_forks_repo_head_hexsha": "6f456796e79de344eefb21a1ad121fd869f9fd9e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-03-02T07:22:02.000Z", "max_forks_repo_forks_event_max_datetime": "2020-03-02T07:22:02.000Z", "avg_line_length": 34.1428571429, "max_line_length": 79, "alphanum_fraction": 0.5732217573, "include": true, "reason": "import numpy,from scipy", "num_tokens": 2307}
|
import numpy as np
import pandas as pd
#Read in data
def read_files(input_data, holdout_data):
    """Parse the input/holdout specifications passed to FLAME_db.

    Input:
        input_data: string, name of table in database
        holdout_data: The holdout data as string filename, df
    Return:
        holdout_data: dataframe (column labels coerced to str)
    Raises:
        Exception: if input_data is not a table-name string, or if
            holdout_data is neither a DataFrame nor a filename string.
    """
    # The matching dataset lives in the database, so only its table name
    # (a string) is accepted here.
    if not isinstance(input_data, str):
        raise Exception("Need to specify the name of the table that contains the dataset in your database "\
                        "frame in parameter 'input_data'")
    # Now read the holdout data
    if isinstance(holdout_data, pd.DataFrame):
        df_holdout = holdout_data
    elif isinstance(holdout_data, str):
        df_holdout = pd.read_csv(holdout_data)
    else:
        # BUGFIX: typo in the error message ("shoule" -> "should").
        raise Exception("Holdout_data should be a dataframe or a directory")
    # Normalise column labels to strings so later name lookups are uniform.
    df_holdout.columns = [str(col) for col in df_holdout.columns]
    return df_holdout
#Check if holdout file is legal
def check_holdout_file(df, treatment_column_name, outcome_column_name):
    """Validate that the holdout dataframe is usable by FLAME_db.

    The treatment and outcome columns must exist, and the treatment column
    must contain exactly the values 0 and 1.
    """
    columns = df.columns
    if treatment_column_name not in columns:
        raise Exception('Invalid input error. Treatment column name does not'
                        ' exist')
    if outcome_column_name not in columns:
        raise Exception('Invalid input error. Outcome column name does not'
                        ' exist')
    # Both treatment levels must be present and nothing else.
    treatment_values = set(df[treatment_column_name].unique())
    if treatment_values != {0, 1}:
        raise Exception('Invalid input error. All rows in the treatment '
                        'column must have either a 0 or a 1 value.')
    return
def check_stops(early_stop_un_c_frac, early_stop_un_t_frac, early_stop_pe, early_stop_pe_frac,
                early_stop_iterations = None):
    """Validate the early-stopping criteria passed to DAME/FLAME.

    Every fraction must lie in [0.0, 1.0] and the PE threshold must be
    non-negative; any violation raises Exception with a descriptive message.
    """
    # todo: add check for epsilon on FLAME
    # Unmatched-treatment fraction must be a proportion.
    if early_stop_un_t_frac < 0.0 or early_stop_un_t_frac > 1.0:
        raise Exception('The value provided for the early stopping critera '
                        'of proportion of unmatched treatment units needs to '
                        'be between 0.0 and 1.0')
    # Unmatched-control fraction must be a proportion.
    if early_stop_un_c_frac < 0.0 or early_stop_un_c_frac > 1.0:
        raise Exception('The value provided for the early stopping critera '
                        'of proportion of unmatched control units needs to '
                        'be between 0.0 and 1.0')
    # Predictive-error threshold cannot be negative.
    if early_stop_pe < 0.0:
        raise Exception('The value provided for the early stopping critera '
                        'of PE needs to be non-negative ')
    # PE fraction must be a proportion.
    if early_stop_pe_frac < 0.0 or early_stop_pe_frac > 1.0:
        raise Exception('The value provided for the early stopping critera of'
                        ' proportion of PE needs to be between 0.0 and 1.0')
    return
#Check all other hyperparameters:
def check_parameters(df,adaptive_weights,weight_array,C, k, ratio, matching_option,verbose,alpha, max_depth,
                     random_state, missing_data_replace, missing_holdout_replace, missing_holdout_imputations=None):
    """Validate the remaining FLAME_db hyperparameters.

    Each check raises Exception with a descriptive message on the first
    violation found; None is returned when everything is acceptable.
    The checks run in a fixed order, so with several bad inputs only the
    first one is reported.
    """
    # Checks on the weight array...if the weight array needs to exist
    if (adaptive_weights == False):
        # Confirm that weight array has the right number of values in it
        # Subtracting 2 because one col is the treatment and one is outcome.
        if (len(weight_array) != (len(df.columns)-2)):
            raise Exception('Invalid input error. Weight array size not equal'\
                            ' to number of columns in dataframe')
        # Confirm that weights in weight vector add to 1.
        if (abs(sum(weight_array) - 1.0) >= 0.001):
            # I do this weird operation instead of seeing if it equals one
            # to avoid floating point addition errors that can occur.
            raise Exception('Invalid input error. Weight array values must '\
                            'sum to 1.0')
    else:
        # Adaptive weights: only the two supported learners are accepted.
        if (adaptive_weights != "ridge" and
            adaptive_weights != "decisiontree"):
            raise Exception("Invalid input error. The acceptable values for "\
                            "the adaptive_weights parameter are 'ridge', "\
                            "'decisiontree'. Additionally, "\
                            "adaptive-weights may be 'False' along "\
                            "with a weight array")
    if(C < 0.0):
        raise Exception('The C, or the hyperparameter to trade-off between'\
                        ' balancing factor and predictive error must be '\
                        ' nonnegative. ')
    if k < 0.0 or (not isinstance(k, int)):
        raise Exception('Invalid input error. The k must be'\
                        'a postive integer.')
    if(ratio > 1.0 or ratio < 0.0):
        raise Exception('Invalid input error. ratio value must '\
                        'be positive and smaller than 1.0 \n'\
                        'Recommended 0.01 and please do not adjust it unless necessary ')
    if (matching_option not in [0,1,2,3]):
        raise Exception('Invalid input error. matching_option value must '\
                        'be 0, 1, 2 or 3')
    if (verbose not in [0,1,2,3]):
        raise Exception('Invalid input error. The verbose option must be'\
                        'the integer 0,1,2 or 3.')
    if alpha < 0.0 or not (isinstance(alpha, int) or isinstance(alpha, float)):
        raise Exception('Invalid input error. The alpha needs to be '\
                        'positive for ridge regressions.')
    if max_depth < 0.0 or not (isinstance(max_depth, int) or isinstance(max_depth, float)):
        raise Exception('Invalid input error. The max_depth must be'\
                        'a postive integer.')
    if (random_state!= None and random_state < 0.0) or not (isinstance(random_state, int) or random_state == None) :
        raise Exception('Invalid input error. The random_state must be'\
                        'a postive integer or None.')
    if missing_data_replace not in [0,1,2]:
        raise Exception('Invalid input error. missing_data_replace value must '\
                        'be 0, 1 or 2')
    if missing_holdout_replace not in [0,1]:
        raise Exception('Invalid input error. missing_holdout_replace value must '\
                        'be 0, or 1')
    # NOTE(review): missing_holdout_imputations is currently accepted but
    # never validated (the check below is disabled).
    # if missing_holdout_imputations <= 0.0 or (not isinstance(missing_holdout_imputations, int)):
    #     raise Exception('Invalid input error. The missing_holdout_imputations must be'\
    #                     'a postive integer.')
    return
#Check if input file in the database is legal
def check_input_file(df, cur, conn, treatment_column_name, outcome_column_name):
    """Validate the dataset table inside the database.

    Args:
        df: name of the database table holding the dataset (string).
        cur: open database cursor used to run the validation queries.
        conn: database connection (accepted for interface symmetry; unused).
        treatment_column_name: column that must contain only 0/1 values.
        outcome_column_name: outcome column; queried only to confirm it exists.
    """
    db_name = df
    # NOTE(review): queries are built with str.format, so the table/column
    # names must come from trusted code, never user input (SQL injection risk).
    # Confirm that the treatment column name exists in database.
    cur.execute('''(SELECT {1} FROM {0})'''.format(db_name,treatment_column_name))
    res = np.array(cur.fetchall())
    # column only has 0s and 1s.
    if (set(np.unique(res)) != {0,1}):
        raise Exception('Invalid input error. All rows in the treatment '\
                        'column must have either a 0 or a 1 value.')
    # Confirm that the outcome column name exists in database.
    # (The query raises on a missing column; the fetched values are unused.)
    cur.execute('''(SELECT {1} FROM {0})'''.format(db_name,outcome_column_name))
    res = np.array(cur.fetchall())
    return
def check_missings(db_name, df_holdout, conn, missing_data_replace,
                   missing_holdout_replace,treatment_column_name, outcome_column_name):
    """Apply the missing-data policies to the matching table and holdout df.

    missing_data_replace: 0 = auto-detect (falls through to 2 when NULLs are
    found), 1 = delete rows with NULLs from the database table, 2 = leave
    NULLs in place (handled downstream).  missing_holdout_replace: 0 =
    auto-detect (falls through to 1), 1 = drop holdout rows with NaNs.

    Returns the (possibly reduced) holdout dataframe.
    """
    cov_l = df_holdout.columns
    cur = conn.cursor()
    mice_on_holdout = False
    if (missing_data_replace == 0):
        # Count rows with a NULL in any covariate column.
        cur.execute(''' select count(*) from {1}
                    where {0}'''.format(' OR '.join([ '{1}.{0} is NULL'.format(v, db_name) for v in cov_l ]),
                                        db_name))
        num_null = cur.fetchall()[0][0]
        if num_null>0:
            print('There is missing data in this dataset. The default missing '\
                  'data handling is being done, so we are not matching on '\
                  'any missing values in the matching set')
            missing_data_replace = 2
    if (missing_data_replace == 1):
        # Drop every row containing a NULL directly in the database.
        cur.execute(''' delete from {1}
                    where {0}'''.format(' OR '.join([ '{1}.{0} is null'.format(v, db_name) for v in cov_l ]),
                                        db_name))
        conn.commit()
    if (missing_data_replace == 2):
        # so replacing with large unique values will only work if columns
        # are in order!!
        pass
    if (missing_holdout_replace == 0 and
        df_holdout.isnull().values.any() == True):
        print('There is missing data in this dataset. The default missing '\
              'data handling is being done, so we will drop units with missinig data')
        missing_holdout_replace = 1
    if (missing_holdout_replace == 1):
        df_holdout = df_holdout.dropna()
    return df_holdout
|
{"hexsha": "f4e28aaf19652a529c63ce56332fe9d75eb72793", "size": 9498, "ext": "py", "lang": "Python", "max_stars_repo_path": "dame_flame/flame_db/checker.py", "max_stars_repo_name": "ALEXLANGLANG/DAME-FLAME-Python-Package", "max_stars_repo_head_hexsha": "5fdcaa71cb3708418326348876b1070fc540c65e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "dame_flame/flame_db/checker.py", "max_issues_repo_name": "ALEXLANGLANG/DAME-FLAME-Python-Package", "max_issues_repo_head_hexsha": "5fdcaa71cb3708418326348876b1070fc540c65e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "dame_flame/flame_db/checker.py", "max_forks_repo_name": "ALEXLANGLANG/DAME-FLAME-Python-Package", "max_forks_repo_head_hexsha": "5fdcaa71cb3708418326348876b1070fc540c65e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.0265486726, "max_line_length": 119, "alphanum_fraction": 0.5881238155, "include": true, "reason": "import numpy", "num_tokens": 2142}
|
# coding=utf8
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ResNet Train/Eval module.
"""
import time
import sys
import os
# import glob
import numpy as np
import dataloader
import json
from tqdm import tqdm
from collections import Counter
import densenet
import resnet
from PIL import Image
import torchvision
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
from torch.utils.data import DataLoader
import torch.nn.functional as F
from sklearn.metrics import roc_auc_score
from tools import parse
from glob import glob
from skimage import measure
import sys
# Pin training to the first GPU before any CUDA context is created.
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
# Python 2 only: force the process-wide default string encoding to UTF-8.
reload(sys)
sys.setdefaultencoding('utf8')
import traceback
# from moxing.framework import file
args = parse.args
# anchor sizes
args.anchors = [8, 12, 18, 27, 40, 60]
args.stride = 8
args.image_size = [1300,64]
#args.image_size = [64, 64]
datadir = parse.datadir
#datadir = '/home/work/user-job-dir/ocr_densenet/data'
# Remote (OBS) locations for the dataset and model checkpoints; the
# copy_parallel calls that used them are currently disabled below.
data_dir_obs = 's3://densenet-214/data/dataset'
model_dir='/home/work/user-job-dir/model_15'
model_dir_obs='s3://densenet-214/out/model_15'
### run beforehand #####
#print 'datadir is exist ? ', os.path.exists(datadir)
print 'datadir path', args.data_dir
print 'data_dir_obs path', data_dir_obs
print 'current path', os.getcwd()
print '================================================'
print os.listdir(os.getcwd())
# file.copy_parallel(data_dir_obs, args.data_dir)
# file.copy_parallel(model_dir_obs, model_dir)
print 'batch-size:',args.batch_size
print '===================data_dir============================='
# print os.listdir(args.data_dir)
class DenseNet121(nn.Module):
    """Model modified.
    The architecture of our model is the same as standard DenseNet121
    except the classifier layer which has an additional sigmoid function.
    """
    def __init__(self, out_size):
        # out_size: number of output classes produced by the 1x1 classifier.
        super(DenseNet121, self).__init__()
        self.inplanes = 1024
        self.densenet121 = densenet.densenet121(pretrained=False, small=args.small)
        num_ftrs = self.densenet121.classifier.in_features
        self.classifier_font = nn.Sequential(
            # classification could be done with a fully-connected layer:
            # nn.Linear(num_ftrs, out_size)
            # here a 1x1 convolution is used as the classifier instead
            nn.Conv2d(num_ftrs, out_size, kernel_size=1, bias=False)
        )
        self.train_params = []
        self.unpool = nn.MaxUnpool2d(kernel_size=2, stride=2)
    # Used to build the 4 blocks of a ResNet.
    # NOTE(review): appears unused within the visible code -- verify callers
    # before removing.
    def _make_layer(self, block, planes, blocks, stride=1):
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            # Projection shortcut to match spatial size / channel count.
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)
    # Defines how data flows through the layers.
    # The shape comments below assume the batch/feature sizes noted by the
    # original author (e.g. batch 32) -- TODO confirm for other configs.
    def forward(self, x, phase='train'):
        feats = self.densenet121(x)  # (32, 1024, 2, 16)
        if not args.small:
            feats = F.max_pool2d(feats, kernel_size=2, stride=2)  # (32, 1024, 1, 8)
        out = self.classifier_font(feats)  # (32, 1824, 1, 8)
        out_size = out.size()
        # print out.size()
        out = out.view(out.size(0), out.size(1), -1)  # (32, 1824, 8)
        # print out.size()
        if phase == 'train':
            # Global max over spatial positions -> one score per class.
            out = F.adaptive_max_pool1d(out, output_size=(1)).view(out.size(0), -1)  # (32, 1824)
            return out
        else:
            # Inference: keep the per-position scores and also return the
            # backbone features.
            out = out.transpose(1, 2).contiguous()
            out = out.view(out_size[0], out_size[2], out_size[3], out_size[1])  # (32, 1, 8, 1824)
            return out, feats
class Loss(nn.Module):
    def __init__(self):
        super(Loss, self).__init__()
        # BCE for classification targets, sigmoid to map logits into [0, 1],
        # smooth-L1 for box-regression targets (used by _forward).
        self.classify_loss = nn.BCELoss()
        self.sigmoid = nn.Sigmoid()
        self.regress_loss = nn.SmoothL1Loss()
    def forward(self, font_output, font_target, weight=None, use_hard_mining=False):
        # Binary cross-entropy between sigmoid(logits) and the 0/1 targets;
        # optionally augmented with hard-example mining.
        # Returns [total_loss, pos_loss, neg_loss].
        font_output = self.sigmoid(font_output)
        font_loss = F.binary_cross_entropy(font_output, font_target, weight)
        # hard_mining
        if use_hard_mining:
            font_output = font_output.view(-1)
            font_target = font_target.view(-1)
            pos_index = font_target > 0.5
            neg_index = font_target == 0
            # pos: keep the hardest (lowest-scoring) quarter, at least 5.
            # NOTE(review): len(...)/4 relies on Python 2 integer division;
            # under Python 3 this becomes a float count -- confirm intent.
            pos_output = font_output[pos_index]
            pos_target = font_target[pos_index]
            num_hard_pos = max(len(pos_output) / 4, min(5, len(pos_output)))
            if len(pos_output) > 5:
                # hard_mining() is defined elsewhere in this file.
                pos_output, pos_target = hard_mining(pos_output, pos_target, num_hard_pos, largest=False)
            pos_loss = self.classify_loss(pos_output, pos_target) * 0.5
            # neg: keep twice as many hardest (highest-scoring) negatives.
            num_hard_neg = len(pos_output) * 2
            neg_output = font_output[neg_index]
            neg_target = font_target[neg_index]
            neg_output, neg_target = hard_mining(neg_output, neg_target, num_hard_neg, largest=True)
            neg_loss = self.classify_loss(neg_output, neg_target) * 0.5
            font_loss += pos_loss + neg_loss
        else:
            pos_loss, neg_loss = font_loss, font_loss
        return [font_loss, pos_loss, neg_loss]
def _forward(self, font_output, font_target, weight, bbox_output=None, bbox_label=None, seg_output=None,
seg_labels=None):
font_output = self.sigmoid(font_output)
font_loss = F.binary_cross_entropy(font_output, font_target, weight)
acc = []
if bbox_output is not None:
# bbox_loss = 0
bbox_output = bbox_output.view((-1, 4))
bbox_label = bbox_label.view((-1, 4))
pos_index = bbox_label[:, -1] >= 0.5
pos_index = pos_index.unsqueeze(1).expand(pos_index.size(0), 4)
neg_index = bbox_label[:, -1] <= -0.5
neg_index = neg_index.unsqueeze(1).expand(neg_index.size(0), 4)
# 正例
pos_label = bbox_label[pos_index].view((-1, 4))
pos_output = bbox_output[pos_index].view((-1, 4))
lx, ly, ld, lc = pos_label[:, 0], pos_label[:, 1], pos_label[:, 2], pos_label[:, 3]
ox, oy, od, oc = pos_output[:, 0], pos_output[:, 1], pos_output[:, 2], pos_output[:, 3]
regress_loss = [
self.regress_loss(ox, lx),
self.regress_loss(oy, ly),
self.regress_loss(od, ld),
]
pc = self.sigmoid(oc)
acc.append((pc >= 0.5).data.cpu().numpy().astype(np.float32).sum())
acc.append(len(pc))
# print pc.size(), lc.size()
classify_loss = self.classify_loss(pc, lc) * 0.5
# 负例
neg_label = bbox_label[neg_index].view((-1, 4))
neg_output = bbox_output[neg_index].view((-1, 4))
lc = neg_label[:, 3]
oc = neg_output[:, 3]
pc = self.sigmoid(oc)
acc.append((pc <= 0.5).data.cpu().numpy().astype(np.float32).sum())
acc.append(len(pc))
# print pc.size(), lc.size()
classify_loss += self.classify_loss(pc, lc + 1) * 0.5
# seg_loss
seg_output = seg_output.view(-1)
seg_labels = seg_labels.view(-1)
pos_index = seg_labels > 0.5
neg_index = seg_labels < 0.5
seg_loss = 0.5 * self.classify_loss(seg_output[pos_index], seg_labels[pos_index]) + \
0.5 * self.classify_loss(seg_output[neg_index], seg_labels[neg_index])
seg_tpr = (seg_output[pos_index] > 0.5).data.cpu().numpy().astype(np.float32).sum() / len(
seg_labels[pos_index])
seg_tnr = (seg_output[neg_index] < 0.5).data.cpu().numpy().astype(np.float32).sum() / len(
seg_labels[neg_index])
# print seg_output[neg_index]
# print seg_labels[neg_index]
else:
return font_loss
if args.model == 'resnet':
loss = font_loss + classify_loss + seg_loss
else:
loss = font_loss + classify_loss + seg_loss
for reg in regress_loss:
loss += reg
# if args.model == 'resnet':
# loss = seg_loss
return [loss, font_loss, seg_loss, classify_loss] + regress_loss + acc + [seg_tpr, seg_tnr]
font_num = font_target.sum(0).data.cpu().numpy()
font_loss = 0
for di in range(font_num.shape[0]):
if font_num[di] > 0:
font_output_i = font_output[:, di]
font_target_i = font_target[:, di]
pos_font_index = font_target_i > 0.5
font_loss += 0.5 * self.classify_loss(font_output_i[pos_font_index], font_target_i[pos_font_index])
neg_font_index = font_target_i < 0.5
if len(font_target_i[neg_font_index]) > 0:
font_loss += 0.5 * self.classify_loss(font_output_i[neg_font_index], font_target_i[neg_font_index])
font_loss = font_loss / (font_num > 0).sum()
return font_loss
# '''
def hard_mining(neg_output, neg_labels, num_hard, largest=True):
    """Select the `num_hard` hardest samples by score.

    With ``largest=True`` the highest-scoring entries of `neg_output` are
    kept (hard negatives); with ``largest=False`` the lowest-scoring ones
    (hard positives).  The count is clamped to at least 10 and at most the
    number of available samples.  Returns the selected (outputs, labels).
    """
    total = len(neg_output)
    keep = min(max(num_hard, 10), total)
    # topk gives the indices of the selected samples, sorted by score.
    _, picked = torch.topk(neg_output, min(keep, total), largest=largest)
    return neg_output.index_select(0, picked), neg_labels.index_select(0, picked)
def save_model(save_dir, phase, name, epoch, f1score, model):
    """Checkpoint `model` to ``save_dir/<args.model>/<phase>/<name>.ckpt``.

    The saved dict holds the CPU state_dict plus `epoch` and `f1score`.
    "best" checkpoints with f1score > 0.3 additionally get an epoch-tagged
    copy so later best models do not overwrite earlier ones.
    """
    # Build the nested output directory one level at a time.
    for part in (args.model, phase):
        if not os.path.exists(save_dir):
            os.mkdir(save_dir)
        save_dir = os.path.join(save_dir, part)
    if not os.path.exists(save_dir):
        os.mkdir(save_dir)
    # Move every weight tensor to CPU so the checkpoint loads without a GPU.
    state_dict = model.state_dict()
    for key in state_dict.keys():
        state_dict[key] = state_dict[key].cpu()
    state_dict_all = {
        'state_dict': state_dict,
        'epoch': epoch,
        'f1score': f1score,
    }
    torch.save(state_dict_all, os.path.join(save_dir, '{:s}.ckpt'.format(name)))
    # Keep an extra, epoch-tagged copy of sufficiently good "best" checkpoints.
    if 'best' in name and f1score > 0.3:
        torch.save(state_dict_all, os.path.join(save_dir, '{:s}_{:s}.ckpt'.format(name, str(epoch))))
def mkdir(path):
    """Create directory `path` if it does not already exist.

    Replaces the original check-then-create (``os.path.exists`` followed by
    ``os.mkdir``), which could raise if another process created the
    directory between the two calls, with race-free EAFP: attempt the
    creation and ignore an "already exists" error.  Any other OSError
    (permissions, missing parent, ...) still propagates.
    """
    import errno  # local import keeps this fix self-contained
    try:
        os.mkdir(path)
    except OSError as e:
        # EEXIST covers both a pre-existing path and a concurrent creator.
        if e.errno != errno.EEXIST:
            raise
def test(model, train_loader, phase='test'):
    """Run inference over `train_loader` and write a ``name,content`` CSV of
    recognized text (one row per input image).

    NOTE(review): despite its name, `train_loader` is simply whatever loader
    is passed in (the caller passes the test loader).  `labels` is loaded
    but never used here.
    """
    print '\ntest {:s}_files, epoch: {:d}'.format(phase, 1)
    mkdir('../../../intermediate_file/recognize_result')
    model.eval()
    f1score_list = []
    recall_list = []
    precision_list = []
    # word -> class-index map, inverted so predictions map back to characters
    word_index_dict = json.load(open(args.word_index_json, 'r'))
    index_word_dict = {v: k for k, v in word_index_dict.items()}
    # result_file = file.File(datadir + '/data/result/{:d}_{:s}_result.csv'.format(epoch, phase), 'w')
    #modify here for single word
    # result_file = open(datadir + '/data/result/{:d}_{:s}_result_single_64.csv'.format(1, phase), 'w')
    #origin_csv is here
    result_file = open('../../../intermediate_file/recognize_result/result.csv', 'w')
    result_file.write('name,content\n')
    name_f1score_dict = dict()
    # Save the features produced by the densenet (disabled)
    # feat_dir = args.data_dir.replace('dataset', 'feats')
    # mkdir(feat_dir)
    # feat_dir = os.path.join(feat_dir, phase)
    # print feat_dir
    # mkdir(feat_dir)
    names = []
    probs_all = []
    for i, data in enumerate(train_loader):
        if i % 50 == 0:
            print('step[{:d}] OK...'.format(i))
        # image name without path or the '.seg' suffix
        name = data[0][0].split('/')[-1].split('.seg')[0]
        names.append(name)
        images, labels = [Variable(x.cuda(async=True)) for x in data[1:3]]
        # print 'images.shape:',images.shape,'len(images.size()):',len(images.size())
        if len(images.size()) == 5:
            # a stack of patches for one image: drop the leading batch dim
            images = images[0]
        probs, feats = model(images, 'test')
        probs_all.append(probs.data.cpu().numpy().max(2).max(1).max(0))
        # pdb.set_trace()
        probs_temp=probs.data.cpu().numpy()
        #====================================
        # Collapse the vertical axis, then keep only the single best class per
        # horizontal position; all other scores are forced to -inf.
        probs_temp = probs_temp.max(1)
        max_preds_line=np.max(probs_temp,axis=2)#max preds among 9115 words
        max_preds_index=np.argmax(probs_temp , axis=2)
        # print 'max_preds_index.shape',max_preds_index.shape
        # print 'max_preds_line.shape',
        # print 'max_preds_index:',max_preds_index.shape,'\nmax_preds_line:',max_preds_line
        probs_temp=probs_temp*0-float('inf')
        # max_preds_index[0,]
        # NOTE(review): this loop variable shadows the outer batch counter `i`
        # (harmless, since `i` is reassigned by enumerate each iteration).
        for i in range(max_preds_index.shape[1]):
            probs_temp[0,i,max_preds_index[0,i]]=max_preds_line[0,i]
        preds = probs_temp > 0.0  # (-1, 8, 1824)
        # result_file.write(name+',')
        result = u''
        # last_set = set()
        # all_set = set()
        # if args.feat:
        #     # save all the feats
        #     feats = feats.data.cpu().numpy()
        #     if i == 0:
        #         print feats.shape
        #     np.save(os.path.join(feat_dir, name.replace('.jpg', '.npy')), feats)
        #     if len(feats) > 1:  # feats: [-1, 1024, 1, 8]
        #         # multiple patches
        #         new_feats = []
        #         for i, feat in enumerate(feats):
        #             if i == 0:
        #                 # first patch: keep the first 6
        #                 new_feats.append(feat[:, :, :6])
        #             elif i == len(feats) - 1:
        #                 # last patch: keep the last 6
        #                 new_feats.append(feat[:, :, 2:])
        #             else:
        #                 # keep the middle 4
        #                 new_feats.append(feat[:, :, 2:6])
        #         feats = np.concatenate(new_feats, 2)
        # This approach detects the same character in different regions; when
        # the same character repeats within one region it may not be detected
        # multiple times.
        # print 'name',name,'before preds.shape:',preds.shape,'type(preds):',type(preds)
        # preds = preds.max(1)  # pool along the vertical direction
        # print 'max_preds_index:',max_preds_index.shape,'\nmax_preds_line:',max_preds_line
        # print preds
        # if len(preds) > 1:
        #     print name
        # print 'name',name,'after preds.shape:',preds.shape,'type(preds):',type(preds)
        lines_words_indexs=[]
        # lines_list=[]
        # print 'name',name
        # Collect (position, placeholder-word, class-index) triples for every
        # above-threshold prediction.
        for patch_i, patch_pred in enumerate(preds):#patch_pred stand for per image feature map 64*9115
            # print 'patch_i:',patch_i
            # each_feature_map_words=set()
            num=0
            for part_i, part_pred in enumerate(patch_pred):#part_i:0-64 part_pred:1*9115
                # print 'name',name,'part_pred.shape:',part_pred.shape,'type(part_pred):',type(part_pred)
                # print part_i
                # new_set = set()
                # print result
                for idx, p in enumerate(part_pred):#idx:0-9114
                    if p:
                        w=''
                        # w = index_word_dict[idx]
                        lines_words_indexs.append([part_i,w,idx])
                        # print part_i,':',w
                        # new_set.add(w)
                        # # if word_temp!=w
                        # #     count+=1
                        # if w not in all_set:
                        #     # a never-seen character
                        #     all_set.add(w)
                        #     result += w
                        #     # count_list.append(count)
                        #     # count=0
                        # # elif w!=result[-1]:
                        # #     result += w
                        # #     print 'result[-1]:',result[-1]
                        # elif w not in last_set:
                        #     # not in last line
                        #     if patch_i == 0:  #patch always 0
                        #         # first patch  # the previous part did not contain this character
                        #         result += w
                        #     elif part_i >= preds.shape[1] / 2:
                        #         # latter half of subsequent patches: skip  # previous part did not contain it
                        #         result += w
                # last_set = new_set
        # Cluster detections whose horizontal positions are within `neigbour`
        # of each other into candidate characters.
        words_total_list=[]
        word_list=[]
        lines_list=[]
        line_list=[]
        index_list=[]
        indexes_list=[]
        neigbour=2
        # NOTE(review): this loop also shadows the outer batch counter `i`.
        for i,line_word_index in enumerate(lines_words_indexs):
            if i==0:
                line_list.append(line_word_index[0])
                word_list.append(line_word_index[1])
                index_list.append(line_word_index[2])
            elif (line_word_index[0]-line_list[-1])<neigbour:
                line_list.append(line_word_index[0])
                word_list.append(line_word_index[1])
                index_list.append(line_word_index[2])
            elif (line_word_index[0]-line_list[-1])>=neigbour:
                # gap too large: close the current cluster and start a new one
                lines_list.append(line_list)
                words_total_list.append(word_list)
                indexes_list.append(index_list)
                line_list=[]
                word_list=[]
                index_list=[]
                line_list.append(line_word_index[0])
                word_list.append(line_word_index[1])
                index_list.append(line_word_index[2])#clustering according the line number
            if i==(len(lines_words_indexs)-1):
                # flush the trailing cluster
                lines_list.append(line_list)
                words_total_list.append(word_list)
                indexes_list.append(index_list)
        # for i,word in enumerate(words_total_list):#find the most common appear word in each small cluster word list
        #     c=Counter(word)
        #     x=c.most_common()
        #     # print 'x',x
        #     word=[item[0] for item in x if item[1]==x[0][1]]
        #     words_total_list[i]=word#
        for i,index in enumerate(indexes_list):#find the most common appear word_index in each small cluster index list
            c=Counter(index)
            x=c.most_common()
            # print 'x',x
            index=[item[0] for item in x if item[1]==x[0][1]]
            indexes_list[i]=index
        for i,indexes in enumerate(indexes_list):#find the biggest probability word index in each small cluster index list
            if len(indexes)>1:
                # tie-break by the maximum probability over the cluster's positions
                # print 'lines_list[i]:',lines_list[i]
                # print probs_temp.shape,type(probs_temp)
                prob_temp=np.max(probs_temp[0,lines_list[i],:],0)#become 1*9105
                result_word_index = np.argmax(prob_temp,0)
                indexes=[]
                indexes.append(result_word_index)
                indexes_list[i]=indexes
        for index in indexes_list:#map the word index to the word result
            result+=index_word_dict[index[0]]
        # probs_temp[lines_list[i]]
        # c=Counter(index)
        # x=c.most_common()
        # # print 'x',x
        # index=[item[0] for item in x if item[1]==x[0][1]]
        # indexes_list[i]=index
        # word=[]
        # print 'lines_list:',lines_list
        # print lines_list
        # print json.dumps(words_total_list, encoding="UTF-8", ensure_ascii=False)
        # print indexes_list
        result = result.replace(u'"', u'')
        if u',' in result:
            # quote fields containing commas so the CSV stays parseable
            result = '"' + result + '"'
        if len(result) == 0:
            # nothing detected: fall back to the single globally most probable character
            global_prob = probs.data.cpu().numpy().max(0).max(0).max(0)
            max_index = global_prob.argmax()
            result = index_word_dict[max_index]
        print 'name',name, 'result',result
        result_file.write(name + ',' + result + '\n')
        # result_file.write('\n')
        if phase == 'test':
            continue
    result_file.close()
    # import pandas as pd
    # re = pd.read_csv(datadir + '/data/result/{:d}_{:s}_result.csv'.format(epoch, phase))
    # re.columns = ['target_file', 'text']
    # submit = pd.read_csv(datadir + '/submission.csv')
    # submit = pd.merge(submit, re, how='left', on=['target_file'])
    # submit = submit.drop(['target_file'], axis=1)
    # submit = submit.replace(to_replace='None', value=20)
    # submit = submit.fillna('上')
    # submit.to_csv(datadir + '/predict.csv', header=True, index=None, encoding='utf-8')
    # file.copy(datadir + '/predict.csv', args.data_dir_obs + '/predict.csv')
def get_weight(labels):
    """Per-element BCE weights balancing positives against negatives.

    For every class column, negatives each get weight 1/#negatives and
    positives 1/#positives, so both sides contribute equally regardless of
    imbalance.  The whole matrix is then rescaled so its sum matches the
    element count, and "ignore" entries (label < -0.5) are zeroed out.
    Takes a torch tensor/Variable, returns a numpy array of the same shape.
    """
    lab = labels.data.cpu().numpy()
    out = np.zeros_like(lab)
    # weight_false = 1.0 / ((labels<0.5).sum() + 10e-20)
    # weight_true = 1.0 / ((labels>0.5).sum() + 10e-20)
    inv_neg = 1.0 / ((lab < 0.5).sum(0) + 10e-20)
    pos_count = (lab > 0.5).sum(0)
    for col in range(lab.shape[1]):
        col_lab = lab[:, col]
        col_w = np.ones(lab.shape[0]) * inv_neg[col]
        if pos_count[col] > 0:
            col_w[col_lab > 0.5] = 1.0 / pos_count[col]
        out[:, col] = col_w
    # normalize the total weight mass to the number of elements
    out *= np.ones_like(lab).sum() / (out.sum() + 10e-20)
    # "don't care" labels carry no weight
    out[lab < -0.5] = 0
    return out
def train_eval(epoch, model, train_loader, loss, optimizer, best_f1score=0, phase='train'):
    """One pass over `train_loader`.

    Optimizes when `phase` contains 'train', otherwise only evaluates.
    Prints per-sample f1/recall/precision averages, dumps first-batch images
    for analysis, and on 'eval'/'pretrain' phases checkpoints the model.
    Returns the (possibly updated) best f1 score.
    """
    print '\n', epoch, phase
    if 'train' in phase:
        model.train()
    else:
        model.eval()
    loss_list = []
    f1score_list = []
    recall_list = []
    precision_list = []
    for i, data in enumerate(train_loader):
        images, labels = [Variable(x.cuda(async=True)) for x in data[1:3]]
        # per-element class-balancing weights for the BCE loss
        weights = torch.from_numpy(get_weight(labels)).cuda(async=True)
        probs = model(images)
        # training phase
        if 'train' in phase:
            loss_output = loss(probs, labels, weights, args.hard_mining)
            try:
                optimizer.zero_grad()
                loss_output[0].backward()
                optimizer.step()
                loss_list.append([x.data.cpu().numpy() for x in loss_output])
            except:
                # pass
                # NOTE(review): bare except tolerates ANY failure in the
                # optimization step; only the traceback is printed.
                traceback.print_exc()
        # compute f1score, recall, precision
        '''
        x = probs.data.cpu().numpy()
        l = labels.data.cpu().numpy()
        print (get_weight(labels) * l).sum()
        l = 1 - l
        print (get_weight(labels) * l).sum()
        print x.max()
        print x.min()
        print x.mean()
        print
        # '''
        preds = probs.data.cpu().numpy() > 0
        labels = labels.data.cpu().numpy()
        for pred, label in zip(preds, labels):
            # positions with negative labels are excluded from tp/fp/fn counts
            pred[label < 0] = -1
            if label.sum() < 0.5:
                continue
            tp = (pred + label == 2).sum()
            tn = (pred + label == 0).sum()
            fp = (pred - label == 1).sum()
            fn = (pred - label == -1).sum()
            precision = 1.0 * tp / (tp + fp + 10e-20)
            recall = 1.0 * tp / (tp + fn + 10e-20)
            f1score = 2. * precision * recall / (precision + recall + 10e-20)
            precision_list.append(precision)
            recall_list.append(recall)
            f1score_list.append(f1score)
        if 'train' in phase and i % 50 == 0:
            loss_mean = np.array(loss_list).mean(0)
            print('step[{:d}] loss: {:3.4f} pos loss: {:3.4f} neg loss: {:3.4f}'.format(i, loss_mean[0],
                                                                                        loss_mean[1],
                                                                                        loss_mean[2]))
        # Save intermediate results to data/middle_result for analysis
        if i == 0:
            images = images.data.cpu().numpy() * 128 + 128
            if phase == 'pretrain':
                # NOTE(review): bbox_labels/seg_labels/seg_output are never
                # defined in this function -- this branch would raise
                # NameError if phase == 'pretrain' were ever used.
                bbox_labels = bbox_labels.data.cpu().numpy()
                seg_labels = seg_labels.data.cpu().numpy()
                seg_output = seg_output.data.cpu().numpy()
            for ii in range(len(images)):
                middle_dir = os.path.join(args.save_dir, 'middle_result')
                if not os.path.exists(middle_dir):
                    os.mkdir(middle_dir)
                middle_dir = os.path.join(middle_dir, phase)
                if not os.path.exists(middle_dir):
                    os.mkdir(middle_dir)
                Image.fromarray(images[ii].astype(np.uint8).transpose(1, 2, 0)).save(
                    os.path.join(middle_dir, str(ii) + '.image.png'))
                if phase == 'pretrain':
                    segi = seg_labels[ii]
                    _segi = np.array([segi, segi, segi]) * 255
                    segi = np.zeros([3, _segi.shape[1] * 2, _segi.shape[2] * 2])
                    # 2x nearest-neighbour upsample (si / 2 is Python 2 integer division)
                    for si in range(segi.shape[1]):
                        for sj in range(segi.shape[2]):
                            segi[:, si, sj] = _segi[:, si / 2, sj / 2]
                    Image.fromarray(segi.transpose(1, 2, 0).astype(np.uint8)).save(
                        os.path.join(middle_dir, str(ii) + '.seg.png'))
                    segi = seg_output[ii]
                    _segi = np.array([segi, segi, segi]) * 255
                    segi = np.zeros([3, _segi.shape[1] * 2, _segi.shape[2] * 2])
                    for si in range(segi.shape[1]):
                        for sj in range(segi.shape[2]):
                            segi[:, si, sj] = _segi[:, si / 2, sj / 2]
                    Image.fromarray(segi.transpose(1, 2, 0).astype(np.uint8)).save(
                        os.path.join(middle_dir, str(ii) + '.seg.out.png'))
    f1score = np.mean(f1score_list)
    print 'f1score', f1score
    print 'recall', np.mean(recall_list)
    print 'precision', np.mean(precision_list)
    if 'train' in phase:
        loss_mean = np.array(loss_list).mean(0)
        print 'loss: {:3.4f} pos loss: {:3.4f} neg loss: {:3.4f}'.format(loss_mean[0], loss_mean[1], loss_mean[2])
    # save the model
    if ('eval' in phase or 'pretrain' in phase) and best_f1score < 2:
        if args.small:
            save_dir = os.path.join(args.save_dir, 'models-small')
        else:
            save_dir = os.path.join(args.save_dir, 'models')
        if not os.path.exists(save_dir):
            os.mkdir(save_dir)
        if epoch % 3 == 0:
            save_model(save_dir, phase, str(epoch), epoch, f1score, model)
        if f1score > best_f1score:
            save_model(save_dir, phase, 'best_f1score', epoch, f1score, model)
        if args.model == 'resnet':
            # NOTE(review): `loss` is the Loss module, not a list -- indexing
            # it here would raise TypeError; this resnet path looks stale.
            tpnr = loss[11] + loss[12]
            # reuse best_f1score to also hold the best tpnr (original author's
            # note: "too lazy to rename")
            if tpnr > best_f1score:
                best_f1score = tpnr
                save_model(save_dir, phase, 'best_tpnr', epoch, f1score, model)
            print 'best tpnr', best_f1score
        else:
            best_f1score = max(best_f1score, f1score)
    if best_f1score < 1:
        print '\n\t{:s}\tbest f1score {:3.4f}\n'.format(phase, best_f1score)
    return best_f1score
def main():
word_index_dict = json.load(open(args.word_index_json, 'r'))
num_classes = len(word_index_dict)
image_label_dict = json.load(open(args.image_label_json, 'r'))
cudnn.benchmark = True
if args.model == 'densenet':
# 两千多种字符,multi-label分类
model = DenseNet121(num_classes).cuda()
elif args.model == 'resnet':
# resnet主要用于文字区域的segmentation以及object detection操作
model = resnet.ResNet(num_classes=num_classes, args=args).cuda()
else:
return
##优化器
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
# model = torch.nn.DataParallel(model).cuda()
loss = Loss().cuda()
if args.resume:
state_dict = torch.load(args.resume)
model.load_state_dict(state_dict['state_dict'])
best_f1score = state_dict['f1score']
start_epoch = state_dict['epoch'] + 1
else:
best_f1score = 0
if args.model == 'resnet':
start_epoch = 100
else:
start_epoch = 1
print 'best_f1score', best_f1score
# 划分数据集
# test_filelist1 = sorted(glob(os.path.join('/media/scut214/file/HLG/Chinese_Recognition', 'crop_test1', '*')))
#data set for single word
# test_filelist1 = sorted(glob(os.path.join('/media/scut214/file/HLG/Chinese_Recognition', 'crop_test1', '*')))
test_filelist1 = sorted(glob(os.path.join('../../../intermediate_file', 'images_to_recognition', '*')))
# test_filelist=os.listdir('/Net/competition/OCR/ocr_densenet/data/dataset/crop')
# trainval_filelist = sorted(glob(os.path.join(args.data_dir, 'train', '*')))
# 两种输入size训练
# 修改为自身数据集的尺寸。
# train_filelist1: 长宽比小于8:1的图片,经过padding后变成 64*512 的输入
# train_filelist2: 长宽比大于8:1的图片,经过padding,crop后变成 64*1024的输入
train_filelist1, train_filelist2 = [], []
print len(test_filelist1)
# 黑名单,这些图片的label是有问题的
black_list = set(json.load(open(args.black_json, 'r'))['black_list'])
image_hw_ratio_dict = json.load(open(args.image_hw_ratio_json, 'r'))
test_filelist=[]
for f in test_filelist1:
image = f.split('/')[-1]
if image in black_list:
continue
# r = image_hw_ratio_dict[image]
# if r == 1 or r==2:
test_filelist.append(f)
# print r
# else:
# train_filelist2.append(f)
train_val_filelist = train_filelist1 + train_filelist2
val_filelist = train_filelist1[-2048:]
train_filelist1 = train_filelist1[:-2048]
train_filelist2 = train_filelist2
image_size = [1300, 64]
print len(test_filelist)
if args.phase in ['test', 'val', 'train_val']:
# 测试输出文字检测结果
test_dataset = dataloader.DataSet(
test_filelist,
image_label_dict,
num_classes,
# transform=train_transform,
args=args,
image_size=image_size,
phase='test')
test_loader = DataLoader(
dataset=test_dataset,
batch_size=1,
shuffle=False,
num_workers=4,
pin_memory=True)
# train_filelist = train_filelist1[-2048:]
# train_dataset = dataloader.DataSet(
# train_filelist,
# image_label_dict,
# num_classes,
# image_size=image_size,
# args=args,
# phase='test')
# train_loader = DataLoader(
# dataset=train_dataset,
# batch_size=1,
# shuffle=False,
# num_workers=8,
# pin_memory=True)
# val_dataset = dataloader.DataSet(
# val_filelist,
# image_label_dict,
# num_classes,
# image_size=image_size,
# args=args,
# phase='test')
# val_loader = DataLoader(
# dataset=val_dataset,
# batch_size=1,
# shuffle=False,
# num_workers=8,
# pin_memory=True)
# train_val_dataset = dataloader.DataSet(
# train_val_filelist,
# image_label_dict,
# num_classes,
# image_size=image_size,
# args=args,
# phase='test')
# train_val_loader = DataLoader(
# dataset=train_val_dataset,
# batch_size=1,
# shuffle=False,
# num_workers=8,
# pin_memory=True)
if args.phase == 'test':
# test(start_epoch - 1, model, val_loader, 'val')
test(model, test_loader, 'test')
# test(start_epoch - 1, model, train_val_loader, 'train_val')
# elif args.phase == 'val':
# test(start_epoch - 1, model, train_loader, 'train')#valid set
# test(start_epoch - 1, model, val_loader, 'val')#test set
# elif args.phase == 'train_val':
# test(start_epoch - 1, model, train_val_loader, 'train_val')
return
elif args.phase == 'train':
train_dataset1 = dataloader.DataSet(
train_filelist1,
image_label_dict,
num_classes,
image_size=image_size,
args=args,
phase='train')
train_loader1 = DataLoader(
dataset=train_dataset1,
batch_size=args.batch_size,
shuffle=True,
num_workers=8,
pin_memory=True)
# train_dataset2 = dataloader.DataSet(
# train_filelist2,
# image_label_dict,
# num_classes,
# image_size=(1024,64),
# args=args,
# phase='train')
# train_loader2 = DataLoader(
# dataset=train_dataset2,
# batch_size=args.batch_size / 2,
# shuffle=True,
# num_workers=8,
# pin_memory=True)
val_dataset = dataloader.DataSet(
val_filelist,
image_label_dict,
num_classes,
image_size=image_size,
args=args,
phase='val')
val_loader = DataLoader(
dataset=val_dataset,
batch_size=min(8, args.batch_size),
shuffle=False,
num_workers=8,
pin_memory=True)
best_f1score = 0
# eval_mode = 'pretrain-2'
eval_mode = 'eval'
for a in range(start_epoch, args.epochs):
args.epoch = epoch
if eval_mode == 'eval':
if best_f1score > 0.8:
args.lr = 0.0001
if best_f1score > 0.7:
args.hard_mining = 1
for param_group in optimizer.param_groups:
param_group['lr'] = args.lr
train_eval(epoch, model, train_loader1, loss, optimizer, 2., 'train-1')
best_f1score = train_eval(epoch, model, val_loader, loss, optimizer, best_f1score,
'eval-{:d}-{:d}'.format(args.batch_size, args.hard_mining))
print 'best_f1score:',best_f1score
continue
if __name__ == '__main__':
    # Entry point: echo the eval tag (batch size + hard-mining flag), then
    # launch training/testing via main().
    print 'eval-{:d}-{:d}'.format(args.batch_size, args.hard_mining)
    main()
|
{"hexsha": "2decab061cf948379822a09b7eeafa95921dd29b", "size": 35028, "ext": "py", "lang": "Python", "max_stars_repo_path": "recognition/densenet/code/main.py", "max_stars_repo_name": "HLIG/HUAWEI_OCR2019", "max_stars_repo_head_hexsha": "1070d6291072e0223c2624f686766d0f3065e9c6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 54, "max_stars_repo_stars_event_min_datetime": "2019-04-17T07:55:44.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-02T06:00:04.000Z", "max_issues_repo_path": "recognition/densenet/code/main.py", "max_issues_repo_name": "HLIG/HUAWEI_OCR2019", "max_issues_repo_head_hexsha": "1070d6291072e0223c2624f686766d0f3065e9c6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2019-04-24T03:22:50.000Z", "max_issues_repo_issues_event_max_datetime": "2021-08-18T13:12:38.000Z", "max_forks_repo_path": "recognition/densenet/code/main.py", "max_forks_repo_name": "HLIG/HUAWEI_OCR2019", "max_forks_repo_head_hexsha": "1070d6291072e0223c2624f686766d0f3065e9c6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 28, "max_forks_repo_forks_event_min_datetime": "2019-04-17T11:30:58.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-09T13:37:02.000Z", "avg_line_length": 38.7477876106, "max_line_length": 122, "alphanum_fraction": 0.5517871417, "include": true, "reason": "import numpy", "num_tokens": 8943}
|
[STATEMENT]
lemma tendsto_dist [tendsto_intros]:
fixes l m :: "'a::metric_space"
assumes f: "(f \<longlongrightarrow> l) F"
and g: "(g \<longlongrightarrow> m) F"
shows "((\<lambda>x. dist (f x) (g x)) \<longlongrightarrow> dist l m) F"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. ((\<lambda>x. dist (f x) (g x)) \<longlongrightarrow> dist l m) F
[PROOF STEP]
proof (rule tendstoI)
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>e. 0 < e \<Longrightarrow> \<forall>\<^sub>F x in F. dist (dist (f x) (g x)) (dist l m) < e
[PROOF STEP]
fix e :: real
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>e. 0 < e \<Longrightarrow> \<forall>\<^sub>F x in F. dist (dist (f x) (g x)) (dist l m) < e
[PROOF STEP]
assume "0 < e"
[PROOF STATE]
proof (state)
this:
0 < e
goal (1 subgoal):
1. \<And>e. 0 < e \<Longrightarrow> \<forall>\<^sub>F x in F. dist (dist (f x) (g x)) (dist l m) < e
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
0 < e
[PROOF STEP]
have e2: "0 < e/2"
[PROOF STATE]
proof (prove)
using this:
0 < e
goal (1 subgoal):
1. 0 < e / 2
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
0 < e / 2
goal (1 subgoal):
1. \<And>e. 0 < e \<Longrightarrow> \<forall>\<^sub>F x in F. dist (dist (f x) (g x)) (dist l m) < e
[PROOF STEP]
from tendstoD [OF f e2] tendstoD [OF g e2]
[PROOF STATE]
proof (chain)
picking this:
\<forall>\<^sub>F x in F. dist (f x) l < e / 2
\<forall>\<^sub>F x in F. dist (g x) m < e / 2
[PROOF STEP]
show "eventually (\<lambda>x. dist (dist (f x) (g x)) (dist l m) < e) F"
[PROOF STATE]
proof (prove)
using this:
\<forall>\<^sub>F x in F. dist (f x) l < e / 2
\<forall>\<^sub>F x in F. dist (g x) m < e / 2
goal (1 subgoal):
1. \<forall>\<^sub>F x in F. dist (dist (f x) (g x)) (dist l m) < e
[PROOF STEP]
proof (eventually_elim)
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>x. \<lbrakk>dist (f x) l < e / 2; dist (g x) m < e / 2\<rbrakk> \<Longrightarrow> dist (dist (f x) (g x)) (dist l m) < e
[PROOF STEP]
case (elim x)
[PROOF STATE]
proof (state)
this:
dist (f x) l < e / 2
dist (g x) m < e / 2
goal (1 subgoal):
1. \<And>x. \<lbrakk>dist (f x) l < e / 2; dist (g x) m < e / 2\<rbrakk> \<Longrightarrow> dist (dist (f x) (g x)) (dist l m) < e
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
dist (f x) l < e / 2
dist (g x) m < e / 2
[PROOF STEP]
show "dist (dist (f x) (g x)) (dist l m) < e"
[PROOF STATE]
proof (prove)
using this:
dist (f x) l < e / 2
dist (g x) m < e / 2
goal (1 subgoal):
1. dist (dist (f x) (g x)) (dist l m) < e
[PROOF STEP]
unfolding dist_real_def
[PROOF STATE]
proof (prove)
using this:
dist (f x) l < e / 2
dist (g x) m < e / 2
goal (1 subgoal):
1. \<bar>dist (f x) (g x) - dist l m\<bar> < e
[PROOF STEP]
using dist_triangle2 [of "f x" "g x" "l"]
and dist_triangle2 [of "g x" "l" "m"]
and dist_triangle3 [of "l" "m" "f x"]
and dist_triangle [of "f x" "m" "g x"]
[PROOF STATE]
proof (prove)
using this:
dist (f x) l < e / 2
dist (g x) m < e / 2
dist (f x) (g x) \<le> dist (f x) l + dist (g x) l
dist (g x) l \<le> dist (g x) m + dist l m
dist l m \<le> dist (f x) l + dist (f x) m
dist (f x) m \<le> dist (f x) (g x) + dist (g x) m
goal (1 subgoal):
1. \<bar>dist (f x) (g x) - dist l m\<bar> < e
[PROOF STEP]
by arith
[PROOF STATE]
proof (state)
this:
dist (dist (f x) (g x)) (dist l m) < e
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
\<forall>\<^sub>F x in F. dist (dist (f x) (g x)) (dist l m) < e
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 1635, "file": null, "length": 17}
|
# coding: utf-8
""" Generate a grid of initial conditions for freqmap'ing """
from __future__ import division, print_function
__author__ = "adrn <adrn@astro.columbia.edu>"
# Standard library
import os
import sys
# Third-party
from astropy import log as logger
import gary.potential as gp
import matplotlib.pyplot as plt
import numpy as np
# Project
from streammorphology import project_path
import streammorphology.initialconditions as ic
def main(potential_name, E, ic_func, run_name=None, output_path=None, overwrite=False, plot=False, **kwargs):
    """Generate (or reload) a grid of initial conditions for freq-mapping.

    Loads `potential_name` from the registry, calls `ic_func(E=..., potential=...,
    **kwargs)` to build the grid, and caches the result as ``w0.npy`` next to a
    copy of the potential.  An existing cache is reused unless `overwrite` is
    set; `plot` additionally saves a scatter plot of the grid.
    """
    # Read the potential from the registry by name.
    registry_file = os.path.join(project_path, "potentials/{0}.yml".format(potential_name))
    potential = gp.load(registry_file)

    # Resolve the cache directory.
    if output_path is None:
        output_path = os.path.join(project_path, "output")
        if run_name is None:
            run_name = 'E{:.3f}_{}'.format(E, ic_func.func_name)
        cache_path = os.path.join(output_path, 'freqmap', potential_name, run_name)
    else:
        cache_path = output_path
    logger.info("Caching to: {}".format(cache_path))
    if not os.path.exists(cache_path):
        os.makedirs(cache_path)

    # Cache file locations.
    w0path = os.path.join(cache_path, 'w0.npy')
    pot_path = os.path.join(cache_path, 'potential.yml')

    if overwrite and os.path.exists(w0path):
        os.remove(w0path)

    if os.path.exists(w0path):
        w0 = np.load(w0path)
        logger.info("Initial conditions file already exists!\n\t{}".format(w0path))
    else:
        # Build the initial conditions and persist them with the potential.
        w0 = ic_func(E=E, potential=potential, **kwargs)
        np.save(w0path, w0)
        logger.info("Create initial conditions file:\n\t{}".format(w0path))
        potential.save(pot_path)

    if plot:
        fig, ax = plt.subplots(1, 1, figsize=(8, 8))
        ax.plot(w0[:, 0], w0[:, 2], marker='.', linestyle='none')
        fig.savefig(os.path.join(cache_path, 'w0.png'))

    logger.info("Number of initial conditions: {}".format(len(w0)))
if __name__ == '__main__':
    from argparse import ArgumentParser
    import logging
    import inspect

    # List of possible potentials: registry file names without the extension.
    # BUG FIX: this previously used x.rstrip('.yml'), which strips any
    # trailing characters from the set {'.', 'y', 'm', 'l'} and so mangles
    # names (e.g. "family.yml" -> "fami"); os.path.splitext removes only the
    # actual extension.
    all_potentials = [os.path.splitext(x)[0] for x in os.listdir(os.path.join(project_path, 'potentials'))]

    # Define parser object
    parser = ArgumentParser(description="")
    parser.add_argument("-v", "--verbose", action="store_true", dest="verbose",
                        default=False, help="Be chatty! (default = False)")
    parser.add_argument("-q", "--quiet", action="store_true", dest="quiet",
                        default=False, help="Be quiet! (default = False)")
    parser.add_argument("-o", "--overwrite", action="store_true", dest="overwrite",
                        default=False, help="DESTROY. DESTROY. (default = False)")
    parser.add_argument("--plot", action="store_true", dest="plot",
                        default=False, help="Plot dat ish.")
    parser.add_argument("-p","--output-path", dest="output_path", default=None,
                        help="Path to the 'output' directory.")
    parser.add_argument("--run-name", dest="run_name", default=None,
                        help="Name of the run.")
    parser.add_argument("-E", "--energy", dest="energy", type=float, required=True,
                        help="Energy of the orbits.")
    parser.add_argument("--potential", dest="potential_name", type=str, required=True,
                        help="Name of the potential from the potential registry. Can be "
                             "one of: {}".format(",".join(all_potentials)))
    parser.add_argument("--ic-func", dest="ic_func", type=str, required=True,
                        help="Name of the initial condition function to use. Can be "
                             "one of: {}".format(",".join([f for f in dir(ic) if 'grid' in f])))

    # Automagically add arguments for the different initial-condition grid
    # functions found in the `ic` module.
    for fn_name in dir(ic):
        if 'grid' not in fn_name:
            continue
        argspec = inspect.getargspec(getattr(ic,fn_name))
        if argspec.defaults is not None:
            # left-pad so defaults line up with their argument names
            pad = len(argspec.args) - len(argspec.defaults)
            defaults = [None]*pad + list(argspec.defaults)
        else:
            defaults = [None]*len(argspec.args)
        for arg,default in zip(argspec.args,defaults):
            if arg in ['E','potential']:
                continue
            if isinstance(default, list):
                parser.add_argument("--{}".format(arg), dest=arg, type=float, nargs='+',
                                    help="[float] Used in initial condition function: {0}".format(fn_name))
            else:
                # NOTE(review): when `default` is None this passes
                # type=NoneType, which argparse will call on any supplied
                # value; kept as-is to preserve existing behavior.
                typ = type(default).__name__
                helpstr = "[{0}] Used in initial condition function: {1}".format(typ, fn_name)
                parser.add_argument("--{0}".format(arg), dest=arg,
                                    type=type(default), help=helpstr)

    args = parser.parse_args()

    # Pull out only the arguments relevant to the chosen IC function.
    argspec = inspect.getargspec(getattr(ic,args.ic_func))
    arg_dict = dict()
    for arg in argspec.args:
        if arg in ['E','potential']:
            continue
        arg_dict[arg] = getattr(args, arg)

    # Set logger level based on verbose flags
    if args.verbose:
        logger.setLevel(logging.DEBUG)
    elif args.quiet:
        logger.setLevel(logging.ERROR)
    else:
        logger.setLevel(logging.INFO)

    main(potential_name=args.potential_name,
         E=args.energy,
         ic_func=getattr(ic,args.ic_func),
         overwrite=args.overwrite,
         output_path=args.output_path,
         run_name=args.run_name,
         plot=args.plot,
         **arg_dict)

    sys.exit(0)
|
{"hexsha": "46f5a8330c4251aec53d7975ed1bb8233830a71b", "size": 5946, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/make_grid.py", "max_stars_repo_name": "adrn/StreamMorphology", "max_stars_repo_head_hexsha": "99a2da560b58e6e47259d1cd2f0cc9ba1641424d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "scripts/make_grid.py", "max_issues_repo_name": "adrn/StreamMorphology", "max_issues_repo_head_hexsha": "99a2da560b58e6e47259d1cd2f0cc9ba1641424d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2015-01-28T18:37:06.000Z", "max_issues_repo_issues_event_max_datetime": "2015-01-28T18:37:06.000Z", "max_forks_repo_path": "scripts/make_grid.py", "max_forks_repo_name": "adrn/StreamMorphology", "max_forks_repo_head_hexsha": "99a2da560b58e6e47259d1cd2f0cc9ba1641424d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.1625, "max_line_length": 109, "alphanum_fraction": 0.6146989573, "include": true, "reason": "import numpy,from astropy", "num_tokens": 1348}
|
# Entry point for the FEM.jl test suite; executed by `Pkg.test()`.
using FEM
using Test
# Top-level test set for the package; add concrete test cases inside.
@testset "FEM.jl" begin
    # Write your tests here.
end
|
{"hexsha": "b33de4686e1f2a22202ebb2f082346bfaa4091e9", "size": 79, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/runtests.jl", "max_stars_repo_name": "Andre-Fontenelle/FEM.jl", "max_stars_repo_head_hexsha": "5189e57310cb8c791914b4ec0e3c2d33ff037750", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/runtests.jl", "max_issues_repo_name": "Andre-Fontenelle/FEM.jl", "max_issues_repo_head_hexsha": "5189e57310cb8c791914b4ec0e3c2d33ff037750", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/runtests.jl", "max_forks_repo_name": "Andre-Fontenelle/FEM.jl", "max_forks_repo_head_hexsha": "5189e57310cb8c791914b4ec0e3c2d33ff037750", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 11.2857142857, "max_line_length": 28, "alphanum_fraction": 0.6962025316, "num_tokens": 26}
|
# -*- encoding: utf-8 -*-
import numpy as np
import warnings
from sklearn.metrics.classification import type_of_target
from sklearn.base import BaseEstimator
import sklearn.utils
import scipy.sparse
import autosklearn.automl
from autosklearn.metrics import f1_macro, accuracy, r2
from autosklearn.constants import *
from autosklearn.util.backend import create
class AutoMLDecorator(object):
    """Forwarding wrapper around an ``autosklearn.automl.AutoML`` object.

    Every public method delegates to the wrapped instance, which lets
    subclasses override individual calls (for example to pre-process the
    targets) while inheriting the remainder of the interface unchanged.
    """

    def __init__(self, automl):
        # The wrapped AutoML object (may be None until it is built later).
        self._automl = automl

    def fit(self, *args, **kwargs):
        """Delegate model fitting to the wrapped AutoML object."""
        self._automl.fit(*args, **kwargs)

    def refit(self, X, y):
        """Refit all models found with fit to new data.

        Necessary when using cross-validation. During training, auto-sklearn
        fits each model k times on the dataset, but does not keep any trained
        model and can therefore not be used to predict for new data points.
        This method fits all models found during a call to fit on the data
        given. It may also be used together with holdout to avoid only using
        66% of the training data to fit the final model.

        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The training input samples.

        y : array-like, shape = [n_samples] or [n_samples, n_outputs]
            The targets.

        Returns
        -------
        self
        """
        return self._automl.refit(X, y)

    def fit_ensemble(self, y, task=None, metric=None, precision='32',
                     dataset_name=None, ensemble_nbest=None,
                     ensemble_size=None):
        """Delegate ensemble construction to the wrapped AutoML object."""
        return self._automl.fit_ensemble(y, task, metric, precision,
                                         dataset_name, ensemble_nbest,
                                         ensemble_size)

    def predict(self, X, batch_size=None, n_jobs=1):
        """Delegate prediction on X to the wrapped AutoML object."""
        return self._automl.predict(X, batch_size=batch_size, n_jobs=n_jobs)

    def score(self, X, y):
        """Return the wrapped AutoML object's score on (X, y)."""
        return self._automl.score(X, y)

    def show_models(self):
        """Return a string representation of the final ensemble.

        Returns
        -------
        str
        """
        return self._automl.show_models()

    def get_models_with_weights(self):
        """Return the final ensemble as a list of (weight, model) pairs.

        Returns
        -------
        [(weight_1, model_1), ..., (weight_n, model_n)]
        """
        return self._automl.get_models_with_weights()

    @property
    def cv_results_(self):
        # Mirrors the wrapped object's scikit-learn-style cv_results_.
        return self._automl.cv_results_

    @property
    def trajectory_(self):
        # Optimization trajectory recorded by the wrapped AutoML object.
        return self._automl.trajectory_

    @property
    def fANOVA_input_(self):
        # Data prepared by the wrapped AutoML object for fANOVA analysis.
        return self._automl.fANOVA_input_

    def sprint_statistics(self):
        """Return a textual summary of the wrapped object's statistics."""
        return self._automl.sprint_statistics()
class AutoSklearnEstimator(AutoMLDecorator, BaseEstimator):

    def __init__(self,
                 time_left_for_this_task=3600,
                 per_run_time_limit=360,
                 initial_configurations_via_metalearning=25,
                 ensemble_size=50,
                 ensemble_nbest=50,
                 seed=1,
                 ml_memory_limit=3072,
                 include_estimators=None,
                 exclude_estimators=None,
                 include_preprocessors=None,
                 exclude_preprocessors=None,
                 resampling_strategy='holdout',
                 resampling_strategy_arguments=None,
                 tmp_folder=None,
                 output_folder=None,
                 delete_tmp_folder_after_terminate=True,
                 delete_output_folder_after_terminate=True,
                 shared_mode=False,
                 disable_evaluator_output=False,
                 configuration_mode='SMAC'):
        """
        Parameters
        ----------
        time_left_for_this_task : int, optional (default=3600)
            Time limit in seconds for the search of appropriate
            models. By increasing this value, *auto-sklearn* has a higher
            chance of finding better models.

        per_run_time_limit : int, optional (default=360)
            Time limit for a single call to the machine learning model.
            Model fitting will be terminated if the machine learning
            algorithm runs over the time limit. Set this value high enough so
            that typical machine learning algorithms can be fit on the
            training data.

        initial_configurations_via_metalearning : int, optional (default=25)
            Initialize the hyperparameter optimization algorithm with this
            many configurations which worked well on previously seen
            datasets. Disable if the hyperparameter optimization algorithm
            should start from scratch.

        ensemble_size : int, optional (default=50)
            Number of models added to the ensemble built by *Ensemble
            selection from libraries of models*. Models are drawn with
            replacement.

        ensemble_nbest : int, optional (default=50)
            Only consider the ``ensemble_nbest`` models when building an
            ensemble. Implements `Model Library Pruning` from `Getting the
            most out of ensemble selection`.

        seed : int, optional (default=1)

        ml_memory_limit : int, optional (3072)
            Memory limit in MB for the machine learning algorithm.
            `auto-sklearn` will stop fitting the machine learning algorithm if
            it tries to allocate more than `ml_memory_limit` MB.

        include_estimators : list, optional (None)
            If None, all possible estimators are used. Otherwise specifies
            set of estimators to use.

        exclude_estimators : list, optional (None)
            If None, all possible estimators are used. Otherwise specifies
            set of estimators not to use. Incompatible with include_estimators.

        include_preprocessors : list, optional (None)
            If None all possible preprocessors are used. Otherwise specifies set
            of preprocessors to use.

        exclude_preprocessors : list, optional (None)
            If None all possible preprocessors are used. Otherwise specifies set
            of preprocessors not to use. Incompatible with
            include_preprocessors.

        resampling_strategy : string, optional ('holdout')
            how to to handle overfitting, might need 'resampling_strategy_arguments'

            * 'holdout': 67:33 (train:test) split
            * 'holdout-iterative-fit': 67:33 (train:test) split, calls iterative
              fit where possible
            * 'cv': crossvalidation, requires 'folds'

        resampling_strategy_arguments : dict, optional if 'holdout' (train_size default=0.67)
            Additional arguments for resampling_strategy
            ``train_size`` should be between 0.0 and 1.0 and represent the
            proportion of the dataset to include in the train split.

            * 'holdout': {'train_size': float}
            * 'holdout-iterative-fit': {'train_size': float}
            * 'cv': {'folds': int}

        tmp_folder : string, optional (None)
            folder to store configuration output and log files, if ``None``
            automatically use ``/tmp/autosklearn_tmp_$pid_$random_number``

        output_folder : string, optional (None)
            folder to store predictions for optional test set, if ``None``
            automatically use ``/tmp/autosklearn_output_$pid_$random_number``

        delete_tmp_folder_after_terminate: string, optional (True)
            remove tmp_folder, when finished. If tmp_folder is None
            tmp_dir will always be deleted

        delete_output_folder_after_terminate: bool, optional (True)
            remove output_folder, when finished. If output_folder is None
            output_dir will always be deleted

        shared_mode: bool, optional (False)
            Run smac in shared-model-node. This only works if arguments
            ``tmp_folder`` and ``output_folder`` are given and both
            ``delete_tmp_folder_after_terminate`` and
            ``delete_output_folder_after_terminate`` are set to False.

        disable_evaluator_output: bool or list, optional (False)
            If True, disable model and prediction output. Cannot be used
            together with ensemble building. ``predict()`` cannot be used when
            setting this True. Can also be used as a list to pass more
            fine-grained information on what to save. Allowed elements in the
            list are:

            * ``'y_optimization'`` : do not save the predictions for the
              optimization/validation set, which would later on be used to build
              an ensemble.
            * ``'model'`` : do not save any model files

        configuration_mode : ``SMAC`` or ``ROAR``
            Defines the configuration mode as described in the paper
            `Sequential Model-Based Optimization for General Algorithm
            Configuration <http://aad.informatik.uni-freiburg.de/papers/11-LION5-SMAC.pdf>`_:

            * ``SMAC`` (default): Sequential Model-based Algorithm
              Configuration, which is a Bayesian optimization algorithm
            * ``ROAR``: Random Online Aggressive Racing, which is basically
              random search

        Attributes
        ----------
        cv_results\_ : dict of numpy (masked) ndarrays
            A dict with keys as column headers and values as columns, that can be
            imported into a pandas ``DataFrame``.

            Not all keys returned by scikit-learn are supported yet.
        """
        # Store all constructor arguments unmodified (scikit-learn
        # convention: validation happens at fit time, in build_automl()).
        self.time_left_for_this_task = time_left_for_this_task
        self.per_run_time_limit = per_run_time_limit
        self.initial_configurations_via_metalearning = initial_configurations_via_metalearning
        self.ensemble_size = ensemble_size
        self.ensemble_nbest = ensemble_nbest
        self.seed = seed
        self.ml_memory_limit = ml_memory_limit
        self.include_estimators = include_estimators
        self.exclude_estimators = exclude_estimators
        self.include_preprocessors = include_preprocessors
        self.exclude_preprocessors = exclude_preprocessors
        self.resampling_strategy = resampling_strategy
        self.resampling_strategy_arguments = resampling_strategy_arguments
        self.tmp_folder = tmp_folder
        self.output_folder = output_folder
        self.delete_tmp_folder_after_terminate = delete_tmp_folder_after_terminate
        self.delete_output_folder_after_terminate = delete_output_folder_after_terminate
        self.shared_mode = shared_mode
        self.disable_evaluator_output = disable_evaluator_output
        self.configuration_mode = configuration_mode
        # The underlying AutoML object is only built when fit() is called.
        super(AutoSklearnEstimator, self).__init__(None)

    def build_automl(self):
        """Construct the backend and the underlying AutoML object.

        Returns
        -------
        autosklearn.automl.AutoML
        """
        if self.shared_mode:
            # Shared mode requires persistent, caller-supplied folders, so
            # deletion-on-terminate is forced off before building the backend.
            self.delete_output_folder_after_terminate = False
            self.delete_tmp_folder_after_terminate = False
            if self.tmp_folder is None:
                raise ValueError("If shared_mode == True tmp_folder must not "
                                 "be None.")
            if self.output_folder is None:
                raise ValueError("If shared_mode == True output_folder must "
                                 "not be None.")

        backend = create(temporary_directory=self.tmp_folder,
                         output_directory=self.output_folder,
                         delete_tmp_folder_after_terminate=self.delete_tmp_folder_after_terminate,
                         delete_output_folder_after_terminate=self.delete_output_folder_after_terminate)

        automl = autosklearn.automl.AutoML(
            backend=backend,
            time_left_for_this_task=self.time_left_for_this_task,
            per_run_time_limit=self.per_run_time_limit,
            log_dir=backend.temporary_directory,
            initial_configurations_via_metalearning=
            self.initial_configurations_via_metalearning,
            ensemble_size=self.ensemble_size,
            ensemble_nbest=self.ensemble_nbest,
            seed=self.seed,
            ml_memory_limit=self.ml_memory_limit,
            include_estimators=self.include_estimators,
            exclude_estimators=self.exclude_estimators,
            include_preprocessors=self.include_preprocessors,
            exclude_preprocessors=self.exclude_preprocessors,
            resampling_strategy=self.resampling_strategy,
            resampling_strategy_arguments=self.resampling_strategy_arguments,
            delete_tmp_folder_after_terminate=self.delete_tmp_folder_after_terminate,
            delete_output_folder_after_terminate=
            self.delete_output_folder_after_terminate,
            shared_mode=self.shared_mode,
            configuration_mode=self.configuration_mode,
            disable_evaluator_output=self.disable_evaluator_output)

        return automl

    def fit(self, *args, **kwargs):
        """Build the underlying AutoML object and fit it.

        Returns
        -------
        self
        """
        self._automl = self.build_automl()
        super(AutoSklearnEstimator, self).fit(*args, **kwargs)
        # BUGFIX: return self, as scikit-learn's fit() convention requires
        # and as the subclasses' docstrings already promise; previously the
        # method implicitly returned None.
        return self

    def fit_ensemble(self, y, task=None, metric=None, precision='32',
                     dataset_name=None, ensemble_nbest=None,
                     ensemble_size=None):
        """Fit an ensemble to models trained during an optimization process.

        All parameters are ``None`` by default. If no other value is given,
        the default values which were set in a call to ``fit()`` are used.

        Parameters
        ----------
        y : array-like
            Target values.

        task : int
            A constant from the module ``autosklearn.constants``. Determines
            the task type (binary classification, multiclass classification,
            multilabel classification or regression).

        metric : callable, optional
            An instance of :class:`autosklearn.metrics.Scorer` as created by
            :meth:`autosklearn.metrics.make_scorer`. These are the `Built-in
            Metrics`_.

        precision : str
            Numeric precision used when loading ensemble data. Can be either
            ``'16'``, ``'32'`` or ``'64'``.

        dataset_name : str
            Name of the current data set.

        ensemble_nbest : int
            Determines how many models should be considered from the ensemble
            building. This is inspired by a concept called library pruning
            introduced in `Getting Most out of Ensemble Selection`.

        ensemble_size : int
            Size of the ensemble built by `Ensemble Selection`.

        Returns
        -------
        self
        """
        # fit_ensemble() may be called before fit(), in which case the
        # underlying AutoML object must be built here first.
        if self._automl is None:
            self._automl = self.build_automl()
        return self._automl.fit_ensemble(y, task, metric, precision,
                                         dataset_name, ensemble_nbest,
                                         ensemble_size)
class AutoSklearnClassifier(AutoSklearnEstimator):
    """Auto-sklearn estimator for classification tasks."""

    def build_automl(self):
        # Wrap the generic AutoML object with the classification decorator,
        # which handles target encoding/decoding around fit and predict.
        base_automl = super(AutoSklearnClassifier, self).build_automl()
        return AutoMLClassifier(base_automl)

    def fit(self, X, y,
            metric=None,
            feat_type=None,
            dataset_name=None):
        """Fit *auto-sklearn* to the given classification training set.

        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The training input samples.

        y : array-like, shape = [n_samples] or [n_samples, n_outputs]
            The target classes.

        metric : callable, optional (default='autosklearn.metrics.accuracy')
            An instance of :class:`autosklearn.metrics.Scorer` as created by
            :meth:`autosklearn.metrics.make_scorer`. These are the `Built-in
            Metrics`_.

        feat_type : list, optional (default=None)
            List of str of `len(X.shape[1])` describing the attribute type.
            Possible types are `Categorical` and `Numerical`. `Categorical`
            attributes will be automatically One-Hot encoded. The values
            used for a categorical attribute must be integers, obtained for
            example by `sklearn.preprocessing.LabelEncoder
            <http://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.LabelEncoder.html>`_.

        dataset_name : str, optional (default=None)
            Create nicer output. If None, a string will be determined by the
            md5 hash of the dataset.

        Returns
        -------
        self
        """
        return super(AutoSklearnClassifier, self).fit(X=X, y=y, metric=metric,
                                                      feat_type=feat_type,
                                                      dataset_name=dataset_name)

    def predict(self, X, batch_size=None, n_jobs=1):
        """Predict the class of each sample in X.

        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]

        Returns
        -------
        y : array of shape = [n_samples] or [n_samples, n_labels]
            The predicted classes.
        """
        return super(AutoSklearnClassifier, self).predict(
            X, batch_size=batch_size, n_jobs=n_jobs)

    def predict_proba(self, X, batch_size=None, n_jobs=1):
        """Predict per-class probabilities for each sample in X.

        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]

        Returns
        -------
        y : array of shape = [n_samples, n_classes] or [n_samples, n_labels]
            The predicted class probabilities.
        """
        # Delegates directly to the wrapped AutoML object rather than going
        # through the decorator's predict() (which decodes class labels).
        return self._automl.predict_proba(
            X, batch_size=batch_size, n_jobs=n_jobs)
class AutoSklearnRegressor(AutoSklearnEstimator):
    """Auto-sklearn estimator for regression tasks."""

    def build_automl(self):
        # Wrap the generic AutoML object with the regression decorator,
        # which fixes the task type and default metric.
        base_automl = super(AutoSklearnRegressor, self).build_automl()
        return AutoMLRegressor(base_automl)

    def fit(self, X, y,
            metric=None,
            feat_type=None,
            dataset_name=None):
        """Fit *auto-sklearn* to the given regression training set.

        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The training input samples.

        y : array-like, shape = [n_samples] or [n_samples, n_outputs]
            The regression target.

        metric : callable, optional (default='autosklearn.metrics.r2')
            An instance of :class:`autosklearn.metrics.Scorer` as created by
            :meth:`autosklearn.metrics.make_scorer`. These are the `Built-in
            Metrics`_.

        feat_type : list, optional (default=None)
            List of str of `len(X.shape[1])` describing the attribute type.
            Possible types are `Categorical` and `Numerical`. `Categorical`
            attributes will be automatically One-Hot encoded.

        dataset_name : str, optional (default=None)
            Create nicer output. If None, a string will be determined by the
            md5 hash of the dataset.

        Returns
        -------
        self
        """
        # Fit is supposed to be idempotent!
        # But not if we use share_mode.
        return super(AutoSklearnRegressor, self).fit(X=X, y=y, metric=metric,
                                                     feat_type=feat_type,
                                                     dataset_name=dataset_name)

    def predict(self, X, batch_size=None, n_jobs=1):
        """Predict the regression target of each sample in X.

        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]

        Returns
        -------
        y : array of shape = [n_samples] or [n_samples, n_outputs]
            The predicted values.
        """
        return super(AutoSklearnRegressor, self).predict(
            X, batch_size=batch_size, n_jobs=n_jobs)
class AutoMLClassifier(AutoMLDecorator):
    """Decorator adapting an ``AutoML`` object to classification tasks.

    Detects the task type from the targets, encodes class labels to integer
    codes before fitting, and decodes predictions back to the original
    labels.
    """

    def __init__(self, automl):
        # Per-output class labels, class counts and number of target
        # columns; all populated by _process_target_classes().
        self._classes = []
        self._n_classes = []
        self._n_outputs = 0
        super(AutoMLClassifier, self).__init__(automl)

    def fit(self, X, y,
            metric=None,
            loss=None,
            feat_type=None,
            dataset_name=None):
        """Validate (X, y), infer the classification task and delegate fit.

        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
        y : array-like, shape = [n_samples] or [n_samples, n_outputs]
        metric : callable, optional
            Defaults to f1_macro for multilabel tasks, accuracy otherwise.
        loss : unused; kept only for backward signature compatibility.
        feat_type : list, optional
        dataset_name : str, optional

        Raises
        ------
        ValueError
            If the target type is not binary, multiclass or
            multilabel-indicator.
        """
        X = sklearn.utils.check_array(X, accept_sparse="csr",
                                      force_all_finite=False)
        y = sklearn.utils.check_array(y, ensure_2d=False)

        if scipy.sparse.issparse(X):
            # Ensure canonical (sorted) CSR indices for downstream code.
            X.sort_indices()

        # Map scikit-learn's target-type string to auto-sklearn task codes.
        y_task = type_of_target(y)
        task_mapping = {'multilabel-indicator': MULTILABEL_CLASSIFICATION,
                        'multiclass': MULTICLASS_CLASSIFICATION,
                        'binary': BINARY_CLASSIFICATION}
        task = task_mapping.get(y_task)
        if task is None:
            raise ValueError('Cannot work on data of type %s' % y_task)

        # Pick the default metric appropriate to the detected task.
        if metric is None:
            if task == MULTILABEL_CLASSIFICATION:
                metric = f1_macro
            else:
                metric = accuracy

        y = self._process_target_classes(y)

        return self._automl.fit(X, y, task, metric, feat_type, dataset_name)

    def fit_ensemble(self, y, task=None, metric=None, precision='32',
                     dataset_name=None, ensemble_nbest=None,
                     ensemble_size=None):
        """Record class information from y, then delegate ensemble fitting."""
        self._process_target_classes(y)
        return self._automl.fit_ensemble(y, task, metric, precision, dataset_name,
                                         ensemble_nbest, ensemble_size)

    def _process_target_classes(self, y):
        """Encode targets column-wise to integer codes and remember labels.

        Returns the encoded targets, flattened back to 1-D for single-output
        problems.
        """
        y = np.atleast_1d(y)
        if y.ndim == 2 and y.shape[1] == 1:
            # NOTE(review): modern scikit-learn exposes this warning class as
            # sklearn.exceptions.DataConversionWarning -- confirm against the
            # pinned scikit-learn version before upgrading.
            warnings.warn("A column-vector y was passed when a 1d array was"
                          " expected. Please change the shape of y to "
                          "(n_samples,), for example using ravel().",
                          sklearn.utils.DataConversionWarning, stacklevel=2)

        if y.ndim == 1:
            # reshape is necessary to preserve the data contiguity against vs
            # [:, np.newaxis] that does not.
            y = np.reshape(y, (-1, 1))

        self._n_outputs = y.shape[1]

        # Copy before in-place label encoding below.
        y = np.copy(y)

        self._classes = []
        self._n_classes = []

        for k in range(self._n_outputs):
            classes_k, y[:, k] = np.unique(y[:, k], return_inverse=True)
            self._classes.append(classes_k)
            self._n_classes.append(classes_k.shape[0])

        # BUGFIX: use the builtin int -- the np.int alias was deprecated in
        # NumPy 1.20 and removed in 1.24; behaviour is identical.
        self._n_classes = np.array(self._n_classes, dtype=int)

        if y.shape[1] == 1:
            y = y.flatten()

        return y

    def predict(self, X, batch_size=None, n_jobs=1):
        """Return decoded class labels (or a label matrix for multilabel)."""
        predicted_probabilities = self._automl.predict(
            X, batch_size=batch_size, n_jobs=n_jobs)

        if self._n_outputs == 1:
            # Single output: highest-probability class, mapped back to the
            # original labels.
            predicted_indexes = np.argmax(predicted_probabilities, axis=1)
            predicted_classes = self._classes[0].take(predicted_indexes)
            return predicted_classes
        else:
            # Multilabel: threshold each output's probability at 0.5.
            predicted_indices = (predicted_probabilities > 0.5).astype(int)
            n_samples = predicted_probabilities.shape[0]
            predicted_classes = np.zeros((n_samples, self._n_outputs))
            for k in range(self._n_outputs):
                output_predicted_indexes = predicted_indices[:, k].reshape(-1)
                predicted_classes[:, k] = self._classes[k].take(output_predicted_indexes)
            return predicted_classes

    def predict_proba(self, X, batch_size=None, n_jobs=1):
        """Return the raw probability output of the wrapped AutoML object."""
        return self._automl.predict(X, batch_size=batch_size, n_jobs=n_jobs)
class AutoMLRegressor(AutoMLDecorator):
    """Decorator that configures the wrapped AutoML object for regression."""

    def fit(self, X, y,
            metric=None,
            feat_type=None,
            dataset_name=None,
            ):
        """Fit the wrapped AutoML object on a regression task.

        Uses the r2 metric when no metric is supplied.
        """
        chosen_metric = r2 if metric is None else metric
        return self._automl.fit(X=X, y=y, task=REGRESSION,
                                metric=chosen_metric,
                                feat_type=feat_type,
                                dataset_name=dataset_name)
|
{"hexsha": "37c6d12eb75d1ee75c2731a0f52a6eb9f18429e1", "size": 24093, "ext": "py", "lang": "Python", "max_stars_repo_path": "autosklearn/estimators.py", "max_stars_repo_name": "jimgoo/auto-sklearn", "max_stars_repo_head_hexsha": "a263efb49f7b7f597963bc1e787105ea7615ea75", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2017-11-04T05:33:23.000Z", "max_stars_repo_stars_event_max_datetime": "2017-11-04T05:33:23.000Z", "max_issues_repo_path": "autosklearn/estimators.py", "max_issues_repo_name": "jimgoo/auto-sklearn", "max_issues_repo_head_hexsha": "a263efb49f7b7f597963bc1e787105ea7615ea75", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "autosklearn/estimators.py", "max_forks_repo_name": "jimgoo/auto-sklearn", "max_forks_repo_head_hexsha": "a263efb49f7b7f597963bc1e787105ea7615ea75", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.0015772871, "max_line_length": 105, "alphanum_fraction": 0.616818163, "include": true, "reason": "import numpy,import scipy", "num_tokens": 4925}
|
#pragma once
#include "common.hpp"
#include "sessions.hpp"
#include <boost/asio/io_context.hpp>
#include <boost/asio/ip/address.hpp>
#include <boost/beast/http/message.hpp>
#include <boost/beast/http/string_body.hpp>
#include <boost/beast/websocket.hpp>
#include <boost/url/url_view.hpp>
#include <string>
#include <string_view>
namespace crow
{
// HTTP request wrapper: exposes a stable interface over the underlying
// boost::beast request plus per-request metadata filled in by the owning
// connection (URL views, peer address, session, role).
struct Request
{
    // The underlying beast request; all accessors below delegate to it.
    boost::beast::http::request<boost::beast::http::string_body>& req;
    // Header fields of the same request (reference into req.base()).
    boost::beast::http::fields& fields;
    // Raw request target as a string view.
    std::string_view url{};
    // Parsed URL view (boost.url) over the target.
    boost::urls::url_view urlView{};
    // Query parameters of the parsed URL.
    boost::urls::url_view::params_type urlParams{};
    // Whether the request arrived over a secure transport.
    // NOTE(review): not set in this struct -- presumably assigned by the
    // owning connection; confirm against the connection code.
    bool isSecure{false};
    // Reference to the request body (string_body payload).
    const std::string& body;
    // io_context of the owning connection, if provided.
    boost::asio::io_context* ioService{};
    // Remote peer address, if provided by the owning connection.
    boost::asio::ip::address ipAddress{};
    // Authenticated user session associated with this request, if any.
    std::shared_ptr<persistent_data::UserSession> session;
    // Role of the authenticated user, if any.
    std::string userRole{};
    // Binds the wrapper to an existing beast request; fields and body are
    // references into the same object, so reqIn must outlive this Request.
    Request(
        boost::beast::http::request<boost::beast::http::string_body>& reqIn) :
        req(reqIn),
        fields(reqIn.base()), body(reqIn.body())
    {}

    // HTTP verb of the request (GET, POST, ...).
    boost::beast::http::verb method() const
    {
        return req.method();
    }

    // Header lookup by name; returns an empty view when absent.
    std::string_view getHeaderValue(std::string_view key) const
    {
        return req[key];
    }

    // Header lookup by well-known field enum; returns an empty view when absent.
    std::string_view getHeaderValue(boost::beast::http::field key) const
    {
        return req[key];
    }

    // HTTP verb as its textual representation.
    std::string_view methodString() const
    {
        return req.method_string();
    }

    // Request target (path + query) as sent by the client.
    std::string_view target() const
    {
        return req.target();
    }

    // HTTP protocol version (e.g. 11 for HTTP/1.1).
    unsigned version() const
    {
        return req.version();
    }

    // True when the request asks for a WebSocket upgrade.
    bool isUpgrade() const
    {
        return boost::beast::websocket::is_upgrade(req);
    }

    // True when the connection should be kept alive after this request.
    bool keepAlive() const
    {
        return req.keep_alive();
    }
};
} // namespace crow
|
{"hexsha": "44ed2e30f5269141f5a873019ec5ff23fe7c5b6c", "size": 1728, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "http/http_request.hpp", "max_stars_repo_name": "pachu-nc/bmcweb", "max_stars_repo_head_hexsha": "aab0d90061b1b3ebd15f0976e188d59facb0a956", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2021-07-15T13:47:32.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-15T13:47:32.000Z", "max_issues_repo_path": "http/http_request.hpp", "max_issues_repo_name": "pachu-nc/bmcweb", "max_issues_repo_head_hexsha": "aab0d90061b1b3ebd15f0976e188d59facb0a956", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1.0, "max_issues_repo_issues_event_min_datetime": "2021-09-30T16:46:27.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-30T16:46:27.000Z", "max_forks_repo_path": "http/http_request.hpp", "max_forks_repo_name": "pachu-nc/bmcweb", "max_forks_repo_head_hexsha": "aab0d90061b1b3ebd15f0976e188d59facb0a956", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 20.5714285714, "max_line_length": 78, "alphanum_fraction": 0.6290509259, "num_tokens": 423}
|
# coding: utf-8
# # Exercício - Theo - Marcus Leandro
#
# ### Objetivo
# - Resolver exercícios mencionados no link https://stonepgto.slack.com/archives/CHH394R4Z/p1555332079003900
#
#
# ### Resumo comando das questões
#
# 11. Reajuste de salário baseado em condição e apresentação descritiva da relação de nova e antiga informação
#
# 12. Cálculo descritivo folha de pagamento. Variáveis como desconto e FGTS.
#
# 13. Conversão número em dia da semana
#
# 14. Interpretação e status de avaliação dado duas notas
# ## Questão 11.
# In[2]:
# Leftover cell-number literal from the notebook export (a no-op expression).
11.
# Libraries commonly used in these exercises (only pandas is needed here).
import pandas as pd
import numpy as np
import pylab as pl
import matplotlib.pyplot as plt
# User input: current monthly salary.
input_monthly_payment = int(input("Qual valor de salário você gostaria de saber informações sobre reajuste? R$") )
# Salary value under analysis.
monthly_payment = input_monthly_payment
# Raise brackets: <=280 -> 20%, <700 -> 15%, <1500 -> 10%, otherwise 5%.
if monthly_payment <= 280:
    new_monthly_payment = monthly_payment * 1.2
elif monthly_payment < 700:
    new_monthly_payment = monthly_payment * 1.15
elif monthly_payment < 1500:
    new_monthly_payment = monthly_payment * 1.1
else:
    new_monthly_payment = monthly_payment * 1.05
# Absolute raise amount.
monthly_payment_delta = new_monthly_payment - monthly_payment
# Relative raise (fraction of the old salary).
monthly_payment_percentage = (monthly_payment_delta/monthly_payment)
# Percentage formatting; source: https://stackoverflow.com/questions/5306756/how-to-show-percentage-in-python
formated_monthly_payment_percentage = '{:.1%}'.format(monthly_payment_percentage)
# One-row summary table (Portuguese column names are part of the output).
d = {'salario_antes_reajuste': [monthly_payment], 'percentual_aumento': [formated_monthly_payment_percentage], 'valor_aumento': [monthly_payment_delta], 'novo_salario': [new_monthly_payment]}
df = pd.DataFrame(data=d)
df.rename(index={0:'Colaborador'}, inplace=True)
# Display the result (notebook-style trailing expression).
df
# ## Questão 12.
# In[3]:
# (The stray "12." cell-number literal from the notebook export was removed.)
# Libraries commonly used in these exercises (only pandas is needed here).
import pandas as pd
import numpy as np
import pylab as pl
import matplotlib.pyplot as plt

# User input: hourly wage and hours worked in the month.
input_money_per_hour_payment = int(input("Qual o valor da sua hora? R$") )
amount_hours = int(input("Qual a quantidade de horas trabalhas no mês? "))

# Gross salary from hourly wage and hours worked.
salary = input_money_per_hour_payment * amount_hours

# Income-tax (IR) deduction brackets.
# BUGFIX: the original used factors 1.0 (<=900) and 1.1 (<=2500), which would
# deduct 100% / 110% of the gross salary. The intended brackets for this
# classic exercise are exempt / 5% / 10% / 20%.
if salary <= 900:
    ir_discount_salary = salary * 0.0
elif salary <= 1500:
    ir_discount_salary = salary * 0.05
elif salary <= 2500:
    ir_discount_salary = salary * 0.10
else:
    ir_discount_salary = salary * 0.2
# Mandatory INSS deduction (10%).
inss_discount_salary = salary * 0.1
# FGTS contribution (11%) -- shown for information, not deducted below.
fgts_deduction = salary * 0.11
# Total deductions applied to the salary (IR + INSS).
total_discounts = ir_discount_salary + inss_discount_salary
# Net salary after deductions.
net_salary = salary - total_discounts
# Label describing how the gross salary was computed (hours * hourly wage).
detailed_salary = "Salário Bruto: ({} * {})".format(amount_hours, input_money_per_hour_payment)
# One-row summary table (Portuguese labels are part of the output).
d = {detailed_salary: [salary], '(-) IR (5%)': [ir_discount_salary], '(-) INSS (10%)': [inss_discount_salary],
     'FGTS (11%)': [fgts_deduction], 'Total de descontos': [total_discounts], 'Salário Líquido': [net_salary]}
df = pd.DataFrame(data=d)
df.rename(index={0:''}, inplace=True)
# Transposed so the table resembles a pay slip (variables as rows).
df.transpose()
# ## Questão 13.
# In[5]:
# Libraries commonly used in these exercises (none are needed here).
import pandas as pd
import numpy as np
import pylab as pl
import matplotlib.pyplot as plt
# User input: day-of-week number (1 = Sunday ... 7 = Saturday).
numero_semana = int(input("Qual o numero da semana? "))
# NOTE(review): a dict lookup with a default would be a more compact
# alternative to this if/elif chain.
if numero_semana == 1:
    dia_semana = "Domingo"
elif numero_semana == 2:
    dia_semana = "Segunda"
elif numero_semana == 3:
    dia_semana = "Terça"
elif numero_semana == 4:
    dia_semana = "Quarta"
elif numero_semana == 5:
    dia_semana = "Quinta"
elif numero_semana == 6:
    dia_semana = "Sexta"
elif numero_semana == 7:
    dia_semana = "Sabado"
else:
    # Any number outside 1..7 is reported as invalid.
    dia_semana = "valor inválido"
print(dia_semana)
# ## Questão 14.
# In[8]:
# Libraries commonly used in these exercises (only pandas is needed here).
import pandas as pd
import numpy as np
import pylab as pl
import matplotlib.pyplot as plt
# User input: two partial grades (rounded to 2 decimals) and the student name.
number_grade_1 = round(float(input("Qual foi sua primeira nota parcial do semestre? ")),2)
number_grade_2 = round(float(input("Qual foi sua segunda nota parcial do semestre? ")),2)
name_just_for_fun = input("Qual o seu nome? ")
# Semester average, rounded to 3 decimals.
average = round((number_grade_1 + number_grade_2) / 2,3)
# Letter grade brackets: <=4 E, <=6 D, <=7.5 C, <=9 B, otherwise A.
if average <= 4:
    letter_grade = "E"
elif average <= 6:
    letter_grade = "D"
elif average <= 7.5:
    letter_grade = "C"
elif average <= 9:
    letter_grade = "B"
else:
    letter_grade = "A"
# Final status: D and E fail ("REPROVADO"), everything else passes.
if letter_grade == "E":
    final_status = "REPROVADO"
elif letter_grade == "D":
    final_status = "REPROVADO"
else:
    final_status = "APROVADO"
# One-row summary table (Portuguese labels are part of the output).
d = {"Nota_01": [number_grade_1], 'Nota_02': [number_grade_2], 'Média': [average],
     'Conceito': [letter_grade], 'Status_final': [final_status]}
df = pd.DataFrame(data=d)
df.rename(index={0:name_just_for_fun}, inplace=True)
# Transposed for a pay-slip-like vertical layout (notebook trailing expression).
df.transpose()
# ## Comentários das questões
# 11. Um dos dois exercício mais legais. Erro de português 'contraram' ao invés de 'contrataram'
# 12. Talvez o exercício mais interessante. Comando da questão está super confuso nas primeiras 04 linhas. Achei mal escrito nesse primeiro parágrafo.
# 13. Achei chato. Fiquei curioso para saber outras maneiras de se realizar isso, de forma mais sofisticada, com menor quantidade de 'ifs'
# 14. Divertidinha. Mas bem parecido com as primeiras 02 questões.
|
{"hexsha": "74e32619f8e4f670da17e8fed4862597e0c959d4", "size": 5639, "ext": "py", "lang": "Python", "max_stars_repo_path": "exercicios_abril_19/Exercicio Python - Theo - Marcus Leandro 19 abril.py", "max_stars_repo_name": "theocarvalho/aula_python_marcus_leandro", "max_stars_repo_head_hexsha": "119158680e97ae8dc8bbb1a2aa357aabe14e38c0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "exercicios_abril_19/Exercicio Python - Theo - Marcus Leandro 19 abril.py", "max_issues_repo_name": "theocarvalho/aula_python_marcus_leandro", "max_issues_repo_head_hexsha": "119158680e97ae8dc8bbb1a2aa357aabe14e38c0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "exercicios_abril_19/Exercicio Python - Theo - Marcus Leandro 19 abril.py", "max_forks_repo_name": "theocarvalho/aula_python_marcus_leandro", "max_forks_repo_head_hexsha": "119158680e97ae8dc8bbb1a2aa357aabe14e38c0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.6421568627, "max_line_length": 191, "alphanum_fraction": 0.7446355737, "include": true, "reason": "import numpy", "num_tokens": 1634}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import numpy as np
import ase.io
from ase import Atoms, Atom
def write_xyz(*args, **kwargs):
    """Build the text of one extended-XYZ frame.

    Positions are cartesian (AA) and forces are eV/AA. Positional arguments
    are accepted but ignored (kept for backward compatibility); all data is
    passed by keyword.

    Required keyword arguments
    --------------------------
    symbols : sequence of str
        Chemical symbol of each atom.
    positions : sequence of 3-sequences of float
        Cartesian coordinates of each atom.

    Optional keyword arguments
    --------------------------
    forces : sequence of 3-sequences of float
        Per-atom forces; when given, a forces column is added.
    description : str, step : int, pbc : sequence of str,
    Lattice / stress / virial : 9 floats (any nestable shape),
    energy / free_energy : float
        Written into the comment (second) line, in a fixed order.

    Returns
    -------
    str
        The frame text, ready to be written to an .xyz file.

    Raises
    ------
    ValueError
        If ``symbols`` or ``positions`` is missing, or if a
        Lattice/stress/virial value does not flatten to 9 components.
    """
    # symbols and positions are required
    if 'symbols' not in kwargs:
        raise ValueError('symbols is a required key.')
    symbols = kwargs['symbols']
    if 'positions' not in kwargs:
        raise ValueError('positions is a required key.')
    positions = kwargs['positions']

    natoms = len(symbols)

    # Comment-line properties are emitted in this fixed order.
    comment_properties = [
        'description',
        'step',
        'pbc', 'Lattice',
        'energy', 'free_energy',
        'stress', 'virial',
    ]

    # Build the comment line from whichever properties were supplied.
    # (The original guarded this with `if len(kwargs) > 0`, which is always
    # true because symbols/positions are required, so the guard was dropped.)
    comment_content = ''
    for key in comment_properties:
        if key not in kwargs:
            continue
        value = kwargs[key]
        if key in ('energy', 'free_energy'):
            # Scalar float property.
            comment_content += ("{:<s}="+"{:<.4f}"+" ").format(key, float(value))
        elif key in ('Lattice', 'stress', 'virial'):
            # 3x3 tensor property, flattened to 9 floats and quoted.
            components = list(np.array(value, dtype=float).ravel())
            if len(components) != 9:
                raise ValueError(
                    'Lattice/stress/virial must have 9 components.')
            comment_content += ("{:<s}="+"\""+"{:<.4f} "*len(components)+"\""+
                                " ").format(key, *components)
        elif key == 'pbc':
            # Quoted list of strings (e.g. "T T T").
            comment_content += ("{:<s}="+"\""+"{:<s} "*len(value)+"\""+" ") \
                .format(key, *value)
        elif key == 'step':
            comment_content += ("{:<s}="+"{:<d} "+" ").format(key, int(value))
        elif key == 'description':
            comment_content += ("{:<s}="+"\"{:<s}\""+" ").format(key, value)

    # First line: atom count; second line: comment + Properties descriptor;
    # then one line per atom.
    content = "{:<d}\n".format(natoms)
    if 'forces' in kwargs:
        forces = kwargs['forces']
        comment_content += 'Properties=species:S:1:pos:R:3:forces:R:3\n'
        content += comment_content
        for i in range(natoms):
            content += ('{:<4s} '+'{:>12.6f} '*6+'\n') \
                .format(symbols[i], *list(positions[i]), *list(forces[i]))
    else:
        comment_content += 'Properties=species:S:1:pos:R:3\n'
        content += comment_content
        for i in range(natoms):
            content += ('{:<4s} '+'{:>12.6f} '*3+'\n') \
                .format(symbols[i], *list(positions[i]))

    return content
# Importable as a library module; no command-line behavior is defined.
if __name__ == '__main__':
    pass
|
{"hexsha": "80e5bb8d4790b2857423af39ba327109b1721689", "size": 3130, "ext": "py", "lang": "Python", "max_stars_repo_path": "common/coreXYZ.py", "max_stars_repo_name": "hsulab/DailyScripts", "max_stars_repo_head_hexsha": "26b03cfb721fd66f39c86df50d2ec5866e651d6e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-06-08T21:39:44.000Z", "max_stars_repo_stars_event_max_datetime": "2020-10-18T15:12:47.000Z", "max_issues_repo_path": "common/coreXYZ.py", "max_issues_repo_name": "hsulab/DailyScripts", "max_issues_repo_head_hexsha": "26b03cfb721fd66f39c86df50d2ec5866e651d6e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "common/coreXYZ.py", "max_forks_repo_name": "hsulab/DailyScripts", "max_forks_repo_head_hexsha": "26b03cfb721fd66f39c86df50d2ec5866e651d6e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.6041666667, "max_line_length": 85, "alphanum_fraction": 0.4674121406, "include": true, "reason": "import numpy", "num_tokens": 724}
|
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import scipy.ndimage as ndi
import skimage.measure
class General(object):
    """Load a feature-point cloud from a CSV file and rasterize it.

    Attributes:
        pdPointCloud: DataFrame read from the CSV; must contain at least
            'Point_X' and 'Point_Y' columns (pixel coordinates).
        rPointCloud: boolean raster of shape ``raster_shape``, True at
            every feature-point location.
    """

    def __init__(self, filePath_pointCloud_csv, raster_shape):
        self.set_pointCloud(filePath_pointCloud_csv, raster_shape)

    def set_pointCloud(self, filePath_csv, raster_shape):
        """Read the point CSV and build the boolean occupancy raster.

        Args:
            filePath_csv: path to a semicolon-separated CSV whose first
                column is the index.
            raster_shape: (rows, cols) shape of the output raster.
        """
        # Read the feature point information from the CSV file.
        self.pdPointCloud = pd.read_csv(filePath_csv, sep=';', index_col=0)
        # Raster of zeros (False) except at the feature-point locations.
        self.rPointCloud = np.full(raster_shape, 0, dtype=np.bool_)
        point_xy = self.pdPointCloud.loc[:, ['Point_X', 'Point_Y']].values
        # BUG FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin int is the documented replacement.
        point_xy = point_xy.astype(int)
        # Raster indexing is (row=Y, col=X).
        self.rPointCloud[point_xy[:, 1], point_xy[:, 0]] = 1
class FromPointToGroupLabel(General):
    """Cluster nearby feature points into labelled groups and derive
    per-group (intertidal-bar) characteristics."""

    def __init__(
            self, filePath_pointCloud_csv, raster_shape, pixel_resolution_median):
        super().__init__(filePath_pointCloud_csv, raster_shape)
        # Median pixel resolution — presumably metres per pixel (used to
        # convert the major-axis length into a width in metres); TODO confirm.
        self.pixel_resolution_median = pixel_resolution_median

    def group_points_in_labels(self, number_iterations=3):
        """Assign a connected-component group label to every point.

        Args:
            number_iterations: binary-dilation iterations; controls how far
                apart two points may be and still join one group.
        """
        # Dilation merges points that lie close together into one blob.
        rPointCloud_dil = ndi.binary_dilation(
            self.rPointCloud, iterations=number_iterations
        )
        # Unique integer label for every connected group of dilated points.
        labels = skimage.measure.label(rPointCloud_dil, return_num=False)
        # Keep label values only at the original (undilated) point locations.
        self.rPointCloud_labels = np.multiply(self.rPointCloud, labels)
        # Transfer the label under each point into the DataFrame.
        npLabel = np.array([])
        pdPoint_xy = self.pdPointCloud.loc[:, ['Point_X', 'Point_Y']]
        for pdIx, row in pdPoint_xy.iterrows():
            # BUG FIX: np.int was removed in NumPy 1.24; builtin int is the
            # documented replacement.
            pointLabel = self.rPointCloud_labels[
                int(row.loc['Point_Y']), int(row.loc['Point_X'])
            ]
            npLabel = np.append(npLabel, pointLabel)
        self.pdPointCloud.loc[:, 'Group_Label'] = npLabel

    def calc_intertidalBarCharacteristics(self):
        """Compute centroid, orientation and width for each labelled group
        and write them back into the point DataFrame (-1 = no group)."""
        self.pdPointCloud.loc[:, 'Intertidal_Bar_Centroid_X'] = -1
        self.pdPointCloud.loc[:, 'Intertidal_Bar_Centroid_Y'] = -1
        self.pdPointCloud.loc[:, 'Intertidal_Bar_Orientation_degrees'] = -1
        self.pdPointCloud.loc[:, 'Intertidal_Bar_Width_m'] = -1
        regionprops = skimage.measure.regionprops(self.rPointCloud_labels)
        for rp in regionprops:
            npBarLabel = rp.label
            # Reuse one boolean mask per group instead of rebuilding it
            # for every assigned column.
            group_mask = self.pdPointCloud['Group_Label'] == npBarLabel
            self.pdPointCloud.loc[
                group_mask, 'Intertidal_Bar_Centroid_X'
            ] = int(rp.centroid[1])
            self.pdPointCloud.loc[
                group_mask, 'Intertidal_Bar_Centroid_Y'
            ] = int(rp.centroid[0])
            # For difference in angles: see method apply_beachFilter of class
            # DataAcquiredFromPlatform. Normalise into (-pi/2, pi/2].
            bar_orientation = np.pi / 2 - rp.orientation
            if bar_orientation < -np.pi / 2:
                bar_orientation += np.pi
            elif bar_orientation > np.pi / 2:
                bar_orientation -= np.pi
            self.pdPointCloud.loc[
                group_mask, 'Intertidal_Bar_Orientation_degrees'
            ] = np.around(np.degrees(bar_orientation), 1)
            self.pdPointCloud.loc[
                group_mask, 'Intertidal_Bar_Width_m'
            ] = int(rp.major_axis_length * self.pixel_resolution_median)

    def get_appendedInformation(self):
        """Return the point DataFrame with all appended columns."""
        return self.pdPointCloud
class BottomPointToChannel(General):
    """Point-cloud loader for bottom points; currently identical to General.

    NOTE(review): the second constructor argument is named processingObject
    but is forwarded as General's raster_shape — confirm callers pass a
    (rows, cols) shape here.
    """

    def __init__(self, filePath_pointCloud_csv, processingObject):
        super().__init__(filePath_pointCloud_csv, processingObject)
|
{"hexsha": "dc3e0861ea764b24832b93564c59df4e9247ec9b", "size": 4172, "ext": "py", "lang": "Python", "max_stars_repo_path": "point_analysis/labelling.py", "max_stars_repo_name": "ComteDeLooz/protect", "max_stars_repo_head_hexsha": "d8b8b404315c6ba90cd56c1b394ce24c2118f8ee", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2021-12-07T11:26:30.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-31T15:51:55.000Z", "max_issues_repo_path": "point_analysis/labelling.py", "max_issues_repo_name": "ComteDeLooz/protect", "max_issues_repo_head_hexsha": "d8b8b404315c6ba90cd56c1b394ce24c2118f8ee", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "point_analysis/labelling.py", "max_forks_repo_name": "ComteDeLooz/protect", "max_forks_repo_head_hexsha": "d8b8b404315c6ba90cd56c1b394ce24c2118f8ee", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.1153846154, "max_line_length": 103, "alphanum_fraction": 0.6136145733, "include": true, "reason": "import numpy,import scipy", "num_tokens": 913}
|
\section{Results}
\label{sec:Results}
In this section we describe the results of our methodologies on the observational and treatment data. We investigate the relations between features and symptoms of the real data provided in the files\footnote{See GitHub} and determine answers to our questions from the data.
\subsection{Observational Data: Task $1_a$}
\label{subsec:1_a_real}
\graphicspath{{pictures/task1a/}}
In this subsection we use the real data provided in the \textit{Observation-feature.csv} file.
%Methodology 1, observational data
Applying methodology\textsubscript{1} to the observational data, we obtain the following correlations between features and $Death$ shown in Figure \textbf{Figure \ref{fig:1a1r}}. We firstly observe that vaccines have relevant negative correlations, while for positive correlations $CovidPositive$ is the major influencer of $Death$.
\begin{figure}[H]
\includegraphics[width=\linewidth]{1a1r.png}
\caption{Correlations between Death and other features for Observational Data.}
\label{fig:1a1r}
\end{figure}
\begin{figure}[H]
\includegraphics[width=\linewidth]{1a2r.png}
\caption{Auto-correlations among features for Observational Data.}
\label{fig:1a2r}
\end{figure}
The auto-correlation matrix in \textbf{Figure \ref{fig:1a2r}} shows high values of correlation between $CovidPositive$ and some symptoms: such as $No\_Taste/Smell$, $Pneumonia$ and $Blood\_Clots$. Hence, similarly to the synthetic data we found that it is necessary to work in a subset of the data containing only the $CovidPositive$ individuals to eliminate the dependence of symptoms on it. As our experiments with synthetic showed, this should allow us to determine the features which are actually predictive of death.
\begin{figure}[H]
\includegraphics[width=\linewidth]{1a3r.png}
\caption{Correlations between Death and selected features for Observational Data.}
\label{fig:1a3r}
\end{figure}
Then we obtain the following correlations, where we can observe that vaccines are much more strongly predictive of survival. Perhaps even more notably, the features most predictive of death are no longer dominated solely by symptoms, but now show several genes which are especially predictive of death, namely $Gene_{16}$, $Gene_{27}$, $Gene_{63}$, $Gene_{77}$ in the top 5 features. The only symptom remaining among the top 5 predictive features was $Pneumonia$.
%Methodology 2, observational data 1a
Following from the useful indications from methodology\textsubscript{1}, we then use methodology\textsubscript{2}, yielding the conditional probability of death given explanatory features.
\begin{figure}[H]
\includegraphics[width=\linewidth]{1a7r.png}
\caption{Conditional Probability of Death given $Feature = 0$.}
\label{fig:1a7r}
\end{figure}
\begin{figure}[H]
\includegraphics[width=\linewidth]{1a8r.png}
\caption{Conditional Probability of Death given $Feature = 1$.}
\label{fig:1a8r}
\end{figure}
\textbf{Figures \ref{fig:1a7r}} and \textbf{\ref{fig:1a8r}} show the conditional probability of death given the features from the dataset. We see in the plots that the probability of death increased by the greatest amount when conditioned on $Pneumonia$, from a mean of approximately $0.013$ to $0.025$. We also see a small increase in the probability of death when conditioned on the gene features, however the range of values when the genes are present versus not present overlap quite heavily. In this regard, we are not able to deduce that these genes affect the probability of death from methodology\textsubscript{2}.
%Methodology 3, observational data 1a
We then apply our final methodology to the observational data to determine our final conclusions with respect to the effect of genes and symptoms on death in the observational data.
\begin{figure}[H]
\includegraphics[width=\linewidth]{1a9r.png}
\caption{Coefficient Values of Logistic Regression observational data, after randomized grid-search 500-fold CV.}
\label{fig:1a9r}
\end{figure}
\begin{figure}[H]
\includegraphics[width=\linewidth]{1a10r.png}
\caption{95\% Confidence Interval for Logistic Regression Coefficient Values.}
\label{fig:1a10r}
\end{figure}
Following our procedure, the $Logistic$ $regression$ models were trained on the top 10 negatively correlated and top 10 positively correlated features from observational data. When trained with all features, including $CovidPositive$, the logistic regression models were unable to predict $Death$ at any better than $0.5$ (i.e. random guess). When the models were trained on a subset containing only the $CovidPositive$ population without feature selection, the best model had an accuracy of $0.71$. Finally, when using the 10 most negatively and 10 most positively correlated features, the best scoring model had an accuracy of $0.87$. This approach therefore remains the same in our subsequent tasks and models trained for methodology\textsubscript{3}.
The resulting coefficient values are as shown in \ref{fig:1a9r}, with $95\%$ confidence intervals for the logistic regression shown in \textbf{Figure \ref{fig:1a10r}}. This is to say that we are $95\%$ confident that the true value of the coefficient will be within the intervals shown in the figure. From this, we can conclude that if a feature has a lower range of possible values, then it is less predictive of death (or more predictive of death if the range is higher than other features).
Based on these results along with the previous methodologies, we return to our first set of questions:
\begin{itemize}
\item \textit{Can we predict death rate given age, gender, income, genes and comorbidities?}
The logistic regression coefficients and accuracy show that it is possible to train a model which is able to accurately predict death.
\item Our second question (\textit{Which explanatory features are best to predict death?})
is answered by these results as well, showing that the symptoms and comorbidities which are most effective in predicting death are $Gene_{16}$, $Gene_{63}$ and $Gene_{27}$. $Pneumonia$ did not arise in methodology\textsubscript{3} as a top predictor of death, and $Gene_7$ does not appear from methodologies $1$ and $2$, so we conclude that these features are most likely comparatively less important predictors of death (though likely still risk factors).
\end{itemize}
\subsection{Observational Data: Task $1_b, 1_c$}
In this section we apply our methodologies for estimating the efficacy of vaccines and investigate their possible side-effects. The results of methodology 1 are the same as from Subsection \ref{subsec:1_a_real}, but in this case we focus on the fact that each of the vaccines are highly negatively correlated to death (\textbf{Figure \ref{fig:1a3r}}), indicating that they are possibly effective in preventing it.
%Methodology 2, observational 1bc
On the other hand, methodology\textsubscript{2} differs from our previous analysis, as in this case we are modelling the conditional probability of side-effects (including death) from vaccines.
\graphicspath{{pictures/task1b/}}
\begin{figure}[H]
\includegraphics[width=\linewidth]{1b5r.png}
\caption{Conditional Probability of Death given $Feature = 0$.}
\label{fig:1b5r}
\end{figure}
\begin{figure}[H]
\includegraphics[width=\linewidth]{1b6r.png}
\caption{Conditional Probability of Death given $Feature = 1$.}
\label{fig:1b6r}
\end{figure}
\textbf{Figures \ref{fig:1b5r}} and \textbf{\ref{fig:1b6r}} show the conditional probabilities of side-effects where the patient is unvaccinated and vaccinated, respectively. The figures show that the probability of $Death$ is greatly reduced when conditioned on all of the vaccines, with $Vaccine1$ being the most effective (specifically, reducing the probability of death the most). In this case we also observe that the quantiles for the probability of death given vaccines are greatly below the probability of death without vaccines. Interestingly, while both this methodology and methodology\textsubscript{1} show $Vaccine1$ to have the greatest effect on reducing the probability of death, they differ on which of $Vaccine2$ or $Vaccine3$ is the second most effective in preventing death.
An additional aspect of our analysis with methodology\textsubscript{2} is the estimation of side effects from vaccines. With this methodology we are also able to calculate the conditional probability of symptoms given vaccines. \textbf{Figures \ref{fig:1c3r}} and \textbf{\ref{fig:1c4r}} show that the probability of $Fever$ and $Headache$ symptoms was greatly increased when conditioned on the vaccines. Without vaccines the mean probability of $Headache$ was $0.03$ and that of $Fever$ was $0.05$; conditioned on the vaccines, these rose to $0.05$ for $Headache$ and $0.085$ for $Fever$. We did not find a similar association with any other side-effects.
The mean probability of $Headache$ symptom increased from approximately $0.03$ to $0.05$ when conditioned on each of the vaccines (all resulted in similar mean values), while the mean probability of $Fever$ increased from $0.05$ to $0.085$. For the probability of $Fever$ and $Headache$ conditioned on each of the vaccines, the quantiles of the probability estimate did not overlap with the probability estimate conditioned on not receiving the vaccine. For this reason we conclude that this is a reasonable indication that the vaccines are associated with $Fever$ and $Headache$ as side effects.
\graphicspath{{pictures/task1c/}}
\begin{figure}[H]
\includegraphics[width=\linewidth]{1c3r.png}
\caption{Conditional Probability of Symptoms given $Vaccines = 0$.}
\label{fig:1c3r}
\end{figure}
\begin{figure}[H]
\includegraphics[width=\linewidth]{1c4r.png}
\caption{Conditional Probability of Symptoms given $Vaccines = 1$.}
\label{fig:1c4r}
\end{figure}
%Methodology 3, observational 1bc
Next, turning to methodology\textsubscript{3}, we train $Logistic$ $regression$ models predicting symptoms including death. As with methodology\textsubscript{1}, we refer to the same models described in \ref{subsec:1_a_real} to determine the answer to our second set of questions (Tasks 1b and 1c). The top 3 negative features in the logistic regression model are the three vaccines (\textbf{Figure} \ref{fig:1a9r}), indicating that they are the most predictive of survival. The order of greatest effectiveness can be loosely observed as $Vaccine1$, $Vaccine2$, $Vaccine3$. Nonetheless, the confidence intervals on the effectiveness of each vaccine (as measured with the coefficient values) broadly overlap (\textbf{Figure \ref{fig:1a10r}}) and all three can be considered "effective" under this measure (and there may be no reason for a patient to, for instance, prefer $Vaccine1$ to $Vaccine3$ in practice).
%Side effects here
In conclusion with this, we answer the questions posed for Tasks 1b and 1c.
\begin{itemize}
\item \textit{Can we predict death rate (efficacy) of a vaccine?}
Yes, our methodologies indicate that all three vaccines are very effective in reducing the chance of death in the Covid-Positive population.
\item \textit{Which vaccine is most effective?}
Our analysis indicates that $Vaccine1$ is the most effective of the three. However, we cannot definitively say whether $Vaccine2$ or $Vaccine3$ is superior, as the results of our methodologies differ on which is more predictive of survival.
\item \textit{Can we predict a specific symptom(s) (side-effect(s)) of a vaccine?}
Through analyzing the conditional probability estimates obtained in methodology\textsubscript{2}, we predict that all three of the vaccines in the data are associated with side-effects of $Headache$ and $Fever$. In the case of features such as blood clots, the conditional probability estimates do not differ sufficiently from the conditional probability of death without the vaccines to conclude that the vaccines may cause them as side effects. Blood clots may be associated with $Vaccine2$ as shown in \ref{fig:1c4r}, but the overlap between this estimate and the baseline is too great to rule out the possibility of this being merely random chance.
\item \textit{Which side-effect(s) each vaccine produce?}
From our analysis it appears that all three vaccines produce $Fever$ and $Headache$ as side-effects, with the unconfirmed possibility of blood clots from $Vaccine3$, as noted above.
\end{itemize}
\subsection{Treatment Data: Task $2$}
\graphicspath{{pictures/task2/}}
In the final subsection we use the real data contained in the files 'treatment\_features.csv', 'treatment\_action.csv' and 'treatment\_outcome.csv' regarding the treatments.
%Methodology 1, treatment data (Task 2)
We again perform methodology\textsubscript{1} in order to obtain the correlations of features in the data. The results shown in \textbf{Figure \ref{fig:2_1r}} are similar to those in the observational data, with the new addition of $Treatment2$ as a variable which is highly negatively correlated with death. Notably, we do not observe $Treatment1$ in the top 10 negatively correlated variables.
\begin{figure}[H]
\includegraphics[width=\linewidth]{2_1r.png}
\caption{Correlations between Death and selected features for Observational Data.}
\label{fig:2_1r}
\end{figure}
% Methodology 2, treatment data (Task 2)
We then proceed with analyzing \textbf{Figures \ref{fig:2_5r_top}} and \textbf{Figures \ref{fig:2_6r_top}} using methodology\textsubscript{2}. The results show that the probability of $Death$ conditioned on $Treatment2$ appears to be lowered, but there is some overlap in the estimates from this method. For this reason, these results may not be as indicative in establishing that $Treatment2$ is effective in preventing $Death$.
\begin{figure}[H]
\includegraphics[width=\linewidth]{2_5r_top.png}
\caption{Conditional Probability of $Death$ given selected $Features = 0$.}
\label{fig:2_5r_top}
\end{figure}
\begin{figure}[H]
\includegraphics[width=\linewidth]{2_6r_top.png}
\caption{Conditional Probability of $Death$ given selected $Features = 1$.}
\label{fig:2_6r_top}
\end{figure}
We also observe in \textbf{Figures \ref{fig:2_5r_after}} and \textbf{\ref{fig:2_6r_after}} that the probability of symptoms when conditioned on treatments are very similar to the probability when conditioned on the absence of treatment. The one notable feature of these charts is that the probability of $Blood\_clots$ conditioned on $Treatment 1$ is $0$. This is however a very marginal result and so we can not establish any evidence of side-effects from treatments using this method. We note that the amount of total samples in the data was very small, particularly with respect to certain symptoms. This may be a reason that this methodology does not reach a clear conclusion and estimates several probabilities precisely at $0$.
\begin{figure}[H]
\includegraphics[width=\linewidth]{2_5r_after.png}
\caption{Conditional Probability of Symptoms given $Treatments = 0$.}
\label{fig:2_5r_after}
\end{figure}
\begin{figure}[H]
\includegraphics[width=\linewidth]{2_6r_after.png}
\caption{Conditional Probability of Symptoms given $Treatments = 1$.}
\label{fig:2_6r_after}
\end{figure}
%Methodology 3, treatment data (Task 2)
Our last analysis of the treatment data is performed with methodology\textsubscript{3}. In this case, due to the scarcity of data points, we train the $Logistic$ $regression$ model using bootstrapping to re-sample the data, in order to balance the outcomes for the training process. The resulting best model accuracy is $0.98$ with confidence intervals as in \textbf{Figure \ref{fig:2_12r}}. We observe once again in \textbf{Figure \ref{fig:2_11r}} that the coefficient for $Treatment1$ is not in the top selected features. On the other hand, $Treatment2$ is once again among the most negatively associated variables with $Death$. Combined with our observation from methodology\textsubscript{1}, this indicates that $Treatment2$ is effective at preventing $Death$, and very likely more effective than $Treatment1$.
\begin{figure}[H]
\includegraphics[width=\linewidth]{2_11r.png}
\caption{Coefficient Values of Logistic Regression, after randomized grid-search 500-fold CV.}
\label{fig:2_11r}
\end{figure}
\begin{figure}[H]
\includegraphics[width=\linewidth]{2_12r.png}
\caption{95\% Confidence Intervals for Logistic Regression Coefficient Values.}
\label{fig:2_12r}
\end{figure}
We are then able to answer the questions we posed for Task 2.
\begin{itemize}
\item \textit{Can we predict death rate given a specific treatment?}
Yes, we are able to train a logistic regression model which is able to predict death among the $CovidPositive$ population with a cross-validated accuracy of $0.98$.
\item \textit{Which treatment is the most effective?}
We estimate that the probability of death for a patient given $Treatment2$ is lower than without treatment, as well as lower than when given $Treatment1$. While $Treatment1$ may still be effective in preventing death (we do not establish that it is not), our analysis indicates that $Treatment2$ is more effective.
\item \textit{Can we predict a precise symptom(s)(side-effect(s)) given a specific treatment?}
We were not able to establish that any side-effects were more likely given either of the treatments. We speculate that this may be due to the low number of samples for each symptom, such that our calculation of conditional probabilities was not able to make a precise estimate approximating the actual outcomes.
\item \textit{Which side-effect(s) does each treatment produce?}
As noted in the previous answer, we cannot establish any side-effects from the treatments, and conversely we also do not establish that the treatments do \textbf{not} cause side effects.
\end{itemize}
|
{"hexsha": "f1233b6839f8d5d16979fb31362fbfe49b7b33df", "size": 17817, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "project1/report/content/results.tex", "max_stars_repo_name": "fabiorodp/IN_STK5000_Adaptive_methods_for_data_based_decision_making", "max_stars_repo_head_hexsha": "f8c049ceed6e3123e8676bcd9b29afaba9bd1f9b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "project1/report/content/results.tex", "max_issues_repo_name": "fabiorodp/IN_STK5000_Adaptive_methods_for_data_based_decision_making", "max_issues_repo_head_hexsha": "f8c049ceed6e3123e8676bcd9b29afaba9bd1f9b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "project1/report/content/results.tex", "max_forks_repo_name": "fabiorodp/IN_STK5000_Adaptive_methods_for_data_based_decision_making", "max_forks_repo_head_hexsha": "f8c049ceed6e3123e8676bcd9b29afaba9bd1f9b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-11-25T14:45:41.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-25T14:45:41.000Z", "avg_line_length": 86.0724637681, "max_line_length": 916, "alphanum_fraction": 0.7839142392, "num_tokens": 4377}
|
import os
import cv2
import torch
import numpy as np
import mxnet as mx
import torch.nn.functional as F
import torchvision.transforms as T
# torch.manual_seed(1234)
def get_person_id_category(record):
    """Map each person-id label in an MXNet record file to its sample keys.

    Args:
        record: an ``mx.recordio.MXIndexedRecordIO`` opened for reading; the
            entry at index 0 is a header describing the record layout.

    Returns:
        dict mapping ``header.label`` (person id) to the list of record keys
        belonging to that person.

    Raises:
        ValueError: if the header flag is not a recognised layout.
    """
    starting_piece_of_record = record.read_idx(0)
    header_in_starting_piece_of_record, _ = mx.recordio.unpack(starting_piece_of_record)
    flag_indicating_sample_storing = 0   # keys enumerate the samples directly
    flag_indicating_record_length = 2    # label[0] holds the total length
    if header_in_starting_piece_of_record.flag == flag_indicating_sample_storing:
        keys_of_samples = record.keys
    elif header_in_starting_piece_of_record.flag == flag_indicating_record_length:
        keys_of_samples = range(1, int(header_in_starting_piece_of_record.label[0]))
    else:
        # BUG FIX: the previous silent fall-through left keys_of_samples
        # unbound and crashed later with UnboundLocalError; fail fast with
        # a clear message instead.
        raise ValueError('unrecognised record header flag: {}'.format(
            header_in_starting_piece_of_record.flag))
    category = {}
    keys = set(record.keys)
    for k in keys_of_samples:
        if k in keys:
            s = record.read_idx(k)
            header, _ = mx.recordio.unpack(s)
            # setdefault replaces the membership test + double dict lookup.
            category.setdefault(header.label, []).append(k)
    return category
# Shared augmentation pipeline applied to every decoded sample:
# random horizontal flip, cast to float (torchvision's ConvertImageDtype
# also rescales integer images to [0, 1]), then Normalize(0.5, 0.5)
# maps that range to roughly [-1, 1].
transforms = torch.nn.Sequential(
    T.RandomHorizontalFlip(p=0.3),
    T.ConvertImageDtype(torch.float),
    T.Normalize(0.5, 0.5)
)
def idx_to_data(record, idx, resize=None, channel='rgb'):
    """Decode one record entry into a transformed image tensor and label.

    Args:
        record: ``mx.recordio.MXIndexedRecordIO`` to read from.
        idx: integer key of the sample inside the record.
        resize: optional target size forwarded to ``T.Resize``.
        channel: stored image layout — 'rgb', 'rgbd' or 'rgbdea'.

    Returns:
        (sample, label): sample is a float CHW tensor after the shared
        ``transforms`` pipeline; label is a long tensor from the header.

    Raises:
        ValueError: for an unsupported ``channel`` layout.
    """
    s = record.read_idx(idx)
    if channel == 'rgb':
        header, img = mx.recordio.unpack_img(s, iscolor=1)
        sample = np.flip(img, axis=2)  # flip to change BGR to RGB
    elif channel == 'rgbd':
        header, buf = mx.recordio.unpack(s)
        img = cv2.imdecode(np.frombuffer(buf, dtype=np.uint8), cv2.IMREAD_UNCHANGED)
        # RGB channels flipped from BGR; depth channel appended unchanged.
        sample = np.concatenate((np.flip(img[:, :, :3], axis=2), img[:, :, 3:]), axis=2)
    elif channel == 'rgbdea':
        header, buf = mx.recordio.unpack(s)
        # Two PNG payloads are concatenated in the buffer; split on the
        # PNG magic bytes and re-prepend them before decoding.
        _, buf1, buf2 = buf.split(b'\x89PNG')
        rgb = cv2.imdecode(np.frombuffer(b'\x89PNG' + buf1, dtype=np.uint8), cv2.IMREAD_UNCHANGED)
        dea = cv2.imdecode(np.frombuffer(b'\x89PNG' + buf2, dtype=np.uint8), cv2.IMREAD_UNCHANGED)
        sample = np.concatenate((np.flip(rgb, axis=2), dea), axis=2)
    else:
        # BUG FIX: an unknown channel previously crashed later with
        # UnboundLocalError on `sample`; raise a clear error instead.
        raise ValueError('unsupported channel layout: {!r}'.format(channel))
    # HWC -> CHW, then the shared augmentation pipeline.
    sample = transforms(torch.from_numpy(sample.copy().transpose(2, 0, 1)))
    if resize is not None:  # `is not None`, not `!= None`
        sample = T.Resize(resize)(sample)
    return sample, torch.tensor(header.label, dtype=torch.long)
import random
from itertools import chain
class MXFaceDataset(torch.utils.data.Dataset):
    """Base dataset backed by an MXNet indexed record file.

    Opens ``train.idx``/``train.rec`` under ``source``, builds the
    person-id -> sample-keys mapping, and binds the decode options so
    subclasses can call ``self.idx_to_data(record, idx)``.
    """

    def __init__(self, source, resize=None, channel='rgb'):
        super(MXFaceDataset, self).__init__()
        idx_path = os.path.join(source, 'train.idx')
        rec_path = os.path.join(source, 'train.rec')
        self.record = mx.recordio.MXIndexedRecordIO(idx_path, rec_path, 'r')
        self.persons = get_person_id_category(self.record)
        # Freeze resize/channel into a two-argument decode callable.
        self.idx_to_data = lambda record, idx: idx_to_data(record, idx, resize, channel)
class MXFaceDatasetConventional(MXFaceDataset):
    """Flat, index-addressable view over every sample of every person."""

    def __init__(self, source, resize=None, channel='rgb'):
        super(MXFaceDatasetConventional, self).__init__(source, resize, channel)
        # Flatten the per-person key lists into one lookup list,
        # preserving the per-person order.
        flattened = []
        for person_keys in self.persons.values():
            flattened.extend(person_keys)
        self.sample_idx = flattened

    def __len__(self):
        return len(self.sample_idx)

    def __getitem__(self, index):
        image, person_id = self.idx_to_data(self.record, self.sample_idx[index])
        return {'images': image,
                'person_ids': person_id
                }
class MXFaceDatasetBalancedIntraInterClusters(MXFaceDataset):
    """Yields image pairs: ~50% same-person pairs, ~50% different-person
    pairs drawn from two disjoint halves of the person set."""

    def __init__(self, source, resize=None, channel='rgb'):
        super(MXFaceDatasetBalancedIntraInterClusters, self).__init__(source, resize, channel)
        # random.shuffle(self.persons)
        persons_list = list(self.persons.values())
        # Keep the per-person key lists: same-person pairs are drawn from one
        # of these lists.
        self.persons_list = persons_list
        # Alternate persons into two halves so "different" pairs can never
        # come from the same person.
        self.upper = list(chain(*persons_list[::2]))
        self.lower = list(chain(*persons_list[1::2]))

    def __len__(self):
        return int(1e6)

    def __getitem__(self, index):
        same = random.random() > 0.5
        if same:
            # BUG FIX: random.choice(self.persons) indexed the dict with an
            # integer position and raised KeyError (labels are floats);
            # choose one per-person key list instead.
            pair = random.sample(random.choice(self.persons_list), 2)
        else:
            pair = (random.choice(self.upper), random.choice(self.lower))
        sample, label = zip(*[self.idx_to_data(self.record, idx) for idx in pair])
        sample = torch.stack(sample)
        label = torch.stack(label)
        return {'images': sample,
                'person_ids': label
                }
def collate_paired_data(batch):
    """Merge a list of sample dicts into one dict, concatenating the
    tensors for each key along dimension 0 (keys taken from batch[0])."""
    merged = {}
    for key in batch[0]:
        merged[key] = torch.cat([sample[key] for sample in batch], dim=0)
    return merged
class MXFaceDatasetTwin(MXFaceDataset):
    """Yields channel-concatenated image pairs plus a same-person flag."""

    def __init__(self, source, resize=None):
        super(MXFaceDatasetTwin, self).__init__(source, resize)
        # random.shuffle(self.persons)
        persons_list = list(self.persons.values())
        # Keep the per-person key lists for drawing same-person pairs.
        self.persons_list = persons_list
        # Alternate persons into two disjoint halves for "different" pairs.
        self.upper = list(chain(*persons_list[::2]))
        self.lower = list(chain(*persons_list[1::2]))

    def __len__(self):
        return int(1e6)

    def __getitem__(self, index):
        same = random.random() > 0.5
        if same:
            # BUG FIX: random.choice(self.persons) indexed the dict with an
            # integer position and raised KeyError; choose one per-person
            # key list instead.
            pair = random.sample(random.choice(self.persons_list), 2)
        else:
            pair = (random.choice(self.upper), random.choice(self.lower))
        sample, label = zip(*[self.idx_to_data(self.record, idx) for idx in pair])
        # Stack the two images along the channel dimension.
        sample = torch.cat(sample, dim=0)
        label = label[0] == label[1]
        return {'images': sample,
                'same': label
                }
import pickle as pkl
class MXFaceDatasetFromBin(torch.utils.data.Dataset):
    """Verification-pair dataset loaded from a pickled ``<dset>.bin`` file.

    The pickle holds a flat list of encoded images where consecutive
    entries form (A, B) pairs, plus a parallel same-person flag list.
    """

    def __init__(self, source, dset, resize=None):
        with open(os.path.join(source, dset + '.bin'), 'rb') as f:
            bins, self.issame_list = pkl.load(f, encoding='bytes')
        self.A = []
        self.B = []
        # Decode every pair up front into CHW uint8 tensors.
        for a, b in zip(bins[0::2], bins[1::2]):
            self.A.append(torch.from_numpy(mx.image.imdecode(a).asnumpy().transpose(2, 0, 1)))
            self.B.append(torch.from_numpy(mx.image.imdecode(b).asnumpy().transpose(2, 0, 1)))
        self.resize = resize

    def __len__(self):
        return len(self.A)

    def __getitem__(self, index):
        a = self.A[index]
        b = self.B[index]
        if self.resize is not None:  # `is not None`, not `!= None`
            # Build the transform once per call and apply to both images.
            resizer = T.Resize(self.resize)
            a = resizer(a)
            b = resizer(b)
        # Scale uint8 [0, 255] to floats in [-1, 1].
        return {'id': index,
                'A': (a / 255 - 0.5) * 2,
                'B': (b / 255 - 0.5) * 2,
                'same': self.issame_list[index]
                }
|
{"hexsha": "a8df952e224f329b2e713537e567b7b55c4fb3bf", "size": 5582, "ext": "py", "lang": "Python", "max_stars_repo_path": "facedataset.py", "max_stars_repo_name": "zhangruihan1/robust-face-recognition", "max_stars_repo_head_hexsha": "a7a03c7d260768fed2dfbe5a3af8dd65d6839ca5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "facedataset.py", "max_issues_repo_name": "zhangruihan1/robust-face-recognition", "max_issues_repo_head_hexsha": "a7a03c7d260768fed2dfbe5a3af8dd65d6839ca5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "facedataset.py", "max_forks_repo_name": "zhangruihan1/robust-face-recognition", "max_forks_repo_head_hexsha": "a7a03c7d260768fed2dfbe5a3af8dd65d6839ca5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.6703296703, "max_line_length": 92, "alphanum_fraction": 0.7001074884, "include": true, "reason": "import numpy", "num_tokens": 1608}
|
#!/usr/bin/env python
import sys
import argparse
from astropy.io import fits
# FITS binary-table format codes for each keyword: nA = string of length n,
# E = 32-bit float, D = 64-bit float, I = 16-bit int, J = 32-bit int.
# Keywords common to all SALT instruments.
header_dict={'PROPID':'50A', 'PROPOSER':'20A', 'OBJECT':'100A', 'RA':'12A', 'DEC':'12A', 'EPOCH':'E', 'EQUINOX':'E', 'DATE-OBS':'10A', 'UTC-OBS':'12A', 'TIME-OBS':'12A', 'EXPTIME':'D', 'OBSMODE':'20A', 'DETMODE':'20A', 'CCDTYPE':'8A', 'NCCDS':'I', 'CCDSUM':'5A', 'GAINSET':'6A', 'ROSPEED':'4A', 'INSTRUME':'8A', 'TELHA':'11A', 'TELRA':'11A', 'TELDEC':'12A', 'TELPA':'E', 'TELAZ':'E', 'TELALT':'E', 'TRKX':'E', 'TRKY':'E', 'TRKZ':'E', 'TRKPHI':'E', 'TRKTHETA':'E', 'TRKRHO':'E', 'COLPHI':'E', 'COLTHETA':'E', 'TELTEM':'E', 'PAYLTEM':'E', 'AMPTEM':'E', 'DETSWV':'16A', 'BLOCKID':'E', 'BVISITID':'E'}
# Keywords specific to the RSS instrument.
rss_dict = {'FILTER':'8A', 'LAMPID':'8A', 'CALFILT':'8A', 'CALND':'E', 'PELLICLE':'8A', 'INSTPORT':'8A', 'CF-STATE':'20A', 'SM-STATE':'20A', 'SM-STA':'8A', 'SM-STEPS':'J', 'SM-VOLTS':'E', 'SM-STA-S':'E', 'SM-STA-V':'E', 'MASKID':'16A', 'MASKTYP':'16A', 'WP-STATE':'20A', 'HWP-CMD':'16A', 'HW-STEPS':'J', 'HWP-STA':'E', 'QWP-CMD':'16A', 'QW-STEPS':'J', 'QWP-STA':'E', 'QWP-ANG':'E', 'HWP-ANG':'E', 'SH-STATE':'20A', 'FO-STATE':'20A', 'FO-POS':'E', 'FO-VOLTS':'E', 'FO-POS-S':'E', 'FO-POS-V':'E', 'GR-STATE':'20A', 'GR-STA':'10A', 'GR-ANGLE':'E', 'GM-STEPS':'J', 'GM-VOLTS':'E', 'GR-STA-S':'E', 'GR-STA-V':'E', 'GR-STEPS':'J', 'GRATING':'8A', 'GRTILT':'E', 'BS-STATE':'24A', 'FI-STATE':'20A', 'FI-STA':'7A', 'FM-STEPS':'J', 'FM-VOLTS':'E', 'FM-STA-S':'E', 'FM-STA-V':'E', 'AR-STATE':'24A', 'AR-STA':'16A', 'CAMANG':'E', 'AR-STA-S':'E', 'AR-ANGLE':'E', 'PROC':'20A', 'PCS-VER':'4A', 'WPPATERN':'20A','CCDTEM':'D', 'DEWTEM':'E', 'CENTEM':'E'}
# Keywords specific to the HRS instrument.
hrs_dict = {'DETSIZE':'17A', 'DETNAM':'10A', 'DETSER':'10A', 'I2STAGE':'10A', 'EXP-TOT':'E', 'EXP-MEAN':'E', 'EXP-MID':'E', 'NODSHUFF':'E', 'NODPER':'E' , 'NODCOUNT':'E', 'PRE-DEW':'E' , 'PRE-VAC':'E' , 'FOC-BMIR':'E', 'FOC-RMIR':'E', 'TEM-AIR':'E' , 'TEM-VAC':'E' , 'TEM-RMIR':'E', 'TEM-COLL':'E', 'TEM-RCAM':'E', 'TEM-BCAM':'E', 'TEM-ECH':'E' , 'CCDTEMP':'E', 'TEM-OB':'E' , 'TEM-IOD':'E'}
# Keywords specific to SALTICAM.
scam_dict = {'FILPOS':'I', 'FILTER':'8A', 'CCDTEM':'D', 'DEWTEM':'E', 'CENTEM':'E'}
def create_header_dict_from_list(instrument):
    """From a list of header keywords and formats, create the dictionary of keywords

    Parameters
    ----------
    instrument: str
        Name of the instrument ('RSS', 'HRS', 'SCAM' or 'SALTICAM')

    Returns
    -------
    fits_header_dict: dict
        Dictionary mapping each keyword to its FITS format code
    """
    # Copy the shared table so repeated calls (or calls for different
    # instruments) do not permanently mutate the module-level header_dict.
    fits_header_dict = dict(header_dict)
    if instrument == 'RSS':
        fits_header_dict.update(rss_dict)
    elif instrument == 'HRS':
        fits_header_dict.update(hrs_dict)
    elif instrument in ('SCAM', 'SALTICAM'):
        # Accept both the short name and the INSTRUME keyword value that
        # fits_header_check passes in ('SALTICAM').
        fits_header_dict.update(scam_dict)
    return fits_header_dict
def create_header_dict_from_sdb(instr, sdb):
    """Create the keyword dictionary for *instr* from the SALT science database.

    NOTE(review): unimplemented stub — it currently does nothing and returns
    None.  Nothing in this module calls it yet.
    """
def fits_header_check(image, fits_header_dict=None, missing=False):
    """Check the header values in the image

    This task will check the fits header values in the image
    and confirm that all header entries are present and that
    they have an appropriate value

    Parameters
    ----------
    image: str
        Name of an input image
    fits_header_dict: None or dict
        If None, fits_header_dict will be created from the header list for
        the image's instrument.  If a dictionary, that dictionary is used
        as-is.
    missing: boolean
        If True, it will report on keywords in the FITS file but not in the list.

    Returns
    -------
    result: tuple or None
        (missing_list, empty_list, wrong_list, absent_list) when any problem
        was found, otherwise None.

    Raises
    ------
    TypeError
        If the image is not from a SALT instrument, or fits_header_dict is
        of an unsupported type.
    """
    # open the file
    hdu = fits.open(image)
    try:
        # determine the instrument
        instrument = hdu[0].header['INSTRUME']
        if instrument not in ['RSS', 'SALTICAM', 'HRS']:
            raise TypeError('{} is not for a SALT instrument or does not have an appropriate instrument keyword'.format(instrument))
        # create the fits_header_dict to compare with
        if fits_header_dict is None:
            fits_header_dict = create_header_dict_from_list(instrument)
        elif isinstance(fits_header_dict, dict):
            # Bug fix: this branch was commented out, so passing a dict (as
            # the docstring promises) raised TypeError.  Use it unchanged.
            pass
        else:
            raise TypeError('{} is not None, dict, or sdb instance'.format(fits_header_dict))
        # check for any missing or empty headers
        missing_list = []
        empty_list = []
        for key in fits_header_dict:
            if key in hdu[0].header:
                value = hdu[0].header[key]
                if not value:
                    # numeric formats may legitimately hold 0, so only flag
                    # falsy values for non-numeric format codes
                    if fits_header_dict[key] not in ["J", "E", "D", "I"]:
                        empty_list.append(key)
            else:
                missing_list.append(key)
        # check instrument specific keywords that have to be specified
        wrong_list = []
        if instrument == 'RSS':
            # arcs and flats must name the lamp that was used
            if hdu[0].header['CCDTYPE'] == 'ARC' and hdu[0].header['LAMPID'].strip() == 'NONE':
                wrong_list.append('LAMPID')
            if hdu[0].header['CCDTYPE'] == 'FLAT' and hdu[0].header['LAMPID'].strip() == 'NONE':
                wrong_list.append('LAMPID')
        if instrument == 'HRS':
            if hdu[0].header['OBJECT'] == 'Bias' and hdu[0].header['OBSTYPE'] != 'Bias':
                wrong_list.append('OBSTYPE')
        # if requested, check for any headers not in the list
        absent_list = []
        if missing:
            for key in hdu[0].header:
                if key not in fits_header_dict:
                    absent_list.append(key)
    finally:
        # Close the file even when a TypeError is raised above (the original
        # leaked the open HDU list on the error paths).
        hdu.close()
    if missing_list or empty_list or absent_list or wrong_list:
        return missing_list, empty_list, wrong_list, absent_list
    return None
if __name__ == '__main__':
    # Command-line entry point: report header problems for one image.
    parser = argparse.ArgumentParser(description='Check SALT FITS Header')
    parser.add_argument('infile', help='SALT HRS image')
    parser.add_argument('-m', dest='missing', default=False, action='store_true', help='Warn about missing keywords')
    parser.add_argument('-d', dest='database', default=False, action='store_true', help='Use database keywords')
    args = parser.parse_args()
    infile = args.infile
    fits_header_dict = None
    if args.database:
        fits_header_dict = 'sdb'
    results = fits_header_check(infile, fits_header_dict=fits_header_dict, missing=args.missing)
    if results is None:  # idiom fix: compare to None with `is`
        exit()
    # print out results
    missing, empty, wrong, absent = results
    hdu = fits.open(infile)
    print('{} {}'.format(infile, hdu[0].header['OBJECT']))
    if missing: print("Keywords that are missing: {}\n".format(missing))
    if empty: print("Keywords that are empty: {}\n".format(empty))
    if wrong: print("Keywords that are wrong: {}\n".format(wrong))
    # Bug fix: the absent list was unpacked but never reported, so the -m
    # flag produced no extra output.
    if absent: print("Keywords that are not in the list: {}\n".format(absent))
    hdu.close()
|
{"hexsha": "0be80480aa8fe4b6fc058c9354bae77f4cfaef35", "size": 6729, "ext": "py", "lang": "Python", "max_stars_repo_path": "plugins/fitsheadercheck.py", "max_stars_repo_name": "Richard-Tarbell/pysalt", "max_stars_repo_head_hexsha": "2815d5533c7e60b7042f2bc3cf46cecdd38fc609", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2015-02-22T08:35:04.000Z", "max_stars_repo_stars_event_max_datetime": "2020-11-12T11:32:34.000Z", "max_issues_repo_path": "plugins/fitsheadercheck.py", "max_issues_repo_name": "Richard-Tarbell/pysalt", "max_issues_repo_head_hexsha": "2815d5533c7e60b7042f2bc3cf46cecdd38fc609", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 17, "max_issues_repo_issues_event_min_datetime": "2015-02-24T18:40:26.000Z", "max_issues_repo_issues_event_max_datetime": "2018-06-05T12:52:59.000Z", "max_forks_repo_path": "plugins/fitsheadercheck.py", "max_forks_repo_name": "Richard-Tarbell/pysalt", "max_forks_repo_head_hexsha": "2815d5533c7e60b7042f2bc3cf46cecdd38fc609", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 12, "max_forks_repo_forks_event_min_datetime": "2015-03-20T14:46:19.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-12T18:30:01.000Z", "avg_line_length": 41.537037037, "max_line_length": 939, "alphanum_fraction": 0.6143557735, "include": true, "reason": "from astropy", "num_tokens": 2104}
|
#
# Copyright (c) 2017, UT-BATTELLE, LLC
# All rights reserved.
#
# This software is released under the BSD license detailed
# in the LICENSE file in the top level a-prime directory
#
#python script to plot wind stress vectors and magnitude over the oceans using
#CF variables TAUX and TAUY
import matplotlib as mpl
#changing the default backend to agg to resolve contouring issue on rhea
mpl.use('Agg')
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
import math
import numpy
from netCDF4 import Dataset
from get_season_name import get_season_name
from round_to_first import round_to_first
from get_reg_area_avg import get_reg_area_avg
from get_reg_area_avg_rmse import get_reg_area_avg_rmse
from read_climo_file import read_climo_file
from optparse import OptionParser
# Command-line interface: test case, reference case and output locations.
# NOTE(review): optparse is deprecated in Python 3; kept for compatibility.
parser = OptionParser(usage = "python %prog [options]")
parser.add_option("--indir", dest = "indir",
                  help = "filepath to directory model data")
parser.add_option("-c", "--casename", dest = "casename",
                  help = "casename of the run")
parser.add_option("-f", "--field_name", dest = "field_name",
                  help = "variable name")
parser.add_option("--begin_yr", dest = "begin_yr", type = "int",
                  help = "begin year")
parser.add_option("--end_yr", dest = "end_yr", type = "int",
                  help = "end year")
# Months are zero-based: 0 = January, 11 = December.
parser.add_option("--begin_month", dest = "begin_month", type = "int",
                  help = "begin_month", default = 0)
parser.add_option("--end_month", dest = "end_month", type = "int",
                  help = "end_month", default = 11)
parser.add_option("--interp_grid", dest = "interp_grid",
                  help = "name of the interpolated grid of test case")
parser.add_option("--interp_method", dest = "interp_method",
                  help = "method used for interpolating the test case e.g. conservative_mapping")
parser.add_option("--ref_case_dir", dest = "ref_case_dir",
                  help = "filepath to ref_case directory")
parser.add_option("--ref_case", dest = "ref_case",
                  help = "reference casename")
parser.add_option("--ref_begin_yr", dest = "ref_begin_yr", type = "int",
                  help = "ref_case begin year")
parser.add_option("--ref_end_yr", dest = "ref_end_yr", type = "int",
                  help = "ref_case end year")
parser.add_option("--ref_interp_grid", dest = "ref_interp_grid",
                  help = "name of the interpolated grid of reference case")
parser.add_option("--ref_interp_method", dest = "ref_interp_method",
                  help = "method used for interpolating the reference case e.g. conservative_mapping")
parser.add_option("--plots_dir", dest = "plots_dir",
                  help = "filepath to plots directory")
(options, args) = parser.parse_args()
# Unpack every option into a module-level name used by the rest of the script.
indir = options.indir
casename = options.casename
field_name = options.field_name
begin_yr = options.begin_yr
end_yr = options.end_yr
begin_month = options.begin_month
end_month = options.end_month
interp_grid = options.interp_grid
interp_method = options.interp_method
ref_case_dir = options.ref_case_dir
ref_case = options.ref_case
ref_begin_yr = options.ref_begin_yr
ref_end_yr = options.ref_end_yr
ref_interp_grid = options.ref_interp_grid
ref_interp_method = options.ref_interp_method
plots_dir = options.plots_dir
#Getting season name from begin_month and end_month
season = get_season_name(begin_month, end_month)
# Map the requested vector field to its component and mask variable names.
if field_name == 'TAU':
    field_X_name = 'TAUX'
    field_Y_name = 'TAUY'
    field_mask_name = 'OCNFRAC'
#Read x and y components of vector field and mask field
#Reading mask field
field_mask, lat, lon, area, units = read_climo_file(indir = indir, \
                                                    casename = casename, \
                                                    season = season, \
                                                    field_name = field_mask_name, \
                                                    begin_yr = begin_yr, \
                                                    end_yr = end_yr, \
                                                    interp_grid = interp_grid, \
                                                    interp_method = interp_method, \
                                                    reg = 'global')
#Reading X component and masking grid boxes
field_X, lat, lon, area, units = read_climo_file(indir = indir, \
                                                 casename = casename, \
                                                 season = season, \
                                                 field_name = field_X_name, \
                                                 begin_yr = begin_yr, \
                                                 end_yr = end_yr, \
                                                 interp_grid = interp_grid, \
                                                 interp_method = interp_method, \
                                                 reg = 'global')
# Grid boxes with ocean fraction below 0.5 are masked out (mask value 1).
field_X_plot = numpy.ma.zeros((lat.shape[0], lon.shape[0]))
field_X_plot[:,:] = field_X[:,:]
field_X_plot.mask = numpy.where(field_mask[:,:] < 0.5, 1, 0)
#Reading Y component and masking grid boxes
field_Y, lat, lon, area, units = read_climo_file(indir = indir, \
                                                 casename = casename, \
                                                 season = season, \
                                                 field_name = field_Y_name, \
                                                 begin_yr = begin_yr, \
                                                 end_yr = end_yr, \
                                                 interp_grid = interp_grid, \
                                                 interp_method = interp_method, \
                                                 reg = 'global')
field_Y_plot = numpy.ma.zeros((lat.shape[0], lon.shape[0]))
field_Y_plot[:,:] = field_Y[:,:]
field_Y_plot.mask = numpy.where(field_mask[:,:] < 0.5, 1, 0)
#Computing an approximation of field magnitude from monthly averages
field_XY = numpy.ma.sqrt(numpy.ma.power(field_X_plot, 2.0) + numpy.ma.power(field_Y_plot, 2.0))
print
print 'Reading climo file for case: ', ref_case
print
# Read both components of the reference case on its own grid.
field_ref_case_X, lat, lon, area, units = read_climo_file(indir = ref_case_dir, \
                                                          casename = ref_case, \
                                                          season = season, \
                                                          field_name = field_X_name, \
                                                          begin_yr = ref_begin_yr, \
                                                          end_yr = ref_end_yr, \
                                                          interp_grid = ref_interp_grid, \
                                                          interp_method = ref_interp_method, \
                                                          reg = 'global')
field_ref_case_Y, lat, lon, area, units = read_climo_file(indir = ref_case_dir, \
                                                          casename = ref_case, \
                                                          season = season, \
                                                          field_name = field_Y_name, \
                                                          begin_yr = ref_begin_yr, \
                                                          end_yr = ref_end_yr, \
                                                          interp_grid = ref_interp_grid, \
                                                          interp_method = ref_interp_method, \
                                                          reg = 'global')
field_ref_case_X_plot = numpy.ma.zeros((lat.shape[0], lon.shape[0]))
field_ref_case_Y_plot = numpy.ma.zeros((lat.shape[0], lon.shape[0]))
field_ref_case_X_plot[:,:] = field_ref_case_X[:,:]
field_ref_case_Y_plot[:,:] = field_ref_case_Y[:,:]
#Masking if the ref_case is also a model output
if ref_case != 'ERS':
    field_ref_case_X_plot.mask = numpy.where(field_mask[:,:] < 0.5, 1, 0)
    field_ref_case_Y_plot.mask = numpy.where(field_mask[:,:] < 0.5, 1, 0)
#Computing an approximation of field magnitude
field_ref_case_XY = numpy.ma.sqrt(numpy.ma.power(field_ref_case_X_plot, 2.0) + numpy.ma.power(field_ref_case_Y_plot, 2.0))
#Computing levels using mean and standard deviation
# Contour range runs from 0 to mean + 3 sigma, rounded to one significant digit.
num = 11
max_plot = round_to_first(numpy.ma.mean(field_XY) + \
                          3.0 * numpy.ma.std(field_XY))
min_plot = 0.0
levels = numpy.linspace(min_plot, max_plot, num = num)
field_max_TAU = numpy.ma.max(field_XY)
field_min_TAU = numpy.ma.min(field_XY)
field_max_ERS_TAU = numpy.ma.max(field_ref_case_XY)
field_min_ERS_TAU = numpy.ma.min(field_ref_case_XY)
print 'mean, stddev, min_plot, max_plot: ', \
      numpy.ma.mean(field_XY), numpy.ma.std(field_XY), min_plot, max_plot
print 'min, max: ', field_min_TAU, field_max_TAU
print 'levels:', levels
#PLOT CASE DATA
# Three stacked panels: test case, reference case, difference.
f = plt.figure(figsize=(8.5, 11))
plt.suptitle(field_name + ' (' + units + ') ' + season, fontsize = 20)
ax = f.add_subplot(3,1,1)
ax.set_title(casename)
m = Basemap(projection='cyl',llcrnrlat=-90,urcrnrlat=90,\
            llcrnrlon=0,urcrnrlon=360,resolution='c')
m.drawcoastlines()
lons, lats = numpy.meshgrid(lon,lat)
x, y = m(lons,lats)
# Filled contours of the magnitude, quiver arrows every 3rd grid point.
c = m.contourf( x, y, field_XY, \
                cmap = 'gnuplot2_r', \
                levels = levels, \
                extend = 'both')
cb = m.colorbar(c)
q = m.quiver( x[::3,::3], y[::3,::3], \
              field_X_plot[::3, ::3], field_Y_plot[::3, ::3], \
              scale = 3.0)
text_data = 'min = ' + str(round(field_min_TAU, 2)) + ', ' + \
            'max = ' + str(round(field_max_TAU, 2))
ax.text(0, -100, text_data, transform = ax.transData, fontsize = 10)
#PLOT REF CASE DATA
# Same color scale and quiver scale so the two panels are comparable.
ax = f.add_subplot(3,1,2)
ax.set_title('ERS')
m = Basemap(projection='cyl',llcrnrlat=-90,urcrnrlat=90,\
            llcrnrlon=0,urcrnrlon=360,resolution='c')
m.drawcoastlines()
c = m.contourf( x, y, field_ref_case_XY, \
                cmap = 'gnuplot2_r', \
                levels = levels, \
                extend = 'both')
cb = m.colorbar(c)
q = m.quiver( x[::3,::3], y[::3,::3], \
              field_ref_case_X_plot[::3, ::3], field_ref_case_Y_plot[::3, ::3], \
              scale = 3.0)
text_data = 'min = ' + str(round(field_min_ERS_TAU, 2)) + ', ' + \
            'max = ' + str(round(field_max_ERS_TAU, 2))
ax.text(0, -100, text_data, transform = ax.transData, fontsize = 10)
#PLOT DIFFERENCE
ax = f.add_subplot(3,1,3)
#ax.set_title(casename + ' - ' + ref_case)
ax.set_title('Difference')
field_diff_XY = field_XY - field_ref_case_XY
field_diff_max_TAU = numpy.ma.max(field_diff_XY)
field_diff_min_TAU = numpy.ma.min(field_diff_XY)
#Computing levels using mean and standard deviation
num = 11
max_plot = round_to_first(3.0 * numpy.ma.std(field_diff_XY))
levels_diff = numpy.linspace(-max_plot, max_plot, num = num)
print 'For difference plot: '
print 'mean, stddev, max_plot: ', \
numpy.ma.mean(field_diff_XY), numpy.ma.std(field_diff_XY), max_plot
print 'min, max: ', numpy.ma.min(field_diff_XY), numpy.ma.max(field_diff_XY)
print 'contour levels: ', levels
#Computing difference vectors
field_diff_X = field_X_plot - field_ref_case_X_plot
field_diff_Y = field_Y_plot - field_ref_case_Y_plot
m = Basemap(projection='cyl',llcrnrlat=-90,urcrnrlat=90,\
llcrnrlon=0,urcrnrlon=360,resolution='c')
m.drawcoastlines()
c = m.contourf(x, y, field_diff_XY, \
cmap = 'seismic', \
levels = levels_diff, \
extend = 'both')
cb = m.colorbar(c)
q = m.quiver( x[::3,::3], y[::3,::3], \
field_diff_X[::3, ::3], field_diff_Y[::3, ::3], \
scale = 1.0)
text_data = 'min = ' + str(round(field_diff_min_TAU, 2)) + ', ' + \
'max = ' + str(round(field_diff_max_TAU, 2))
ax.text(0, -100, text_data, transform = ax.transData, fontsize = 10)
#SAVING PLOT
mpl.rcParams['savefig.dpi']=300
outfile = plots_dir + '/' + casename + '-' + ref_case + '_' \
+ field_name + '_climo_' + season + '.png'
plt.savefig(outfile)
#plt.show()
|
{"hexsha": "bea1dae4c9b1c89164e717e58f9e898fd6b79371", "size": 11195, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/plot_climo_vector.py", "max_stars_repo_name": "E3SM-Project/a-prime", "max_stars_repo_head_hexsha": "a8c084ab6f727904a2b38d8a93b9c83e2f978e3f", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2017-06-07T13:13:32.000Z", "max_stars_repo_stars_event_max_datetime": "2017-06-07T13:13:32.000Z", "max_issues_repo_path": "python/plot_climo_vector.py", "max_issues_repo_name": "ACME-Climate/a-prime", "max_issues_repo_head_hexsha": "a8c084ab6f727904a2b38d8a93b9c83e2f978e3f", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 31, "max_issues_repo_issues_event_min_datetime": "2017-06-07T00:26:58.000Z", "max_issues_repo_issues_event_max_datetime": "2018-04-09T17:03:15.000Z", "max_forks_repo_path": "python/plot_climo_vector.py", "max_forks_repo_name": "ACME-Climate/a-prime", "max_forks_repo_head_hexsha": "a8c084ab6f727904a2b38d8a93b9c83e2f978e3f", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2018-08-05T23:43:59.000Z", "max_forks_repo_forks_event_max_datetime": "2018-08-05T23:43:59.000Z", "avg_line_length": 32.9264705882, "max_line_length": 122, "alphanum_fraction": 0.6108977222, "include": true, "reason": "import numpy", "num_tokens": 2948}
|
# Load the benchmark definitions and the harness used to run them.
using BaseBenchmarks
using BenchmarkTools
using Compat
using Compat.Test
# `Distributed` was split out of Base during the 0.7 development cycle;
# on older versions `addprocs` is available without the import.
if VERSION >= v"0.7.0-DEV.2954"
using Distributed
end
# One extra worker so benchmarks that exercise distributed code can run.
addprocs(1)
# Register every benchmark group in BaseBenchmarks.SUITE.
BaseBenchmarks.loadall!()
# Smoke test: one fast pass over the whole suite (minimal samples/evals,
# GC timing disabled).  Evaluating to `true` means `run` did not throw.
@test begin
run(BaseBenchmarks.SUITE, verbose = true, samples = 1,
    evals = 2, gctrial = false, gcsample = false);
true
end
|
{"hexsha": "714c7dae2da15ce2efe1645df59c71fecfa6c9fc", "size": 312, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/runtests.jl", "max_stars_repo_name": "mschauer/BaseBenchmarks.jl", "max_stars_repo_head_hexsha": "08baef1618ebf33f53a905bb131a51c3e53e1eb3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/runtests.jl", "max_issues_repo_name": "mschauer/BaseBenchmarks.jl", "max_issues_repo_head_hexsha": "08baef1618ebf33f53a905bb131a51c3e53e1eb3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/runtests.jl", "max_forks_repo_name": "mschauer/BaseBenchmarks.jl", "max_forks_repo_head_hexsha": "08baef1618ebf33f53a905bb131a51c3e53e1eb3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 16.4210526316, "max_line_length": 58, "alphanum_fraction": 0.7083333333, "num_tokens": 100}
|
module utils_mod
! Helpers for reading this program's positional command-line arguments:
!   1: input file name, 2: dataset name, 3: block size.
contains

    ! Shared fetch/validate logic (factored out of the three original
    ! near-identical subroutines).  Aborts with a message naming *what*
    ! when positional argument *pos* was not supplied.
    subroutine get_argument(pos, what, value)
        use, intrinsic :: iso_fortran_env, only : error_unit
        implicit none
        integer, intent(in) :: pos
        character(len=*), intent(in) :: what
        character(len=*), intent(out) :: value
        character(len=1024) :: argv
        if (command_argument_count() < pos) then
            write (unit=error_unit, fmt="(A)") &
                "# error: no " // what // " given"
            stop
        end if
        call get_command_argument(pos, argv)
        value = trim(argv)
    end subroutine get_argument

    ! First argument: name of the file to operate on.
    subroutine get_file_name(file_name)
        implicit none
        character(len=*), intent(out) :: file_name
        call get_argument(1, "file name", file_name)
    end subroutine get_file_name

    ! Second argument: name of the dataset inside the file.
    subroutine get_dataset_name(dataset_name)
        implicit none
        character(len=*), intent(out) :: dataset_name
        call get_argument(2, "dataset name", dataset_name)
    end subroutine get_dataset_name

    ! Third argument: block size, parsed as an integer.
    subroutine get_block_size(block_size)
        use, intrinsic :: iso_fortran_env, only : error_unit
        implicit none
        integer, intent(out) :: block_size
        character(len=1024) :: argv
        call get_argument(3, "block size", argv)
        read (argv, "(I10)") block_size
    end subroutine get_block_size

end module utils_mod
|
{"hexsha": "88f89b871bcd2dbcf9c749875beafd76477a6927", "size": 1530, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "OpenMP/MultiLevel/LinearAlgebra/src/utils_mod.f90", "max_stars_repo_name": "Gjacquenot/training-material", "max_stars_repo_head_hexsha": "16b29962bf5683f97a1072d961dd9f31e7468b8d", "max_stars_repo_licenses": ["CC-BY-4.0"], "max_stars_count": 115, "max_stars_repo_stars_event_min_datetime": "2015-03-23T13:34:42.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-21T00:27:21.000Z", "max_issues_repo_path": "OpenMP/MultiLevel/LinearAlgebra/src/utils_mod.f90", "max_issues_repo_name": "Gjacquenot/training-material", "max_issues_repo_head_hexsha": "16b29962bf5683f97a1072d961dd9f31e7468b8d", "max_issues_repo_licenses": ["CC-BY-4.0"], "max_issues_count": 56, "max_issues_repo_issues_event_min_datetime": "2015-02-25T15:04:26.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-03T07:42:48.000Z", "max_forks_repo_path": "OpenMP/MultiLevel/LinearAlgebra/src/utils_mod.f90", "max_forks_repo_name": "Gjacquenot/training-material", "max_forks_repo_head_hexsha": "16b29962bf5683f97a1072d961dd9f31e7468b8d", "max_forks_repo_licenses": ["CC-BY-4.0"], "max_forks_count": 59, "max_forks_repo_forks_event_min_datetime": "2015-11-26T11:44:51.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-21T00:27:22.000Z", "avg_line_length": 32.5531914894, "max_line_length": 76, "alphanum_fraction": 0.6058823529, "num_tokens": 360}
|
import numpy as np
from ..space import Box, Discrete
class BoxWrapper(Box):
    """Adapter exposing a Gym ``Box`` space through the Edge ``Box`` interface.

    Infinite Gym bounds are clipped to +/- ``inf_ceiling`` and every
    dimension is discretized.
    """

    DEFAULT_INF_CEILING = 100

    def __init__(self, gym_box, discretization_shape=None, inf_ceiling=None):
        if inf_ceiling is None:
            self.inf_ceiling = BoxWrapper.DEFAULT_INF_CEILING
        else:
            self.inf_ceiling = inf_ceiling
        self.gym_space = gym_box
        self.gym_shape = self.gym_space.shape
        lows = self._clip_inf(self.gym_space.low.reshape(-1))
        highs = self._clip_inf(self.gym_space.high.reshape(-1))
        n_dims = lows.shape[0]
        if discretization_shape is None:
            # default: 100 bins along every dimension
            discretization_shape = tuple(100 for _ in range(n_dims))
        if n_dims != len(discretization_shape):
            raise ValueError(f'Dimension mismatch: Gym Box has {n_dims} dimensions, while the Edge '
                             f'one has {len(discretization_shape)}')
        super(BoxWrapper, self).__init__(lows, highs, discretization_shape)

    def _clip_inf(self, a):
        # Bound infinities so the discretization stays finite.
        return np.clip(a, a_min=-self.inf_ceiling, a_max=self.inf_ceiling)

    def to_gym(self, index):
        # Flat Edge element -> Gym-shaped array.
        return self[index].reshape(self.gym_shape)

    def from_gym(self, gym_element):
        # Gym-shaped array -> flat Edge element.
        return gym_element.reshape(-1)
class DiscreteWrapper(Discrete):
    """Adapter exposing a Gym ``Discrete`` space through the Edge ``Discrete`` interface."""

    def __init__(self, gym_discrete):
        n_actions = gym_discrete.n
        super(DiscreteWrapper, self).__init__(n_actions, 0, n_actions - 1)
        self.gym_space = gym_discrete

    def to_gym(self, index):
        # Gym expects a plain Python int action.
        return int(self[index])

    def from_gym(self, gym_element):
        # Edge represents discrete elements as floats.
        return float(gym_element)
|
{"hexsha": "b308068498e909d185b62d25f8365f8568ce930c", "size": 1509, "ext": "py", "lang": "Python", "max_stars_repo_path": "edge/gym_wrappers/space_wrapper.py", "max_stars_repo_name": "Data-Science-in-Mechanical-Engineering/edge", "max_stars_repo_head_hexsha": "586eaba2f0957e75940f4f19fa774603f57eae89", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "edge/gym_wrappers/space_wrapper.py", "max_issues_repo_name": "Data-Science-in-Mechanical-Engineering/edge", "max_issues_repo_head_hexsha": "586eaba2f0957e75940f4f19fa774603f57eae89", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "edge/gym_wrappers/space_wrapper.py", "max_forks_repo_name": "Data-Science-in-Mechanical-Engineering/edge", "max_forks_repo_head_hexsha": "586eaba2f0957e75940f4f19fa774603f57eae89", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.8048780488, "max_line_length": 107, "alphanum_fraction": 0.6792577866, "include": true, "reason": "import numpy", "num_tokens": 377}
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
# Install and import TensorFlow, then print its version.
# NOTE(review): pins TF 1.14 — the tf.placeholder / tf.layers APIs used
# throughout this notebook script were removed in TF 2.x.
get_ipython().system('pip install tensorflow==1.14.0')
import tensorflow as tf
print("\n\nTensorFlow version:", tf.__version__)
# In[2]:
# MNIST layer sizes: 784 flattened pixels in, 10 class logits out.
n_inputs = 28 * 28
n_hidden1 = 300
n_hidden2 = 100
n_outputs = 10
# In[3]:
tf.reset_default_graph()
X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
# In[4]:
# By default, the tf.layers.dense() function uses Xavier initialization (with uniform distribution)
hidden1 = tf.layers.dense(X, n_hidden1, activation=tf.nn.relu, name="hidden1")
# In[5]:
# You can change this to He initialization by using the variance_scaling_initializer() function like this:
tf.reset_default_graph()
X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
he_init = tf.contrib.layers.variance_scaling_initializer()
hidden1 = tf.layers.dense(X, n_hidden1, activation=tf.nn.relu,
                          kernel_initializer=he_init, name="hidden1")
# In[6]:
# TensorFlow offers an elu() function that you can use to build your neural network. Simply set the activation
# argument when calling the dense() function like this:
tf.reset_default_graph()
X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
hidden1 = tf.layers.dense(X, n_hidden1, activation=tf.nn.elu, name="hidden1")
# In[7]:
# TensorFlow does not have a predefined function for leaky ReLUs, but it is easy to define:
tf.reset_default_graph()
X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
def leaky_relu(z, name=None):
    # max(0.01*z, z): identity for z > 0, slope 0.01 for z < 0
    return tf.maximum(0.01 * z, z, name=name)
hidden1 = tf.layers.dense(X, n_hidden1, activation=leaky_relu, name="hidden1")
# # Implementing Batch Normalization with TensorFlow
# Another way to reduce the problem of vanishing/exploding gradients is to use **batch normalization**.
# TensorFlow provides a **tf.nn.batch_normalization()** function that simply centers and normalizes the inputs, but
# you must compute the mean and standard deviation yourself and pass them as parameters to this function, and you must also handle the creation of the scaling and offset parameters (and pass them to this function). Another option is to use the **tf.layers.batch_normalization()** function, which handles all this for you as in the following code:
# In[8]:
tf.reset_default_graph()
n_inputs = 28 * 28
n_hidden1 = 300
n_hidden2 = 100
n_outputs = 10
X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
# Fed as True during training so BN uses batch statistics instead of the
# moving averages.
training = tf.placeholder_with_default(False, shape=(), name="training")
hidden1 = tf.layers.dense(X, n_hidden1, name="hidden1")
bn1 = tf.layers.batch_normalization(hidden1, training=training, momentum=0.9)
bn1_act = tf.nn.elu(bn1)
hidden2 = tf.layers.dense(bn1_act, n_hidden2, name="hidden2")
bn2 = tf.layers.batch_normalization(hidden2, training=training, momentum=0.9)
bn2_act = tf.nn.elu(bn2)
logits_before_bn = tf.layers.dense(bn2_act, n_outputs, name="outputs")
logits = tf.layers.batch_normalization(logits_before_bn, training=training, momentum=0.9)
# ###### The code is quite repetitive, with the same batch normalization parameters appearing over and over again. To avoid this repetition, you can use the partial() function from the functools module. It creates a thin wrapper aroud a function and allows you to define default values for some parameters. The creation of the network layers in the preceding code can be modified as follows:
# In[9]:
tf.reset_default_graph()
from functools import partial
n_inputs = 28 * 28
n_hidden1 = 300
n_hidden2 = 100
n_outputs = 10
X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
training = tf.placeholder_with_default(False, shape=(), name="training")
# partial() bakes the shared training/momentum arguments into one callable.
my_batch_norm_layer = partial(tf.layers.batch_normalization, training=training, momentum=0.9)
hidden1 = tf.layers.dense(X, n_hidden1, name="hidden1")
bn1 = my_batch_norm_layer(hidden1)
bn1_act = tf.nn.elu(bn1)
hidden2 = tf.layers.dense(bn1_act, n_hidden2, name="hidden2")
bn2 = my_batch_norm_layer(hidden2)
bn2_act = tf.nn.elu(bn2)
logits_before_bn = tf.layers.dense(bn2_act, n_outputs, name="outputs")
logits = my_batch_norm_layer(logits_before_bn)
# #### Let's build a neural net for MNIST, using the ELU activation function and Batch Normalization at each layer:
# In[10]:
tf.reset_default_graph()
from functools import partial
batch_norm_momentum = 0.9
learning_rate = 0.01
n_inputs = 28 * 28
n_hidden1 = 300
n_hidden2 = 100
n_outputs = 10
X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
y = tf.placeholder(tf.int32, shape=(None), name="y")
# Fed as True during training so BN uses batch statistics.
training = tf.placeholder_with_default(False, shape=(), name="training")
with tf.name_scope("dnn"):
    # He initialization for the dense layers; BN between dense and ELU.
    he_init = tf.contrib.layers.variance_scaling_initializer()
    my_batch_norm_layer = partial(tf.layers.batch_normalization, training=training, momentum=batch_norm_momentum)
    my_dense_layer = partial(tf.layers.dense, kernel_initializer=he_init)
    hidden1 = my_dense_layer(X, n_hidden1, name="hidden1")
    bn1 = tf.nn.elu(my_batch_norm_layer(hidden1))
    hidden2 = my_dense_layer(bn1, n_hidden2, name="hidden2")
    bn2 = tf.nn.elu(my_batch_norm_layer(hidden2))
    logits_before_bn = my_dense_layer(bn2, n_outputs, name="outputs")
    logits = my_batch_norm_layer(logits_before_bn)
with tf.name_scope("loss"):
    # Sparse variant: y holds integer class indices, not one-hot vectors.
    xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
    loss = tf.reduce_mean(xentropy, name="loss")
with tf.name_scope("train"):
    optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    training_op = optimizer.minimize(loss)
with tf.name_scope("eval"):
    # Accuracy: fraction of rows whose top logit matches the label.
    correct = tf.nn.in_top_k(logits, y, 1)
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
init = tf.global_variables_initializer()
saver = tf.train.Saver()
# In[11]:
n_epochs = 40
batch_size = 64
# In[6]:
import numpy as np
(X_train, y_train), (X_test, y_test) = tf.keras.datasets.mnist.load_data()
# Flatten the 28x28 images to 784-vectors and scale pixels to [0, 1].
X_train = X_train.astype(np.float32).reshape(-1, 28*28) / 255.0
X_test = X_test.astype(np.float32).reshape(-1, 28*28) / 255.0
y_train = y_train.astype(np.int32)
y_test = y_test.astype(np.int32)
# Carve the first 5000 training examples off as a validation set.
X_valid, X_train = X_train[:5000], X_train[5000:]
y_valid, y_train = y_train[:5000], y_train[5000:]
# In[13]:
def shuffle_batch(X, y, batch_size):
    """Yield (X_batch, y_batch) mini-batches covering X and y once, in random order.

    Uses a single random permutation of the indices, split into
    len(X) // batch_size roughly equal chunks.
    """
    shuffled = np.random.permutation(len(X))
    n_batches = len(X) // batch_size
    for chunk in np.array_split(shuffled, n_batches):
        yield X[chunk], y[chunk]
# In[14]:
# Batch norm's moving-average update ops are registered in the UPDATE_OPS
# collection, so they must be run explicitly alongside the training op.
extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)

with tf.Session() as sess:
    init.run()
    for epoch in range(n_epochs):
        for X_batch, y_batch in shuffle_batch(X_train, y_train, batch_size):
            # training=True switches batch norm to batch statistics.
            sess.run([training_op, extra_update_ops], feed_dict={training: True, X: X_batch, y: y_batch})
        accuracy_val = accuracy.eval(feed_dict={X: X_valid, y: y_valid})
        print(epoch, "Validation accuracy:", accuracy_val)
    save_path = saver.save(sess, "./my_model_final.ckpt")
# # Gradient Clipping
# A popular technique to lessen the exploding gradients problem is to simply clip the gradients during backpropagation so they never exceed some threshold. This is called **Gradient Clipping**. In TensorFlow, the optimizer's minimize() function takes care of both computing the gradients and applying them, so you must instead call the optimizer's **compute_gradients()** method first, then create an operation to clip the gradients using the **clip_by_value()** function, and finally create an operation to apply the clipped gradients using the optimizer's **apply_gradients()** method:

# In[31]:
tf.reset_default_graph()

n_inputs = 28 * 28
n_hidden1 = 300
n_hidden2 = 50
n_hidden3 = 50
n_hidden4 = 50
n_hidden5 = 50
n_outputs = 10

X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
y = tf.placeholder(tf.int32, shape=None, name= "y")

with tf.name_scope("dnn"):
    hidden1 = tf.layers.dense(X, n_hidden1, activation=tf.nn.relu, name="hidden1")
    hidden2 = tf.layers.dense(hidden1, n_hidden2, activation=tf.nn.relu, name="hidden2")
    hidden3 = tf.layers.dense(hidden2, n_hidden3, activation=tf.nn.relu, name="hidden3")
    hidden4 = tf.layers.dense(hidden3, n_hidden4, activation=tf.nn.relu, name="hidden4")
    hidden5 = tf.layers.dense(hidden4, n_hidden5, activation=tf.nn.relu, name="hidden5")
    logits = tf.layers.dense(hidden5, n_outputs, name="outputs")

with tf.name_scope("loss"):
    xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
    loss = tf.reduce_mean(xentropy, name="loss")

# In[32]:
learning_rate = 0.01
threshold = 1.0

with tf.name_scope("train"):
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
    # Clip every gradient elementwise into [-threshold, threshold]
    # before applying it.
    grads_and_vars = optimizer.compute_gradients(loss)
    capped_gvs = [(tf.clip_by_value(grad, -threshold, threshold), var) for grad, var in grads_and_vars]
    training_op = optimizer.apply_gradients(capped_gvs)

# It will compute the gradients, clip them between -1.0 and 1.0, and apply them.

# In[33]:
with tf.name_scope("eval"):
    correct = tf.nn.in_top_k(logits, y, 1)
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32), name="accuracy")

# In[34]:
init = tf.global_variables_initializer()
saver = tf.train.Saver()

# In[35]:
n_epochs = 40
batch_size = 64

# In[36]:
with tf.Session() as sess:
    init.run()
    for epoch in range(n_epochs):
        for X_batch, y_batch in shuffle_batch(X_train, y_train, batch_size):
            sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
        accuracy_val = accuracy.eval(feed_dict={X: X_valid, y: y_valid})
        print(epoch, "Validation accuracy:", accuracy_val)
    save_path = saver.save(sess, "./my_model_final.ckpt")
# # Reusing Pretrained Layers
# ## Reusing a TensorFlow Model
# If the original model was trained using TensorFlow, you can simply restore it and train it on the new task. You can use the **import_meta_graph()** function to import the operations into the default graph. This returns a **Saver** that you can use later to load the model's state (i.e., the variable values):

# In[37]:
tf.reset_default_graph()
saver = tf.train.import_meta_graph("./my_model_final.ckpt.meta")

# In[38]:
# To list all the operations you can use the graph's get_operations() method:
for op in tf.get_default_graph().get_operations():
    print(op.name)

# In[39]:
# Once you know which operations you need, you can get a handle on them using
# the graph's get_operation_by_name() and get_tensor_by_name() methods:
X = tf.get_default_graph().get_tensor_by_name("X:0")
y = tf.get_default_graph().get_tensor_by_name("y:0")
accuracy = tf.get_default_graph().get_tensor_by_name("eval/accuracy:0")
training_op = tf.get_default_graph().get_operation_by_name("train/GradientDescent")

# **Note:** The name of a tensor is the name of the operation that outputs it followed by :0 (or :1 if it is the second output, :2 if it is the third, and so on).

# In[40]:
# If you are the author of the original model, you could make things easier for
# people who will reuse your model by giving operations very clear names and
# documenting them. Another approach is to create a collection containing all
# the important operations that users will want to get a handle on:
for op in (X, y, accuracy, training_op):
    tf.add_to_collection("my_important_ops", op)

# In[41]:
# This way people who reuse your model will be able to simply write:
X, y, accuracy, training_op = tf.get_collection("my_important_ops")

# In[42]:
# You can then restore the model's state using the Saver and continue training using your own data:
with tf.Session() as sess:
    saver.restore(sess, "./my_model_final.ckpt")
    for epoch in range(n_epochs):
        for X_batch, y_batch in shuffle_batch(X_train, y_train, batch_size):
            sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
        accuracy_val = accuracy.eval(feed_dict={X: X_valid, y: y_valid})
        print(epoch, "Validation accuracy:", accuracy_val)
    save_path = saver.save(sess, "./my_new_model_final.ckpt")
# In general you will want to reuse only the lower layers. If you are using **import_meta_graph()** it will load the whole graph, but you can simply ignore the parts you do not need. In this example, we add a new 4th hidden layer on top of the pretrained 3rd layer (ignoring the old 4th hidden layer). We also build a new output layer, the loss for this new output, and a new optimizer to minimize it. We also need another saver to save the whole graph (containing both the entire old graph plus the new operations), and an initialization operation to initialize all the new variables:

# In[43]:
tf.reset_default_graph()

n_hidden4 = 20  # new layer
n_outputs = 10  # new layer

saver = tf.train.import_meta_graph("./my_model_final.ckpt.meta")

X = tf.get_default_graph().get_tensor_by_name("X:0")
y = tf.get_default_graph().get_tensor_by_name("y:0")
# Output of the pretrained 3rd hidden layer (after its ReLU).
hidden3 = tf.get_default_graph().get_tensor_by_name("dnn/hidden3/Relu:0")

new_hidden4 = tf.layers.dense(hidden3, n_hidden4, activation=tf.nn.relu, name="new_hidden4")
new_logits = tf.layers.dense(new_hidden4, n_outputs, name="new_outputs")

with tf.name_scope("new_loss"):
    xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=new_logits)
    loss = tf.reduce_mean(xentropy, name="loss")

with tf.name_scope("new_eval"):
    correct = tf.nn.in_top_k(new_logits, y, 1)
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32), name="accuracy")

with tf.name_scope("new_train"):
    optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    training_op = optimizer.minimize(loss)

init = tf.global_variables_initializer()
new_saver = tf.train.Saver()

# In[44]:
# And we can train this model:
with tf.Session() as sess:
    init.run()
    saver.restore(sess, "./my_model_final.ckpt")
    for epoch in range(n_epochs):
        for X_batch, y_batch in shuffle_batch(X_train, y_train, batch_size):
            sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
        accuracy_val = accuracy.eval(feed_dict={X: X_valid, y: y_valid})
        print(epoch, "Validation accuracy:", accuracy_val)
    save_path = new_saver.save(sess, "./my_new_model_final.ckpt")
# If you have access to the Python code that built the original graph, you can just reuse the parts you need and drop the rest:

# In[45]:
tf.reset_default_graph()

n_inputs = 28 * 28
n_hidden1 = 300  # reused
n_hidden2 = 50  # reused
n_hidden3 = 50  # reused
n_hidden4 = 20  # new
n_outputs = 10  # new

X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
y = tf.placeholder(tf.int32, shape=(None), name="y")

with tf.name_scope("dnn"):
    hidden1 = tf.layers.dense(X, n_hidden1, activation=tf.nn.relu, name="hidden1")
    hidden2 = tf.layers.dense(hidden1, n_hidden2, activation=tf.nn.relu, name="hidden2")
    hidden3 = tf.layers.dense(hidden2, n_hidden3, activation=tf.nn.relu, name="hidden3")
    hidden4 = tf.layers.dense(hidden3, n_hidden4, activation=tf.nn.relu, name="hidden4")
    logits = tf.layers.dense(hidden4, n_outputs, name="outputs")

with tf.name_scope("loss"):
    xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
    loss = tf.reduce_mean(xentropy, name="loss")

with tf.name_scope("eval"):
    correct = tf.nn.in_top_k(logits, y, 1)
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32), name="accuracy")

with tf.name_scope("train"):
    optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    training_op = optimizer.minimize(loss)

# However, you must create one **Saver** to restore the pretrained model (giving it the list of variables to restore, or else it will complain that the graphs don't match), and another **Saver** to save the new model, once it is trained:

# In[46]:
reuse_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope="hidden[123]")
restore_saver = tf.train.Saver(reuse_vars)  # To restore layers 1-3

init = tf.global_variables_initializer()  # To init all variables, old and new
saver = tf.train.Saver()

with tf.Session() as sess:
    init.run()
    restore_saver.restore(sess, "./my_model_final.ckpt")
    for epoch in range(n_epochs):
        for X_batch, y_batch in shuffle_batch(X_train, y_train, batch_size):
            sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
        accuracy_val = accuracy.eval(feed_dict={X: X_valid, y: y_valid})
        print(epoch, "Validation accuracy:", accuracy_val)
    save_path = saver.save(sess, "./my_new_model_final.ckpt")
# # Reusing Models from Other Frameworks
# If the model was trained using another framework, you will need to load the model parameters manually, then assign them to the appropriate variables.

# In[47]:
tf.reset_default_graph()

n_inputs = 2
n_hidden1 = 3

# Stand-in parameters, as if exported from another framework.
original_W = [[1., 2., 3.], [4., 5., 6.]]
original_b = [7., 8., 9.]

X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
hidden1 = tf.layers.dense(X, n_hidden1, activation=tf.nn.relu, name="hidden1")
# [...] Build the rest of the model

# Get a handle on the assignment nodes for the hidden1 variables.
graph = tf.get_default_graph()
assign_kernel = graph.get_operation_by_name("hidden1/kernel/Assign")
assign_bias = graph.get_operation_by_name("hidden1/bias/Assign")
# The second input of each Assign op is its initialization value, which can be
# fed directly when running the initializer to inject the external parameters.
init_kernel = assign_kernel.inputs[1]
init_bias = assign_bias.inputs[1]

init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init, feed_dict={init_kernel: original_W, init_bias: original_b})
    # [...] Train the model on your new task
    print(hidden1.eval(feed_dict={X: [[10.0, 11.0]]}))

# Note: the weights variable created by the tf.layers.dense() function is
# called "kernel" (instead of "weights" when using
# tf.contrib.layers.fully_connected(), as in the book), and the biases
# variable is called "bias" instead of "biases".
# # Freezing the Lower Layers
# It is likely that the lower layers of the first DNN have learned to detect low-level features in pictures that will be useful across both image classification tasks, so you can just reuse these layers as they are. It is generally a good idea to "freeze" their weights when training the new DNN: if the lower-layer weights are fixed, then the higher-layer weights will be easier to train.

# In[50]:
# To freeze the lower layers during training, one solution is to give the
# optimizer the list of variables to train, excluding the variables from the
# lower layers:
tf.reset_default_graph()

n_inputs = 28 * 28
n_hidden1 = 300
n_hidden2 = 50
n_hidden3 = 50
n_hidden4 = 20
n_outputs = 10

X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
y = tf.placeholder(tf.int32, shape=None, name="y")

with tf.name_scope("dnn"):
    hidden1 = tf.layers.dense(X, n_hidden1, activation=tf.nn.relu, name="hidden1")
    hidden2 = tf.layers.dense(hidden1, n_hidden2, activation=tf.nn.relu, name="hidden2")
    hidden3 = tf.layers.dense(hidden2, n_hidden3, activation=tf.nn.relu, name="hidden3")
    hidden4 = tf.layers.dense(hidden3, n_hidden4, activation=tf.nn.relu, name="hidden4")
    logits = tf.layers.dense(hidden4, n_outputs, name="outputs")

with tf.name_scope("loss"):
    xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
    loss = tf.reduce_mean(xentropy, name="loss")

with tf.name_scope("eval"):
    correct = tf.nn.in_top_k(logits, y, 1)
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32), name="accuracy")

with tf.name_scope("train"):
    optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    # Only layers 3-4 and the output layer are optimized; hidden1/hidden2
    # are excluded from var_list and therefore frozen.
    train_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="hidden[34]|outputs")
    training_op = optimizer.minimize(loss, var_list=train_vars)

reuse_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                               scope="hidden[123]")  # regular expression
restore_saver = tf.train.Saver(reuse_vars)  # to restore layers 1-3

init = tf.global_variables_initializer()
saver = tf.train.Saver()

with tf.Session() as sess:
    init.run()
    restore_saver.restore(sess, "./my_model_final.ckpt")
    for epoch in range(n_epochs):
        for X_batch, y_batch in shuffle_batch(X_train, y_train, batch_size):
            sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
        accuracy_val = accuracy.eval(feed_dict={X: X_valid, y: y_valid})
        print(epoch, "Validation accuracy:", accuracy_val)
    save_path = saver.save(sess, "./my_new_model_final.ckpt")
# In[51]:
# Another option is to add a stop_gradient() layer in the graph. Any layer
# below it will be frozen:
tf.reset_default_graph()

n_inputs = 28 * 28
n_hidden1 = 300
n_hidden2 = 50
n_hidden3 = 50
n_hidden4 = 20
n_outputs = 10

X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
y = tf.placeholder(tf.int32, shape=None, name="y")

with tf.name_scope("dnn"):
    hidden1 = tf.layers.dense(X, n_hidden1, activation=tf.nn.relu, name="hidden1")
    hidden2 = tf.layers.dense(hidden1, n_hidden2, activation=tf.nn.relu, name="hidden2")
    # No gradients flow below this point, so hidden1 and hidden2 stay frozen.
    hidden2_stop = tf.stop_gradient(hidden2)
    hidden3 = tf.layers.dense(hidden2_stop, n_hidden3, activation=tf.nn.relu, name="hidden3")
    hidden4 = tf.layers.dense(hidden3, n_hidden4, activation=tf.nn.relu, name="hidden4")
    logits = tf.layers.dense(hidden4, n_outputs, name="outputs")

with tf.name_scope("loss"):
    xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
    loss = tf.reduce_mean(xentropy, name="loss")

with tf.name_scope("eval"):
    correct = tf.nn.in_top_k(logits, y, 1)
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32), name="accuracy")

with tf.name_scope("train"):
    optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    training_op = optimizer.minimize(loss)

reuse_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope="hidden[123]")
restore_saver = tf.train.Saver(reuse_vars)  # to restore layers 1-3

init = tf.global_variables_initializer()
saver = tf.train.Saver()

with tf.Session() as sess:
    init.run()
    restore_saver.restore(sess, "./my_model_final.ckpt")
    for epoch in range(n_epochs):
        for X_batch, y_batch in shuffle_batch(X_train, y_train, batch_size):
            sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
        accuracy_val = accuracy.eval(feed_dict={X: X_valid, y: y_valid})
        print(epoch, "Validation accuracy:", accuracy_val)
    save_path = saver.save(sess, "./my_new_model_final.ckpt")
# # Caching the Frozen Layers
# Since the frozen layers won't change during training, instead of building
# batches of training instances, it gives a huge boost to training to
# precompute the outputs of hidden layer 2 once and feed batches of those
# cached activations to the training operation:

# In[61]:
import numpy as np

tf.reset_default_graph()

n_inputs = 28 * 28  # MNIST
n_hidden1 = 300  # reused
n_hidden2 = 50  # reused
n_hidden3 = 50  # reused
n_hidden4 = 20  # new!
n_outputs = 10  # new!

X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
y = tf.placeholder(tf.int32, shape=(None), name="y")

with tf.name_scope("dnn"):
    hidden1 = tf.layers.dense(X, n_hidden1, activation=tf.nn.relu, name="hidden1")  # reused frozen
    hidden2 = tf.layers.dense(hidden1, n_hidden2, activation=tf.nn.relu, name="hidden2")  # reused frozen & cached
    hidden2_stop = tf.stop_gradient(hidden2)
    hidden3 = tf.layers.dense(hidden2_stop, n_hidden3, activation=tf.nn.relu, name="hidden3")  # reused, not frozen
    hidden4 = tf.layers.dense(hidden3, n_hidden4, activation=tf.nn.relu, name="hidden4")  # new!
    logits = tf.layers.dense(hidden4, n_outputs, name="outputs")  # new!

with tf.name_scope("loss"):
    xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
    loss = tf.reduce_mean(xentropy, name="loss")

with tf.name_scope("eval"):
    correct = tf.nn.in_top_k(logits, y, 1)
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32), name="accuracy")

with tf.name_scope("train"):
    optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    training_op = optimizer.minimize(loss)

reuse_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope="hidden[123]")  # regular expression
restore_saver = tf.train.Saver(reuse_vars)  # to restore layers 1-3

init = tf.global_variables_initializer()
saver = tf.train.Saver()

n_batches = len(X_train) // batch_size

with tf.Session() as sess:
    init.run()
    restore_saver.restore(sess, "./my_model_final.ckpt")

    # Precompute the (frozen) hidden-2 activations once for the whole
    # training and validation sets; training then feeds cached batches
    # straight into the `hidden2` tensor.
    h2_cache = sess.run(hidden2, feed_dict={X: X_train})
    h2_cache_valid = sess.run(hidden2, feed_dict={X: X_valid})

    for epoch in range(n_epochs):
        # BUG FIX: the original referenced mnist.train.num_examples and
        # mnist.train.labels, but `mnist` is undefined here — this script
        # loads MNIST via tf.keras into X_train / y_train. Use those.
        shuffled_idx = np.random.permutation(len(X_train))
        hidden2_batches = np.array_split(h2_cache[shuffled_idx], n_batches)
        y_batches = np.array_split(y_train[shuffled_idx], n_batches)
        for hidden2_batch, y_batch in zip(hidden2_batches, y_batches):
            sess.run(training_op, feed_dict={hidden2: hidden2_batch, y: y_batch})

        accuracy_val = accuracy.eval(feed_dict={hidden2: h2_cache_valid, y: y_valid})
        print(epoch, "Validation accuracy:", accuracy_val)

    save_path = saver.save(sess, "./my_new_model_final.ckpt")
# # Learning Rate Scheduling
# If you start with a high learning rate and then reduce it once it stops making fast progress, you can reach a good solution faster than with the optimal constant learning rate. There are many different strategies to reduce the learning rate during training. These strategies are called learning schedules. We are implementing exponential scheduling here:

# In[85]:
tf.reset_default_graph()

n_inputs = 28 * 28
n_hidden1 = 300
n_hidden2 = 50
n_outputs = 10

X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
y = tf.placeholder(tf.int32, shape=None, name="y")

with tf.name_scope("dnn"):
    hidden1 = tf.layers.dense(X, n_hidden1, activation=tf.nn.relu, name="hidden1")
    hidden2 = tf.layers.dense(hidden1, n_hidden2, activation=tf.nn.relu, name="hidden2")
    logits = tf.layers.dense(hidden2, n_outputs, name="outputs")

with tf.name_scope("loss"):
    xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
    loss = tf.reduce_mean(xentropy, name="loss")

with tf.name_scope("eval"):
    correct = tf.nn.in_top_k(logits, y, 1)
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32), name="accuracy")

with tf.name_scope("train"):
    # The learning rate will drop by a factor of 10 (decay_rate) every 10000 (decay_steps) steps.
    initial_learning_rate = 0.1
    decay_steps = 10000
    decay_rate = 1/10
    # Nontrainable counter of training iterations (initialized to 0);
    # minimize() increments it via global_step=.
    global_step = tf.Variable(0, trainable=False, name="global_step")
    learning_rate = tf.train.exponential_decay(initial_learning_rate, global_step, decay_steps, decay_rate)
    optimizer = tf.train.AdamOptimizer(learning_rate)
    training_op = optimizer.minimize(loss, global_step=global_step)

init = tf.global_variables_initializer()
saver = tf.train.Saver()

# In[86]:
n_epochs = 20
batch_size = 64

with tf.Session() as sess:
    init.run()
    for epoch in range(n_epochs):
        for X_batch, y_batch in shuffle_batch(X_train, y_train, batch_size):
            sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
        accuracy_val = accuracy.eval(feed_dict={X: X_valid, y: y_valid})
        print(epoch, "Validation accuracy:", accuracy_val)
    save_path = saver.save(sess, "./my_model_final.ckpt")
# # L1 and L2 regularization
# We can also use l1 and l2 regularization to constrain a neural network's connection weights (but typically not its biases).

# In[87]:
# For example, assuming you have just one hidden layer with weights W1 and one
# output layer with weights W2, then you can apply L1 regularization like this:
tf.reset_default_graph()

n_inputs = 28 * 28
n_hidden1 = 300
n_outputs = 10

X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
y = tf.placeholder(tf.int32, shape=(None), name="y")

with tf.name_scope("dnn"):
    hidden1 = tf.layers.dense(X, n_hidden1, activation=tf.nn.relu, name="hidden1")
    logits = tf.layers.dense(hidden1, n_outputs, name="outputs")

# In[89]:
W1 = tf.get_default_graph().get_tensor_by_name("hidden1/kernel:0")
W2 = tf.get_default_graph().get_tensor_by_name("outputs/kernel:0")

scale = 0.001  # L1 regularization parameter

with tf.name_scope("loss"):
    xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
    base_loss = tf.reduce_mean(xentropy, name="avg_xentropy")
    # L1 penalty on the weight matrices only (biases are not regularized).
    reg_losses = tf.reduce_sum(tf.abs(W1)) + tf.reduce_sum(tf.abs(W2))
    loss = tf.add(base_loss, scale * reg_losses, name="loss")

# In[90]:
with tf.name_scope("eval"):
    correct = tf.nn.in_top_k(logits, y, 1)
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32), name="accuracy")

learning_rate = 0.01

with tf.name_scope("train"):
    optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    training_op = optimizer.minimize(loss)

init = tf.global_variables_initializer()
saver = tf.train.Saver()

n_epochs = 20
batch_size = 64

with tf.Session() as sess:
    init.run()
    for epoch in range(n_epochs):
        for X_batch, y_batch in shuffle_batch(X_train, y_train, batch_size):
            sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
        accuracy_val = accuracy.eval(feed_dict={X: X_valid, y: y_valid})
        print(epoch, "Validation accuracy:", accuracy_val)
    save_path = saver.save(sess, "./my_model_final.ckpt")
# TensorFlow provides a better option when there are many layers. Many functions that create variables (such as **get_variable()** or **tf.layers.dense()**) accept a ***_regularizer** argument for each created variable. You can pass any function that takes weights as an argument and returns the corresponding regularization loss.

# In[101]:
tf.reset_default_graph()

n_inputs = 28 * 28  # MNIST
n_hidden1 = 300
n_hidden2 = 50
n_outputs = 10

X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
y = tf.placeholder(tf.int32, shape=(None), name="y")

# This code creates a neural network with two hidden layers and one output layer, and it also creates nodes in the graph to compute the L1 regularization loss corresponding to each layer's weights. TensorFlow automatically adds these nodes to a special collection containing all the regularization losses.

# In[102]:
scale = 0.001

my_dense_layer = partial(tf.layers.dense, activation=tf.nn.relu,
                         kernel_regularizer=tf.contrib.layers.l1_regularizer(scale))

with tf.name_scope("dnn"):
    hidden1 = my_dense_layer(X, n_hidden1, name="hidden1")
    hidden2 = my_dense_layer(hidden1, n_hidden2, name="hidden2")
    logits = my_dense_layer(hidden2, n_outputs, activation=None, name="outputs")

# In[103]:
with tf.name_scope("loss"):
    xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
    base_loss = tf.reduce_mean(xentropy, name="avg_xentropy")
    # Sum the auto-collected per-layer regularization losses into the total loss.
    reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
    loss = tf.add_n([base_loss] + reg_losses, name="loss")

# In[104]:
with tf.name_scope("eval"):
    correct = tf.nn.in_top_k(logits, y, 1)
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32), name="accuracy")

learning_rate = 0.01

with tf.name_scope("train"):
    optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    training_op = optimizer.minimize(loss)

init = tf.global_variables_initializer()
saver = tf.train.Saver()

n_epochs = 20
batch_size = 64

with tf.Session() as sess:
    init.run()
    for epoch in range(n_epochs):
        for X_batch, y_batch in shuffle_batch(X_train, y_train, batch_size):
            sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
        accuracy_val = accuracy.eval(feed_dict={X: X_valid, y: y_valid})
        print(epoch, "Validation accuracy:", accuracy_val)
    save_path = saver.save(sess, "./my_model_final.ckpt")
# # Dropout
# To implement dropout using TensorFlow, you can simply apply the **tf.layers.dropout()** function to the input layer and/or to the output of any hidden layer you want. During training, this function randomly drops some items (setting them to 0) and divides the remaining items by the keep probability. After training, this function does nothing at all. The following code applies dropout regularization to our three-layer neural network:

# In[108]:
tf.reset_default_graph()

n_inputs = 28 * 28  # MNIST
n_hidden1 = 300
n_hidden2 = 50
n_outputs = 10

X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
y = tf.placeholder(tf.int32, shape=(None), name="y")

# In[109]:
# `training` toggles dropout on (True) and off (False, the default).
training = tf.placeholder_with_default(False, shape=(), name="training")

dropout_rate = 0.5  # == 1 - keep_prob
X_drop = tf.layers.dropout(X, dropout_rate, training=training)

with tf.name_scope("dnn"):
    # BUG FIX: the original fed X (not X_drop) into hidden1, so the
    # input-layer dropout created above was silently unused.
    hidden1 = tf.layers.dense(X_drop, n_hidden1, activation=tf.nn.relu, name="hidden1")
    hidden1_drop = tf.layers.dropout(hidden1, dropout_rate, training=training)
    hidden2 = tf.layers.dense(hidden1_drop, n_hidden2, activation=tf.nn.relu, name="hidden2")
    hidden2_drop = tf.layers.dropout(hidden2, dropout_rate, training=training)
    logits = tf.layers.dense(hidden2_drop, n_outputs, name="outputs")

# In[110]:
with tf.name_scope("loss"):
    xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
    loss = tf.reduce_mean(xentropy, name="loss")

with tf.name_scope("train"):
    optimizer = tf.train.MomentumOptimizer(learning_rate, momentum=0.9)
    training_op = optimizer.minimize(loss)

with tf.name_scope("eval"):
    correct = tf.nn.in_top_k(logits, y, 1)
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))

init = tf.global_variables_initializer()
saver = tf.train.Saver()

# In[111]:
# We need to set training to True only when training, and leave the default
# False value when testing.
n_epochs = 20
batch_size = 64

with tf.Session() as sess:
    init.run()
    for epoch in range(n_epochs):
        for X_batch, y_batch in shuffle_batch(X_train, y_train, batch_size):
            sess.run(training_op, feed_dict={X: X_batch, y: y_batch, training: True})
        accuracy_val = accuracy.eval(feed_dict={X: X_valid, y: y_valid})
        print(epoch, "Validation accuracy:", accuracy_val)
    save_path = saver.save(sess, "./my_model_final.ckpt")

# **Note1:** If you observe that the model is overfitting, you can increase the dropout rate. Conversely, you should try decreasing the dropout rate if the model underfits the training set. <br>
# **Note2:** Dropconnect is a variant of dropout where individual connections are dropped randomly rather than whole neurons. In general dropout performs better.
# # Max-Norm Regularization
# - Another regularization technique that is quite popular for neural networks is called max-norm regularization: for each neuron, it constrains the weights **w** of the incoming connections such that ||**w**||$_{2}$ $\leq$ r, where r is the max-norm hyperparameter and ||.||$_{2}$ is the l2 norm.<br>
# - Reducing r increases the amount of regularization and helps reduce overfitting. Max-norm regularization can also help alleviate the vanishing/exploding gradients problems (if you are not using Batch Normalization). <br>
#
#
# In[118]:
tf.reset_default_graph()

n_inputs = 28 * 28
n_hidden1 = 300
n_hidden2 = 50
n_outputs = 10

learning_rate = 0.01
momentum = 0.9

X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
y = tf.placeholder(tf.int32, shape=(None), name="y")

with tf.name_scope("dnn"):
    hidden1 = tf.layers.dense(X, n_hidden1, activation=tf.nn.relu, name="hidden1")
    hidden2 = tf.layers.dense(hidden1, n_hidden2, activation=tf.nn.relu, name="hidden2")
    logits = tf.layers.dense(hidden2, n_outputs, name="outputs")

with tf.name_scope("loss"):
    xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
    loss = tf.reduce_mean(xentropy, name="loss")

with tf.name_scope("train"):
    optimizer = tf.train.MomentumOptimizer(learning_rate, momentum)
    training_op = optimizer.minimize(loss)

with tf.name_scope("eval"):
    correct = tf.nn.in_top_k(logits, y, 1)
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))

# TensorFlow does not provide an off-the-shelf max-norm regularizer, but it is not too hard to implement. The following code gets a handle on the weights of the first and second hidden layer, then it uses the **clip_by_norm()** function to create an operation that will clip the weights along the second axis so that each row vector ends up with a maximum norm of 1.0. The last line creates an assignment operation that will assign the clipped weights to the weights variable:

# In[119]:
threshold = 1.0
weights = tf.get_default_graph().get_tensor_by_name("hidden1/kernel:0")
clipped_weights = tf.clip_by_norm(weights, clip_norm=threshold, axes=1)
clip_weights = tf.assign(weights, clipped_weights)

# In[120]:
# BUG FIX: the original clipped and re-assigned `weights` (hidden1's kernel)
# here a second time, leaving hidden2's kernel completely unconstrained.
# Clip and assign hidden2's own kernel instead.
weights2 = tf.get_default_graph().get_tensor_by_name("hidden2/kernel:0")
clipped_weights2 = tf.clip_by_norm(weights2, clip_norm=threshold, axes=1)
clip_weights2 = tf.assign(weights2, clipped_weights2)

# In[121]:
init = tf.global_variables_initializer()
saver = tf.train.Saver()

# In[122]:
n_epochs = 20
batch_size = 64

with tf.Session() as sess:
    init.run()
    for epoch in range(n_epochs):
        for X_batch, y_batch in shuffle_batch(X_train, y_train, batch_size):
            sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
            # Re-apply the max-norm constraint after every gradient step.
            clip_weights.eval()
            clip_weights2.eval()
        acc_valid = accuracy.eval(feed_dict={X: X_valid, y: y_valid})
        print(epoch, "Validation accuracy:", acc_valid)
    save_path = saver.save(sess, "./my_model_final.ckpt")
# When we want to do this for every hidden layer, we can create a **max_norm_regularizer()** function and use it just like the earlier **l1_regularizer()** function:

# In[129]:
tf.reset_default_graph()

n_inputs = 28 * 28
n_hidden1 = 300
n_hidden2 = 50
n_outputs = 10

learning_rate = 0.01
momentum = 0.9

X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
y = tf.placeholder(tf.int32, shape=(None), name="y")

# In[130]:
def max_norm_regularizer(threshold, axes=1, name="max_norm", collection="max_norm"):
    """Build a max-norm "regularizer" for use as a kernel_regularizer.

    The returned function contributes no loss term (it returns None).
    Instead, for each weights variable it is applied to, it registers a
    clipping op in `collection`; run those ops after every training step
    to enforce the max-norm constraint.
    """
    def max_norm(weights):
        clipped_value = tf.clip_by_norm(weights, clip_norm=threshold, axes=axes)
        clip_op = tf.assign(weights, clipped_value, name=name)
        tf.add_to_collection(collection, clip_op)
        return None  # there is no regularization loss term
    return max_norm
# This function returns a parametrized **max_norm()** function that you can use like any other regularizer:
# In[131]:
max_norm_reg = max_norm_regularizer(threshold=1.0)
with tf.name_scope("dnn"):
    # BUG FIX: the first hidden layer's width was `n_inputs` (784) in the
    # original, contradicting the declared n_hidden1 = 300 above.
    hidden1 = tf.layers.dense(X, n_hidden1, activation=tf.nn.relu, kernel_regularizer=max_norm_reg, name="hidden1")
    hidden2 = tf.layers.dense(hidden1, n_hidden2, activation=tf.nn.relu, kernel_regularizer=max_norm_reg, name="hidden2")
    logits = tf.layers.dense(hidden2, n_outputs, name="outputs")
# In[132]:
with tf.name_scope("loss"):
    xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
    loss = tf.reduce_mean(xentropy, name="loss")
with tf.name_scope("train"):
    optimizer = tf.train.MomentumOptimizer(learning_rate, momentum)
    training_op = optimizer.minimize(loss)
with tf.name_scope("eval"):
    correct = tf.nn.in_top_k(logits, y, 1)
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
init = tf.global_variables_initializer()
saver = tf.train.Saver()
# In[134]:
n_epochs = 20
batch_size = 64
# In[135]:
# All clip ops registered by max_norm_regularizer live in the "max_norm" collection.
clip_all_weights = tf.get_collection("max_norm")
with tf.Session() as sess:
    init.run()
    for epoch in range(n_epochs):
        for X_batch, y_batch in shuffle_batch(X_train, y_train, batch_size):
            sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
            # Enforce the max-norm constraint on every registered kernel.
            sess.run(clip_all_weights)
        accuracy_val = accuracy.eval(feed_dict={X: X_valid, y: y_valid})
        print(epoch, "Validation accuracy:", accuracy_val)
    save_path = saver.save(sess, "./my_model_final.ckpt")
# **Exercise:** Build a DNN with five hidden layers of 100 neurons each, He initialization, and the ELU activation function.
# In[17]:
tf.reset_default_graph()
n_inputs = 28 * 28
n_hidden1 = 100
n_hidden2 = 100
n_hidden3 = 100
n_hidden4 = 100
n_hidden5 = 100
n_outputs = 5  # only digits 0 to 4 for this exercise
X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
y = tf.placeholder(tf.int32, shape=None, name="y")
# He initialization keeps the variance of activations stable across layers.
he_init = tf.variance_scaling_initializer()
with tf.name_scope("dnn"):
    hidden1 = tf.layers.dense(X, n_hidden1, activation=tf.nn.elu, kernel_initializer=he_init, name="hidden1")
    hidden2 = tf.layers.dense(hidden1, n_hidden2, activation=tf.nn.elu, kernel_initializer=he_init, name="hidden2")
    hidden3 = tf.layers.dense(hidden2, n_hidden3, activation=tf.nn.elu, kernel_initializer=he_init, name="hidden3")
    hidden4 = tf.layers.dense(hidden3, n_hidden4, activation=tf.nn.elu, kernel_initializer=he_init, name="hidden4")
    hidden5 = tf.layers.dense(hidden4, n_hidden5, activation=tf.nn.elu, kernel_initializer=he_init, name="hidden5")
    logits = tf.layers.dense(hidden5, n_outputs, kernel_initializer=he_init, name="outputs")
    Y_proba = tf.nn.softmax(logits, name="Y_proba")
# **Exercise:** Using Adam optimization and early stopping, try training it on MNIST but only on digits 0 to 4, as we will use transfer learning for digits 5 to 9 in the next exercise. You will need a softmax output layer with five neurons, and as always make sure to save checkpoints at regular intervals and save the final model so you can reuse it later.
# In[18]:
learning_rate = 0.01
# Cross-entropy loss over the logits, then Adam optimization.
xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
loss = tf.reduce_mean(xentropy, name="loss")
optimizer = tf.train.AdamOptimizer(learning_rate)
training_op = optimizer.minimize(loss, name="training_op")
# Accuracy: fraction of instances whose top-1 prediction matches the label.
correct = tf.nn.in_top_k(logits, y, 1)
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32), name="accuracy")
init = tf.global_variables_initializer()
saver = tf.train.Saver()
# Now let's create the training set, validation and test set (we need the validation set to implement early stopping):
# In[19]:
# Keep only digits 0-4; digits 5-9 are reserved for the transfer-learning exercise.
X_train1 = X_train[y_train < 5]
y_train1 = y_train[y_train < 5]
X_valid1 = X_valid[y_valid < 5]
y_valid1 = y_valid[y_valid < 5]
X_test1 = X_test[y_test < 5]
y_test1 = y_test[y_test < 5]
# In[20]:
# Train with early stopping: stop after `max_checks_without_progress` epochs
# with no improvement of the validation loss; checkpoint the best model.
n_epochs = 1000
batch_size = 32
max_checks_without_progress = 20
checks_without_progress = 0
best_loss = np.infty
with tf.Session() as sess:
    init.run()
    for epoch in range(n_epochs):
        # Shuffle the training set and iterate over mini-batches.
        rnd_idx = np.random.permutation(len(X_train1))
        for rnd_indices in np.array_split(rnd_idx, len(X_train1) // batch_size):
            X_batch, y_batch = X_train1[rnd_indices], y_train1[rnd_indices]
            sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
        loss_val, acc_val = sess.run([loss, accuracy], feed_dict={X: X_valid1, y: y_valid1})
        if loss_val < best_loss:
            # New best validation loss: checkpoint and reset the patience counter.
            save_path = saver.save(sess, "./my_mnist_model_0_to_4.ckpt")
            best_loss = loss_val
            checks_without_progress = 0
        else:
            checks_without_progress += 1
            if checks_without_progress > max_checks_without_progress:
                print("Early stopping!")
                break
        print("{}\tValidation loss: {:.6f}\tBest loss: {:.6f}\t Accuracy: {:.2f}%".format(epoch, loss_val, best_loss, acc_val * 100))
with tf.Session() as sess:
    # Evaluate the best checkpointed model on the (digits 0-4) test set.
    saver.restore(sess, "./my_mnist_model_0_to_4.ckpt")
    acc_test = accuracy.eval(feed_dict={X: X_test1, y: y_test1})
    print("Final test accuracy: {:.2f}%".format(acc_test * 100))
# **Exercise:** Tune the hyperparameters using cross-validation and see what precision you can achieve.
#
# Let's create a DNNClassifier class, compatible with Scikit-Learn's RandomizedSearchCV class, to perform hyperparameter tuning. Here are the key points of this implementation:
# - the **\_\_init\_\_()** method (constructor) does nothing more than create instance variables for each of the hyperparameters.
# - the **fit()** method creates the graph, starts a session and trains the model:
# - it calls the _build_graph() method to build the graph (much like the graph we defined earlier). Once this method is done creating the graph, it saves all the important operations as instance variables for easy access by other methods.
# - the _dnn() method builds the hidden layers, just like the dnn() function above, but also with support for batch normalization and dropout (for the next exercises).
# - if the fit() method is given a validation set (X_valid and y_valid), then it implements early stopping. This implementation does not save the best model to disk, but rather to memory: it uses the _get_model_params() method to get all the graph's variables and their values, and the _restore_model_params() method to restore the variable values (of the best model found). This trick helps speed up training.
# - After the fit() method has finished training the model, it keeps the session open so that predictions can be made quickly, without having to save a model to disk and restore it for every prediction. You can close the session by calling the close_session() method.
# - the **predict_proba()** method uses the trained model to predict the class probabilities.
# - the **predict()** method calls predict_proba() and returns the class with the highest probability, for each instance.
# In[21]:
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.exceptions import NotFittedError
class DNNClassifier(BaseEstimator, ClassifierMixin):
    """Configurable feed-forward DNN classifier, compatible with Scikit-Learn.

    Builds its own TensorFlow graph in ``fit()``, supports optional batch
    normalization and dropout, and implements early stopping when a
    validation set is supplied.  Because it follows the BaseEstimator /
    ClassifierMixin conventions, it can be used with RandomizedSearchCV.
    """

    def __init__(self, n_hidden_layers=5, n_neurons=100, optimizer_class=tf.train.AdamOptimizer,
                 learning_rate=0.01, batch_size=32, activation=tf.nn.elu, initializer=he_init,
                 batch_norm_momentum=None, dropout_rate=None, random_state=None):
        """Initialize the DNNClassifier by simply storing all the hyperparameters."""
        self.n_hidden_layers = n_hidden_layers
        self.n_neurons = n_neurons
        self.optimizer_class = optimizer_class
        self.learning_rate = learning_rate
        self.batch_size = batch_size
        self.activation = activation
        self.initializer = initializer
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.random_state = random_state
        self._session = None

    def _dnn(self, inputs):
        """Build the hidden layers, with support for batch normalization and dropout."""
        for layer in range(self.n_hidden_layers):
            if self.dropout_rate:
                inputs = tf.layers.dropout(inputs, self.dropout_rate, training=self._training)
            # Dense layer without activation; activation is applied after
            # (optional) batch normalization, per the recommended ordering.
            inputs = tf.layers.dense(inputs, self.n_neurons,
                                     kernel_initializer=self.initializer,
                                     name="hidden%d" % (layer + 1))
            if self.batch_norm_momentum:
                inputs = tf.layers.batch_normalization(inputs, momentum=self.batch_norm_momentum,
                                                       training=self._training)
            inputs = self.activation(inputs, name="hidden%d_out" % (layer + 1))
        return inputs

    def _build_graph(self, n_inputs, n_outputs):
        """Build the same model as earlier."""
        if self.random_state is not None:
            tf.set_random_seed(self.random_state)
            np.random.seed(self.random_state)
        X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
        y = tf.placeholder(tf.int32, shape=(None), name="y")
        # `_training` placeholder is only needed when BN or dropout is active.
        if self.batch_norm_momentum or self.dropout_rate:
            self._training = tf.placeholder_with_default(False, shape=(), name='training')
        else:
            self._training = None
        dnn_outputs = self._dnn(X)
        logits = tf.layers.dense(dnn_outputs, n_outputs, kernel_initializer=he_init, name="logits")
        Y_proba = tf.nn.softmax(logits, name="Y_proba")
        xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y,
                                                                  logits=logits)
        loss = tf.reduce_mean(xentropy, name="loss")
        optimizer = self.optimizer_class(learning_rate=self.learning_rate)
        training_op = optimizer.minimize(loss)
        correct = tf.nn.in_top_k(logits, y, 1)
        accuracy = tf.reduce_mean(tf.cast(correct, tf.float32), name="accuracy")
        init = tf.global_variables_initializer()
        saver = tf.train.Saver()
        # Make the important operations available easily through instance variables
        self._X, self._y = X, y
        self._Y_proba, self._loss = Y_proba, loss
        self._training_op, self._accuracy = training_op, accuracy
        self._init, self._saver = init, saver

    def close_session(self):
        """Close the TensorFlow session, if one is open."""
        if self._session:
            self._session.close()

    def _get_model_params(self):
        """Get all variable values (used for early stopping, faster than saving to disk)."""
        with self._graph.as_default():
            gvars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
        return {gvar.op.name: value for gvar, value in zip(gvars, self._session.run(gvars))}

    def _restore_model_params(self, model_params):
        """Set all variables to the given values (for early stopping, faster than loading from disk)."""
        gvar_names = list(model_params.keys())
        # Each variable's initializer has an ".../Assign" op whose second input
        # is the value to assign; feed our saved values through those inputs.
        assign_ops = {gvar_name: self._graph.get_operation_by_name(gvar_name + "/Assign")
                      for gvar_name in gvar_names}
        init_values = {gvar_name: assign_op.inputs[1] for gvar_name, assign_op in assign_ops.items()}
        feed_dict = {init_values[gvar_name]: model_params[gvar_name] for gvar_name in gvar_names}
        self._session.run(assign_ops, feed_dict=feed_dict)

    def fit(self, X, y, n_epochs=100, X_valid=None, y_valid=None):
        """Fit the model to the training set. If X_valid and y_valid are provided, use early stopping."""
        self.close_session()
        # infer n_inputs and n_outputs from the training set.
        n_inputs = X.shape[1]
        self.classes_ = np.unique(y)
        n_outputs = len(self.classes_)
        # Translate the labels vector to a vector of sorted class indices, containing
        # integers from 0 to n_outputs - 1.
        # For example, if y is equal to [8, 8, 9, 5, 7, 6, 6, 6], then the sorted class
        # labels (self.classes_) will be equal to [5, 6, 7, 8, 9], and the labels vector
        # will be translated to [3, 3, 4, 0, 2, 1, 1, 1]
        self.class_to_index_ = {label: index
                                for index, label in enumerate(self.classes_)}
        y = np.array([self.class_to_index_[label]
                      for label in y], dtype=np.int32)
        self._graph = tf.Graph()
        with self._graph.as_default():
            self._build_graph(n_inputs, n_outputs)
            # extra ops for batch normalization
            extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        # needed in case of early stopping
        max_checks_without_progress = 20
        checks_without_progress = 0
        best_loss = np.infty
        best_params = None
        # Now train the model!
        self._session = tf.Session(graph=self._graph)
        with self._session.as_default() as sess:
            self._init.run()
            for epoch in range(n_epochs):
                rnd_idx = np.random.permutation(len(X))
                for rnd_indices in np.array_split(rnd_idx, len(X) // self.batch_size):
                    X_batch, y_batch = X[rnd_indices], y[rnd_indices]
                    feed_dict = {self._X: X_batch, self._y: y_batch}
                    if self._training is not None:
                        feed_dict[self._training] = True
                    sess.run(self._training_op, feed_dict=feed_dict)
                    if extra_update_ops:
                        sess.run(extra_update_ops, feed_dict=feed_dict)
                if X_valid is not None and y_valid is not None:
                    # Early stopping: track the best validation loss in memory.
                    loss_val, acc_val = sess.run([self._loss, self._accuracy],
                                                 feed_dict={self._X: X_valid,
                                                            self._y: y_valid})
                    if loss_val < best_loss:
                        best_params = self._get_model_params()
                        best_loss = loss_val
                        checks_without_progress = 0
                    else:
                        checks_without_progress += 1
                    print("{}\tValidation loss: {:.6f}\tBest loss: {:.6f}\tAccuracy: {:.2f}%".format(
                        epoch, loss_val, best_loss, acc_val * 100))
                    if checks_without_progress > max_checks_without_progress:
                        print("Early stopping!")
                        break
                else:
                    loss_train, acc_train = sess.run([self._loss, self._accuracy],
                                                     feed_dict={self._X: X_batch,
                                                                self._y: y_batch})
                    print("{}\tLast training batch loss: {:.6f}\tAccuracy: {:.2f}%".format(
                        epoch, loss_train, acc_train * 100))
            # If we used early stopping then rollback to the best model found
            if best_params:
                self._restore_model_params(best_params)
            return self

    def predict_proba(self, X):
        """Return the class probabilities predicted for each instance in X."""
        if not self._session:
            raise NotFittedError("This %s instance is not fitted yet" % self.__class__.__name__)
        with self._session.as_default() as sess:
            return self._Y_proba.eval(feed_dict={self._X: X})

    def predict(self, X):
        """Return the most likely original class label for each instance in X."""
        class_indices = np.argmax(self.predict_proba(X), axis=1)
        return np.array([[self.classes_[class_index]]
                         for class_index in class_indices], np.int32)

    def save(self, path):
        """Save the current session's model to `path`."""
        self._saver.save(self._session, path)
# In[22]:
# Train the sklearn-compatible classifier on digits 0-4 with early stopping.
tf.reset_default_graph()
dnn_clf = DNNClassifier(random_state=42)
dnn_clf.fit(X_train1, y_train1, n_epochs=1000, X_valid=X_valid1, y_valid=y_valid1)
# The model is trained, let's see if it gets the same accuracy as earlier:
# In[23]:
from sklearn.metrics import accuracy_score
y_pred = dnn_clf.predict(X_test1)
accuracy_score(y_test1, y_pred)
# Yep! Working fine. Now we can use Scikit-Learn's RandomizedSearchCV class to search for better hyperparameters (this may take over an hour, depending on your system):
# In[24]:
from sklearn.model_selection import RandomizedSearchCV
def leaky_relu(alpha=0.01):
    """Return a leaky-ReLU activation ``z -> max(alpha * z, z)`` for a given *alpha*.

    The returned callable matches the ``activation(z, name=None)`` signature
    expected by the DNNClassifier's hidden layers.
    """
    def parametrized_leaky_relu(z, name=None):
        # For z >= 0 this is z; for z < 0 it is alpha * z (alpha is expected < 1).
        return tf.maximum(alpha * z, z, name=name)
    return parametrized_leaky_relu
# Hyperparameter distributions for the randomized search.
param_distribs = {
    "n_neurons": [10, 30, 50, 70, 90, 100, 120, 140, 160],
    "batch_size": [16, 64, 128, 512],
    "learning_rate": [0.01, 0.02, 0.05, 0.1],
    "activation": [tf.nn.relu, tf.nn.elu, leaky_relu(alpha=0.01), leaky_relu(alpha=0.1)],
    # you could also try exploring different numbers of hidden layers, different optimizers, etc.
    #"n_hidden_layers": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
    #"optimizer_class": [tf.train.AdamOptimizer, partial(tf.train.MomentumOptimizer, momentum=0.95)],
}
# 50 random combinations, 3-fold CV; extra fit kwargs are forwarded to DNNClassifier.fit().
rnd_search = RandomizedSearchCV(DNNClassifier(random_state=42), param_distribs, n_iter=50,
                                cv=3, random_state=42, verbose=2)
rnd_search.fit(X_train1, y_train1, X_valid=X_valid1, y_valid=y_valid1, n_epochs=1000)
# In[25]:
rnd_search.best_params_
# In[26]:
# Evaluate the best found model on the held-out test set.
y_pred = rnd_search.predict(X_test1)
accuracy_score(y_test1, y_pred)
# In[27]:
# Let's save this model
rnd_search.best_estimator_.save("./my_best_mnist_model_0_to_4")
# **Exercise:** Now try adding Batch Normalization and compare the learning curves: is it converging faster than before? Does it produce a better model?
# In[28]:
# Baseline: retrain with the tuned hyperparameters, no batch normalization.
dnn_clf = DNNClassifier(activation=leaky_relu(alpha=0.1), batch_size=500, learning_rate=0.01,
                        n_neurons=140, random_state=42)
dnn_clf.fit(X_train1, y_train1, n_epochs=1000, X_valid=X_valid1, y_valid=y_valid1)
# In[29]:
y_pred = dnn_clf.predict(X_test1)
accuracy_score(y_test1, y_pred)
# In[30]:
# Same setup but with batch normalization enabled (momentum 0.95).
dnn_clf_bn = DNNClassifier(activation=leaky_relu(alpha=0.1), batch_size=500, learning_rate=0.01,
                           n_neurons=90, random_state=42,
                           batch_norm_momentum=0.95)
dnn_clf_bn.fit(X_train1, y_train1, n_epochs=1000, X_valid=X_valid1, y_valid=y_valid1)
# In[31]:
y_pred = dnn_clf_bn.predict(X_test1)
accuracy_score(y_test1, y_pred)
# Wow awesome! Batch Normalization improved accuracy! To tweak hyperparameters with batch normalization, you can try RandomizedSearchCV as:
# In[34]:
from sklearn.model_selection import RandomizedSearchCV
# Same distributions as before, plus batch-norm momentum as a tunable hyperparameter.
param_distribs = {
    "n_neurons": [10, 30, 50, 70, 90, 100, 120, 140, 160],
    "batch_size": [16, 64, 128, 512],
    "learning_rate": [0.01, 0.02, 0.05, 0.1],
    "activation": [tf.nn.relu, tf.nn.elu, leaky_relu(alpha=0.01), leaky_relu(alpha=0.1)],
    # you could also try exploring different numbers of hidden layers, different optimizers, etc.
    #"n_hidden_layers": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
    #"optimizer_class": [tf.train.AdamOptimizer, partial(tf.train.MomentumOptimizer, momentum=0.95)],
    "batch_norm_momentum": [0.9, 0.95, 0.98, 0.99, 0.999],
}
rnd_search_bn = RandomizedSearchCV(DNNClassifier(random_state=42), param_distribs, n_iter=50, cv=3,
                                   random_state=42, verbose=2)
rnd_search_bn.fit(X_train1, y_train1, X_valid=X_valid1, y_valid=y_valid1, n_epochs=1000)
# In[35]:
rnd_search_bn.best_params_
# In[36]:
y_pred = rnd_search_bn.predict(X_test1)
accuracy_score(y_test1, y_pred)
# **Wow 99.39% accuracy! Awesome!**
# In[40]:
# Let's save this model
rnd_search_bn.best_estimator_.save("./best_mnist_model_0_to_4")
# # Transfer Learning
# **Exercise:** create a new DNN that reuses all the pretrained hidden layers of the previous model, freezes them, and replaces the softmax output layer with a new one.<br>
# Let's load the model graph:
# In[44]:
# Import the saved graph and grab handles on the tensors we need by name.
tf.reset_default_graph()
restore_saver = tf.train.import_meta_graph("./best_mnist_model_0_to_4.meta")
X = tf.get_default_graph().get_tensor_by_name("X:0")
y = tf.get_default_graph().get_tensor_by_name("y:0")
loss = tf.get_default_graph().get_tensor_by_name("loss:0")
Y_proba = tf.get_default_graph().get_tensor_by_name("Y_proba:0")
# The logits tensor is the (sole) input of the softmax op that produced Y_proba.
logits = Y_proba.op.inputs[0]
accuracy = tf.get_default_graph().get_tensor_by_name("accuracy:0")
# In[46]:
tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
# In[47]:
tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="logits")
# To freeze the lower layers, we will exclude their variables from the optimizer's list of trainable variables, keeping only the output layer's trainable variables:
# In[48]:
learning_rate = 0.01
output_layer_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="logits")
# A distinct name avoids clashing with the Adam slots already in the imported graph.
optimizer = tf.train.AdamOptimizer(learning_rate, name="Adam2")
training_op = optimizer.minimize(loss, var_list=output_layer_vars)
# In[49]:
correct = tf.nn.in_top_k(logits, y, 1)
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32), name="accuracy")
init = tf.global_variables_initializer()
five_frozen_saver = tf.train.Saver()
# **Exercise:** train this new DNN on digits 5 to 9, using only 100 images per digit, and time how long it takes. Despite this small number of examples, can you achieve high precision?<br>
# Let's create the training, validation and test sets. We need to subtract 5 from the labels because TensorFlow expects integers from 0 to n_classes-1.
# In[50]:
# Keep only digits 5-9 and shift labels into the 0-4 range expected by TF.
X_train2_full = X_train[y_train >= 5]
y_train2_full = y_train[y_train >= 5] - 5
X_valid2_full = X_valid[y_valid >= 5]
y_valid2_full = y_valid[y_valid >= 5] - 5
X_test2 = X_test[y_test >= 5]
y_test2 = y_test[y_test >= 5] - 5
# In[51]:
def sample_n_instances_per_class(X, y, n=100):
    """Return at most `n` instances of each class, in label-sorted order.

    Parameters
    ----------
    X : array-like, shape (m, ...)
        Instances.
    y : array-like, shape (m,)
        Class labels aligned with `X`.
    n : int
        Maximum number of instances to keep per class (fewer if a class has
        fewer than `n` instances).

    Returns
    -------
    (X_sample, y_sample) : tuple of np.ndarray
        Concatenated per-class slices, grouped by class in np.unique order.
    """
    Xs, ys = [], []
    for label in np.unique(y):
        idx = (y == label)
        # First `n` instances of this class, preserving original order.
        Xc = X[idx][:n]
        yc = y[idx][:n]
        Xs.append(Xc)
        ys.append(yc)
    return np.concatenate(Xs), np.concatenate(ys)
# 100 training and 30 validation instances per digit (5-9) for the small-data exercise.
X_train2, y_train2 = sample_n_instances_per_class(X_train2_full, y_train2_full, n=100)
X_valid2, y_valid2 = sample_n_instances_per_class(X_valid2_full, y_valid2_full, n=30)
# Now let's train the model:
# In[55]:
import time
# Train only the new output layer (five hidden layers frozen), with early stopping.
n_epochs = 1000
batch_size = 16
max_checks_without_progress = 20
checks_without_progress = 0
best_loss = np.infty
with tf.Session() as sess:
    init.run()
    restore_saver.restore(sess, "./best_mnist_model_0_to_4")
    t0 = time.time()
    for epoch in range(n_epochs):
        rnd_idx = np.random.permutation(len(X_train2))
        for rnd_indices in np.array_split(rnd_idx, len(X_train2) // batch_size):
            X_batch, y_batch = X_train2[rnd_indices], y_train2[rnd_indices]
            sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
        loss_val, acc_val = sess.run([loss, accuracy], feed_dict={X: X_valid2, y: y_valid2})
        if loss_val < best_loss:
            save_path = five_frozen_saver.save(sess, "./my_mnist_model_5_to_9_five_frozen")
            best_loss = loss_val
            checks_without_progress = 0
        else:
            checks_without_progress += 1
            if checks_without_progress > max_checks_without_progress:
                print("Early stopping!")
                break
        print("{}\tValidation loss: {:.6f}\tBest loss: {:.6f}\tAccuracy: {:.2f}%".format(epoch, loss_val, best_loss, acc_val * 100))
    t1 = time.time()
    print("Total training time: {:.1f}s".format(t1 - t0))
with tf.Session() as sess:
    five_frozen_saver.restore(sess, "./my_mnist_model_5_to_9_five_frozen")
    acc_test = accuracy.eval(feed_dict={X: X_test2, y: y_test2})
    print("Final test accuracy: {:.2f}%".format(acc_test * 100))
# Well that's not a great accuracy, is it? Of course with such a tiny training set, and with only one layer to tweak, we should not expect miracles.
# **Exercise:** try caching the frozen layers, and train the model again: how much faster is it now?
# Let's start by getting a handle on the output of the last frozen layer:
# In[56]:
hidden5_out = tf.get_default_graph().get_tensor_by_name("hidden5_out:0")
# Now let's train the model using roughly the same code as earlier. The difference is that we compute the output of the top frozen layer at the beginning (both for the training set and the validation set), and we cache it. This makes training faster:
# In[61]:
import time
n_epochs = 1000
batch_size = 16
max_checks_without_progress = 20
checks_without_progress = 0
best_loss = np.infty
with tf.Session() as sess:
    init.run()
    restore_saver.restore(sess, "./best_mnist_model_0_to_4")
    t0 = time.time()
    # Precompute the top frozen layer's output once; subsequent steps feed
    # these cached activations instead of re-running the frozen layers.
    hidden5_train = hidden5_out.eval(feed_dict={X: X_train2, y: y_train2})
    hidden5_valid = hidden5_out.eval(feed_dict={X: X_valid2, y: y_valid2})
    for epoch in range(n_epochs):
        rnd_idx = np.random.permutation(len(X_train2))
        for rnd_indices in np.array_split(rnd_idx, len(X_train2) // batch_size):
            h5_batch, y_batch = hidden5_train[rnd_indices], y_train2[rnd_indices]
            sess.run(training_op, feed_dict={hidden5_out: h5_batch, y: y_batch})
        loss_val, acc_val = sess.run([loss, accuracy], feed_dict={hidden5_out: hidden5_valid, y: y_valid2})
        if loss_val < best_loss:
            save_path = five_frozen_saver.save(sess, "./my_mnist_model_5_to_9_five_frozen")
            best_loss = loss_val
            checks_without_progress = 0
        else:
            checks_without_progress += 1
            if checks_without_progress > max_checks_without_progress:
                print("Early stopping!")
                break
        print("{}\tValidation loss: {:.6f}\tBest loss: {:.6f}\tAccuracy: {:.2f}%".format(
            epoch, loss_val, best_loss, acc_val * 100))
    t1 = time.time()
    print("Total training time: {:.1f}s".format(t1 - t0))
with tf.Session() as sess:
    five_frozen_saver.restore(sess, "./my_mnist_model_5_to_9_five_frozen")
    acc_test = accuracy.eval(feed_dict={X: X_test2, y: y_test2})
    print("Final test accuracy: {:.2f}%".format(acc_test * 100))
# **Exercise:** try again reusing just four hidden layers instead of five. Can you achieve a higher precision?<br>
# Let's load the best model again, but this time we will create a new softmax output layer on top of the 4th hidden layer:
#
# In[62]:
# Rebuild the graph, reusing only the first four hidden layers this time.
tf.reset_default_graph()
n_outputs = 5
restore_saver = tf.train.import_meta_graph("./best_mnist_model_0_to_4.meta")
X = tf.get_default_graph().get_tensor_by_name("X:0")
y = tf.get_default_graph().get_tensor_by_name("y:0")
hidden4_out = tf.get_default_graph().get_tensor_by_name("hidden4_out:0")
# New softmax output layer on top of the 4th hidden layer.
logits = tf.layers.dense(hidden4_out, n_outputs, kernel_initializer=he_init, name="new_logits")
Y_proba = tf.nn.softmax(logits)
xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
loss = tf.reduce_mean(xentropy)
correct = tf.nn.in_top_k(logits, y, 1)
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32), name="accuracy")
# And now let's create the training operation. We want to freeze all the layers except for the new output layer:
# In[63]:
learning_rate = 0.01
# Only the new output layer's variables are passed to the optimizer.
output_layer_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="new_logits")
optimizer = tf.train.AdamOptimizer(learning_rate, name="Adam2")
training_op = optimizer.minimize(loss, var_list=output_layer_vars)
init = tf.global_variables_initializer()
four_frozen_saver = tf.train.Saver()
# In[65]:
# Train the new output layer (four hidden layers frozen), with early stopping.
n_epochs = 1000
batch_size = 16
max_checks_without_progress = 20
checks_without_progress = 0
best_loss = np.infty
with tf.Session() as sess:
    init.run()
    restore_saver.restore(sess, "./best_mnist_model_0_to_4")
    for epoch in range(n_epochs):
        rnd_idx = np.random.permutation(len(X_train2))
        for rnd_indices in np.array_split(rnd_idx, len(X_train2) // batch_size):
            X_batch, y_batch = X_train2[rnd_indices], y_train2[rnd_indices]
            sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
        loss_val, acc_val = sess.run([loss, accuracy], feed_dict={X: X_valid2, y: y_valid2})
        if loss_val < best_loss:
            save_path = four_frozen_saver.save(sess, "./my_mnist_model_5_to_9_four_frozen")
            best_loss = loss_val
            checks_without_progress = 0
        else:
            checks_without_progress += 1
            if checks_without_progress > max_checks_without_progress:
                print("Early stopping!")
                break
        print("{}\tValidation loss: {:.6f}\tBest loss: {:.6f}\tAccuracy: {:.2f}%".format(
            epoch, loss_val, best_loss, acc_val * 100))
with tf.Session() as sess:
    four_frozen_saver.restore(sess, "./my_mnist_model_5_to_9_four_frozen")
    acc_test = accuracy.eval(feed_dict={X: X_test2, y: y_test2})
    print("Final test accuracy: {:.2f}%".format(acc_test * 100))
# **Still not good!**
# **Exercise:** now unfreeze the top two hidden layers and continue training: can you get the model to perform even better?
# In[66]:
learning_rate = 0.01
# The regex scope matches hidden3, hidden4 and the new output layer.
unfrozen_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="hidden[34]|new_logits")
optimizer = tf.train.AdamOptimizer(learning_rate, name="Adam3")
training_op = optimizer.minimize(loss, var_list=unfrozen_vars)
init = tf.global_variables_initializer()
two_frozen_saver = tf.train.Saver()
# In[67]:
# Continue training with hidden3/hidden4 unfrozen, starting from the
# four-frozen checkpoint, again with early stopping.
n_epochs = 1000
batch_size = 16
max_checks_without_progress = 20
checks_without_progress = 0
best_loss = np.infty
with tf.Session() as sess:
    init.run()
    four_frozen_saver.restore(sess, "./my_mnist_model_5_to_9_four_frozen")
    for epoch in range(n_epochs):
        rnd_idx = np.random.permutation(len(X_train2))
        for rnd_indices in np.array_split(rnd_idx, len(X_train2) // batch_size):
            X_batch, y_batch = X_train2[rnd_indices], y_train2[rnd_indices]
            sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
        loss_val, acc_val = sess.run([loss, accuracy], feed_dict={X: X_valid2, y: y_valid2})
        if loss_val < best_loss:
            save_path = two_frozen_saver.save(sess, "./my_mnist_model_5_to_9_two_frozen")
            best_loss = loss_val
            checks_without_progress = 0
        else:
            checks_without_progress += 1
            if checks_without_progress > max_checks_without_progress:
                print("Early stopping!")
                break
        print("{}\tValidation loss: {:.6f}\tBest loss: {:.6f}\tAccuracy: {:.2f}%".format(
            epoch, loss_val, best_loss, acc_val * 100))
with tf.Session() as sess:
    two_frozen_saver.restore(sess, "./my_mnist_model_5_to_9_two_frozen")
    acc_test = accuracy.eval(feed_dict={X: X_test2, y: y_test2})
    print("Final test accuracy: {:.2f}%".format(acc_test * 100))
# Let's check what accuracy we can get by unfreezing all layers:
# In[68]:
learning_rate = 0.01
optimizer = tf.train.AdamOptimizer(learning_rate, name="Adam4")
# No var_list: all trainable variables are optimized.
training_op = optimizer.minimize(loss)
init = tf.global_variables_initializer()
no_frozen_saver = tf.train.Saver()
# In[69]:
# Fine-tune all layers, starting from the two-frozen checkpoint, with early stopping.
n_epochs = 1000
batch_size = 20
max_checks_without_progress = 20
checks_without_progress = 0
best_loss = np.infty
with tf.Session() as sess:
    init.run()
    two_frozen_saver.restore(sess, "./my_mnist_model_5_to_9_two_frozen")
    for epoch in range(n_epochs):
        rnd_idx = np.random.permutation(len(X_train2))
        for rnd_indices in np.array_split(rnd_idx, len(X_train2) // batch_size):
            X_batch, y_batch = X_train2[rnd_indices], y_train2[rnd_indices]
            sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
        loss_val, acc_val = sess.run([loss, accuracy], feed_dict={X: X_valid2, y: y_valid2})
        if loss_val < best_loss:
            save_path = no_frozen_saver.save(sess, "./my_mnist_model_5_to_9_no_frozen")
            best_loss = loss_val
            checks_without_progress = 0
        else:
            checks_without_progress += 1
            if checks_without_progress > max_checks_without_progress:
                print("Early stopping!")
                break
        print("{}\tValidation loss: {:.6f}\tBest loss: {:.6f}\tAccuracy: {:.2f}%".format(
            epoch, loss_val, best_loss, acc_val * 100))
with tf.Session() as sess:
    no_frozen_saver.restore(sess, "./my_mnist_model_5_to_9_no_frozen")
    acc_test = accuracy.eval(feed_dict={X: X_test2, y: y_test2})
    print("Final test accuracy: {:.2f}%".format(acc_test * 100))
#
#
# Let's compare that to a DNN trained from scratch:
#
# In[70]:
# Baseline: train a fresh 4-hidden-layer classifier on the small 5-9 dataset.
dnn_clf_5_to_9 = DNNClassifier(n_hidden_layers=4, random_state=42)
dnn_clf_5_to_9.fit(X_train2, y_train2, n_epochs=1000, X_valid=X_valid2, y_valid=y_valid2)
# In[71]:
y_pred = dnn_clf_5_to_9.predict(X_test2)
accuracy_score(y_test2, y_pred)
# Here, because there is so little training data, transfer learning underperforms; we need to make some tweaks to it to make it more accurate.
|
{"hexsha": "7b9328e9ce0f8b99dfc03c50f9f3441069c3d3c4", "size": 72074, "ext": "py", "lang": "Python", "max_stars_repo_path": "DNN on MNIST /mnist_classifcation.py", "max_stars_repo_name": "DhruvAwasthi/ModelsCollection", "max_stars_repo_head_hexsha": "80ab3ada2d5cb23cce7a3db23be1ec1dc14d8733", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-10-22T16:52:56.000Z", "max_stars_repo_stars_event_max_datetime": "2021-10-03T15:57:14.000Z", "max_issues_repo_path": "DNN on MNIST /mnist_classifcation.py", "max_issues_repo_name": "DhruvAwasthi/ModelsCollection", "max_issues_repo_head_hexsha": "80ab3ada2d5cb23cce7a3db23be1ec1dc14d8733", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "DNN on MNIST /mnist_classifcation.py", "max_forks_repo_name": "DhruvAwasthi/ModelsCollection", "max_forks_repo_head_hexsha": "80ab3ada2d5cb23cce7a3db23be1ec1dc14d8733", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.9830254618, "max_line_length": 588, "alphanum_fraction": 0.6998224048, "include": true, "reason": "import numpy", "num_tokens": 18193}
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from dps import cfg
from dps.utils import Param
from dps.utils.tf import build_scheduled_value, RenderHook, tf_mean_sum
from auto_yolo.models.core import (
VariationalAutoencoder, normal_vae, mAP, xent_loss,
concrete_binary_pre_sigmoid_sample, concrete_binary_sample_kl)
# ------ transformer.py -------
def transformer(U, theta, out_size, name='SpatialTransformer', **kwargs):
    """Spatial Transformer Layer
    Implements a spatial transformer layer as described in [1]_.
    Based on [2]_ and edited by David Dao for Tensorflow.
    Parameters
    ----------
    U : float
        The output of a convolutional net should have the
        shape [num_batch, height, width, num_channels].
    theta: float
        The output of the
        localisation network should be [num_batch, 6].
    out_size: tuple of two ints
        The size of the output of the network (height, width)
    References
    ----------
    .. [1]  Spatial Transformer Networks
            Max Jaderberg, Karen Simonyan, Andrew Zisserman, Koray Kavukcuoglu
            Submitted on 5 Jun 2015
    .. [2]  https://github.com/skaae/transformer_network/blob/master/transformerlayer.py
    Notes
    -----
    To initialize the network to the identity transform init
    ``theta`` to :
        identity = np.array([[1., 0., 0.],
                             [0., 1., 0.]])
        identity = identity.flatten()
        theta = tf.Variable(initial_value=identity)
    """
    def _repeat(x, n_repeats):
        # Repeat each element of `x` `n_repeats` times, flattened.
        # Used to build the per-pixel batch offsets into the flattened image.
        with tf.variable_scope('_repeat'):
            rep = tf.transpose(
                tf.expand_dims(tf.ones(shape=tf.stack([n_repeats, ])), 1), [1, 0])
            rep = tf.cast(rep, 'int32')
            x = tf.matmul(tf.reshape(x, (-1, 1)), rep)
            return tf.reshape(x, [-1])

    def _interpolate(im, x, y, out_size):
        # Bilinearly sample `im` at the fractional coordinates (x, y),
        # where x and y are flat tensors in [-1, 1].
        with tf.variable_scope('_interpolate'):
            # constants
            num_batch = tf.shape(im)[0]
            height = tf.shape(im)[1]
            width = tf.shape(im)[2]
            channels = tf.shape(im)[3]

            x = tf.cast(x, 'float32')
            y = tf.cast(y, 'float32')
            height_f = tf.cast(height, 'float32')
            width_f = tf.cast(width, 'float32')
            out_height = out_size[0]
            out_width = out_size[1]
            zero = tf.zeros([], dtype='int32')
            max_y = tf.cast(tf.shape(im)[1] - 1, 'int32')
            max_x = tf.cast(tf.shape(im)[2] - 1, 'int32')

            # scale indices from [-1, 1] to [0, width/height]
            # (the -1.001 shrink presumably keeps samples strictly inside
            # the image so x1/y1 never land past the last pixel — TODO confirm)
            x = (x + 1.0)*(width_f-1.001) / 2.0
            y = (y + 1.0)*(height_f-1.001) / 2.0

            # do sampling: the four integer neighbours of each sample point
            x0 = tf.cast(tf.floor(x), 'int32')
            x1 = x0 + 1
            y0 = tf.cast(tf.floor(y), 'int32')
            y1 = y0 + 1

            # clamp to valid pixel range (out-of-image samples repeat the border)
            x0 = tf.clip_by_value(x0, zero, max_x)
            x1 = tf.clip_by_value(x1, zero, max_x)
            y0 = tf.clip_by_value(y0, zero, max_y)
            y1 = tf.clip_by_value(y1, zero, max_y)
            dim2 = width
            dim1 = width*height
            # per-pixel offset of each batch element in the flattened image
            base = _repeat(tf.range(num_batch)*dim1, out_height*out_width)
            base_y0 = base + y0*dim2
            base_y1 = base + y1*dim2
            idx_a = base_y0 + x0
            idx_b = base_y1 + x0
            idx_c = base_y0 + x1
            idx_d = base_y1 + x1

            # use indices to lookup pixels in the flat image and restore
            # channels dim
            im_flat = tf.reshape(im, tf.stack([-1, channels]))
            im_flat = tf.cast(im_flat, 'float32')
            Ia = tf.gather(im_flat, idx_a)
            Ib = tf.gather(im_flat, idx_b)
            Ic = tf.gather(im_flat, idx_c)
            Id = tf.gather(im_flat, idx_d)

            # and finally calculate interpolated values
            # (standard bilinear weights: area of the opposite sub-rectangle)
            x0_f = tf.cast(x0, 'float32')
            x1_f = tf.cast(x1, 'float32')
            y0_f = tf.cast(y0, 'float32')
            y1_f = tf.cast(y1, 'float32')
            wa = tf.expand_dims(((x1_f-x) * (y1_f-y)), 1)
            wb = tf.expand_dims(((x1_f-x) * (y-y0_f)), 1)
            wc = tf.expand_dims(((x-x0_f) * (y1_f-y)), 1)
            wd = tf.expand_dims(((x-x0_f) * (y-y0_f)), 1)
            output = tf.add_n([wa*Ia, wb*Ib, wc*Ic, wd*Id])
            return output

    def _meshgrid(height, width):
        # Build a (3, height*width) grid of homogeneous target coordinates
        # (x_t, y_t, 1) with x_t and y_t in [-1, 1].
        with tf.variable_scope('_meshgrid'):
            # This should be equivalent to:
            #  x_t, y_t = np.meshgrid(np.linspace(-1, 1, width),
            #                         np.linspace(-1, 1, height))
            #  ones = np.ones(np.prod(x_t.shape))
            #  grid = np.vstack([x_t.flatten(), y_t.flatten(), ones])
            x_t = tf.matmul(tf.ones(shape=tf.stack([height, 1])),
                            tf.transpose(tf.expand_dims(tf.linspace(-1.0, 1.0, width), 1), [1, 0]))
            y_t = tf.matmul(tf.expand_dims(tf.linspace(-1.0, 1.0, height), 1),
                            tf.ones(shape=tf.stack([1, width])))

            x_t_flat = tf.reshape(x_t, (1, -1))
            y_t_flat = tf.reshape(y_t, (1, -1))

            ones = tf.ones_like(x_t_flat)
            grid = tf.concat(axis=0, values=[x_t_flat, y_t_flat, ones])
            return grid

    def _transform(theta, input_dim, out_size):
        # Apply the affine transform `theta` to the sampling grid, then
        # bilinearly sample `input_dim` at the transformed source coordinates.
        with tf.variable_scope('_transform'):
            num_batch = tf.shape(input_dim)[0]
            num_channels = tf.shape(input_dim)[3]
            theta = tf.reshape(theta, (-1, 2, 3))
            theta = tf.cast(theta, 'float32')

            # grid of (x_t, y_t, 1), eq (1) in ref [1]
            out_height = out_size[0]
            out_width = out_size[1]
            grid = _meshgrid(out_height, out_width)
            grid = tf.expand_dims(grid, 0)
            grid = tf.reshape(grid, [-1])
            # one copy of the grid per batch element
            grid = tf.tile(grid, tf.stack([num_batch]))
            grid = tf.reshape(grid, tf.stack([num_batch, 3, -1]))

            # Transform A x (x_t, y_t, 1)^T -> (x_s, y_s)
            T_g = tf.matmul(theta, grid)
            x_s = tf.slice(T_g, [0, 0, 0], [-1, 1, -1])
            y_s = tf.slice(T_g, [0, 1, 0], [-1, 1, -1])
            x_s_flat = tf.reshape(x_s, [-1])
            y_s_flat = tf.reshape(y_s, [-1])

            input_transformed = _interpolate(
                input_dim, x_s_flat, y_s_flat,
                out_size)

            output = tf.reshape(
                input_transformed, tf.stack([num_batch, out_height, out_width, num_channels]))
            return output

    with tf.variable_scope(name):
        output = _transform(theta, U, out_size)
        return output
class AIR_AP(object):
    """Evaluation functor computing mean average precision (mAP) of the
    predicted object bounding boxes against the ground-truth annotations.

    Instances are stored in ``AIR_Network.eval_funcs`` and invoked by the
    updater with the fetched tensors listed in ``keys_accessed``.
    """

    # Keys this eval function reads from the fetched tensor dict.
    keys_accessed = "scale shift predicted_n_digits annotations n_annotations"

    def __init__(self, iou_threshold=None):
        """``iou_threshold`` may be None, a scalar, or an iterable of thresholds;
        scalars/iterables are normalized to a list of floats."""
        if iou_threshold is None:
            self.iou_threshold = None
        else:
            try:
                self.iou_threshold = list(iou_threshold)
            except (TypeError, ValueError):
                self.iou_threshold = [float(iou_threshold)]

    def __call__(self, _tensors, updater):
        """Return the mAP of the predicted boxes at the configured IoU threshold(s)."""
        network = updater.network

        w, h = np.split(_tensors['scale'], 2, axis=2)
        x, y = np.split(_tensors['shift'], 2, axis=2)
        predicted_n_digits = _tensors['predicted_n_digits']
        annotations = _tensors["annotations"]
        n_annotations = _tensors["n_annotations"]

        # Convert (scale, shift) from [-1, 1] normalized coordinates to
        # pixel-space (top, bottom, left, right) boxes.
        height = h * network.image_height
        width = w * network.image_width
        top = network.image_height * (0.5 * (y + 1.)) - height / 2
        left = network.image_width * (0.5 * (x + 1.)) - width / 2
        bottom = top + height
        right = left + width

        ground_truth_boxes = []
        predicted_boxes = []
        for idx in range(w.shape[0]):
            # Keep only the valid annotations; drop the class field
            # (single-class evaluation, class id fixed to 0).
            gt = [
                [0, *rest]
                for (valid, cls, *rest) in annotations[idx][:n_annotations[idx]]
                if valid]
            ground_truth_boxes.append(gt)

            # One predicted box per inferred object, confidence fixed to 1.
            predicted_boxes.append([
                [0, 1,
                 top[idx, t, 0],
                 bottom[idx, t, 0],
                 left[idx, t, 0],
                 right[idx, t, 0]]
                for t in range(predicted_n_digits[idx])])

        return mAP(
            predicted_boxes, ground_truth_boxes, n_classes=1,
            iou_threshold=self.iou_threshold)
class AIR_Network(VariationalAutoencoder):
    """Attend, Infer, Repeat (AIR) style variational autoencoder.

    An RNN attends to one object per time step: each step predicts a
    scale/shift window, encodes the window's contents into object
    attributes, decodes them back, pastes the decoded window into a
    running reconstruction canvas, and predicts a `z_pres` value that
    decides whether to keep iterating.
    """

    # --- hyperparameters supplied through the dps Param/config machinery ---
    max_time_steps = Param()
    run_all_time_steps = Param(help="If true, always run for `max_time_steps` and don't predict `z_pres`")
    object_shape = Param()
    scale_prior_mean = Param()
    scale_prior_std = Param()
    shift_prior_mean = Param()
    shift_prior_std = Param()
    z_pres_prior_log_odds = Param()
    z_pres_temperature = Param()
    stopping_threshold = Param()
    training_wheels = Param()
    kl_weight = Param()
    difference_air = Param()
    complete_rnn_input = Param()

    # Sub-networks; built lazily in `build_representation` via cfg builders.
    image_encoder = None
    cell = None
    output_network = None
    object_encoder = None
    object_decoder = None

    def __init__(self, env, updater, scope=None, **kwargs):
        """Set up evaluation functions and scheduled scalar hyperparameters."""
        super(AIR_Network, self).__init__(env, updater, scope=scope, **kwargs)

        # Average precision at several IoU thresholds, plus the mean over all.
        ap_iou_values = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
        self.eval_funcs = {"AP_at_point_{}".format(int(10 * v)): AIR_AP(v) for v in ap_iou_values}
        self.eval_funcs["AP"] = AIR_AP(ap_iou_values)

        # Wrap scalar hyperparameters so they can follow a training schedule.
        self.training_wheels = build_scheduled_value(self.training_wheels, "training_wheels")
        self.scale_prior_mean = build_scheduled_value(self.scale_prior_mean, "scale_prior_mean")
        self.scale_prior_std = build_scheduled_value(self.scale_prior_std, "scale_prior_std")
        self.shift_prior_mean = build_scheduled_value(self.shift_prior_mean, "shift_prior_mean")
        self.shift_prior_std = build_scheduled_value(self.shift_prior_std, "shift_prior_std")
        self.z_pres_prior_log_odds = build_scheduled_value(self.z_pres_prior_log_odds, "z_pres_prior_log_odds")

    def apply_training_wheel(self, signal):
        """Interpolate between stop_gradient(signal) and signal.

        With training_wheels == 1 no gradient flows through `signal`;
        with 0 the signal is passed through untouched.
        """
        return (
            self.training_wheels * tf.stop_gradient(signal)
            + (1-self.training_wheels) * signal)

    def apply_fixed_value(self, key, signal):
        """Return the configured fixed override for `key` (broadcast to the
        shape of `signal`), or `signal` unchanged if no override is set."""
        value = self.fixed_values.get(key, None)
        if value is not None:
            return value * tf.ones_like(signal)
        else:
            return signal

    def build_representation(self):
        """Build the full AIR inference/generation graph and populate
        `self._tensors`, `self.losses` and the recorded summaries."""
        # --- process input ---

        # Lazily construct sub-networks from the config, optionally
        # freezing the weights of any network named in `fixed_weights`.
        if self.image_encoder is None:
            self.image_encoder = cfg.build_image_encoder(scope="image_encoder")
            if "image_encoder" in self.fixed_weights:
                self.image_encoder.fix_variables()

        if self.cell is None:
            self.cell = cfg.build_cell(scope="cell")
            if "cell" in self.fixed_weights:
                self.cell.fix_variables()

        if self.output_network is None:
            self.output_network = cfg.build_output_network(scope="output_network")
            if "output" in self.fixed_weights:
                self.output_network.fix_variables()

        if self.object_encoder is None:
            self.object_encoder = cfg.build_object_encoder(scope="object_encoder")
            if "object_encoder" in self.fixed_weights:
                self.object_encoder.fix_variables()

        if self.object_decoder is None:
            self.object_decoder = cfg.build_object_decoder(scope="object_decoder")
            if "object_decoder" in self.fixed_weights:
                self.object_decoder.fix_variables()

        self.target_n_digits = self._tensors["n_valid_annotations"]

        # In the standard (non-difference) mode the input image is encoded
        # once up front; in difference mode it is re-encoded each step.
        if not self.difference_air:
            encoded_inp = self.image_encoder(
                self._tensors["inp"], 0, self.is_training)
            self.encoded_inp = tf.layers.flatten(encoded_inp)

        # --- condition of while-loop ---

        def cond(step, stopping_sum, *_):
            # Keep looping while under the step budget and at least one
            # batch element has not yet crossed the stopping threshold.
            return tf.logical_and(
                tf.less(step, self.max_time_steps),
                tf.reduce_any(tf.less(stopping_sum, self.stopping_threshold))
            )

        # --- body of while-loop ---

        def body(step, stopping_sum, prev_state,
                 running_recon, kl_loss, running_digits,
                 scale_ta, scale_kl_ta, scale_std_ta,
                 shift_ta, shift_kl_ta, shift_std_ta,
                 attr_ta, attr_kl_ta, attr_std_ta,
                 z_pres_ta, z_pres_probs_ta, z_pres_kl_ta,
                 vae_input_ta, vae_output_ta,
                 scale, shift, attr, z_pres):

            if self.difference_air:
                # Difference-AIR: encode the residual between the input and
                # the reconstruction so far, so each step sees what remains.
                inp = (
                    self._tensors["inp"]
                    - tf.reshape(running_recon, (self.batch_size, *self.obs_shape))
                )
                encoded_inp = self.image_encoder(inp, 0, self.is_training)
                encoded_inp = tf.layers.flatten(encoded_inp)
            else:
                encoded_inp = self.encoded_inp

            # Optionally feed the previous step's latents back into the RNN.
            if self.complete_rnn_input:
                rnn_input = tf.concat([encoded_inp, scale, shift, attr, z_pres], axis=1)
            else:
                rnn_input = encoded_inp

            hidden_rep, next_state = self.cell(rnn_input, prev_state)

            # 9 outputs: scale mean/log-std (2+2), shift mean/log-std (2+2),
            # z_pres log-odds (1).
            outputs = self.output_network(hidden_rep, 9, self.is_training)

            (scale_mean, scale_log_std,
             shift_mean, shift_log_std,
             z_pres_log_odds) = tf.split(outputs, [2, 2, 2, 2, 1], axis=1)

            # --- scale ---

            scale_std = tf.exp(scale_log_std)

            scale_mean = self.apply_fixed_value("scale_mean", scale_mean)
            scale_std = self.apply_fixed_value("scale_std", scale_std)

            scale_logits, scale_kl = normal_vae(
                scale_mean, scale_std, self.scale_prior_mean, self.scale_prior_std)
            scale_kl = tf.reduce_sum(scale_kl, axis=1, keepdims=True)
            # Sigmoid squashes the sample into (0, 1); clipping keeps it stable.
            scale = tf.nn.sigmoid(tf.clip_by_value(scale_logits, -10, 10))

            # --- shift ---

            shift_std = tf.exp(shift_log_std)

            shift_mean = self.apply_fixed_value("shift_mean", shift_mean)
            shift_std = self.apply_fixed_value("shift_std", shift_std)

            shift_logits, shift_kl = normal_vae(
                shift_mean, shift_std, self.shift_prior_mean, self.shift_prior_std)
            shift_kl = tf.reduce_sum(shift_kl, axis=1, keepdims=True)
            # Tanh squashes the sample into (-1, 1) normalized coordinates.
            shift = tf.nn.tanh(tf.clip_by_value(shift_logits, -10, 10))

            # --- Extract windows from scene ---

            w, h = scale[:, 0:1], scale[:, 1:2]
            x, y = shift[:, 0:1], shift[:, 1:2]

            # Affine parameters for the spatial transformer: scale w/h, shift x/y.
            theta = tf.concat([w, tf.zeros_like(w), x, tf.zeros_like(h), h, y], axis=1)
            theta = tf.reshape(theta, (-1, 2, 3))

            vae_input = transformer(self._tensors["inp"], theta, self.object_shape)

            # This is a necessary reshape, as the output of transformer will have unknown dims
            vae_input = tf.reshape(vae_input, (self.batch_size, *self.object_shape, self.image_depth))

            # --- Apply Object-level VAE (object encoder/object decoder) to windows ---

            attr = self.object_encoder(vae_input, 2*self.A, self.is_training)
            attr_mean, attr_log_std = tf.split(attr, 2, axis=1)
            attr_std = tf.exp(attr_log_std)
            attr, attr_kl = normal_vae(attr_mean, attr_std, self.attr_prior_mean, self.attr_prior_std)
            attr_kl = tf.reduce_sum(attr_kl, axis=1, keepdims=True)

            vae_output = self.object_decoder(
                attr, self.object_shape[0] * self.object_shape[1] * self.image_depth, self.is_training)
            vae_output = tf.nn.sigmoid(tf.clip_by_value(vae_output, -10, 10))

            # --- Place reconstructed objects in image ---

            # Inverse of the window transform, used to paste the decoded
            # object back at its location in the full image.
            theta_inverse = tf.concat([1. / w, tf.zeros_like(w), -x / w, tf.zeros_like(h), 1. / h, -y / h], axis=1)
            theta_inverse = tf.reshape(theta_inverse, (-1, 2, 3))

            vae_output_transformed = transformer(
                tf.reshape(vae_output, (self.batch_size, *self.object_shape, self.image_depth,)),
                theta_inverse, self.obs_shape[:2]
            )
            vae_output_transformed = tf.reshape(
                vae_output_transformed, [self.batch_size, self.image_height * self.image_width * self.image_depth])

            # --- z_pres ---

            if self.run_all_time_steps:
                # No stopping prediction: every step is "present" with prob 1.
                z_pres = tf.ones_like(z_pres_log_odds)
                z_pres_prob = tf.ones_like(z_pres_log_odds)
                z_pres_kl = tf.zeros_like(z_pres_log_odds)
            else:
                z_pres_log_odds = tf.clip_by_value(z_pres_log_odds, -10, 10)

                # Relaxed (Concrete) Bernoulli sample for differentiability.
                z_pres_pre_sigmoid = concrete_binary_pre_sigmoid_sample(
                    z_pres_log_odds, self.z_pres_temperature
                )
                z_pres = tf.nn.sigmoid(z_pres_pre_sigmoid)

                # Soft sample during training, hard (rounded) at test time.
                z_pres = (
                    self.float_is_training * z_pres
                    + (1 - self.float_is_training) * tf.round(z_pres)
                )

                z_pres_prob = tf.nn.sigmoid(z_pres_log_odds)

                z_pres_kl = concrete_binary_sample_kl(
                    z_pres_pre_sigmoid,
                    self.z_pres_prior_log_odds, self.z_pres_temperature,
                    z_pres_log_odds, self.z_pres_temperature
                )

            # A batch element stops once its accumulated (1 - z_pres)
            # crosses the stopping threshold.
            stopping_sum += (1.0 - z_pres)
            alive = tf.less(stopping_sum, self.stopping_threshold)

            running_digits += tf.to_int32(alive)

            # --- adjust reconstruction ---

            # Only still-alive elements contribute to the canvas and losses.
            running_recon += tf.where(
                tf.tile(alive, (1, vae_output_transformed.shape[1])),
                z_pres * vae_output_transformed, tf.zeros_like(running_recon)
            )

            # --- add kl to loss ---

            kl_loss += tf.where(
                alive, scale_kl, tf.zeros_like(kl_loss)
            )
            kl_loss += tf.where(
                alive, shift_kl, tf.zeros_like(kl_loss)
            )
            kl_loss += tf.where(
                alive, attr_kl, tf.zeros_like(kl_loss)
            )
            kl_loss += tf.where(
                alive, z_pres_kl, tf.zeros_like(kl_loss)
            )

            # --- record values ---

            # Append this step's values to the per-step TensorArrays.
            scale_ta = scale_ta.write(scale_ta.size(), scale)
            scale_kl_ta = scale_kl_ta.write(scale_kl_ta.size(), scale_kl)
            scale_std_ta = scale_std_ta.write(scale_std_ta.size(), scale_std)

            shift_ta = shift_ta.write(shift_ta.size(), shift)
            shift_kl_ta = shift_kl_ta.write(shift_kl_ta.size(), shift_kl)
            shift_std_ta = shift_std_ta.write(shift_std_ta.size(), shift_std)

            attr_ta = attr_ta.write(attr_ta.size(), attr)
            attr_kl_ta = attr_kl_ta.write(attr_kl_ta.size(), attr_kl)
            attr_std_ta = attr_std_ta.write(attr_std_ta.size(), attr_std)

            vae_input_ta = vae_input_ta.write(vae_input_ta.size(), tf.layers.flatten(vae_input))
            vae_output_ta = vae_output_ta.write(vae_output_ta.size(), vae_output)

            z_pres_ta = z_pres_ta.write(z_pres_ta.size(), z_pres)
            z_pres_probs_ta = z_pres_probs_ta.write(z_pres_probs_ta.size(), z_pres_prob)
            z_pres_kl_ta = z_pres_kl_ta.write(z_pres_kl_ta.size(), z_pres_kl)

            return (
                step + 1, stopping_sum, next_state,
                running_recon, kl_loss, running_digits,
                scale_ta, scale_kl_ta, scale_std_ta,
                shift_ta, shift_kl_ta, shift_std_ta,
                attr_ta, attr_kl_ta, attr_std_ta,
                z_pres_ta, z_pres_probs_ta, z_pres_kl_ta,
                vae_input_ta, vae_output_ta,
                scale, shift, attr, z_pres,
            )

        # --- end of while-loop body ---

        rnn_init_state = self.cell.zero_state(self.batch_size, tf.float32)

        # Run the attend-infer-repeat loop; loop-var order must match
        # `body`'s signature/return exactly.
        (_, _, _, reconstruction, kl_loss, self.predicted_n_digits,
         scale, scale_kl, scale_std, shift, shift_kl, shift_std,
         attr, attr_kl, attr_std, z_pres, z_pres_probs, z_pres_kl,
         vae_input, vae_output, _, _, _, _) = tf.while_loop(
            cond, body, [
                tf.constant(0),                            # RNN time step, initially zero
                tf.zeros((self.batch_size, 1)),            # running sum of z_pres samples
                rnn_init_state,                            # initial RNN state
                tf.zeros((self.batch_size, np.product(self.obs_shape))),  # reconstruction canvas, initially empty
                tf.zeros((self.batch_size, 1)),            # running value of the loss function
                tf.zeros((self.batch_size, 1), dtype=tf.int32),  # running inferred number of digits
                tf.TensorArray(dtype=tf.float32, size=0, dynamic_size=True),
                tf.TensorArray(dtype=tf.float32, size=0, dynamic_size=True),
                tf.TensorArray(dtype=tf.float32, size=0, dynamic_size=True),
                tf.TensorArray(dtype=tf.float32, size=0, dynamic_size=True),
                tf.TensorArray(dtype=tf.float32, size=0, dynamic_size=True),
                tf.TensorArray(dtype=tf.float32, size=0, dynamic_size=True),
                tf.TensorArray(dtype=tf.float32, size=0, dynamic_size=True),
                tf.TensorArray(dtype=tf.float32, size=0, dynamic_size=True),
                tf.TensorArray(dtype=tf.float32, size=0, dynamic_size=True),
                tf.TensorArray(dtype=tf.float32, size=0, dynamic_size=True),
                tf.TensorArray(dtype=tf.float32, size=0, dynamic_size=True),
                tf.TensorArray(dtype=tf.float32, size=0, dynamic_size=True),
                tf.TensorArray(dtype=tf.float32, size=0, dynamic_size=True),
                tf.TensorArray(dtype=tf.float32, size=0, dynamic_size=True),
                tf.zeros((self.batch_size, 2)),            # scale
                tf.zeros((self.batch_size, 2)),            # shift
                tf.zeros((self.batch_size, self.A)),       # attr
                tf.zeros((self.batch_size, 1)),            # z_pres
            ]
        )

        def process_tensor_array(tensor_array, name, shape=None):
            # Stack a per-step TensorArray into (batch, time, ...) and pad
            # the time axis out to `max_time_steps` (the loop may stop early).
            tensor = tf.transpose(tensor_array.stack(), (1, 0, 2))

            time_pad = self.max_time_steps - tf.shape(tensor)[1]
            padding = [[0, 0], [0, time_pad]]
            padding += [[0, 0]] * (len(tensor.shape)-2)
            tensor = tf.pad(tensor, padding, name=name)

            if shape is not None:
                tensor = tf.reshape(tensor, shape)

            return tensor

        self.predicted_n_digits = self.predicted_n_digits[:, 0]
        self._tensors["predicted_n_digits"] = self.predicted_n_digits

        self._tensors['scale'] = process_tensor_array(scale, 'scale')
        self._tensors['scale_kl'] = process_tensor_array(scale_kl, 'scale_kl')
        self._tensors['scale_std'] = process_tensor_array(scale_std, 'scale_std')

        self._tensors['shift'] = process_tensor_array(shift, 'shift')
        self._tensors['shift_kl'] = process_tensor_array(shift_kl, 'shift_kl')
        self._tensors['shift_std'] = process_tensor_array(shift_std, 'shift_std')

        self._tensors['attr'] = process_tensor_array(attr, 'attr', (self.batch_size, self.max_time_steps, self.A))
        self._tensors['attr_kl'] = process_tensor_array(attr_kl, 'attr_kl')
        self._tensors['attr_std'] = process_tensor_array(attr_std, 'attr_std')

        self._tensors['z_pres'] = process_tensor_array(z_pres, 'z_pres', (self.batch_size, self.max_time_steps, 1))
        self._tensors['obj'] = tf.round(self._tensors['z_pres'])  # for `build_math_representation`
        self._tensors['z_pres_probs'] = process_tensor_array(z_pres_probs, 'z_pres_probs')
        self._tensors['z_pres_kl'] = process_tensor_array(z_pres_kl, 'z_pres_kl')

        self._tensors['vae_input'] = process_tensor_array(vae_input, 'vae_input')
        self._tensors['vae_output'] = process_tensor_array(vae_output, 'vae_output')

        reconstruction = tf.clip_by_value(reconstruction, 0.0, 1.0)

        flat_inp = tf.layers.flatten(self._tensors["inp"])

        # Per-pixel cross-entropy reconstruction loss plus KL, weighted.
        self._tensors['per_pixel_reconstruction_loss'] = xent_loss(pred=reconstruction, label=flat_inp)
        self.losses.update(
            reconstruction=tf_mean_sum(self._tensors['per_pixel_reconstruction_loss']),
            running=self.kl_weight * tf.reduce_mean(kl_loss),
        )

        self._tensors['output'] = tf.reshape(reconstruction, (self.batch_size,) + self.obs_shape)

        # Counting metrics: exact-match error and L1 distance on object count.
        count_error = 1 - tf.to_float(tf.equal(self.target_n_digits, self.predicted_n_digits))
        count_1norm = tf.abs(self.target_n_digits - self.predicted_n_digits)

        self.record_tensors(
            predicted_n_digits=self.predicted_n_digits,
            count_error=count_error,
            count_1norm=count_1norm,
            scale=self._tensors["scale"],
            x=self._tensors["shift"][:, :, 0],
            y=self._tensors["shift"][:, :, 1],
            z_pres_prob=self._tensors["z_pres_probs"],
            z_pres_kl=self._tensors["z_pres_kl"],
            scale_kl=self._tensors["scale_kl"],
            shift_kl=self._tensors["shift_kl"],
            attr_kl=self._tensors["attr_kl"],
            scale_std=self._tensors["scale_std"],
            shift_std=self._tensors["shift_std"],
            attr_std=self._tensors["attr_std"],
        )
class AIR_RenderHook(RenderHook):
    """Render hook that plots, for each of the first `self.N` examples:
    the input and reconstruction (top row, with ground-truth and predicted
    boxes overlaid) and, below, the per-step attention window contents
    (VAE input) next to their decoded reconstructions (VAE output)."""

    # Space-separated tensor names fetched from the network for plotting.
    fetches = (
        "inp annotations n_annotations output scale shift "
        "predicted_n_digits vae_input vae_output background")

    def __call__(self, updater):
        """Fetch the tensors and produce the reconstruction figure."""
        fetched = self._fetch(updater)

        self._plot_reconstruction(updater, fetched)

    def _plot_reconstruction(self, updater, fetched):
        """Build and save the "sampled_reconstruction" figure.

        Layout: 2 columns per example (ground truth | reconstruction);
        row 0 holds the full images, row t+1 holds time step t's window.
        """
        network = updater.network

        inp = fetched['inp'].reshape(self.N, *network.obs_shape)
        output = fetched['output'].reshape(self.N, *network.obs_shape)

        object_shape = network.object_shape

        vae_input = fetched['vae_input'].reshape(
            self.N, network.max_time_steps, *object_shape, network.image_depth)
        vae_output = fetched['vae_output'].reshape(
            self.N, network.max_time_steps, *object_shape, network.image_depth)
        # background = fetched['background']

        scale = fetched['scale'].reshape(self.N, network.max_time_steps, 2)
        shift = fetched['shift'].reshape(self.N, network.max_time_steps, 2)
        predicted_n_digits = fetched['predicted_n_digits']

        annotations = fetched["annotations"]
        n_annotations = fetched["n_annotations"]

        # One distinct color per time step, cycling matplotlib's defaults.
        color_order = plt.rcParams['axes.prop_cycle'].by_key()['color']

        max_n_digits = max(predicted_n_digits)

        fig_width = 30
        fig, axes = plt.subplots(
            max_n_digits + 1, 2 * self.N,
            figsize=(fig_width, (max_n_digits+1) / (2*self.N) * fig_width))

        for i in range(self.N):
            ax_gt = axes[0, 2*i]
            self.imshow(ax_gt, inp[i])
            ax_gt.set_axis_off()

            ax_rec = axes[0, 2*i+1]
            self.imshow(ax_rec, output[i])
            ax_rec.set_axis_off()

            # Plot true bounding boxes
            for j in range(n_annotations[i]):
                # Annotation row format: (valid, class, top, bottom, left, right).
                valid, _, t, b, l, r = annotations[i][j]
                if not valid:
                    continue

                h = b - t
                w = r - l

                rect = patches.Rectangle(
                    (l, t), w, h, linewidth=1, edgecolor="white", facecolor='none')
                ax_gt.add_patch(rect)

                rect = patches.Rectangle(
                    (l, t), w, h, linewidth=1, edgecolor="white", facecolor='none')
                ax_rec.add_patch(rect)

            # NOTE: `t` is reused here as the time-step index (it held the
            # "top" coordinate in the annotation loop above).
            for t in range(max_n_digits):
                axes[t+1, 2*i].set_axis_off()
                axes[t+1, 2*i+1].set_axis_off()

                # Steps beyond the predicted count are left blank.
                if t >= predicted_n_digits[i]:
                    axes[t+1, 2*i].set_aspect('equal')
                    axes[t+1, 2*i+1].set_aspect('equal')
                    continue

                w, h = scale[i, t, :]
                x, y = shift[i, t, :]

                # Map (x, y) from [-1, 1] normalized coords to pixel space.
                transformed_x = 0.5 * (x + 1.)
                transformed_y = 0.5 * (y + 1.)

                height = h * network.image_height
                width = w * network.image_width

                top = network.image_height * transformed_y - height / 2
                left = network.image_width * transformed_x - width / 2

                # Predicted box on both the input and the reconstruction,
                # colored by time step.
                rect = patches.Rectangle(
                    (left, top), width, height, linewidth=1, edgecolor=color_order[t], facecolor='none')
                ax_rec.add_patch(rect)

                rect = patches.Rectangle(
                    (left, top), width, height, linewidth=1, edgecolor=color_order[t], facecolor='none')
                ax_gt.add_patch(rect)

                ax = axes[t+1, 2*i]
                self.imshow(ax, vae_input[i, t])
                ax.set_ylabel("t={}".format(t))

                # Color tab matching the box color for this time step.
                obj_rect = patches.Rectangle(
                    (1, 0), 0.2, 1, clip_on=False, transform=ax.transAxes, facecolor=color_order[t])
                ax.add_patch(obj_rect)

                ax = axes[t+1, 2*i+1]
                self.imshow(ax, vae_output[i, t])

        plt.subplots_adjust(left=0.01, right=.99, top=.99, bottom=0.01, wspace=0.14, hspace=0.1)
        self.savefig("sampled_reconstruction", fig, updater)
class AIR_ComparisonRenderHook(AIR_RenderHook):
    """Render hook that saves one figure per example: the input image
    overlaid with the predicted object boxes, for comparison plots."""

    def _plot_reconstruction(self, updater, fetched):
        """Draw each input image with its predicted bounding boxes and save
        one figure per example under "ground_truth/<index>"."""
        network = updater.network

        images = fetched['inp'].reshape(self.N, *network.obs_shape)
        scales = fetched['scale'].reshape(self.N, network.max_time_steps, 2)
        shifts = fetched['shift'].reshape(self.N, network.max_time_steps, 2)
        n_digits = fetched['predicted_n_digits']

        box_color = "xkcd:azure"

        for img_idx in range(self.N):
            fig = plt.figure(figsize=(5, 5))
            ax = plt.gca()
            self.imshow(ax, images[img_idx])
            ax.set_axis_off()

            for step in range(n_digits[img_idx]):
                w, h = scales[img_idx, step, :]
                x, y = shifts[img_idx, step, :]

                # Map (x, y) from [-1, 1] normalized coords to pixel space,
                # centering the box on the predicted location.
                box_height = h * network.image_height
                box_width = w * network.image_width
                box_top = network.image_height * (0.5 * (y + 1.)) - box_height / 2
                box_left = network.image_width * (0.5 * (x + 1.)) - box_width / 2

                ax.add_patch(patches.Rectangle(
                    (box_left, box_top), box_width, box_height, linewidth=2,
                    edgecolor=box_color, facecolor='none'))

            plt.subplots_adjust(left=.01, right=.99, top=.99, bottom=0.01, wspace=0.1, hspace=0.1)
            self.savefig("ground_truth/" + str(img_idx), fig, updater, is_dir=False)
|
{"hexsha": "880f3dcc4d3c2cb88ae5fec8e7c4b249e47e0482", "size": 31720, "ext": "py", "lang": "Python", "max_stars_repo_path": "auto_yolo/models/air.py", "max_stars_repo_name": "cvoelcker/auto_yolo", "max_stars_repo_head_hexsha": "9137ca48a0413d347b1cb97947079d2cea1d25a2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "auto_yolo/models/air.py", "max_issues_repo_name": "cvoelcker/auto_yolo", "max_issues_repo_head_hexsha": "9137ca48a0413d347b1cb97947079d2cea1d25a2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "auto_yolo/models/air.py", "max_forks_repo_name": "cvoelcker/auto_yolo", "max_forks_repo_head_hexsha": "9137ca48a0413d347b1cb97947079d2cea1d25a2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.3049555273, "max_line_length": 115, "alphanum_fraction": 0.5792559899, "include": true, "reason": "import numpy", "num_tokens": 7625}
|
using Test
using CVChannel

"""
This script verifies that the channels formed from axisymmetric states are multiplicative
for qubits and qutrits.
"""

# Scan the axisymmetric (x, y) parameter region for dimension `d` and check that
# the communication value of the induced channel is multiplicative under two copies,
# i.e. cv(N ⊗ N) ≈ cv(N)^2 at every grid point.
#
# The qubit (d = 2) and qutrit (d = 3) testsets below previously duplicated this
# entire loop body; it is factored out here so both run identical logic.
function verify_axisymmetric_multiplicativity(d; x_step=0.1, y_step=0.1)
    y_bounds = CVChannel._axisymmetric_y_bounds(d)
    y_range = y_bounds[1]:y_step:y_bounds[2]
    it = 0
    for y in y_range
        it = it + 1
        println("verifying y-slice $it of $(length(y_range))")
        # The admissible x interval depends on the current y slice.
        x_constraints = CVChannel._axisymmetric_x_constraints(d,y)
        for x in x_constraints[1]:x_step:x_constraints[2]
            @testset "(x,y) = ($x,$y)" begin
                ρ_axi = axisymmetricState(d,x,y)
                # Choi matrix of the channel induced by the axisymmetric state.
                J_N = d*ρ_axi
                (opt_cv_N, opt_σAB_N) = pptCVDual(J_N, d, d)
                # Choi matrix of two parallel copies, with subsystems reordered
                # to the (A1 A2 : B1 B2) bipartition.
                J_NN = permuteSubsystems(kron(J_N,J_N), [1,3,2,4], [d,d,d,d])
                (opt_cv_NN, opt_σAB_NN) = pptCVPrimal(J_NN, d^2, d^2)
                # Multiplicativity holds when cv(N ⊗ N) - cv(N)^2 vanishes.
                multiplicativity = opt_cv_NN - opt_cv_N^2
                @test multiplicativity ≈ 0 atol=2e-5
            end
        end
    end
end

println("Verifying qubit multiplicativity of axisymmetric channels")
@testset "qubit multiplicativity of axisymmetric channels" begin
    verify_axisymmetric_multiplicativity(2)
end

println("Verifying qutrit multiplicativity of axisymmetric channels")
@time @testset "qutrit multiplicativity of axisymmetric channels" begin
    verify_axisymmetric_multiplicativity(3)
end
|
{"hexsha": "e655ce4258e9bba235be64eee894e960ec433f55", "size": 2169, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "script/verify/multiplicativity-of-qutrit-axisymmetric-channels.jl", "max_stars_repo_name": "ChitambarLab/CVChannel.jl", "max_stars_repo_head_hexsha": "479fa1e70d19b5434137f9017d99830796802d87", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "script/verify/multiplicativity-of-qutrit-axisymmetric-channels.jl", "max_issues_repo_name": "ChitambarLab/CVChannel.jl", "max_issues_repo_head_hexsha": "479fa1e70d19b5434137f9017d99830796802d87", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2021-09-21T00:29:01.000Z", "max_issues_repo_issues_event_max_datetime": "2021-10-15T00:35:15.000Z", "max_forks_repo_path": "script/verify/multiplicativity-of-qutrit-axisymmetric-channels.jl", "max_forks_repo_name": "ChitambarLab/cv-channel", "max_forks_repo_head_hexsha": "479fa1e70d19b5434137f9017d99830796802d87", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.7123287671, "max_line_length": 89, "alphanum_fraction": 0.5984324574, "num_tokens": 668}
|
\documentclass[11pt,a4paper]{article}
\usepackage[utf8]{inputenc}
\usepackage[T1]{fontenc}
\usepackage{amsthm} %numéroter les questions
\usepackage[english]{babel}
\usepackage{datetime}
\usepackage{xspace} % typographie IN
\usepackage{hyperref}% hyperliens
\usepackage[all]{hypcap} %lien pointe en haut des figures
\usepackage[french]{varioref} %voir x p y
\usepackage{fancyhdr}% en têtes
%\input cyracc.def
\usepackage[]{graphicx} %include pictures
\usepackage{pgfplots}
\usepackage[]{circuitikz}
\usepackage{ifthen}
\usepackage[top=1.3 in, bottom=1.3 in, left=1.3 in, right=1.3 in]{geometry} % Yeah, that's bad to play with margins
\usepackage[]{pdfpages}
\usepackage[]{attachfile}
\usepackage{float}
\usepackage{subfig}
\usepackage{todonotes} % \missingfigure
\usepackage{gensymb} % \ohm
\usepackage{framed}
\newdateformat{mydate}{v1.0.0}%hack pour remplacer \THEYEAR
\newboolean{corrige}
\ifx\correction\undefined
\setboolean{corrige}{false}% pas de corrigé
\else
\setboolean{corrige}{true}%corrigé
\fi
%\setboolean{corrige}{false}% pas de corrigé
\newboolean{annexes}
\setboolean{annexes}{true}%annexes
%\setboolean{annexes}{false}% pas de annexes
\definecolor{darkblue}{rgb}{0,0,0.5}
\newboolean{mos}
%\setboolean{mos}{true}%annexes
\setboolean{mos}{false}% pas de annexes
\usepackage{aeguill} %guillemets
%% fancy header & foot
\pagestyle{fancy}
%Numero du TP :
\def \labonumber {Projet -- Part 3}
\lhead{[ELEC-H-310] Choucroute numérique\\ \labonumber}
\rhead{\mydate\today\\ page \thepage}
\chead{\ifthenelse{\boolean{corrige}}{Corrigé}{}}
\cfoot{}
%%
\pdfinfo{
/Author (Quentin Delhaye, Ken Hasselmann, ULB -- BEAMS)
/Title (\labonumber ELEC-H-310)
/ModDate (D:\pdfdate)
}
\hypersetup{
pdftitle={\labonumber [ELEC-H-310] Choucroute numérique},
pdfauthor={Quentin Delhaye, Ken Hasselmann, ULB -- BEAMS},
pdfsubject={}
}
\theoremstyle{definition}% questions pas en italique
\newtheorem{Q}{Question}[] % numéroter les questions [section] ou non []
\newcommand{\reponse}[1]{% pour intégrer une réponse : \reponse{texte} : sera inclus si \boolean{corrige}
\ifthenelse {\boolean{corrige}} {\paragraph{Réponse :} \color{darkblue} #1\color{black}} {}
}
\newcommand{\addcontentslinenono}[4]{\addtocontents{#1}{\protect\contentsline{#2}{#3}{#4}{}}}
\date{\vspace{-1.7cm}\mydate\today}
\title{\vspace{-2cm}\labonumber\\ Électronique numérique [ELEC-H-310]\\Conception d'une régulation de refroidissement: \\ interactions avec l'utilisateur\ifthenelse{\boolean{corrige}}{~\\Corrigé}{}}
\title{\vspace{-2cm}\labonumber \\ Digital electronics [ELEC-H-310]\\Design of a cooling control system: \\ interactions with user\ifthenelse{\boolean{corrige}}{~\\Corrigé}{}}
%\author{\vspace{-1cm}}%\textsc{Yannick Allard}}
\setlength{\parskip}{0.2cm plus2mm minus1mm} %espacement entre §
\setlength{\parindent}{0pt}
\begin{document}
\pagestyle{empty}
\maketitle
% \vspace*{-1cm}
% ######## ## ## ##########
% ## ## ## ## ##
% ## ## ## ## ##
% ######## ## ## ##
% ## ## ## ## ##
% ## ## ## ## ##
% ######## ####### ##
\section*{Goal of the lab session}
During three laboratory sessions, you will have to design a small cooling control system based on a propeller fan.
You will also have to be able to interact locally (keyboard) with this system.
During this third lab session, you will add two interaction modes between the temperature control system and the user: either locally with a keyboard, or remotely through a serial port.
At the end of this lab session, you should finish your whole cooling control system.
\section*{Prerequisite}
Before entering in the lab, you have to read the project specifications defined in the document ``Design of a temperature regulation system".
\section*{Objectives}
At the end of this lab session, you'll be able to:
\begin{itemize}
\item Design and implement a serial connection between two processors and to explain how it works.
\item To make numerous peripheral devices communicate with each other.
\end{itemize}
\newpage
% ######## ## ## ########## ######## #####
% ## ### ## ## ## ## ## ##
% ## ## ## ## ## ## ## ## ##
% ## ## ## ## ## ######## ## ##
% ## ## ## ## ## ## ## ## ##
% ## ## ### ## ## ## ## ##
% ######## ## ## ## ## ## #####
\section{Introduction}
During three laboratory sessions, you will have to design a small cooling control system based on a propeller fan.
You will also have to be able to interact locally (keyboard) with this system.
During this lab session, you will first build a way to interact with the keyboard of the extension PCB.
Depending on the character string in input, different changes will have to be forecast in the operating of the PCB (ex: change of the desired temperature, of the sampling period, ...).
Then, you will ensure that similar changes can be done... remotely!
A PC, connected to the processor through a serial cable, will play the role of the control room allowing you to interact with the processes without being physically present.
% ####### ## ### ## ## ######## ######### ########
% ## ## ## ## ## ## ## ## ## ## ##
% ## ## ## ## ## ## ## ## ## ##
% ## ## ## ## ## ## ## ###### ########
% ## ## ######### ## ## ## ## ## ##
% ## ## ## ## ## ## ## ## ## ## ##
% ####### ######### ## ## ### ######## ######### ## ##
\section{Keyboard interfacing}
\begin{figure}[H]
\center
\includegraphics[width=0.3\textwidth]{utilisateur}
\caption{Interaction between the user and the system.}
\label{fig:user}
\end{figure}
As explained in the coding guide, the keyboard is connected to generic I/O pins of the $\mu$C.
A function allowing to receive the pressed key will be provided.
\begin{itemize}
\item Thanks to the coding guide, explain the general principle of this ``matrix" keyboard.
\item During the initializing phase of your code, set correctly the I/O pins connected to the keyboard.
\item A function allowing you to read the pressed key is given.
If the key is effectively pressed, it sends the corresponding character (‘0’ to ‘9’ or ‘A’ to ‘F’).
Otherwise, it sends the value ‘z’.
\item Look over the code of this function, and explain how it works.
\end{itemize}
Now that the keyboard is working, you can write the function allowing to modify the behaviour of your temperature control center depending on which keys are pressed.
The list of the possible commands is given in the global project specifications.
\begin{itemize}
\item The function allowing you to read the pressed keys requires a large amount of instructions.
Where do you have to call this function in order to avoid blocking the processor uselessly?
\item When the user is typing on the keyboard, the LCD has to display the pressed key.
Once the command is entered or cancelled, the LCD has to display the temperature again.
\end{itemize}
% ####### ######### ######## ######## #########
% ## ## ## ## ## ## ##
% ## ## ## ## ## ##
% ####### ###### ######## ## ######
% ## ## ## ## ## ##
% ## ## ## ## ## ## ##
% ####### ######### ## ## ######## #########
\section{Serial connection}
In this part, you will send data to the PC through the serial port in order to follow the temperature evolution, as well as to receive the commands from the PC to adjust the temperature control.
The last module to set is the serial port, allowing to communicate remotely with a PC.
This module is named UART (Universal Asynchronous Receiver / Transmitter).
Before designing this communication, you will have to set the dsPIC and the PC in order to make them ``speak the same language".
We will start by the dsPIC setup.
\begin{itemize}
\item Thanks to the coding guide, configure the UART such that the communication respects the following format:
\begin{itemize}
\item Baudrate: 9600
\item Data bits: 8
\item No parity bit
\item Only one stop bit
\item No stream control
\end{itemize}
\item Give the meaning of those parameters.
\end{itemize}
\subsection{Sending data to the PC}
To send the data, refer to the code \texttt{initUART} given as an example and to the coding guide.
As mentioned in the project specifications, you have to send five characters for each temperature measure: ``42.5\textbackslash n", where ``\textbackslash n" is the character for line break.
To display the data on the PC, you just have to execute the script \texttt{graph.py}.
\subsection{Sending commands from the PC}
On the PC, open the program \texttt{putty}, and set the same format parameters.
Finally, all that remains is to set up the $\mu$C so that it can accept commands coming from the serial port.
\begin{itemize}
\item Write a routine allowing to read the last character received.
Justify why this routine must be called with an interruption.
\item Check if your $\mu$C is capable of receiving a character coming from the PC.
\item Complete your routine in order to make your system behave as described in the global project specifications.
\end{itemize}
% ####### ## ## ####### ########## ##### ## ##
% ## ## ## ## ## ## ## ## ## ### ###
% ## ## ## ## ## ## ## ## ## ## ##
% ## ## ## ####### ## ## ## ## ### ##
% ## ## ## ## ## ## ## ## ##
% ## ## ## ## ## ## ## ## ## ## ##
% ####### ####### ####### ## ##### ## ##
\section{Project customization}
Your cooling control system is now functional, you can add different features to enhance it!
\end{document}
|
{"hexsha": "261f5fd5907db3430b964c83fc066b0d59b66278", "size": 10382, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "projet/partie 3/elech310_projet_partie3_eng.tex", "max_stars_repo_name": "qgontie/ELECH310", "max_stars_repo_head_hexsha": "4ff2797a618ddde2ed564c6f504956c4334b7b70", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "projet/partie 3/elech310_projet_partie3_eng.tex", "max_issues_repo_name": "qgontie/ELECH310", "max_issues_repo_head_hexsha": "4ff2797a618ddde2ed564c6f504956c4334b7b70", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "projet/partie 3/elech310_projet_partie3_eng.tex", "max_forks_repo_name": "qgontie/ELECH310", "max_forks_repo_head_hexsha": "4ff2797a618ddde2ed564c6f504956c4334b7b70", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.7224080268, "max_line_length": 198, "alphanum_fraction": 0.6046041225, "num_tokens": 2785}
|
from __future__ import division
import unittest
import numpy as np
from numpy import testing as np_testing
from pax.plugins.peak_processing.BasicProperties import integrate_until_fraction, put_w_in_center_of_field
class TestPeakProperties(unittest.TestCase):
    """Unit tests for the pax BasicProperties peak helpers."""

    def _check_centered(self, w_len, center_index, expected):
        """Place a ones-waveform of length ``w_len`` into a fresh 5-sample
        field, centered at ``center_index``, and compare with ``expected``."""
        field = np.zeros(5)
        put_w_in_center_of_field(np.ones(w_len), field, center_index)
        np_testing.assert_equal(field, np.array(expected))

    def test_integrate_until_fraction(self):
        """Cumulative-fraction integration on trivial waveforms."""
        fractions_desired = np.arange(0, 101, 10, dtype=np.float64) / 100
        # A flat 100-sample waveform of ones needs no interpolation
        w = np.ones(100, dtype=np.float32)
        result = np.zeros(len(fractions_desired))
        integrate_until_fraction(w, fractions_desired, result)
        np_testing.assert_almost_equal(result, fractions_desired * 100, decimal=4)
        # A single-sample waveform exercises the interpolation path
        w = np.ones(1, dtype=np.float32)
        result = np.zeros(len(fractions_desired))
        integrate_until_fraction(w, fractions_desired, result)
        np_testing.assert_almost_equal(result, fractions_desired, decimal=4)

    def test_store_waveform(self):
        """put_w_in_center_of_field: centering plus left/right clipping."""
        # Exact fits at several center positions
        self._check_centered(3, 0, [0, 0, 1, 1, 1])
        self._check_centered(3, 1, [0, 1, 1, 1, 0])
        self._check_centered(3, 2, [1, 1, 1, 0, 0])
        # Left overhang
        self._check_centered(4, 3, [1, 1, 1, 0, 0])
        self._check_centered(7, 6, [1, 1, 1, 0, 0])
        # Right overhang
        self._check_centered(4, 0, [0, 0, 1, 1, 1])
        self._check_centered(7, 0, [0, 0, 1, 1, 1])
        # Waveform larger than the whole field
        self._check_centered(20, 10, [1, 1, 1, 1, 1])
if __name__ == '__main__':
    # Allow running this test module directly (outside a test runner).
    unittest.main()
|
{"hexsha": "0ddae8c995ed97349e47411d71055dd1d6b8ab29", "size": 2427, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_peak_properties.py", "max_stars_repo_name": "jacr20/pax", "max_stars_repo_head_hexsha": "d64d0ae4e4ec3e9bb3e61065ed92e9ea23328940", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 17, "max_stars_repo_stars_event_min_datetime": "2016-04-24T12:02:03.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-19T19:39:47.000Z", "max_issues_repo_path": "tests/test_peak_properties.py", "max_issues_repo_name": "jacr20/pax", "max_issues_repo_head_hexsha": "d64d0ae4e4ec3e9bb3e61065ed92e9ea23328940", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 300, "max_issues_repo_issues_event_min_datetime": "2016-04-01T15:29:57.000Z", "max_issues_repo_issues_event_max_datetime": "2021-01-03T23:59:45.000Z", "max_forks_repo_path": "tests/test_peak_properties.py", "max_forks_repo_name": "jacr20/pax", "max_forks_repo_head_hexsha": "d64d0ae4e4ec3e9bb3e61065ed92e9ea23328940", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 20, "max_forks_repo_forks_event_min_datetime": "2016-04-14T15:11:26.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-18T06:39:09.000Z", "avg_line_length": 37.921875, "max_line_length": 106, "alphanum_fraction": 0.6584260404, "include": true, "reason": "import numpy,from numpy", "num_tokens": 684}
|
from __future__ import print_function
import sys
import os
import torch
import torch.nn as nn
import torch.optim as optim
import torch.backends.cudnn as cudnn
import torchvision.transforms as transforms
import torch.nn.init as init
import argparse
import numpy as np
from torch.autograd import Variable
import torch.utils.data as data
from data import VOCroot, COCOroot,VOC_320, AnnotationTransform, COCODetection, VOCDetection, detection_collate, BaseTransform, preproc,VOC_512
#from layers.modules import MultiBoxLoss
from layers.modules import RefineMultiBoxLoss
from layers.functions import Detect,PriorBox
import time
from utils.nms_wrapper import nms
from utils.timer import Timer
import pickle
def str2bool(v):
    """Interpret common truthy strings ("yes", "true", "t", "1",
    case-insensitive) as True; everything else is False."""
    return v.lower() in {"yes", "true", "t", "1"}
# ---------------------------------------------------------------------------
# Command-line arguments
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser(
    description='Receptive Field Block Net Training')
parser.add_argument('-v', '--version', default='Refine_vgg',
                    help='Refine_vgg')
parser.add_argument('-s', '--size', default='320',
                    help='320 or 512 input size.')
parser.add_argument('-d', '--dataset', default='VOC',
                    help='VOC or COCO dataset')
parser.add_argument(
    '--basenet', default='/mnt/lvmhdd1/zuoxin/ssd_pytorch_models/vgg16_reducedfc.pth', help='pretrained base model')
#parser.add_argument(
#    '--basenet', default='/mnt/lvmhdd1/zuoxin/ssd_pytorch_models/mb.pth', help='pretrained base model')
parser.add_argument('--jaccard_threshold', default=0.5,
                    type=float, help='Min Jaccard index for matching')
parser.add_argument('-b', '--batch_size', default=32,
                    type=int, help='Batch size for training')
parser.add_argument('--num_workers', default=8,
                    type=int, help='Number of workers used in dataloading')
parser.add_argument('--cuda', default=True,
                    type=bool, help='Use cuda to train model')
parser.add_argument('--gpu_id', default=[0,1], type=int, help='gpus')
parser.add_argument('--lr', '--learning-rate',
                    default=1e-3, type=float, help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float, help='momentum')
parser.add_argument('--resume_net', default=False, help='resume net for retraining')
parser.add_argument('--resume_epoch', default=0,
                    type=int, help='resume iter for retraining')
parser.add_argument('-max','--max_epoch', default=300,
                    type=int, help='max epoch for retraining')
parser.add_argument('-we','--warm_epoch', default=1,
                    type=int, help='max epoch for retraining')
parser.add_argument('--weight_decay', default=5e-4,
                    type=float, help='Weight decay for SGD')
parser.add_argument('--gamma', default=0.1,
                    type=float, help='Gamma update for SGD')
parser.add_argument('--log_iters', default=True,
                    type=bool, help='Print the loss at each iteration')
parser.add_argument('--save_folder', default='/mnt/lvmhdd1/zuoxin/ssd_pytorch_models/refine/',
                    help='Location to save checkpoint models')
parser.add_argument('--date',default='0327')
parser.add_argument('--save_frequency',default=10)
parser.add_argument('--retest', default=False, type=bool,
                    help='test cache results')
parser.add_argument('--test_frequency',default=10, type=int)
parser.add_argument('--visdom', default=False, type=str2bool, help='Use visdom to for loss visualization')
parser.add_argument('--send_images_to_visdom', type=str2bool, default=False, help='Sample a random image from each 10th batch, send it to visdom after augmentations step')
args = parser.parse_args()
# Checkpoints are saved under <save_folder>/<version>_<size>/<date>/
save_folder = os.path.join(args.save_folder,args.version+'_'+args.size,args.date)
if not os.path.exists(save_folder):
    os.makedirs(save_folder)
# Directory for single-scale prediction dumps written during evaluation
test_save_dir = os.path.join(save_folder,'ss_predict')
if not os.path.exists(test_save_dir):
    os.makedirs(test_save_dir)
# Timestamped log file for evaluation results
log_file_path = save_folder + '/train' + time.strftime('_%Y-%m-%d-%H-%M', time.localtime(time.time())) + '.log'
if args.dataset == 'VOC':
    train_sets = [('2007', 'trainval'), ('2012', 'trainval')]
    cfg = (VOC_320, VOC_512)[args.size == '512']
else:
    train_sets = [('2014', 'train'),('2014', 'valminusminival')]
    # NOTE(review): the COCO branch also selects the VOC configs (no
    # COCO-specific cfg is imported) -- confirm this is intentional.
    cfg = (VOC_320, VOC_512)[args.size == '512']
img_dim = (320,512)[args.size=='512']
rgb_std = (1,1,1)
if 'vgg' in args.version:
    rgb_means = (104, 117, 123)
# Augmentation probability used by preproc (lower for the mobile variant)
p = (0.6,0.2)[args.version == 'RFB_mobile']
num_classes = (21, 81)[args.dataset == 'COCO']
batch_size = args.batch_size
weight_decay = 0.0005
gamma = 0.1
momentum = 0.9
if args.visdom:
    import visdom
    viz = visdom.Visdom()
from models.RefineSSD_vgg import build_net
# NOTE(review): this unconditionally overrides the size-dependent cfg chosen
# above and builds a 320 network even for --size 512 -- confirm intended.
cfg = VOC_320
net = build_net(320, num_classes,use_refine=True)
print(net)
if not args.resume_net:
    # Fresh training: load only the pretrained VGG backbone weights
    base_weights = torch.load(args.basenet)
    print('Loading base network...')
    net.base.load_state_dict(base_weights)
def xavier(param):
    # Xavier/Glorot uniform initialization (older torch API spelling;
    # newer releases use init.xavier_uniform_).
    # NOTE(review): unused in this script -- weights_init below uses
    # Kaiming init instead; kept for reference.
    init.xavier_uniform(param)
def weights_init(m):
    """Initialize a module's parameters in place.

    Conv weights receive Kaiming-normal (fan_out) init, batch-norm weights
    are set to 1, and every bias is zeroed.  Writes go through the module's
    state_dict, whose tensors share storage with the live parameters, so
    the in-place assignments take effect on the model.  Note that the
    'conv'/'bn' substring tests match the *qualified* key names, so they
    only fire when the parameter's owning submodule is named accordingly.
    """
    state = m.state_dict()
    for key in state:
        leaf_name = key.split('.')[-1]
        if leaf_name == 'weight':
            if 'conv' in key:
                init.kaiming_normal(state[key], mode='fan_out')
            if 'bn' in key:
                state[key][...] = 1
        elif leaf_name == 'bias':
            state[key][...] = 0
print('Initializing weights...')
# initialize newly added layers' weights with kaiming_normal method
net.extras.apply(weights_init)
net.trans_layers.apply(weights_init)
net.latent_layrs.apply(weights_init)
net.up_layers.apply(weights_init)
net.arm_loc.apply(weights_init)
net.arm_conf.apply(weights_init)
net.odm_loc.apply(weights_init)
net.odm_conf.apply(weights_init)
else:
    # load resume network
    resume_net_path = os.path.join(save_folder, args.version+'_'+args.dataset + '_epoches_'+ \
    str(args.resume_epoch) + '.pth')
    print('Loading resume network',resume_net_path)
    state_dict = torch.load(resume_net_path)
    # create new OrderedDict that does not contain `module.`
    # (checkpoints saved from a DataParallel wrapper prefix every key)
    from collections import OrderedDict
    new_state_dict = OrderedDict()
    for k, v in state_dict.items():
        head = k[:7]
        if head == 'module.':
            name = k[7:] # remove `module.`
        else:
            name = k
        new_state_dict[name] = v
    net.load_state_dict(new_state_dict)
if args.gpu_id:
    # Wrap for (multi-)GPU training
    net = torch.nn.DataParallel(net, device_ids=args.gpu_id)
if args.cuda:
    net.cuda()
    cudnn.benchmark = True
optimizer = optim.SGD(net.parameters(), lr=args.lr,
                      momentum=args.momentum, weight_decay=args.weight_decay)
#optimizer = optim.RMSprop(net.parameters(), lr=args.lr,alpha = 0.9, eps=1e-08,
#                      momentum=args.momentum, weight_decay=args.weight_decay)
# ARM: class-agnostic (2-class) anchor-refinement loss;
# ODM: full multi-class loss, filtering anchors by ARM objectness (0.01).
arm_criterion = RefineMultiBoxLoss(2, 0.5, True, 0, True, 3, 0.5, False)
odm_criterion = RefineMultiBoxLoss(num_classes, 0.5, True, 0, True, 3, 0.5, False,0.01)
priorbox = PriorBox(cfg)
detector = Detect(num_classes,0,cfg,object_score=0.01)
priors = Variable(priorbox.forward(), volatile=True)
#dataset
print('Loading Dataset...')
if args.dataset == 'VOC':
    testset = VOCDetection(
        VOCroot, [('2007', 'test')], None, AnnotationTransform())
    train_dataset = VOCDetection(VOCroot, train_sets, preproc(
        img_dim, rgb_means, p), AnnotationTransform())
elif args.dataset == 'COCO':
    testset = COCODetection(
        COCOroot, [('2014', 'minival')], None)
    train_dataset = COCODetection(COCOroot, train_sets, preproc(
        img_dim, rgb_means, p))
else:
    print('Only VOC and COCO are supported now!')
    exit()
def train():
    """Main training loop.

    Iterates for ``args.max_epoch`` epochs over ``train_dataset``,
    optimizing the ARM and ODM branches jointly with SGD.  Every
    ``args.save_frequency`` epochs a checkpoint is written to
    ``save_folder``; every ``args.test_frequency`` epochs the network is
    evaluated on ``testset``.  Relies on the module-level globals ``net``,
    ``optimizer``, ``priors``, ``arm_criterion``, ``odm_criterion``,
    ``detector``, ``train_dataset``, ``testset`` and ``args``.
    """
    net.train()
    # loss counters (per-epoch accumulators, only reported via visdom)
    loc_loss = 0  # epoch
    conf_loss = 0
    epoch = 0
    if args.resume_net:
        epoch = 0 + args.resume_epoch
    epoch_size = len(train_dataset) // args.batch_size
    max_iter = args.max_epoch * epoch_size
    # Learning-rate decay milestones, expressed in iterations
    stepvalues_VOC = (150 * epoch_size, 200 * epoch_size, 250 * epoch_size)
    stepvalues_COCO = (90 * epoch_size, 120 * epoch_size, 140 * epoch_size)
    stepvalues = (stepvalues_VOC, stepvalues_COCO)[args.dataset == 'COCO']
    print('Training', args.version, 'on', train_dataset.name)
    step_index = 0
    if args.visdom:
        # initialize visdom loss plot
        lot = viz.line(
            X=torch.zeros((1,)).cpu(),
            Y=torch.zeros((1, 3)).cpu(),
            opts=dict(
                xlabel='Iteration',
                ylabel='Loss',
                title='Current SSD Training Loss',
                legend=['Loc Loss', 'Conf Loss', 'Loss']
            )
        )
        epoch_lot = viz.line(
            X=torch.zeros((1,)).cpu(),
            Y=torch.zeros((1, 3)).cpu(),
            opts=dict(
                xlabel='Epoch',
                ylabel='Loss',
                title='Epoch SSD Training Loss',
                legend=['Loc Loss', 'Conf Loss', 'Loss']
            )
        )
    if args.resume_epoch > 0:
        start_iter = args.resume_epoch * epoch_size
    else:
        start_iter = 0
    batch_iterator = None
    # Running means over the last 10 iterations, for console reporting
    mean_odm_loss_c = 0
    mean_odm_loss_l = 0
    mean_arm_loss_c = 0
    mean_arm_loss_l = 0
    for iteration in range(start_iter, max_iter + 10):
        if (iteration % epoch_size == 0):
            # start of an epoch: create a fresh (shuffled) batch iterator
            batch_iterator = iter(data.DataLoader(train_dataset, batch_size,
                                                  shuffle=True, num_workers=args.num_workers, collate_fn=detection_collate))
            loc_loss = 0
            conf_loss = 0
            if epoch % args.save_frequency == 0 and epoch > 0:
                # periodic checkpoint
                torch.save(net.state_dict(), os.path.join(save_folder, args.version + '_' + args.dataset + '_epoches_' +
                           repr(epoch) + '.pth'))
            if epoch % args.test_frequency == 0 and epoch > 0:
                # periodic evaluation on the held-out test set
                net.eval()
                top_k = (300, 200)[args.dataset == 'COCO']
                if args.dataset == 'VOC':
                    APs, mAP = test_net(test_save_dir, net, detector, args.cuda, testset,
                                        BaseTransform(net.module.size, rgb_means, rgb_std, (2, 0, 1)),
                                        top_k, thresh=0.01)
                    APs = [str(num) for num in APs]
                    mAP = str(mAP)
                    # NOTE(review): mode 'w+' truncates, so each evaluation
                    # overwrites the previous log entry -- confirm intended
                    # (mode 'a' would accumulate a history).
                    with open(log_file_path, 'w+') as log_file:
                        log_file.write(str(iteration) + ' APs:\n' + '\n'.join(APs))
                        log_file.write('\nmAP:\n' + mAP + '\n')
                else:
                    test_net(test_save_dir, net, detector, args.cuda, testset,
                             BaseTransform(net.module.size, rgb_means, rgb_std, (2, 0, 1)),
                             top_k, thresh=0.01)
                net.train()
            epoch += 1
        load_t0 = time.time()
        if iteration in stepvalues:
            # LR decay milestone reached
            step_index = stepvalues.index(iteration) + 1
            if args.visdom:
                viz.line(
                    X=torch.ones((1, 3)).cpu() * epoch,
                    Y=torch.Tensor([loc_loss, conf_loss,
                                    loc_loss + conf_loss]).unsqueeze(0).cpu() / epoch_size,
                    win=epoch_lot,
                    update='append'
                )
        lr = adjust_learning_rate(optimizer, args.gamma, epoch, step_index, iteration, epoch_size)
        # load train data
        images, targets = next(batch_iterator)
        if args.cuda:
            images = Variable(images.cuda())
            targets = [Variable(anno.cuda(), volatile=True) for anno in targets]
        else:
            images = Variable(images)
            targets = [Variable(anno, volatile=True) for anno in targets]
        # forward
        out = net(images)
        arm_loc, arm_conf, odm_loc, odm_conf = out
        # backprop
        optimizer.zero_grad()
        # arm branch loss (binary objectness + anchor refinement)
        arm_loss_l, arm_loss_c = arm_criterion((arm_loc, arm_conf), priors, targets)
        # odm branch loss (multi-class detection, conditioned on ARM output)
        odm_loss_l, odm_loss_c = odm_criterion((odm_loc, odm_conf), priors, targets, (arm_loc, arm_conf), False)
        mean_arm_loss_c += arm_loss_c.data[0]
        mean_arm_loss_l += arm_loss_l.data[0]
        mean_odm_loss_c += odm_loss_c.data[0]
        mean_odm_loss_l += odm_loss_l.data[0]
        loss = arm_loss_l + arm_loss_c + odm_loss_l + odm_loss_c
        loss.backward()
        optimizer.step()
        load_t1 = time.time()
        if iteration % 10 == 0:
            print('Epoch:' + repr(epoch) + ' || epochiter: ' + repr(iteration % epoch_size) + '/' + repr(epoch_size)
                  + '|| Total iter ' +
                  repr(iteration) + ' || AL: %.4f AC: %.4f OL: %.4f OC: %.4f||' % (
                  mean_arm_loss_l / 10, mean_arm_loss_c / 10, mean_odm_loss_l / 10, mean_odm_loss_c / 10) +
                  'Batch time: %.4f sec. ||' % (load_t1 - load_t0) + 'LR: %.8f' % (lr))
            # reset the 10-iteration running means
            mean_odm_loss_c = 0
            mean_odm_loss_l = 0
            mean_arm_loss_c = 0
            mean_arm_loss_l = 0
        if args.visdom and args.send_images_to_visdom:
            random_batch_index = np.random.randint(images.size(0))
            viz.image(images.data[random_batch_index].cpu().numpy())
    # BUGFIX: the original called ``log_file.close()`` here, but ``log_file``
    # is only ever bound inside the ``with`` block above (and closed by it),
    # so the call raised NameError at the end of training; it was removed.
    torch.save(net.state_dict(), os.path.join(save_folder,
               'Final_' + args.version + '_' + args.dataset + '.pth'))
def adjust_learning_rate(optimizer, gamma, epoch, step_index, iteration, epoch_size):
    """Set the learning rate for the current iteration and return it.

    During the first ``args.warm_epoch`` epochs the rate ramps linearly
    from 1e-6 up to ``args.lr``; afterwards it is ``args.lr`` decayed by
    ``gamma ** step_index``.
    # Adapted from PyTorch Imagenet example:
    # https://github.com/pytorch/examples/blob/master/imagenet/main.py
    """
    warmup_iterations = epoch_size * args.warm_epoch
    if epoch < args.warm_epoch:
        # Linear warmup from 1e-6 toward the base learning rate
        lr = 1e-6 + (args.lr - 1e-6) * iteration / warmup_iterations
    else:
        # Step decay after warmup
        lr = args.lr * gamma ** step_index
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
    return lr
def test_net(save_folder, net, detector, cuda, testset, transform, max_per_image=300, thresh=0.005):
    """Run detection over ``testset`` and evaluate the results.

    Raw detections are cached to ``detections.pkl`` inside ``save_folder``
    (reused when ``--retest`` is given).  For VOC the per-class APs and the
    mAP are returned; for COCO the dataset's own evaluation is invoked and
    nothing is returned.  Uses the module-level globals ``args`` and
    ``priors``.
    """
    if not os.path.exists(save_folder):
        os.mkdir(save_folder)
    # dump predictions and assoc. ground truth to text file for now
    num_images = len(testset)
    num_classes = (21, 81)[args.dataset == 'COCO']
    # all_boxes[class][image] = N x 5 array (x1, y1, x2, y2, score)
    all_boxes = [[[] for _ in range(num_images)]
                 for _ in range(num_classes)]
    _t = {'im_detect': Timer(), 'misc': Timer()}
    det_file = os.path.join(save_folder, 'detections.pkl')
    if args.retest:
        # BUGFIX: re-score cached detections through a context manager so
        # the pickle file handle is closed (the original leaked it).
        with open(det_file, 'rb') as f:
            all_boxes = pickle.load(f)
        print('Evaluating detections')
        testset.evaluate_detections(all_boxes, save_folder)
        return
    for i in range(num_images):
        img = testset.pull_image(i)
        x = Variable(transform(img).unsqueeze(0), volatile=True)
        if cuda:
            x = x.cuda()
        _t['im_detect'].tic()
        out = net(x=x, test=True)  # forward pass
        arm_loc, arm_conf, odm_loc, odm_conf = out
        boxes, scores = detector.forward((odm_loc, odm_conf), priors, (arm_loc, arm_conf))
        detect_time = _t['im_detect'].toc()
        boxes = boxes[0]
        scores = scores[0]
        boxes = boxes.cpu().numpy()
        scores = scores.cpu().numpy()
        # scale each detection back up to the image
        scale = torch.Tensor([img.shape[1], img.shape[0],
                              img.shape[1], img.shape[0]]).cpu().numpy()
        boxes *= scale
        _t['misc'].tic()
        for j in range(1, num_classes):
            inds = np.where(scores[:, j] > thresh)[0]
            if len(inds) == 0:
                all_boxes[j][i] = np.empty([0, 5], dtype=np.float32)
                continue
            c_bboxes = boxes[inds]
            c_scores = scores[inds, j]
            c_dets = np.hstack((c_bboxes, c_scores[:, np.newaxis])).astype(
                np.float32, copy=False)
            # NOTE: the original if/else assigned cpu = False on both the
            # VOC and COCO branches, so it was collapsed; GPU NMS is used
            # either way.
            cpu = False
            keep = nms(c_dets, 0.45, force_cpu=cpu)
            keep = keep[:50]  # cap detections per class
            c_dets = c_dets[keep, :]
            all_boxes[j][i] = c_dets
        if max_per_image > 0:
            # keep only the max_per_image highest-scoring boxes per image
            image_scores = np.hstack([all_boxes[j][i][:, -1] for j in range(1, num_classes)])
            if len(image_scores) > max_per_image:
                image_thresh = np.sort(image_scores)[-max_per_image]
                for j in range(1, num_classes):
                    keep = np.where(all_boxes[j][i][:, -1] >= image_thresh)[0]
                    all_boxes[j][i] = all_boxes[j][i][keep, :]
        nms_time = _t['misc'].toc()
        if i % 20 == 0:
            print('im_detect: {:d}/{:d} {:.3f}s {:.3f}s'
                  .format(i + 1, num_images, detect_time, nms_time))
            _t['im_detect'].clear()
            _t['misc'].clear()
    # cache raw detections for later --retest runs
    with open(det_file, 'wb') as f:
        pickle.dump(all_boxes, f, pickle.HIGHEST_PROTOCOL)
    print('Evaluating detections')
    if args.dataset == 'VOC':
        APs, mAP = testset.evaluate_detections(all_boxes, save_folder)
        return APs, mAP
    else:
        testset.evaluate_detections(all_boxes, save_folder)
if __name__ == '__main__':
    # Entry point: run the full training (and periodic evaluation) loop.
    train()
|
{"hexsha": "8a50152d9066f04d9eb55f4d01b226f5bc9caf96", "size": 17744, "ext": "py", "lang": "Python", "max_stars_repo_path": "refinedet_train_test.py", "max_stars_repo_name": "AndOneDay/PytorchSSD", "max_stars_repo_head_hexsha": "a9f2cde8d149e14cab3feb0084b5be3c1e6c97c6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "refinedet_train_test.py", "max_issues_repo_name": "AndOneDay/PytorchSSD", "max_issues_repo_head_hexsha": "a9f2cde8d149e14cab3feb0084b5be3c1e6c97c6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "refinedet_train_test.py", "max_forks_repo_name": "AndOneDay/PytorchSSD", "max_forks_repo_head_hexsha": "a9f2cde8d149e14cab3feb0084b5be3c1e6c97c6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.3272727273, "max_line_length": 171, "alphanum_fraction": 0.5985121731, "include": true, "reason": "import numpy", "num_tokens": 4501}
|
import pandas as pd
import numpy as np
import nltk
from bs4 import BeautifulSoup
from nltk.corpus import stopwords
import re
class KaggleWord2VecUtility(object):
    """Static helpers for converting raw review text into word lists and
    tokenized sentences (Kaggle word2vec tutorial utilities)."""

    @staticmethod
    def reviewto_wordlist(review, remove_stopwords=False):
        """Convert one review to a list of lowercase words.

        HTML markup is stripped with BeautifulSoup, non-letters are
        replaced by spaces, and English stopwords are optionally removed.
        """
        plain_text = BeautifulSoup(review, "lxml").get_text()
        letters_only = re.sub("[^a-zA-Z]", " ", plain_text)
        tokens = letters_only.lower().split()
        if remove_stopwords:
            stop_set = set(stopwords.words("english"))
            tokens = [token for token in tokens if token not in stop_set]
        return tokens

    @staticmethod
    def review_to_sentences(review, tokenizer, remove_stopwords=False):
        """Split a raw review into sentences, each a list of words.

        ``review`` is decoded as UTF-8 before tokenizing, so it is
        presumably a byte string -- TODO confirm against callers.
        ``tokenizer`` is an NLTK-style sentence tokenizer.
        """
        raw_sentences = tokenizer.tokenize(review.decode('utf8').strip())
        return [KaggleWord2VecUtility.reviewto_wordlist(raw, remove_stopwords)
                for raw in raw_sentences if len(raw) > 0]
|
{"hexsha": "9440e93a6b07975faab0e919347dcfb6da56fd54", "size": 840, "ext": "py", "lang": "Python", "max_stars_repo_path": "Input_data/KaggleWord2VecUtility.py", "max_stars_repo_name": "mohsincl/ML-Projects", "max_stars_repo_head_hexsha": "5ef14257f2fdd3ae438557b8ddcdbf316bd1dc2e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2018-07-17T16:08:51.000Z", "max_stars_repo_stars_event_max_datetime": "2019-09-05T20:28:23.000Z", "max_issues_repo_path": "Input_data/KaggleWord2VecUtility.py", "max_issues_repo_name": "mohsincl/ML-Projects", "max_issues_repo_head_hexsha": "5ef14257f2fdd3ae438557b8ddcdbf316bd1dc2e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Input_data/KaggleWord2VecUtility.py", "max_forks_repo_name": "mohsincl/ML-Projects", "max_forks_repo_head_hexsha": "5ef14257f2fdd3ae438557b8ddcdbf316bd1dc2e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.3076923077, "max_line_length": 92, "alphanum_fraction": 0.780952381, "include": true, "reason": "import numpy", "num_tokens": 211}
|
// Copyright (c) 2015, Daniel Pfeifer <daniel@pfeifer-mail.de>
//
// Permission to use, copy, modify, and/or distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#ifndef BYOM_DYNAMIC_VIEW_HPP
#define BYOM_DYNAMIC_VIEW_HPP
#include <string>
#include <memory>
#include <functional>
#include <type_traits>
#include <stdexcept>
#include <boost/mpl/bool.hpp>
#include <boost/mpl/if.hpp>
namespace byom {

class dynamic_view;

// Visitor signature used by dynamic_view::for_each; invoked once per
// (key, value) pair, both exposed as dynamic_views.
using visit_function =
  std::function<void(dynamic_view const&, dynamic_view const&)>;

// Extension point: specialize ext<T> to adapt a type T to dynamic_view
// (empty_impl, at_impl, for_each_impl, to_* conversions, print_impl).
template <typename T, typename Enable = void>
struct ext;
class dynamic_view
{
template <typename T>
using no_copy_ctor = typename std::enable_if<!std::is_same<
typename std::remove_reference<T>::type, dynamic_view>::value>::type;
public:
  // Generic value constructor: delegates on std::is_lvalue_reference so
  // lvalues are stored by reference and rvalues by (owned) value; the
  // no_copy_ctor guard keeps this overload from hijacking copies.
  template <typename T, typename Enable = no_copy_ctor<T>>
  dynamic_view(T&& t)
    : dynamic_view(std::forward<T>(t), std::is_lvalue_reference<T>())
  {
  }

  // Pointer constructors: the pointer value itself is stored inline.
  template <typename T>
  dynamic_view(T* t)
  {
    new (storage()) local_model_t<T*>(t);
  }

  template <typename T>
  dynamic_view(T const* t)
  {
    new (storage()) local_model_t<T const*>(t);
  }

  // Copy/move construction delegates to the stored model so the concrete
  // model type is reproduced inside this object's inline storage.
  dynamic_view(dynamic_view const& x)
  {
    x.object().clone(storage());
  }

  dynamic_view(dynamic_view&& x)
  {
    x.object().move_clone(storage());
  }

  ~dynamic_view()
  {
    // Models are placement-new'd into inline storage, so destroy manually.
    object().~concept_t();
  }

  // Views cannot be rebound after construction.
  dynamic_view& operator=(dynamic_view&& x) = delete;
  dynamic_view& operator=(dynamic_view const& x) = delete;
public:
  // True when the underlying value is "empty" as defined by ext<T>.
  bool empty() const
  {
    return object().empty();
  }

  // Member lookup by name (lvalue owner).
  dynamic_view at(std::string const& n) const &
  {
    return object().at(n);
  }

  // Member lookup on a temporary: if this view owns its value but the
  // member only references into it, the member would dangle once the
  // temporary dies -- reject that case.
  dynamic_view at(std::string const& n) const &&
  {
    auto member = object().at(n);
    if (object().owned() && !member.object().owned()) {
      throw std::invalid_argument{ "dangling reference" };
    }
    return member;
  }

  // Invoke v for every (key, value) pair of the underlying object.
  void for_each(visit_function const& v) const
  {
    object().for_each(v);
  }

  // Extract the value as T: exact type match for general T, with numeric
  // and string conversions handled by the cast_helper specializations.
  template <typename T>
  T get() const
  {
    return cast_helper<T>::cast(object());
  }

  friend std::ostream& operator<<(std::ostream& os, dynamic_view const& self)
  {
    self.object().print(os);
    return os;
  }
private:
  // Type-erasure interface: every stored value is wrapped in a model that
  // implements these operations (classic concept/model idiom).
  struct concept_t
  {
    virtual ~concept_t() = default;
    // Reproduce this model (copy / move) into the given inline storage.
    virtual void clone(void* storage) const = 0;
    virtual void move_clone(void* storage) = 0;
    // True when the model owns its value (vs. referencing an external one).
    virtual bool owned() const = 0;
    virtual const std::type_info& type() const = 0;
    virtual void const* data() const = 0;
    virtual bool empty() const = 0;
    virtual dynamic_view at(std::string const& n) const = 0;
    virtual void for_each(visit_function const& v) const = 0;
    // Conversions used by dynamic_view::get<T>().
    virtual unsigned long long int to_integral() const = 0;
    virtual long double to_floating_point() const = 0;
    virtual std::string to_string() const = 0;
    virtual void print(std::ostream& os) const = 0;
  };
  // CRTP base shared by all models: forwards every concept operation to
  // the ext<T> customization point, fetching the value via the derived
  // model's get().
  template <template <typename> class Derived, typename T>
  struct model_base_t : concept_t
  {
    const std::type_info& type() const override
    {
      return typeid(T);
    }
    void const* data() const override
    {
      return &get();
    }
    bool empty() const override
    {
      return ext<T>::empty_impl(get());
    }
    dynamic_view at(std::string const& n) const override
    {
      return ext<T>::at_impl(get(), n);
    }
    void for_each(visit_function const& v) const override
    {
      ext<T>::for_each_impl(get(), v);
    }
    unsigned long long int to_integral() const override
    {
      return ext<T>::to_integral(get());
    }
    long double to_floating_point() const override
    {
      return ext<T>::to_floating_point(get());
    }
    std::string to_string() const override
    {
      return ext<T>::to_string(get());
    }
    void print(std::ostream& os) const override
    {
      ext<T>::print_impl(os, get());
    }
    // Static dispatch into the concrete model (CRTP).
    T const& get() const
    {
      return static_cast<Derived<T> const*>(this)->get();
    }
  };
// Non-owning model: stores only a reference to the caller's object.
// The referenced object must outlive every clone of this view.
template <typename T>
struct cref_model_t : model_base_t<cref_model_t, T>
{
  cref_model_t(T const& x)
    : object(x)
  {
  }
  // Cloning copies the reference, not the referent.
  void clone(void* storage) const override
  {
    new (storage) cref_model_t(object);
  }
  // Moving a reference is the same as copying it.
  void move_clone(void* storage) override
  {
    clone(storage);
  }
  bool owned() const override
  {
    return false;
  }
  T const& get() const
  {
    return object;
  }
  T const& object;
};
// Owning model with inline storage: the value lives directly inside the
// view's buffer. Chosen for small types by the false_type constructor.
template <typename T>
struct local_model_t : model_base_t<local_model_t, T>
{
  local_model_t(T x)
    : object(std::move(x))
  {
  }
  // NOTE(review): this copy-constructs T into placement storage, but the
  // selection predicate only requires nothrow copy OR nothrow move, so a
  // throwing copy here looks reachable -- confirm intended.
  void clone(void* storage) const override
  {
    new (storage) local_model_t(object);
  }
  bool owned() const override
  {
    return true;
  }
  void move_clone(void* storage) override
  {
    new (storage) local_model_t(std::move(object));
  }
  T const& get() const
  {
    return object;
  }
  T object;
};
// Owning model with heap storage: used when T does not qualify for the
// inline buffer (see the false_type constructor below).
template <typename T>
struct remote_model_t : model_base_t<remote_model_t, T>
{
  remote_model_t(T x)
    : object(std::make_unique<T const>(std::move(x)))
  {
  }
  // Deep copy of the pointee.
  void clone(void* storage) const override
  {
    new (storage) remote_model_t(get());
  }
  // Steals the unique_ptr; only the pointer moves, not the pointee.
  void move_clone(void* storage) override
  {
    new (storage) remote_model_t(std::move(*this));
  }
  bool owned() const override
  {
    return true;
  }
  T const& get() const
  {
    return *object;
  }
  std::unique_ptr<T const> object;
};
private:
// Fallback cast: requires an exact type match, then returns a copy of
// the stored value. Arithmetic and std::string targets are handled by
// the partial specializations below.
template <typename T, typename Enable = void>
struct cast_helper
{
  static T cast(concept_t const& object)
  {
    if (object.type() != typeid(T)) {
      // BUGFIX: std::bad_cast has no string constructor; the original
      // `std::bad_cast{ "bad cast" }` did not compile.
      throw std::bad_cast{};
    }
    // static_cast is the correct (and sufficient) cast from void const*.
    return *static_cast<T const*>(object.data());
  }
};
// Integral targets: converted through the erased to_integral() hook
// instead of requiring an exact stored-type match.
template <typename T>
struct cast_helper<T,
  typename std::enable_if<std::is_integral<T>::value>::type>
{
  static T cast(concept_t const& object)
  {
    return static_cast<T>(object.to_integral());
  }
};
// Floating-point targets: converted through to_floating_point().
template <typename T>
struct cast_helper<
  T, typename std::enable_if<std::is_floating_point<T>::value>::type>
{
  static T cast(concept_t const& object)
  {
    return static_cast<T>(object.to_floating_point());
  }
};
// std::string targets: converted through to_string().
template <typename T>
struct cast_helper<
  T, typename std::enable_if<std::is_same<T, std::string>::value>::type>
{
  static T cast(concept_t const& object)
  {
    return object.to_string();
  }
};
private:
// Tag-dispatched constructor (true_type): wrap t by reference only.
// No copy is made, so the referenced object must outlive this view.
// NOTE(review): presumably selected for lvalue / view-only arguments;
// the dispatch site is outside this excerpt -- confirm there.
template <typename T>
dynamic_view(T&& t, std::true_type)
{
  using model = cref_model_t<typename std::decay<T>::type>;
  static_assert(sizeof(model) <= sizeof(data), "size mismatch");
  new (storage()) model(t);
}
// Tag-dispatched constructor (false_type): take ownership of t.
// Types that fit the inline buffer and can be relocated without
// throwing are stored inline (local_model_t); everything else goes to
// the heap behind a unique_ptr (remote_model_t), whose model object is
// always small enough for the buffer.
template <typename T>
dynamic_view(T&& t, std::false_type)
{
  using local_type = local_model_t<typename std::decay<T>::type>;
  using remote_type = remote_model_t<typename std::decay<T>::type>;
  using use_local_type =
    boost::mpl::bool_<(sizeof(local_type) <= sizeof(data)) &&
                      (std::is_nothrow_copy_constructible<T>::value ||
                       std::is_nothrow_move_constructible<T>::value)>;
  using model =
    typename boost::mpl::if_<use_local_type, local_type, remote_type>::type;
  static_assert(sizeof(model) <= sizeof(data), "size mismatch");
  new (storage()) model(std::move(t));
}
private:
// The type-erased model living in the inline buffer.
concept_t& object()
{
  return *static_cast<concept_t*>(storage());
}
// Const access to the type-erased model in the inline buffer.
concept_t const& object() const
{
  return *static_cast<concept_t const*>(storage());
}
// Raw inline buffer used for placement-new of the model objects.
void* storage()
{
  return &data;
}
// Const view of the raw inline buffer.
void const* storage() const
{
  return &data;
}
double data[2];
};
} // namespace byom
#endif /* BYOM_DYNAMIC_VIEW_HPP */
|
{"hexsha": "6ad15169a9175292dc8d45aab821b47f0f16bdb5", "size": 8307, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "include/byom/dynamic_view.hpp", "max_stars_repo_name": "purpleKarrot/BYOM", "max_stars_repo_head_hexsha": "1de5f53a1185b37676d76399bd67ff9e08ad828a", "max_stars_repo_licenses": ["0BSD"], "max_stars_count": 2.0, "max_stars_repo_stars_event_min_datetime": "2015-12-02T15:02:06.000Z", "max_stars_repo_stars_event_max_datetime": "2015-12-03T22:41:19.000Z", "max_issues_repo_path": "include/byom/dynamic_view.hpp", "max_issues_repo_name": "purpleKarrot/BYOM", "max_issues_repo_head_hexsha": "1de5f53a1185b37676d76399bd67ff9e08ad828a", "max_issues_repo_licenses": ["0BSD"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "include/byom/dynamic_view.hpp", "max_forks_repo_name": "purpleKarrot/BYOM", "max_forks_repo_head_hexsha": "1de5f53a1185b37676d76399bd67ff9e08ad828a", "max_forks_repo_licenses": ["0BSD"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.152, "max_line_length": 79, "alphanum_fraction": 0.6330805345, "num_tokens": 2104}
|
[STATEMENT]
lemma all_larger_zero_in_csset: "\<forall>x. x \<in> consumption_set"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<forall>x. x \<in> consumption_set
[PROOF STEP]
using cons_set_props pre_arrow_debreu_consumption_set_def
[PROOF STATE]
proof (prove)
using this:
pre_arrow_debreu_consumption_set consumption_set
pre_arrow_debreu_consumption_set ?consumption_set \<equiv> \<forall>x. x \<in> UNIV \<longrightarrow> x \<in> ?consumption_set
goal (1 subgoal):
1. \<forall>x. x \<in> consumption_set
[PROOF STEP]
by blast
|
{"llama_tokens": 209, "file": "First_Welfare_Theorem_Microeconomics_Private_Ownership_Economy", "length": 2}
|
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import numpy as np
import os
from madminer.analysis import DataAnalyzer
from madminer.utils.various import math_commands, weighted_quantile, sanitize_array, mdot
from madminer.utils.various import less_logging
from madminer.ml import ParameterizedRatioEstimator, ScoreEstimator, Ensemble, load_estimator
logger = logging.getLogger(__name__)
class FisherInformation(DataAnalyzer):
"""
Functions to calculate expected Fisher information matrices.
After initializing a `FisherInformation` instance with the filename of a MadMiner file, different information matrices
can be calculated:
* `FisherInformation.truth_information()` calculates the full truth-level Fisher information.
This is the information in an idealized measurement where all parton-level particles with their charges, flavours,
and four-momenta can be accessed with perfect accuracy.
* `FisherInformation.full_information()` calculates the full Fisher information in
realistic detector-level observations, estimated with neural networks. In addition to the MadMiner file, this
requires a trained SALLY or SALLINO estimator as well as an unweighted evaluation sample.
* `FisherInformation.rate_information()` calculates the Fisher information in the total cross
section.
* `FisherInformation.histo_information()` calculates the Fisher information in the histogram of
one (parton-level or detector-level) observable.
* `FisherInformation.histo_information_2d()` calculates the Fisher information in a two-dimensional
histogram of two (parton-level or detector-level) observables.
* `FisherInformation.histogram_of_information()` calculates the full truth-level Fisher information in
different slices of one observable (the "distribution of the Fisher information").
Finally, don't forget that in the presence of nuisance parameters the constraint terms also affect the Fisher
information. This term is given by `FisherInformation.calculate_fisher_information_nuisance_constraints()`.
Parameters
----------
filename : str
Path to MadMiner file (for instance the output of `madminer.delphes.DelphesProcessor.save()`).
include_nuisance_parameters : bool, optional
If True, nuisance parameters are taken into account. Default value: True.
"""
def __init__(self, filename, include_nuisance_parameters=True):
    """Open the MadMiner file at `filename` and prepare the analyzer.

    The hard-coded second positional argument disables an option of
    `DataAnalyzer` (presumably morphing-related -- TODO confirm against
    madminer.analysis.DataAnalyzer's signature).
    """
    super(FisherInformation, self).__init__(filename, False, include_nuisance_parameters)
def truth_information(
    self, theta, luminosity=300000.0, cuts=None, efficiency_functions=None, include_nuisance_parameters=True
):
    """
    Computes the full parton-level ("truth") Fisher information matrix.

    This corresponds to an idealized measurement in which all parton-level
    particles with their charges, flavours, and four-momenta can be measured
    directly, i.e. the latent variables `z_parton` are observable.

    Parameters
    ----------
    theta : ndarray
        Parameter point `theta` at which the Fisher information matrix
        `I_ij(theta)` is evaluated.
    luminosity : float
        Luminosity in pb^-1.
    cuts : None or list of str, optional
        Cuts. Each entry is a parseable Python expression returning a bool
        (True if the event passes the cut). Default value: None.
    efficiency_functions : list of str or None
        Efficiencies. Each entry is a parseable Python expression returning
        a float efficiency factor. Default value: None.
    include_nuisance_parameters : bool, optional
        If True, nuisance parameters are taken into account. Default value:
        True.

    Returns
    -------
    fisher_information : ndarray
        Expected full truth-level Fisher information matrix with shape
        `(n_parameters, n_parameters)`.
    fisher_information_uncertainty : ndarray
        Covariance matrix of the Fisher information matrix with shape
        `(n_parameters, n_parameters, n_parameters, n_parameters)`,
        calculated with plain Gaussian error propagation.
    """
    # Normalize optional inputs
    cuts = [] if cuts is None else cuts
    efficiency_functions = [] if efficiency_functions is None else efficiency_functions

    # Nuisance parameters only enter if the file actually defines them
    include_nuisance_parameters = include_nuisance_parameters and (self.nuisance_parameters is not None)

    n_all = self.n_parameters + (self.n_nuisance_parameters if include_nuisance_parameters else 0)
    total_info = np.zeros((n_all, n_all))
    total_covariance = np.zeros((n_all, n_all, n_all, n_all))

    # Accumulate the information batch by batch
    for obs_batch, weight_batch in self.event_loader():
        # Apply cuts
        passed = [self._pass_cuts(event, cuts) for event in obs_batch]
        obs_batch = obs_batch[passed]
        weight_batch = weight_batch[passed]

        # Apply efficiencies
        eff_factors = np.array([self._eval_efficiency(event, efficiency_functions) for event in obs_batch])
        weight_batch = weight_batch * eff_factors[:, np.newaxis]

        # Per-batch Fisher information and its uncertainty
        batch_info, batch_covariance = self._calculate_fisher_information(
            theta,
            weight_batch,
            luminosity,
            sum_events=True,
            calculate_uncertainty=True,
            include_nuisance_parameters=include_nuisance_parameters,
        )
        total_info += batch_info
        total_covariance += batch_covariance

    return total_info, total_covariance
def full_information(
    self,
    theta,
    model_file,
    unweighted_x_sample_file=None,
    luminosity=300000.0,
    include_xsec_info=True,
    mode="score",
    calculate_covariance=True,
    batch_size=100000,
    test_split=0.2,
):
    """
    Calculates the full Fisher information in realistic detector-level observations, estimated with neural networks.
    In addition to the MadMiner file, this requires a trained SALLY or SALLINO estimator.

    Nuisance parameters are taken into account automatically if the SALLY / SALLINO model was trained with them.

    Parameters
    ----------
    theta : ndarray
        Parameter point `theta` at which the Fisher information matrix `I_ij(theta)` is evaluated.
    model_file : str
        Filename of a trained local score regression model that was trained on samples from `theta` (see
        `madminer.ml.Estimator`).
    unweighted_x_sample_file : str or None
        Filename of an unweighted x sample that is sampled according to theta and obeys the cuts
        (see `madminer.sampling.SampleAugmenter.extract_samples_train_local()`). If None, the Fisher information
        is instead calculated on the full, weighted samples (the data in the MadMiner file). Default value: None.
    luminosity : float, optional
        Luminosity in pb^-1. Default value: 300000.
    include_xsec_info : bool, optional
        Whether the rate information is included in the returned Fisher information. Default value: True.
    mode : {"score", "information", "modified_score"}, optional
        How the ensemble uncertainty on the kinematic Fisher information is calculated. If mode is "information",
        the Fisher information for each estimator is calculated individually and only then
        are the sample mean and covariance calculated. If mode is "score", the sample mean is
        calculated for the score for each event. Default value: "score".
    calculate_covariance : bool, optional
        If True, the covariance between the different estimators is calculated. Default value: True.
    batch_size : int, optional
        Batch size. Default value: 100000.
    test_split : float or None, optional
        If unweighted_x_sample_file is None, this determines the fraction of weighted events used for evaluation.
        If None, all events are used (this will probably include events used during training!). Default value: 0.2.

    Returns
    -------
    fisher_information : ndarray or list of ndarray
        Estimated expected full detector-level Fisher information matrix with shape `(n_parameters, n_parameters)`.
        If more than one value ensemble_vote_expectation_weight is given, this is a list with results for all
        entries in ensemble_vote_expectation_weight.
    fisher_information_uncertainty : ndarray or list of ndarray or None
        Covariance matrix of the Fisher information matrix with shape
        `(n_parameters, n_parameters, n_parameters, n_parameters)`. If more than one value
        ensemble_vote_expectation_weight is given, this is a list with results for all entries in
        ensemble_vote_expectation_weight.

    Raises
    ------
    ValueError
        If `mode` is not one of the supported values.
    RuntimeError
        If the model (or ensemble member) type is unsupported, or if the model's parameter count matches neither
        the physical parameters nor physical + nuisance parameters of the MadMiner file.
    """
    # Check input
    if mode not in ["score", "information", "modified_score"]:
        raise ValueError("Unknown mode {}, has to be 'score', 'modified_score', or 'information'!".format(mode))

    # Load Estimator model (a directory containing ensemble.json is treated as an Ensemble)
    if os.path.isdir(model_file) and os.path.exists(model_file + "/ensemble.json"):
        model_is_ensemble = True
        model = Ensemble()
        model.load(model_file)
        if isinstance(model.estimators[0], ParameterizedRatioEstimator):
            model_type = "Parameterized Ratio Ensemble"
        elif isinstance(model.estimators[0], ScoreEstimator):
            model_type = "Score Ensemble"
        else:
            raise RuntimeError("Ensemble is not a score or parameterized_ratio type!")
    else:
        model_is_ensemble = False
        model = load_estimator(model_file)
        if isinstance(model, ParameterizedRatioEstimator):
            model_type = "Parameterized Ratio Estimator"
        elif isinstance(model, ScoreEstimator):
            model_type = "Score Estimator"
        else:
            raise RuntimeError("Estimator is not a score or parameterized_ratio type!")

    # Nuisance parameters?
    if model.n_parameters == self.n_parameters:
        logger.info(
            "Found %s parameters in %s model, matching %s physical parameters in MadMiner file",
            model.n_parameters,
            model_type,
            self.n_parameters,
        )
        include_nuisance_parameters = False
    elif model.n_parameters == self.n_parameters + self.n_nuisance_parameters:
        logger.info(
            "Found %s parameters in %s model, matching %s physical parameters + %s nuisance parameters"
            + " in MadMiner file",
            model.n_parameters,
            model_type,
            self.n_parameters,
            self.n_nuisance_parameters,
        )
        include_nuisance_parameters = True
    else:
        # BUGFIX: RuntimeError does not interpolate logging-style "%s" arguments -- the original call produced
        # an exception whose message was an unformatted tuple. Build the message explicitly instead.
        raise RuntimeError(
            "Inconsistent numbers of parameters! Found {} in {} model, {} physical parameters in "
            "MadMiner file, and {} nuisance parameters in MadMiner file.".format(
                model.n_parameters, model_type, self.n_parameters, self.n_nuisance_parameters
            )
        )

    if include_nuisance_parameters:
        logger.debug("Including nuisance parameters")
    else:
        logger.debug("Not including nuisance parameters")

    # Total xsec
    total_xsec = self._calculate_xsec(theta=theta)
    logger.debug("Total cross section: %s pb", total_xsec)

    # Rate part of Fisher information
    fisher_info_rate = 0.0
    rate_covariance = 0.0
    if include_xsec_info:
        logger.info("Evaluating rate Fisher information")
        fisher_info_rate, rate_covariance = self.rate_information(
            theta=theta, luminosity=luminosity, include_nuisance_parameters=include_nuisance_parameters
        )

    # Evaluation from weighted events
    if unweighted_x_sample_file is None:
        # Which events to sum over (skip the training fraction unless test_split is disabled)
        if test_split is None or test_split <= 0.0 or test_split >= 1.0:
            start_event = 0
        else:
            start_event = int(round((1.0 - test_split) * self.n_samples, 0)) + 1

        if start_event > 0:
            total_sum_weights_theta = self._calculate_xsec(theta=theta, start_event=start_event)
        else:
            total_sum_weights_theta = total_xsec

        # Theta morphing matrix
        theta_matrix = self._get_theta_benchmark_matrix(theta)

        # Prepare output
        fisher_info_kin = None
        covariance = None

        # Number of batches
        n_batches = int(np.ceil((self.n_samples - start_event) / batch_size))
        n_batches_verbose = max(int(round(n_batches / 10, 0)), 1)

        for i_batch, (observations, weights_benchmarks) in enumerate(
            self.event_loader(
                batch_size=batch_size, start=start_event, include_nuisance_parameters=include_nuisance_parameters
            )
        ):
            if (i_batch + 1) % n_batches_verbose == 0:
                logger.info("Evaluating kinematic Fisher information on batch %s / %s", i_batch + 1, n_batches)
            else:
                logger.debug("Evaluating kinematic Fisher information on batch %s / %s", i_batch + 1, n_batches)

            weights_theta = mdot(theta_matrix, weights_benchmarks)

            # Calculate Fisher info on this batch
            if model_is_ensemble:
                with less_logging():
                    this_fisher_info, this_covariance = model.calculate_fisher_information(
                        x=observations,
                        theta=theta,
                        obs_weights=weights_theta,
                        n_events=luminosity * total_xsec * np.sum(weights_theta) / total_sum_weights_theta,
                        calculate_covariance=calculate_covariance,
                        mode=mode,
                    )
            else:
                with less_logging():
                    this_fisher_info = model.calculate_fisher_information(
                        x=observations,
                        theta=theta,
                        weights=weights_theta,
                        n_events=luminosity * total_xsec * np.sum(weights_theta) / total_sum_weights_theta,
                    )
                this_covariance = None

            # Sum up results (single-estimator results are arrays; "information" mode may yield lists)
            if fisher_info_kin is None:
                fisher_info_kin = this_fisher_info
            elif isinstance(fisher_info_kin, list):
                for i in range(len(fisher_info_kin)):
                    fisher_info_kin[i] += this_fisher_info[i]
            else:
                fisher_info_kin += this_fisher_info

            if this_covariance is not None:
                if covariance is None:
                    covariance = this_covariance
                elif isinstance(covariance, list):
                    for i in range(len(covariance)):
                        covariance[i] += this_covariance[i]
                else:
                    covariance += this_covariance

    # Evaluation from unweighted event sample
    else:
        with less_logging():
            if model_is_ensemble:
                fisher_info_kin, covariance = model.calculate_fisher_information(
                    x=unweighted_x_sample_file,
                    theta=theta,
                    n_events=luminosity * total_xsec,
                    mode=mode,
                    calculate_covariance=calculate_covariance,
                )
            else:
                fisher_info_kin = model.calculate_fisher_information(
                    x=unweighted_x_sample_file, n_events=luminosity * total_xsec, theta=theta
                )
                covariance = None

    # Returns
    if model_is_ensemble:
        if covariance is None:
            # BUGFIX: with calculate_covariance=False the ensemble path can leave covariance as None;
            # adding None to rate_covariance raised a TypeError. Fall back to the rate covariance alone.
            return fisher_info_rate + fisher_info_kin, rate_covariance
        return fisher_info_rate + fisher_info_kin, rate_covariance + covariance

    return fisher_info_rate + fisher_info_kin, rate_covariance
def rate_information(
    self, theta, luminosity, cuts=None, efficiency_functions=None, include_nuisance_parameters=True
):
    """
    Computes the Fisher information carried by the total cross section
    alone, i.e. a counting measurement without any kinematic information.

    Parameters
    ----------
    theta : ndarray
        Parameter point `theta` at which the Fisher information matrix
        `I_ij(theta)` is evaluated.
    luminosity : float
        Luminosity in pb^-1.
    cuts : None or list of str, optional
        Cuts. Each entry is a parseable Python expression returning a bool
        (True if the event passes the cut). Default value: None.
    efficiency_functions : list of str or None
        Efficiencies. Each entry is a parseable Python expression returning
        a float efficiency factor. Default value: None.
    include_nuisance_parameters : bool, optional
        If True, nuisance parameters are taken into account. Default value:
        True.

    Returns
    -------
    fisher_information : ndarray
        Expected Fisher information in the total cross section with shape
        `(n_parameters, n_parameters)`.
    fisher_information_uncertainty : ndarray
        Covariance matrix of the Fisher information matrix with shape
        `(n_parameters, n_parameters, n_parameters, n_parameters)`,
        calculated with plain Gaussian error propagation.
    """
    include_nuisance_parameters = include_nuisance_parameters and (self.nuisance_parameters is not None)

    # Benchmark-wise cross sections and their statistical uncertainties
    xsecs, xsec_errors = self._calculate_xsec(
        cuts=cuts,
        efficiency_functions=efficiency_functions,
        return_benchmark_xsecs=True,
        return_error=True,
        include_nuisance_parameters=include_nuisance_parameters,
    )

    # Treat the total rate as a single "event" row
    xsecs = xsecs.reshape((1, -1))
    xsec_errors = xsec_errors.reshape((1, -1))

    # Fisher information of the counting measurement
    return self._calculate_fisher_information(
        theta=theta,
        weights_benchmarks=xsecs,
        luminosity=luminosity,
        sum_events=True,
        calculate_uncertainty=True,
        weights_benchmark_uncertainties=xsec_errors,
        include_nuisance_parameters=include_nuisance_parameters,
    )
def histo_information(
    self,
    theta,
    luminosity,
    observable,
    bins,
    histrange=None,
    cuts=None,
    efficiency_functions=None,
    n_events_dynamic_binning=None,
):
    """
    Computes the Fisher information in a one-dimensional histogram of an
    observable (parton-level or detector-level, depending on how the
    observations in the MadMiner file were calculated).

    Parameters
    ----------
    theta : ndarray
        Parameter point `theta` at which the Fisher information matrix
        `I_ij(theta)` is evaluated.
    luminosity : float
        Luminosity in pb^-1.
    observable : str
        Expression for the observable to be histogrammed. Parsed by
        Python's `eval()`; may use the names of the observables in the
        MadMiner files.
    bins : int or ndarray
        If int: number of bins (excluding overflow bins). Otherwise,
        explicit bin boundaries (excluding overflow bins).
    histrange : tuple of float or None, optional
        `(min, max)` of the histogram. Overflow bins are always added. If
        None and bins is an int, variable-width bins with equal cross
        section are constructed automatically. Default value: None.
    cuts : None or list of str, optional
        Cuts. Each entry is a parseable Python expression returning a bool
        (True if the event passes the cut). Default value: None.
    efficiency_functions : list of str or None
        Efficiencies. Each entry is a parseable Python expression returning
        a float efficiency factor. Default value: None.
    n_events_dynamic_binning : int or None, optional
        Number of events used for the dynamic binning (if histrange is
        None). If None, all events are used. These events are not shuffled,
        so sorted MadMiner files can cause issues with values other than
        None. Default value: None.

    Returns
    -------
    fisher_information : ndarray
        Expected Fisher information in the histogram with shape
        `(n_parameters, n_parameters)`.
    fisher_information_uncertainty : ndarray
        Covariance matrix of the Fisher information matrix with shape
        `(n_parameters, n_parameters, n_parameters, n_parameters)`,
        calculated with plain Gaussian error propagation.
    """
    # Normalize optional inputs
    cuts = [] if cuts is None else cuts
    efficiency_functions = [] if efficiency_functions is None else efficiency_functions

    # Bin edges exclude, the bin total includes, the overflow bins
    bin_boundaries, n_bins_total = self._calculate_binning(
        bins, cuts, efficiency_functions, histrange, n_events_dynamic_binning, observable, theta
    )

    # Accumulate per-bin benchmark weights and their squares batch by batch
    binned_weights = np.zeros((n_bins_total, self.n_benchmarks))
    binned_weights_squared = np.zeros((n_bins_total, self.n_benchmarks))

    for obs_batch, weight_batch in self.event_loader():
        # Apply cuts
        passed = [self._pass_cuts(event, cuts) for event in obs_batch]
        obs_batch = obs_batch[passed]
        weight_batch = weight_batch[passed]

        # Apply efficiencies
        eff_factors = np.array([self._eval_efficiency(event, efficiency_functions) for event in obs_batch])
        weight_batch = weight_batch * eff_factors[:, np.newaxis]

        # Histogrammed observable per event
        values = np.asarray([self._eval_observable(event, observable) for event in obs_batch])

        # Assign events to bins
        bin_indices = np.searchsorted(bin_boundaries, values)
        assert ((0 <= bin_indices) & (bin_indices < n_bins_total)).all(), "Wrong bin {}".format(bin_indices)

        for b in range(n_bins_total):
            in_bin = weight_batch[bin_indices == b]
            if len(in_bin) > 0:
                binned_weights[b] += np.sum(in_bin, axis=0)
                binned_weights_squared[b] += np.sum(in_bin ** 2, axis=0)

    bin_uncertainties = binned_weights_squared ** 0.5

    # Check cross sections per bin
    self._check_binning_stats(binned_weights, bin_uncertainties, theta)

    # Fisher information of the histogram
    return self._calculate_fisher_information(
        theta,
        binned_weights,
        luminosity,
        sum_events=True,
        weights_benchmark_uncertainties=bin_uncertainties,
        calculate_uncertainty=True,
    )
def histo_information_2d(
    self,
    theta,
    luminosity,
    observable1,
    bins1,
    observable2,
    bins2,
    histrange1=None,
    histrange2=None,
    cuts=None,
    efficiency_functions=None,
    n_events_dynamic_binning=None,
):
    """
    Calculates the Fisher information in a two-dimensional histogram of two (parton-level or detector-level,
    depending on how the observations in the MadMiner file were calculated) observables.

    Parameters
    ----------
    theta : ndarray
        Parameter point `theta` at which the Fisher information matrix `I_ij(theta)` is evaluated.
    luminosity : float
        Luminosity in pb^-1.
    observable1 : str
        Expression for the first observable to be histogrammed. The str will be parsed by Python's `eval()` function
        and can use the names of the observables in the MadMiner files.
    bins1 : int or ndarray
        If int: number of bins along the first axis in the histogram, excluding overflow bins.
        Otherwise, defines the bin boundaries along the first axis in the histogram (excluding overflow bins).
    observable2 : str
        Expression for the second observable to be histogrammed. The str will be parsed by Python's `eval()`
        function and can use the names of the observables in the MadMiner files.
    bins2 : int or ndarray
        If int: number of bins along the second axis in the histogram, excluding overflow bins.
        Otherwise, defines the bin boundaries along the second axis in the histogram (excluding overflow bins).
    histrange1 : tuple of float or None, optional
        Minimum and maximum value of the first axis of the histogram in the form `(min, max)`. Overflow bins are
        always added. If None, variable-width bins with equal cross section are constructed automatically. Default
        value: None.
    histrange2 : tuple of float or None, optional
        Minimum and maximum value of the second axis of the histogram in the form `(min, max)`. Overflow bins are
        always added. If None, variable-width bins with equal cross section are constructed automatically. Default
        value: None.
    cuts : None or list of str, optional
        Cuts. Each entry is a parseable Python expression that returns a bool (True if the event should pass a cut,
        False otherwise). Default value: None.
    efficiency_functions : list of str or None
        Efficiencies. Each entry is a parseable Python expression that returns a float for the efficiency of one
        component. Default value: None.
    n_events_dynamic_binning : int or None, optional
        Number of events used to calculate the dynamic binning (if histrange is None). If None, all events are used.
        Note that these events are not shuffled, so if the events in the MadMiner file are sorted, using a value
        different from None can cause issues. Default value: None.

    Returns
    -------
    fisher_information : ndarray
        Expected Fisher information in the histogram with shape `(n_parameters, n_parameters)`.
    fisher_information_uncertainty : ndarray
        Covariance matrix of the Fisher information matrix with shape
        `(n_parameters, n_parameters, n_parameters, n_parameters)`, calculated with plain Gaussian error
        propagation.
    """
    # Input
    if cuts is None:
        cuts = []
    if efficiency_functions is None:
        efficiency_functions = []

    # Binning per axis (boundaries exclude, totals include, the overflow bins)
    bin1_boundaries, n_bins1_total = self._calculate_binning(
        bins1, cuts, efficiency_functions, histrange1, n_events_dynamic_binning, observable1, theta
    )
    bin2_boundaries, n_bins2_total = self._calculate_binning(
        bins2, cuts, efficiency_functions, histrange2, n_events_dynamic_binning, observable2, theta
    )

    # Loop over batches
    weights_benchmarks = np.zeros((n_bins1_total, n_bins2_total, self.n_benchmarks))
    weights_squared_benchmarks = np.zeros((n_bins1_total, n_bins2_total, self.n_benchmarks))

    for observations, weights in self.event_loader():
        # Cuts
        cut_filter = [self._pass_cuts(obs_event, cuts) for obs_event in observations]
        observations = observations[cut_filter]
        weights = weights[cut_filter]

        # Efficiencies
        efficiencies = np.array(
            [self._eval_efficiency(obs_event, efficiency_functions) for obs_event in observations]
        )
        weights *= efficiencies[:, np.newaxis]

        # Evaluate histogrammed observables
        histo1_observables = np.asarray(
            [self._eval_observable(obs_event, observable1) for obs_event in observations]
        )
        histo2_observables = np.asarray(
            [self._eval_observable(obs_event, observable2) for obs_event in observations]
        )

        # Find bins
        i_bins1 = np.searchsorted(bin1_boundaries, histo1_observables)
        i_bins2 = np.searchsorted(bin2_boundaries, histo2_observables)

        assert ((0 <= i_bins1) & (i_bins1 < n_bins1_total)).all(), "Wrong bin {}".format(i_bins1)
        # BUGFIX: second-axis indices must be validated against n_bins2_total (was n_bins1_total),
        # which mis-validated whenever the two axes have different bin counts.
        assert ((0 <= i_bins2) & (i_bins2 < n_bins2_total)).all(), "Wrong bin {}".format(i_bins2)

        # Add up
        for i in range(n_bins1_total):
            for j in range(n_bins2_total):
                if len(weights[(i_bins1 == i) & (i_bins2 == j)]) > 0:
                    weights_benchmarks[i, j] += np.sum(weights[(i_bins1 == i) & (i_bins2 == j)], axis=0)
                    weights_squared_benchmarks[i, j] += np.sum(
                        weights[(i_bins1 == i) & (i_bins2 == j)] ** 2, axis=0
                    )

    weights_benchmark_uncertainties = weights_squared_benchmarks ** 0.5

    # Calculate Fisher information in histogram (flatten the two axes into one bin dimension)
    weights_benchmarks = weights_benchmarks.reshape(-1, self.n_benchmarks)
    weights_benchmark_uncertainties = weights_benchmark_uncertainties.reshape(-1, self.n_benchmarks)

    self._check_binning_stats(
        weights_benchmarks, weights_benchmark_uncertainties, theta, n_bins_last_axis=n_bins2_total
    )
    fisher_info, covariance = self._calculate_fisher_information(
        theta,
        weights_benchmarks,
        luminosity,
        sum_events=True,
        weights_benchmark_uncertainties=weights_benchmark_uncertainties,
        calculate_uncertainty=True,
    )

    return fisher_info, covariance
def histogram_of_information(
    self,
    theta,
    observable,
    nbins,
    histrange,
    model_file=None,
    luminosity=300000.0,
    cuts=None,
    efficiency_functions=None,
    batch_size=100000,
    test_split=0.2,
):
    """
    Calculates the full and rate-only Fisher information in slices of one observable. For the full
    information, it will return the truth-level information if model_file is None, and otherwise the
    detector-level information based on the SALLY-type score estimator saved in model_file.

    Parameters
    ----------
    theta : ndarray
        Parameter point `theta` at which the Fisher information matrix `I_ij(theta)` is evaluated.

    observable : str
        Expression for the observable to be sliced. The str will be parsed by Python's `eval()` function
        and can use the names of the observables in the MadMiner files.

    nbins : int
        Number of bins in the slicing, excluding overflow bins.

    histrange : tuple of float
        Minimum and maximum value of the slicing in the form `(min, max)`. Overflow bins are always added.

    model_file : str or None, optional
        If None, the truth-level Fisher information is calculated. If str, filename of a trained local score
        regression model that was trained on samples from `theta` (see `madminer.ml.Estimator`). Default value:
        None.

    luminosity : float, optional
        Luminosity in pb^-1. Default value: 300000.

    cuts : None or list of str, optional
        Cuts. Each entry is a parseable Python expression that returns a bool (True if the event should pass a cut,
        False otherwise). Default value: None.

    efficiency_functions : list of str or None
        Efficiencies. Each entry is a parseable Python expression that returns a float for the efficiency of one
        component. Default value: None.

    batch_size : int, optional
        If model_file is not None: Batch size. Default value: 100000.

    test_split : float or None, optional
        If model_file is not None: If unweighted_x_sample_file is None, this determines the fraction of weighted
        events used for evaluation.
        If None, all events are used (this will probably include events used during training!). Default value: 0.2.

    Returns
    -------
    bin_boundaries : ndarray
        Observable slice boundaries.

    sigma_bins : ndarray
        Cross section in pb in each of the slices.

    fisher_infos_rate : ndarray
        Expected rate-only Fisher information for each slice. Has shape `(n_slices, n_parameters, n_parameters)`.

    fisher_infos_full : ndarray
        Expected full Fisher information for each slice. Has shape
        `(n_slices, n_parameters, n_parameters)`.
    """
    # Input
    if cuts is None:
        cuts = []
    if efficiency_functions is None:
        efficiency_functions = []

    # Theta morphing matrix
    theta_matrix = self._get_theta_benchmark_matrix(theta)

    # Number of bins: two extra bins catch underflow and overflow
    n_bins_total = nbins + 2
    bin_boundaries = np.linspace(histrange[0], histrange[1], num=nbins + 1)

    # Prepare output
    weights_benchmarks_bins = np.zeros((n_bins_total, self.n_benchmarks))
    fisher_info_full_bins = np.zeros((n_bins_total, self.n_parameters, self.n_parameters))

    # Main loop: truth-level case
    if model_file is None:
        for observations, weights in self.event_loader():
            # Cuts
            cut_filter = [self._pass_cuts(obs_event, cuts) for obs_event in observations]
            observations = observations[cut_filter]
            weights = weights[cut_filter]

            # Efficiencies
            efficiencies = np.array(
                [self._eval_efficiency(obs_event, efficiency_functions) for obs_event in observations]
            )
            weights *= efficiencies[:, np.newaxis]

            # Fisher info per event
            fisher_info_events = self._calculate_fisher_information(theta, weights, luminosity, sum_events=False)

            # Evaluate histogrammed observable
            histo_observables = np.asarray(
                [self._eval_observable(obs_event, observable) for obs_event in observations]
            )

            # Get rid of nuisance parameters
            fisher_info_events = fisher_info_events[:, : self.n_parameters, : self.n_parameters]

            # Find bins
            bins = np.searchsorted(bin_boundaries, histo_observables)
            assert ((0 <= bins) & (bins < n_bins_total)).all(), "Wrong bin {}".format(bins)

            # Add up
            for i in range(n_bins_total):
                if len(weights[bins == i]) > 0:
                    weights_benchmarks_bins[i] += np.sum(weights[bins == i], axis=0)
                    fisher_info_full_bins[i] += np.sum(fisher_info_events[bins == i], axis=0)

    # ML case
    else:
        # Load SALLY model (an Ensemble when the directory carries an ensemble.json)
        if os.path.isdir(model_file) and os.path.exists(model_file + "/ensemble.json"):
            model_is_ensemble = True
            model = Ensemble()
            model.load(model_file)
        else:
            model_is_ensemble = False
            model = ScoreEstimator()
            model.load(model_file)

        # Nuisance parameters?
        if model.n_parameters == self.n_parameters:
            logger.debug(
                "Found %s parameters in SALLY model, matching %s physical parameters in MadMiner file",
                model.n_parameters,
                self.n_parameters,
            )
            include_nuisance_parameters = False
        elif model.n_parameters == self.n_parameters + self.n_nuisance_parameters:
            logger.debug(
                "Found %s parameters in SALLY model, matching %s physical parameters + %s nuisance parameters"
                + " in MadMiner file",
                model.n_parameters,
                self.n_parameters,
                self.n_nuisance_parameters,
            )
            include_nuisance_parameters = True
        else:
            # Bug fix: the message and its arguments used to be passed as separate
            # RuntimeError args, so the %s placeholders were never interpolated.
            raise RuntimeError(
                "Inconsistent numbers of parameters! Found %s in SALLY model, %s physical parameters in "
                "MadMiner file, and %s nuisance parameters in MadMiner file."
                % (model.n_parameters, self.n_parameters, self.n_nuisance_parameters)
            )

        # Total xsec
        total_xsec = self._calculate_xsec(theta=theta)
        logger.debug("Total cross section: %s pb", total_xsec)

        # Which events to sum over (skip the training fraction when test_split is a valid fraction)
        if test_split is None or test_split <= 0.0 or test_split >= 1.0:
            start_event = 0
        else:
            start_event = int(round((1.0 - test_split) * self.n_samples, 0)) + 1

        if start_event > 0:
            total_sum_weights_theta = self._calculate_xsec(theta=theta, start_event=start_event)
        else:
            total_sum_weights_theta = total_xsec

        # Number of batches
        n_batches = int(np.ceil((self.n_samples - start_event) / batch_size))
        n_batches_verbose = max(int(round(n_batches / 10, 0)), 1)

        # ML main loop
        for i_batch, (observations, weights_benchmarks) in enumerate(
            self.event_loader(
                batch_size=batch_size, start=start_event, include_nuisance_parameters=include_nuisance_parameters
            )
        ):
            if (i_batch + 1) % n_batches_verbose == 0:
                logger.info("Evaluating kinematic Fisher information on batch %s / %s", i_batch + 1, n_batches)
            else:
                logger.debug("Evaluating kinematic Fisher information on batch %s / %s", i_batch + 1, n_batches)

            # Cuts
            cut_filter = [self._pass_cuts(obs_event, cuts) for obs_event in observations]
            observations = observations[cut_filter]
            weights_benchmarks = weights_benchmarks[cut_filter]

            # Efficiencies
            efficiencies = np.array(
                [self._eval_efficiency(obs_event, efficiency_functions) for obs_event in observations]
            )
            weights_benchmarks *= efficiencies[:, np.newaxis]

            # Rescale for test_split so the partial sample represents the full luminosity
            if test_split is not None:
                correction = np.array([1.0 / test_split for obs_event in observations])
                weights_benchmarks *= correction[:, np.newaxis]

            weights_theta = mdot(theta_matrix, weights_benchmarks)

            # Calculate Fisher info on this batch
            if model_is_ensemble:
                fisher_info_events, _ = model.calculate_fisher_information(
                    x=observations,
                    obs_weights=weights_theta,
                    n_events=luminosity * np.sum(weights_theta),
                    mode="score",
                    calculate_covariance=False,
                    sum_events=False,
                )
            else:
                fisher_info_events = model.calculate_fisher_information(
                    x=observations,
                    weights=weights_theta,
                    n_events=luminosity * np.sum(weights_theta),
                    sum_events=False,
                )

            # Get rid of nuisance parameters
            if include_nuisance_parameters:
                fisher_info_events = fisher_info_events[:, : self.n_parameters, : self.n_parameters]

            # Evaluate histogrammed observable
            histo_observables = np.asarray(
                [self._eval_observable(obs_event, observable) for obs_event in observations]
            )

            # Find bins
            bins = np.searchsorted(bin_boundaries, histo_observables)
            assert ((0 <= bins) & (bins < n_bins_total)).all(), "Wrong bin {}".format(bins)

            # Add up
            for i in range(n_bins_total):
                if len(weights_benchmarks[bins == i]) > 0:
                    weights_benchmarks_bins[i] += np.sum(weights_benchmarks[bins == i], axis=0)
                    fisher_info_full_bins[i] += np.sum(fisher_info_events[bins == i], axis=0)

    # Calculate xsecs in bins
    sigma_bins = mdot(theta_matrix, weights_benchmarks_bins)  # (n_bins,)

    # Calculate rate-only Fisher informations in bins
    fisher_info_rate_bins = self._calculate_fisher_information(
        theta, weights_benchmarks_bins, luminosity, sum_events=False
    )

    # Get rid of nuisance parameters
    fisher_info_rate_bins = fisher_info_rate_bins[:, : self.n_parameters, : self.n_parameters]

    # If ML: the score model output lacks the rate part of the information, so add it here
    if model_file is not None:
        fisher_info_full_bins += fisher_info_rate_bins

    return bin_boundaries, sigma_bins, fisher_info_rate_bins, fisher_info_full_bins
def histogram_of_sigma_dsigma(self, theta, observable, nbins, histrange, cuts=None, efficiency_functions=None):
    """
    Fills events into histograms and calculates the cross section and its first derivative
    with respect to theta for each bin.

    Parameters
    ----------
    theta : ndarray
        Parameter point `theta` at which the cross sections and their derivatives are evaluated.

    observable : str
        Expression for the observable to be histogrammed. The str will be parsed by Python's `eval()` function
        and can use the names of the observables in the MadMiner files.

    nbins : int
        Number of bins in the histogram, excluding overflow bins.

    histrange : tuple of float or None
        Minimum and maximum value of the histogram in the form `(min, max)`. Overflow bins are always added.
        If None, dynamic (quantile-based) binning is used and no overflow bins are added.

    cuts : None or list of str, optional
        Cuts. Each entry is a parseable Python expression that returns a bool (True if the event should pass a cut,
        False otherwise). Default value: None.

    efficiency_functions : list of str or None
        Efficiencies. Each entry is a parseable Python expression that returns a float for the efficiency of one
        component. Default value: None.

    Returns
    -------
    bin_boundaries : ndarray
        Observable slice boundaries.

    sigma_bins : ndarray
        Cross section in pb in each of the slices.

    dsigma_bins : ndarray
        Derivative of the cross section with respect to theta in each of the slices.
        Has shape `(n_parameters, n_bins)`.
    """
    # Input
    if cuts is None:
        cuts = []
    if efficiency_functions is None:
        efficiency_functions = []

    # Binning: dynamic (quantile-based, no overflow bins) when histrange is None,
    # otherwise equidistant with two overflow bins
    dynamic_binning = histrange is None
    if dynamic_binning:
        n_bins_total = nbins
        bin_boundaries = self._calculate_dynamic_binning(observable, theta, nbins, None, cuts, efficiency_functions)
    else:
        n_bins_total = nbins + 2
        bin_boundaries = np.linspace(histrange[0], histrange[1], num=nbins + 1)

    # Prepare output
    weights_benchmarks_bins = np.zeros((n_bins_total, self.n_benchmarks))

    # Main loop: truth-level case
    for observations, weights in self.event_loader():
        # Cuts
        cut_filter = [self._pass_cuts(obs_event, cuts) for obs_event in observations]
        observations = observations[cut_filter]
        weights = weights[cut_filter]

        # Efficiencies
        efficiencies = np.array(
            [self._eval_efficiency(obs_event, efficiency_functions) for obs_event in observations]
        )
        weights *= efficiencies[:, np.newaxis]

        # Evaluate histogrammed observable
        histo_observables = np.asarray([self._eval_observable(obs_event, observable) for obs_event in observations])

        # Find bins
        bins = np.searchsorted(bin_boundaries, histo_observables)
        assert ((0 <= bins) & (bins < n_bins_total)).all(), "Wrong bin {}".format(bins)

        # Accumulate benchmark weights per bin
        for i in range(n_bins_total):
            if len(weights[bins == i]) > 0:
                weights_benchmarks_bins[i] += np.sum(weights[bins == i], axis=0)

    # Get morphing matrices
    theta_matrix = self._get_theta_benchmark_matrix(theta, zero_pad=False)  # (n_benchmarks_phys,)
    dtheta_matrix = self._get_dtheta_benchmark_matrix(theta, zero_pad=False)  # (n_parameters, n_benchmarks_phys)

    # Calculate xsecs and their theta-derivatives in bins
    sigma_bins = mdot(theta_matrix, weights_benchmarks_bins)  # (n_bins,)
    dsigma_bins = mdot(dtheta_matrix, weights_benchmarks_bins)  # (n_parameters, n_bins)

    return bin_boundaries, sigma_bins, dsigma_bins
def nuisance_constraint_information(self):
    """Return the Fisher information of the Gaussian constraint terms on the nuisance parameters.

    The result is a diagonal matrix of shape (n_parameters + n_nuisance_parameters,)^2:
    zero entries for the physical parameters, unit entries for the nuisance parameters.
    """
    entries = np.concatenate([np.zeros(self.n_parameters), np.ones(self.n_nuisance_parameters)])
    return np.diag(entries)
def _check_binning_stats(
    self, weights_benchmarks, weights_benchmark_uncertainties, theta, report=5, n_bins_last_axis=None
):
    """Log the `report` bins whose rates carry the largest relative statistical uncertainty.

    When `n_bins_last_axis` is given, flat bin indices are reported as 1-based
    2D labels `(row, column)`; otherwise as 1-based flat indices.
    """
    benchmark_mixing = self._get_theta_benchmark_matrix(theta, zero_pad=False)  # (n_benchmarks_phys,)
    rates = mdot(benchmark_mixing, weights_benchmarks)  # (n_bins,)
    rate_errors = mdot(benchmark_mixing, weights_benchmark_uncertainties)  # (n_bins,)
    relative_errors = rate_errors / np.maximum(rates, 1.0e-12)

    worst_first = np.argsort(relative_errors)[::-1]

    logger.info("Bins with largest statistical uncertainties on rates:")
    for flat_index in worst_first[:report]:
        if n_bins_last_axis is None:
            bin_label = flat_index + 1
        else:
            bin_label = (flat_index // n_bins_last_axis + 1, flat_index % n_bins_last_axis + 1)
        logger.info(
            " Bin %s: (%.5f +/- %.5f) fb (%.0f %%)",
            bin_label,
            1000.0 * rates[flat_index],
            1000.0 * rate_errors[flat_index],
            100.0 * relative_errors[flat_index],
        )
def _calculate_binning(
self, bins, cuts, efficiency_functions, histrange, n_events_dynamic_binning, observable, theta
):
dynamic_binning = histrange is None and isinstance(bins, int)
if dynamic_binning:
n_bins_total = bins
bin_boundaries = self._calculate_dynamic_binning(
observable, theta, bins, n_events_dynamic_binning, cuts, efficiency_functions
)
logger.debug("Automatic dynamic binning: bin boundaries %s", bin_boundaries)
elif isinstance(bins, int):
n_bins_total = bins + 2
bin_boundaries = np.linspace(histrange[0], histrange[1], num=bins + 1)
else:
bin_boundaries = bins
n_bins_total = len(bins) + 1
return bin_boundaries, n_bins_total
def _calculate_fisher_information(
    self,
    theta,
    weights_benchmarks,
    luminosity=300000.0,
    include_nuisance_parameters=True,
    sum_events=False,
    calculate_uncertainty=False,
    weights_benchmark_uncertainties=None,
):
    """
    Low-level function that calculates a list of full Fisher information matrices for a given parameter point and
    benchmark weights. Do not use this function directly, instead use the other `FisherInformation` functions.

    Parameters
    ----------
    theta : ndarray
        Parameter point.

    weights_benchmarks : ndarray
        Benchmark weights. Shape (n_events, n_benchmark).

    luminosity : float, optional
        Luminosity in pb^-1. Default value: 300000.

    include_nuisance_parameters : bool, optional
        If True, nuisance parameters are taken into account. Default value: True.

    sum_events : bool, optional
        If True, returns the summed Fisher information. Otherwise, a list of Fisher
        information matrices for each event. Default value: False.

    calculate_uncertainty : bool, optional
        Whether an uncertainty of the result is calculated. Note that this uncertainty is currently only
        implemented for the "physical" part of the Fisher information, not for the nuisance parameters. Default
        value: False.

    weights_benchmark_uncertainties : ndarray or None, optional
        If calculate_uncertainty is True, weights_benchmark_uncertainties sets the uncertainties on each entry of
        weights_benchmarks. If None, weights_benchmark_uncertainties = weights_benchmarks is assumed.

    Returns
    -------
    fisher_information : ndarray
        If sum_events is True, the return value is an nxn matrix, the total Fisher information
        summed over all events. Otherwise, a n_events x n_parameters x n_parameters tensor is returned that
        includes the Fisher information matrices for each event separately.

    fisher_information_uncertainty : ndarray
        Only returned if calculate_uncertainty is True. Covariance matrix of the Fisher information. Note that this
        does not take into account any uncertainty on the nuisance parameter part of the Fisher information, and
        correlations between events are neglected. Note that independent of sum_events, the covariance matrix is
        always summed over the events.
    """
    # Nuisance parameters only enter if the setup has them at all
    include_nuisance_parameters = include_nuisance_parameters and self.include_nuisance_parameters

    # Get morphing matrices
    theta_matrix = self._get_theta_benchmark_matrix(theta, zero_pad=False)  # (n_benchmarks_phys,)
    dtheta_matrix = self._get_dtheta_benchmark_matrix(theta, zero_pad=False)  # (n_parameters, n_benchmarks_phys)

    # Get differential xsec per event, and the derivative wrt to theta
    sigma = mdot(theta_matrix, weights_benchmarks)  # Shape (n_events,)
    total_xsec = np.sum(sigma)  # NOTE(review): total_xsec is currently unused in this function
    inv_sigma = sanitize_array(1.0 / sigma)  # Shape (n_events,); sanitized to guard against division by zero
    dsigma = mdot(dtheta_matrix, weights_benchmarks)  # Shape (n_parameters, n_events)

    # Calculate physics Fisher info for this event:
    # I_ij = luminosity * (d_i sigma)(d_j sigma) / sigma, one matrix per event
    fisher_info_phys = luminosity * np.einsum("n,in,jn->nij", inv_sigma, dsigma, dsigma)

    # Nuisance parameter Fisher info
    if include_nuisance_parameters:
        nuisance_a = self.nuisance_morpher.calculate_a(weights_benchmarks)  # Shape (n_nuisance_params, n_events)
        # grad_i dsigma(x), where i is a nuisance parameter, is given by
        # sigma[np.newaxis, :] * a
        fisher_info_nuisance = luminosity * np.einsum("n,in,jn->nij", sigma, nuisance_a, nuisance_a)
        fisher_info_mix = luminosity * np.einsum("in,jn->nij", dsigma, nuisance_a)
        fisher_info_mix_transposed = luminosity * np.einsum("in,jn->nji", dsigma, nuisance_a)

        # Assemble the block matrix [[phys, mix], [mix^T, nuisance]] per event
        n_all_parameters = self.n_parameters + self.n_nuisance_parameters
        fisher_info = np.zeros((fisher_info_phys.shape[0], n_all_parameters, n_all_parameters))
        fisher_info[:, : self.n_parameters, : self.n_parameters] = fisher_info_phys
        fisher_info[:, : self.n_parameters, self.n_parameters :] = fisher_info_mix
        fisher_info[:, self.n_parameters :, : self.n_parameters] = fisher_info_mix_transposed
        fisher_info[:, self.n_parameters :, self.n_parameters :] = fisher_info_nuisance

    else:
        n_all_parameters = self.n_parameters
        fisher_info = fisher_info_phys

    # Error propagation
    if calculate_uncertainty:
        # Only the physical benchmark columns enter the uncertainty estimate
        if weights_benchmarks.shape[1] > self.n_benchmarks_phys:
            weights_benchmarks_phys = weights_benchmarks[:, np.logical_not(self.benchmark_is_nuisance)]
        else:
            weights_benchmarks_phys = weights_benchmarks

        n_events = weights_benchmarks_phys.shape[0]

        # Input uncertainties
        if weights_benchmark_uncertainties is None:
            weights_benchmark_uncertainties = weights_benchmarks_phys  # Shape (n_events, n_benchmarks_phys)

        # Build covariance matrix of inputs
        # We assume full correlation between weights_benchmarks[i, b1] and weights_benchmarks[i, b2]
        covariance_inputs = np.zeros((n_events, self.n_benchmarks_phys, self.n_benchmarks_phys))
        for i in range(n_events):
            for b1 in range(self.n_benchmarks_phys):
                for b2 in range(self.n_benchmarks_phys):
                    if b1 == b2:  # Diagonal
                        covariance_inputs[i, b1, b2] = weights_benchmark_uncertainties[i, b1] ** 2

                    else:  # Off-diagonal, same event
                        covariance_inputs[i, b1, b2] = (
                            weights_benchmark_uncertainties[i, b1] * weights_benchmark_uncertainties[i, b2]
                        )

        # Jacobian of the Fisher info wrt the benchmark weights (product rule: three terms).
        # NOTE(review): temp3 (the 1/sigma^2 term) enters with a plus sign here; the
        # analytic derivative of 1/sigma would carry a minus sign — confirm intended.
        temp1 = np.einsum("ib,jn,n->ijnb", dtheta_matrix, dsigma, inv_sigma)
        temp2 = np.einsum("jb,in,n->ijnb", dtheta_matrix, dsigma, inv_sigma)
        temp3 = np.einsum("b,in,jn,n,n->ijnb", theta_matrix, dsigma, dsigma, inv_sigma, inv_sigma)

        temp1, temp2, temp3 = sanitize_array(temp1), sanitize_array(temp2), sanitize_array(temp3)

        jacobian = luminosity * (temp1 + temp2 + temp3)  # (n_parameters, n_parameters, n_events, n_benchmarks_phys)

        # Covariance of information (summed over events)
        covariance_information_phys = np.einsum("ijnb,nbc,klnc->ijkl", jacobian, covariance_inputs, jacobian)

        if include_nuisance_parameters:
            # Embed the physical covariance; nuisance entries stay zero (not implemented)
            covariance_information = np.zeros(
                (n_all_parameters, n_all_parameters, n_all_parameters, n_all_parameters)
            )
            covariance_information[
                : self.n_parameters, : self.n_parameters, : self.n_parameters, : self.n_parameters
            ] = covariance_information_phys
        else:
            covariance_information = covariance_information_phys

        if sum_events:
            return np.sum(fisher_info, axis=0), covariance_information
        return fisher_info, covariance_information

    if sum_events:
        return np.sum(fisher_info, axis=0)
    return fisher_info
def _pass_cuts(self, observations, cuts=None):
    """Check whether a single event passes every cut.

    Parameters
    ----------
    observations : list of float
        Observable values for one event, aligned with `self.observables`.

    cuts : list of str or None, optional
        Parseable Python expressions; each must evaluate truthy for the event
        to pass. Default value: None (no cuts).

    Returns
    -------
    passes : bool
        True when all cuts pass (trivially True when no cuts are given).
    """
    cuts = [] if cuts is None else cuts
    assert len(observations) == len(self.observables), "Mismatch between observables and observations"

    # Evaluation namespace: math helpers plus one variable per observable
    variables = math_commands()
    variables.update(zip(self.observables, observations))

    # all() short-circuits on the first failing cut
    return all(bool(eval(cut, variables)) for cut in cuts)
def _eval_efficiency(self, observations, efficiency_functions=None):
    """Compute the total efficiency for a single event.

    Parameters
    ----------
    observations : list of float
        Observable values for one event, aligned with `self.observables`.

    efficiency_functions : list of str or None
        Parseable Python expressions, each returning the efficiency of one
        component; the result is their product. Default value: None.

    Returns
    -------
    efficiency : float
        Product of all component efficiencies (1.0 when none are given).
    """
    efficiency_functions = [] if efficiency_functions is None else efficiency_functions
    assert len(observations) == len(self.observables), "Mismatch between observables and observations"

    # Evaluation namespace: math helpers plus one variable per observable
    variables = math_commands()
    variables.update(zip(self.observables, observations))

    total_efficiency = 1.0
    for expression in efficiency_functions:
        total_efficiency *= float(eval(expression, variables))
    return total_efficiency
def _eval_observable(self, observations, observable_definition):
    """Evaluate an observable expression for a single event.

    Parameters
    ----------
    observations : ndarray
        Observable values for one event, shape `(n_observables,)`.

    observable_definition : str
        Parseable Python expression yielding the observable value.

    Returns
    -------
    observable_value : float
        The evaluated observable.
    """
    assert len(observations) == len(self.observables), "Mismatch between observables and observations"

    # Evaluation namespace: math helpers plus one variable per observable
    variables = math_commands()
    variables.update(zip(self.observables, observations))

    return float(eval(observable_definition, variables))
def _calculate_xsec(
    self,
    theta=None,
    cuts=None,
    efficiency_functions=None,
    return_benchmark_xsecs=False,
    return_error=False,
    include_nuisance_parameters=True,
    start_event=0,
):
    """
    Calculates the total cross section for a parameter point.

    Parameters
    ----------
    theta : ndarray or None, optional
        The parameter point. If None, return_benchmark_xsecs should be True. Default value: None.

    cuts : list of str or None, optional
        Cuts. Each entry is a parseable Python expression that returns a bool (True if the event should pass a cut,
        False otherwise). Default value: None.

    efficiency_functions : list of str or None
        Efficiencies. Each entry is a parseable Python expression that returns a float for the efficiency of one
        component. Default value: None.

    return_benchmark_xsecs : bool, optional
        If True, this function returns the benchmark xsecs. Otherwise, it returns the xsec at theta. Default value:
        False.

    return_error : bool, optional
        If True, this function also returns the square root of the summed squared weights.

    include_nuisance_parameters : bool, optional
        If True and if return_benchmark_xsecs is True, the nuisance benchmarks are included in the output. Default
        value: True.

    start_event : int, optional
        Index of first event in MadMiner file to consider. Default value: 0.

    Returns
    -------
    xsec : ndarray or float
        If return_benchmark_xsecs is True, an ndarray of benchmark xsecs in pb is returned. Otherwise, the cross
        section at theta in pb is returned.

    xsec_uncertainty : ndarray or float
        Only returned if return_error is True. Uncertainty (square root of the summed squared weights) on xsec.
    """
    logger.debug("Calculating total cross section for theta = %s", theta)

    # Input
    if cuts is None:
        cuts = []
    if efficiency_functions is None:
        efficiency_functions = []

    assert (theta is not None) or return_benchmark_xsecs, "Please supply theta or set return_benchmark_xsecs=True"

    # Total xsecs for benchmarks, accumulated batch by batch over the event loader
    xsecs_benchmarks = None
    xsecs_uncertainty_benchmarks = None

    for observations, weights in self.event_loader(
        start=start_event, include_nuisance_parameters=include_nuisance_parameters
    ):
        # Cuts
        cut_filter = [self._pass_cuts(obs_event, cuts) for obs_event in observations]
        observations = observations[cut_filter]
        weights = weights[cut_filter]

        # Efficiencies
        efficiencies = np.array(
            [self._eval_efficiency(obs_event, efficiency_functions) for obs_event in observations]
        )
        weights *= efficiencies[:, np.newaxis]

        # xsecs: running sum of weights; squared weights accumulate the variance
        if xsecs_benchmarks is None:
            xsecs_benchmarks = np.sum(weights, axis=0)
            xsecs_uncertainty_benchmarks = np.sum(weights ** 2, axis=0)
        else:
            xsecs_benchmarks += np.sum(weights, axis=0)
            xsecs_uncertainty_benchmarks += np.sum(weights ** 2, axis=0)

    assert xsecs_benchmarks is not None, "No events passed cuts"

    # sqrt of the summed squared weights = statistical uncertainty per benchmark
    xsecs_uncertainty_benchmarks = xsecs_uncertainty_benchmarks ** 0.5

    logger.debug("Benchmarks xsecs [pb]: %s", xsecs_benchmarks)

    if return_benchmark_xsecs:
        if return_error:
            return xsecs_benchmarks, xsecs_uncertainty_benchmarks
        return xsecs_benchmarks

    # Translate to xsec for theta via the morphing matrix
    theta_matrix = self._get_theta_benchmark_matrix(theta)
    xsec = mdot(theta_matrix, xsecs_benchmarks)
    # NOTE(review): the uncertainty is propagated linearly through the morphing
    # weights rather than in quadrature — confirm this approximation is intended.
    xsec_error = mdot(theta_matrix, xsecs_uncertainty_benchmarks)

    logger.debug("Theta matrix: %s", theta_matrix)
    logger.debug("Cross section at theta: %s pb", xsec)

    if return_error:
        return xsec, xsec_error
    return xsec
def _calculate_dynamic_binning(
    self, observable, theta, n_bins, n_events=None, cuts=None, efficiency_functions=None
):
    """Choose bin boundaries so each bin holds approximately equal weighted event counts at theta.

    Uses a single pilot batch of up to `n_events` events, applies cuts and efficiencies,
    evaluates `observable`, and places boundaries at the weighted quantiles of its
    distribution under the event weights morphed to `theta`. The outermost quantiles
    (0 and 1) are dropped, so `n_bins - 1` interior boundaries are returned.
    """
    if cuts is None:
        cuts = []
    if efficiency_functions is None:
        efficiency_functions = []

    # Quantile values: n_bins equal-probability slices
    quantile_values = np.linspace(0.0, 1.0, n_bins + 1)

    # Get data (first batch only — a pilot sample, not the full event set)
    x_pilot, weights_pilot = next(self.event_loader(batch_size=n_events))

    # Cuts
    cut_filter = [self._pass_cuts(x, cuts) for x in x_pilot]
    x_pilot = x_pilot[cut_filter]
    weights_pilot = weights_pilot[cut_filter]

    # Efficiencies
    efficiencies = np.array([self._eval_efficiency(x, efficiency_functions) for x in x_pilot])
    weights_pilot *= efficiencies[:, np.newaxis]

    # Evaluate histogrammed observable
    histo_observables_pilot = np.asarray([self._eval_observable(x, observable) for x in x_pilot])

    # Weights at theta
    theta_matrix = self._get_theta_benchmark_matrix(theta)
    weight_theta_pilot = mdot(theta_matrix, weights_pilot)

    # Bin boundaries at the weighted quantiles; drop the min/max endpoints
    bin_boundaries = weighted_quantile(histo_observables_pilot, quantile_values, weight_theta_pilot)
    bin_boundaries = bin_boundaries[1:-1]

    return bin_boundaries
# Aliases for backward compatibility with the pre-refactoring public API.
# NOTE(review): the right-hand names (truth_information, full_information, ...)
# are methods defined earlier in this class, outside this excerpt.
calculate_fisher_information_full_truth = truth_information
calculate_fisher_information_full_detector = full_information
calculate_fisher_information_rate = rate_information
calculate_fisher_information_hist1d = histo_information
calculate_fisher_information_hist2d = histo_information_2d
histogram_of_fisher_information = histogram_of_information
calculate_fisher_information_nuisance_constraints = nuisance_constraint_information
|
{"hexsha": "9c166932b57e72f1f7c40344681851ea7d740f07", "size": 66171, "ext": "py", "lang": "Python", "max_stars_repo_path": "madminer/fisherinformation/information.py", "max_stars_repo_name": "siyuchen95/madminer", "max_stars_repo_head_hexsha": "dfcbd7ee26c47dd294610c195fafce15f74c10eb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-09-09T20:58:31.000Z", "max_stars_repo_stars_event_max_datetime": "2020-09-10T01:35:35.000Z", "max_issues_repo_path": "madminer/fisherinformation/information.py", "max_issues_repo_name": "siyuchen95/madminer", "max_issues_repo_head_hexsha": "dfcbd7ee26c47dd294610c195fafce15f74c10eb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "madminer/fisherinformation/information.py", "max_forks_repo_name": "siyuchen95/madminer", "max_forks_repo_head_hexsha": "dfcbd7ee26c47dd294610c195fafce15f74c10eb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.1925587467, "max_line_length": 120, "alphanum_fraction": 0.6272838555, "include": true, "reason": "import numpy", "num_tokens": 13481}
|
#!/usr/bin/python
import unittest
import numpy as np
import tensorflow as tf
from kblocks.ops.interp import linear_interp
class TestInterp(tf.test.TestCase):
    """Tests for `kblocks.ops.interp.linear_interp` against hand-computed values."""

    def test_intercepts3d(self):
        """Interpolation at exact lattice points of a 3-D grid, then tiny perturbations."""
        # Grid value at integer coordinate (i, j, k) is 100*i + 10*j + k,
        # so lattice-point interpolants are easy to predict.
        grid = np.array([[0, 1, 2], [10, 11, 12], [20, 21, 22]], dtype=np.float32)
        grid = np.stack([grid, grid + 100, grid + 200, grid + 300])
        coords = np.array(
            [
                [1, 1, 1],
                [0, 1, 1],
            ],
            dtype=np.float32,
        )
        tf_vals = self.evaluate(linear_interp(grid, coords))
        expected = np.array([111, 11])
        np.testing.assert_allclose(tf_vals, expected)

        # Perturb the query coordinates by at most ~1e-5 and require the
        # interpolant to stay close to the lattice value.
        shift = np.random.randn(*coords.shape) * 1e-5  # pylint:disable=not-an-iterable
        max_shift = 1e-5
        too_big = np.abs(shift) > max_shift
        # NOTE(review): this clamps oversized entries to exactly +max_shift,
        # discarding their sign — presumably a magnitude clamp was intended; confirm.
        shift[too_big] *= max_shift / shift[too_big]
        coords += shift
        close_vals = self.evaluate(linear_interp(grid, coords))
        np.testing.assert_allclose(close_vals, expected, atol=1e-2)

    def test_known_vals1d(self):
        """1-D interpolation at fractional coordinates between known samples."""
        grid = [10, 11, 14]
        coords = [[0], [0.5], [1.2]]
        expected = np.array([10, 10.5, 11.6])
        actual = self.evaluate(linear_interp(grid, coords))
        np.testing.assert_allclose(actual, expected)

    def test_intercepts3d_batch(self):
        """Same as test_intercepts3d but with a leading batch dimension of 2."""
        grid = np.array([[0, 1, 2], [10, 11, 12], [20, 21, 22]], dtype=np.float32)
        grid = np.stack([grid, grid + 100, grid + 200, grid + 300])
        coords = np.array(
            [
                [1, 1, 1],
                [0, 1, 1],
            ],
            dtype=np.float32,
        )
        # Batch both the grid and the query coordinates; the second batch
        # element's grid is offset by 1000.
        grid = np.stack([grid, grid + 1000])
        coords = np.stack([coords, coords])
        actual = self.evaluate(linear_interp(grid, coords))
        expected = np.array([111, 11])
        expected = np.stack([expected, expected + 1000])
        np.testing.assert_allclose(actual, expected)
if __name__ == "__main__":
    import os

    # Silence TensorFlow's C++ info/warning logs before TF initializes.
    os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
    # tf.logging.set_verbosity(0)
    # Run the tests eagerly (TF1-compatible entry point).
    tf.compat.v1.enable_eager_execution()
    unittest.main()
|
{"hexsha": "2e0cd48e75de151c4e65589c2e47758282a7b6c9", "size": 2100, "ext": "py", "lang": "Python", "max_stars_repo_path": "kblocks/ops/interp_test.py", "max_stars_repo_name": "SamuelMarks/kblocks", "max_stars_repo_head_hexsha": "461705c6e89d3ae1c2d3ee90e27c580e683062a9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "kblocks/ops/interp_test.py", "max_issues_repo_name": "SamuelMarks/kblocks", "max_issues_repo_head_hexsha": "461705c6e89d3ae1c2d3ee90e27c580e683062a9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-12-20T01:50:52.000Z", "max_issues_repo_issues_event_max_datetime": "2020-12-20T01:50:52.000Z", "max_forks_repo_path": "kblocks/ops/interp_test.py", "max_forks_repo_name": "SamuelMarks/kblocks", "max_forks_repo_head_hexsha": "461705c6e89d3ae1c2d3ee90e27c580e683062a9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-12-18T03:16:57.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-18T03:16:57.000Z", "avg_line_length": 31.8181818182, "max_line_length": 87, "alphanum_fraction": 0.5657142857, "include": true, "reason": "import numpy", "num_tokens": 584}
|
import iris
import os
import copy
import xarray as xr
import numpy as np
import umdates_utils as um
## FILES -> IRIS
def file_to_cube(filename, filepath, constraints=None, verbose=True):
    """Load a single Iris cube from a file.

    Parameters
    ----------
    filename : str
        Name of the file to load.
    filepath : str
        Directory containing the file.
    constraints : dict or None, optional
        Attribute constraints forwarded to `iris.AttributeConstraint`.
        Default: no constraints.
    verbose : bool, optional
        If True, print the full path of the loaded file.

    Returns
    -------
    iris.cube.Cube
        The loaded cube.
    """
    # None default instead of a shared mutable `{}` default argument
    if constraints is None:
        constraints = {}
    full_path = os.path.join(filepath, filename)
    cube = iris.load_cube(full_path, iris.AttributeConstraint(**constraints))
    if verbose:
        # Bug fix: the message was an f-string with no placeholder; report the real path
        print(f'Cube loaded from {full_path}')
    return cube
def file_to_cubelist(filename, filepath, constraints=None, verbose=True):
    """Load an Iris cubelist from ``filepath/filename``.

    Parameters
    ----------
    filename : str
        Name of the file to load.
    filepath : str
        Directory containing the file.
    constraints : dict, optional
        Keyword arguments forwarded to ``iris.AttributeConstraint``.
    verbose : bool
        If True, print the path that was loaded.

    Returns
    -------
    iris.cube.CubeList
    """
    # Avoid the mutable-default pitfall; an empty dict means "no constraints".
    constraints = {} if constraints is None else constraints
    full_path = os.path.join(filepath, filename)
    cubelist = iris.load(full_path, iris.AttributeConstraint(**constraints))
    # The original f-string had no placeholder and always printed the literal
    # text "(unknown)"; report the actual path instead.
    if verbose:
        print(f'Cubelist loaded from {full_path}')
    return cubelist
## IRIS -> XARRAY
def cube_to_da(cube):
    """Convert an Iris cube into an ``xarray.DataArray``."""
    data_array = xr.DataArray.from_iris(cube)
    return data_array
def cube_to_ds(cube):
    """Convert an Iris cube into a single-variable ``xarray.Dataset``."""
    # Go via a DataArray first, then promote it to a Dataset.
    data_array = xr.DataArray.from_iris(cube)
    return data_array.to_dataset()
def cubelist_to_dalist(cubelist):
    """Convert an Iris CubeList into a list of ``xarray.DataArray`` objects."""
    return [cube_to_da(cube) for cube in cubelist]
def cubelist_to_dataset(cubelist):
    """Merge all cubes of an Iris cubelist into one ``xarray.Dataset``."""
    data_arrays = cubelist_to_dalist(cubelist)
    return xr.merge(data_arrays)
## XARRAY + IRIS -> ZARR
def ds_to_zarr(dataset, zarr_store, chunks=None, append_dim='time', verbose=False, **kwargs):
    """Write *dataset* to a new zarr store, or append when the store exists.

    Extra keyword arguments are forwarded to ``Dataset.to_zarr``.
    """
    if chunks:
        dataset = dataset.chunk(chunks=chunks)
    # An existing directory is taken to be an existing zarr store.
    store_exists = os.path.isdir(zarr_store)
    if store_exists:
        dataset.to_zarr(zarr_store, consolidated=True, append_dim=append_dim, **kwargs)
        if verbose:
            print(f'Appended dataset to {zarr_store}')
    else:
        dataset.to_zarr(zarr_store, mode='w', consolidated=True, **kwargs)
        if verbose:
            print(f'Written dataset to {zarr_store}')
def cubelist_to_zarr(cubelist, cubenames=None, coordname_mapping=None, **kwargs):
    """Write a cubelist to a new zarr store, or append to an existing one.

    Keyword arguments are forwarded to ``ds_to_zarr``.
    """
    # Deduplicate cube/coordinate names in place first when the caller
    # supplies overrides.
    if cubenames or coordname_mapping:
        rename_cubes(cubelist, cubenames, coordname_mapping, dryrun=False, verbose=False)
    ds_to_zarr(cubelist_to_dataset(cubelist), **kwargs)
## TIMES FROM DATA
def datetimes_from_cube(cube):
    """Return the time coordinate values of an Iris cube as a numpy array."""
    data_array = xr.DataArray.from_iris(cube)
    return data_array.time.data
def datetimes_from_zarr(zarr_store):
    """Return the time coordinate values stored in *zarr_store*."""
    dataset = xr.open_zarr(zarr_store)
    return dataset.time.data
## IRIS CUBE_NAME + COORD_NAME TOOLS
def unique_coords_list(cubelist):
    """Collect every distinct coordinate appearing in *cubelist*.

    Coordinates are compared by equality, kept in first-seen order, and
    returned as deep copies so callers cannot mutate the originals.
    """
    seen = []
    for cube in cubelist:
        for candidate in cube.coords():
            if candidate not in seen:
                seen.append(candidate)
    return copy.deepcopy(seen)
def get_new_coord_names(coords, verbose=False):
    """Propose unique names for duplicated coordinates.

    Walks *coords* in order; the second and later occurrences of a name
    get a numeric suffix (``name_1``, ``name_2``, ...).

    Parameters
    ----------
    coords : list
        Coordinate objects exposing a ``name()`` method.
    verbose : bool
        If True, print the list of names encountered.

    Returns
    -------
    tuple
        ``(coords_to_rename, new_names)`` as two parallel tuples.  Always
        a 2-tuple: the original returned a bare ``()`` when nothing needed
        renaming, which broke ``[0]``/``[1]`` indexing in rename_cubes.
    """
    names = []
    renamed = []
    for coord in coords:
        name = coord.name()
        names.append(name)
        occurrence = names.count(name)
        if occurrence > 1:
            renamed.append((coord, f'{name}_{occurrence-1}'))
    # The original printed this report twice when verbose; once is enough.
    if verbose:
        print(f'Names: {names}')
    return tuple(zip(*renamed)) if renamed else ((), ())
def get_new_cubename(cube):
    """Build a descriptive variable name for *cube*.

    Starts from the standard name (or the STASH code for anonymous cubes)
    and appends suffixes for pressure/height coordinates and for each cell
    method (with 'imum' stripped, e.g. maximum -> max).
    """
    # Using cube.name() here would repeat cell-method suffixes for
    # anonymous cubes, hence standard_name / STASH instead.
    base = cube.standard_name or str(cube.attributes['STASH'])
    parts = [base]
    names_present = {coord.name() for coord in cube.coords()}
    if 'pressure' in names_present:
        parts.append('at_pressure')
    if 'height' in names_present:
        heights = cube.coord('height')
        if len(heights.points) > 1:
            parts.append('at_height')
        else:
            # Single level: encode the rounded value and its units, e.g. at_10m.
            level = str(int(heights.points[0].round()))
            units = str(heights.units)
            parts.append(f'at_{level}{units}')
    for cell_method in cube.cell_methods:
        parts.append(cell_method.method.replace('imum', ''))
    return '_'.join(parts)
def rename_cubes(cubelist, cubenames=None, new_coordnames=None, dryrun=False, verbose=True):
    """Rename cubes and coordinates in place where necessary.

    Cubes that are anonymous (no standard name) or whose name collides
    with another cube in the list get a generated ``var_name``;
    coordinates listed in *new_coordnames* get their proposed ``var_name``.

    Parameters
    ----------
    cubelist : iterable of cubes
    cubenames : list of str, optional
        Names used for collision detection; defaults to the cubes' names.
    new_coordnames : tuple, optional
        ``(coords, names)`` parallel tuples as produced by
        ``get_new_coord_names``; computed from *cubelist* when omitted.
    dryrun : bool
        If True, only report what would change.
    verbose : bool
        If True, report changes as they are applied.
    """
    if cubenames is None:
        cubenames = [cube.name() for cube in cubelist]
    if new_coordnames is None:
        new_coordnames = get_new_coord_names(unique_coords_list(cubelist))
    # Guard against an empty rename proposal: get_new_coord_names could
    # return a bare (), which the original crashed on with IndexError.
    coords_to_rename = new_coordnames[0] if new_coordnames else ()
    proposed_names = new_coordnames[1] if new_coordnames else ()
    for cube in cubelist:
        # Rename cube if duplicate or unknown
        if cube.standard_name is None or cubenames.count(cube.name()) > 1:
            new_name = get_new_cubename(cube)
            if dryrun or verbose:
                print(f'{cube.name()} -> {new_name}')
            if not dryrun:
                cube.var_name = new_name
        elif dryrun or verbose:
            print(f'{cube.name()}')
        # Rename coords
        for coord in cube.coords():
            if coord in coords_to_rename:
                new_name = proposed_names[coords_to_rename.index(coord)]
                if not dryrun:
                    coord.var_name = new_name
                if dryrun or verbose:
                    print(f'  {new_name}')
            elif dryrun or verbose:
                print(f'  x {coord.name()}')
## TIMESTAMPS FROM FILENAMES
def umstamp_from_filename(filename):
    """Extract the UM date stamp from a filename such as ``run.pa<stamp>.pp``.

    The stamp is the second dot-separated field with its two leading
    stream-identifier characters removed.
    """
    stamp_field = filename.split('.')[1]
    return stamp_field[2:]
def filenames_to_datetimes(filenames, fmt='YYMDH'):
    """Convert UM filenames to datetimes via their embedded date stamps."""
    stamps = (umstamp_from_filename(name) for name in filenames)
    return np.array([um.convertFromUMStamp(stamp, fmt) for stamp in stamps])
def datetimes_to_timedeltas(datetimes):
    """Return the deltas between consecutive entries of *datetimes*.

    Parameters
    ----------
    datetimes : numpy.ndarray
        Array of datetime-like values in chronological order.

    Returns
    -------
    numpy.ndarray
        Array of length ``len(datetimes) - 1`` holding the element-wise
        differences ``datetimes[i+1] - datetimes[i]``.
    """
    # Bug fix: the original referenced an undefined global ``file_dates``
    # instead of the ``datetimes`` argument, raising NameError on every call.
    return datetimes[1:] - datetimes[0:-1]
def timedeltas_to_days(timedeltas):
    """Return the whole-day component of each timedelta as a numpy array."""
    day_counts = [delta.days for delta in timedeltas]
    return np.array(day_counts)
def plot_td_hist(td, frequency='Daily'):
    """Plot a log-scale histogram of day-gaps between consecutive files.

    Parameters:
        td: iterable of day counts (ints), e.g. from timedeltas_to_days.
        frequency: label used in the plot title (e.g. 'Daily', 'Monthly').

    NOTE(review): relies on a module-level ``plt`` (matplotlib.pyplot)
    that is not imported anywhere in this file -- confirm the import
    exists where this module is actually used, or add it.
    """
    # Bins cover gaps of 3..30 days; log y-axis makes rare large gaps visible.
    plt.hist(td, bins=np.arange(2.5, 30.5), log=True)
    plt.gcf().set_size_inches(15, 5)
    plt.xticks(np.arange(2, 31))
    plt.xlabel('Days')
    plt.ylabel('Number of timedeltas')
    plt.title(f'Distribution of times between filenames: {frequency}')
    plt.show()
def freq_hist_plot(filenames, frequency='Daily'):
    """Plot the distribution of gaps (in days) between the given filenames."""
    datetimes = filenames_to_datetimes(filenames)
    gaps_in_days = timedeltas_to_days(datetimes_to_timedeltas(datetimes))
    plot_td_hist(gaps_in_days, frequency=frequency)
|
{"hexsha": "0a7e9b86dd3b8fd208a07fd9d11b83cee1d2062e", "size": 6142, "ext": "py", "lang": "Python", "max_stars_repo_path": "notebooks/crd_utils.py", "max_stars_repo_name": "informatics-lab/pangolin__kevin_scratch", "max_stars_repo_head_hexsha": "98bbaa5205433a16dad44600128aeee029e73122", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "notebooks/crd_utils.py", "max_issues_repo_name": "informatics-lab/pangolin__kevin_scratch", "max_issues_repo_head_hexsha": "98bbaa5205433a16dad44600128aeee029e73122", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "notebooks/crd_utils.py", "max_forks_repo_name": "informatics-lab/pangolin__kevin_scratch", "max_forks_repo_head_hexsha": "98bbaa5205433a16dad44600128aeee029e73122", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-04-10T23:57:01.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-10T23:57:01.000Z", "avg_line_length": 33.7472527473, "max_line_length": 100, "alphanum_fraction": 0.6579290134, "include": true, "reason": "import numpy", "num_tokens": 1481}
|
#!/usr/bin/python3
import time
import pyaudio
import audioop
import pigpio
import numpy as np
import math
import threading
from flask import Flask
from util import fft, get_rgb_vol, get_rgb_freq_vol, colors, transform_brightness
# Raspberry PI GPIO pins
R = 17  # red channel
G = 22  # green channel
B = 24  # blue channel
# Microphone settings
fs = 32000  # sample rate in Hz
sample_format = pyaudio.paInt16  # 16-bit signed samples
chunk = 2048  # frames per microphone buffer read
channels = 1  # mono capture
# Module-level singletons: GPIO daemon handle, audio system, web app.
pi = pigpio.pi()
p = pyaudio.PyAudio()
app = Flask(__name__)
# The currently running LED worker thread (None when idle); managed by
# the route handlers below.
led_thread = None
def set_rgb(rgb):
    '''Set the LED color to the given (r, g, b) value via PWM duty cycles.'''
    red, green, blue = rgb
    for pin, level in ((R, red), (G, green), (B, blue)):
        pi.set_PWM_dutycycle(pin, level)
def music_loop(rgb):
    '''Thread reading mic data and setting RGB values based on mic volume.

    Opens the microphone stream, then loops until this thread's ``do_run``
    attribute is set to False, scaling the brightness of ``rgb`` by the
    current input volume.
    '''
    t = threading.current_thread()
    mic_stream = p.open(format=sample_format,
                        channels=channels,
                        rate=fs,
                        frames_per_buffer=chunk,
                        input=True)
    try:
        while getattr(t, 'do_run', True):
            # Read one buffer from the microphone.  np.fromstring is
            # deprecated (and removed for this use in newer numpy);
            # np.frombuffer is the supported equivalent.
            data = np.frombuffer(mic_stream.read(chunk), np.int16)
            vol = audioop.max(data, 2)
            set_rgb(get_rgb_vol(vol, rgb))
    finally:
        # Always release the audio stream, even if reading raises.
        mic_stream.close()
    print('Stopping audio loop')
def pulse_loop(rgb):
    '''Thread for pulsating light: brightness ramps 0..255 and back down.'''
    t = threading.current_thread()
    level = 0
    step = 1  # +1 while brightening, -1 while dimming
    # Single loop (rather than nested up/down loops) so the thread can
    # exit promptly when do_run is cleared.
    while getattr(t, 'do_run', True):
        set_rgb(transform_brightness(rgb, level))
        level += step
        if level == 255:
            step = -1
        elif level == 0:
            step = 1
        time.sleep(0.01)
    print('Exiting pulse loop')
def _stop_led_thread():
    '''Stop and join the currently running LED worker thread, if any.

    The original file repeated this three-line shutdown dance in every
    route handler; it is factored out here.
    '''
    global led_thread
    if led_thread is not None:
        led_thread.do_run = False
        led_thread.join()
        led_thread = None


@app.route('/pulse/<color>')
def pulse(color):
    '''Pulsating light in given color'''
    global led_thread
    _stop_led_thread()
    if color in colors:
        led_thread = threading.Thread(target=pulse_loop, args=(colors[color],))
        led_thread.start()
        return 'Started pulsing ' + color
    else:
        return 'Invalid color ' + color


@app.route('/music/<color>')
def music(color):
    '''Music mode in given color'''
    global led_thread
    _stop_led_thread()
    if color in colors:
        led_thread = threading.Thread(target=music_loop, args=(colors[color],))
        led_thread.start()
        return 'Music mode started with color ' + color
    else:
        # Bug fix: the original message read e.g. 'Invalid colorred'
        # (missing space before the color name).
        return 'Invalid color ' + color


@app.route('/color/<color>')
def color(color):
    '''Constant light in given color'''
    _stop_led_thread()
    if color in colors:
        set_rgb(colors[color])
    else:
        return 'Unknown color'
    return 'Set color to ' + color


@app.route('/off')
def off():
    '''LEDS off'''
    _stop_led_thread()
    set_rgb((0, 0, 0))
    return 'Leds off'
# Run the Flask development server on all interfaces, port 5000.
if __name__ == '__main__':
    app.run('0.0.0.0', 5000)
|
{"hexsha": "cd3feeed17867b0cf65a06ac49653a4f3a59e791", "size": 3332, "ext": "py", "lang": "Python", "max_stars_repo_path": "led_server/server.py", "max_stars_repo_name": "jalgroy/raspberrypi-led-server", "max_stars_repo_head_hexsha": "8cf518a284e79d7142da838e08dd42e1f060ff72", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-08-03T16:06:14.000Z", "max_stars_repo_stars_event_max_datetime": "2020-08-03T16:06:14.000Z", "max_issues_repo_path": "led_server/server.py", "max_issues_repo_name": "jalgroy/raspberrypi-led-server", "max_issues_repo_head_hexsha": "8cf518a284e79d7142da838e08dd42e1f060ff72", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "led_server/server.py", "max_forks_repo_name": "jalgroy/raspberrypi-led-server", "max_forks_repo_head_hexsha": "8cf518a284e79d7142da838e08dd42e1f060ff72", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.6814814815, "max_line_length": 109, "alphanum_fraction": 0.6143457383, "include": true, "reason": "import numpy", "num_tokens": 841}
|
section \<open>Proof Helpers\<close>
text\<open>In this section we define and prove lemmas that help to show that all identified critical
conditions hold for concurrent operations. Many of the following parts are derivations from the
definitions and lemmas of Gomes et al.\<close>
theory
"IMAP-proof-helpers"
imports
"IMAP-def"
begin
(* Interpreting every message delivered in a valid history prefix always
   succeeds: apply_operations over such a prefix never returns None. *)
lemma (in imap) apply_operations_never_fails:
  assumes "xs prefix of i"
  shows "apply_operations xs \<noteq> None"
  using assms proof(induction xs rule: rev_induct, clarsimp)
  case (snoc x xs) thus ?case
  proof (cases x)
    case (Broadcast e) thus ?thesis
      using snoc by force
  next
    case (Deliver e) thus ?thesis
      using snoc apply clarsimp unfolding interp_msg_def apply_operations_def
      by (metis (no_types, lifting) bind.bind_lunit interpret_op_def prefix_of_appendD)
  qed
qed
(* The next four lemmas establish that for Create, Append, Expunge and
   Store operations delivered in a valid prefix, the message id equals
   the operation's embedded tag id.  Each follows from the
   valid_behaviours assumption via deliver_in_prefix_is_valid. *)
lemma (in imap) create_id_valid:
  assumes "xs prefix of j"
    and "Deliver (i1, Create i2 e) \<in> set xs"
  shows "i1 = i2"
proof -
  have "\<exists>s. valid_behaviours s (i1, Create i2 e)"
    using assms deliver_in_prefix_is_valid by blast
  thus ?thesis
    by(simp add: valid_behaviours_def)
qed
lemma (in imap) append_id_valid:
  assumes "xs prefix of j"
    and "Deliver (i1, Append i2 e) \<in> set xs"
  shows "i1 = i2"
proof -
  have "\<exists>s. valid_behaviours s (i1, Append i2 e)"
    using assms deliver_in_prefix_is_valid by blast
  thus ?thesis
    by(simp add: valid_behaviours_def)
qed
lemma (in imap) expunge_id_valid:
  assumes "xs prefix of j"
    and "Deliver (i1, Expunge e mo i2) \<in> set xs"
  shows "i1 = i2"
proof -
  have "\<exists>s. valid_behaviours s (i1, Expunge e mo i2)"
    using assms deliver_in_prefix_is_valid by blast
  thus ?thesis
    by(simp add: valid_behaviours_def)
qed
lemma (in imap) store_id_valid:
  assumes "xs prefix of j"
    and "Deliver (i1, Store e mo i2) \<in> set xs"
  shows "i1 = i2"
proof -
  have "\<exists>s. valid_behaviours s (i1, Store e mo i2)"
    using assms deliver_in_prefix_is_valid by blast
  thus ?thesis
    by(simp add: valid_behaviours_def)
qed
(* added_ids collects the tag ids that delivered Create/Expunge operations
   on folder p introduce; added_files collects the ids introduced by
   delivered Append/Store operations on p. *)
definition (in imap) added_ids :: "('id \<times> ('id, 'b) operation) event list \<Rightarrow> 'b \<Rightarrow> 'id list" where
  "added_ids es p \<equiv> List.map_filter (\<lambda>x. case x of
    Deliver (i, Create j e) \<Rightarrow> if e = p then Some j else None |
    Deliver (i, Expunge e mo j) \<Rightarrow> if e = p then Some j else None |
    _ \<Rightarrow> None) es"
definition (in imap) added_files :: "('id \<times> ('id, 'b) operation) event list \<Rightarrow> 'b \<Rightarrow> 'id list" where
  "added_files es p \<equiv> List.map_filter (\<lambda>x. case x of
    Deliver (i, Append j e) \<Rightarrow> if e = p then Some j else None |
    Deliver (i, Store e mo j) \<Rightarrow> if e = p then Some j else None |
    _ \<Rightarrow> None) es"
\<comment> \<open>added files simplifier\<close>
(* Rewrite rules computing added_files on empty, appended, and singleton
   event lists: only Append/Store deliveries on the matching folder
   contribute an id; all other events contribute nothing. *)
lemma (in imap) [simp]:
  shows "added_files [] e = []"
  by (auto simp: added_files_def map_filter_def)
lemma (in imap) [simp]:
  shows "added_files (xs @ ys) e = added_files xs e @ added_files ys e"
  by (auto simp: added_files_def map_filter_append)
lemma (in imap) added_files_Broadcast_collapse [simp]:
  shows "added_files ([Broadcast e]) e' = []"
  by (auto simp: added_files_def map_filter_append map_filter_def)
lemma (in imap) added_files_Deliver_Delete_collapse [simp]:
  shows "added_files ([Deliver (i, Delete is e)]) e' = []"
  by (auto simp: added_files_def map_filter_append map_filter_def)
lemma (in imap) added_files_Deliver_Create_collapse [simp]:
  shows "added_files ([Deliver (i, Create j e)]) e' = []"
  by (auto simp: added_files_def map_filter_append map_filter_def)
lemma (in imap) added_files_Deliver_Expunge_collapse [simp]:
  shows "added_files ([Deliver (i, Expunge e mo j)]) e' = []"
  by (auto simp: added_files_def map_filter_append map_filter_def)
lemma (in imap) added_files_Deliver_Append_diff_collapse [simp]:
  shows "e \<noteq> e' \<Longrightarrow> added_files ([Deliver (i, Append j e)]) e' = []"
  by (auto simp: added_files_def map_filter_append map_filter_def)
lemma (in imap) added_files_Deliver_Append_same_collapse [simp]:
  shows "added_files ([Deliver (i, Append j e)]) e = [j]"
  by (auto simp: added_files_def map_filter_append map_filter_def)
lemma (in imap) added_files_Deliver_Store_diff_collapse [simp]:
  shows "e \<noteq> e' \<Longrightarrow> added_files ([Deliver (i, Store e mo j)]) e' = []"
  by (auto simp: added_files_def map_filter_append map_filter_def)
lemma (in imap) added_files_Deliver_Store_same_collapse [simp]:
  shows "added_files ([Deliver (i, Store e mo j)]) e = [j]"
  by (auto simp: added_files_def map_filter_append map_filter_def)
\<comment> \<open>added ids simplifier\<close>
(* Mirror rules for added_ids: only Create/Expunge deliveries on the
   matching folder contribute an id; everything else collapses to []. *)
lemma (in imap) [simp]:
  shows "added_ids [] e = []"
  by (auto simp: added_ids_def map_filter_def)
lemma (in imap) split_ids [simp]:
  shows "added_ids (xs @ ys) e = added_ids xs e @ added_ids ys e"
  by (auto simp: added_ids_def map_filter_append)
lemma (in imap) added_ids_Broadcast_collapse [simp]:
  shows "added_ids ([Broadcast e]) e' = []"
  by (auto simp: added_ids_def map_filter_append map_filter_def)
lemma (in imap) added_ids_Deliver_Delete_collapse [simp]:
  shows "added_ids ([Deliver (i, Delete is e)]) e' = []"
  by (auto simp: added_ids_def map_filter_append map_filter_def)
lemma (in imap) added_ids_Deliver_Append_collapse [simp]:
  shows "added_ids ([Deliver (i, Append j e)]) e' = []"
  by (auto simp: added_ids_def map_filter_append map_filter_def)
lemma (in imap) added_ids_Deliver_Store_collapse [simp]:
  shows "added_ids ([Deliver (i, Store e mo j)]) e' = []"
  by (auto simp: added_ids_def map_filter_append map_filter_def)
lemma (in imap) added_ids_Deliver_Create_diff_collapse [simp]:
  shows "e \<noteq> e' \<Longrightarrow> added_ids ([Deliver (i, Create j e)]) e' = []"
  by (auto simp: added_ids_def map_filter_append map_filter_def)
lemma (in imap) added_ids_Deliver_Expunge_diff_collapse [simp]:
  shows "e \<noteq> e' \<Longrightarrow> added_ids ([Deliver (i, Expunge e mo j)]) e' = []"
  by (auto simp: added_ids_def map_filter_append map_filter_def)
lemma (in imap) added_ids_Deliver_Create_same_collapse [simp]:
  shows "added_ids ([Deliver (i, Create j e)]) e = [j]"
  by (auto simp: added_ids_def map_filter_append map_filter_def)
lemma (in imap) added_ids_Deliver_Expunge_same_collapse [simp]:
  shows "added_ids ([Deliver (i, Expunge e mo j)]) e = [j]"
  by (auto simp: added_ids_def map_filter_append map_filter_def)
(* An id not among the ids added by an Expunge event differs from that
   event's tag; direct consequence of the same-folder collapse rule. *)
lemma (in imap) expunge_id_not_in_set:
  assumes "i1 \<notin> set (added_ids [Deliver (i, Expunge e mo i2)] e)"
  shows "i1 \<noteq> i2"
  using assms by simp
(* Every id in the first component of the interpreted state of a valid
   prefix originates from some Create/Expunge delivery in that prefix. *)
lemma (in imap) apply_operations_added_ids:
  assumes "es prefix of j"
    and "apply_operations es = Some f"
  shows "fst (f x) \<subseteq> set (added_ids es x)"
  using assms proof (induct es arbitrary: f rule: rev_induct, force)
  case (snoc x xs) thus ?case
  proof (cases x, force)
    case (Deliver e)
    moreover obtain a b where "e = (a, b)" by force
    ultimately show ?thesis
      using snoc by(case_tac b; clarsimp simp: interp_msg_def split: bind_splits,
          force split: if_split_asm simp add: op_elem_def interpret_op_def)
  qed
qed
(* Dual of apply_operations_added_ids for the second state component:
   every id there originates from some Append/Store delivery. *)
lemma (in imap) apply_operations_added_files:
  assumes "es prefix of j"
    and "apply_operations es = Some f"
  shows "snd (f x) \<subseteq> set (added_files es x)"
  using assms proof (induct es arbitrary: f rule: rev_induct, force)
  case (snoc x xs) thus ?case
  proof (cases x, force)
    case (Deliver e)
    moreover obtain a b where "e = (a, b)" by force
    ultimately show ?thesis
      using snoc by(case_tac b; clarsimp simp: interp_msg_def split: bind_splits,
          force split: if_split_asm simp add: op_elem_def interpret_op_def)
  qed
qed
(* Any id counted by added_files over a valid prefix was introduced by a
   delivered Append (with matching id) or some delivered Store. *)
lemma (in imap) Deliver_added_files:
  assumes "xs prefix of j"
    and "i \<in> set (added_files xs e)"
  shows "Deliver (i, Append i e) \<in> set xs \<or> (\<exists> mo . Deliver (i, Store e mo i) \<in> set xs)"
  using assms proof (induct xs rule: rev_induct, clarsimp)
  case (snoc x xs) thus ?case
  proof (cases x, force)
    case X: (Deliver e')
    moreover obtain a b where E: "e' = (a, b)" by force
    ultimately show ?thesis using snoc
      apply (case_tac b; clarify) apply (simp,metis prefix_of_appendD,force)
      using append_id_valid apply simp
      using E apply (metis
          added_files_Deliver_Append_diff_collapse added_files_Deliver_Append_same_collapse
          empty_iff in_set_conv_decomp list.set(1) prefix_of_appendD set_ConsD, simp)
      using E apply_operations_added_files apply (blast,simp)
      using E apply_operations_added_files
      by (metis Un_iff
          added_files_Deliver_Store_diff_collapse added_files_Deliver_Store_same_collapse empty_iff
          empty_set list.set_intros(1) prefix_of_appendD set_ConsD set_append store_id_valid)
  qed
qed
end
|
{"author": "data61", "repo": "PSL", "sha": "2a71eac0db39ad490fe4921a5ce1e4344dc43b12", "save_path": "github-repos/isabelle/data61-PSL", "path": "github-repos/isabelle/data61-PSL/PSL-2a71eac0db39ad490fe4921a5ce1e4344dc43b12/SeLFiE/Example/afp-2020-05-16/thys/IMAP-CRDT/IMAP-proof-helpers.thy"}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.